From beb15981c065ae4ed9a311077ec39909275640b6 Mon Sep 17 00:00:00 2001 From: Apple Date: Thu, 22 Sep 2016 17:57:50 +0000 Subject: [PATCH] libdispatch-703.1.4.tar.gz --- .gitmodules | 3 + INSTALL | 74 +- Makefile.am | 15 +- PATCHES | 67 +- autogen.sh | 0 config/config.h | 76 +- configure.ac | 199 +- dispatch/Makefile.am | 8 + dispatch/base.h | 118 +- dispatch/block.h | 9 +- dispatch/data.h | 14 +- dispatch/dispatch.h | 24 +- dispatch/group.h | 13 +- dispatch/introspection.h | 8 +- dispatch/io.h | 12 +- dispatch/module.map | 20 + dispatch/module.modulemap | 10 + dispatch/object.h | 109 +- dispatch/once.h | 25 +- dispatch/queue.h | 446 +- dispatch/semaphore.h | 6 +- dispatch/source.h | 69 +- dispatch/time.h | 8 +- libdispatch.xcodeproj/project.pbxproj | 1694 +++--- man/Makefile.am | 2 + man/dispatch_object.3 | 5 +- man/dispatch_queue_create.3 | 4 - man/dispatch_source_create.3 | 26 +- man/dispatch_time.3 | 2 +- os/Makefile.am | 11 +- os/firehose_buffer_private.h | 184 + os/firehose_server_private.h | 332 ++ os/linux_base.h | 94 + os/object.h | 128 +- os/object_private.h | 56 +- os/voucher_activity_private.h | 327 ++ {private => os}/voucher_private.h | 82 +- private/Makefile.am | 1 + private/benchmark.h | 9 +- private/data_private.h | 20 +- private/introspection_private.h | 2 +- private/io_private.h | 35 +- private/layout_private.h | 10 - private/mach_private.h | 277 +- private/module.modulemap | 11 + private/private.h | 93 +- private/queue_private.h | 155 +- private/source_private.h | 150 +- private/voucher_activity_private.h | 619 --- src/Makefile.am | 138 +- src/allocator.c | 27 +- src/allocator_internal.h | 10 +- src/apply.c | 194 +- src/block.cpp | 9 +- src/data.c | 68 +- src/data.m | 31 +- src/data_internal.h | 36 +- src/firehose/firehose.defs | 56 + src/firehose/firehose_buffer.c | 1147 ++++ src/firehose/firehose_buffer_internal.h | 211 + src/firehose/firehose_inline_internal.h | 502 ++ src/firehose/firehose_internal.h | 51 + src/firehose/firehose_reply.defs | 43 + src/firehose/firehose_server.c | 1137 ++++ src/firehose/firehose_server_internal.h | 71 + src/firehose/firehose_server_object.m | 47 + src/firehose/firehose_types.defs | 28 + src/init.c | 532 +- src/inline_internal.h | 2414 +++++++-- src/internal.h | 585 +- src/introspection.c | 530 +- src/introspection_internal.h | 81 +- src/io.c | 256 +- src/io_internal.h | 12 +- src/libdispatch.codes | 13 + src/object.c | 111 +- src/object.m | 195 +- src/object_internal.h | 572 +- src/once.c | 41 +- src/queue.c | 4824 +++++++++++------ src/queue_internal.h | 798 ++- src/semaphore.c | 470 +- src/semaphore_internal.h | 81 +- src/shims.h | 121 +- src/shims/atomic.h | 463 +- src/shims/atomic_sfb.h | 8 +- src/shims/hw_config.h | 10 + src/shims/linux_stubs.c | 53 + src/shims/linux_stubs.h | 101 + src/shims/lock.c | 421 ++ src/shims/lock.h | 539 ++ src/shims/time.h | 5 + src/shims/tsd.h | 216 +- src/shims/yield.h | 30 +- src/source.c | 4792 +++++++++++----- src/source_internal.h | 144 +- src/swift/Block.swift | 114 + src/swift/Data.swift | 277 + src/swift/Dispatch.apinotes | 328 ++ src/swift/Dispatch.swift | 211 + src/swift/DispatchStubs.cc | 207 + src/swift/IO.swift | 129 + src/swift/Private.swift | 474 ++ src/swift/Queue.swift | 421 ++ src/swift/Source.swift | 425 ++ src/swift/Time.swift | 110 + src/swift/Wrapper.swift | 319 ++ src/time.c | 13 + src/trace.h | 66 +- src/transform.c | 81 +- src/voucher.c | 2394 +++----- src/voucher_internal.h | 712 +-- xcodeconfig/libdispatch-dyld-stub.xcconfig | 28 + .../libdispatch-introspection.xcconfig | 2 +- 
xcodeconfig/libdispatch-mp-static.xcconfig | 30 + ...cconfig => libdispatch-up-static.xcconfig} | 2 +- xcodeconfig/libdispatch.aliases | 5 +- xcodeconfig/libdispatch.order | 11 +- xcodeconfig/libdispatch.unexport | 34 - xcodeconfig/libdispatch.xcconfig | 18 +- xcodeconfig/libdispatch_objc.aliases | 34 - xcodeconfig/libfirehose.xcconfig | 36 + xcodeconfig/libfirehose_kernel.xcconfig | 35 + xcodescripts/install-headers.sh | 1 + xcodescripts/mig-headers.sh | 6 + .../run-on-install.sh | 9 +- 126 files changed, 24596 insertions(+), 9822 deletions(-) create mode 100644 .gitmodules mode change 100644 => 100755 autogen.sh create mode 100644 dispatch/module.map create mode 100644 dispatch/module.modulemap create mode 100644 os/firehose_buffer_private.h create mode 100644 os/firehose_server_private.h create mode 100644 os/linux_base.h create mode 100644 os/voucher_activity_private.h rename {private => os}/voucher_private.h (94%) create mode 100644 private/module.modulemap delete mode 100644 private/voucher_activity_private.h create mode 100644 src/firehose/firehose.defs create mode 100644 src/firehose/firehose_buffer.c create mode 100644 src/firehose/firehose_buffer_internal.h create mode 100644 src/firehose/firehose_inline_internal.h create mode 100644 src/firehose/firehose_internal.h create mode 100644 src/firehose/firehose_reply.defs create mode 100644 src/firehose/firehose_server.c create mode 100644 src/firehose/firehose_server_internal.h create mode 100644 src/firehose/firehose_server_object.m create mode 100644 src/firehose/firehose_types.defs create mode 100644 src/libdispatch.codes create mode 100644 src/shims/linux_stubs.c create mode 100644 src/shims/linux_stubs.h create mode 100644 src/shims/lock.c create mode 100644 src/shims/lock.h create mode 100644 src/swift/Block.swift create mode 100644 src/swift/Data.swift create mode 100644 src/swift/Dispatch.apinotes create mode 100644 src/swift/Dispatch.swift create mode 100644 src/swift/DispatchStubs.cc create mode 100644 src/swift/IO.swift create mode 100644 src/swift/Private.swift create mode 100644 src/swift/Queue.swift create mode 100644 src/swift/Source.swift create mode 100644 src/swift/Time.swift create mode 100644 src/swift/Wrapper.swift create mode 100644 xcodeconfig/libdispatch-dyld-stub.xcconfig create mode 100644 xcodeconfig/libdispatch-mp-static.xcconfig rename xcodeconfig/{libdispatch-static.xcconfig => libdispatch-up-static.xcconfig} (89%) delete mode 100644 xcodeconfig/libdispatch.unexport delete mode 100644 xcodeconfig/libdispatch_objc.aliases create mode 100644 xcodeconfig/libfirehose.xcconfig create mode 100644 xcodeconfig/libfirehose_kernel.xcconfig rename xcodeconfig/libdispatch_macosx.aliases => xcodescripts/run-on-install.sh (79%) diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..e6068b4 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "libpwq"] + path = libpwq + url = https://github.com/mheily/libpwq.git diff --git a/INSTALL b/INSTALL index faf66d2..9113e4a 100644 --- a/INSTALL +++ b/INSTALL @@ -4,8 +4,9 @@ GCD is a concurrent programming framework first shipped with Mac OS X Snow Leopard. This package is an open source bundling of libdispatch, the core user space library implementing GCD. At the time of writing, support for the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow -Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Other -systems are currently unsupported. +Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. 
Support +for Linux is a work in progress (see Linux notes below). Other systems are +currently unsupported. Configuring and installing libdispatch @@ -20,10 +21,15 @@ An uncustomized install requires: The following configure options may be of general interest: ---with-apple-libc-source +--with-apple-libpthread-source - Specify the path to Apple's Libc package, so that appropriate headers can - be found and used. + Specify the path to Apple's libpthread package, so that appropriate headers + can be found and used. + +--with-apple-libplatform-source + + Specify the path to Apple's libplatform package, so that appropriate headers + can be found and used. --with-apple-libclosure-source @@ -38,52 +44,48 @@ The following configure options may be of general interest: --with-blocks-runtime On systems where -fblocks is supported, specify an additional library path - in which libBlocksRuntime can be found. This is not required on Mac OS X, + in which libBlocksRuntime can be found. This is not required on OS X, where the Blocks runtime is included in libSystem, but is required on FreeBSD. The following options are likely to only be useful when building libdispatch on -Mac OS X as a replacement for /usr/lib/system/libdispatch.dylib: +OS X as a replacement for /usr/lib/system/libdispatch.dylib: --with-apple-objc4-source Specify the path to Apple's objc4 package, so that appropriate headers can be found and used. ---with-apple-libauto-source - - Specify the path to Apple's libauto package, so that appropriate headers - can be found and used. - --disable-libdispatch-init-constructor Do not tag libdispatch's init routine as __constructor, in which case it must be run manually before libdispatch routines can be called. This is the - default when building on Mac OS X. For /usr/lib/system/libdispatch.dylib + default when building on OS X. For /usr/lib/system/libdispatch.dylib the init routine is called automatically during process start. --enable-apple-tsd-optimizations Use a non-portable allocation scheme for pthread per-thread data (TSD) keys - when building libdispatch for /usr/lib/system on Mac OS X. This should not - be used on other OS's, or on Mac OS X when building a stand-alone library. + when building libdispatch for /usr/lib/system on OS X. This should not + be used on other OS's, or on OS X when building a stand-alone library. 
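A quick way to verify that a finished build links and runs with Blocks support is a throwaway test program along the following lines (the file name smoke.c and the queue label are arbitrary examples, not part of this package):

    /* smoke.c - check that libdispatch and the Blocks runtime link and run */
    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
            /* NULL attribute requests a plain serial queue. */
            dispatch_queue_t q = dispatch_queue_create("smoke.test", NULL);
            dispatch_sync(q, ^{ printf("block ran\n"); });
            dispatch_release(q);
            return 0;
    }

Compile with, e.g., clang -fblocks smoke.c -ldispatch -o smoke, adding -lBlocksRuntime on systems such as FreeBSD or Linux where the Blocks runtime is a separate library.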
Typical configuration commands The following command lines create the configuration required to build -libdispatch for /usr/lib/system on OS X MountainLion: +libdispatch for /usr/lib/system on OS X El Capitan: - sh autogen.sh + clangpath=$(dirname `xcrun --find clang`) + sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc" + LIBTOOLIZE=glibtoolize sh autogen.sh cflags='-arch x86_64 -arch i386 -g -Os' ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \ - --prefix=/usr --libdir=/usr/lib/system \ - --disable-dependency-tracking --disable-static \ + --prefix=/usr --libdir=/usr/lib/system --disable-static \ --enable-apple-tsd-optimizations \ - --with-apple-libc-source=/path/to/10.8.0/Libc-825.24 \ - --with-apple-libclosure-source=/path/to/10.8.0/libclosure-59 \ - --with-apple-xnu-source=/path/to/10.8.0/xnu-2050.7.9 \ - --with-apple-objc4-source=/path/to/10.8.0/objc4-532 \ - --with-apple-libauto-source=/path/to/10.8.0/libauto-185.1 + --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \ + --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \ + --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \ + --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \ + --with-apple-objc4-source=/path/to/10.11.0/objc4-680 make check Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with @@ -92,3 +94,27 @@ clang and blocks support: sh autogen.sh ./configure CC=clang --with-blocks-runtime=/usr/local/lib make check + +Instructions for building on Linux. Initial focus is on Ubuntu 15.04. +Prepare your system + 1. Install compiler, autotools + sudo apt-get install clang + sudo apt-get install autoconf libtool pkg-config + 2. Install dtrace (to generate provider.h) + sudo apt-get install systemtap-sdt-dev + 3. Install libdispatch pre-reqs + sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev + +Initialize git submodules: + We are using git submodules to incorporate a specific revision of the + upstream pthread_workqueue library into the build. + git submodule init + git submodule update + +Build: + sh autogen.sh + ./configure + make + +Note: the build currently fails building tests, but libdispatch.so should + build successfully. diff --git a/Makefile.am b/Makefile.am index 72f4322..cc01c7c 100644 --- a/Makefile.am +++ b/Makefile.am @@ -4,14 +4,27 @@ ACLOCAL_AMFLAGS = -I m4 +if BUILD_OWN_PTHREAD_WORKQUEUES SUBDIRS= \ dispatch \ + libpwq \ man \ os \ private \ - src + src \ + tests +else +SUBDIRS= \ + dispatch \ + man \ + os \ + private \ + src \ + tests +endif EXTRA_DIST= \ + README.md \ LICENSE \ PATCHES \ autogen.sh \ diff --git a/PATCHES b/PATCHES index 4f88387..28f7c52 100644 --- a/PATCHES +++ b/PATCHES @@ -1,14 +1,21 @@ The libdispatch project exists in a parallel open source repository at: - http://svn.macosforge.org/repository/libdispatch/trunk + http://github.com/apple/swift-corelibs-libdispatch + +Externally contributed changes are synchronized back to the internal repository +via pull request of the result of `git am` of the contributed patch series. -Externally committed revisions are periodically synchronized back to the -internal repository (this repository). +Internal changes are synchronized from the internal darwin/trunk branch to the +external repository via `git am` on the github darwin/trunk branch and merge to +github master. Key: APPLIED: change set was applied to internal repository. INTERNAL: change set originated internally (i.e. already applied). SKIPPED: change set was skipped.
+svn revisions until r218 from legacy open source repository at + http://svn.macosforge.org/repository/libdispatch/trunk + [ 1] SKIPPED [ 2] SKIPPED [ 3] INTERNAL rdar://problem/7148356 @@ -192,3 +199,57 @@ Key: [ 181] [ 182] [ 183] INTERNAL rdar://problem/7581831 +[ 202] INTERNAL libdispatch-187.5 +[ 212] INTERNAL libdispatch-228.18 +[ 213] INTERNAL rdar://problem/11754320 +[ 216] INTERNAL libdispatch-339.1.9 +[ 217] INTERNAL libdispatch-442.1.4 +[ 218] INTERNAL libdispatch-500.1.5 + +github commits starting with 29bdc2f from + + http://github.com/apple/swift-corelibs-libdispatch + +[29bdc2f] INTERNAL libdispatch-500.1.5 +[a60acd6] APPLIED rdar://23661056 +[39ac720] APPLIED rdar://23705483 +[acd56f6] APPLIED rdar://23754944 +[394d9a1] APPLIED rdar://23772602 +[3691f26] APPLIED rdar://23868354 +[8904f45] APPLIED rdar://23868354 +[6dbebd6] APPLIED rdar://23868354 +[b2ccfeb] APPLIED rdar://23868354 +[e7ca00f] APPLIED rdar://23868354 +[35eb408] APPLIED rdar://25159995 +[32411c2] APPLIED rdar://25159995 +[31586d5] APPLIED rdar://25159995 +[50faff5] APPLIED rdar://25159995 +[3ce4e3d] APPLIED rdar://25159995 +[b647aee] APPLIED rdar://25159995 +[ab7e16c] APPLIED rdar://25159995 +[cef2960] APPLIED rdar://25159995 +[dfa43cd] APPLIED rdar://25159995 +[8b9c3a9] APPLIED rdar://25159995 +[fefb6cf] APPLIED rdar://25159995 +[1a9c57f] APPLIED rdar://25159995 +[c04488a] APPLIED rdar://25159995 +[f1d58d1] APPLIED rdar://25159995 +[be83e85] APPLIED rdar://25159995 +[79fbb13] APPLIED rdar://25159995 +[6ead519] APPLIED rdar://25159995 +[1fa1513] APPLIED rdar://25159995 +[4a6ec51] APPLIED rdar://25159995 +[bc16cc9] APPLIED rdar://25159995 +[954ace4] APPLIED rdar://25159995 +[5ea30b5] APPLIED rdar://26822213 +[9f1e778] APPLIED rdar://26822213 +[3339b81] APPLIED rdar://26822213 +[4fa8d8d] APPLIED rdar://26822213 +[e922531] APPLIED rdar://26822213 +[195cbcf] APPLIED rdar://27303844 +[5b893c8] APPLIED rdar://27303844 +[92689ed] APPLIED rdar://27303844 +[ecc14fa] APPLIED rdar://27303844 +[2dbf83c] APPLIED rdar://27303844 +[78b9e82] APPLIED rdar://27303844 +[2c0e5ee] APPLIED rdar://27303844 diff --git a/autogen.sh b/autogen.sh old mode 100644 new mode 100755 diff --git a/config/config.h b/config/config.h index cac6ac9..ca3a1db 100644 --- a/config/config.h +++ b/config/config.h @@ -13,6 +13,10 @@ don't. */ #define HAVE_DECL_FD_COPY 1 +/* Define to 1 if you have the declaration of `NOTE_LOWAT', and to 0 if you + don't. */ +#define HAVE_DECL_NOTE_LOWAT 1 + /* Define to 1 if you have the declaration of `NOTE_NONE', and to 0 if you don't. */ #define HAVE_DECL_NOTE_NONE 1 @@ -21,6 +25,10 @@ don't. */ #define HAVE_DECL_NOTE_REAP 1 +/* Define to 1 if you have the declaration of `NOTE_REVOKE', and to 0 if you + don't. */ +#define HAVE_DECL_NOTE_REVOKE 1 + /* Define to 1 if you have the declaration of `NOTE_SIGNAL', and to 0 if you don't. */ #define HAVE_DECL_NOTE_SIGNAL 1 @@ -79,6 +87,9 @@ /* Define to 1 if you have the `mach_absolute_time' function. */ #define HAVE_MACH_ABSOLUTE_TIME 1 +/* Define to 1 if you have the `mach_port_construct' function. */ +#define HAVE_MACH_PORT_CONSTRUCT 1 + /* Define to 1 if you have the `malloc_create_zone' function. */ #define HAVE_MALLOC_CREATE_ZONE 1 @@ -106,17 +117,20 @@ /* Define to 1 if you have the header file. */ /* #undef HAVE_PTHREAD_NP_H */ +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_QOS_H 1 + /* Define if pthread work queues are present */ #define HAVE_PTHREAD_WORKQUEUES 1 -/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. 
*/ -#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_WORKQUEUE_H 1 -/* Define to 1 if you have the `_pthread_workqueue_init' function. */ -#define HAVE__PTHREAD_WORKQUEUE_INIT 1 +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_WORKQUEUE_PRIVATE_H 1 -/* Define to 1 if you have the header file. */ -#define HAVE_PTHREAD_QOS_H 1 +/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ +#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 @@ -151,8 +165,10 @@ /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 -/* Define to the sub-directory in which libtool stores uninstalled libraries. - */ +/* Define to 1 if you have the `_pthread_workqueue_init' function. */ +#define HAVE__PTHREAD_WORKQUEUE_INIT 1 + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ #define LT_OBJDIR ".libs/" /* Name of package */ @@ -165,13 +181,16 @@ #define PACKAGE_NAME "libdispatch" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "libdispatch 1.2" +#define PACKAGE_STRING "libdispatch 1.3" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "libdispatch" +/* Define to the home page for this package. */ +#define PACKAGE_URL "http://libdispatch.macosforge.org" + /* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2" +#define PACKAGE_VERSION "1.3" /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 @@ -188,20 +207,30 @@ /* Define to use POSIX semaphores */ /* #undef USE_POSIX_SEM */ -/* Version number of package */ -#define VERSION "1.2" - -/* Define to 1 if on AIX 3. - System headers sometimes define this. - We just want to avoid a redefinition error message. */ +/* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE -/* # undef _ALL_SOURCE */ +# define _ALL_SOURCE 1 #endif - /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 #endif +/* Enable threading extensions on Solaris. */ +#ifndef _POSIX_PTHREAD_SEMANTICS +# define _POSIX_PTHREAD_SEMANTICS 1 +#endif +/* Enable extensions on HP NonStop. */ +#ifndef _TANDEM_SOURCE +# define _TANDEM_SOURCE 1 +#endif +/* Enable general extensions on Solaris. */ +#ifndef __EXTENSIONS__ +# define __EXTENSIONS__ 1 +#endif + + +/* Version number of package */ +#define VERSION "1.3" /* Define to 1 if on MINIX. */ /* #undef _MINIX */ @@ -215,14 +244,3 @@ /* Define if using Darwin $NOCANCEL */ #define __DARWIN_NON_CANCELABLE 1 - -/* Enable extensions on Solaris. */ -#ifndef __EXTENSIONS__ -# define __EXTENSIONS__ 1 -#endif -#ifndef _POSIX_PTHREAD_SEMANTICS -# define _POSIX_PTHREAD_SEMANTICS 1 -#endif -#ifndef _TANDEM_SOURCE -# define _TANDEM_SOURCE 1 -#endif diff --git a/configure.ac b/configure.ac index d0b626d..e5c7c5e 100644 --- a/configure.ac +++ b/configure.ac @@ -2,11 +2,11 @@ # When this file changes, rerun autogen.sh. 
# -AC_PREREQ(2.59) -AC_INIT([libdispatch], [1.2], [libdispatch@macosforge.org], [libdispatch]) +AC_PREREQ(2.69) +AC_INIT([libdispatch], [1.3], [libdispatch@macosforge.org], [libdispatch], [http://libdispatch.macosforge.org]) AC_REVISION([$$]) AC_CONFIG_AUX_DIR(config) -AC_CONFIG_HEADER([config/config.h]) +AC_CONFIG_HEADER([config/config_ac.h]) AC_CONFIG_MACRO_DIR([m4]) ac_clean_files=a.out.dSYM AM_MAINTAINER_MODE @@ -14,32 +14,42 @@ AM_MAINTAINER_MODE AC_PROG_CC([clang gcc cc]) AC_PROG_CXX([clang++ g++ c++]) AC_PROG_OBJC([clang gcc cc]) +AC_PROG_OBJCXX([clang++ g++ c++]) # # On Mac OS X, some required header files come from other source packages; # allow specifying where those are. # -AC_ARG_WITH([apple-libc-source], - [AS_HELP_STRING([--with-apple-libc-source], - [Specify path to Apple Libc source])], [ - apple_libc_source_pthreads_path=${withval}/pthreads - CPPFLAGS="$CPPFLAGS -I$apple_libc_source_pthreads_path" +AC_ARG_WITH([apple-libpthread-source], + [AS_HELP_STRING([--with-apple-libpthread-source], + [Specify path to Apple libpthread source])], [ + apple_libpthread_source_path=${withval} + CPPFLAGS="$CPPFLAGS -isystem $apple_libpthread_source_path" +]) + +AC_ARG_WITH([apple-libplatform-source], + [AS_HELP_STRING([--with-apple-libplatform-source], + [Specify path to Apple libplatform source])], [ + apple_libplatform_source_include_path=${withval}/include + CPPFLAGS="$CPPFLAGS -isystem $apple_libplatform_source_include_path" ]) AC_ARG_WITH([apple-libclosure-source], [AS_HELP_STRING([--with-apple-libclosure-source], [Specify path to Apple libclosure source])], [ apple_libclosure_source_path=${withval} - CPPFLAGS="$CPPFLAGS -I$apple_libclosure_source_path" + CPPFLAGS="$CPPFLAGS -isystem $apple_libclosure_source_path" ]) AC_ARG_WITH([apple-xnu-source], [AS_HELP_STRING([--with-apple-xnu-source], [Specify path to Apple XNU source])], [ + apple_xnu_source_libsyscall_path=${withval}/libsyscall + apple_xnu_source_libproc_path=${withval}/libsyscall/wrappers/libproc apple_xnu_source_libkern_path=${withval}/libkern apple_xnu_source_bsd_path=${withval}/bsd apple_xnu_source_osfmk_path=${withval}/osfmk - CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path" + CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path -isystem $apple_xnu_source_libsyscall_path -isystem $apple_xnu_source_libproc_path " ]) AC_ARG_WITH([apple-objc4-source], @@ -48,19 +58,12 @@ AC_ARG_WITH([apple-objc4-source], apple_objc4_source_runtime_path=${withval}/runtime ]) -AC_ARG_WITH([apple-libauto-source], - [AS_HELP_STRING([--with-apple-libauto-source], - [Specify path to Apple libauto source])], [ - apple_libauto_source_path=${withval} - CPPFLAGS="$CPPFLAGS -I$apple_libauto_source_path" -]) - AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders, [AS_IF([test -d /System/Library/Frameworks/System.framework/PrivateHeaders], [dispatch_cv_system_privateheaders=yes], [dispatch_cv_system_privateheaders=no])] ) AS_IF([test "x$dispatch_cv_system_privateheaders" != "xno"], - [CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/System.framework/PrivateHeaders"] + [CPPFLAGS="$CPPFLAGS -isystem /System/Library/Frameworks/System.framework/PrivateHeaders"] ) # @@ -91,16 +94,79 @@ AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"], [Define to use non-portable pthread TSD optimizations for Mac OS X)])] ) +AC_CANONICAL_TARGET + +# +# Enable building Swift overlay support into libdispatch +# +AC_ARG_WITH([swift-toolchain], 
+ [AS_HELP_STRING([--with-swift-toolchain], [Specify path to Swift toolchain])], + [swift_toolchain_path=${withval} + AC_DEFINE(HAVE_SWIFT, 1, [Define if building for Swift]) + SWIFTC="$swift_toolchain_path/bin/swiftc" + case $target_os in + linux*) + os_string="linux" + ;; + *) + os_string=$target_os + ;; + esac + SWIFT_LIBDIR="$swift_toolchain_path/lib/swift/$os_string/$target_cpu" + have_swift=true], + [have_swift=false] +) +AM_CONDITIONAL(HAVE_SWIFT, $have_swift) +AC_SUBST([SWIFTC]) +AC_SUBST([SWIFT_LIBDIR]) + +# +# Enable use of gold linker when building the Swift overlay +# to avoid a symbol relocation issue. +# Ultimately the request to use gold should be passed in as an arg +# +AC_CHECK_PROG(use_gold_linker, ld.gold, true, false) +AM_CONDITIONAL(USE_GOLD_LINKER, $use_gold_linker) + +# +# Enable __thread based TSD on platforms where it is efficient +# Allow override based on command line argument to configure +# +AC_ARG_ENABLE([thread-local-storage], + [AS_HELP_STRING([--enable-thread-local-storage], + [Enable usage of thread local storage via __thread])],, + [case $target_os in + linux*) + enable_thread_local_storage=yes + ;; + *) + enable_thread_local_storage=no + esac] +) +AS_IF([test "x$enable_thread_local_storage" = "xyes"], + [AC_DEFINE(DISPATCH_USE_THREAD_LOCAL_STORAGE, 1, + [Enable usage of thread local storage via __thread])] +) + AC_USE_SYSTEM_EXTENSIONS -AM_INIT_AUTOMAKE([foreign no-dependencies]) +AM_INIT_AUTOMAKE([foreign no-dependencies subdir-objects]) LT_INIT([disable-static]) AC_PROG_INSTALL AC_PATH_PROGS(MIG, mig) +AC_PATH_PROG(DTRACE, dtrace) +AS_IF([test "x$DTRACE" != "x"], [use_dtrace=true],[ + use_dtrace=false + CPPFLAGS="$CPPFLAGS -DDISPATCH_USE_DTRACE=0" +]) +AM_CONDITIONAL(USE_DTRACE, $use_dtrace) AC_PATH_PROG(LEAKS, leaks) AS_IF([test "x$LEAKS" != "x"], - [AC_DEFINE(HAVE_LEAKS, 1, [Define if Apple leaks program is present])] + [AC_DEFINE(HAVE_LEAKS, 1, [Define if Apple leaks program is present]) + have_leaks=true], + [have_leaks=false] ) +AM_CONDITIONAL(HAVE_LEAKS, $have_leaks) DISPATCH_C_ATOMIC_BUILTINS @@ -124,25 +190,66 @@ AC_CHECK_HEADER(sys/event.h, [], [PKG_CHECK_MODULES(KQUEUE, libkqueue)] ) +AC_CHECK_FUNCS([strlcpy getprogname], [], + [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[ + AC_DEFINE(HAVE_STRLCPY, 1, []) + AC_DEFINE(HAVE_GETPROGNAME, 1, []) + ])], [#include ] +) + # # Checks for header files. # AC_HEADER_STDC -AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h sys/guarded.h libproc_internal.h]) +AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h sys/guarded.h fcntl.h]) -# hack for pthread_machdep.h's #include -AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ +# hack for pthread/private headers +AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_path"], [ saveCPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I." + ln -fsh "$apple_libpthread_source_path"/private/tsd_private.h pthread_machdep.h + ln -fsh "$apple_libpthread_source_path"/private pthread ln -fsh "$apple_xnu_source_osfmk_path" System + mkdir -p mach && ln -fsh "$apple_xnu_source_osfmk_path"/mach/coalition.h mach ]) -AC_CHECK_HEADERS([pthread_machdep.h]) -AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ - rm -f System +AC_CHECK_HEADERS([pthread_machdep.h pthread/qos.h]) + +# pthread_workqueues. +# Look for own version first, then system version. 
+AS_IF([test -f $srcdir/libpwq/configure.ac], + [AC_DEFINE(BUILD_OWN_PTHREAD_WORKQUEUES, 1, [Define if building pthread work queues from source]) + ac_configure_args="--disable-libpwq-install $ac_configure_args" + AC_CONFIG_SUBDIRS([libpwq]) + build_own_pthread_workqueues=true + AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) + have_pthread_workqueues=true], + [build_own_pthread_workqueues=false + AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h], + [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) + have_pthread_workqueues=true], + [have_pthread_workqueues=false] + )] +) +AM_CONDITIONAL(BUILD_OWN_PTHREAD_WORKQUEUES, $build_own_pthread_workqueues) +AM_CONDITIONAL(HAVE_PTHREAD_WORKQUEUES, $have_pthread_workqueues) + +AC_CHECK_HEADERS([libproc_internal.h], [], [], [#include ]) +AC_CHECK_FUNCS([pthread_workqueue_setdispatch_np _pthread_workqueue_init]) +AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_path"], [ + rm -f pthread_machdep.h pthread System mach/coalition.h CPPFLAGS="$saveCPPFLAGS" + AC_CONFIG_COMMANDS([src/pthread_machdep.h], + [ln -fsh "$apple_libpthread_source_path"/private/tsd_private.h src/pthread_machdep.h], + [apple_libpthread_source_path="$apple_libpthread_source_path"]) + AC_CONFIG_COMMANDS([src/pthread], + [ln -fsh "$apple_libpthread_source_path"/private src/pthread], + [apple_libpthread_source_path="$apple_libpthread_source_path"]) AC_CONFIG_COMMANDS([src/System], [ln -fsh "$apple_xnu_source_osfmk_path" src/System], [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) + AC_CONFIG_COMMANDS([src/mach/coalition.h], + [ln -fsh "$apple_xnu_source_osfmk_path"/mach/coalition.h src/mach], + [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) ]) # hack for xnu/bsd/sys/event.h EVFILT_SOCK declaration AS_IF([test -n "$apple_xnu_source_bsd_path"], [ @@ -193,28 +300,20 @@ AC_CHECK_HEADER([mach/mach.h], [ have_mach=true], [have_mach=false] ) AM_CONDITIONAL(USE_MIG, $have_mach) - -# -# We use the availability of pthread_workqueue.h to decide whether to compile -# in support for pthread work queues. -# -AC_CHECK_HEADER([pthread_workqueue.h], - [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present])] -) -AC_CHECK_FUNCS([pthread_workqueue_setdispatch_np]) +AC_CHECK_FUNCS([mach_port_construct]) # # Find functions and declarations we care about. 
# AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [], [[#include <time.h>]]) -AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_SIGNAL], [], [], +AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [], [[#include <sys/event.h>]]) AC_CHECK_DECLS([FD_COPY], [], [], [[#include <sys/select.h>]]) AC_CHECK_DECLS([SIGEMT], [], [], [[#include <signal.h>]]) AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA], [], [], [[#include <sys/mount.h>]]) AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include <errno.h>]]) -AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf getprogname]) +AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf]) AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED], [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false], @@ -273,15 +372,17 @@ AS_IF([test "x$dispatch_cv_cc_omit_leaf_fp" != "xno"], [ ]) AC_SUBST([OMIT_LEAF_FP_FLAGS]) -AC_CACHE_CHECK([for darwin linker], [dispatch_cv_ld_darwin], [ - saveLDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 1.2.3 -current_version 4.5.6 -dead_strip" - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - extern int foo; int foo;], [foo = 0;])], - [dispatch_cv_ld_darwin="yes"], [dispatch_cv_ld_darwin="no"]) - LDFLAGS="$saveLDFLAGS" +AS_IF([test "x$have_mach" = "xtrue"], [ + AC_CACHE_CHECK([for darwin linker], [dispatch_cv_ld_darwin], [ + saveLDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 1.2.3 -current_version 4.5.6 -dead_strip" + AC_LINK_IFELSE([AC_LANG_PROGRAM([ + extern int foo; int foo;], [foo = 0;])], + [dispatch_cv_ld_darwin="yes"], [dispatch_cv_ld_darwin="no"]) + LDFLAGS="$saveLDFLAGS" + ]) ]) -AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" != "xno"]) +AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" == "xyes"]) # # Temporary: some versions of clang do not mark __builtin_trap() as @@ -295,5 +396,11 @@ AC_COMPILE_IFELSE( # # Generate Makefiles. # -AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Makefile src/Makefile]) +AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Makefile src/Makefile tests/Makefile]) + +# +# Generate testsuite links +# +AC_CONFIG_LINKS([tests/dispatch:$top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh]) + AC_OUTPUT diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am index 6dc850b..53ea598 100644 --- a/dispatch/Makefile.am +++ b/dispatch/Makefile.am @@ -2,10 +2,15 @@ # # +if HAVE_SWIFT +dispatchdir=${prefix}/lib/swift/dispatch +else dispatchdir=$(includedir)/dispatch +endif dispatch_HEADERS= \ base.h \ + block.h \ data.h \ dispatch.h \ group.h \ @@ -18,3 +23,6 @@ dispatch_HEADERS= \ source.h \ time.h +if HAVE_SWIFT +dispatch_HEADERS+=module.map +endif diff --git a/dispatch/base.h b/dispatch/base.h index 01d5ec5..8adfb0b 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -25,6 +25,22 @@ #error "Please #include <dispatch/dispatch.h> instead of this file directly."
#endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif +#ifndef __has_include +#define __has_include(x) 0 +#endif +#ifndef __has_feature +#define __has_feature(x) 0 +#endif +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + #if __GNUC__ #define DISPATCH_NORETURN __attribute__((__noreturn__)) #define DISPATCH_NOTHROW __attribute__((__nothrow__)) @@ -48,6 +64,7 @@ #define DISPATCH_MALLOC __attribute__((__malloc__)) #define DISPATCH_ALWAYS_INLINE __attribute__((__always_inline__)) #define DISPATCH_UNAVAILABLE __attribute__((__unavailable__)) +#define DISPATCH_UNAVAILABLE_MSG(msg) __attribute__((__unavailable__(msg))) #else /*! @parseOnly */ #define DISPATCH_NORETURN @@ -83,6 +100,24 @@ #define DISPATCH_ALWAYS_INLINE /*! @parseOnly */ #define DISPATCH_UNAVAILABLE +/*! @parseOnly */ +#define DISPATCH_UNAVAILABLE_MSG(msg) +#endif + +#ifdef __linux__ +#define DISPATCH_LINUX_UNAVAILABLE() \ + DISPATCH_UNAVAILABLE_MSG( \ + "This interface is unavailable on linux systems") +#else +#define DISPATCH_LINUX_UNAVAILABLE() +#endif + +#ifndef DISPATCH_ALIAS_V2 +#if TARGET_OS_MAC +#define DISPATCH_ALIAS_V2(sym) __asm__("_" #sym "$V2") +#else +#define DISPATCH_ALIAS_V2(sym) +#endif #endif #if TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) && \ @@ -108,23 +143,58 @@ #if __GNUC__ #define DISPATCH_EXPECT(x, v) __builtin_expect((x), (v)) +#define dispatch_compiler_barrier() __asm__ __volatile__("" ::: "memory") #else #define DISPATCH_EXPECT(x, v) (x) +#define dispatch_compiler_barrier() do { } while (0) +#endif + +#if __has_attribute(not_tail_called) +#define DISPATCH_NOT_TAIL_CALLED __attribute__((__not_tail_called__)) +#else +#define DISPATCH_NOT_TAIL_CALLED +#endif + +#if __has_builtin(__builtin_assume) +#define DISPATCH_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr) +#else +#define DISPATCH_COMPILER_CAN_ASSUME(expr) ((void)(expr)) +#endif + +#if __has_attribute(noescape) +#define DISPATCH_NOESCAPE __attribute__((__noescape__)) +#else +#define DISPATCH_NOESCAPE +#endif + +#if __has_feature(assume_nonnull) +#define DISPATCH_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define DISPATCH_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define DISPATCH_ASSUME_NONNULL_BEGIN +#define DISPATCH_ASSUME_NONNULL_END +#endif + +#if !__has_feature(nullability) +#ifndef _Nullable +#define _Nullable +#endif +#ifndef _Nonnull +#define _Nonnull +#endif +#ifndef _Null_unspecified +#define _Null_unspecified +#endif #endif #ifndef DISPATCH_RETURNS_RETAINED_BLOCK -#if defined(__has_attribute) #if __has_attribute(ns_returns_retained) #define DISPATCH_RETURNS_RETAINED_BLOCK __attribute__((__ns_returns_retained__)) #else #define DISPATCH_RETURNS_RETAINED_BLOCK #endif -#else -#define DISPATCH_RETURNS_RETAINED_BLOCK -#endif #endif -#if defined(__has_feature) && defined(__has_extension) #if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) #define DISPATCH_ENUM(name, type, ...) \ typedef enum : type { __VA_ARGS__ } name##_t @@ -132,17 +202,47 @@ #define DISPATCH_ENUM(name, type, ...) \ enum { __VA_ARGS__ }; typedef type name##_t #endif + #if __has_feature(enumerator_attributes) #define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING +#define DISPATCH_ENUM_AVAILABLE(os, version) __##os##_AVAILABLE(version) #else #define DISPATCH_ENUM_AVAILABLE_STARTING(...) +#define DISPATCH_ENUM_AVAILABLE(...) 
#endif + +#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ + SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#define DISPATCH_SWIFT3_OVERLAY 1 #else -#define DISPATCH_ENUM(name, type, ...) \ - enum { __VA_ARGS__ }; typedef type name##_t -#define DISPATCH_ENUM_AVAILABLE_STARTING(...) +#define DISPATCH_SWIFT3_OVERLAY 0 +#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 + +#if __has_feature(attribute_availability_swift) +#define DISPATCH_SWIFT_UNAVAILABLE(_msg) \ + __attribute__((__availability__(swift, unavailable, message=_msg))) +#else +#define DISPATCH_SWIFT_UNAVAILABLE(_msg) +#endif + +#if DISPATCH_SWIFT3_OVERLAY +#define DISPATCH_SWIFT3_UNAVAILABLE(_msg) DISPATCH_SWIFT_UNAVAILABLE(_msg) +#else +#define DISPATCH_SWIFT3_UNAVAILABLE(_msg) +#endif + +#if __has_attribute(swift_private) +#define DISPATCH_REFINED_FOR_SWIFT __attribute__((__swift_private__)) +#else +#define DISPATCH_REFINED_FOR_SWIFT +#endif + +#if __has_attribute(swift_name) +#define DISPATCH_SWIFT_NAME(_name) __attribute__((__swift_name__(#_name))) +#else +#define DISPATCH_SWIFT_NAME(_name) #endif -typedef void (*dispatch_function_t)(void *); +typedef void (*dispatch_function_t)(void *_Nullable); #endif diff --git a/dispatch/block.h b/dispatch/block.h index e82f665..cd56b23 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -32,6 +32,8 @@ * @group Dispatch block objects */ +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -270,7 +272,8 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW void -dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block); +dispatch_block_perform(dispatch_block_flags_t flags, + DISPATCH_NOESCAPE dispatch_block_t block); /*! * @function dispatch_block_wait @@ -288,7 +291,7 @@ dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block); * dispatch block object may either be waited on once and executed once, * or it may be executed any number of times. The behavior of any other * combination is undefined. Submission to a dispatch queue counts as an - * execution, even if cancelation (dispatch_block_cancel) means the block's + * execution, even if cancellation (dispatch_block_cancel) means the block's * code never runs. * * The result of calling this function from multiple threads simultaneously @@ -417,6 +420,8 @@ dispatch_block_testcancel(dispatch_block_t block); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif // __BLOCKS__ #endif // __DISPATCH_BLOCK__ diff --git a/dispatch/data.h b/dispatch/data.h index d656584..7ceee06 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @header @@ -39,7 +41,7 @@ __BEGIN_DECLS * @typedef dispatch_data_t * A dispatch object representing memory regions. */ -DISPATCH_DECL(dispatch_data); +DISPATCH_DATA_DECL(dispatch_data); /*! * @var dispatch_data_empty @@ -120,8 +122,8 @@ DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create(const void *buffer, size_t size, - dispatch_queue_t queue, - dispatch_block_t destructor); + dispatch_queue_t _Nullable queue, + dispatch_block_t _Nullable destructor); #endif /* __BLOCKS__ */ /*! 
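For illustration, a typical dispatch_data_create() call under the nullability annotations above passes NULL for the queue and DISPATCH_DATA_DESTRUCTOR_DEFAULT (also NULL) for the destructor, so that libdispatch copies and owns the buffer. A minimal sketch:

    #include <dispatch/dispatch.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *msg = "hello, dispatch_data";
            /* NULL queue + default destructor: the bytes are copied and
             * managed internally by libdispatch. */
            dispatch_data_t d = dispatch_data_create(msg, strlen(msg),
                            NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
            printf("%zu bytes\n", dispatch_data_get_size(d));
            dispatch_release(d);
            return 0;
    }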
@@ -161,8 +163,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_map(dispatch_data_t data, - const void **buffer_ptr, - size_t *size_ptr); + const void *_Nullable *_Nullable buffer_ptr, + size_t *_Nullable size_ptr); /*! * @function dispatch_data_create_concat @@ -275,4 +277,6 @@ dispatch_data_copy_region(dispatch_data_t data, __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_DATA__ */ diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index bb32bdf..a26b951 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -24,8 +24,22 @@ #ifdef __APPLE__ #include #include -#endif +#else +#define __OSX_AVAILABLE_STARTING(x, y) +#define __OSX_AVAILABLE_BUT_DEPRECATED(...) +#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(...) +#define __OSX_AVAILABLE(...) +#define __IOS_AVAILABLE(...) +#define __TVOS_AVAILABLE(...) +#define __WATCHOS_AVAILABLE(...) +#define __OSX_DEPRECATED(...) +#define __IOS_DEPRECATED(...) +#define __TVOS_DEPRECATED(...) +#define __WATCHOS_DEPRECATED(...) +#endif // __APPLE__ + #include +#include #include #include #include @@ -33,11 +47,13 @@ #include #include -#ifndef __OSX_AVAILABLE_STARTING -#define __OSX_AVAILABLE_STARTING(x, y) +#if defined(__linux__) && defined(__has_feature) +#if __has_feature(modules) +#include // for off_t (to match Glibc.modulemap) +#endif #endif -#define DISPATCH_API_VERSION 20141121 +#define DISPATCH_API_VERSION 20160712 #ifndef __DISPATCH_BUILDING_DISPATCH__ diff --git a/dispatch/group.h b/dispatch/group.h index 77420c1..c50ad89 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @typedef dispatch_group_t * @abstract @@ -119,7 +121,7 @@ DISPATCH_NOTHROW void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -132,8 +134,7 @@ dispatch_group_async_f(dispatch_group_t group, * @discussion * This function waits for the completion of the blocks associated with the * given dispatch group, and returns after all blocks have completed or when - * the specified timeout has elapsed. When a timeout occurs, the group is - * restored to its original state. + * the specified timeout has elapsed. * * This function will return immediately if there are no blocks associated * with the dispatch group (i.e. the group is empty). @@ -229,7 +230,7 @@ DISPATCH_NOTHROW void dispatch_group_notify_f(dispatch_group_t group, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -260,7 +261,7 @@ dispatch_group_enter(dispatch_group_t group); * * @discussion * Calling this function indicates block has completed and left the dispatch - * groupJ by a means other than dispatch_group_async(). + * group by a means other than dispatch_group_async(). * * @param group * The dispatch group to update. @@ -273,4 +274,6 @@ dispatch_group_leave(dispatch_group_t group); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/introspection.h b/dispatch/introspection.h index d20d90a..9cfb4d1 100644 --- a/dispatch/introspection.h +++ b/dispatch/introspection.h @@ -23,6 +23,8 @@ #include +DISPATCH_ASSUME_NONNULL_BEGIN + /*! 
* @header * @@ -152,7 +154,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, - void *context, dispatch_function_t function); + void *_Nullable context, dispatch_function_t function); /*! * @function dispatch_introspection_hook_queue_callout_end @@ -177,8 +179,10 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue, - void *context, dispatch_function_t function); + void *_Nullable context, dispatch_function_t function); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/io.h b/dispatch/io.h index d53d488..5814bc0 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @header @@ -38,7 +40,7 @@ __BEGIN_DECLS * The application may set policies on the dispatch I/O channel to indicate the * desired frequency of I/O handlers for long-running operations. * - * Dispatch I/O also provides a memory managment model for I/O buffers that + * Dispatch I/O also provides a memory management model for I/O buffers that * avoids unnecessary copying of data when pipelined between channels. Dispatch * I/O monitors the overall memory pressure and I/O access patterns for the * application to optimize resource utilization. @@ -145,7 +147,7 @@ void dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, - void (^handler)(dispatch_data_t data, int error)); + void (^handler)(dispatch_data_t _Nullable data, int error)); #endif /* __BLOCKS__ */ /*! @@ -168,7 +170,7 @@ DISPATCH_DECL(dispatch_io); * bytes. Read and write operations on a channel of this type are performed * serially (in order of creation) and read/write data at the file pointer * position that is current at the time the operation starts executing. - * Operations of different type (read vs. write) may be perfomed simultaneously. + * Operations of different type (read vs. write) may be performed simultaneously. * Offsets passed to operations on a channel of this type are ignored. * * @const DISPATCH_IO_RANDOM A dispatch I/O channel representing a random @@ -302,7 +304,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, * @param data The data object to be handled. * @param error An errno condition for the operation. */ -typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, +typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t _Nullable data, int error); /*! 
@@ -586,4 +588,6 @@ dispatch_io_set_interval(dispatch_io_t channel, __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_IO__ */ diff --git a/dispatch/module.map b/dispatch/module.map new file mode 100644 index 0000000..6f3c8aa --- /dev/null +++ b/dispatch/module.map @@ -0,0 +1,20 @@ +module Dispatch { + requires blocks + export * + link "dispatch" + link "BlocksRuntime" +} + +module DispatchIntrospection [system] [extern_c] { + header "introspection.h" + export * +} + +module CDispatch [system] [extern_c] { + umbrella header "dispatch.h" + module * { export * } + export * + requires blocks + link "dispatch" + link "BlocksRuntime" +} diff --git a/dispatch/module.modulemap b/dispatch/module.modulemap new file mode 100644 index 0000000..addaae4 --- /dev/null +++ b/dispatch/module.modulemap @@ -0,0 +1,10 @@ +module Dispatch [system] [extern_c] { + umbrella header "dispatch.h" + module * { export * } + export * +} + +module DispatchIntrospection [system] [extern_c] { + header "introspection.h" + export * +} diff --git a/dispatch/object.h b/dispatch/object.h index a9b805e..8b20301 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @typedef dispatch_object_t * @@ -46,16 +48,23 @@ * analyzer, and enables them to be added to Cocoa collections. * See for details. */ -OS_OBJECT_DECL(dispatch_object); +OS_OBJECT_DECL_CLASS(dispatch_object); + +#if OS_OBJECT_SWIFT3 +#define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS_SWIFT(name, dispatch_object) +#else // OS_OBJECT_SWIFT3 #define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS(name, dispatch_object) -#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) -#define DISPATCH_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED + DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void _dispatch_object_validate(dispatch_object_t object) { void *isa = *(void* volatile*)(OS_OBJECT_BRIDGE void*)object; (void)isa; } +#endif // OS_OBJECT_SWIFT3 + +#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) +#define DISPATCH_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED #elif defined(__cplusplus) && !defined(__DISPATCH_BUILDING_DISPATCH__) /* * Dispatch objects are NOT C++ objects. Nevertheless, we can at least keep C++ @@ -99,6 +108,38 @@ typedef union { #define DISPATCH_RETURNS_RETAINED #endif +#if OS_OBJECT_SWIFT3 && OS_OBJECT_USE_OBJC +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_source_type_s \ + _dispatch_source_type_##name; \ + OS_OBJECT_DECL_PROTOCOL(dispatch_source_##name, ); \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL( \ + dispatch_source, dispatch_source_##name) +#define DISPATCH_SOURCE_DECL(name) \ + DISPATCH_DECL(name); \ + OS_OBJECT_DECL_PROTOCOL(name, ); \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, name) +#ifndef DISPATCH_DATA_DECL +#define DISPATCH_DATA_DECL(name) OS_OBJECT_DECL_SWIFT(name) +#endif // DISPATCH_DATA_DECL +#elif !TARGET_OS_WIN32 +/*! @parseOnly */ +#define DISPATCH_SOURCE_DECL(name) \ + DISPATCH_DECL(name); +/*! @parseOnly */ +#define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) +/*! 
@parseOnly */ +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT const struct dispatch_source_type_s \ + _dispatch_source_type_##name +#else +#define DISPATCH_SOURCE_DECL(name) \ + DISPATCH_DECL(name); +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_source_type_s _dispatch_source_type_##name +#define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) +#endif + #ifdef __BLOCKS__ /*! * @typedef dispatch_block_t @@ -162,11 +203,13 @@ __BEGIN_DECLS */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void dispatch_retain(dispatch_object_t object); #if OS_OBJECT_USE_OBJC_RETAIN_RELEASE #undef dispatch_retain -#define dispatch_retain(object) ({ dispatch_object_t _o = (object); \ +#define dispatch_retain(object) \ + __extension__({ dispatch_object_t _o = (object); \ _dispatch_object_validate(_o); (void)[_o retain]; }) #endif @@ -188,11 +231,13 @@ dispatch_retain(dispatch_object_t object); */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void dispatch_release(dispatch_object_t object); #if OS_OBJECT_USE_OBJC_RETAIN_RELEASE #undef dispatch_release -#define dispatch_release(object) ({ dispatch_object_t _o = (object); \ +#define dispatch_release(object) \ + __extension__({ dispatch_object_t _o = (object); \ _dispatch_object_validate(_o); [_o release]; }) #endif @@ -211,7 +256,7 @@ dispatch_release(dispatch_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -void * +void *_Nullable dispatch_get_context(dispatch_object_t object); /*! @@ -228,9 +273,9 @@ dispatch_get_context(dispatch_object_t object); * */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1 +DISPATCH_EXPORT DISPATCH_NOTHROW void -dispatch_set_context(dispatch_object_t object, void *context); +dispatch_set_context(dispatch_object_t object, void *_Nullable context); /*! * @function dispatch_set_finalizer_f @@ -254,10 +299,38 @@ dispatch_set_context(dispatch_object_t object, void *context); * context of the dispatch object at the time the finalizer call is made. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1 +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_finalizer_f(dispatch_object_t object, - dispatch_function_t finalizer); + dispatch_function_t _Nullable finalizer); + +/*! + * @function dispatch_activate + * + * @abstract + * Activates the specified dispatch object. + * + * @discussion + * Dispatch objects such as queues and sources may be created in an inactive + * state. Objects in this state have to be activated before any blocks + * associated with them will be invoked. + * + * The target queue of inactive objects can be changed using + * dispatch_set_target_queue(). Change of target queue is no longer permitted + * once an initially inactive object has been activated. + * + * Calling dispatch_activate() on an active object has no effect. + * Releasing the last reference count on an inactive object is undefined. + * + * @param object + * The object to be activated. + * The result of passing NULL in this parameter is undefined. 
+ */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_activate(dispatch_object_t object); /*! * @function dispatch_suspend @@ -288,6 +361,20 @@ dispatch_suspend(dispatch_object_t object); * @abstract * Resumes the invocation of blocks on a dispatch object. * + * @discussion + * Dispatch objects can be suspended with dispatch_suspend(), which increments + * an internal suspension count. dispatch_resume() is the inverse operation, + * and consumes suspension counts. When the last suspension count is consumed, + * blocks associated with the object will be invoked again. + * + * For backward compatibility reasons, dispatch_resume() on an inactive and not + * otherwise suspended dispatch source object has the same effect as calling + * dispatch_activate(). For new code, using dispatch_activate() is preferred. + * + * If the specified object has zero suspension count and is not an inactive + * source, this function will result in an assertion and the process being + * terminated. + * * @param object * The object to be resumed. * The result of passing NULL in this parameter is undefined. @@ -468,4 +555,6 @@ dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/once.h b/dispatch/once.h index 32cf2e8..a8f5644 100644 --- a/dispatch/once.h +++ b/dispatch/once.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -35,6 +37,7 @@ __BEGIN_DECLS * A predicate for use with dispatch_once(). It must be initialized to zero. * Note: static and global variables default to zero. */ +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") typedef long dispatch_once_t; /*! 
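For illustration, the activation model described above for dispatch_activate() pairs with the initially inactive queue attributes added to dispatch/queue.h later in this patch. A minimal sketch of the intended usage (the queue label is an arbitrary example):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
            dispatch_queue_t q = dispatch_queue_create("example.inactive",
                            DISPATCH_QUEUE_SERIAL_INACTIVE);
            /* Blocks may be submitted, but cannot run until activation. */
            dispatch_async(q, ^{ printf("ran after activation\n"); });
            dispatch_activate(q);
            dispatch_sync(q, ^{ /* drain before exit */ });
            dispatch_release(q);
            return 0;
    }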
@@ -57,16 +60,23 @@ typedef long dispatch_once_t; #ifdef __BLOCKS__ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -dispatch_once(dispatch_once_t *predicate, dispatch_block_t block); +dispatch_once(dispatch_once_t *predicate, + DISPATCH_NOESCAPE dispatch_block_t block); DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -_dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) +_dispatch_once(dispatch_once_t *predicate, + DISPATCH_NOESCAPE dispatch_block_t block) { if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) { dispatch_once(predicate, block); + } else { + dispatch_compiler_barrier(); } + DISPATCH_COMPILER_CAN_ASSUME(*predicate == ~0l); } #undef dispatch_once #define dispatch_once _dispatch_once @@ -74,23 +84,30 @@ _dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -dispatch_once_f(dispatch_once_t *predicate, void *context, +dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, dispatch_function_t function); DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -_dispatch_once_f(dispatch_once_t *predicate, void *context, +_dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, dispatch_function_t function) { if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) { dispatch_once_f(predicate, context, function); + } else { + dispatch_compiler_barrier(); } + DISPATCH_COMPILER_CAN_ASSUME(*predicate == ~0l); } #undef dispatch_once_f #define dispatch_once_f _dispatch_once_f __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/queue.h b/dispatch/queue.h index b3cb54f..264c344 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -26,6 +26,12 @@ #include // for HeaderDoc #endif +#if __has_include() +#include +#endif + +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @header * @@ -131,7 +137,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -168,7 +174,7 @@ dispatch_async_f(dispatch_queue_t queue, __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); +dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); #endif /*! @@ -197,7 +203,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_sync_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -230,7 +236,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t queue, - void (^block)(size_t)); + DISPATCH_NOESCAPE void (^block)(size_t)); #endif /*! 
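For illustration, the _dispatch_once() fast path above now ends with a compiler barrier and DISPATCH_COMPILER_CAN_ASSUME(*predicate == ~0l), so loads of the lazily initialized state cannot be hoisted above the predicate check; the calling pattern itself is unchanged. A minimal sketch (the table names here are arbitrary):

    #include <dispatch/dispatch.h>
    #include <stdlib.h>

    static int *table;

    static void table_init(void *ctx)
    {
            (void)ctx;
            table = calloc(1024, sizeof(int));      /* runs exactly once */
    }

    int *get_table(void)
    {
            static dispatch_once_t pred;    /* must start as zero */
            dispatch_once_f(&pred, NULL, table_init);
            /* Safe: initialization is ordered before this load. */
            return table;
    }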
@@ -263,8 +269,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t queue, - void *context, - void (*work)(void *, size_t)); + void *_Nullable context, + void (*work)(void *_Nullable, size_t)); /*! * @function dispatch_get_current_queue @@ -288,7 +294,7 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * When dispatch_get_current_queue() is called on the main thread, it may * or may not return the same value as dispatch_get_main_queue(). Comparing * the two is not a valid way to test whether code is executing on the - * main thread. + * main thread (see dispatch_assert_queue() and dispatch_assert_queue_not()). * * This function is deprecated and will be removed in a future release. * @@ -365,7 +371,6 @@ typedef long dispatch_queue_priority_t; * Alias for qos_class_t type. */ #if __has_include() -#include typedef qos_class_t dispatch_qos_class_t; #else typedef unsigned int dispatch_qos_class_t; @@ -425,12 +430,24 @@ DISPATCH_DECL(dispatch_queue_attr); /*! * @const DISPATCH_QUEUE_SERIAL + * * @discussion A dispatch queue that invokes blocks serially in FIFO order. */ #define DISPATCH_QUEUE_SERIAL NULL +/*! + * @const DISPATCH_QUEUE_SERIAL_INACTIVE + * + * @discussion + * A dispatch queue that invokes blocks serially in FIFO order, and that is + * created initially inactive. See dispatch_queue_attr_make_initially_inactive(). + */ +#define DISPATCH_QUEUE_SERIAL_INACTIVE \ + dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL) + /*! * @const DISPATCH_QUEUE_CONCURRENT + * * @discussion A dispatch queue that may invoke blocks concurrently and supports * barrier blocks submitted with the dispatch barrier API. */ @@ -441,12 +458,173 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; +/*! + * @const DISPATCH_QUEUE_CONCURRENT_INACTIVE + * + * @discussion + * A dispatch queue that may invoke blocks concurrently and supports barrier + * blocks submitted with the dispatch barrier API, and that is created initially + * inactive. See dispatch_queue_attr_make_initially_inactive(). + */ +#define DISPATCH_QUEUE_CONCURRENT_INACTIVE \ + dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_CONCURRENT) + +/*! + * @function dispatch_queue_attr_make_initially_inactive + * + * @abstract + * Returns an attribute value which may be provided to dispatch_queue_create() + * or dispatch_queue_create_with_target(), in order to make the created queue + * initially inactive. + * + * @discussion + * Dispatch queues may be created in an inactive state. Queues in this state + * have to be activated before any blocks associated with them will be invoked. + * + * A queue in inactive state cannot be deallocated, dispatch_activate() must be + * called before the last reference to a queue created with this attribute is + * released. + * + * The target queue of a queue in inactive state can be changed using + * dispatch_set_target_queue(). Change of target queue is no longer permitted + * once an initially inactive queue has been activated. + * + * @param attr + * A queue attribute value to be combined with the initially inactive attribute. + * + * @return + * Returns an attribute value which may be provided to dispatch_queue_create() + * and dispatch_queue_create_with_target(). + * The new value combines the attributes specified by the 'attr' parameter with + * the initially inactive attribute. 
+ */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +dispatch_queue_attr_t +dispatch_queue_attr_make_initially_inactive( + dispatch_queue_attr_t _Nullable attr); + +/*! + * @const DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL + * + * @discussion + * A dispatch queue created with this attribute invokes blocks serially in FIFO + * order, and surrounds execution of any block submitted asynchronously to it + * with the equivalent of an individual Objective-C @autoreleasepool + * scope. + * + * See dispatch_queue_attr_make_with_autorelease_frequency(). + */ +#define DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL \ + dispatch_queue_attr_make_with_autorelease_frequency(\ + DISPATCH_QUEUE_SERIAL, DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) + +/*! + * @const DISPATCH_QUEUE_CONCURRENT_WITH_AUTORELEASE_POOL + * + * @discussion + * A dispatch queue created with this attribute may invoke blocks concurrently + * and supports barrier blocks submitted with the dispatch barrier API. It also + * surrounds execution of any block submitted asynchronously to it with the + * equivalent of an individual Objective-C @autoreleasepool scope. + * + * See dispatch_queue_attr_make_with_autorelease_frequency(). + */ +#define DISPATCH_QUEUE_CONCURRENT_WITH_AUTORELEASE_POOL \ + dispatch_queue_attr_make_with_autorelease_frequency(\ + DISPATCH_QUEUE_CONCURRENT, DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) + +/*! + * @typedef dispatch_autorelease_frequency_t + * Values to pass to the dispatch_queue_attr_make_with_autorelease_frequency() + * function. + * + * @const DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + * Dispatch queues with this autorelease frequency inherit the behavior from + * their target queue. This is the default behavior for manually created queues. + * + * @const DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + * Dispatch queues with this autorelease frequency push and pop an autorelease + * pool around the execution of every block that was submitted to them + * asynchronously. + * @see dispatch_queue_attr_make_with_autorelease_frequency(). + * + * @const DISPATCH_AUTORELEASE_FREQUENCY_NEVER + * Dispatch queues with this autorelease frequency never set up an individual + * autorelease pool around the execution of a block that is submitted to them + * asynchronously. This is the behavior of the global concurrent queues. + */ +DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + DISPATCH_ENUM_AVAILABLE(OSX, 10.12) + DISPATCH_ENUM_AVAILABLE(IOS, 10.0) + DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) + DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 0, + DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + DISPATCH_ENUM_AVAILABLE(OSX, 10.12) + DISPATCH_ENUM_AVAILABLE(IOS, 10.0) + DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) + DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 1, + DISPATCH_AUTORELEASE_FREQUENCY_NEVER + DISPATCH_ENUM_AVAILABLE(OSX, 10.12) + DISPATCH_ENUM_AVAILABLE(IOS, 10.0) + DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) + DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 2, +); + +/*! + * @function dispatch_queue_attr_make_with_autorelease_frequency + * + * @abstract + * Returns a dispatch queue attribute value with the autorelease frequency + * set to the specified value.
+ * + * @discussion + * When a queue uses the per-workitem autorelease frequency (either directly + * or inherited from its target queue), any block submitted asynchronously to + * this queue (via dispatch_async(), dispatch_barrier_async(), + * dispatch_group_notify(), etc.) is executed as if surrounded by an individual + * Objective-C @autoreleasepool scope. + * + * Autorelease frequency has no effect on blocks that are submitted + * synchronously to a queue (via dispatch_sync(), dispatch_barrier_sync()). + * + * The global concurrent queues have the DISPATCH_AUTORELEASE_FREQUENCY_NEVER + * behavior. Manually created dispatch queues use + * DISPATCH_AUTORELEASE_FREQUENCY_INHERIT by default. + * + * Queues created with this attribute cannot change target queues after having + * been activated. See dispatch_set_target_queue() and dispatch_activate(). + * + * @param attr + * A queue attribute value to be combined with the specified autorelease + * frequency or NULL. + * + * @param frequency + * The requested autorelease frequency. + * + * @return + * Returns an attribute value which may be provided to dispatch_queue_create() + * or NULL if an invalid autorelease frequency was requested. + * This new value combines the attributes specified by the 'attr' parameter and + * the chosen autorelease frequency. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +dispatch_queue_attr_t +dispatch_queue_attr_make_with_autorelease_frequency( + dispatch_queue_attr_t _Nullable attr, + dispatch_autorelease_frequency_t frequency); + /*! * @function dispatch_queue_attr_make_with_qos_class * * @abstract * Returns an attribute value which may be provided to dispatch_queue_create() - * in order to assign a QOS class and relative priority to the queue. + * or dispatch_queue_create_with_target(), in order to assign a QOS class and + * relative priority to the queue. * * @discussion * When specified in this manner, the QOS class and relative priority take @@ -487,17 +665,86 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; * results in NULL being returned. * * @return - * Returns an attribute value which may be provided to dispatch_queue_create(), - * or NULL if an invalid QOS class was requested. + * Returns an attribute value which may be provided to dispatch_queue_create() + * and dispatch_queue_create_with_target(), or NULL if an invalid QOS class was + * requested. * The new value combines the attributes specified by the 'attr' parameter and * the new QOS class and relative priority. */ __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t -dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t attr, +dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, dispatch_qos_class_t qos_class, int relative_priority); +/*! + * @const DISPATCH_TARGET_QUEUE_DEFAULT + * @discussion Constant to pass to the dispatch_queue_create_with_target(), + * dispatch_set_target_queue() and dispatch_source_create() functions to + * indicate that the default target queue for the object type in question + * should be used. + */ +#define DISPATCH_TARGET_QUEUE_DEFAULT NULL + +/*! + * @function dispatch_queue_create_with_target + * + * @abstract + * Creates a new dispatch queue with a specified target queue.
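// Illustrative sketch (not part of the patch): requesting the per-work-item
// autorelease behavior described above through the attribute maker; this is
// what the DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL convenience constant
// expands to. The label is hypothetical.
static dispatch_queue_t
make_pooled_queue(void)
{
	dispatch_queue_attr_t attr =
			dispatch_queue_attr_make_with_autorelease_frequency(
			DISPATCH_QUEUE_SERIAL, DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM);
	// Each asynchronously submitted block now runs in its own pool, so
	// autoreleased ObjC objects are reclaimed per work item.
	return dispatch_queue_create("com.example.pooled", attr);
}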
+ * + * @discussion + * Dispatch queues created with the DISPATCH_QUEUE_SERIAL or a NULL attribute + * invoke blocks serially in FIFO order. + * + * Dispatch queues created with the DISPATCH_QUEUE_CONCURRENT attribute may + * invoke blocks concurrently (similarly to the global concurrent queues, but + * potentially with more overhead), and support barrier blocks submitted with + * the dispatch barrier API, which e.g. enables the implementation of efficient + * reader-writer schemes. + * + * When a dispatch queue is no longer needed, it should be released with + * dispatch_release(). Note that any pending blocks submitted to a queue will + * hold a reference to that queue. Therefore a queue will not be deallocated + * until all pending blocks have finished. + * + * When using a dispatch queue attribute @a attr specifying a QoS class (derived + * from the result of dispatch_queue_attr_make_with_qos_class()), passing the + * result of dispatch_get_global_queue() in @a target will ignore the QoS class + * of that global queue and will use the global queue with the QoS class + * specified by attr instead. + * + * Queues created with dispatch_queue_create_with_target() cannot have their + * target queue changed, unless created inactive (See + * dispatch_queue_attr_make_initially_inactive()), in which case the target + * queue can be changed until the newly created queue is activated with + * dispatch_activate(). + * + * @param label + * A string label to attach to the queue. + * This parameter is optional and may be NULL. + * + * @param attr + * A predefined attribute such as DISPATCH_QUEUE_SERIAL, + * DISPATCH_QUEUE_CONCURRENT, or the result of a call to + * a dispatch_queue_attr_make_with_* function. + * + * @param target + * The target queue for the newly created queue. The target queue is retained. + * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the queue's target + * queue to the default target queue for the given queue type. + * + * @result + * The newly created dispatch queue. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +dispatch_queue_create_with_target(const char *_Nullable label, + dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) + DISPATCH_ALIAS_V2(dispatch_queue_create_with_target); + /*! * @function dispatch_queue_create * @@ -534,8 +781,9 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t attr, * This parameter is optional and may be NULL. * * @param attr - * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to - * the function dispatch_queue_attr_make_with_qos_class(). + * A predefined attribute such as DISPATCH_QUEUE_SERIAL, + * DISPATCH_QUEUE_CONCURRENT, or the result of a call to + * a dispatch_queue_attr_make_with_* function. * * @result * The newly created dispatch queue. @@ -544,7 +792,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); +dispatch_queue_create(const char *_Nullable label, + dispatch_queue_attr_t _Nullable attr); /*! 
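// Illustrative sketch (not part of the patch): creating a serial "subsystem"
// queue that funnels into an existing target in a single call, so the target
// is fixed from creation rather than changed later with
// dispatch_set_target_queue(). Names are hypothetical.
static dispatch_queue_t
make_subsystem_queue(dispatch_queue_t target)
{
	return dispatch_queue_create_with_target("com.example.subsystem",
			DISPATCH_QUEUE_SERIAL, target);
}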
* @const DISPATCH_CURRENT_QUEUE_LABEL @@ -572,7 +821,7 @@ dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW const char * -dispatch_queue_get_label(dispatch_queue_t queue); +dispatch_queue_get_label(dispatch_queue_t _Nullable queue); /*! * @function dispatch_queue_get_qos_class @@ -612,15 +861,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW dispatch_qos_class_t dispatch_queue_get_qos_class(dispatch_queue_t queue, - int *relative_priority_ptr); - -/*! - * @const DISPATCH_TARGET_QUEUE_DEFAULT - * @discussion Constant to pass to the dispatch_set_target_queue() and - * dispatch_source_create() functions to indicate that the default target queue - * for the given object type should be used. - */ -#define DISPATCH_TARGET_QUEUE_DEFAULT NULL + int *_Nullable relative_priority_ptr); /*! * @function dispatch_set_target_queue @@ -657,6 +898,20 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, * For all other dispatch object types, the only function of the target queue * is to determine where an object's finalizer function is invoked. * + * In general, changing the target queue of an object is an asynchronous + * operation that doesn't take effect immediately, and doesn't affect blocks + * already associated with the specified object. + * + * However, if an object is inactive at the time dispatch_set_target_queue() is + * called, then the target queue change takes effect immediately, and will + * affect blocks already associated with the specified object. After an + * initially inactive object has been activated, calling + * dispatch_set_target_queue() results in an assertion and the process being + * terminated. + * + * If a dispatch queue is active and targeted by other dispatch objects, + * changing its target queue results in undefined behavior. + * * @param object * The object to modify. * The result of passing NULL in this parameter is undefined. @@ -668,9 +923,10 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, * to the default target queue for the given object type. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW // DISPATCH_NONNULL1 +DISPATCH_EXPORT DISPATCH_NOTHROW void -dispatch_set_target_queue(dispatch_object_t object, dispatch_queue_t queue); +dispatch_set_target_queue(dispatch_object_t object, + dispatch_queue_t _Nullable queue); /*! * @function dispatch_main @@ -751,7 +1007,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -831,7 +1087,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_async_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -858,7 +1114,8 @@ dispatch_barrier_async_f(dispatch_queue_t queue, __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_barrier_sync(dispatch_queue_t queue, dispatch_block_t block); +dispatch_barrier_sync(dispatch_queue_t queue, + DISPATCH_NOESCAPE dispatch_block_t block); #endif /*! 
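// Illustrative sketch (not part of the patch) of the reader-writer scheme the
// barrier discussion above alludes to: concurrent readers via dispatch_sync(),
// exclusive writers via dispatch_barrier_async(). The guarded integer and
// names are hypothetical; rw_q must be created with DISPATCH_QUEUE_CONCURRENT.
static dispatch_queue_t rw_q;
static int shared_value;

static int
read_value(void)
{
	__block int v;
	dispatch_sync(rw_q, ^{ v = shared_value; }); // readers run concurrently
	return v;
}

static void
write_value(int v)
{
	dispatch_barrier_async(rw_q, ^{ shared_value = v; }); // waits out readers
}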
@@ -890,7 +1147,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_sync_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -920,7 +1177,7 @@ dispatch_barrier_sync_f(dispatch_queue_t queue, * The key to set the context for, typically a pointer to a static variable * specific to the subsystem. Keys are only compared as pointers and never * dereferenced. Passing a string constant directly is not recommended. - * The NULL key is reserved and attemps to set a context for it are ignored. + * The NULL key is reserved and attempts to set a context for it are ignored. * * @param context * The new subsystem-specific context for the object. This may be NULL. @@ -933,7 +1190,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, - void *context, dispatch_function_t destructor); + void *_Nullable context, dispatch_function_t _Nullable destructor); /*! * @function dispatch_queue_get_specific @@ -961,7 +1218,7 @@ dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -void * +void *_Nullable dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); /*! @@ -987,9 +1244,128 @@ dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -void * +void *_Nullable dispatch_get_specific(const void *key); +/*! + * @functiongroup Dispatch assertion API + * + * This API asserts at runtime that code is executing in (or out of) the context + * of a given queue. It can be used to check that a block accessing a resource + * does so from the proper queue protecting the resource. It also can be used + * to verify that a block that could cause a deadlock if run on a given queue + * never executes on that queue. + */ + +/*! + * @function dispatch_assert_queue + * + * @abstract + * Verifies that the current block is executing on a given dispatch queue. + * + * @discussion + * Some code expects to be run on a specific dispatch queue. This function + * verifies that that expectation is true. + * + * If the currently executing block was submitted to the specified queue or to + * any queue targeting it (see dispatch_set_target_queue()), this function + * returns. + * + * If the currently executing block was submitted with a synchronous API + * (dispatch_sync(), dispatch_barrier_sync(), ...), the context of the + * submitting block is also evaluated (recursively). + * If a synchronously submitting block is found that was itself submitted to + * the specified queue or to any queue targeting it, this function returns. + * + * Otherwise this function asserts: it logs an explanation to the system log and + * terminates the application. + * + * Passing the result of dispatch_get_main_queue() to this function verifies + * that the current block was submitted to the main queue, or to a queue + * targeting it, or is running on the main thread (in any context). 
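// Illustrative sketch (not part of the patch): using queue-specific data as
// the dispatch_queue_set_specific() discussion above suggests, with a static
// key whose address (never its contents) identifies the subsystem. Names are
// hypothetical.
#include <stdbool.h>

static char subsystem_key; // only the address is used as the key

static void
tag_queue(dispatch_queue_t q)
{
	dispatch_queue_set_specific(q, &subsystem_key, (void *)1, NULL);
}

static bool
on_subsystem_queue_hierarchy(void)
{
	// Resolved against the current queue and its target queues.
	return dispatch_get_specific(&subsystem_key) != NULL;
}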
+ * + * When dispatch_assert_queue() is called outside of the context of a + * submitted block (for example from the context of a thread created manually + * with pthread_create()) then this function will also assert and terminate + * the application. + * + * The variant dispatch_assert_queue_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected to run on. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_assert_queue(dispatch_queue_t queue) + DISPATCH_ALIAS_V2(dispatch_assert_queue); + +/*! + * @function dispatch_assert_queue_barrier + * + * @abstract + * Verifies that the current block is executing on a given dispatch queue, + * and that the block acts as a barrier on that queue. + * + * @discussion + * This behaves exactly like dispatch_assert_queue(), with the additional check + * that the current block acts as a barrier on the specified queue, which is + * always true if the specified queue is serial (see DISPATCH_BLOCK_BARRIER or + * dispatch_barrier_async() for details). + * + * The variant dispatch_assert_queue_barrier_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected to run as a barrier on. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_assert_queue_barrier(dispatch_queue_t queue); + +/*! + * @function dispatch_assert_queue_not + * + * @abstract + * Verifies that the current block is not executing on a given dispatch queue. + * + * @discussion + * This function is the equivalent of dispatch_assert_queue() with the test for + * equality inverted. That means that it will terminate the application when + * dispatch_assert_queue() would return, and vice-versa. See discussion there. + * + * The variant dispatch_assert_queue_not_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected not to run on. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_assert_queue_not(dispatch_queue_t queue) + DISPATCH_ALIAS_V2(dispatch_assert_queue_not); + +#ifdef NDEBUG +#define dispatch_assert_queue_debug(q) ((void)(0 && (q))) +#define dispatch_assert_queue_barrier_debug(q) ((void)(0 && (q))) +#define dispatch_assert_queue_not_debug(q) ((void)(0 && (q))) +#else +#define dispatch_assert_queue_debug(q) dispatch_assert_queue(q) +#define dispatch_assert_queue_barrier_debug(q) dispatch_assert_queue_barrier(q) +#define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q) +#endif + __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index 8f68407..b6139d7 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -26,6 +26,8 @@ #include <dispatch/base.h> // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*!
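// Illustrative sketch (not part of the patch): protecting a resource accessor
// with the assertion API declared above. The _debug variant compiles away
// under NDEBUG. Queue and function names are hypothetical.
static dispatch_queue_t state_q; // serial queue guarding the state

static void
mutate_state(void)
{
	// Terminates (in debug builds) if invoked from the wrong context.
	dispatch_assert_queue_debug(state_q);
	/* ... touch state owned by state_q ... */
}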
* @typedef dispatch_semaphore_t * @@ -44,7 +46,7 @@ __BEGIN_DECLS * * @discussion * Passing zero for the value is useful for when two threads need to reconcile - * the completion of a particular event. Passing a value greather than zero is + * the completion of a particular event. Passing a value greater than zero is * useful for managing a finite pool of resources, where the pool size is equal * to the value. * @@ -110,4 +112,6 @@ dispatch_semaphore_signal(dispatch_semaphore_t dsema); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_SEMAPHORE__ */ diff --git a/dispatch/source.h b/dispatch/source.h index f01fd93..63b3ff3 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -35,6 +35,8 @@ #include #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @header * The dispatch framework provides a suite of interfaces for monitoring low- @@ -52,7 +54,7 @@ * Dispatch sources are used to automatically submit event handler blocks to * dispatch queues in response to external events. */ -DISPATCH_DECL(dispatch_source); +DISPATCH_SOURCE_DECL(dispatch_source); __BEGIN_DECLS @@ -64,21 +66,11 @@ __BEGIN_DECLS * is being monitored by the dispatch source. Constants of this type are * passed as a parameter to dispatch_source_create() and determine how the * handle argument is interpreted (i.e. as a file descriptor, mach port, - * signal number, process identifer, etc.), and how the mask arugment is + * signal number, process identifier, etc.), and how the mask argument is * interpreted. */ typedef const struct dispatch_source_type_s *dispatch_source_type_t; -#if !TARGET_OS_WIN32 -/*! @parseOnly */ -#define DISPATCH_SOURCE_TYPE_DECL(name) \ - DISPATCH_EXPORT const struct dispatch_source_type_s \ - _dispatch_source_type_##name -#else -#define DISPATCH_SOURCE_TYPE_DECL(name) \ - DISPATCH_EXPORT struct dispatch_source_type_s _dispatch_source_type_##name -#endif - /*! * @const DISPATCH_SOURCE_TYPE_DATA_ADD * @discussion A dispatch source that coalesces data obtained via calls to @@ -109,7 +101,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_or); * The mask is a mask of desired events from dispatch_source_mach_send_flags_t. */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_send); /*! @@ -119,7 +111,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_recv); /*! @@ -132,7 +124,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_recv); */ #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ (&_dispatch_source_type_memorypressure) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(memorypressure); /*! @@ -143,7 +135,7 @@ DISPATCH_SOURCE_TYPE_DECL(memorypressure); * The mask is a mask of desired events from dispatch_source_proc_flags_t. */ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(proc); /*! 
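// Illustrative sketch (not part of the patch) of the "finite pool" use named
// in the dispatch_semaphore_create() discussion above: a semaphore created
// with the pool size gates access to N scarce resources. The size and names
// are hypothetical.
static dispatch_semaphore_t pool_sema; // dispatch_semaphore_create(4)

static void
with_pooled_resource(void (^work)(void))
{
	dispatch_semaphore_wait(pool_sema, DISPATCH_TIME_FOREVER); // acquire slot
	work();
	dispatch_semaphore_signal(pool_sema); // return slot to the pool
}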
@@ -186,7 +178,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer); * The mask is a mask of desired events from dispatch_source_vnode_flags_t. */ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(vnode); /*! @@ -289,6 +281,9 @@ typedef unsigned long dispatch_source_proc_flags_t; * * @constant DISPATCH_VNODE_REVOKE * The filesystem object was revoked. + * + * @constant DISPATCH_VNODE_FUNLOCK + * The filesystem object was unlocked. */ #define DISPATCH_VNODE_DELETE 0x1 @@ -298,6 +293,7 @@ typedef unsigned long dispatch_source_proc_flags_t; #define DISPATCH_VNODE_LINK 0x10 #define DISPATCH_VNODE_RENAME 0x20 #define DISPATCH_VNODE_REVOKE 0x40 +#define DISPATCH_VNODE_FUNLOCK 0x100 typedef unsigned long dispatch_source_vnode_flags_t; @@ -321,7 +317,6 @@ typedef unsigned long dispatch_source_vnode_flags_t; typedef unsigned long dispatch_source_timer_flags_t; - /*! * @function dispatch_source_create * @@ -335,23 +330,36 @@ typedef unsigned long dispatch_source_timer_flags_t; * will be coalesced and delivered after the dispatch source is resumed or the * event handler block has returned. * - * Dispatch sources are created in a suspended state. After creating the + * Dispatch sources are created in an inactive state. After creating the * source and setting any desired attributes (i.e. the handler, context, etc.), - * a call must be made to dispatch_resume() in order to begin event delivery. + * a call must be made to dispatch_activate() in order to begin event delivery. + * + * Calling dispatch_set_target_queue() on a source once it has been activated + * is not allowed (see dispatch_activate() and dispatch_set_target_queue()). + * + * For backward compatibility reasons, dispatch_resume() on an inactive + * and not otherwise suspended source has the same effect as calling + * dispatch_activate(). For new code, using dispatch_activate() is preferred. * * @param type * Declares the type of the dispatch source. Must be one of the defined * dispatch_source_type_t constants. + * * @param handle * The underlying system handle to monitor. The interpretation of this argument * is determined by the constant provided in the type parameter. + * * @param mask * A mask of flags specifying which events are desired. The interpretation of * this argument is determined by the constant provided in the type parameter. + * * @param queue * The dispatch queue to which the event handler block will be submitted. * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, the source will submit the event * handler block to the default priority global queue. + * + * @result + * The newly created dispatch source, or NULL if invalid arguments are passed. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT @@ -360,7 +368,7 @@ dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t queue); + dispatch_queue_t _Nullable queue); /*! * @function dispatch_source_set_event_handler * @@ -380,7 +388,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler(dispatch_source_t source, - dispatch_block_t handler); + dispatch_block_t _Nullable handler); #endif /* __BLOCKS__ */ /*!
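// Illustrative sketch (not part of the patch): a timer source following the
// lifecycle described above: created, configured, then activated with
// dispatch_activate(). Interval and names are hypothetical.
static dispatch_source_t
make_heartbeat(dispatch_queue_t q)
{
	dispatch_source_t t = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
			0, 0, q);
	dispatch_source_set_timer(t, DISPATCH_TIME_NOW, NSEC_PER_SEC,
			NSEC_PER_SEC / 10);
	dispatch_source_set_event_handler(t, ^{ /* fires about once a second */ });
	dispatch_activate(t); // begins event delivery
	return t;
}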
@@ -395,15 +403,14 @@ dispatch_source_set_event_handler(dispatch_source_t source, * * @param handler * The event handler function to submit to the source's target queue. - * The context parameter passed to the event handler function is the current - * context of the dispatch source at the time the handler call is made. - * The result of passing NULL in this parameter is undefined. + * The context parameter passed to the event handler function is the context of + * the dispatch source at the time the event handler was set. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler_f(dispatch_source_t source, - dispatch_function_t handler); + dispatch_function_t _Nullable handler); /*! * @function dispatch_source_set_cancel_handler * @@ -437,7 +444,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler(dispatch_source_t source, - dispatch_block_t handler); + dispatch_block_t _Nullable handler); #endif /* __BLOCKS__ */ /*! @@ -462,7 +469,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler_f(dispatch_source_t source, - dispatch_function_t handler); + dispatch_function_t _Nullable handler); /*! * @function dispatch_source_cancel @@ -712,7 +719,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler(dispatch_source_t source, - dispatch_block_t handler); + dispatch_block_t _Nullable handler); #endif /* __BLOCKS__ */ /*! @@ -737,8 +744,10 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler_f(dispatch_source_t source, - dispatch_function_t handler); + dispatch_function_t _Nullable handler); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/time.h b/dispatch/time.h index e0bc2f6..c2152ea 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -33,6 +33,8 @@ #include #endif +DISPATCH_ASSUME_NONNULL_BEGIN + #ifdef NSEC_PER_SEC #undef NSEC_PER_SEC #endif @@ -102,7 +104,7 @@ dispatch_time(dispatch_time_t when, int64_t delta); * On Mac OS X the wall clock is based on gettimeofday(3). * * @param when - * A struct timespect to add time to. If NULL is passed, then + * A struct timespec to add time to. If NULL is passed, then * dispatch_walltime() will use the result of gettimeofday(3).
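// Illustrative sketch (not part of the patch): scheduling against the wall
// clock. Unlike dispatch_time(), a dispatch_walltime() deadline tracks
// gettimeofday(3), so it honors calendar time across machine sleep. Names
// are hypothetical.
static void
run_in_one_minute_wall(dispatch_queue_t q, dispatch_block_t block)
{
	// NULL for 'when' means "now", per the parameter description above.
	dispatch_time_t deadline = dispatch_walltime(NULL,
			(int64_t)(60 * NSEC_PER_SEC));
	dispatch_after(deadline, q, block);
}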
* * @param delta @@ -114,8 +116,10 @@ dispatch_time(dispatch_time_t when, int64_t delta); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t -dispatch_walltime(const struct timespec *when, int64_t delta); +dispatch_walltime(const struct timespec *_Nullable when, int64_t delta); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 230d5a0..fb0ba91 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -24,11 +24,33 @@ buildPhases = ( ); dependencies = ( - 4552540F19B138B700B88766 /* PBXTargetDependency */, + 92F3FECF1BEC6F1000025962 /* PBXTargetDependency */, ); name = libdispatch_tests; productName = libdispatch_tests; }; + 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 6E2ECAFE1C49C30000A30A32 /* Build configuration list for PBXAggregateTarget "libdispatch_kernel" */; + buildPhases = ( + ); + dependencies = ( + 6E2ECB021C49C31200A30A32 /* PBXTargetDependency */, + ); + name = libdispatch_kernel; + productName = libdispatch_kernel; + }; + 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 92CBD7231BED924F006E0892 /* Build configuration list for PBXAggregateTarget "libdispatch_tests_legacy" */; + buildPhases = ( + ); + dependencies = ( + 92CBD75A1BED926C006E0892 /* PBXTargetDependency */, + ); + name = libdispatch_tests_legacy; + productName = libdispatch_tests; + }; C927F35A10FD7F0600C5AB8B /* libdispatch_tools */ = { isa = PBXAggregateTarget; buildConfigurationList = C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */; @@ -58,6 +80,68 @@ 5AAB45C010D30B79004407EA /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; 5AAB45C410D30CC7004407EA /* io.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C310D30CC7004407EA /* io.h */; settings = {ATTRIBUTES = (Public, ); }; }; 5AAB45C610D30D0C004407EA /* data.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C510D30D0C004407EA /* data.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 6E040C731C499C6500411A2E /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6E040C751C499CE600411A2E /* firehose_buffer_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; }; + 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; }; + 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; + 6E99558A1C3AF7900071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 
6E9955571C3AF7710071D40C /* venture_private.h */; }; + 6E9955CF1C3B218E0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956011C3B21980071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956021C3B21990071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956031C3B219A0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956041C3B219B0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956051C3B219B0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956071C3B21AA0071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + 6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; + 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B461BBD89AF00C35F4D 
/* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6ED64B521BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6ED64B581BBD8A3E00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6EDF10B81BBB488A007F14BF /* firehose_buffer_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6EF0B26D1BA8C527007FA4F6 /* firehose_server_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6EF0B2711BA8C540007FA4F6 /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6EF0B2781BA8C56E007FA4F6 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Client, ); }; }; + 6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */; }; + 6EF2CAA51C88998A001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 
6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAAD1C8899E9001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; 72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -76,6 +160,48 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + C00B0DF41C5AEBBE000330B3 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 
96DF70BD0F38FE3C0074BD99 /* once.c */; }; + C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + C00B0E001C5AEBBE000330B3 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + C00B0E011C5AEBBE000330B3 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + C00B0E021C5AEBBE000330B3 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + C00B0E031C5AEBBE000330B3 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + C00B0E041C5AEBBE000330B3 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + C01866A61C5973210040FC07 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + C01866A71C5973210040FC07 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + C01866A81C5973210040FC07 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + C01866A91C5973210040FC07 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + C01866AB1C5973210040FC07 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + C01866AC1C5973210040FC07 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + C01866AD1C5973210040FC07 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + C01866AF1C5973210040FC07 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + C01866B01C5973210040FC07 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + C01866B11C5973210040FC07 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + C01866B21C5973210040FC07 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + C01866B31C5973210040FC07 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + C01866B41C5973210040FC07 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + C01866B51C5973210040FC07 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 
96032E4A0F5CC8C700241C5F /* time.c */; }; + C01866B61C5973210040FC07 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + C01866B71C5973210040FC07 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + C01866B81C5973210040FC07 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + C01866B91C5973210040FC07 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + C90144651C73A8A3002638FC /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + C90144661C73A9F6002638FC /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; C913AC0F143BD34800B78976 /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; C93D6165143E190E00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C93D6166143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; @@ -208,9 +334,6 @@ E49F24D2125D57FA0057C971 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E49F24D3125D57FA0057C971 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; E49F24D4125D57FA0057C971 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; - E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; - E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; - E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; E4B515BD164B2DA300E003AF /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; @@ -284,321 +407,6 @@ /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ - 455253A819B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = DF80F67E10B5C71600FAB5AE; - remoteInfo = dispatch_test; - }; - 455253AA19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01C78108E68D400FAA873; - remoteInfo = dispatch_apply; - }; - 455253AC19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4EB36CD1088F0B000C33AD4; - remoteInfo = dispatch_api; 
-	};
-	455253AE19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CA7108E6C5000FAA873;
-		remoteInfo = dispatch_c99;
-	};
-	455253B019B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4C72A26115C3F65009F3CE1;
-		remoteInfo = dispatch_cf_main;
-	};
-	455253B219B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CB9108E6C7200FAA873;
-		remoteInfo = dispatch_deadname;
-	};
-	455253B419B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CC3108E6CC300FAA873;
-		remoteInfo = dispatch_debug;
-	};
-	455253B619B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CCC108E6CD400FAA873;
-		remoteInfo = dispatch_group;
-	};
-	455253B819B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CD5108E6CE300FAA873;
-		remoteInfo = dispatch_overcommit;
-	};
-	455253BA19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CDE108E6CF300FAA873;
-		remoteInfo = dispatch_pingpong;
-	};
-	455253BC19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CE7108E6D0500FAA873;
-		remoteInfo = dispatch_plusplus;
-	};
-	455253BE19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CF0108E6D2900FAA873;
-		remoteInfo = dispatch_priority;
-	};
-	455253C019B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CF9108E6D3800FAA873;
-		remoteInfo = dispatch_priority2;
-	};
-	455253C219B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E46D768811D0365F00615518;
-		remoteInfo = dispatch_concur;
-	};
-	455253C419B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4324AAC12250F0800A3CAD5;
-		remoteInfo = dispatch_context_for_key;
-	};
-	455253C619B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D02108E6D5600FAA873;
-		remoteInfo = dispatch_proc;
-	};
-	455253C819B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D0B108E6D6000FAA873;
-		remoteInfo = dispatch_queue_finalizer;
-	};
-	455253CA19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D14108E6D7300FAA873;
-		remoteInfo = dispatch_read;
-	};
-	455253CC19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D1D108E6D8B00FAA873;
-		remoteInfo = dispatch_read2;
-	};
-	455253CE19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D26108E6D9A00FAA873;
-		remoteInfo = dispatch_after;
-	};
-	455253D019B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D2F108E6DA700FAA873;
-		remoteInfo = dispatch_timer;
-	};
-	455253D219B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4CE9BC31151AB2A00D710C0;
-		remoteInfo = dispatch_timer_short;
-	};
-	455253D419B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = 5A2BA66D11D0369E0081FF89;
-		remoteInfo = dispatch_timer_timeout;
-	};
-	455253D619B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D38108E6DB200FAA873;
-		remoteInfo = dispatch_suspend_timer;
-	};
-	455253D819B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D41108E6DBF00FAA873;
-		remoteInfo = dispatch_sema;
-	};
-	455253DA19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D53108E6DDC00FAA873;
-		remoteInfo = dispatch_timer_bit31;
-	};
-	455253DC19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D5C108E6E0400FAA873;
-		remoteInfo = dispatch_timer_bit63;
-	};
-	455253DE19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D74108E6E4B00FAA873;
-		remoteInfo = dispatch_timer_set_time;
-	};
-	455253E019B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D7D108E6E6600FAA873;
-		remoteInfo = dispatch_drift;
-	};
-	455253E219B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D86108E6E7200FAA873;
-		remoteInfo = dispatch_starfish;
-	};
-	455253E419B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D8F108E6E7E00FAA873;
-		remoteInfo = dispatch_cascade;
-	};
-	455253E619B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01D98108E6E9500FAA873;
-		remoteInfo = dispatch_readsync;
-	};
-	455253E819B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4E24A0710E0020B00C3C692;
-		remoteInfo = dispatch_sync_on_main;
-	};
-	455253EA19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4E24A1810E0021C00C3C692;
-		remoteInfo = dispatch_sync_gc;
-	};
-	455253EC19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4E24C3210E01DF800C3C692;
-		remoteInfo = dispatch_apply_gc;
-	};
-	455253EE19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = 5AAB464A10D330C5004407EA;
-		remoteInfo = dispatch_data;
-	};
-	455253F019B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = 5A11B20E10DB124C000FAD7A;
-		remoteInfo = dispatch_io;
-	};
-	455253F219B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = 5AA78BAB114821D0009A233B;
-		remoteInfo = dispatch_io_net;
-	};
-	455253F419B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = 5AF00EF51135FA1300CA14CE;
-		remoteInfo = dispatch_vm;
-	};
-	455253F619B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4E33EB6121C9C9400F4B71C;
-		remoteInfo = dispatch_vnode;
-	};
-	455253F819B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = C9E804AF1963EC5F00C2B970;
-		remoteInfo = dispatch_qos;
-	};
-	455253FA19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = C9B1FF84113F458A00843414;
-		remoteInfo = dispatch_select;
-	};
-	455253FC19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = C985912B143D584100718FE3;
-		remoteInfo = dispatch_transform;
-	};
-	455253FE19B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01DA1108E6EE000FAA873;
-		remoteInfo = nsoperation;
-	};
-	4552540019B1384900B88766 /* PBXContainerItemProxy */ = {
-		isa = PBXContainerItemProxy;
-		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-		proxyType = 2;
-		remoteGlobalIDString = E4D01CB0108E6C6300FAA873;
-		remoteInfo = cffd;
-	};
 	4552540219B1384900B88766 /* PBXContainerItemProxy */ = {
 		isa = PBXContainerItemProxy;
 		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
@@ -627,13 +435,55 @@
 		remoteGlobalIDString = E454824F16C1F0FE0042EC2D;
 		remoteInfo = apply_bench;
 	};
-	4552540E19B138B700B88766 /* PBXContainerItemProxy */ = {
+	6E2ECB011C49C31200A30A32 /* PBXContainerItemProxy */ = {
+		isa = PBXContainerItemProxy;
+		containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+		proxyType = 1;
+		remoteGlobalIDString = 6E040C621C499B1B00411A2E;
+		remoteInfo = libfirehose_kernel;
+	};
+	6EF0B27D1BA8C5BF007FA4F6 /* PBXContainerItemProxy */ = {
+		isa = PBXContainerItemProxy;
+		containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+		proxyType = 1;
+		remoteGlobalIDString = 6EB4E4081BA8BCAD00D7B9D2;
+		remoteInfo = libfirehose_server;
+	};
+	92CBD7591BED926C006E0892 /* PBXContainerItemProxy */ = {
 		isa = PBXContainerItemProxy;
 		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
 		proxyType = 1;
 		remoteGlobalIDString = E4D01DC5108E708E00FAA873;
 		remoteInfo = all;
 	};
+	92F3FECE1BEC6F1000025962 /* PBXContainerItemProxy */ = {
+		isa = PBXContainerItemProxy;
+		containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+		proxyType = 1;
+		remoteGlobalIDString = 92F3FECA1BEC69E500025962;
+		remoteInfo = darwintests;
+	};
+	C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */ = {
+		isa = PBXContainerItemProxy;
+		containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+		proxyType = 2;
+		remoteGlobalIDString = E4D01CB9108E6C7200FAA873;
+		remoteInfo = dispatch_deadname;
+	};
+	C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = {
+		isa = PBXContainerItemProxy;
+		containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+		proxyType = 1;
+		remoteGlobalIDString = C00B0DF01C5AEBBE000330B3;
+		remoteInfo = "libdispatch dyld stub";
+	};
+	C01866C11C597AEA0040FC07 /* PBXContainerItemProxy */ = {
+		isa = PBXContainerItemProxy;
+		containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+		proxyType = 1;
+		remoteGlobalIDString = C01866A41C5973210040FC07;
+		remoteInfo = "libdispatch static";
+	};
 	C927F36610FD7F1000C5AB8B /* PBXContainerItemProxy */ = {
 		isa = PBXContainerItemProxy;
 		containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */;
@@ -685,6 +535,20 @@
 	};
 /* End PBXContainerItemProxy section */
 
+/* Begin PBXCopyFilesBuildPhase section */
+	6EA283D61CAB933E0041B2E0 /* Copy Trace Definitions */ = {
+		isa = PBXCopyFilesBuildPhase;
+		buildActionMask = 8;
+		dstPath = "$(INSTALL_PATH_PREFIX)/usr/local/share/misc";
+		dstSubfolderSpec = 0;
+		files = (
+			6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */,
+		);
+		name = "Copy Trace Definitions";
+		runOnlyForDeploymentPostprocessing = 1;
+	};
+/* End PBXCopyFilesBuildPhase section */
+
 /* Begin PBXFileReference section */
 	2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = "<group>"; };
 	2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = "<group>"; };
@@ -696,13 +560,84 @@
 	5AAB45BF10D30B79004407EA /* data.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = data.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
 	5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = "<group>"; tabWidth = 8; };
 	5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = "<group>"; tabWidth = 8; };
+	6E040C631C499B1B00411A2E /* libfirehose_kernel.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_kernel.a; sourceTree = BUILT_PRODUCTS_DIR; };
+	6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose_kernel.xcconfig; sourceTree = "<group>"; };
+	6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_label.c; sourceTree = "<group>"; };
+	6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_server_internal.h; sourceTree = "<group>"; };
+	6E21F2E51BBB23F00000C6A5 /* firehose_server.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = firehose_server.c; sourceTree = "<group>"; };
+	6E326A8F1C2245C4002A6505 /* dispatch_transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_transform.c; sourceTree = "<group>"; };
+	6E326AB11C224830002A6505 /* dispatch_cascade.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cascade.c; sourceTree = "<group>"; };
+	6E326AB31C224870002A6505 /* dispatch_qos.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_qos.c; sourceTree = "<group>"; };
+	6E326AB51C225477002A6505 /* dispatch_proc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_proc.c; sourceTree = "<group>"; };
+	6E326AB71C225FCA002A6505 /* dispatch_vnode.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_vnode.c; sourceTree = "<group>"; };
+	6E326AB91C229866002A6505 /* dispatch_read.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_read.c; sourceTree = "<group>"; };
+	6E326ABB1C229895002A6505 /* dispatch_read2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_read2.c; sourceTree = "<group>"; };
+	6E326ABD1C22A577002A6505 /* dispatch_io_net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io_net.c; sourceTree = "<group>"; };
+	6E326ABE1C22A577002A6505 /* dispatch_io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io.c; sourceTree = "<group>"; };
+	6E326AD81C233209002A6505 /* dispatch_sync_gc.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = dispatch_sync_gc.m; sourceTree = "<group>"; };
+	6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_sync_on_main.c; sourceTree = "<group>"; };
+	6E326ADC1C234396002A6505 /* dispatch_readsync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_readsync.c; sourceTree = "<group>"; };
+	6E326ADE1C23451A002A6505 /* dispatch_concur.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_concur.c; sourceTree = "<group>"; };
+	6E326AE01C234780002A6505 /* dispatch_starfish.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_starfish.c; sourceTree = "<group>"; };
+	6E326AE61C2392E8002A6505 /* dispatch_timer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer.c; sourceTree = "<group>"; };
+	6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_context_for_key.c; sourceTree = "<group>"; };
+	6E326B121C239431002A6505 /* dispatch_suspend_timer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_suspend_timer.c; sourceTree = "<group>"; };
+	6E326B131C239431002A6505 /* dispatch_timer_bit.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_bit.c; sourceTree = "<group>"; };
+	6E326B151C239431002A6505 /* dispatch_timer_set_time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_set_time.c; sourceTree = "<group>"; };
+	6E326B161C239431002A6505 /* dispatch_timer_short.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_short.c; sourceTree = "<group>"; };
+	6E326B171C239431002A6505 /* dispatch_timer_timeout.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_timeout.c; sourceTree = "<group>"; };
+	6E326B441C239B61002A6505 /* dispatch_priority.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_priority.c; sourceTree = "<group>"; };
 	6E4130C91B431697001A152D /* backward-compat.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "backward-compat.xcconfig"; sourceTree = "<group>"; };
+	6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = "<group>"; };
+	6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_trysync.c; sourceTree = "<group>"; };
+	6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_apply.c; sourceTree = "<group>"; };
+	6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cf_main.c; sourceTree = "<group>"; };
+	6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_debug.c; sourceTree = "<group>"; };
+	6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_group.c; sourceTree = "<group>"; };
+	6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_finalizer.c; sourceTree = "<group>"; };
+	6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_overcommit.c; sourceTree = "<group>"; };
+	6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_pingpong.c; sourceTree = "<group>"; };
+	6E67D9171C17BA7200FC98AC /* nsoperation.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = nsoperation.m; sourceTree = "<group>"; };
+	6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_select.c; sourceTree = "<group>"; };
+	6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = test_lib.c; sourceTree = "<group>"; };
+	6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = test_lib.h; sourceTree = "<group>"; };
+	6E8E4E9B1C1A4EF10004F5CC /* dispatch_sema.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_sema.c; sourceTree = "<group>"; };
+	6E8E4EC31C1A57760004F5CC /* dispatch_after.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_after.c; sourceTree = "<group>"; };
+	6E8E4EC51C1A5D450004F5CC /* cf_file_descriptor.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cf_file_descriptor.c; sourceTree = "<group>"; };
+	6E8E4EC71C1A61680004F5CC /* dispatch_data.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = dispatch_data.m; sourceTree = "<group>"; };
+	6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_vm.c; sourceTree = "<group>"; };
+	6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_drift.c; sourceTree = "<group>"; };
+	6E9926711D01295F000CB89A /* dispatch_block.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_block.c; sourceTree = "<group>"; };
+	6E9955571C3AF7710071D40C /* venture_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = venture_private.h; sourceTree = "<group>"; };
+	6E9955CE1C3B218E0071D40C /* venture.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = venture.c; sourceTree = "<group>"; };
+	6E9956061C3B21AA0071D40C /* venture_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = venture_internal.h; sourceTree = "<group>"; };
+	6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = "<group>"; };
+	6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = "<group>"; };
+	6EA2CB841C005DEF0076794A /* dispatch_source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_source.c; sourceTree = "<group>"; };
+	6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; };
+	6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = "<group>"; };
+	6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = "<group>"; };
+	6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = "<group>"; };
+	6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = "<group>"; };
+	6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = "<group>"; };
+	6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_internal.h; sourceTree = "<group>"; };
+	6EF2CAA41C88998A001ABE83 /* lock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lock.h; sourceTree = "<group>"; };
+	6EF2CAAB1C8899D5001ABE83 /* lock.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = lock.c; path = shims/lock.c; sourceTree = "<group>"; };
 	721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = "<group>"; };
 	721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = semaphore.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
+	72406A031AF95DF800DF4E2B /* firehose_reply.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_reply.defs; sourceTree = "<group>"; };
+	72406A391AF9926000DF4E2B /* firehose_types.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_types.defs; sourceTree = "<group>"; };
 	72B54F690EB169EB00DBECBA /* dispatch_source_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_source_create.3; sourceTree = "<group>"; };
 	72CC940C0ECCD5720031B751 /* dispatch_object.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_object.3; sourceTree = "<group>"; };
 	72CC940D0ECCD5720031B751 /* dispatch.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch.3; sourceTree = "<group>"; };
 	72CC942F0ECCD8750031B751 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = "<group>"; };
+	72DEAA971AE181D300289540 /* firehose_buffer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = firehose_buffer.c; sourceTree = "<group>"; };
+	72DEAA9B1AE1B0BD00289540 /* firehose.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose.defs; sourceTree = "<group>"; };
+	72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = firehose_server_object.m; sourceTree = "<group>"; };
+	72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = firehose_server_private.h; sourceTree = "<group>"; };
+	924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_c99.c; sourceTree = "<group>"; };
+	92F3FE8F1BEC686300025962 /* dispatch_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_api.c; sourceTree = "<group>"; };
+	92F3FE921BEC686300025962 /* Makefile */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.make; path = Makefile; sourceTree = "<group>"; };
 	96032E4A0F5CC8C700241C5F /* time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = time.c; sourceTree = "<group>"; };
 	96032E4C0F5CC8D100241C5F /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = "<group>"; };
 	960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_apply.3; sourceTree = "<group>"; };
@@ -722,21 +657,27 @@
 	96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = "<group>"; };
 	96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = "<group>"; };
 	96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
+	C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; };
+	C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = "<group>"; };
+	C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; };
+	C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = "<group>"; };
+	C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = "<group>"; };
+	C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = "<group>"; };
+	C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = "<group>"; };
 	C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = "<group>"; tabWidth = 8; };
 	C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = "<group>"; };
+	C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m; sourceTree = "<group>"; };
 	C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = "<group>"; };
 	D2AAC046055464E500DB518D /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
 	E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolved.xcconfig"; sourceTree = "<group>"; };
 	E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolver.xcconfig"; sourceTree = "<group>"; };
 	E4128ED513BA9A1700ABB2CB /* hw_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hw_config.h; sourceTree = "<group>"; };
-	E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_macosx.aliases; sourceTree = "<group>"; };
 	E420866F16027AE500EEE210 /* data.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = data.m; sourceTree = "<group>"; };
 	E421E5F81716ADA10090DC9B /* introspection.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection.h; sourceTree = "<group>"; };
 	E421E5FB1716B8730090DC9B /* install-dtrace.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-dtrace.sh"; sourceTree = "<group>"; };
 	E421E5FD1716BEA70090DC9B /* libdispatch.interposable */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch.interposable; sourceTree = "<group>"; };
 	E422A0D412A557B5005E5BDB /* trace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trace.h; sourceTree = "<group>"; };
 	E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = "<group>"; };
-	E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = "<group>"; };
 	E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = "<group>"; };
 	E43A724F1AF85BBC00BAA921 /* block.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block.cpp; sourceTree = "<group>"; };
 	E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = "<group>"; };
@@ -752,7 +693,7 @@
 	E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = "<group>"; };
 	E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = "<group>"; };
 	E46DBC5714EE10C80001F9F6 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; };
-	E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-static.xcconfig"; sourceTree = "<group>"; };
+	E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-up-static.xcconfig"; sourceTree = "<group>"; };
 	E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = "<group>"; };
 	E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = "<group>"; };
 	E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; };
@@ -760,7 +701,6 @@
 	E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
 	E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = "<group>"; };
 	E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = "<group>"; };
-	E4A2C9C4176019760000F809 /* atomic_llsc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_llsc.h; sourceTree = "<group>"; };
 	E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = "<group>"; };
 	E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = "<group>"; };
 	E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = "<group>"; };
@@ -775,7 +715,6 @@
 	E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = "<group>"; };
 	E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = "<group>"; };
 	E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = "<group>"; };
-	E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch_objc.aliases; sourceTree = "<group>"; };
 	E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = "<group>"; };
 	E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = "<group>"; };
 	E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -805,7 +744,7 @@
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
-	D289987405E68DCB004EDB86 /* Frameworks */ = {
+	6E040C601C499B1B00411A2E /* Frameworks */ = {
 		isa = PBXFrameworksBuildPhase;
 		buildActionMask = 2147483647;
 		files = (
@@ -826,11 +765,15 @@
 		isa = PBXGroup;
 		children = (
 			E44DB71E11D2FF080074F2AD /* Build Support */,
-			E4EB4A2914C35F1800AA0FA9 /* OS Object */,
-			FC7BEDAA0E83625200161930 /* Public Headers */,
-			FC7BEDAF0E83626100161930 /* Private Headers */,
-			FC7BEDB60E8363DC00161930 /* Project Headers */,
-			08FB7795FE84155DC02AAC07 /* Source */,
+			6E9B6AE21BB39793009E324D /* OS Public Headers */,
+			E4EB4A2914C35F1800AA0FA9 /* OS Private Headers */,
+			FC7BEDAA0E83625200161930 /* Dispatch Public Headers */,
+			FC7BEDAF0E83626100161930 /* Dispatch Private Headers */,
+			FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */,
+			08FB7795FE84155DC02AAC07 /* Dispatch Source */,
+			6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */,
+			6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */,
+			92F3FEC91BEC687200025962 /* Darwin Tests */,
 			C6A0FF2B0290797F04C91782 /* Documentation */,
 			1AB674ADFE9D54B511CA2CBB /* Products */,
 			C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */,
@@ -842,7 +785,7 @@
 		tabWidth = 4;
 		usesTabs = 1;
 	};
-	08FB7795FE84155DC02AAC07 /* Source */ = {
+	08FB7795FE84155DC02AAC07 /* Dispatch Source */ = {
 		isa = PBXGroup;
 		children = (
 			2BBF5A62154B64F5002B20F9 /* allocator.c */,
@@ -854,6 +797,7 @@
 			E44EBE3B1251659900645D88 /* init.c */,
 			E4B515DC164B32E000E003AF /* introspection.c */,
 			5A27262510F26F1900751FBC /* io.c */,
+			6EF2CAAB1C8899D5001ABE83 /* lock.c */,
 			9661E56A0F3E7DDF00749F3E /* object.c */,
 			E4FC3263145F46C9002FBDDB /* object.m */,
 			96DF70BD0F38FE3C0074BD99 /* once.c */,
@@ -862,11 +806,13 @@
 			96A8AA860F41E7A400CD570B /* source.c */,
 			96032E4A0F5CC8C700241C5F /* time.c */,
 			C9C5F80D143C1771006DC718 /* transform.c */,
+			6E9955CE1C3B218E0071D40C /* venture.c */,
 			E44A8E6A1805C3E0009FFDB6 /* voucher.c */,
+			6EA283D01CAB93270041B2E0 /* libdispatch.codes */,
 			FC7BED950E8361E600161930 /* protocol.defs */,
 			E43570B8126E93380097AB9F /* provider.d */,
 		);
-		name = Source;
+		name = "Dispatch Source";
 		path = src;
 		sourceTree = "<group>";
 	};
@@ -879,6 +825,10 @@
 			E49F24DF125D57FA0057C971 /* libdispatch.dylib */,
 			E46DBC5714EE10C80001F9F6 /* libdispatch.a */,
 			E4B515D6164B2DA300E003AF /* libdispatch.dylib */,
+			6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */,
+			6E040C631C499B1B00411A2E /* libfirehose_kernel.a */,
+			C01866BD1C5973210040FC07 /* libdispatch.a */,
+			C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */,
		);
		name = Products;
		sourceTree = "<group>";
@@ -886,59 +836,108 @@
 	4552536F19B1384900B88766 /* Products */ = {
 		isa = PBXGroup;
 		children = (
-			455253A919B1384900B88766 /* libdispatch_test.a */,
-			455253AB19B1384900B88766 /* dispatch_apply */,
-			455253AD19B1384900B88766 /* dispatch_api */,
-			455253AF19B1384900B88766 /* dispatch_c99 */,
-			455253B119B1384900B88766 /* dispatch_cf_main */,
-			455253B319B1384900B88766 /* dispatch_deadname */,
-			455253B519B1384900B88766 /* dispatch_debug */,
-			455253B719B1384900B88766 /* dispatch_group */,
-			455253B919B1384900B88766 /* dispatch_overcommit */,
-			455253BB19B1384900B88766 /* dispatch_pingpong */,
-			455253BD19B1384900B88766 /* dispatch_plusplus */,
-			455253BF19B1384900B88766 /* dispatch_priority */,
-			455253C119B1384900B88766 /* dispatch_priority2 */,
-			455253C319B1384900B88766 /* dispatch_concur */,
-			455253C519B1384900B88766 /* dispatch_context_for_key */,
-			455253C719B1384900B88766 /* dispatch_proc */,
-			455253C919B1384900B88766 /* dispatch_queue_finalizer */,
-			455253CB19B1384900B88766 /* dispatch_read */,
-			455253CD19B1384900B88766 /* dispatch_read2 */,
-			455253CF19B1384900B88766 /* dispatch_after */,
-			455253D119B1384900B88766 /* dispatch_timer */,
-			455253D319B1384900B88766 /* dispatch_timer_short */,
-			455253D519B1384900B88766 /* dispatch_timer_timeout */,
-			455253D719B1384900B88766 /* dispatch_suspend_timer */,
-			455253D919B1384900B88766 /* dispatch_sema */,
-			455253DB19B1384900B88766 /* dispatch_timer_bit31 */,
-			455253DD19B1384900B88766 /* dispatch_timer_bit63 */,
-			455253DF19B1384900B88766 /* dispatch_timer_set_time */,
-			455253E119B1384900B88766 /* dispatch_drift */,
-			455253E319B1384900B88766 /* dispatch_starfish */,
-			455253E519B1384900B88766 /* dispatch_cascade */,
-			455253E719B1384900B88766 /* dispatch_readsync */,
-			455253E919B1384900B88766 /* dispatch_sync_on_main */,
-			455253EB19B1384900B88766 /* dispatch_sync_gc */,
-			455253ED19B1384900B88766 /* dispatch_apply_gc */,
-			455253EF19B1384900B88766 /* dispatch_data */,
-			455253F119B1384900B88766 /* dispatch_io */,
-			455253F319B1384900B88766 /* dispatch_io_net */,
-			455253F519B1384900B88766 /* dispatch_vm */,
-			455253F719B1384900B88766 /* dispatch_vnode */,
-			455253F919B1384900B88766 /* dispatch_qos */,
-			455253FB19B1384900B88766 /* dispatch_select */,
-			455253FD19B1384900B88766 /* dispatch_transform */,
-			455253FF19B1384900B88766 /* nsoperation */,
-			4552540119B1384900B88766 /* cffd */,
 			4552540319B1384900B88766 /* bench */,
 			4552540519B1384900B88766 /* jsgc_bench */,
 			4552540719B1384900B88766 /* async_bench */,
 			4552540919B1384900B88766 /* apply_bench */,
+			C00B0E111C5AEBBE000330B3 /* dispatch_deadname */,
 		);
 		name = Products;
 		sourceTree = "<group>";
 	};
+	6E9B6AE21BB39793009E324D /* OS Public Headers */ = {
+		isa = PBXGroup;
+		children = (
+			E4EB4A2614C35ECE00AA0FA9 /* object.h */,
+		);
+		name = "OS Public Headers";
+		path = os;
+		sourceTree = "<group>";
+	};
+	6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */ = {
+		isa = PBXGroup;
+		children = (
+			72406A391AF9926000DF4E2B /* firehose_types.defs */,
+			72DEAA9B1AE1B0BD00289540 /* firehose.defs */,
+			72406A031AF95DF800DF4E2B /* firehose_reply.defs */,
+			72DEAA971AE181D300289540 /* firehose_buffer.c */,
+			6E21F2E51BBB23F00000C6A5 /* firehose_server.c */,
+			72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */,
+		);
+		name = "Firehose Source";
+		path = src/firehose;
+		sourceTree = "<group>";
+	};
+	6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */ = {
+		isa = PBXGroup;
+		children = (
+			6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */,
+			6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */,
+			6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */,
+			6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */,
+		);
+		name = "Firehose Project Headers";
+		path = src/firehose;
+		sourceTree = "<group>";
+	};
+	92F3FEC91BEC687200025962 /* Darwin Tests */ = {
+		isa = PBXGroup;
+		children = (
+			6E8E4EC51C1A5D450004F5CC /* cf_file_descriptor.c */,
+			6E8E4EC31C1A57760004F5CC /* dispatch_after.c */,
+			92F3FE8F1BEC686300025962 /* dispatch_api.c */,
+			6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */,
+			6E9926711D01295F000CB89A /* dispatch_block.c */,
+			924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */,
+			6E326AB11C224830002A6505 /* dispatch_cascade.c */,
+			6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */,
+			6E326ADE1C23451A002A6505 /* dispatch_concur.c */,
+			6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */,
+			6E8E4EC71C1A61680004F5CC /* dispatch_data.m */,
+			6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */,
+			6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */,
+			6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */,
+			6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */,
+			6E326ABD1C22A577002A6505 /* dispatch_io_net.c */,
+			6E326ABE1C22A577002A6505 /* dispatch_io.c */,
+			C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */,
+			6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */,
+			6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */,
+			6E326B441C239B61002A6505 /* dispatch_priority.c */,
+			6E326AB51C225477002A6505 /* dispatch_proc.c */,
+			6E326AB31C224870002A6505 /* dispatch_qos.c */,
+			6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */,
+			6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */,
+			6E326AB91C229866002A6505 /* dispatch_read.c */,
+			6E326ABB1C229895002A6505 /* dispatch_read2.c */,
+			6E326ADC1C234396002A6505 /* dispatch_readsync.c */,
+			6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */,
+			6E8E4E9B1C1A4EF10004F5CC /* dispatch_sema.c */,
+			6EA2CB841C005DEF0076794A /* dispatch_source.c */,
+			6E326AE01C234780002A6505 /* dispatch_starfish.c */,
+			6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */,
+			6E326B121C239431002A6505 /* dispatch_suspend_timer.c */,
+			6E326AD81C233209002A6505 /* dispatch_sync_gc.m */,
+			6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */,
+			6E326B131C239431002A6505 /* dispatch_timer_bit.c */,
+			6E326B151C239431002A6505 /* dispatch_timer_set_time.c */,
+			6E326B161C239431002A6505 /* dispatch_timer_short.c */,
+			6E326B171C239431002A6505 /* dispatch_timer_timeout.c */,
+			6E326AE61C2392E8002A6505 /* dispatch_timer.c */,
+			6E326A8F1C2245C4002A6505 /* dispatch_transform.c */,
+			6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */,
+			6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */,
+			6E326AB71C225FCA002A6505 /* dispatch_vnode.c */,
+			6E67D9171C17BA7200FC98AC /* nsoperation.m */,
+			6E4FC9D11C84123600520351 /* os_venture_basic.c */,
+			92F3FE921BEC686300025962 /* Makefile */,
+			6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */,
+			6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */,
+		);
+		name = "Darwin Tests";
+		path = tests;
+		sourceTree = "<group>";
+	};
 	C6A0FF2B0290797F04C91782 /* Documentation */ = {
 		isa = PBXGroup;
 		children = (
@@ -979,15 +978,16 @@
 			E43D93F11097917E004F6A62 /* libdispatch.xcconfig */,
 			E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */,
 			E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */,
-			E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */,
+			E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */,
+			C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */,
+			C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */,
 			E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */,
+			6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */,
+			6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */,
 			E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */,
-			E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */,
-			E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */,
 			E448727914C6215D00BB45C2 /* libdispatch.order */,
 			E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */,
 			E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */,
-			E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */,
 			E421E5FD1716BEA70090DC9B /* libdispatch.interposable */,
 		);
 		path = xcodeconfig;
 		sourceTree = "<group>";
@@ -1022,6 +1022,7 @@
 			E421E5FB1716B8730090DC9B /* install-dtrace.sh */,
 			E49F251E125D631D0057C971 /* mig-headers.sh */,
 			E482F1CD12DBAB590030614D /* postprocess-headers.sh */,
+			C01866BF1C5976C90040FC07 /* run-on-install.sh */,
 		);
 		path = xcodescripts;
 		sourceTree = "<group>";
@@ -1034,13 +1035,17 @@
 		path = config;
 		sourceTree = "<group>";
 	};
-	E4EB4A2914C35F1800AA0FA9 /* OS Object */ = {
+	E4EB4A2914C35F1800AA0FA9 /* OS Private Headers */ = {
 		isa = PBXGroup;
 		children = (
-			E4EB4A2614C35ECE00AA0FA9 /* object.h */,
 			E454569214746F1B00106147 /* object_private.h */,
+			6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */,
+			72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */,
+			6E9955571C3AF7710071D40C /* venture_private.h */,
+			E44A8E711805C473009FFDB6 /* voucher_private.h */,
+			E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */,
		);
-		name = "OS Object";
+		name = "OS Private Headers";
		path = os;
		sourceTree = "<group>";
	};
@@ -1048,10 +1053,10 @@
 		isa = PBXGroup;
 		children = (
 			96929D820F3EA1020041FF5D /* atomic.h */,
-			E4A2C9C4176019760000F809 /* atomic_llsc.h */,
 			E463024F1761603C00E11F4C /* atomic_sfb.h */,
 			E4BA743913A8911B0095BDF1 /* getprogname.h */,
 			E4128ED513BA9A1700ABB2CB /* hw_config.h */,
+			6EF2CAA41C88998A001ABE83 /* lock.h */,
 			FC1832A2109923C7003403D5 /* perfmon.h */,
 			FC1832A3109923C7003403D5 /* time.h */,
 			FC1832A4109923C7003403D5 /* tsd.h */,
@@ -1060,7 +1065,7 @@
 		path = shims;
 		sourceTree = "<group>";
 	};
-	FC7BEDAA0E83625200161930 /* Public Headers */ = {
+	FC7BEDAA0E83625200161930 /* Dispatch Public Headers */ = {
 		isa = PBXGroup;
 		children = (
 			72CC942F0ECCD8750031B751 /* base.h */,
@@ -1069,6 +1074,7 @@
 			FC7BED960E8361E600161930 /* dispatch.h */,
 			FC5C9C1D0EADABE3006E462D /* group.h */,
 			5AAB45C310D30CC7004407EA /* io.h */,
+			C901445E1C73A7FE002638FC /* module.modulemap */,
 			961B994F0F3E85C30006BC96 /* object.h */,
 			96C9553A0F3EAEDD000D2CA4 /* once.h */,
 			FC7BED8B0E8361E600161930 /* queue.h */,
@@ -1077,11 +1083,11 @@
 			96032E4C0F5CC8D100241C5F /* time.h */,
 			E421E5F81716ADA10090DC9B /* introspection.h */,
 		);
-		name = "Public Headers";
+		name = "Dispatch Public Headers";
 		path = dispatch;
 		sourceTree = "<group>";
 	};
-	FC7BEDAF0E83626100161930 /* Private Headers */ = {
+	FC7BEDAF0E83626100161930 /* Dispatch Private Headers */ = {
 		isa = PBXGroup;
 		children = (
 			FC7BED930E8361E600161930 /* private.h */,
@@ -1090,17 +1096,16 @@
 			96BC39BC0F3EBAB100C59689 /* queue_private.h */,
 			FCEF047F0F5661960067401F /* source_private.h */,
 			E4ECBAA415253C25002C313C /* mach_private.h */,
-			E44A8E711805C473009FFDB6 /* voucher_private.h */,
-			E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */,
+			C90144641C73A845002638FC /* module.modulemap */,
 			961B99350F3E83980006BC96 /* benchmark.h */,
 			E4B515D7164B2DFB00E003AF /* introspection_private.h */,
 			2BE17C6318EA305E002CA4E8 /* layout_private.h */,
 		);
-		name = "Private Headers";
+		name = "Dispatch Private Headers";
 		path = private;
 		sourceTree = "<group>";
 	};
-	FC7BEDB60E8363DC00161930 /* Project Headers */ = {
+	FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */ = {
 		isa = PBXGroup;
 		children = (
 			2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */,
@@ -1112,43 +1117,68 @@
 			96929D950F3EA2170041FF5D /* queue_internal.h */,
 			5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */,
 			FC0B34780FA2851C0080FFA0 /* source_internal.h */,
+			6E9956061C3B21AA0071D40C /* venture_internal.h */,
 			E44A8E7418066276009FFDB6 /* voucher_internal.h */,
 			E422A0D412A557B5005E5BDB /* trace.h */,
 			E44F9DA816543F79001DCD38 /* introspection_internal.h */,
 			96929D830F3EA1020041FF5D /* shims.h */,
 			FC1832A0109923B3003403D5 /* shims */,
 		);
-		name = "Project Headers";
+		name = "Dispatch Project Headers";
 		path = src;
 		sourceTree = "<group>";
 	};
/* End PBXGroup section */

/* Begin PBXHeadersBuildPhase section */
-	D2AAC043055464E500DB518D /* Headers */ = {
+	6E040C611C499B1B00411A2E /* Headers */ = {
 		isa = PBXHeadersBuildPhase;
 		buildActionMask = 2147483647;
 		files = (
-			FC7BEDA50E8361E600161930 /* dispatch.h in Headers */,
-			72CC94300ECCD8750031B751 /* base.h in Headers */,
-			961B99500F3E85C30006BC96 /* object.h in Headers */,
+			6E040C751C499CE600411A2E /* firehose_buffer_private.h in Headers */,
+		);
+		runOnlyForDeploymentPostprocessing = 0;
+	};
+	6EB4E4071BA8BCAD00D7B9D2 /* Headers */ = {
+		isa = PBXHeadersBuildPhase;
+		buildActionMask = 2147483647;
+		files = (
+			6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */,
+			6EF0B2711BA8C540007FA4F6 /* firehose_internal.h in Headers */,
+			6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */,
+			6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */,
+			6EF0B26D1BA8C527007FA4F6 /* firehose_server_private.h in Headers */,
+			6EDF10B81BBB488A007F14BF /* firehose_buffer_private.h in Headers */,
+		);
+		runOnlyForDeploymentPostprocessing = 0;
+	};
+	D2AAC043055464E500DB518D /* Headers */ = {
+		isa = PBXHeadersBuildPhase;
+		buildActionMask = 2147483647;
+		files = (
+			FC7BEDA50E8361E600161930 /* dispatch.h in Headers */,
+			72CC94300ECCD8750031B751 /* base.h in Headers */,
+			961B99500F3E85C30006BC96 /* object.h in Headers */,
 			E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */,
 			FC7BED9A0E8361E600161930 /* queue.h in Headers */,
 			FC7BED9C0E8361E600161930 /* source.h in Headers */,
+			6E9955581C3AF7710071D40C /* venture_private.h in Headers */,
 			E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */,
 			721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */,
 			FC5C9C1E0EADABE3006E462D /* group.h in Headers */,
 			96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */,
 			5AAB45C410D30CC7004407EA /* io.h in Headers */,
 			E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */,
+			C90144651C73A8A3002638FC /* module.modulemap in Headers */,
 			E4630253176162D400E11F4C /* atomic_sfb.h in Headers */,
 			5AAB45C610D30D0C004407EA /* data.h in Headers */,
+			6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */,
 			96032E4D0F5CC8D100241C5F /* time.h in Headers */,
 			FC7BEDA20E8361E600161930 /* private.h in Headers */,
 			E4D76A9318E325D200B1F98B /* block.h in Headers */,
-			E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */,
 			C913AC0F143BD34800B78976 /* data_private.h in Headers */,
 			96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */,
+			C90144661C73A9F6002638FC /* module.modulemap in Headers */,
 			FCEF04800F5661960067401F /* source_private.h in Headers */,
 			961B99360F3E83980006BC96 /* benchmark.h in Headers */,
 			FC7BED9E0E8361E600161930 /* internal.h in Headers */,
@@ -1163,12 +1193,16 @@
 			96929D840F3EA1020041FF5D /* atomic.h in Headers */,
 			96929D850F3EA1020041FF5D /* shims.h in Headers */,
 			FC1832A7109923C7003403D5 /* time.h in Headers */,
+			6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */,
 			E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */,
 			2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */,
 			FC1832A6109923C7003403D5 /* perfmon.h in Headers */,
 			FC9C70E8105EC9620074F9CA /* config.h in Headers */,
+			6E9956071C3B21AA0071D40C /* venture_internal.h in Headers */,
+			6EF2CAA51C88998A001ABE83 /* lock.h in Headers */,
 			E422A0D512A557B5005E5BDB /* trace.h in Headers */,
 			E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */,
+			6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */,
 			E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */,
 			E454569314746F1B00106147 /* object_private.h in Headers */,
 			E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */,
@@ -1189,6 +1223,7 @@
 			E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */,
 			E49F24AE125D57FA0057C971 /* queue.h in Headers */,
 			E49F24AF125D57FA0057C971 /* source.h in Headers */,
+			6E99558A1C3AF7900071D40C /* venture_private.h in Headers */,
 			E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */,
 			E49F24B0125D57FA0057C971 /* semaphore.h in Headers */,
 			E49F24B1125D57FA0057C971 /* group.h in Headers */,
@@ -1200,7 +1235,6 @@
 			E49F24B5125D57FA0057C971 /* time.h in Headers */,
 			E49F24B6125D57FA0057C971 /* private.h in Headers */,
 			E4D76A9418E325D200B1F98B /* block.h in Headers */,
-			E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */,
 			E49F24B7125D57FA0057C971 /* queue_private.h in Headers */,
 			E49F24B8125D57FA0057C971 /* source_private.h in Headers */,
 			E49F24B9125D57FA0057C971 /* benchmark.h in Headers */,
@@ -1210,6 +1244,7 @@
 			E49F24BE125D57FA0057C971 /* source_internal.h in Headers */,
 			E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */,
 			E4C1ED701263E714000D3C8B /* data_internal.h in Headers */,
+			6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */,
 			E49F24BF125D57FA0057C971 /* io_internal.h in Headers */,
 			E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */,
 			E49F24C1125D57FA0057C971 /* tsd.h in Headers */,
@@ -1217,12 +1252,16 @@
 			E49F24C3125D57FA0057C971 /* shims.h in Headers */,
 			E49F24C4125D57FA0057C971 /* time.h in Headers */,
 			E49F24C5125D57FA0057C971 /* perfmon.h in Headers */,
+			6ED64B521BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */,
 			E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */,
 			2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */,
 			E49F24C6125D57FA0057C971 /* config.h in Headers */,
 			E422A0D612A557B5005E5BDB /* trace.h in Headers */,
+			6E9956091C3B21B40071D40C /* venture_internal.h in Headers */,
+			6EF2CAB41C889D65001ABE83 /* lock.h in Headers */,
 			E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */,
 			E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */,
+			6ED64B581BBD8A3E00C35F4D /* firehose_inline_internal.h in Headers */,
 			E454569414746F1B00106147 /* object_private.h in Headers */,
 			E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */,
 			E4ECBAA615253D17002C313C /* mach_private.h in Headers */,
@@ -1242,19 +1281,23 @@
 			E421E5F91716ADA10090DC9B /* introspection.h in Headers */,
 			E44F9DB216544032001DCD38 /* object_internal.h in Headers */,
 			E44F9DB316544037001DCD38 /* queue_internal.h in Headers */,
+			6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */,
 			E44F9DB51654403F001DCD38 /* source_internal.h in Headers */,
 			E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */,
 			E44F9DB01654402B001DCD38 /* data_internal.h in Headers */,
+			6E9956081C3B21B30071D40C /* venture_internal.h in Headers */,
 			E44F9DB11654402E001DCD38 /* io_internal.h in Headers */,
 			E4630251176162D200E11F4C /* atomic_sfb.h in Headers */,
 			E44F9DBE1654405B001DCD38 /* tsd.h in Headers */,
 			E44F9DB816544053001DCD38 /* atomic.h in Headers */,
+			6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */,
+			6EF2CAB51C889D67001ABE83 /* lock.h in Headers */,
 			E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */,
+			6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */,
 			E44F9DB71654404F001DCD38 /* shims.h in Headers */,
 			E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */,
 			E44F9DBF165440EF001DCD38 /* config.h in Headers */,
 			E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */,
-			E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */,
 			E44F9DB616544043001DCD38 /* trace.h in Headers */,
 			E44F9DB916544056001DCD38 /* getprogname.h in Headers */,
 			E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */,
@@ -1268,14 +1311,96 @@
 	};
 /* End PBXHeadersBuildPhase section */
 
+/* Begin PBXLegacyTarget section */
+	92F3FECA1BEC69E500025962 /* darwintests */ = {
+		isa = PBXLegacyTarget;
+		buildArgumentsString = "$(ACTION)";
+		buildConfigurationList = 92F3FECB1BEC69E500025962 /* Build configuration list for PBXLegacyTarget "darwintests" */;
+		buildPhases = (
+		);
+		buildToolPath = /usr/bin/make;
+		buildWorkingDirectory = tests/;
+		dependencies = (
+		);
+		name = darwintests;
+		passBuildSettingsInEnvironment = 1;
+		productName = darwintests;
+	};
+/* End PBXLegacyTarget section */
+
 /* Begin PBXNativeTarget section */
+	6E040C621C499B1B00411A2E /* libfirehose_kernel */ = {
+		isa = PBXNativeTarget;
+		buildConfigurationList = 6E040C6A1C499B1B00411A2E /* Build configuration list for PBXNativeTarget "libfirehose_kernel" */;
+		buildPhases = (
+			6E040C5F1C499B1B00411A2E /* Sources */,
+			6E040C601C499B1B00411A2E /* Frameworks */,
+			6E040C611C499B1B00411A2E /* Headers */,
+		);
+		buildRules = (
+		);
+		dependencies = (
+		);
+		name = libfirehose_kernel;
+		productName = libfirehose_kernel;
+		productReference = 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */;
+		productType = "com.apple.product-type.library.static";
+	};
+	6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */ = {
+		isa = PBXNativeTarget;
+		buildConfigurationList = 6EB4E40A1BA8BCAD00D7B9D2 /* Build configuration list for PBXNativeTarget "libfirehose_server" */;
+		buildPhases = (
+			6EB4E4051BA8BCAD00D7B9D2 /* Sources */,
+			6EB4E4071BA8BCAD00D7B9D2 /* Headers */,
+		);
+		buildRules = (
+		);
+		dependencies = (
+		);
+		name = libfirehose_server;
+		productName = libfirehose_server;
+		productReference = 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */;
+		productType = "com.apple.product-type.library.static";
+	};
+	C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */ = {
+		isa = PBXNativeTarget;
+		buildConfigurationList = C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */;
+		buildPhases = (
+			C00B0DF11C5AEBBE000330B3 /* Sources */,
+			C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */,
+		);
+		buildRules = (
+		);
+		dependencies = (
+		);
+		name = "libdispatch dyld stub";
+		productName = libdispatch;
+		productReference = C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */;
+		productType = "com.apple.product-type.library.static";
+	};
+	C01866A41C5973210040FC07 /* libdispatch mp static */ = {
+		isa = PBXNativeTarget;
+		buildConfigurationList = C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */;
+		buildPhases = (
+			C01866A51C5973210040FC07 /* Sources */,
+			C01866C01C59777B0040FC07 /* Symlink to the loaderd path */,
+		);
+		buildRules = (
+		);
+		dependencies = (
+		);
+		name = "libdispatch mp static";
+		productName = libdispatch;
+		productReference = C01866BD1C5973210040FC07 /* libdispatch.a */;
+		productType = "com.apple.product-type.library.static";
+	};
 	D2AAC045055464E500DB518D /* libdispatch */ = {
 		isa = PBXNativeTarget;
 		buildConfigurationList = 1DEB91EB08733DB70010E9CD /* Build configuration list for PBXNativeTarget "libdispatch" */;
 		buildPhases = (
 			D2AAC043055464E500DB518D /* Headers */,
 			D2AAC044055464E500DB518D /* Sources */,
-			D289987405E68DCB004EDB86 /* Frameworks */,
+			6EA283D61CAB933E0041B2E0 /* Copy Trace Definitions */,
 			E4EB4A2B14C3720B00AA0FA9 /* Install Headers */,
 			E482F1C512DBAA110030614D /* Postprocess Headers */,
 			4CED8B9D0EEDF8B600AF99AB /* Install Manpages */,
@@ -1283,19 +1408,22 @@
 		buildRules = (
 		);
 		dependencies = (
+			6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */,
 			E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */,
 			E47D6ECD125FEBA10070D91C /* PBXTargetDependency */,
 			E4B515DB164B317700E003AF /* PBXTargetDependency */,
+			C01866C21C597AEA0040FC07 /* PBXTargetDependency */,
 			E437F0D614F7441F00F0B997 /* PBXTargetDependency */,
+			C00B0E141C5AEED6000330B3 /* PBXTargetDependency */,
 		);
 		name = libdispatch;
 		productName = libdispatch;
 		productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */;
 		productType = "com.apple.product-type.library.dynamic";
 	};
-	E46DBC1A14EE10C80001F9F6 /* libdispatch static */ = {
+	E46DBC1A14EE10C80001F9F6 /* libdispatch up static */ = {
 		isa = PBXNativeTarget;
-		buildConfigurationList = E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch static" */;
+		buildConfigurationList = E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch up static" */;
 		buildPhases = (
 			E46DBC3E14EE10C80001F9F6 /* Sources */,
 		);
@@ -1303,7 +1431,7 @@
 		);
 		dependencies = (
 		);
-		name = "libdispatch static";
+		name = "libdispatch up static";
 		productName = libdispatch;
 		productReference = E46DBC5714EE10C80001F9F6 /* libdispatch.a */;
 		productType = "com.apple.product-type.library.static";
@@ -1386,7 +1514,61 @@
 		isa = PBXProject;
 		attributes = {
 			BuildIndependentTargetsInParallel = YES;
-			LastUpgradeCheck = 0700;
+			LastUpgradeCheck = 0800;
+			TargetAttributes = {
+				3F3C9326128E637B0042B1F7 = {
+					ProvisioningStyle = Manual;
+				};
+				4552540A19B1389700B88766 = {
+					ProvisioningStyle = Manual;
+				};
+				6E040C621C499B1B00411A2E = {
+					CreatedOnToolsVersion = 7.3;
+					ProvisioningStyle = Manual;
+				};
+				6E2ECAFD1C49C2FF00A30A32 = {
+					CreatedOnToolsVersion = 7.3;
+					ProvisioningStyle = Manual;
+				};
+				6EB4E4081BA8BCAD00D7B9D2 = {
+					CreatedOnToolsVersion = 7.0;
+					ProvisioningStyle = Manual;
+				};
+				92CBD7201BED924F006E0892 = {
+					ProvisioningStyle = Manual;
+				};
+				92F3FECA1BEC69E500025962 = {
+					CreatedOnToolsVersion = 7.1;
+					ProvisioningStyle = Manual;
+				};
+				C00B0DF01C5AEBBE000330B3 = {
+					ProvisioningStyle = Manual;
+				};
+				C01866A41C5973210040FC07 = {
+					ProvisioningStyle = Manual;
+				};
+				C927F35A10FD7F0600C5AB8B = {
+					ProvisioningStyle = Manual;
+				};
+				D2AAC045055464E500DB518D = {
+					ProvisioningStyle = Manual;
+				};
+				E46DBC1A14EE10C80001F9F6 = {
+					ProvisioningStyle = Manual;
+				};
+				E49F24A9125D57FA0057C971 = {
+					ProvisioningStyle = Manual;
+				};
+				E4B51595164B2DA300E003AF = {
+					ProvisioningStyle = Manual;
+				};
+				E4EC118F12514302000DDBD1 = {
+					ProvisioningStyle = Manual;
+				};
+				E4EC121612514715000DDBD1 = {
+					ProvisioningStyle = Manual;
+				};
+			};
 		};
 		buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */;
 		compatibilityVersion = "Xcode 3.2";
@@ -1417,330 +1599,22 @@
 			E4EC121612514715000DDBD1 /* libdispatch mp resolved */,
 			E4EC118F12514302000DDBD1 /* libdispatch up resolved */,
 			E4B51595164B2DA300E003AF /* libdispatch introspection */,
-			E46DBC1A14EE10C80001F9F6 /* libdispatch static */,
+			E46DBC1A14EE10C80001F9F6 /* libdispatch up static */,
+			C01866A41C5973210040FC07 /* libdispatch mp static */,
+			C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */,
 			3F3C9326128E637B0042B1F7 /* libdispatch_Sim */,
+			6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */,
 			C927F35A10FD7F0600C5AB8B /* libdispatch_tools */,
 			4552540A19B1389700B88766 /* libdispatch_tests */,
+			6E040C621C499B1B00411A2E /* libfirehose_kernel */,
+			6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */,
+			92F3FECA1BEC69E500025962 /* darwintests */,
+			92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */,
 		);
 	};
 /* End PBXProject section */
 
 /* Begin PBXReferenceProxy section */
-	455253A919B1384900B88766 /* libdispatch_test.a */ = {
-		isa = PBXReferenceProxy;
-		fileType = archive.ar;
-		path = libdispatch_test.a;
-		remoteRef = 455253A819B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253AB19B1384900B88766 /* dispatch_apply */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_apply;
-		remoteRef = 455253AA19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253AD19B1384900B88766 /* dispatch_api */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_api;
-		remoteRef = 455253AC19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253AF19B1384900B88766 /* dispatch_c99 */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_c99;
-		remoteRef = 455253AE19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253B119B1384900B88766 /* dispatch_cf_main */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_cf_main;
-		remoteRef = 455253B019B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253B319B1384900B88766 /* dispatch_deadname */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_deadname;
-		remoteRef = 455253B219B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253B519B1384900B88766 /* dispatch_debug */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_debug;
-		remoteRef = 455253B419B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253B719B1384900B88766 /* dispatch_group */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_group;
-		remoteRef = 455253B619B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253B919B1384900B88766 /* dispatch_overcommit */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_overcommit;
-		remoteRef = 455253B819B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253BB19B1384900B88766 /* dispatch_pingpong */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_pingpong;
-		remoteRef = 455253BA19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253BD19B1384900B88766 /* dispatch_plusplus */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_plusplus;
-		remoteRef = 455253BC19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253BF19B1384900B88766 /* dispatch_priority */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_priority;
-		remoteRef = 455253BE19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253C119B1384900B88766 /* dispatch_priority2 */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_priority2;
-		remoteRef = 455253C019B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253C319B1384900B88766 /* dispatch_concur */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_concur;
-		remoteRef = 455253C219B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253C519B1384900B88766 /* dispatch_context_for_key */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_context_for_key;
-		remoteRef = 455253C419B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253C719B1384900B88766 /* dispatch_proc */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_proc;
-		remoteRef = 455253C619B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253C919B1384900B88766 /* dispatch_queue_finalizer */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_queue_finalizer;
-		remoteRef = 455253C819B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253CB19B1384900B88766 /* dispatch_read */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_read;
-		remoteRef = 455253CA19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253CD19B1384900B88766 /* dispatch_read2 */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_read2;
-		remoteRef = 455253CC19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253CF19B1384900B88766 /* dispatch_after */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_after;
-		remoteRef = 455253CE19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253D119B1384900B88766 /* dispatch_timer */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_timer;
-		remoteRef = 455253D019B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253D319B1384900B88766 /* dispatch_timer_short */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_timer_short;
-		remoteRef = 455253D219B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253D519B1384900B88766 /* dispatch_timer_timeout */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_timer_timeout;
-		remoteRef = 455253D419B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253D719B1384900B88766 /* dispatch_suspend_timer */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_suspend_timer;
-		remoteRef = 455253D619B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253D919B1384900B88766 /* dispatch_sema */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_sema;
-		remoteRef = 455253D819B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253DB19B1384900B88766 /* dispatch_timer_bit31 */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_timer_bit31;
-		remoteRef = 455253DA19B1384900B88766 /* PBXContainerItemProxy */;
-		sourceTree = BUILT_PRODUCTS_DIR;
-	};
-	455253DD19B1384900B88766 /* dispatch_timer_bit63 */ = {
-		isa = PBXReferenceProxy;
-		fileType = "compiled.mach-o.executable";
-		path = dispatch_timer_bit63;
-		remoteRef = 
455253DC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253DF19B1384900B88766 /* dispatch_timer_set_time */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_timer_set_time; - remoteRef = 455253DE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E119B1384900B88766 /* dispatch_drift */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_drift; - remoteRef = 455253E019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E319B1384900B88766 /* dispatch_starfish */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_starfish; - remoteRef = 455253E219B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E519B1384900B88766 /* dispatch_cascade */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_cascade; - remoteRef = 455253E419B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E719B1384900B88766 /* dispatch_readsync */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_readsync; - remoteRef = 455253E619B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E919B1384900B88766 /* dispatch_sync_on_main */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_sync_on_main; - remoteRef = 455253E819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253EB19B1384900B88766 /* dispatch_sync_gc */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_sync_gc; - remoteRef = 455253EA19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253ED19B1384900B88766 /* dispatch_apply_gc */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_apply_gc; - remoteRef = 455253EC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253EF19B1384900B88766 /* dispatch_data */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_data; - remoteRef = 455253EE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F119B1384900B88766 /* dispatch_io */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_io; - remoteRef = 455253F019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F319B1384900B88766 /* dispatch_io_net */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_io_net; - remoteRef = 455253F219B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F519B1384900B88766 /* dispatch_vm */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_vm; - remoteRef = 455253F419B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F719B1384900B88766 /* dispatch_vnode */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_vnode; - remoteRef = 455253F619B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F919B1384900B88766 /* dispatch_qos */ = { - isa = PBXReferenceProxy; - fileType = 
"compiled.mach-o.executable"; - path = dispatch_qos; - remoteRef = 455253F819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253FB19B1384900B88766 /* dispatch_select */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_select; - remoteRef = 455253FA19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253FD19B1384900B88766 /* dispatch_transform */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_transform; - remoteRef = 455253FC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253FF19B1384900B88766 /* nsoperation */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = nsoperation; - remoteRef = 455253FE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 4552540119B1384900B88766 /* cffd */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = cffd; - remoteRef = 4552540019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; 4552540319B1384900B88766 /* bench */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -1769,6 +1643,13 @@ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; + C00B0E111C5AEBBE000330B3 /* dispatch_deadname */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_deadname; + remoteRef = C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -1795,6 +1676,40 @@ shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; + C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/run-on-install.sh", + ); + name = "Symlink libdispatch.a -> libdispatch_dyld_target.a"; + outputPaths = ( + "${DSTROOT}${INSTALL_PATH}/libdispatch.a", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ${PRODUCT_NAME}.a ${SCRIPT_OUTPUT_FILE_0}"; + showEnvVarsInLog = 0; + }; + C01866C01C59777B0040FC07 /* Symlink to the loaderd path */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/run-on-install.sh", + ); + name = "Symlink to the loaderd path"; + outputPaths = ( + "${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ../../../..${INSTALL_PATH}/${PRODUCT_NAME}.a ${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a"; + showEnvVarsInLog = 0; + }; E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; @@ -1870,13 +1785,15 @@ "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", "$(SRCROOT)/os/object_private.h", - "$(SRCROOT)/private/voucher_private.h", - "$(SRCROOT)/private/voucher_activity_private.h", + "$(SRCROOT)/os/venture_private.h", + "$(SRCROOT)/os/voucher_private.h", + "$(SRCROOT)/os/voucher_activity_private.h", ); name = "Install Headers"; outputPaths = ( "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); @@ -1894,13 +1811,15 @@ "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", "$(SRCROOT)/os/object_private.h", - "$(SRCROOT)/private/voucher_private.h", - "$(SRCROOT)/private/voucher_activity_private.h", + "$(SRCROOT)/os/venture_private.h", + "$(SRCROOT)/os/voucher_private.h", + "$(SRCROOT)/os/voucher_activity_private.h", ); name = "Install Headers"; outputPaths = ( "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); @@ -1932,16 +1851,22 @@ ); inputPaths = ( "$(SRCROOT)/src/protocol.defs", + "$(SRCROOT)/src/firehose/firehose.defs", + "$(SRCROOT)/src/firehose/firehose_reply.defs", "$(SRCROOT)/xcodescripts/mig-headers.sh", ); name = "Mig Headers"; outputPaths = ( "$(DERIVED_FILE_DIR)/protocol.h", "$(DERIVED_FILE_DIR)/protocolServer.h", + "$(DERIVED_FILE_DIR)/firehose.h", + "$(DERIVED_FILE_DIR)/firehoseServer.h", + "$(DERIVED_FILE_DIR)/firehose_reply.h", + "$(DERIVED_FILE_DIR)/firehose_replyServer.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_1}\""; + shellScript = ". \"${SCRIPT_INPUT_FILE_3}\""; showEnvVarsInLog = 0; }; E4EC121712514715000DDBD1 /* Mig Headers */ = { @@ -1951,16 +1876,22 @@ ); inputPaths = ( "$(SRCROOT)/src/protocol.defs", + "$(SRCROOT)/src/firehose/firehose.defs", + "$(SRCROOT)/src/firehose/firehose_reply.defs", "$(SRCROOT)/xcodescripts/mig-headers.sh", ); name = "Mig Headers"; outputPaths = ( "$(DERIVED_FILE_DIR)/protocol.h", "$(DERIVED_FILE_DIR)/protocolServer.h", + "$(DERIVED_FILE_DIR)/firehose.h", + "$(DERIVED_FILE_DIR)/firehoseServer.h", + "$(DERIVED_FILE_DIR)/firehose_reply.h", + "$(DERIVED_FILE_DIR)/firehose_replyServer.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_1}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_3}\""; showEnvVarsInLog = 0; }; E4EC122512514715000DDBD1 /* Symlink normal variant */ = { @@ -1982,16 +1913,96 @@ /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ + 6E040C5F1C499B1B00411A2E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6E040C731C499C6500411A2E /* firehose_buffer.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 6EB4E4051BA8BCAD00D7B9D2 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */, + 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */, + 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */, + 6EF0B2781BA8C56E007FA4F6 /* firehose_reply.defs in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + C00B0DF11C5AEBBE000330B3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */, + C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */, + 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */, + C00B0DF41C5AEBBE000330B3 /* init.c in Sources */, + C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */, + C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */, + C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, + C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */, + C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */, + C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, + C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */, + C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */, + C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */, + C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */, + C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */, + C00B0E001C5AEBBE000330B3 /* source.c in Sources */, + C00B0E011C5AEBBE000330B3 /* time.c in Sources */, + C00B0E021C5AEBBE000330B3 /* data.c in Sources */, + C00B0E031C5AEBBE000330B3 /* io.c in Sources */, + C00B0E041C5AEBBE000330B3 /* transform.c in Sources */, + C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + C01866A51C5973210040FC07 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + C01866A61C5973210040FC07 /* protocol.defs in Sources */, + C01866A71C5973210040FC07 /* resolver.c in Sources */, + 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */, + C01866A81C5973210040FC07 /* init.c in Sources */, + C01866A91C5973210040FC07 /* queue.c in Sources */, + C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */, + C01866AB1C5973210040FC07 /* firehose.defs in Sources */, + C01866AC1C5973210040FC07 /* block.cpp in Sources */, + C01866AD1C5973210040FC07 /* semaphore.c in Sources */, + C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */, + C01866AF1C5973210040FC07 /* once.c in Sources */, + C01866B01C5973210040FC07 /* voucher.c in Sources */, + C01866B11C5973210040FC07 /* apply.c in Sources */, + C01866B21C5973210040FC07 /* object.c in Sources */, + C01866B31C5973210040FC07 /* benchmark.c in Sources */, + C01866B41C5973210040FC07 /* source.c in Sources */, + C01866B51C5973210040FC07 /* time.c in Sources */, + C01866B61C5973210040FC07 /* data.c in Sources */, + C01866B71C5973210040FC07 /* io.c in Sources */, + C01866B81C5973210040FC07 /* transform.c in Sources */, + C01866B91C5973210040FC07 /* allocator.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 
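/* Note on the "Mig Headers" phases above: Xcode exports each inputPaths entry
   to the script as a SCRIPT_INPUT_FILE_<n> environment variable, numbered in
   declaration order. Inserting the two firehose .defs files ahead of
   mig-headers.sh is what shifts the script itself from SCRIPT_INPUT_FILE_1 to
   SCRIPT_INPUT_FILE_3 in the shellScript lines. A rough sketch of the
   environment such a phase runs with (paths illustrative, assuming the four
   inputPaths shown):

       SCRIPT_INPUT_FILE_COUNT=4
       SCRIPT_INPUT_FILE_0=$SRCROOT/src/protocol.defs
       SCRIPT_INPUT_FILE_1=$SRCROOT/src/firehose/firehose.defs
       SCRIPT_INPUT_FILE_2=$SRCROOT/src/firehose/firehose_reply.defs
       SCRIPT_INPUT_FILE_3=$SRCROOT/xcodescripts/mig-headers.sh
       . "$SCRIPT_INPUT_FILE_3"   # sources the script, which presumably runs
                                  # mig over each *.defs input to generate the
                                  # headers listed in outputPaths
*/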
D2AAC044055464E500DB518D /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( E43570B9126E93380097AB9F /* provider.d in Sources */, FC7BEDA40E8361E600161930 /* protocol.defs in Sources */, + 6E9955CF1C3B218E0071D40C /* venture.c in Sources */, + 6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */, + 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */, E49F2499125D48D80057C971 /* resolver.c in Sources */, E44EBE3E1251659900645D88 /* init.c in Sources */, FC7BED990E8361E600161930 /* queue.c in Sources */, 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, + 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */, + 6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */, 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, 9661E56B0F3E7DDF00749F3E /* object.c in Sources */, @@ -2015,10 +2026,15 @@ files = ( E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */, E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, + 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */, E46DBC4214EE10C80001F9F6 /* init.c in Sources */, E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, + 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */, + 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */, E43A72881AF85BE900BAA921 /* block.cpp in Sources */, E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, + 6E9956011C3B21980071D40C /* venture.c in Sources */, + 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */, E46DBC4514EE10C80001F9F6 /* once.c in Sources */, E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */, E46DBC4614EE10C80001F9F6 /* apply.c in Sources */, @@ -2039,10 +2055,15 @@ files = ( E43570BA126E93380097AB9F /* provider.d in Sources */, E49F24C8125D57FA0057C971 /* protocol.defs in Sources */, + 6E9956051C3B219B0071D40C /* venture.c in Sources */, + 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */, + 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */, E49F24C9125D57FA0057C971 /* resolver.c in Sources */, E49F24CA125D57FA0057C971 /* init.c in Sources */, E49F24CB125D57FA0057C971 /* queue.c in Sources */, E49F24CC125D57FA0057C971 /* semaphore.c in Sources */, + 6EF2CAAD1C8899E9001ABE83 /* lock.c in Sources */, + 6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */, E49F24CD125D57FA0057C971 /* once.c in Sources */, E49F24CE125D57FA0057C971 /* apply.c in Sources */, E49F24CF125D57FA0057C971 /* object.c in Sources */, @@ -2067,13 +2088,17 @@ E4B515BD164B2DA300E003AF /* provider.d in Sources */, E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, E4B515BF164B2DA300E003AF /* resolver.c in Sources */, + 6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */, + 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */, E4B515C0164B2DA300E003AF /* init.c in Sources */, E4B515C1164B2DA300E003AF /* queue.c in Sources */, + 6E9956021C3B21990071D40C /* venture.c in Sources */, E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, E4B515C3164B2DA300E003AF /* once.c in Sources */, E43A72871AF85BCD00BAA921 /* block.cpp in Sources */, E4B515C4164B2DA300E003AF /* apply.c in Sources */, E4B515C5164B2DA300E003AF /* object.c in Sources */, + 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */, E4B515C6164B2DA300E003AF /* benchmark.c in Sources */, E4B515C7164B2DA300E003AF /* source.c in Sources */, E4B515C8164B2DA300E003AF /* time.c in Sources */, @@ -2081,6 +2106,7 @@ E4B515CA164B2DA300E003AF /* io.c in Sources */, E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources 
*/, E4B515CB164B2DA300E003AF /* transform.c in Sources */, + 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */, E4B515CC164B2DA300E003AF /* object.m in Sources */, E4B515CD164B2DA300E003AF /* allocator.c in Sources */, E4B515CE164B2DA300E003AF /* data.m in Sources */, @@ -2094,10 +2120,15 @@ files = ( E417A38412A472C4004D659D /* provider.d in Sources */, E44EBE5412517EBE00645D88 /* protocol.defs in Sources */, + 6E9956031C3B219A0071D40C /* venture.c in Sources */, + 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */, + 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */, E49F2424125D3C970057C971 /* resolver.c in Sources */, E44EBE5512517EBE00645D88 /* init.c in Sources */, E4EC11AE12514302000DDBD1 /* queue.c in Sources */, E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */, + 6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */, + 6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */, E4EC11B012514302000DDBD1 /* once.c in Sources */, E4EC11B112514302000DDBD1 /* apply.c in Sources */, E4EC11B212514302000DDBD1 /* object.c in Sources */, @@ -2121,10 +2152,15 @@ files = ( E417A38512A472C5004D659D /* provider.d in Sources */, E44EBE5612517EBE00645D88 /* protocol.defs in Sources */, + 6E9956041C3B219B0071D40C /* venture.c in Sources */, + 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */, + 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */, E49F2423125D3C960057C971 /* resolver.c in Sources */, E44EBE5712517EBE00645D88 /* init.c in Sources */, E4EC121A12514715000DDBD1 /* queue.c in Sources */, E4EC121B12514715000DDBD1 /* semaphore.c in Sources */, + 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */, + 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */, E4EC121C12514715000DDBD1 /* once.c in Sources */, E4EC121D12514715000DDBD1 /* apply.c in Sources */, E4EC121E12514715000DDBD1 /* object.c in Sources */, @@ -2145,10 +2181,35 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ - 4552540F19B138B700B88766 /* PBXTargetDependency */ = { + 6E2ECB021C49C31200A30A32 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 6E040C621C499B1B00411A2E /* libfirehose_kernel */; + targetProxy = 6E2ECB011C49C31200A30A32 /* PBXContainerItemProxy */; + }; + 6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */; + targetProxy = 6EF0B27D1BA8C5BF007FA4F6 /* PBXContainerItemProxy */; + }; + 92CBD75A1BED926C006E0892 /* PBXTargetDependency */ = { isa = PBXTargetDependency; name = all; - targetProxy = 4552540E19B138B700B88766 /* PBXContainerItemProxy */; + targetProxy = 92CBD7591BED926C006E0892 /* PBXContainerItemProxy */; + }; + 92F3FECF1BEC6F1000025962 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 92F3FECA1BEC69E500025962 /* darwintests */; + targetProxy = 92F3FECE1BEC6F1000025962 /* PBXContainerItemProxy */; + }; + C00B0E141C5AEED6000330B3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */; + targetProxy = C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */; + }; + C01866C21C597AEA0040FC07 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = C01866A41C5973210040FC07 /* libdispatch mp static */; + targetProxy = C01866C11C597AEA0040FC07 /* PBXContainerItemProxy */; }; C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */ = { isa = PBXTargetDependency; @@ -2162,7 +2223,7 @@ }; E437F0D614F7441F00F0B997 /* PBXTargetDependency */ = { isa = 
PBXTargetDependency; - target = E46DBC1A14EE10C80001F9F6 /* libdispatch static */; + target = E46DBC1A14EE10C80001F9F6 /* libdispatch up static */; targetProxy = E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */; }; E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */ = { @@ -2223,6 +2284,106 @@ }; name = Debug; }; + 6E040C641C499B1B00411A2E /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */; + buildSettings = { + }; + name = Release; + }; + 6E040C651C499B1B00411A2E /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + 6E2ECAFF1C49C30000A30A32 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 6E2ECB001C49C30000A30A32 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 6EB4E40B1BA8BCAD00D7B9D2 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */; + buildSettings = { + }; + name = Release; + }; + 6EB4E40C1BA8BCAD00D7B9D2 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + 92CBD7241BED924F006E0892 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 92CBD7251BED924F006E0892 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 92F3FECC1BEC69E500025962 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Release; + }; + 92F3FECD1BEC69E500025962 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Debug; + }; + C00B0E081C5AEBBE000330B3 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Release; + }; + C00B0E091C5AEBBE000330B3 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Debug; + }; + C01866BB1C5973210040FC07 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Release; + }; + C01866BC1C5973210040FC07 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Debug; + }; C927F35B10FD7F0600C5AB8B /* Release */ = { isa = XCBuildConfiguration; buildSettings = { @@ -2237,14 +2398,14 @@ }; E46DBC5514EE10C80001F9F6 /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */; buildSettings = { }; name = Release; }; E46DBC5614EE10C80001F9F6 /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = 
E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */; buildSettings = { }; name = Debug; @@ -2279,8 +2440,6 @@ isa = XCBuildConfiguration; baseConfigurationReference = E43D93F11097917E004F6A62 /* libdispatch.xcconfig */; buildSettings = { - BUILD_VARIANTS = debug; - ONLY_ACTIVE_ARCH = YES; }; name = Debug; }; @@ -2362,6 +2521,69 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 6E040C6A1C499B1B00411A2E /* Build configuration list for PBXNativeTarget "libfirehose_kernel" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6E040C641C499B1B00411A2E /* Release */, + 6E040C651C499B1B00411A2E /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 6E2ECAFE1C49C30000A30A32 /* Build configuration list for PBXAggregateTarget "libdispatch_kernel" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6E2ECAFF1C49C30000A30A32 /* Release */, + 6E2ECB001C49C30000A30A32 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 6EB4E40A1BA8BCAD00D7B9D2 /* Build configuration list for PBXNativeTarget "libfirehose_server" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6EB4E40B1BA8BCAD00D7B9D2 /* Release */, + 6EB4E40C1BA8BCAD00D7B9D2 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 92CBD7231BED924F006E0892 /* Build configuration list for PBXAggregateTarget "libdispatch_tests_legacy" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 92CBD7241BED924F006E0892 /* Release */, + 92CBD7251BED924F006E0892 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 92F3FECB1BEC69E500025962 /* Build configuration list for PBXLegacyTarget "darwintests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 92F3FECC1BEC69E500025962 /* Release */, + 92F3FECD1BEC69E500025962 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + C00B0E081C5AEBBE000330B3 /* Release */, + C00B0E091C5AEBBE000330B3 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + C01866BB1C5973210040FC07 /* Release */, + C01866BC1C5973210040FC07 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -2371,7 +2593,7 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch static" */ = { + E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch up static" */ = { isa = XCConfigurationList; buildConfigurations = ( E46DBC5514EE10C80001F9F6 /* Release */, diff --git a/man/Makefile.am b/man/Makefile.am index 0d58d14..3ca6946 100644 --- a/man/Makefile.am +++ b/man/Makefile.am @@ -2,6 +2,7 @@ # # +if !HAVE_SWIFT dist_man3_MANS= \ dispatch.3 \ dispatch_after.3 \ @@ 
-148,3 +149,4 @@ uninstall-hook: dispatch_io_barrier.3 \ dispatch_io_write.3 \ dispatch_write.3 +endif diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index 21b3d95..95ba1c3 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -101,9 +101,8 @@ lifetime is tracked by the Objective-C static analyzer. .El .Pp Integration of dispatch objects with Objective-C requires targeting Mac\ OS\ X -10.8 or later, and is disabled when building with Objective-C Garbage -Collection or for the legacy Objective-C runtime. It can also be disabled -manually by using compiler options to define the +10.8 or later, and is disabled when building for the legacy Objective-C runtime. +It can also be disabled manually by using compiler options to define the .Dv OS_OBJECT_USE_OBJC preprocessor macro to .Li 0 . diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index 0ca0648..f3c3051 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -298,10 +298,6 @@ and only if they restore the thread to its original state before returning: .Fn pthread_sigmask .It .Fn pthread_setugid_np -.It -.Fn pthread_chdir -.It -.Fn pthread_fchdir .El .Pp Applications diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index b954bcf..4da708c 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -271,8 +271,8 @@ Sources of this type allow applications to manually trigger the source's event handler via a call to .Fn dispatch_source_merge_data . The data will be merged with the source's pending data via an atomic add or -logic OR (based on the source's type), and the event handler block will be -submitted to the source's target queue. The +atomic bitwise OR (based on the source's type), and the event handler block will +be submitted to the source's target queue. The .Fa data is application defined. These sources have no .Fa handle @@ -297,7 +297,8 @@ The data returned by .Fn dispatch_source_get_data indicates which of the events in the .Fa mask -were observed. Note that because this source type will request notifications on the provided port, it should not be mixed with the use of +were observed. Note that because this source type will request notifications on +the provided port, it should not be mixed with the use of .Fn mach_port_request_notification on the same port. .Pp @@ -314,8 +315,8 @@ on the mach port is waiting to be received. .Pp .Vt DISPATCH_SOURCE_TYPE_MEMORYPRESSURE .Pp -Sources of this type monitor the system memory pressure condition for state changes. -The +Sources of this type monitor the system memory pressure condition for state +changes. The .Fa handle is unused and should be zero. The .Fa mask @@ -525,19 +526,24 @@ may be one or more of the following: The referenced node was removed from the filesystem namespace via .Xr unlink 2 . .It \(bu DISPATCH_VNODE_WRITE -A write to the referenced file occurred +A write to the referenced file occurred. .It \(bu DISPATCH_VNODE_EXTEND -The referenced file was extended +The referenced file was extended. .It \(bu DISPATCH_VNODE_ATTRIB -The metadata attributes of the referenced node have changed +The metadata attributes of the referenced node have changed. .It \(bu DISPATCH_VNODE_LINK -The link count on the referenced node has changed +The link count on the referenced node has changed. .It \(bu DISPATCH_VNODE_RENAME -The referenced node was renamed +The referenced node was renamed. 
 .It \(bu DISPATCH_VNODE_REVOKE
 Access to the referenced node was revoked via
 .Xr revoke 2
 or the underlying filesystem was unmounted.
+.It \(bu DISPATCH_VNODE_FUNLOCK
+The referenced file was unlocked by
+.Xr flock 2
+or
+.Xr close 2 .
 .El
 .Pp
 The data returned by
diff --git a/man/dispatch_time.3 b/man/dispatch_time.3
index e318e90..4b4f9d8 100644
--- a/man/dispatch_time.3
+++ b/man/dispatch_time.3
@@ -49,7 +49,7 @@ Otherwise, if
 .Fa base
 is
 .Vt DISPATCH_TIME_NOW ,
-then the the current time of the default host clock is used.
+then the current time of the default host clock is used.
 .Pp
 The
 .Fn dispatch_walltime
diff --git a/os/Makefile.am b/os/Makefile.am
index 2189f16..d009a37 100644
--- a/os/Makefile.am
+++ b/os/Makefile.am
@@ -2,10 +2,17 @@
 #
 #
 
+if HAVE_SWIFT
+osdir=${prefix}/lib/swift/os
+else
 osdir=$(includedir)/os
+endif
 
 os_HEADERS= \
-	object.h
+	object.h \
+	linux_base.h
 
 noinst_HEADERS= \
-	object_private.h
+	object_private.h \
+	voucher_activity_private.h \
+	voucher_private.h
diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h
new file mode 100644
index 0000000..2c6466f
--- /dev/null
+++ b/os/firehose_buffer_private.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __FIREHOSE_BUFFER_PRIVATE__
+#define __FIREHOSE_BUFFER_PRIVATE__
+
+#if OS_FIREHOSE_SPI
+#ifdef KERNEL
+#include
+#else
+#include
+#include
+#include
+#endif
+
+#define OS_FIREHOSE_SPI_VERSION 20160318
+
+/*!
+ * @group Firehose SPI + * SPI intended for logd only + * Layout of structs is subject to change without notice + */ + +#define FIREHOSE_BUFFER_CHUNK_SIZE 4096ul +#define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE 2048ul +#define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT 16 + +typedef union { + uint64_t fbc_atomic_pos; +#define FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC (1ULL << 0) +#define FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC (1ULL << 16) +#define FIREHOSE_BUFFER_POS_REFCNT_INC (1ULL << 32) +#define FIREHOSE_BUFFER_POS_FULL_BIT (1ULL << 56) +#define FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(pos, stream) \ + ((((pos).fbc_atomic_pos >> 48) & 0x1ff) == (uint16_t)stream) + struct { + uint16_t fbc_next_entry_offs; + uint16_t fbc_private_offs; + uint8_t fbc_refcnt; + uint8_t fbc_qos_bits; + uint8_t fbc_stream; + uint8_t fbc_flag_full : 1; + uint8_t fbc_flag_io : 1; + uint8_t _fbc_flag_unused : 6; + }; +} firehose_buffer_pos_u; + +typedef struct firehose_buffer_chunk_s { + uint8_t fbc_start[0]; + firehose_buffer_pos_u volatile fbc_pos; + uint64_t fbc_timestamp; + uint8_t fbc_data[FIREHOSE_BUFFER_CHUNK_SIZE + - sizeof(firehose_buffer_pos_u) + - sizeof(uint64_t)]; +} __attribute__((aligned(8))) *firehose_buffer_chunk_t; + +typedef struct firehose_buffer_range_s { + uint16_t fbr_offset; // offset from the start of the buffer + uint16_t fbr_length; +} *firehose_buffer_range_t; + +#ifdef KERNEL + +// implemented by the kernel +extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io); +extern void __firehose_critical_region_enter(void); +extern void __firehose_critical_region_leave(void); +extern void __firehose_allocate(vm_offset_t *addr, vm_size_t size); + +// exported for the kernel +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr); + +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, + uint64_t stamp, firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr); + +void +__firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, + firehose_tracepoint_id_u vatid); + +void +__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, + firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); + +firehose_buffer_t +__firehose_buffer_create(size_t *size); + +void +__firehose_merge_updates(firehose_push_reply_t update); + +#else + +#define __firehose_critical_region_enter() +#define __firehose_critical_region_leave() + +OS_EXPORT +const uint32_t _firehose_spi_version; + +OS_ALWAYS_INLINE +static inline const uint8_t * +_firehose_tracepoint_reader_init(firehose_buffer_chunk_t fbc, + const uint8_t **endptr) +{ + const uint8_t *start = fbc->fbc_data; + const uint8_t *end = fbc->fbc_start + fbc->fbc_pos.fbc_next_entry_offs; + + if (end > fbc->fbc_start + FIREHOSE_BUFFER_CHUNK_SIZE) { + end = start; + } + *endptr = end; + return start; +} + +OS_ALWAYS_INLINE +static inline firehose_tracepoint_t +_firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) +{ + const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); + firehose_tracepoint_t ft; + + do { + ft = (firehose_tracepoint_t)*ptr; + if (ft->ft_data >= end) { + // reached the end + return NULL; + } + if (!ft->ft_length) { + // tracepoint write didn't even start + return NULL; + } + if (ft->ft_length > end - ft->ft_data) { + // invalid length + return NULL; + } + *ptr += roundup(ft_size + ft->ft_length, 8); + // test whether write 
of the tracepoint was finished
+	} while (os_unlikely(ft->ft_id.ftid_value == 0));
+
+	return ft;
+}
+
+#define firehose_tracepoint_foreach(ft, fbc) \
+		for (const uint8_t *end, *p = _firehose_tracepoint_reader_init(fbc, &end); \
+				((ft) = _firehose_tracepoint_reader_next(&p, end)); )
+
+OS_ALWAYS_INLINE
+static inline bool
+firehose_buffer_range_validate(firehose_buffer_chunk_t fbc,
+		firehose_tracepoint_t ft, firehose_buffer_range_t range)
+{
+	if (range->fbr_offset + range->fbr_length > FIREHOSE_BUFFER_CHUNK_SIZE) {
+		return false;
+	}
+	if (fbc->fbc_start + range->fbr_offset < ft->ft_data + ft->ft_length) {
+		return false;
+	}
+	return true;
+}
+
+#endif // !KERNEL
+
+#endif // OS_FIREHOSE_SPI
+
+#endif // __FIREHOSE_BUFFER_PRIVATE__
diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h
new file mode 100644
index 0000000..4bff8ab
--- /dev/null
+++ b/os/firehose_server_private.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __FIREHOSE_SERVER_PRIVATE__
+#define __FIREHOSE_SERVER_PRIVATE__
+
+#include
+#include
+#include "firehose_buffer_private.h"
+
+#if OS_FIREHOSE_SPI
+/*!
+ * @group Firehose SPI
+ * SPI intended for logd only
+ */
+
+#pragma mark - Firehose Client
+
+/*!
+ * @typedef firehose_client_t
+ *
+ * @abstract
+ * Represents a firehose client.
+ *
+ * @discussion
+ * Firehose client objects are os_object_t's, and it's legal to retain/release
+ * them with os_retain / os_release.
+ */
+OS_OBJECT_DECL_CLASS(firehose_client);
+
+/*!
+ * @typedef firehose_event_t
+ *
+ * @const FIREHOSE_EVENT_NONE
+ * Never passed to callbacks, meaningful for
+ * firehose_client_metadata_stream_peek.
+ *
+ * @const FIREHOSE_EVENT_CLIENT_CONNECTED
+ * A new client has connected.
+ *
+ * This is the first event delivered, and no event is delivered until
+ * the handler of that event returns.
+ *
+ * The `page` argument is really a firehose_client_connected_info_t.
+ *
+ * @const FIREHOSE_EVENT_CLIENT_DIED
+ * The specified client is gone and will not flush new buffers.
+ *
+ * This is the last event delivered; it is never called before all other
+ * event handlers have returned. This event is generated even when a
+ * FIREHOSE_EVENT_CLIENT_CORRUPTED event has been generated.
+ *
+ * @const FIREHOSE_EVENT_IO_BUFFER_RECEIVED
+ * A new buffer needs to be pushed; `page` is set to that buffer.
+ *
+ * This event can be sent concurrently with respect to
+ * FIREHOSE_EVENT_MEM_BUFFER_RECEIVED events.
+ *
+ * @const FIREHOSE_EVENT_MEM_BUFFER_RECEIVED
+ * A new buffer needs to be pushed; `page` is set to that buffer.
+ *
+ * This event can be sent concurrently with respect to
+ * FIREHOSE_EVENT_IO_BUFFER_RECEIVED events.
+ *
+ * @const FIREHOSE_EVENT_CLIENT_CORRUPTED
+ * This event is received when a client is found to be corrupted.
+ * `page` is set to the buffer header page. When this event is received,
+ * logs have likely been lost for this client.
+ *
+ * This buffer isn't really a proper firehose buffer page, but its content may
+ * be useful for debugging purposes.
+ *
+ * @const FIREHOSE_EVENT_CLIENT_FINALIZE
+ * This event is received when a firehose client structure is about to be
+ * destroyed. Only firehose_client_get_context() can ever be called with
+ * the passed firehose client. The `page` argument is NULL for this event.
+ *
+ * The event is sent from the context that is dropping the last refcount
+ * of the client.
+ */
+OS_ENUM(firehose_event, unsigned long,
+	FIREHOSE_EVENT_NONE = 0,
+	FIREHOSE_EVENT_CLIENT_CONNECTED,
+	FIREHOSE_EVENT_CLIENT_DIED,
+	FIREHOSE_EVENT_IO_BUFFER_RECEIVED,
+	FIREHOSE_EVENT_MEM_BUFFER_RECEIVED,
+	FIREHOSE_EVENT_CLIENT_CORRUPTED,
+	FIREHOSE_EVENT_CLIENT_FINALIZE,
+);
+
+#define FIREHOSE_CLIENT_CONNECTED_INFO_VERSION 1
+
+/*!
+ * @typedef firehose_client_connected_info
+ *
+ * @abstract
+ * Type of the data passed to CLIENT_CONNECTED events.
+ */
+typedef struct firehose_client_connected_info_s {
+	unsigned long fcci_version;
+	// version 1
+	const void *fcci_data;
+	size_t fcci_size;
+} *firehose_client_connected_info_t;
+
+/*!
+ * @function firehose_client_get_unique_pid
+ *
+ * @abstract
+ * Returns the unique pid of the specified firehose client
+ *
+ * @param client
+ * The specified client.
+ *
+ * @param pid
+ * The pid for this client.
+ *
+ * @returns
+ * The unique pid of the specified client.
+ */
+OS_NOTHROW OS_NONNULL1
+uint64_t
+firehose_client_get_unique_pid(firehose_client_t client, pid_t *pid);
+
+/*!
+ * @function firehose_client_get_metadata_buffer
+ *
+ * @abstract
+ * Returns the metadata buffer for the specified firehose client
+ *
+ * @param client
+ * The specified client.
+ *
+ * @param size
+ * The size of the metadata buffer.
+ *
+ * @returns
+ * The pointer to the buffer.
+ */
+OS_NOTHROW OS_NONNULL_ALL
+void *
+firehose_client_get_metadata_buffer(firehose_client_t client, size_t *size);
+
+/*!
+ * @function firehose_client_get_context
+ *
+ * @abstract
+ * Gets the context for the specified client.
+ *
+ * @param client
+ * The specified client.
+ *
+ * @returns
+ * The context set for the client with firehose_client_set_context.
+ */
+OS_NOTHROW OS_NONNULL1
+void *
+firehose_client_get_context(firehose_client_t client);
+
+/*!
+ * @function firehose_client_set_context
+ *
+ * @abstract
+ * Sets the context for the specified client.
+ *
+ * @discussion
+ * Setting the context exchanges the context pointer, but the client must
+ * ensure proper synchronization with possible getters.
+ *
+ * The lifetime of the context is under the control of the API user; it is
+ * suggested to destroy the context when the CLIENT_DIED event is received.
+ *
+ * @param client
+ * The specified client.
+ *
+ * @param ctxt
+ * The new context to set.
+ *
+ * @returns
+ * The previous context set for the client.
+ */
+OS_NOTHROW OS_NONNULL1
+void *
+firehose_client_set_context(firehose_client_t client, void *ctxt);
+
+/*!
+ * @function firehose_client_metadata_stream_peek
+ *
+ * @abstract
+ * Peek at the in-flight metadata stream buffers for a given client
+ *
+ * @discussion
+ * This function should never be called from the context of a snapshot
+ * handler.
+ *
+ * @param client
+ * The specified client
+ *
+ * @param context
+ * If this function is called synchronously from the handler passed to
+ * firehose_server_init, then `context` should be the event being processed.
+ * Else pass FIREHOSE_EVENT_NONE.
+ *
+ * @param peek_should_start
+ * Handler that is called prior to peeking to solve the race of metadata
+ * buffers not being processed yet at first lookup time, and being processed
+ * before the peek enumeration starts.
+ *
+ * If the handler returns false, then the enumeration doesn't start.
+ * If the race cannot happen, pass NULL.
+ *
+ * @param peek
+ * Handler that will receive all the live metadata buffers for this process.
+ * If the handler returns false, the enumeration is interrupted.
+ */
+OS_NOTHROW OS_NONNULL1 OS_NONNULL4
+void
+firehose_client_metadata_stream_peek(firehose_client_t client,
+		firehose_event_t context, OS_NOESCAPE bool (^peek_should_start)(void),
+		OS_NOESCAPE bool (^peek)(firehose_buffer_chunk_t fbc));
+
+#pragma mark - Firehose Server
+
+/*!
+ * @typedef firehose_handler_t
+ *
+ * @abstract
+ * Type of the handler block for firehose_server_init()
+ */
+typedef void (^firehose_handler_t)(firehose_client_t client,
+		firehose_event_t event, firehose_buffer_chunk_t page);
+
+/*!
+ * @function firehose_server_init
+ *
+ * @abstract
+ * Initializes the firehose MiG server
+ *
+ * @discussion
+ * Initializes the firehose MiG server by bootstrap-registering the services
+ * and creating dispatch sources for them.
+ */
+OS_NOTHROW
+void
+firehose_server_init(mach_port_t firehose_comm_port,
+		firehose_handler_t handler);
+
+/*!
+ * @function firehose_server_assert_spi_version
+ *
+ * @abstract
+ * Checks that libdispatch and firehose components all match
+ *
+ * @discussion
+ * Will assert that all the components have the same SPI versions
+ */
+OS_NOTHROW
+void
+firehose_server_assert_spi_version(uint32_t spi_version);
+
+/*!
+ * @function firehose_server_resume
+ *
+ * @abstract
+ * Allows firehose events to flow
+ *
+ * @discussion
+ * Must be called after firehose_server_init()
+ */
+OS_NOTHROW
+void
+firehose_server_resume(void);
+
+#pragma mark - Firehose Snapshot
+
+/*!
+ * @typedef firehose_snapshot_event
+ *
+ */
+OS_ENUM(firehose_snapshot_event, unsigned long,
+	FIREHOSE_SNAPSHOT_EVENT_IO_START = 1,
+	FIREHOSE_SNAPSHOT_EVENT_MEM_START,
+	FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER,
+	FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER,
+	FIREHOSE_SNAPSHOT_EVENT_COMPLETE,
+);
+
+/*!
+ * @typedef firehose_snapshot_handler_t
+ *
+ * @abstract
+ * Type of the handler block for firehose_snapshot
+ */
+typedef void (^firehose_snapshot_handler_t)(firehose_client_t client,
+		firehose_snapshot_event_t event, firehose_buffer_chunk_t page);
+
+/*!
+ * @function firehose_snapshot
+ *
+ * @abstract
+ * Gathers a snapshot of the current firehose state.
+ *
+ * @discussion
+ * This function can be called several times, in which case snapshots are taken
+ * one after the other. If coalescing is desired, it has to be built around this
+ * call.
+ */
+OS_NOTHROW
+void
+firehose_snapshot(firehose_snapshot_handler_t handler);
+
+#endif // OS_FIREHOSE_SPI
+
+#endif // __FIREHOSE_SERVER_PRIVATE__
diff --git a/os/linux_base.h b/os/linux_base.h
new file mode 100644
index 0000000..96a3c82
--- /dev/null
+++ b/os/linux_base.h
@@ -0,0 +1,94 @@
+/*
+ * This source file is part of the Swift.org open source project
+ *
+ * Copyright (c) 2015 Apple Inc. and the Swift project authors
+ *
+ * Licensed under Apache License v2.0 with Runtime Library Exception
+ *
+ * See http://swift.org/LICENSE.txt for license information
+ * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+ *
+ */
+
+#ifndef __OS_LINUX_BASE__
+#define __OS_LINUX_BASE__
+
+#include
+#include
+
+#if __GNUC__
+#define OS_EXPECT(x, v) __builtin_expect((x), (v))
+#else
+#define OS_EXPECT(x, v) (x)
+#endif
+
+#ifndef os_likely
+#define os_likely(x) OS_EXPECT(!!(x), 1)
+#endif
+#ifndef os_unlikely
+#define os_unlikely(x) OS_EXPECT(!!(x), 0)
+#endif
+
+#if __has_feature(assume_nonnull)
+#define OS_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin")
+#define OS_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end")
+#else
+#define OS_ASSUME_NONNULL_BEGIN
+#define OS_ASSUME_NONNULL_END
+#endif
+
+#if __has_builtin(__builtin_assume)
+#define OS_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr)
+#else
+#define OS_COMPILER_CAN_ASSUME(expr) ((void)(expr))
+#endif
+
+#if __has_feature(attribute_availability_swift)
+// equivalent to __SWIFT_UNAVAILABLE from Availability.h
+#define OS_SWIFT_UNAVAILABLE(_msg) \
+	__attribute__((__availability__(swift, unavailable, message=_msg)))
+#else
+#define OS_SWIFT_UNAVAILABLE(_msg)
+#endif
+
+#if __has_attribute(swift_private)
+# define OS_REFINED_FOR_SWIFT __attribute__((__swift_private__))
+#else
+# define OS_REFINED_FOR_SWIFT
+#endif
+
+#if __has_attribute(swift_name)
+# define OS_SWIFT_NAME(_name) __attribute__((__swift_name__(#_name)))
+#else
+# define OS_SWIFT_NAME(_name)
+#endif
+
+#define __OS_STRINGIFY(s) #s
+#define OS_STRINGIFY(s) __OS_STRINGIFY(s)
+#define __OS_CONCAT(x, y) x ## y
+#define OS_CONCAT(x, y) __OS_CONCAT(x, y)
+
+/*
+ * Stub out misc linking and compilation attributes
+ */
+
+#ifdef OS_EXPORT
+#undef OS_EXPORT
+#endif
+#define OS_EXPORT
+
+#ifdef OS_WARN_RESULT_NEEDS_RELEASE
+#undef OS_WARN_RESULT_NEEDS_RELEASE
+#endif
+
+#ifdef OS_WARN_RESULT
+#undef OS_WARN_RESULT
+#endif
+#define OS_WARN_RESULT
+
+#ifdef OS_NOTHROW
+#undef OS_NOTHROW
+#endif
+#define OS_NOTHROW
+
+#endif /* __OS_LINUX_BASE__ */
diff --git a/os/object.h b/os/object.h
index e07aaec..f3faa62 100644
--- a/os/object.h
+++ b/os/object.h
@@ -23,8 +23,13 @@
 
 #ifdef __APPLE__
 #include <Availability.h>
+#include <TargetConditionals.h>
 #endif
+#ifndef __linux__
 #include <sys/cdefs.h>
+#else
+#include <os/linux_base.h>
+#endif
 
 /*!
  * @header
@@ -50,14 +55,24 @@
  */
 
 #ifndef OS_OBJECT_HAVE_OBJC_SUPPORT
-#if defined(__OBJC__) && defined(__OBJC2__) && !defined(__OBJC_GC__) && ( \
-		__MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_8 || \
-		__IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_6_0)
-#define OS_OBJECT_HAVE_OBJC_SUPPORT 1
+#if !defined(__OBJC__) || defined(__OBJC_GC__)
+#  define OS_OBJECT_HAVE_OBJC_SUPPORT 0
+#elif !defined(TARGET_OS_MAC) || !TARGET_OS_MAC
+#  define OS_OBJECT_HAVE_OBJC_SUPPORT 0
+#elif TARGET_OS_IOS && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_6_0
+#  define OS_OBJECT_HAVE_OBJC_SUPPORT 0
+#elif TARGET_OS_MAC && !TARGET_OS_IPHONE
+#  if __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_8
+#    define OS_OBJECT_HAVE_OBJC_SUPPORT 0
+#  elif defined(__i386__) && __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12
+#    define OS_OBJECT_HAVE_OBJC_SUPPORT 0
+#  else
+#    define OS_OBJECT_HAVE_OBJC_SUPPORT 1
+#  endif
 #else
-#define OS_OBJECT_HAVE_OBJC_SUPPORT 0
-#endif
+#  define OS_OBJECT_HAVE_OBJC_SUPPORT 1
 #endif
+#endif // OS_OBJECT_HAVE_OBJC_SUPPORT
 
 #if OS_OBJECT_HAVE_OBJC_SUPPORT
 #ifndef OS_OBJECT_USE_OBJC
@@ -71,18 +86,49 @@
 #define OS_OBJECT_USE_OBJC 0
 #endif
 
+#ifndef OS_OBJECT_SWIFT3
+#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \
+		SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#define OS_OBJECT_SWIFT3 1
+#else
+#define OS_OBJECT_SWIFT3 0
+#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#endif // OS_OBJECT_SWIFT3
+
 #if OS_OBJECT_USE_OBJC
 #import <objc/NSObject.h>
+#if __has_attribute(objc_independent_class)
+#define OS_OBJC_INDEPENDENT_CLASS __attribute__((objc_independent_class))
+#endif // __has_attribute(objc_independent_class)
+#ifndef OS_OBJC_INDEPENDENT_CLASS
+#define OS_OBJC_INDEPENDENT_CLASS
+#endif
 #define OS_OBJECT_CLASS(name) OS_##name
-#define OS_OBJECT_DECL_IMPL(name, ...) \
+#define OS_OBJECT_DECL_PROTOCOL(name, ...) \
 		@protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \
-		@end \
-		typedef NSObject<OS_OBJECT_CLASS(name)> *name##_t
+		@end
+#define OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL_IMPL(name, proto) \
+		@interface name () <proto> \
+		@end
+#define OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, proto) \
+		OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL_IMPL( \
+				OS_OBJECT_CLASS(name), OS_OBJECT_CLASS(proto))
+#define OS_OBJECT_DECL_IMPL(name, ...) \
+		OS_OBJECT_DECL_PROTOCOL(name, __VA_ARGS__) \
+		typedef NSObject<OS_OBJECT_CLASS(name)> \
+				* OS_OBJC_INDEPENDENT_CLASS name##_t
+#define OS_OBJECT_DECL_BASE(name, ...) \
+		@interface OS_OBJECT_CLASS(name) : __VA_ARGS__ \
+		- (instancetype)init OS_SWIFT_UNAVAILABLE("Unavailable in Swift"); \
+		@end
+#define OS_OBJECT_DECL_IMPL_CLASS(name, ...) \
+		OS_OBJECT_DECL_BASE(name, ## __VA_ARGS__) \
+		typedef OS_OBJECT_CLASS(name) \
+				* OS_OBJC_INDEPENDENT_CLASS name##_t
 #define OS_OBJECT_DECL(name, ...) \
-		OS_OBJECT_DECL_IMPL(name, <NSObject> __VA_ARGS__)
+		OS_OBJECT_DECL_IMPL(name, <NSObject>)
 #define OS_OBJECT_DECL_SUBCLASS(name, super) \
 		OS_OBJECT_DECL_IMPL(name, <OS_OBJECT_CLASS(super)>)
-#if defined(__has_attribute)
 #if __has_attribute(ns_returns_retained)
 #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__))
 #else
@@ -93,11 +139,6 @@
 #else
 #define OS_OBJECT_CONSUMED
 #endif
-#else
-#define OS_OBJECT_RETURNS_RETAINED
-#define OS_OBJECT_CONSUMED
-#endif
-#if defined(__has_feature)
 #if __has_feature(objc_arc)
 #define OS_OBJECT_BRIDGE __bridge
 #define OS_WARN_RESULT_NEEDS_RELEASE
@@ -105,23 +146,47 @@
 #define OS_OBJECT_BRIDGE
 #define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT
 #endif
+#if __has_attribute(objc_runtime_visible) && \
+		((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \
+		__MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12) || \
+		(defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \
+		!defined(__TV_OS_VERSION_MIN_REQUIRED) && \
+		!defined(__WATCH_OS_VERSION_MIN_REQUIRED) && \
+		__IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_10_0) || \
+		(defined(__TV_OS_VERSION_MIN_REQUIRED) && \
+		__TV_OS_VERSION_MIN_REQUIRED < __TVOS_10_0) || \
+		(defined(__WATCH_OS_VERSION_MIN_REQUIRED) && \
+		__WATCH_OS_VERSION_MIN_REQUIRED < __WATCHOS_3_0))
+/*
+ * To provide backward deployment of ObjC objects in Swift on pre-10.12
+ * SDKs, OS_object classes can be marked as OS_OBJECT_OBJC_RUNTIME_VISIBLE.
+ * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0,
+ * tvOS 10.0, watchOS 3.0) the Swift compiler will only refer to this type at
+ * runtime (using the ObjC runtime).
+ */
+#define OS_OBJECT_OBJC_RUNTIME_VISIBLE __attribute__((objc_runtime_visible))
 #else
-#define OS_OBJECT_BRIDGE
-#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT
+#define OS_OBJECT_OBJC_RUNTIME_VISIBLE
 #endif
 #ifndef OS_OBJECT_USE_OBJC_RETAIN_RELEASE
 #if defined(__clang_analyzer__)
 #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1
-#elif defined(__has_feature)
-#if __has_feature(objc_arc)
+#elif __has_feature(objc_arc) && !OS_OBJECT_SWIFT3
 #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1
 #else
 #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0
 #endif
-#else
-#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0
-#endif
 #endif
+#if OS_OBJECT_SWIFT3
+#define OS_OBJECT_DECL_SWIFT(name) \
+		OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \
+		OS_OBJECT_DECL_IMPL_CLASS(name, NSObject)
+#define OS_OBJECT_DECL_SUBCLASS_SWIFT(name, super) \
+		OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \
+		OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super))
+OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE
+OS_OBJECT_DECL_BASE(object, NSObject);
+#endif // OS_OBJECT_SWIFT3
 #else
 /*! @parseOnly */
 #define OS_OBJECT_RETURNS_RETAINED
@@ -131,9 +196,22 @@
 #define OS_OBJECT_BRIDGE
 /*! @parseOnly */
 #define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT
+/*! @parseOnly */
+#define OS_OBJECT_OBJC_RUNTIME_VISIBLE
 #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0
 #endif
 
+#if OS_OBJECT_SWIFT3
+#define OS_OBJECT_DECL_CLASS(name) \
+		OS_OBJECT_DECL_SUBCLASS_SWIFT(name, object)
+#elif OS_OBJECT_USE_OBJC
+#define OS_OBJECT_DECL_CLASS(name) \
+		OS_OBJECT_DECL(name)
+#else
+#define OS_OBJECT_DECL_CLASS(name) \
+		typedef struct name##_s *name##_t
+#endif
+
 #define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object))
 
 __BEGIN_DECLS
@@ -155,7 +233,7 @@
  * The retained object.
  */
 __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
-OS_EXPORT
+OS_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC")
 void*
 os_retain(void *object);
 #if OS_OBJECT_USE_OBJC
@@ -178,7 +256,7 @@ os_retain(void *object);
  */
 __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
 OS_EXPORT
-void
+void OS_SWIFT_UNAVAILABLE("Can't be used with ARC")
 os_release(void *object);
 #if OS_OBJECT_USE_OBJC
 #undef os_release
diff --git a/os/object_private.h b/os/object_private.h
index 0f2f01d..dc2af83 100644
--- a/os/object_private.h
+++ b/os/object_private.h
@@ -53,15 +53,11 @@
 #define OS_OBJECT_EXPORT extern
 #endif
 
-#if OS_OBJECT_USE_OBJC && defined(__has_feature)
-#if __has_feature(objc_arc)
+#if OS_OBJECT_USE_OBJC && __has_feature(objc_arc)
 #define _OS_OBJECT_OBJC_ARC 1
 #else
 #define _OS_OBJECT_OBJC_ARC 0
 #endif
-#else
-#define _OS_OBJECT_OBJC_ARC 0
-#endif
 
 #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX
 
@@ -71,10 +67,28 @@
 		int volatile xref_cnt
 
 #if OS_OBJECT_HAVE_OBJC_SUPPORT
+#define OS_OBJECT_CLASS_SYMBOL(name) OS_##name##_class
+#if TARGET_OS_MAC && !TARGET_OS_SIMULATOR && defined(__i386__)
+#define OS_OBJECT_HAVE_OBJC1 1
+#define OS_OBJECT_HAVE_OBJC2 0
+#define OS_OBJC_CLASS_RAW_SYMBOL_NAME(name) \
+		".objc_class_name_" OS_STRINGIFY(name)
+#define _OS_OBJECT_CLASS_HEADER() \
+		const void *_os_obj_objc_isa
+#else
+#define OS_OBJECT_HAVE_OBJC1 0
+#define OS_OBJECT_HAVE_OBJC2 1
+#define OS_OBJC_CLASS_RAW_SYMBOL_NAME(name) "_OBJC_CLASS_$_" OS_STRINGIFY(name)
 // Must match size of compiler-generated OBJC_CLASS structure rdar://10640168
 #define _OS_OBJECT_CLASS_HEADER() \
 		void *_os_obj_objc_class_t[5]
+#endif
+#define OS_OBJECT_OBJC_CLASS_DECL(name) \
+		extern void *OS_OBJECT_CLASS_SYMBOL(name) \
+				asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name)))
 #else
+#define OS_OBJECT_HAVE_OBJC1 0
+#define OS_OBJECT_HAVE_OBJC2 0
 #define _OS_OBJECT_CLASS_HEADER() \
 		void (*_os_obj_xref_dispose)(_os_object_t); \
 		void (*_os_obj_dispose)(_os_object_t)
@@ -82,7 +96,22 @@
 
 #define OS_OBJECT_CLASS(name) OS_##name
 
-#if OS_OBJECT_USE_OBJC
+#if OS_OBJECT_USE_OBJC && OS_OBJECT_SWIFT3
+@interface OS_OBJECT_CLASS(object) (OSObjectPrivate)
+- (void)_xref_dispose;
+- (void)_dispose;
+@end
+OS_OBJECT_DECL_PROTOCOL(object, <NSObject>);
+typedef OS_OBJECT_CLASS(object) *_os_object_t;
+#define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \
+		@interface OS_OBJECT_CLASS(name) : OS_OBJECT_CLASS(super) \
+		<OS_OBJECT_CLASS(name)> \
+		@end
+#define _OS_OBJECT_DECL_PROTOCOL(name, super) \
+		OS_OBJECT_DECL_PROTOCOL(name, <OS_OBJECT_CLASS(super)>)
+#define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \
+		OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super)
+#elif OS_OBJECT_USE_OBJC
 __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
 OS_OBJECT_EXPORT
 @interface OS_OBJECT_CLASS(object) : NSObject
@@ -95,49 +124,62 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t;
 		<OS_OBJECT_CLASS(name)> \
 		@end
 #else
+#define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super)
+#define _OS_OBJECT_DECL_PROTOCOL(name, super)
+#define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super)
 typedef struct _os_object_s *_os_object_t;
 #endif
 
+OS_ASSUME_NONNULL_BEGIN
+
 __BEGIN_DECLS
 
 #if !_OS_OBJECT_OBJC_ARC
 
 __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
 OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW
+OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_alloc(const void *cls, size_t size);
 
 __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
 OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW
+OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 _os_object_t
 _os_object_alloc_realized(const void *cls,
size_t size); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_dealloc(_os_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain(_os_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_with_resurrect(_os_object_t obj); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release(_os_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_internal(_os_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release_internal(_os_object_t object); @@ -145,4 +187,6 @@ _os_object_release_internal(_os_object_t object); __END_DECLS +OS_ASSUME_NONNULL_END + #endif diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h new file mode 100644 index 0000000..8f233b3 --- /dev/null +++ b/os/voucher_activity_private.h @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2013-2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_VOUCHER_ACTIVITY_PRIVATE__ +#define __OS_VOUCHER_ACTIVITY_PRIVATE__ + +#if OS_VOUCHER_ACTIVITY_SPI +#if __has_include() +#include +#include +#endif +#ifndef __linux__ +#include +#endif +#include +#include "voucher_private.h" + +#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20160329 + +#if OS_VOUCHER_WEAK_IMPORT +#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT +#else +#define OS_VOUCHER_EXPORT OS_EXPORT +#endif + +#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_PUSH \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_POP \ + _Pragma("clang diagnostic pop") + +__BEGIN_DECLS + +/*! + * @const VOUCHER_CURRENT + * Shorthand for the currently adopted voucher + * + * This value can only be used as an argument to functions, and is never + * actually returned. It looks enough like a tagged pointer object that ARC + * won't crash if this is assigned to a temporary variable. + */ +#define VOUCHER_CURRENT ((OS_OBJECT_BRIDGE voucher_t)(void *)~2ul) + +/*! + * @function voucher_get_activity_id + * + * @abstract + * Returns the activity_id associated with the specified voucher at the time + * of the call. 
+ * + * @discussion + * When the passed voucher is VOUCHER_CURRENT this returns the current + * activity ID. + * + * @param voucher + * The specified voucher. + * + * @param parent_id + * An out parameter to return the parent ID of the returned activity ID. + * + * @result + * The current activity identifier, if any. When 0 is returned, parent_id will + * also always be 0. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_NOTHROW +firehose_activity_id_t +voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id); + +/*! + * @function voucher_get_activity_id_and_creator + * + * @abstract + * Returns the activity_id associated with the specified voucher at the time + * of the call. + * + * @discussion + * When the passed voucher is VOUCHER_CURRENT this returns the current + * activity ID. + * + * @param voucher + * The specified voucher. + * + * @param creator_pid + * The unique pid of the process that created the returned activity ID if any. + * + * @param parent_id + * An out parameter to return the parent ID of the returned activity ID. + * + * @result + * The current activity identifier, if any. When 0 is returned, parent_id will + * also always be 0. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_NOTHROW +firehose_activity_id_t +voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, + firehose_activity_id_t *parent_id); + +/*! + * @function voucher_activity_create + * + * @abstract + * Creates a voucher object with a new activity identifier. + * + * @discussion + * As part of voucher transport, activities are automatically propagated by the + * system to other threads and processes (across IPC). + * + * When a voucher with an activity identifier is applied to a thread, work + * on that thread is done on behalf of this activity. + * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id(), intended for + * identification of the automatic tracepoint generated as part of creating the + * new activity. + * + * @param base + * The base voucher used to create the activity. If the base voucher has an + * activity identifier, then the created activity will be parented to that one. + * If the passed in base has no activity identifier, the activity identifier + * will be a top-level one, on behalf of the process that created the base + * voucher. + * + * If base is VOUCHER_NONE, the activity is a top-level one, on behalf of the + * current process. + * + * If base is VOUCHER_CURRENT, then the activity is naturally based on the + * one currently applied to the current thread (the one voucher_copy() would + * return). + * + * @param flags + * See voucher_activity_flag_t documentation for effect. + * + * @param location + * Location identifier for the automatic tracepoint generated as part of + * creating the new activity. + * + * @result + * A new voucher with an activity identifier. 
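+ *
+ * An illustrative sketch (the trace_id value is a placeholder, the flags
+ * value is not meaningful tracepoint data, and manual retain/release is
+ * assumed):
+ *
+ *	voucher_t v = voucher_activity_create(trace_id, VOUCHER_CURRENT,
+ *			(firehose_activity_flags_t)0, 0);
+ *	voucher_t previous = voucher_adopt(v); // consumes the reference to v
+ *	// ... work done here is attributed to the new activity ...
+ *	voucher_t v2 = voucher_adopt(previous); // restore the previous voucher
+ *	if (v2) os_release(v2); // drop the reference we adopted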
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_activity_create(firehose_tracepoint_id_t trace_id,
+		voucher_t base, firehose_activity_flags_t flags, uint64_t location);
+
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
+		voucher_t base, firehose_activity_flags_t flags, uint64_t location);
+
+/*!
+ * @group Voucher Activity Trace SPI
+ * SPI intended for libtrace only
+ */
+
+/*!
+ * @function voucher_activity_flush
+ *
+ * @abstract
+ * Force flushing the specified stream.
+ *
+ * @discussion
+ * This marks all the buffers currently being written to as full, so that
+ * their current content is pushed in a timely fashion.
+ *
+ * When this call returns, the actual flush may or may not yet have happened.
+ *
+ * @param stream
+ * The stream to flush.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_activity_flush(firehose_stream_t stream);
+
+/*!
+ * @function voucher_activity_trace
+ *
+ * @abstract
+ * Add a tracepoint to the specified stream.
+ *
+ * @param stream
+ * The stream to trace this entry into.
+ *
+ * @param trace_id
+ * Tracepoint identifier returned by voucher_activity_trace_id()
+ *
+ * @param timestamp
+ * The mach_approximate_time()/mach_absolute_time() value for this tracepoint.
+ *
+ * @param pubdata
+ * Pointer to packed buffer of tracepoint data.
+ *
+ * @param publen
+ * Length of data at 'pubdata'.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4
+firehose_tracepoint_id_t
+voucher_activity_trace(firehose_stream_t stream,
+		firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+		const void *pubdata, size_t publen);
+
+/*!
+ * @function voucher_activity_trace_with_private_strings
+ *
+ * @abstract
+ * Add a tracepoint to the specified stream, with private data.
+ *
+ * @param stream
+ * The stream to trace this entry into.
+ *
+ * @param trace_id
+ * Tracepoint identifier returned by voucher_activity_trace_id()
+ *
+ * @param timestamp
+ * The mach_approximate_time()/mach_absolute_time() value for this tracepoint.
+ *
+ * @param pubdata
+ * Pointer to packed buffer of tracepoint data.
+ *
+ * @param publen
+ * Length of data at 'pubdata'.
+ *
+ * @param privdata
+ * Pointer to packed buffer of private tracepoint data.
+ *
+ * @param privlen
+ * Length of data at 'privdata'.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6
+firehose_tracepoint_id_t
+voucher_activity_trace_with_private_strings(firehose_stream_t stream,
+		firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+		const void *pubdata, size_t publen,
+		const void *privdata, size_t privlen);
+
+typedef struct voucher_activity_hooks_s {
+#define VOUCHER_ACTIVITY_HOOKS_VERSION 3
+	long vah_version;
+	// version 1
+	mach_port_t (*vah_get_logd_port)(void);
+	// version 2
+	dispatch_mach_handler_function_t vah_debug_channel_handler;
+	// version 3
+	kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *);
+} *voucher_activity_hooks_t;
+
+/*!
+ * @function voucher_activity_initialize_4libtrace
+ *
+ * @abstract
+ * Configure upcall hooks for libtrace.
+ *
+ * @param hooks
+ * A pointer to a voucher_activity_hooks_s structure.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void
+voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks);
+
+/*!
+ * @function voucher_activity_get_metadata_buffer
+ *
+ * @abstract
+ * Return address and length of buffer in the process trace memory area
+ * reserved for libtrace metadata.
+ *
+ * @param length
+ * Pointer to size_t variable, filled with length of metadata buffer.
+ *
+ * @result
+ * Address of metadata buffer.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL
+void*
+voucher_activity_get_metadata_buffer(size_t *length);
+
+/*!
+ * @function voucher_get_activity_id_4dyld
+ *
+ * @abstract
+ * Return the current voucher activity ID. Available for the dyld client stub
+ * only.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
+firehose_activity_id_t
+voucher_get_activity_id_4dyld(void);
+
+__END_DECLS
+
+#endif // OS_VOUCHER_ACTIVITY_SPI
+
+#endif // __OS_VOUCHER_ACTIVITY_PRIVATE__
diff --git a/private/voucher_private.h b/os/voucher_private.h
similarity index 94%
rename from private/voucher_private.h
rename to os/voucher_private.h
index 2640344..562a704 100644
--- a/private/voucher_private.h
+++ b/os/voucher_private.h
@@ -21,8 +21,23 @@
 #ifndef __OS_VOUCHER_PRIVATE__
 #define __OS_VOUCHER_PRIVATE__
 
+#ifndef __linux__
 #include <os/base.h>
+#endif
+#if __has_include(<mach/mach.h>)
 #include <os/object.h>
+#include <mach/mach.h>
+#endif
+#if __has_include(<bank/bank_types.h>)
+#include <bank/bank_types.h>
+#endif
+#if __has_include(<sys/persona.h>)
+#include <sys/persona.h>
+#endif
+
+#ifndef __DISPATCH_BUILDING_DISPATCH__
+#include <dispatch/dispatch.h>
+#endif /* !__DISPATCH_BUILDING_DISPATCH__ */
 
 #define OS_VOUCHER_SPI_VERSION 20150630
 
@@ -32,6 +47,8 @@
 #define OS_VOUCHER_EXPORT OS_EXPORT
 #endif
 
+DISPATCH_ASSUME_NONNULL_BEGIN
+
 __BEGIN_DECLS
 
 /*!
@@ -50,11 +67,18 @@ __BEGIN_DECLS
 * Voucher objects are os_objects (c.f. <os/object.h>). They are memory-managed
 * with the os_retain()/os_release() functions or -[retain]/-[release] methods.
 */
-#if OS_OBJECT_USE_OBJC
-OS_OBJECT_DECL(voucher);
-#else
-typedef struct voucher_s *voucher_t;
-#endif
+OS_OBJECT_DECL_CLASS(voucher);
+
+/*!
+ * @const VOUCHER_NULL
+ * Represents the empty base voucher with no attributes.
+ */
+#define VOUCHER_NULL ((voucher_t)0)
+/*!
+ * @const VOUCHER_INVALID
+ * Represents an invalid voucher
+ */
+#define VOUCHER_INVALID ((voucher_t)-1)
 
 /*!
  * @function voucher_adopt
@@ -79,8 +103,8 @@ typedef struct voucher_s *voucher_t;
 __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE
 OS_NOTHROW
-voucher_t
-voucher_adopt(voucher_t voucher OS_OBJECT_CONSUMED);
+voucher_t _Nullable
+voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED);
 
 /*!
  * @function voucher_copy
@@ -94,7 +118,7 @@ voucher_adopt(voucher_t voucher OS_OBJECT_CONSUMED);
 */
__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
-voucher_t
+voucher_t _Nullable
 voucher_copy(void);
 
 /*!
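As a usage note for the SPI above: voucher_copy() and voucher_adopt() are
designed to pair across threads that dispatch does not manage. A minimal
sketch, assuming manual retain/release (non-ARC); the worker function and
the pthread plumbing are illustrative, not part of this interface:

	#include <pthread.h>

	static void *worker(void *ctx)
	{
		// Adopt the voucher copied by the spawning thread; adoption
		// consumes the reference passed through ctx.
		voucher_t prev = voucher_adopt((voucher_t)ctx);
		// ... work here carries the copied voucher's attributes ...
		voucher_t v = voucher_adopt(prev); // restore the previous voucher
		if (v) os_release(v); // drop the reference we had adopted
		return NULL;
	}

	// Spawning side: hand the worker a copy of the current voucher.
	//	pthread_t tid;
	//	pthread_create(&tid, NULL, worker, (void *)voucher_copy());
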
@@ -113,7 +137,7 @@ voucher_copy(void);
 */
 __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
-voucher_t
+voucher_t _Nullable
 voucher_copy_without_importance(void);
 
 /*!
@@ -151,23 +175,19 @@ voucher_replace_default_voucher(void);
 *
 * @discussion
 * This is only intended for use by CoreFoundation to explicitly manage the
- * App Nap state of an application following receiption of a de-nap IPC message.
+ * App Nap state of an application following reception of a de-nap IPC message.
 *
 * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
 */
 __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
 OS_VOUCHER_EXPORT OS_NOTHROW
 void
-voucher_decrement_importance_count4CF(voucher_t voucher);
+voucher_decrement_importance_count4CF(voucher_t _Nullable voucher);
 
 /*!
 * @group Voucher dispatch block SPI
 */
 
-#ifndef __DISPATCH_BUILDING_DISPATCH__
-#include <dispatch/dispatch.h>
-#endif /* !__DISPATCH_BUILDING_DISPATCH__ */
-
 /*!
 * @typedef dispatch_block_flags_t
 * SPI Flags to pass to the dispatch_block_create* functions.
@@ -248,7 +268,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
 dispatch_block_create_with_voucher(dispatch_block_flags_t flags,
-		voucher_t voucher, dispatch_block_t block);
+		voucher_t _Nullable voucher, dispatch_block_t block);
 
 /*!
 * @function dispatch_block_create_with_voucher_and_qos_class
@@ -331,7 +351,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK
 DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_block_t
 dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags,
-		voucher_t voucher, dispatch_qos_class_t qos_class,
+		voucher_t _Nullable voucher, dispatch_qos_class_t qos_class,
 		int relative_priority, dispatch_block_t block);
 
 /*!
@@ -391,17 +411,18 @@ __OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_queue_t
-dispatch_queue_create_with_accounting_override_voucher(const char *label,
-		dispatch_queue_attr_t attr, voucher_t voucher);
+dispatch_queue_create_with_accounting_override_voucher(
+		const char *_Nullable label,
+		dispatch_queue_attr_t _Nullable attr,
+		voucher_t _Nullable voucher);
 
+#if __has_include(<mach/mach.h>)
 /*!
 * @group Voucher Mach SPI
 * SPI intended for clients that need to interact with mach messages or mach
 * voucher ports directly.
 */
 
-#include <mach/mach.h>
-
 /*!
 * @function voucher_create_with_mach_msg
 *
@@ -421,7 +442,7 @@ dispatch_queue_create_with_accounting_override_voucher(const char *label,
 */
 __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
 OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
-voucher_t
+voucher_t _Nullable
 voucher_create_with_mach_msg(mach_msg_header_t *msg);
 
 /*!
@@ -429,13 +450,6 @@ voucher_create_with_mach_msg(mach_msg_header_t *msg);
 * SPI intended for clients that need to interact with personas.
 */
 
-#if __has_include(<bank/bank_types.h>)
-#include <bank/bank_types.h>
-#endif
-#if __has_include(<sys/persona.h>)
-#include <sys/persona.h>
-#endif
-
 struct proc_persona_info;
 
 /*!
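A hedged sketch of the voucher-capturing block SPI above (non-ARC; the QoS
choice is illustrative, and it assumes the created block retains the voucher
it captures):

	#include <Block.h>

	static void submit_with_captured_voucher(void)
	{
		voucher_t v = voucher_copy(); // may be NULL, which the SPI accepts
		dispatch_block_t b = dispatch_block_create_with_voucher_and_qos_class(
				DISPATCH_BLOCK_ENFORCE_QOS_CLASS, v, QOS_CLASS_UTILITY, 0, ^{
			// runs with the attributes of v, at utility QoS
		});
		dispatch_async(dispatch_get_global_queue(QOS_CLASS_UTILITY, 0), b);
		Block_release(b); // the queue keeps its own reference while pending
		if (v) os_release(v); // drop our reference to the copied voucher
	}
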
@@ -514,12 +528,14 @@ int
 voucher_get_current_persona_proximate_info(
 		struct proc_persona_info *persona_info);
 
+#endif // __has_include(<mach/mach.h>)
+
 __END_DECLS
 
+DISPATCH_ASSUME_NONNULL_END
+
 #endif // __OS_VOUCHER_PRIVATE__
 
-#if (OS_VOUCHER_ACTIVITY_SPI || OS_VOUCHER_ACTIVITY_BUFFER_SPI) && \
-		!defined(__DISPATCH_BUILDING_DISPATCH__) && \
-		!defined(__OS_VOUCHER_ACTIVITY_PRIVATE__)
-#include <os/voucher_activity_private.h>
+#if OS_VOUCHER_ACTIVITY_SPI
+#include "voucher_activity_private.h"
 #endif
diff --git a/private/Makefile.am b/private/Makefile.am
index de12391..98840d5 100644
--- a/private/Makefile.am
+++ b/private/Makefile.am
@@ -7,6 +7,7 @@ noinst_HEADERS= \
	data_private.h \
	introspection_private.h \
	io_private.h \
+	layout_private.h \
	mach_private.h \
	private.h \
	queue_private.h \
diff --git a/private/benchmark.h b/private/benchmark.h
index c6edfe6..ef3cdbd 100644
--- a/private/benchmark.h
+++ b/private/benchmark.h
@@ -32,6 +32,8 @@
 #include <dispatch/private.h> // for HeaderDoc
 #endif
 
+DISPATCH_ASSUME_NONNULL_BEGIN
+
 __BEGIN_DECLS
 
 /*!
@@ -71,14 +73,17 @@ __BEGIN_DECLS
 __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
 DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
 uint64_t
-dispatch_benchmark(size_t count, void (^block)(void));
+dispatch_benchmark(size_t count, dispatch_block_t block);
 #endif
 
 __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
 DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW
 uint64_t
-dispatch_benchmark_f(size_t count, void *ctxt, void (*func)(void *));
+dispatch_benchmark_f(size_t count, void *_Nullable ctxt,
+		dispatch_function_t func);
 
 __END_DECLS
 
+DISPATCH_ASSUME_NONNULL_END
+
 #endif
diff --git a/private/data_private.h b/private/data_private.h
index 751b7ce..7485525 100644
--- a/private/data_private.h
+++ b/private/data_private.h
@@ -32,6 +32,8 @@
 #include <dispatch/private.h> // for HeaderDoc
 #endif
 
+DISPATCH_ASSUME_NONNULL_BEGIN
+
 __BEGIN_DECLS
 
 /*!
@@ -51,7 +53,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none);
 */
 #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \
		(_dispatch_data_destructor_vm_deallocate)
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE()
 DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate);
 
 /*!
@@ -80,8 +82,8 @@ DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW
 dispatch_data_t
 dispatch_data_create_f(const void *buffer,
		size_t size,
-		dispatch_queue_t queue,
-		dispatch_function_t destructor);
+		dispatch_queue_t _Nullable queue,
+		dispatch_function_t _Nullable destructor);
 
 /*!
 * @function dispatch_data_create_alloc
@@ -102,7 +104,7 @@
 __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
 DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 dispatch_data_t
-dispatch_data_create_alloc(size_t size, void** buffer_ptr);
+dispatch_data_create_alloc(size_t size, void *_Nullable *_Nullable buffer_ptr);
 
 /*!
 * @typedef dispatch_data_applier_function_t
 *
 * @abstract
 * A function to be invoked for every contiguous memory region in a data object.
 *
 * @param context	Application-defined context parameter.
 * @param region	A data object representing the current region.
 * @param offset	The logical offset of the current region to the start
 *			of the data object.
 * @param buffer	The location of the memory for the current region.
 * @param size	The size of the memory for the current region.
 * @result	A Boolean indicating whether traversal should continue.
 */
-typedef bool (*dispatch_data_applier_function_t)(void *context,
+typedef bool (*dispatch_data_applier_function_t)(void *_Nullable context,
	dispatch_data_t region, size_t offset, const void *buffer, size_t size);
 
 /*!
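The applier typedef above pairs with dispatch_data_apply_f(), declared in the
next hunk. A minimal sketch with a hypothetical byte-counting applier:

	static bool sum_region_sizes(void *context, dispatch_data_t region,
			size_t offset, const void *buffer, size_t size)
	{
		(void)region; (void)offset; (void)buffer;
		*(size_t *)context += size; // tally the bytes in this region
		return true; // continue traversal
	}

	// usage:
	//	size_t total = 0;
	//	dispatch_data_apply_f(data, &total, sum_region_sizes);
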
@@ -143,7 +145,7 @@ typedef bool (*dispatch_data_applier_function_t)(void *context, __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool -dispatch_data_apply_f(dispatch_data_t data, void *context, +dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context, dispatch_data_applier_function_t applier); #if TARGET_OS_MAC @@ -159,7 +161,7 @@ dispatch_data_apply_f(dispatch_data_t data, void *context, * * @param data The data object to make a memory entry for. * @result A mach port for the newly made memory entry, or - * MACH_PORT_NULL if an error ocurred. + * MACH_PORT_NULL if an error occurred. */ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW @@ -286,7 +288,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); * Flags specifying the input format of the source dispatch_data_t * * @param output_type - * Flags specifying the expected output format of the resulting transfomation. + * Flags specifying the expected output format of the resulting transformation. * * @result * A newly created dispatch data object, dispatch_data_empty if no has been @@ -303,4 +305,6 @@ dispatch_data_create_with_transform(dispatch_data_t data, __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif // __DISPATCH_DATA_PRIVATE__ diff --git a/private/introspection_private.h b/private/introspection_private.h index 7ac0e7e..fa8e49a 100644 --- a/private/introspection_private.h +++ b/private/introspection_private.h @@ -535,7 +535,7 @@ typedef void (*dispatch_introspection_hook_queue_item_complete_t)( * @typedef dispatch_introspection_hooks_s * * @abstract - * A structure of function pointer hoooks into libdispatch. + * A structure of function pointer hooks into libdispatch. */ typedef struct dispatch_introspection_hooks_s { diff --git a/private/io_private.h b/private/io_private.h index 4a00ee0..0bb1e3b 100644 --- a/private/io_private.h +++ b/private/io_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -83,8 +85,8 @@ void dispatch_read_f(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, - void *context, - void (*handler)(void *context, dispatch_data_t data, int error)); + void *_Nullable context, + void (*handler)(void *_Nullable context, dispatch_data_t data, int error)); /*! * @function dispatch_write_f @@ -126,8 +128,9 @@ void dispatch_write_f(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, - void *context, - void (*handler)(void *context, dispatch_data_t data, int error)); + void *_Nullable context, + void (*handler)(void *_Nullable context, dispatch_data_t _Nullable data, + int error)); /*! * @function dispatch_io_create_f @@ -164,8 +167,8 @@ dispatch_io_t dispatch_io_create_f(dispatch_io_type_t type, dispatch_fd_t fd, dispatch_queue_t queue, - void *context, - void (*cleanup_handler)(void *context, int error)); + void *_Nullable context, + void (*cleanup_handler)(void *_Nullable context, int error)); /*! * @function dispatch_io_create_with_path_f @@ -204,8 +207,8 @@ dispatch_io_t dispatch_io_create_with_path_f(dispatch_io_type_t type, const char *path, int oflag, mode_t mode, dispatch_queue_t queue, - void *context, - void (*cleanup_handler)(void *context, int error)); + void *_Nullable context, + void (*cleanup_handler)(void *_Nullable context, int error)); /*! 
* @function dispatch_io_create_with_io_f @@ -248,8 +251,8 @@ dispatch_io_t dispatch_io_create_with_io_f(dispatch_io_type_t type, dispatch_io_t io, dispatch_queue_t queue, - void *context, - void (*cleanup_handler)(void *context, int error)); + void *_Nullable context, + void (*cleanup_handler)(void *_Nullable context, int error)); /*! * @typedef dispatch_io_handler_function_t @@ -260,8 +263,8 @@ dispatch_io_create_with_io_f(dispatch_io_type_t type, * @param data The data object to be handled. * @param error An errno condition for the operation. */ -typedef void (*dispatch_io_handler_function_t)(void *context, bool done, - dispatch_data_t data, int error); +typedef void (*dispatch_io_handler_function_t)(void *_Nullable context, + bool done, dispatch_data_t _Nullable data, int error); /*! * @function dispatch_io_read_f @@ -316,7 +319,7 @@ dispatch_io_read_f(dispatch_io_t channel, off_t offset, size_t length, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_io_handler_function_t io_handler); /*! @@ -373,7 +376,7 @@ dispatch_io_write_f(dispatch_io_t channel, off_t offset, dispatch_data_t data, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_io_handler_function_t io_handler); /*! @@ -403,9 +406,11 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_io_barrier_f(dispatch_io_t channel, - void *context, + void *_Nullable context, dispatch_function_t barrier); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_IO_PRIVATE__ */ diff --git a/private/layout_private.h b/private/layout_private.h index 17e8ed8..bf93ee9 100644 --- a/private/layout_private.h +++ b/private/layout_private.h @@ -69,16 +69,6 @@ DISPATCH_EXPORT const struct dispatch_tsd_indexes_s { const uint16_t dti_qos_class_index; } dispatch_tsd_indexes; -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -DISPATCH_EXPORT const struct voucher_offsets_s { - // always add new fields at the end - const uint16_t vo_version; - const uint16_t vo_activity_ids_count; - const uint16_t vo_activity_ids_count_size; - const uint16_t vo_activity_ids_array; - const uint16_t vo_activity_ids_array_entry_size; -} voucher_offsets; - #endif // DISPATCH_LAYOUT_SPI __END_DECLS diff --git a/private/mach_private.h b/private/mach_private.h index 93c1e81..2228436 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -36,8 +36,12 @@ __BEGIN_DECLS #if DISPATCH_MACH_SPI +#define DISPATCH_MACH_SPI_VERSION 20160505 + #include +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @functiongroup Dispatch Mach Channel SPI * @@ -53,7 +57,8 @@ DISPATCH_DECL(dispatch_mach); /*! * @typedef dispatch_mach_reason_t - * Reasons for a mach channel handler to be invoked. + * Reasons for a mach channel handler to be invoked, or the result of an + * immediate send attempt. * * @const DISPATCH_MACH_CONNECTED * The channel has been connected. The first handler invocation on a channel @@ -91,6 +96,19 @@ DISPATCH_DECL(dispatch_mach); * * @const DISPATCH_MACH_CANCELED * The channel has been canceled. + * + * @const DISPATCH_MACH_REPLY_RECEIVED + * A synchronous reply to a call to dispatch_mach_send_and_wait_for_reply() has + * been received on another thread, an empty message is passed in the message + * parameter (so that associated port rights can be disposed of). + * The message header will contain a local port with a receive right associated + * with the reply to the message that was synchronously sent to the channel. 
+ * + * @const DISPATCH_MACH_NEEDS_DEFERRED_SEND + * The message could not be sent synchronously. Only returned from a send with + * result operation and never passed to a channel handler. Indicates that the + * message passed to the send operation must not be disposed of until it is + * returned via the channel handler. */ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_CONNECTED = 1, @@ -101,9 +119,19 @@ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_BARRIER_COMPLETED, DISPATCH_MACH_DISCONNECTED, DISPATCH_MACH_CANCELED, + DISPATCH_MACH_REPLY_RECEIVED, + DISPATCH_MACH_NEEDS_DEFERRED_SEND, DISPATCH_MACH_REASON_LAST, /* unused */ ); +/*! + * @typedef dispatch_mach_send_flags_t + * Flags that can be passed to the *with_flags send functions. + */ +DISPATCH_ENUM(dispatch_mach_send_flags, unsigned long, + DISPATCH_MACH_SEND_DEFAULT = 0, +); + /*! * @typedef dispatch_mach_trailer_t * Trailer type of mach message received by dispatch mach channels @@ -178,8 +206,9 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_mach_msg_t -dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, - dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr); +dispatch_mach_msg_create(mach_msg_header_t *_Nullable msg, size_t size, + dispatch_mach_msg_destructor_t destructor, + mach_msg_header_t *_Nonnull *_Nullable msg_ptr); /*! * @function dispatch_mach_msg_get_msg @@ -193,7 +222,8 @@ dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW mach_msg_header_t* -dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, size_t *size_ptr); +dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, + size_t *_Nullable size_ptr); #ifdef __BLOCKS__ /*! @@ -205,7 +235,7 @@ dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, size_t *size_ptr); * @param error Mach error code for the send operation. */ typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason_t reason, - dispatch_mach_msg_t message, mach_error_t error); + dispatch_mach_msg_t _Nullable message, mach_error_t error); /*! * @function dispatch_mach_create @@ -241,8 +271,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL3 DISPATCH_NOTHROW dispatch_mach_t -dispatch_mach_create(const char *label, dispatch_queue_t queue, - dispatch_mach_handler_t handler); +dispatch_mach_create(const char *_Nullable label, + dispatch_queue_t _Nullable queue, dispatch_mach_handler_t handler); #endif /*! @@ -254,8 +284,8 @@ dispatch_mach_create(const char *label, dispatch_queue_t queue, * @param message Message object that was sent or received. * @param error Mach error code for the send operation. */ -typedef void (*dispatch_mach_handler_function_t)(void *context, - dispatch_mach_reason_t reason, dispatch_mach_msg_t message, +typedef void (*dispatch_mach_handler_function_t)(void *_Nullable context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t _Nullable message, mach_error_t error); /*! 
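A sketch of a function-based channel built on the handler typedef above
(dispatch_mach_create_f() follows in the next hunk; the label, ports and
per-reason handling are illustrative):

	static void channel_handler(void *context, dispatch_mach_reason_t reason,
			dispatch_mach_msg_t message, mach_error_t error)
	{
		(void)context; (void)message; (void)error;
		switch (reason) {
		case DISPATCH_MACH_MESSAGE_RECEIVED:
			// consume the contents of 'message' here
			break;
		case DISPATCH_MACH_CANCELED:
			// the channel has been canceled
			break;
		default:
			break;
		}
	}

	// usage:
	//	dispatch_mach_t ch = dispatch_mach_create_f("com.example.channel",
	//			NULL, NULL, channel_handler);
	//	dispatch_mach_connect(ch, receive_right, send_right, NULL);
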
@@ -295,7 +325,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
 DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 dispatch_mach_t
-dispatch_mach_create_f(const char *label, dispatch_queue_t queue, void *context,
+dispatch_mach_create_f(const char *_Nullable label,
+		dispatch_queue_t _Nullable queue, void *_Nullable context,
		dispatch_mach_handler_function_t handler);
 
 /*!
@@ -327,7 +358,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive,
-		mach_port_t send, dispatch_mach_msg_t checkin);
+		mach_port_t send, dispatch_mach_msg_t _Nullable checkin);
 
 /*!
 * @function dispatch_mach_reconnect
@@ -358,7 +389,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send,
-		dispatch_mach_msg_t checkin);
+		dispatch_mach_msg_t _Nullable checkin);
 
 /*!
 * @function dispatch_mach_cancel
@@ -426,6 +457,222 @@ void
 dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message,
		mach_msg_option_t options);
 
+/*!
+ * @function dispatch_mach_send_with_result
+ * Asynchronously send a message encapsulated in a dispatch mach message object
+ * to the specified mach channel. If an immediate send can be performed, return
+ * its result via out parameters.
+ *
+ * Unless the message is being sent to a send-once right (as determined by the
+ * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits),
+ * the message header remote port is set to the channel send right before the
+ * send operation is performed.
+ *
+ * If the message expects a direct reply (as determined by the presence of
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits) the receive
+ * right specified in the message header local port will be monitored until a
+ * reply message (or a send-once notification) is received, or the channel is
+ * canceled. Hence the application must wait for the channel handler to be
+ * invoked with a DISPATCH_MACH_DISCONNECTED message before releasing that
+ * receive right.
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation. If the message expected a direct reply, the
+ * receive right originally specified in the message header local port will be
+ * returned in a DISPATCH_MACH_DISCONNECTED message.
+ *
+ * If an immediate send could be performed, returns the resulting reason
+ * (e.g. DISPATCH_MACH_MESSAGE_SENT) and possible error to the caller in the
+ * send_result and send_error out parameters (instead of via the channel
+ * handler), in which case the passed-in message and associated resources
+ * can be disposed of synchronously.
+ *
+ * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND
+ * in the send_result out parameter to indicate that the passed-in message has
+ * been retained and associated resources must not be disposed of until the
+ * message is returned asynchronously via the channel handler.
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send. Unless an immediate
+ * send could be performed, the object will be retained until the asynchronous
+ * send operation is complete and the channel handler has returned. The storage
+ * underlying the message object may be modified by the send operation.
+ *
+ * @param options
+ * Additional send options to pass to mach_msg() when performing the send
+ * operation.
+ *
+ * @param send_flags
+ * Flags to configure the send operation. Must be 0 for now.
+ *
+ * @param send_result
+ * Out parameter to return the result of the immediate send attempt.
+ * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND.
+ * Must not be NULL.
+ *
+ * @param send_error
+ * Out parameter to return the error from the immediate send attempt.
+ * If a deferred send is required, returns 0. Must not be NULL.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5
+DISPATCH_NONNULL6 DISPATCH_NOTHROW
+void
+dispatch_mach_send_with_result(dispatch_mach_t channel,
+		dispatch_mach_msg_t message, mach_msg_option_t options,
+		dispatch_mach_send_flags_t send_flags,
+		dispatch_mach_reason_t *send_result, mach_error_t *send_error);
+
+/*!
+ * @function dispatch_mach_send_and_wait_for_reply
+ * Synchronously send a message encapsulated in a dispatch mach message object
+ * to the specified mach channel and wait for a reply.
+ *
+ * Unless the message is being sent to a send-once right (as determined by the
+ * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits),
+ * the message header remote port is set to the channel send right before the
+ * send operation is performed.
+ *
+ * The message is required to expect a direct reply (as determined by the
+ * presence of MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits)
+ * and this function will not complete until the receive right specified in the
+ * message header local port receives a reply message (or a send-once
+ * notification) which will be returned, or until that receive right is
+ * destroyed in response to the channel being canceled, in which case NULL will
+ * be returned.
+ * In all these cases the application must wait for the channel handler to
+ * be invoked with a DISPATCH_MACH_REPLY_RECEIVED or DISPATCH_MACH_DISCONNECTED
+ * message before releasing that receive right.
+ *
+ * Alternatively, the application may specify MACH_PORT_NULL in the header local
+ * port to indicate that the channel should create and manage the reply receive
+ * right internally, including destroying it upon channel cancellation.
+ * This is a more efficient mode of operation as no asynchronous operations are
+ * required to return the receive right (i.e. the channel handler will not be
+ * called as described above).
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation. The receive right originally specified in the
+ * message header local port will be returned in a DISPATCH_MACH_DISCONNECTED
+ * message (unless it was MACH_PORT_NULL).
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send. The object will be
+ * retained until the send operation is complete and the channel handler has
+ * returned. The storage underlying the message object may be modified by the
+ * send operation.
+ *
+ * @param options
+ * Additional send options to pass to mach_msg() when performing the send
+ * operation.
+ *
+ * @result
+ * The received reply message object, or NULL if the channel was canceled.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
+dispatch_mach_msg_t _Nullable
+dispatch_mach_send_and_wait_for_reply(dispatch_mach_t channel,
+		dispatch_mach_msg_t message, mach_msg_option_t options);
+
+/*!
+ * @function dispatch_mach_send_with_result_and_wait_for_reply
+ * Synchronously send a message encapsulated in a dispatch mach message object
+ * to the specified mach channel and wait for a reply. If an immediate send can
+ * be performed, return its result via out parameters.
+ *
+ * Unless the message is being sent to a send-once right (as determined by the
+ * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits),
+ * the message header remote port is set to the channel send right before the
+ * send operation is performed.
+ *
+ * The message is required to expect a direct reply (as determined by the
+ * presence of MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits)
+ * and this function will not complete until the receive right specified in the
+ * message header local port receives a reply message (or a send-once
+ * notification) which will be returned, or until that receive right is
+ * destroyed in response to the channel being canceled, in which case NULL will
+ * be returned.
+ * In all these cases the application must wait for the channel handler to
+ * be invoked with a DISPATCH_MACH_REPLY_RECEIVED or DISPATCH_MACH_DISCONNECTED
+ * message before releasing that receive right.
+ *
+ * Alternatively, the application may specify MACH_PORT_NULL in the header local
+ * port to indicate that the channel should create and manage the reply receive
+ * right internally, including destroying it upon channel cancellation.
+ * This is a more efficient mode of operation as no asynchronous operations are
+ * required to return the receive right (i.e. the channel handler will not be
+ * called as described above).
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation. The receive right originally specified in the
+ * message header local port will be returned in a DISPATCH_MACH_DISCONNECTED
+ * message (unless it was MACH_PORT_NULL).
+ *
+ * If an immediate send could be performed, returns the resulting reason
+ * (e.g. DISPATCH_MACH_MESSAGE_SENT) and possible error to the caller in the
+ * send_result and send_error out parameters (instead of via the channel
+ * handler), in which case the passed-in message and associated resources
+ * can be disposed of synchronously.
+ *
+ * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND
+ * in the send_result out parameter to indicate that the passed-in message has
+ * been retained and associated resources must not be disposed of until the
+ * message is returned asynchronously via the channel handler.
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send.
Unless an immediate + * send could be performed, the object will be retained until the asynchronous + * send operation is complete and the channel handler has returned. The storage + * underlying the message object may be modified by the send operation. + * + * @param options + * Additional send options to pass to mach_msg() when performing the send + * operation. + * + * @param send_flags + * Flags to configure the send operation. Must be 0 for now. + * + * @param send_result + * Out parameter to return the result of the immediate send attempt. + * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND. + * Must not be NULL. + * + * @param send_error + * Out parameter to return the error from the immediate send attempt. + * If a deferred send is required, returns 0. Must not be NULL. + * + * @result + * The received reply message object, or NULL if the channel was canceled. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6 +DISPATCH_NOTHROW +dispatch_mach_msg_t _Nullable +dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t channel, + dispatch_mach_msg_t message, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error); + #ifdef __BLOCKS__ /*! * @function dispatch_mach_send_barrier @@ -467,7 +714,7 @@ dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier); __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void -dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *context, +dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context, dispatch_function_t barrier); #ifdef __BLOCKS__ @@ -510,7 +757,7 @@ dispatch_mach_receive_barrier(dispatch_mach_t channel, __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void -dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *context, +dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context, dispatch_function_t barrier); /*! 
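A sketch of the deferred-send contract described above ('ch' and 'msg' stand
in for a connected channel and a message object):

	static void send_or_defer(dispatch_mach_t ch, dispatch_mach_msg_t msg)
	{
		dispatch_mach_reason_t send_result;
		mach_error_t send_error;
		dispatch_mach_send_with_result(ch, msg, MACH_MSG_OPTION_NONE,
				DISPATCH_MACH_SEND_DEFAULT, &send_result, &send_error);
		if (send_result == DISPATCH_MACH_NEEDS_DEFERRED_SEND) {
			// the channel retained 'msg'; dispose of it only when it
			// is returned through the channel handler
		} else {
			// immediate result (e.g. DISPATCH_MACH_MESSAGE_SENT):
			// 'msg' may be disposed of synchronously
			dispatch_release(msg);
		}
	}
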
@@ -539,6 +786,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_port_t
 dispatch_mach_get_checkin_port(dispatch_mach_t channel);
 
+DISPATCH_ASSUME_NONNULL_END
+
 #endif // DISPATCH_MACH_SPI
 
 __END_DECLS
diff --git a/private/module.modulemap b/private/module.modulemap
new file mode 100644
index 0000000..62975a5
--- /dev/null
+++ b/private/module.modulemap
@@ -0,0 +1,11 @@
+module DispatchPrivate [system] [extern_c] {
+	umbrella header "private.h"
+	exclude header "mach_private.h"
+	module * { export * }
+	export *
+}
+
+module DispatchIntrospectionPrivate [system] [extern_c] {
+	header "introspection_private.h"
+	export *
+}
diff --git a/private/private.h b/private/private.h
index 46d0e5d..3c37bed 100644
--- a/private/private.h
+++ b/private/private.h
@@ -54,7 +54,9 @@
 #include 
 #include 
 #include 
+#if DISPATCH_MACH_SPI
 #include 
+#endif // DISPATCH_MACH_SPI
 #include 
 #include 
 #include 
@@ -64,10 +66,12 @@
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
 
 // Check that public and private dispatch headers match
-#if DISPATCH_API_VERSION != 20141121 // Keep in sync with <dispatch/dispatch.h>
+#if DISPATCH_API_VERSION != 20160712 // Keep in sync with <dispatch/dispatch.h>
 #error "Dispatch header mismatch between /usr/include and /usr/local/include"
 #endif
 
+DISPATCH_ASSUME_NONNULL_BEGIN
+
 __BEGIN_DECLS
 
 /*!
@@ -117,6 +121,34 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
 DISPATCH_EXPORT DISPATCH_NOTHROW
 bool
 _dispatch_is_fork_of_multithreaded_parent(void);
 
+/*!
+ * @function _dispatch_prohibit_transition_to_multithreaded
+ *
+ * @abstract
+ * Sets a mode that causes the process to abort if dispatch API use would make
+ * it multithreaded.
+ *
+ * @discussion
+ * This SPI is intended for use by programs that know they will use fork() and
+ * want their children to be able to use dispatch before exec(). Such programs
+ * should call _dispatch_prohibit_transition_to_multithreaded(true) as early as
+ * possible, which will cause any use of dispatch API that would make the
+ * process multithreaded to abort immediately.
+ *
+ * Once the program no longer intends to call fork() it can call
+ * _dispatch_prohibit_transition_to_multithreaded(false).
+ *
+ * This status is not inherited by the child process, so if the behavior
+ * is required after fork, _dispatch_prohibit_transition_to_multithreaded(true)
+ * should be called manually in the child after fork.
+ *
+ * If the program already used dispatch before the guard is enabled, then
+ * this function will abort immediately.
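+ *
+ * An illustrative call sequence (the forking scenario is hypothetical):
+ *
+ *	_dispatch_prohibit_transition_to_multithreaded(true);
+ *	// ... single-threaded setup; multithreaded dispatch use aborts ...
+ *	pid_t pid = fork();
+ *	if (pid == 0) {
+ *		// child: the guard is not inherited; dispatch is usable
+ *		// before exec(). Re-enable the guard here if the child
+ *		// intends to fork() as well.
+ *	} else if (pid > 0) {
+ *		_dispatch_prohibit_transition_to_multithreaded(false);
+ *		// parent: no further fork() planned, normal dispatch use
+ *	}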
+ */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); + /* * dispatch_time convenience macros */ @@ -131,40 +163,60 @@ bool _dispatch_is_fork_of_multithreaded_parent(void); dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) /* - * SPI for CoreFoundation/Foundation/libauto ONLY + * SPI for CoreFoundation/Foundation ONLY */ -#define DISPATCH_COCOA_COMPAT (TARGET_OS_MAC || TARGET_OS_WIN32) +#if TARGET_OS_MAC +#define DISPATCH_COCOA_COMPAT 1 +#elif defined(__linux__) +#define DISPATCH_COCOA_COMPAT 1 +#else +#define DISPATCH_COCOA_COMPAT 0 +#endif #if DISPATCH_COCOA_COMPAT +#define DISPATCH_CF_SPI_VERSION 20160712 + +#if TARGET_OS_MAC +typedef mach_port_t dispatch_runloop_handle_t; +#elif defined(__linux__) +typedef int dispatch_runloop_handle_t; +#else +#error "runloop support not implemented on this platform" +#endif + #if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW -mach_port_t +dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void); +#endif -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) DISPATCH_EXPORT DISPATCH_NOTHROW -void -_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg); -#elif TARGET_OS_WIN32 -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -HANDLE +dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void); +#if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NOTHROW void -_dispatch_main_queue_callback_4CF(void); -#endif // TARGET_OS_WIN32 +_dispatch_main_queue_callback_4CF(mach_msg_header_t *_Null_unspecified msg); +#else +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_main_queue_callback_4CF(void *_Null_unspecified msg); +#endif __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags); +_dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, + unsigned long flags); #if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) @@ -191,21 +243,16 @@ _dispatch_source_set_runloop_timer_4CF(dispatch_source_t source, __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT -void (*dispatch_begin_thread_4GC)(void); - -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -void (*dispatch_end_thread_4GC)(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -void *(*_dispatch_begin_NSAutoReleasePool)(void); +void *_Nonnull (*_Nullable _dispatch_begin_NSAutoReleasePool)(void); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT -void (*_dispatch_end_NSAutoReleasePool)(void *); +void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *); #endif /* DISPATCH_COCOA_COMPAT */ __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif // __DISPATCH_PRIVATE__ diff --git a/private/queue_private.h b/private/queue_private.h index f2bb691..33de371 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! 
@@ -54,6 +56,18 @@ enum { * Returns a dispatch queue attribute value with the overcommit flag set to the * specified value. * + * This attribute only makes sense when the specified queue is targeted at + * a root queue. Passing this attribute to dispatch_queue_create_with_target() + * with a target queue that is not a root queue will result in an assertion and + * the process being terminated. + * + * It is recommended to not specify a target queue at all when using this + * attribute and to use dispatch_queue_attr_make_with_qos_class() to select the + * appropriate QoS class instead. + * + * Queues created with this attribute cannot change target after having been + * activated. See dispatch_set_target_queue() and dispatch_activate(). + * * @param attr * A queue attribute value to be combined with the overcommit flag, or NULL. * @@ -68,7 +82,7 @@ enum { __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t -dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t attr, +dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, bool overcommit); /*! @@ -120,50 +134,6 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_queue_set_width(dispatch_queue_t dq, long width); -/*! - * @function dispatch_queue_create_with_target - * - * @abstract - * Creates a new dispatch queue with a specified target queue. - * - * @discussion - * Dispatch queues created with the DISPATCH_QUEUE_SERIAL or a NULL attribute - * invoke blocks serially in FIFO order. - * - * Dispatch queues created with the DISPATCH_QUEUE_CONCURRENT attribute may - * invoke blocks concurrently (similarly to the global concurrent queues, but - * potentially with more overhead), and support barrier blocks submitted with - * the dispatch barrier API, which e.g. enables the implementation of efficient - * reader-writer schemes. - * - * When a dispatch queue is no longer needed, it should be released with - * dispatch_release(). Note that any pending blocks submitted to a queue will - * hold a reference to that queue. Therefore a queue will not be deallocated - * until all pending blocks have finished. - * - * @param label - * A string label to attach to the queue. - * This parameter is optional and may be NULL. - * - * @param attr - * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to - * the function dispatch_queue_attr_make_with_qos_class(). - * - * @param target - * The target queue for the newly created queue. The target queue is retained. - * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the queue's target - * queue to the default target queue for the given queue type. - * - * @result - * The newly created dispatch queue. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_queue_t -dispatch_queue_create_with_target(const char *label, - dispatch_queue_attr_t attr, dispatch_queue_t target); - #ifdef __BLOCKS__ /*! 
* @function dispatch_pthread_root_queue_create @@ -223,8 +193,9 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_pthread_root_queue_create(const char *label, unsigned long flags, - const pthread_attr_t *attr, dispatch_block_t configure); +dispatch_pthread_root_queue_create(const char *_Nullable label, + unsigned long flags, const pthread_attr_t *_Nullable attr, + dispatch_block_t _Nullable configure); /*! * @function dispatch_pthread_root_queue_flags_pool_size @@ -257,80 +228,31 @@ dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size) #endif /* __BLOCKS__ */ /*! - * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE - * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f() - * functions to indicate that the root queue for the current thread should be - * used (i.e. one of the global concurrent queues or a queue created with - * dispatch_pthread_root_queue_create()). If there is no such queue, the - * default priority global concurrent queue will be used. - */ -#define DISPATCH_APPLY_CURRENT_ROOT_QUEUE NULL - -/*! - * @function dispatch_assert_queue + * @function dispatch_pthread_root_queue_copy_current * * @abstract - * Verifies that the current block is executing on a certain dispatch queue. - * - * @discussion - * Some code expects to be run on a specific dispatch queue. This function - * verifies that expectation for debugging. - * - * This function will only return if the currently executing block was submitted - * to the specified queue or to any queue targeting it (see - * dispatch_set_target_queue()). Otherwise, it logs an explanation to the system - * log, then terminates the application. - * - * When dispatch_assert_queue() is called outside of the context of a - * submitted block, its behavior is undefined. - * - * Passing the result of dispatch_get_main_queue() to this function verifies - * that the current block was submitted to the main queue or to a queue - * targeting it. - * IMPORTANT: this is NOT the same as verifying that the current block is - * executing on the main thread. + * Returns a reference to the pthread root queue object that has created the + * currently executing thread, or NULL if the current thread is not associated + * to a pthread root queue. * - * The variant dispatch_assert_queue_debug() is compiled out when the - * preprocessor macro NDEBUG is defined. (See also assert(3)). - * - * @param queue - * The dispatch queue that the current block is expected to run on. - * The result of passing NULL in this parameter is undefined. + * @result + * A new reference to a pthread root queue object or NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 -void -dispatch_assert_queue(dispatch_queue_t queue); +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_queue_t _Nullable +dispatch_pthread_root_queue_copy_current(void); /*! - * @function dispatch_assert_queue_not - * - * @abstract - * Verifies that the current block is not executing on a certain dispatch queue. - * - * @discussion - * This function is the equivalent of dispatch_queue_assert() with the test for - * equality inverted. See discussion there. - * - * The variant dispatch_assert_queue_not_debug() is compiled out when the - * preprocessor macro NDEBUG is defined. 
(See also assert(3)). - * - * @param queue - * The dispatch queue that the current block is expected not to run on. - * The result of passing NULL in this parameter is undefined. + * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE + * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f() + * functions to indicate that the root queue for the current thread should be + * used (i.e. one of the global concurrent queues or a queue created with + * dispatch_pthread_root_queue_create()). If there is no such queue, the + * default priority global concurrent queue will be used. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 -void -dispatch_assert_queue_not(dispatch_queue_t queue); - -#ifdef NDEBUG -#define dispatch_assert_queue_debug(q) ((void)0) -#define dispatch_assert_queue_not_debug(q) ((void)0) -#else -#define dispatch_assert_queue_debug(q) dispatch_assert_queue(q) -#define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q) -#endif +#define DISPATCH_APPLY_CURRENT_ROOT_QUEUE ((dispatch_queue_t _Nonnull)0) /*! * @function dispatch_async_enforce_qos_class_f @@ -366,10 +288,11 @@ __OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, - void *context, - dispatch_function_t work); + void *_Nullable context, dispatch_function_t work); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/private/source_private.h b/private/source_private.h index 2c76824..bb13702 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -77,18 +79,19 @@ DISPATCH_SOURCE_TYPE_DECL(interval); * The handle is a process identifier (pid_t). */ #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; /*! * @const DISPATCH_SOURCE_TYPE_VM * @discussion A dispatch source that monitors virtual memory * The mask is a mask of desired events from dispatch_source_vm_flags_t. - * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead. + * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead. */ #define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm) __OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3, - __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead") + __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; /*! @@ -98,7 +101,11 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; * dispatch_source_memorystatus_flags_t. 
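[Example — not part of the patch] A hedged sketch tying together the pthread-root-queue SPI above and the new NULL-constant definition of DISPATCH_APPLY_CURRENT_ROOT_QUEUE; the label and iteration count are illustrative.

#include <dispatch/dispatch.h>
#include <dispatch/private.h> // assumed install path of this SPI header

static void pthread_root_queue_demo(void)
{
	dispatch_queue_t root = dispatch_pthread_root_queue_create(
			"com.example.pthread-root", 0, NULL, NULL);
	dispatch_async(root, ^{
		// On a worker thread created by `root`, this returns a new
		// reference to it; on unrelated threads it returns NULL.
		dispatch_queue_t cur = dispatch_pthread_root_queue_copy_current();
		if (cur) dispatch_release(cur);
		// The NULL constant asks dispatch_apply to use the current
		// thread's root queue, i.e. `root` here.
		dispatch_apply(8, DISPATCH_APPLY_CURRENT_ROOT_QUEUE, ^(size_t i) {
			(void)i; // per-iteration work
		});
	});
	dispatch_release(root);
}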
*/ #define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_memorystatus; @@ -107,7 +114,7 @@ DISPATCH_EXPORT const struct dispatch_source_type_s * @discussion A dispatch source that monitors events on socket state changes. */ #define DISPATCH_SOURCE_TYPE_SOCK (&_dispatch_source_type_sock) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; __END_DECLS @@ -150,6 +157,9 @@ __END_DECLS * * @constant DISPATCH_SOCK_CONNINFO_UPDATED * Connection info was updated + * + * @constant DISPATCH_SOCK_NOTIFY_ACK + * Notify acknowledgement */ enum { DISPATCH_SOCK_CONNRESET = 0x00000001, @@ -166,6 +176,7 @@ enum { DISPATCH_SOCK_CONNECTED = 0x00000800, DISPATCH_SOCK_DISCONNECTED = 0x00001000, DISPATCH_SOCK_CONNINFO_UPDATED = 0x00002000, + DISPATCH_SOCK_NOTIFY_ACK = 0x00004000, }; /*! @@ -274,36 +285,127 @@ enum { enum { DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG( __MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0, - "Use DISPATCH_MEMORYSTATUS_PRESSURE_WARN instead") = 0x80000000, + "Use DISPATCH_MEMORYPRESSURE_WARN instead") = 0x80000000, }; /*! - * @enum dispatch_source_memorystatus_flags_t + * @typedef dispatch_source_memorypressure_flags_t + * Type of dispatch_source_memorypressure flags * - * @constant DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL - * The system's memory pressure state has returned to normal. - * @constant DISPATCH_MEMORYSTATUS_PRESSURE_WARN - * The system's memory pressure state has changed to warning. - * @constant DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL - * The system's memory pressure state has changed to critical. - * @constant DISPATCH_MEMORYSTATUS_LOW_SWAP + * @constant DISPATCH_MEMORYPRESSURE_LOW_SWAP * The system's memory pressure state has entered the "low swap" condition. * Restricted to the root user. */ +enum { + DISPATCH_MEMORYPRESSURE_LOW_SWAP + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08, +}; +/*! 
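[Example — not part of the patch] The deprecations above all point at the public memory-pressure source; a short example using the replacement names (DISPATCH_SOURCE_TYPE_MEMORYPRESSURE and the WARN/CRITICAL masks are the public API this diff refers to):

#include <dispatch/dispatch.h>

static dispatch_source_t start_pressure_monitor(void)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
			DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL,
			dispatch_get_global_queue(QOS_CLASS_UTILITY, 0));
	dispatch_source_set_event_handler(ds, ^{
		unsigned long flags = dispatch_source_get_data(ds);
		if (flags & DISPATCH_MEMORYPRESSURE_CRITICAL) {
			// Shed caches aggressively (illustrative reaction).
		}
	});
	dispatch_activate(ds);
	return ds;
}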
+ * @enum dispatch_source_memorystatus_flags_t + * @warning Deprecated, see DISPATCH_MEMORYPRESSURE_* + */ enum { DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x01, + __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + = 0x01, DISPATCH_MEMORYSTATUS_PRESSURE_WARN - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x02, + __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + = 0x02, DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_8_0) = 0x04, + __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + = 0x04, DISPATCH_MEMORYSTATUS_LOW_SWAP - __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08, + __OSX_DEPRECATED(10.10, 10.12, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + = 0x08, +}; + +/*! + * @typedef dispatch_source_memorypressure_flags_t + * Type of dispatch_source_memorypressure flags + * + * @constant DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN + * The memory of the process has crossed 80% of its high watermark limit. + * + * @constant DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL + * The memory of the process has reached 100% of its high watermark limit. + */ +enum { + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN + __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) + __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x10, + + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL + __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) + __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x20, }; + __BEGIN_DECLS +/*! + * @function dispatch_source_cancel_and_wait + * + * @abstract + * Synchronously cancel the dispatch source, preventing any further invocation + * of its event handler block. + * + * @discussion + * Cancellation prevents any further invocation of handler blocks for the + * specified dispatch source, but does not interrupt a handler block that is + * already in progress. + * + * When this function returns, any handler block that may have been in progress + * has returned, the specified source has been unregistered and it is safe to + * reclaim any system resource (such as file descriptors or mach ports) that + * the specified source was monitoring. + * + * If the specified dispatch source is inactive, it will be activated as a side + * effect of calling this function. + * + * It is possible to call this function from several threads concurrently, + * and it is the responsibility of the callers to synchronize reclaiming the + * associated system resources. 
+ * + * This function is not subject to priority inversion when it is waiting on + * a handler block still in progress, unlike patterns based on waiting on + * a dispatch semaphore or a dispatch group signaled (or left) from the source + * cancel handler. + * + * This function must not be called if the specified source has a cancel + * handler set, or from the context of its handler blocks. + * + * This function must not be called from the context of the target queue of + * the specified source or from any queue that synchronizes with it. Note that + * calling dispatch_source_cancel() from such a context already guarantees + * that no handler is in progress, and that no new event will be delivered. + * + * This function must not be called on sources suspended with an explicit + * call to dispatch_suspend(), or being concurrently activated on another + * thread. + * + * @param source + * The dispatch source to be canceled. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) +__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_source_cancel_and_wait(dispatch_source_t source); + /*! * @typedef dispatch_timer_aggregate_t * @@ -354,9 +456,9 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_NOTHROW uint64_t dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, - uint64_t *leeway_ptr); + uint64_t *_Nullable leeway_ptr); -#if TARGET_OS_MAC +#if __has_include() /*! * @typedef dispatch_mig_callback_t * @@ -366,7 +468,7 @@ dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message, mach_msg_header_t *reply); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, @@ -378,13 +480,15 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, * @abstract * Extract the context pointer from a mach message trailer. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void * +void *_Nullable dispatch_mach_msg_get_context(mach_msg_header_t *msg); #endif __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/private/voucher_activity_private.h b/private/voucher_activity_private.h deleted file mode 100644 index 8a13e76..0000000 --- a/private/voucher_activity_private.h +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Copyright (c) 2013-2014 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
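[Example — not part of the patch] A minimal sketch of the contract documented for dispatch_source_cancel_and_wait() above: the source has no cancel handler installed, the call is made from a context unrelated to the source's target queue, and the monitored descriptor is reclaimed only after the call returns.

#include <dispatch/dispatch.h>
#include <unistd.h>

static void stop_reader_source(dispatch_source_t ds, int fd)
{
	// Returns only once any in-flight event handler has finished and the
	// source is unregistered; must not be called from ds's target queue.
	dispatch_source_cancel_and_wait(ds);
	dispatch_release(ds);
	close(fd); // safe: the source no longer monitors this fd
}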
- * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __OS_VOUCHER_ACTIVITY_PRIVATE__ -#define __OS_VOUCHER_ACTIVITY_PRIVATE__ - -#include -#include -#if !defined(__DISPATCH_BUILDING_DISPATCH__) -#include -#endif - -#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20150318 - -#if OS_VOUCHER_WEAK_IMPORT -#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT -#else -#define OS_VOUCHER_EXPORT OS_EXPORT -#endif - -__BEGIN_DECLS - -#if OS_VOUCHER_ACTIVITY_SPI - -/*! - * @group Voucher Activity SPI - * SPI intended for libtrace only - */ - -/*! - * @typedef voucher_activity_id_t - * - * @abstract - * Opaque activity identifier. - * - * @discussion - * Scalar value type, not reference counted. - */ -typedef uint64_t voucher_activity_id_t; - -/*! - * @enum voucher_activity_tracepoint_type_t - * - * @abstract - * Types of tracepoints. - */ -OS_ENUM(voucher_activity_tracepoint_type, uint8_t, - voucher_activity_tracepoint_type_release = (1u << 0), - voucher_activity_tracepoint_type_debug = (1u << 1), - voucher_activity_tracepoint_type_error = (1u << 6) | (1u << 0), - voucher_activity_tracepoint_type_fault = (1u << 7) | (1u << 6) | (1u << 0), -); - -/*! - * @enum voucher_activity_flag_t - * - * @abstract - * Flags to pass to voucher_activity_start/voucher_activity_start_with_location - */ -OS_ENUM(voucher_activity_flag, unsigned long, - voucher_activity_flag_default = 0, - voucher_activity_flag_force = 0x1, - voucher_activity_flag_debug = 0x2, - voucher_activity_flag_persist = 0x4, - voucher_activity_flag_stream = 0x8, -); - -/*! - * @typedef voucher_activity_trace_id_t - * - * @abstract - * Opaque tracepoint identifier. - */ -typedef uint64_t voucher_activity_trace_id_t; -static const uint8_t _voucher_activity_trace_id_type_shift = 40; -static const uint8_t _voucher_activity_trace_id_code_namespace_shift = 32; - -/*! - * @function voucher_activity_trace_id - * - * @abstract - * Return tracepoint identifier for specified arguments. - * - * @param type - * Tracepoint type from voucher_activity_tracepoint_type_t. - * - * @param code_namespace - * Namespace of 'code' argument. - * - * @param code - * Tracepoint code. - * - * @result - * Tracepoint identifier. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_INLINE OS_ALWAYS_INLINE -voucher_activity_trace_id_t -voucher_activity_trace_id(uint8_t type, uint8_t code_namespace, uint32_t code) -{ - return ((voucher_activity_trace_id_t)type << - _voucher_activity_trace_id_type_shift) | - ((voucher_activity_trace_id_t)code_namespace << - _voucher_activity_trace_id_code_namespace_shift) | - (voucher_activity_trace_id_t)code; -} - -/*! - * @function voucher_activity_start - * - * @abstract - * Creates a new activity identifier and marks the current thread as - * participating in the activity. - * - * @discussion - * As part of voucher transport, activities are automatically propagated by the - * system to other threads and processes (across IPC). - * - * Activities persist as long as any threads in any process are marked as - * participating. There may be many calls to voucher_activity_end() - * corresponding to one call to voucher_activity_start(). - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id(), intended for - * identification of the automatic tracepoint generated as part of creating the - * new activity. - * - * @param flags - * Pass voucher_activity_flag_force to indicate that existing activities - * on the current thread should not be inherited and that a new toplevel - * activity should be created. 
- * - * @result - * A new activity identifier. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW -voucher_activity_id_t -voucher_activity_start(voucher_activity_trace_id_t trace_id, - voucher_activity_flag_t flags); - -/*! - * @function voucher_activity_start_with_location - * - * @abstract - * Creates a new activity identifier and marks the current thread as - * participating in the activity. - * - * @discussion - * As part of voucher transport, activities are automatically propagated by the - * system to other threads and processes (across IPC). - * - * Activities persist as long as any threads in any process are marked as - * participating. There may be many calls to voucher_activity_end() - * corresponding to one call to voucher_activity_start_with_location(). - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id(), intended for - * identification of the automatic tracepoint generated as part of creating the - * new activity. - * - * @param location - * Location identifier for the automatic tracepoint generated as part of - * creating the new activity. - * - * @param flags - * Pass voucher_activity_flag_force to indicate that existing activities - * on the current thread should not be inherited and that a new toplevel - * activity should be created. - * - * @result - * A new activity identifier. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW -voucher_activity_id_t -voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, - uint64_t location, voucher_activity_flag_t flags); - -/*! - * @function voucher_activity_end - * - * @abstract - * Unmarks the current thread if it is marked as particpating in the activity - * with the specified identifier. - * - * @discussion - * Activities persist as long as any threads in any process are marked as - * participating. There may be many calls to voucher_activity_end() - * corresponding to one call to voucher_activity_start() or - * voucher_activity_start_with_location(). - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -void -voucher_activity_end(voucher_activity_id_t activity_id); - -/*! - * @function voucher_get_activities - * - * @abstract - * Returns the list of activity identifiers that the current thread is marked - * with. - * - * @param entries - * Pointer to an array of activity identifiers to be filled in. - * - * @param count - * Pointer to the requested number of activity identifiers. - * On output will be filled with the number of activities that are available. - * - * @result - * Number of activity identifiers written to 'entries' - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -unsigned int -voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count); - -/*! - * @group Voucher Activity Trace SPI - * SPI intended for libtrace only - */ - -/*! - * @function voucher_activity_get_namespace - * - * @abstract - * Returns the namespace of the current activity. - * - * @result - * The namespace of the current activity (if any). - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint8_t -voucher_activity_get_namespace(void); - -/*! - * @function voucher_activity_trace - * - * @abstract - * Add a tracepoint to trace buffer of the current activity. 
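[Example — not part of the patch] For historical reference, the SPI being deleted in this file (superseded by the new os/voucher_activity_private.h) was used roughly as follows; the namespace and code values are illustrative.

static void traced_work_removed_spi(void)
{
	// Compose a tracepoint id exactly as the deleted inline helper does:
	// type in bits 40+, namespace in bits 32-39, code in the low bits.
	voucher_activity_trace_id_t tid = voucher_activity_trace_id(
			voucher_activity_tracepoint_type_release,
			/* code_namespace */ 1, /* code */ 0x100);
	voucher_activity_id_t aid = voucher_activity_start(tid,
			voucher_activity_flag_default);
	// ... work here is attributed to the activity, and vouchers propagate
	// the attribution to other threads and across IPC ...
	voucher_activity_end(aid);
}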
- * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id() - * - * @param location - * Tracepoint location. - * - * @param buffer - * Pointer to packed buffer of tracepoint data. - * - * @param length - * Length of data at 'buffer'. - * - * @result - * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint64_t -voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, - void *buffer, size_t length); - -/*! - * @function voucher_activity_trace_strings - * - * @abstract - * Add a tracepoint with strings data to trace buffer of the current activity. - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id() - * - * @param location - * Tracepoint location. - * - * @param buffer - * Pointer to packed buffer of tracepoint data. - * - * @param length - * Length of data at 'buffer'. - * - * @param strings - * NULL-terminated array of strings data. - * - * @param string_lengths - * Array of string lengths (required to have the same number of elements as the - * 'strings' array): string_lengths[i] is the maximum number of characters to - * copy from strings[i], excluding the NUL-terminator (may be smaller than the - * length of the string present in strings[i]). - * - * @param total_strings_size - * Total size of all strings data to be copied from strings array (including - * all NUL-terminators). - * - * @result - * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint64_t -voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, - uint64_t location, void *buffer, size_t length, const char *strings[], - size_t string_lengths[], size_t total_strings_size); - -/*! - * @function voucher_activity_trace_args - * - * @abstract - * Add a tracepoint to trace buffer of the current activity, recording - * specified arguments passed in registers. - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id() - * - * @param location - * Tracepoint location. - * - * @param arg1 - * Argument to be recorded in tracepoint data. - * - * @param arg2 - * Argument to be recorded in tracepoint data. - * - * @param arg3 - * Argument to be recorded in tracepoint data. - * - * @param arg4 - * Argument to be recorded in tracepoint data. - * - * @result - * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint64_t -voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, - uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4); - -/*! - * @group Voucher Activity Mode SPI - * SPI intended for libtrace only - */ - -/*! - * @enum voucher_activity_mode_t - * - * @abstract - * Voucher activity mode. - * - * @discussion - * Configure at process start by setting the OS_ACTIVITY_MODE environment - * variable. - */ -OS_ENUM(voucher_activity_mode, unsigned long, - voucher_activity_mode_disable = 0, - voucher_activity_mode_release = (1u << 0), - voucher_activity_mode_debug = (1u << 1), - voucher_activity_mode_stream = (1u << 2), -); - -/*! - * @function voucher_activity_get_mode - * - * @abstract - * Return current mode of voucher activity subsystem. - * - * @result - * Value from voucher_activity_mode_t enum. 
- */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW -voucher_activity_mode_t -voucher_activity_get_mode(void); - -/*! - * @function voucher_activity_set_mode_4libtrace - * - * @abstract - * Set the current mode of voucher activity subsystem. - * - * @param mode - * The new mode. - * - * Note that the new mode will take effect soon, but not immediately. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -void -voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode); - -/*! - * @group Voucher Activity Metadata SPI - * SPI intended for libtrace only - */ - -/*! - * @function voucher_activity_get_metadata_buffer - * - * @abstract - * Return address and length of buffer in the process trace memory area - * reserved for libtrace metadata. - * - * @param length - * Pointer to size_t variable, filled with length of metadata buffer. - * - * @result - * Address of metadata buffer. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL -void* -voucher_activity_get_metadata_buffer(size_t *length); - -#endif // OS_VOUCHER_ACTIVITY_SPI - -#if OS_VOUCHER_ACTIVITY_BUFFER_SPI - -/*! - * @group Voucher Activity Tracepoint SPI - * SPI intended for diagnosticd only - */ - -OS_ENUM(_voucher_activity_tracepoint_flag, uint16_t, - _voucher_activity_trace_flag_buffer_empty = 0, - _voucher_activity_trace_flag_tracepoint = (1u << 0), - _voucher_activity_trace_flag_tracepoint_args = (1u << 1), - _voucher_activity_trace_flag_tracepoint_strings = (1u << 2), - _voucher_activity_trace_flag_wide_first = (1u << 6), - _voucher_activity_trace_flag_wide_second = (1u << 6) | (1u << 7), - _voucher_activity_trace_flag_start = (1u << 8), - _voucher_activity_trace_flag_end = (1u << 8) | (1u << 9), - _voucher_activity_trace_flag_libdispatch = (1u << 13), - _voucher_activity_trace_flag_activity = (1u << 14), - _voucher_activity_trace_flag_buffer_header = (1u << 15), -); - -// for tracepoints with _voucher_activity_trace_flag_libdispatch -OS_ENUM(_voucher_activity_tracepoint_namespace, uint8_t, - _voucher_activity_tracepoint_namespace_ipc = 0x1 -); -OS_ENUM(_voucher_activity_tracepoint_code, uint32_t, - _voucher_activity_tracepoint_namespace_ipc_send = 0x1, - _voucher_activity_tracepoint_namespace_ipc_receive = 0x2, -); - -typedef struct _voucher_activity_tracepoint_s { - uint16_t vat_flags; // voucher_activity_tracepoint_flag_t - uint8_t vat_type; // voucher_activity_tracepoint_type_t - uint8_t vat_namespace; // namespace for tracepoint code - uint32_t vat_code; // tracepoint code - uint64_t vat_thread; // pthread_t - uint64_t vat_timestamp; // absolute time - uint64_t vat_location; // tracepoint PC - union { - uint64_t vat_data[4]; // trace data - struct { - uint16_t vats_offset; // offset to string data (from buffer end) - uint8_t vats_data[30]; // trace data - } vat_stroff; // iff _vat_flag_tracepoint_strings present - }; -} *_voucher_activity_tracepoint_t; - -/*! 
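[Example — not part of the patch] A historical aside on the deleted layout: assuming the natural packing of the fields above, each record is 64 bytes, which is why the 4096-byte buffer defined below held exactly 64 tracepoints, the first doubling as the buffer header.

// 2+1+1+4 header bytes, three 8-byte words, and a 32-byte data union.
_Static_assert(sizeof(struct _voucher_activity_tracepoint_s) == 64,
		"the removed SPI assumed 64-byte tracepoint records");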
- * @group Voucher Activity Buffer Internals - * SPI intended for diagnosticd only - * Layout of structs is subject to change without notice - */ - -#include -#include -#include - -static const size_t _voucher_activity_buffer_size = 4096; -static const size_t _voucher_activity_tracepoints_per_buffer = - _voucher_activity_buffer_size / - sizeof(struct _voucher_activity_tracepoint_s); -static const size_t _voucher_activity_buffer_header_size = - sizeof(struct _voucher_activity_tracepoint_s); -static const size_t _voucher_activity_strings_header_size = 0; // TODO - -typedef uint8_t _voucher_activity_buffer_t[_voucher_activity_buffer_size]; - -static const size_t _voucher_activity_buffers_per_heap = 512; -typedef unsigned long _voucher_activity_bitmap_base_t; -static const size_t _voucher_activity_bits_per_bitmap_base_t = - 8 * sizeof(_voucher_activity_bitmap_base_t); -static const size_t _voucher_activity_bitmaps_per_heap = - _voucher_activity_buffers_per_heap / - _voucher_activity_bits_per_bitmap_base_t; -typedef _voucher_activity_bitmap_base_t - _voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap] - __attribute__((__aligned__(64))); - -struct _voucher_activity_self_metadata_s { - struct _voucher_activity_metadata_opaque_s *vasm_baseaddr; - _voucher_activity_bitmap_t volatile vam_buffer_bitmap; -}; - -typedef struct _voucher_activity_metadata_opaque_s { - _voucher_activity_buffer_t vam_client_metadata; - union { - struct _voucher_activity_self_metadata_s vam_self_metadata; - _voucher_activity_buffer_t vam_self_metadata_opaque; - }; -} *_voucher_activity_metadata_opaque_t; - -typedef os_lock_handoff_s _voucher_activity_lock_s; - -OS_ENUM(_voucher_activity_buffer_atomic_flags, uint8_t, - _voucher_activity_buffer_full = (1u << 0), - _voucher_activity_buffer_pushing = (1u << 1), -); - -typedef union { - uint64_t vabp_atomic_pos; - struct { - uint16_t vabp_refcnt; - uint8_t vabp_flags; - uint8_t vabp_unused; - uint16_t vabp_next_tracepoint_idx; - uint16_t vabp_string_offset; // offset from the _end_ of the buffer - } vabp_pos; -} _voucher_activity_buffer_position_u; - -// must match layout of _voucher_activity_tracepoint_s -typedef struct _voucher_activity_buffer_header_s { - uint16_t vabh_flags; // _voucher_activity_trace_flag_buffer_header - uint8_t vat_type; - uint8_t vat_namespace; - uint32_t vat_code; - uint64_t vat_thread; - uint64_t vat_timestamp; - uint64_t vat_location; - voucher_activity_id_t vabh_activity_id; - _voucher_activity_buffer_position_u volatile vabh_pos; - TAILQ_ENTRY(_voucher_activity_buffer_header_s) vabh_list; -} *_voucher_activity_buffer_header_t; - -/*! - * @enum _voucher_activity_buffer_hook_reason - * - * @constant _voucher_activity_buffer_hook_reason_full - * Specified activity buffer is full. - * Will be reported reused or freed later. - * - * @constant _voucher_activity_buffer_hook_reason_reuse - * Specified activity buffer is about to be reused. - * Was previously reported as full. - * - * @constant _voucher_activity_buffer_hook_reason_free - * Specified activity buffer is about to be freed. - * May have been previously reported as full or may be only partially filled. - */ -typedef enum _voucher_activity_buffer_hook_reason { - _voucher_activity_buffer_hook_reason_full = 0x1, - _voucher_activity_buffer_hook_reason_reuse = 0x2, - _voucher_activity_buffer_hook_reason_free = 0x4, -} _voucher_activity_buffer_hook_reason; - -/*! 
- * @typedef _voucher_activity_buffer_hook_t - * - * @abstract - * A function pointer called when an activity buffer is full or being freed. - * NOTE: callbacks occur under an activity-wide handoff lock and work done - * inside the callback function must not block or otherwise cause that lock to - * be held for a extended period of time. - * - * @param reason - * Reason for callback. - * - * @param buffer - * Pointer to activity buffer. - */ -typedef void (*_voucher_activity_buffer_hook_t)( - _voucher_activity_buffer_hook_reason reason, - _voucher_activity_buffer_header_t buffer); - -/*! - * @function voucher_activity_buffer_hook_install_4libtrace - * - * @abstract - * Install activity buffer hook callback function. - * Must be called from the libtrace initializer, and at most once. - * - * @param hook - * Hook function to install. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) -OS_VOUCHER_EXPORT OS_NOTHROW -void -voucher_activity_buffer_hook_install_4libtrace( - _voucher_activity_buffer_hook_t hook); - -#endif // OS_VOUCHER_ACTIVITY_BUFFER_SPI - -__END_DECLS - -#endif // __OS_VOUCHER_ACTIVITY_PRIVATE__ diff --git a/src/Makefile.am b/src/Makefile.am index 630a480..c417aec 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -2,12 +2,19 @@ # # +if HAVE_SWIFT +swiftlibdir=${prefix}/lib/swift/linux +swiftlib_LTLIBRARIES=libdispatch.la +else lib_LTLIBRARIES=libdispatch.la +endif libdispatch_la_SOURCES= \ + allocator.c \ apply.c \ benchmark.c \ data.c \ + introspection.c \ init.c \ io.c \ object.c \ @@ -17,10 +24,14 @@ libdispatch_la_SOURCES= \ source.c \ time.c \ transform.c \ + voucher.c \ protocol.defs \ provider.d \ + allocator_internal.h \ data_internal.h \ + inline_internal.h \ internal.h \ + introspection_internal.h \ io_internal.h \ object_internal.h \ queue_internal.h \ @@ -28,49 +39,140 @@ libdispatch_la_SOURCES= \ shims.h \ source_internal.h \ trace.h \ + voucher_internal.h \ + firehose/firehose_internal.h \ shims/atomic.h \ + shims/atomic_sfb.h \ shims/getprogname.h \ shims/hw_config.h \ + shims/linux_stubs.c \ + shims/linux_stubs.h \ + shims/lock.c \ + shims/lock.h \ shims/perfmon.h \ shims/time.h \ - shims/tsd.h + shims/tsd.h \ + shims/yield.h + +EXTRA_libdispatch_la_SOURCES= +EXTRA_libdispatch_la_DEPENDENCIES= -AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) \ - -I$(top_srcdir)/private -I$(top_srcdir)/os +AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private -AM_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ - $(MARCH_FLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) +DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ + $(MARCH_FLAGS) $(KQUEUE_CFLAGS) $(BSD_OVERLAY_CFLAGS) +AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) +AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) +AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) +AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) + +if BUILD_OWN_PTHREAD_WORKQUEUES + PTHREAD_WORKQUEUE_LIBS=$(top_builddir)/libpwq/libpthread_workqueue.la + PTHREAD_WORKQUEUE_CFLAGS=-I$(top_srcdir)/libpwq/include +else +if HAVE_PTHREAD_WORKQUEUES + PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue +endif +endif libdispatch_la_LDFLAGS=-avoid-version +libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) if HAVE_DARWIN_LD libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \ - -Wl,-current_version,$(VERSION) -Wl,-dead_strip + -Wl,-current_version,$(VERSION) -Wl,-dead_strip \ + -Wl,-alias_list,$(top_srcdir)/xcodeconfig/libdispatch.aliases 
+endif + +if USE_GOLD_LINKER +libdispatch_la_LDFLAGS+=-Xcompiler -fuse-ld=gold endif if USE_OBJC -libdispatch_la_SOURCES+=object.m -libdispatch_la_OBJCFLAGS=$(AM_CFLAGS) -fobjc-gc +libdispatch_la_SOURCES+=block.cpp data.m object.m +libdispatch_la_OBJCFLAGS=$(AM_OBJCFLAGS) -Wno-switch -fobjc-gc +libdispatch_la_CXXFLAGS=$(AM_CXXFLAGS) -std=gnu++11 -fno-exceptions libdispatch_la_LDFLAGS+=-Wl,-upward-lobjc -Wl,-upward-lauto \ - -Wl,-order_file,$(top_srcdir)/xcodeconfig/libdispatch.order \ - -Wl,-alias_list,$(top_srcdir)/xcodeconfig/libdispatch.aliases \ - -Wl,-unexported_symbols_list,$(top_srcdir)/xcodeconfig/libdispatch.unexport + -Wl,-order_file,$(top_srcdir)/xcodeconfig/libdispatch.order +else +libdispatch_la_SOURCES+=block.cpp +libdispatch_la_CXXFLAGS=$(AM_CXXFLAGS) -std=gnu++11 -fno-exceptions endif -CLEANFILES= -DISTCLEANFILES=System objc - if USE_MIG -BUILT_SOURCES= \ +MIG_SOURCES= \ protocolUser.c \ protocol.h \ protocolServer.c \ protocolServer.h -nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES) -CLEANFILES+=$(BUILT_SOURCES) - %User.c %.h %Server.c %Server.h: $(abs_srcdir)/%.defs $(MIG) -user $*User.c -header $*.h \ -server $*Server.c -sheader $*Server.h $< endif + +if USE_DTRACE +DTRACE_SOURCES=provider.h + +%.h: $(abs_srcdir)/%.d + $(DTRACE) -h -s $< -o $@ +endif + +if HAVE_SWIFT +SWIFT_SRC_FILES=\ + swift/Block.swift \ + swift/Data.swift \ + swift/Dispatch.swift \ + swift/IO.swift \ + swift/Private.swift \ + swift/Queue.swift \ + swift/Source.swift \ + swift/Time.swift \ + swift/Wrapper.swift + +SWIFT_ABS_SRC_FILES = $(SWIFT_SRC_FILES:%=$(abs_srcdir)/%) +SWIFT_OBJ_FILES = $(SWIFT_SRC_FILES:%.swift=$(abs_builddir)/%.o) + +libdispatch_la_SOURCES+=swift/DispatchStubs.cc +EXTRA_libdispatch_la_SOURCES+=$(SWIFT_SRC_FILES) + +EXTRA_libdispatch_la_DEPENDENCIES+=$(SWIFT_OBJ_FILES) $(abs_builddir)/swift/Dispatch.swiftmodule +libdispatch_la_LIBADD+=$(SWIFT_OBJ_FILES) + +SWIFT_GEN_FILES= \ + $(abs_builddir)/swift/Dispatch.swiftmodule \ + $(abs_builddir)/swift/Dispatch.swiftdoc \ + $(SWIFT_OBJ_FILES) \ + $(SWIFT_OBJ_FILES:%=%.d) \ + $(SWIFT_OBJ_FILES:%=%.swiftdeps) \ + $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \ + $(SWIFT_OBJ_FILES:%=%.~partial.swiftdoc) \ + $(SWIFT_OBJ_FILES:%=%.~partial.swiftdeps) + +SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.map -I$(abs_top_srcdir) -Xcc -fblocks + +$(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift + $(SWIFTC) -frontend -c $(SWIFT_ABS_SRC_FILES) -primary-file $< \ + $(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \ + -o $@ -emit-module-path $@.~partial.swiftmodule \ + -emit-module-doc-path $@.~partial.swiftdoc -emit-dependencies-path $@.d \ + -emit-reference-dependencies-path $@.swiftdeps \ + -module-cache-path $(top_builddir) + +$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES) + $(SWIFTC) -frontend -emit-module $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \ + $(SWIFTC_FLAGS) -module-cache-path $(top_builddir) -module-link-name dispatch \ + -o $@ -emit-module-doc-path $(@:%.swiftmodule=%.swiftdoc) + +swiftmoddir=${prefix}/lib/swift/linux/${build_cpu} +swiftmod_HEADERS=\ + $(abs_builddir)/swift/Dispatch.swiftmodule \ + $(abs_builddir)/swift/Dispatch.swiftdoc + +endif + +BUILT_SOURCES=$(MIG_SOURCES) $(DTRACE_SOURCES) +nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES) +CLEANFILES=$(BUILT_SOURCES) $(SWIFT_GEN_FILES) +DISTCLEANFILES=pthread_machdep.h pthread System mach objc + diff --git a/src/allocator.c b/src/allocator.c index d6db272..a3a8c65 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ 
-35,7 +35,7 @@ // once to non-zero. They are not marked volatile. There is a small risk that // some thread may see a stale 0 value and enter try_create_heap. It will // waste some time in an allocate syscall, but eventually it will try to -// cmpxchg, expecting to overwite 0 with an address. This will fail +// cmpxchg, expecting to overwrite 0 with an address. This will fail // (because another thread already did this), the thread will deallocate the // unused allocated memory, and continue with the new value. // @@ -178,11 +178,11 @@ madvisable_page_base_for_continuation(dispatch_continuation_t c) #if DISPATCH_DEBUG struct dispatch_magazine_s *m = magazine_for_continuation(c); if (slowpath(page_base < (void *)&m->conts)) { - DISPATCH_CRASH("madvisable continuation too low"); + DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too low"); } if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1] [BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) { - DISPATCH_CRASH("madvisable continuation too high"); + DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too high"); } #endif return page_base; @@ -228,7 +228,7 @@ bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap, // load from it before storing, so we don't need to guard // against reordering those loads. dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long)); - return dispatch_atomic_set_first_bit(bitmap,max_index); + return os_atomic_set_first_bit(bitmap, max_index); } DISPATCH_ALWAYS_INLINE @@ -255,12 +255,13 @@ bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index, if (exclusively == CLEAR_EXCLUSIVELY) { if (slowpath((*bitmap & mask) == 0)) { - DISPATCH_CRASH("Corruption: failed to clear bit exclusively"); + DISPATCH_CLIENT_CRASH(*bitmap, + "Corruption: failed to clear bit exclusively"); } } // and-and-fetch - b = dispatch_atomic_and(bitmap, ~mask, release); + b = os_atomic_and(bitmap, ~mask, release); return b == 0; } @@ -284,7 +285,7 @@ mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap, // don't protect access to other memory. s = s_new; s_masked = s | mask; - if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || + if (os_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || !bitmap_is_full(*bitmap)) { return; } @@ -358,8 +359,7 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT))) { if (kr != KERN_NO_SPACE) { - (void)dispatch_assume_zero(kr); - DISPATCH_CLIENT_CRASH("Could not allocate heap"); + DISPATCH_CLIENT_CRASH(kr, "Could not allocate heap"); } _dispatch_temporary_resource_shortage(); vm_addr = vm_page_size; @@ -422,7 +422,7 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) #endif // DISPATCH_DEBUG #endif // HAVE_MACH - if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region, + if (!os_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region, relaxed)) { // If we lost the race to link in the new region, unmap the whole thing. #if DISPATCH_DEBUG @@ -550,7 +550,7 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c) // take ownership of them all. int last_locked = 0; do { - if (!dispatch_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0), + if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0), BITMAP_ALL_ONES, relaxed)) { // We didn't get one; since there is a cont allocated in // the page, we can't madvise. Give up and unlock all. 
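[Example — not part of the patch] The comment at the top of this file (whose "overwite" typo the hunk above corrects) describes a first-writer-wins initialization race; here is a standalone sketch of that pattern, with C11 atomics and malloc standing in for the internal os_atomic_* macros and the vm allocation.

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) heap_ptr;

static void *get_heap(size_t size)
{
	void *h = atomic_load_explicit(&heap_ptr, memory_order_relaxed);
	if (h) return h;
	// Possibly a stale NULL: allocate optimistically, then publish.
	void *fresh = malloc(size);
	void *expected = NULL;
	if (!atomic_compare_exchange_strong_explicit(&heap_ptr, &expected,
			fresh, memory_order_relaxed, memory_order_relaxed)) {
		// Lost the race: discard our allocation, use the winner's.
		free(fresh);
		fresh = expected;
	}
	return fresh;
}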
@@ -573,7 +573,7 @@ unlock: page_bitmaps[--last_locked] = BITMAP_C(0); } if (last_locked) { - dispatch_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed); + os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed); } return; } @@ -676,7 +676,7 @@ _dispatch_malloc_init(void) malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); } #else -static inline void _dispatch_malloc_init(void) {} +#define _dispatch_malloc_init() ((void)0) #endif // DISPATCH_USE_MALLOCZONE static dispatch_continuation_t @@ -769,4 +769,3 @@ _dispatch_continuation_free_to_heap(dispatch_continuation_t c) return _dispatch_malloc_continuation_free(c); #endif } - diff --git a/src/allocator_internal.h b/src/allocator_internal.h index 893ba82..abe4a1d 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -33,14 +33,8 @@ #endif #endif -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 -#undef DISPATCH_USE_NANOZONE -#define DISPATCH_USE_NANOZONE 0 -#endif #ifndef DISPATCH_USE_NANOZONE -#if TARGET_OS_MAC && defined(__LP64__) && \ - (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1090 || \ - __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000) +#if TARGET_OS_MAC && defined(__LP64__) #define DISPATCH_USE_NANOZONE 1 #endif #endif @@ -218,7 +212,7 @@ struct dispatch_magazine_header_s { // Link to the next heap in the chain. Only used in magazine 0's header dispatch_heap_t dh_next; - // Points to the first bitmap in the page where this CPU succesfully + // Points to the first bitmap in the page where this CPU successfully // allocated a continuation last time. Only used in the first heap. bitmap_t *last_found_page; }; diff --git a/src/apply.c b/src/apply.c index e0ab2c3..e051a16 100644 --- a/src/apply.c +++ b/src/apply.c @@ -21,16 +21,20 @@ #include "internal.h" typedef void (*dispatch_apply_function_t)(void *, size_t); +static char const * const _dispatch_apply_key = "apply"; + +#define DISPATCH_APPLY_INVOKE_REDIRECT 0x1 +#define DISPATCH_APPLY_INVOKE_WAIT 0x2 DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_invoke2(void *ctxt, bool redirect) +_dispatch_apply_invoke2(void *ctxt, long invoke_flags) { dispatch_apply_t da = (dispatch_apply_t)ctxt; size_t const iter = da->da_iterations; size_t idx, done = 0; - idx = dispatch_atomic_inc_orig2o(da, da_index, acquire); + idx = os_atomic_inc_orig2o(da, da_index, acquire); if (!fastpath(idx < iter)) goto out; // da_dc is only safe to access once the 'index lock' has been acquired @@ -41,38 +45,51 @@ _dispatch_apply_invoke2(void *ctxt, bool redirect) _dispatch_perfmon_workitem_dec(); // this unit executes many items // Handle nested dispatch_apply rdar://problem/9294578 - size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); - _dispatch_thread_setspecific(dispatch_apply_key, (void*)da->da_nested); + dispatch_thread_context_s apply_ctxt = { + .dtc_key = _dispatch_apply_key, + .dtc_apply_nesting = da->da_nested, + }; + _dispatch_thread_context_push(&apply_ctxt); - dispatch_queue_t old_dq; + dispatch_thread_frame_s dtf; pthread_priority_t old_dp; - if (redirect) { - old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - old_dp = _dispatch_set_defaultpriority(dq->dq_priority); + if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { + _dispatch_thread_frame_push(&dtf, dq); + old_dp = _dispatch_set_defaultpriority(dq->dq_priority, NULL); } + dispatch_invoke_flags_t flags = da->da_flags; // Striding is the responsibility of the caller. 
do { - _dispatch_client_callout2(da_ctxt, idx, func); - _dispatch_perfmon_workitem_inc(); - done++; - idx = dispatch_atomic_inc_orig2o(da, da_index, relaxed); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_client_callout2(da_ctxt, idx, func); + _dispatch_perfmon_workitem_inc(); + done++; + idx = os_atomic_inc_orig2o(da, da_index, relaxed); + }); } while (fastpath(idx < iter)); - if (redirect) { + if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_thread_frame_pop(&dtf); } - _dispatch_thread_setspecific(dispatch_apply_key, (void*)nested); + + _dispatch_thread_context_pop(&apply_ctxt); // The thread that finished the last workitem wakes up the possibly waiting // thread that called dispatch_apply. They could be one and the same. - if (!dispatch_atomic_sub2o(da, da_todo, done, release)) { - _dispatch_thread_semaphore_signal(da->da_sema); + if (!os_atomic_sub2o(da, da_todo, done, release)) { + _dispatch_thread_event_signal(&da->da_event); } out: - if (dispatch_atomic_dec2o(da, da_thr_cnt, release) == 0) { + if (invoke_flags & DISPATCH_APPLY_INVOKE_WAIT) { + _dispatch_thread_event_wait(&da->da_event); + _dispatch_thread_event_destroy(&da->da_event); + } + if (os_atomic_dec2o(da, da_thr_cnt, release) == 0) { +#if DISPATCH_INTROSPECTION + _dispatch_continuation_free(da->da_dc); +#endif _dispatch_continuation_free((dispatch_continuation_t)da); } } @@ -81,30 +98,59 @@ DISPATCH_NOINLINE void _dispatch_apply_invoke(void *ctxt) { - _dispatch_apply_invoke2(ctxt, false); + _dispatch_apply_invoke2(ctxt, 0); +} + +DISPATCH_NOINLINE +static void +_dispatch_apply_invoke_and_wait(void *ctxt) +{ + _dispatch_apply_invoke2(ctxt, DISPATCH_APPLY_INVOKE_WAIT); + _dispatch_perfmon_workitem_inc(); } DISPATCH_NOINLINE void _dispatch_apply_redirect_invoke(void *ctxt) { - _dispatch_apply_invoke2(ctxt, true); + _dispatch_apply_invoke2(ctxt, DISPATCH_APPLY_INVOKE_REDIRECT); } +DISPATCH_ALWAYS_INLINE +static inline dispatch_invoke_flags_t +_dispatch_apply_autorelease_frequency(dispatch_queue_t dq) +{ + dispatch_invoke_flags_t qaf = 0; + + while (dq && !qaf) { + qaf = _dispatch_queue_autorelease_frequency(dq); + dq = slowpath(dq->do_targetq); + } + return qaf; +} + +DISPATCH_NOINLINE static void _dispatch_apply_serial(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; dispatch_continuation_t dc = da->da_dc; size_t const iter = da->da_iterations; + dispatch_invoke_flags_t flags; size_t idx = 0; _dispatch_perfmon_workitem_dec(); // this unit executes many items + flags = _dispatch_apply_autorelease_frequency(dc->dc_data); do { - _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); - _dispatch_perfmon_workitem_inc(); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); + _dispatch_perfmon_workitem_inc(); + }); } while (++idx < iter); +#if DISPATCH_INTROSPECTION + _dispatch_continuation_free(da->da_dc); +#endif _dispatch_continuation_free((dispatch_continuation_t)da); } @@ -123,12 +169,9 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, for (i = 0; i < continuation_cnt; i++) { dispatch_continuation_t next = _dispatch_continuation_alloc(); - next->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - next->dc_func = func; - next->dc_ctxt = da; - _dispatch_continuation_voucher_set(next, 0); - _dispatch_continuation_priority_set(next, 0, 0); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + 
_dispatch_continuation_init_f(next, dq, da, func, 0, 0, dc_flags); next->do_next = head; head = next; @@ -137,49 +180,47 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, } } - _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - da->da_sema = sema; + _dispatch_thread_event_init(&da->da_event); _dispatch_queue_push_list(dq, head, tail, head->dc_priority, continuation_cnt); // Call the first element directly - _dispatch_apply_invoke(da); - _dispatch_perfmon_workitem_inc(); - - _dispatch_thread_semaphore_wait(sema); - _dispatch_put_thread_semaphore(sema); - + _dispatch_apply_invoke_and_wait(da); } +DISPATCH_NOINLINE static void _dispatch_apply_redirect(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; - uint32_t da_width = 2 * (da->da_thr_cnt - 1); + uint32_t da_width = da->da_thr_cnt - 1; dispatch_queue_t dq = da->da_dc->dc_data, rq = dq, tq; do { - uint32_t running, width = rq->dq_width; - running = dispatch_atomic_add2o(rq, dq_running, da_width, relaxed); - if (slowpath(running > width)) { - uint32_t excess = width > 1 ? running - width : da_width; - for (tq = dq; 1; tq = tq->do_targetq) { - (void)dispatch_atomic_sub2o(tq, dq_running, excess, relaxed); - if (tq == rq) { - break; - } + uint32_t width = _dispatch_queue_try_reserve_apply_width(rq, da_width); + + if (slowpath(da_width > width)) { + uint32_t excess = da_width - width; + for (tq = dq; tq != rq; tq = tq->do_targetq) { + _dispatch_queue_relinquish_width(tq, excess); } da_width -= excess; if (slowpath(!da_width)) { return _dispatch_apply_serial(da); } - da->da_thr_cnt -= excess / 2; + da->da_thr_cnt -= excess; + } + if (!da->da_flags) { + // find first queue in descending target queue order that has + // an autorelease frequency set, and use that as the frequency for + // this continuation. + da->da_flags = _dispatch_queue_autorelease_frequency(dq); } rq = rq->do_targetq; } while (slowpath(rq->do_targetq)); _dispatch_apply_f2(rq, da, _dispatch_apply_redirect_invoke); do { - (void)dispatch_atomic_sub2o(dq, dq_running, da_width, relaxed); + _dispatch_queue_relinquish_width(dq, da_width); dq = dq->do_targetq; } while (slowpath(dq->do_targetq)); } @@ -195,7 +236,10 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, return; } uint32_t thr_cnt = dispatch_hw_config(active_cpus); - size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); + dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); + size_t nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; + dispatch_queue_t old_dq = _dispatch_queue_get_current(); + if (!slowpath(nested)) { nested = iterations; } else { @@ -206,9 +250,17 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, if (iterations < thr_cnt) { thr_cnt = (uint32_t)iterations; } + if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { + dq = old_dq ? 
old_dq : _dispatch_get_root_queue( + _DISPATCH_QOS_CLASS_DEFAULT, false); + while (slowpath(dq->do_targetq)) { + dq = dq->do_targetq; + } + } struct dispatch_continuation_s dc = { .dc_func = (void*)func, .dc_ctxt = ctxt, + .dc_data = dq, }; dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc(); da->da_index = 0; @@ -216,57 +268,35 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, da->da_iterations = iterations; da->da_nested = nested; da->da_thr_cnt = thr_cnt; +#if DISPATCH_INTROSPECTION + da->da_dc = _dispatch_continuation_alloc(); + *da->da_dc = dc; +#else da->da_dc = &dc; +#endif + da->da_flags = 0; - dispatch_queue_t old_dq; - old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); - if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { - dq = old_dq ? old_dq : _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false); - while (slowpath(dq->do_targetq)) { - dq = dq->do_targetq; - } - } - if (slowpath(dq->dq_width <= 2) || slowpath(thr_cnt <= 1)) { + if (slowpath(dq->dq_width == 1) || slowpath(thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } if (slowpath(dq->do_targetq)) { if (slowpath(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } else { - dc.dc_data = dq; return dispatch_sync_f(dq, da, _dispatch_apply_redirect); } } - _dispatch_thread_setspecific(dispatch_queue_key, dq); + + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); _dispatch_apply_f2(dq, da, _dispatch_apply_invoke); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_thread_frame_pop(&dtf); } #ifdef __BLOCKS__ -#if DISPATCH_COCOA_COMPAT -DISPATCH_NOINLINE -static void -_dispatch_apply_slow(size_t iterations, dispatch_queue_t dq, - void (^work)(size_t)) -{ - dispatch_block_t bb = _dispatch_Block_copy((void *)work); - dispatch_apply_f(iterations, dq, bb, - (dispatch_apply_function_t)_dispatch_Block_invoke(bb)); - Block_release(bb); -} -#endif - void dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) { -#if DISPATCH_COCOA_COMPAT - // Under GC, blocks transferred to other threads must be Block_copy()ed - // rdar://problem/7455071 - if (dispatch_begin_thread_4GC) { - return _dispatch_apply_slow(iterations, dq, work); - } -#endif dispatch_apply_f(iterations, dq, work, (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } diff --git a/src/block.cpp b/src/block.cpp index 83fff54..3060a2a 100644 --- a/src/block.cpp +++ b/src/block.cpp @@ -82,6 +82,9 @@ struct dispatch_block_private_data_s { if (!dbpd_performed) dispatch_group_leave(dbpd_group); ((void (*)(dispatch_group_t))dispatch_release)(dbpd_group); } + if (dbpd_queue) { + ((void (*)(os_mpsc_queue_t))_os_object_release_internal)(dbpd_queue); + } if (dbpd_block) Block_release(dbpd_block); if (dbpd_voucher) voucher_release(dbpd_voucher); } @@ -95,7 +98,7 @@ _dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, return _dispatch_Block_copy(^{ // Capture stack object: invokes copy constructor (17094902) (void)dbpds; - _dispatch_block_invoke(&dbpds); + _dispatch_block_invoke_direct(&dbpds); }); } @@ -103,7 +106,11 @@ extern "C" { // The compiler hides the name of the function it generates, and changes it if // we try to reference it directly, but the linker still sees it. 
extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) +#ifdef __linux__ + asm("___dispatch_block_create_block_invoke"); +#else asm("____dispatch_block_create_block_invoke"); +#endif void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; } diff --git a/src/data.c b/src/data.c index e65399f..6443289 100644 --- a/src/data.c +++ b/src/data.c @@ -92,7 +92,7 @@ ******************************************************************************* */ -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA #define _dispatch_data_retain(x) _dispatch_objc_retain(x) #define _dispatch_data_release(x) _dispatch_objc_release(x) #else @@ -101,26 +101,33 @@ #endif const dispatch_block_t _dispatch_data_destructor_free = ^{ - DISPATCH_CRASH("free destructor called"); + DISPATCH_INTERNAL_CRASH(0, "free destructor called"); }; const dispatch_block_t _dispatch_data_destructor_none = ^{ - DISPATCH_CRASH("none destructor called"); + DISPATCH_INTERNAL_CRASH(0, "none destructor called"); }; +#if !HAVE_MACH +const dispatch_block_t _dispatch_data_destructor_munmap = ^{ + DISPATCH_INTERNAL_CRASH(0, "munmap destructor called"); +}; +#else +// _dispatch_data_destructor_munmap is a linker alias to the following const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{ - DISPATCH_CRASH("vmdeallocate destructor called"); + DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called"); }; +#endif const dispatch_block_t _dispatch_data_destructor_inline = ^{ - DISPATCH_CRASH("inline destructor called"); + DISPATCH_INTERNAL_CRASH(0, "inline destructor called"); }; struct dispatch_data_s _dispatch_data_empty = { +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA .do_vtable = DISPATCH_DATA_EMPTY_CLASS, -#if !USE_OBJC - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, +#else + DISPATCH_GLOBAL_OBJECT_HEADER(data), .do_next = DISPATCH_OBJECT_LISTLESS, #endif }; @@ -129,11 +136,17 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_data_t _dispatch_data_alloc(size_t n, size_t extra) { - dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS, - sizeof(struct dispatch_data_s) + extra + - n * sizeof(range_record)); + dispatch_data_t data; + size_t size; + + if (os_mul_and_add_overflow(n, sizeof(range_record), + sizeof(struct dispatch_data_s) + extra, &size)) { + return DISPATCH_OUT_OF_MEMORY; + } + + data = _dispatch_alloc(DISPATCH_DATA_CLASS, size); data->num_records = n; -#if !USE_OBJC +#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA data->do_targetq = dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); data->do_next = DISPATCH_OBJECT_LISTLESS; @@ -149,10 +162,12 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, free((void*)buffer); } else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) { // do nothing +#if HAVE_MACH } else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) { mach_vm_size_t vm_size = size; mach_vm_address_t vm_addr = (uintptr_t)buffer; mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); +#endif } else { if (!queue) { queue = dispatch_get_global_queue( @@ -213,7 +228,7 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, // copied. 
data_buf = malloc(size); if (slowpath(!data_buf)) { - return NULL; + return DISPATCH_OUT_OF_MEMORY; } buffer = memcpy(data_buf, buffer, size); data = _dispatch_data_alloc(0, 0); @@ -239,7 +254,9 @@ dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue, if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT && destructor != DISPATCH_DATA_DESTRUCTOR_FREE && destructor != DISPATCH_DATA_DESTRUCTOR_NONE && +#if HAVE_MACH destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE && +#endif destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) { destructor = ^{ destructor_function((void*)buffer); }; } @@ -319,6 +336,8 @@ dispatch_data_t dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) { dispatch_data_t data; + size_t n; + if (!dd1->size) { _dispatch_data_retain(dd2); return dd2; @@ -328,8 +347,11 @@ dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) return dd1; } - data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) + - _dispatch_data_num_records(dd2), 0); + if (os_add_overflow(_dispatch_data_num_records(dd1), + _dispatch_data_num_records(dd2), &n)) { + return DISPATCH_OUT_OF_MEMORY; + } + data = _dispatch_data_alloc(n, 0); data->size = dd1->size + dd2->size; // Copy the constituent records into the newly created data object // Reference leaf objects as sub-objects @@ -361,9 +383,10 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, size_t length) { dispatch_data_t data; + if (offset >= dd->size || !length) { return dispatch_data_empty; - } else if ((offset + length) > dd->size) { + } else if (length > dd->size - offset) { length = dd->size - offset; } else if (length == dd->size) { _dispatch_data_retain(dd); @@ -398,8 +421,8 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // Crashing here indicates memory corruption of passed in data object if (slowpath(i >= dd_num_records)) { - DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); - return NULL; + DISPATCH_INTERNAL_CRASH(i, + "dispatch_data_create_subrange out of bounds"); } // if everything is from a single dispatch data object, avoid boxing it @@ -428,8 +451,8 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // Crashing here indicates memory corruption of passed in data object if (slowpath(i + count >= dd_num_records)) { - DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); - return NULL; + DISPATCH_INTERNAL_CRASH(i + count, + "dispatch_data_create_subrange out of bounds"); } } } @@ -529,7 +552,7 @@ _dispatch_data_get_flattened_bytes(dispatch_data_t dd) void *flatbuf = _dispatch_data_flatten(dd); if (fastpath(flatbuf)) { // we need a release so that readers see the content of the buffer - if (slowpath(!dispatch_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, + if (slowpath(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, &buffer, release))) { free(flatbuf); } else { @@ -651,7 +674,8 @@ _dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size, return _dispatch_data_copy_region(dd, from, length, location, offset_ptr); } - DISPATCH_CRASH("dispatch_data_copy_region out of bounds"); + DISPATCH_INTERNAL_CRASH(*offset_ptr+offset, + "dispatch_data_copy_region out of bounds"); } // Returs either a leaf object or an object composed of a single leaf object diff --git a/src/data.m b/src/data.m index 92bc1e2..190b1ed 100644 --- a/src/data.m +++ b/src/data.m @@ -20,18 +20,15 @@ #include "internal.h" -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA -#if !__OBJC2__ -#error "Cannot build with legacy ObjC runtime" 
-#endif #if _OS_OBJECT_OBJC_ARC #error "Cannot build with ARC" #endif #include -@interface DISPATCH_CLASS(data) () +@interface DISPATCH_CLASS(data) () @property (readonly) NSUInteger length; @property (readonly) const void *bytes NS_RETURNS_INNER_POINTER; @@ -94,10 +91,6 @@ _dispatch_data_objc_dispose(dealloc); } -- (void)finalize { - _dispatch_data_objc_dispose(finalize); -} - - (BOOL)_bytesAreVM { struct dispatch_data_s *dd = (void*)self; return dd->destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE; @@ -122,7 +115,7 @@ struct dispatch_data_s *dd = (void*)self; _os_object_retain_internal((_os_object_t)queue); dispatch_queue_t prev; - prev = dispatch_atomic_xchg2o(dd, do_targetq, queue, release); + prev = os_atomic_xchg2o(dd, do_targetq, queue, release); if (prev) _os_object_release_internal((_os_object_t)prev); } @@ -151,6 +144,15 @@ return !dd->size || _dispatch_data_map_direct(dd, 0, NULL, NULL) != NULL; } +- (void)_suspend { +} + +- (void)_resume { +} + +- (void)_activate { +} + @end @implementation DISPATCH_CLASS(data_empty) @@ -191,6 +193,15 @@ - (void)_setTargetQueue:(dispatch_queue_t) DISPATCH_UNUSED queue { } +- (void)_suspend { +} + +- (void)_resume { +} + +- (void)_activate { +} + @end #endif // USE_OBJC diff --git a/src/data_internal.h b/src/data_internal.h index 40a780c..bbef21e 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -38,30 +38,32 @@ typedef struct range_record_s { size_t length; } range_record; -#if USE_OBJC -#if OS_OBJECT_USE_OBJC -@interface DISPATCH_CLASS(data) : NSObject -@end +#if OS_OBJECT_HAVE_OBJC2 +#define DISPATCH_DATA_IS_BRIDGED_TO_NSDATA 1 +#else +#define DISPATCH_DATA_IS_BRIDGED_TO_NSDATA 0 #endif + +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA DISPATCH_OBJC_CLASS_DECL(data); DISPATCH_OBJC_CLASS_DECL(data_empty); -#define DISPATCH_DATA_CLASS DISPATCH_OBJC_CLASS(data) -#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_OBJC_CLASS(data_empty) -#else // USE_OBJC +_OS_OBJECT_DECL_PROTOCOL(dispatch_data, dispatch_object); +#define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) +#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_VTABLE(data_empty) +#else DISPATCH_CLASS_DECL(data); #define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) -#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_VTABLE(data) -#endif // USE_OBJC +#endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA struct dispatch_data_s { -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA const void *do_vtable; dispatch_queue_t do_targetq; void *ctxt; void *finalizer; -#else // USE_OBJC - DISPATCH_STRUCT_HEADER(data); -#endif // USE_OBJC +#else + DISPATCH_OBJECT_HEADER(data); +#endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA const void *buf; dispatch_block_t destructor; size_t size, num_records; @@ -79,7 +81,7 @@ _dispatch_data_leaf(struct dispatch_data_s *dd) * This is about the number of records required to hold that dispatch data * if it's not a leaf. 
Callers either want that value, or have to special * case the case when the dispatch data *is* a leaf before (and that the actual - * embeded record count of that dispatch data is 0) + * embedded record count of that dispatch data is 0) */ DISPATCH_ALWAYS_INLINE static inline size_t @@ -106,10 +108,8 @@ const void* _dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd); #if !defined(__cplusplus) -#if !__OBJC2__ -const dispatch_block_t _dispatch_data_destructor_inline; +extern const dispatch_block_t _dispatch_data_destructor_inline; #define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline) -#endif // !__OBJC2__ /* * the out parameters are about seeing "through" trivial subranges @@ -135,7 +135,7 @@ _dispatch_data_map_direct(struct dispatch_data_s *dd, size_t offset, if (fastpath(_dispatch_data_leaf(dd))) { buffer = dd->buf + offset; } else { - buffer = dispatch_atomic_load((void **)&dd->buf, relaxed); + buffer = os_atomic_load((void **)&dd->buf, relaxed); if (buffer) { buffer += offset; } diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs new file mode 100644 index 0000000..986533c --- /dev/null +++ b/src/firehose/firehose.defs @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include + +#include "firehose_types.defs" + +subsystem firehose 11600; +serverprefix firehose_server_; +userprefix firehose_send_; + +simpleroutine +register( + server_port : mach_port_t; + mem_port : mach_port_move_send_t; + mem_size : mach_vm_size_t; + comm_recvp : mach_port_move_receive_t; + comm_sendp : mach_port_make_send_t; + extra_info_port : mach_port_move_send_t; + extra_info_size : mach_vm_size_t +); + +routine +push( +RequestPort comm_port : mach_port_t; +SReplyPort reply_port : mach_port_make_send_once_t; + qos_class : qos_class_t; + for_io : boolean_t; +out push_reply : firehose_push_reply_t +); + +simpleroutine +push_async( + comm_port : mach_port_t; + qos_class : qos_class_t; + for_io : boolean_t; + expects_notify : boolean_t +); diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c new file mode 100644 index 0000000..1305bde --- /dev/null +++ b/src/firehose/firehose_buffer.c @@ -0,0 +1,1147 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include // VM_MEMORY_GENEALOGY +#ifdef KERNEL + +#define OS_VOUCHER_ACTIVITY_SPI_TYPES 1 +#define OS_FIREHOSE_SPI 1 +#define __OS_EXPOSE_INTERNALS_INDIRECT__ 1 + +#define DISPATCH_PURE_C 1 +#define _safe_cast_to_long(x) \ + ({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \ + "__builtin_expect doesn't support types wider than long"); \ + (long)(x); }) +#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l)) +#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l)) +#define os_likely(x) __builtin_expect(!!(x), 1) +#define os_unlikely(x) __builtin_expect(!!(x), 0) +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) + +#define DISPATCH_INTERNAL_CRASH(ac, msg) ({ panic(msg); __builtin_trap(); }) + +#if defined(__x86_64__) || defined(__i386__) +#define dispatch_hardware_pause() __asm__("pause") +#elif (defined(__arm__) && defined(_ARM_ARCH_7) && defined(__thumb__)) || \ + defined(__arm64__) +#define dispatch_hardware_pause() __asm__("yield") +#define dispatch_hardware_wfe() __asm__("wfe") +#else +#define dispatch_hardware_pause() __asm__("") +#endif + +#define _dispatch_wait_until(c) do { \ + while (!fastpath(c)) { \ + dispatch_hardware_pause(); \ + } } while (0) +#define dispatch_compiler_barrier() __asm__ __volatile__("" ::: "memory") + +typedef uint32_t dispatch_lock; +typedef struct dispatch_gate_s { + dispatch_lock dgl_lock; +} dispatch_gate_s, *dispatch_gate_t; +#define DLOCK_LOCK_DATA_CONTENTION 0 +static void _dispatch_gate_wait(dispatch_gate_t l, uint32_t flags); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // +#include // +#include // os/internal/atomic.h +#include "os/firehose_buffer_private.h" +#include "firehose_buffer_internal.h" +#include "firehose_inline_internal.h" +#else +#include "internal.h" +#include "firehose.h" // MiG +#include "firehose_replyServer.h" // MiG +#endif + +#if OS_FIREHOSE_SPI + +#if __has_feature(c_static_assert) +_Static_assert(sizeof(((firehose_stream_state_u *)NULL)->fss_gate) == + sizeof(((firehose_stream_state_u *)NULL)->fss_allocator), + "fss_gate and fss_allocator alias"); +_Static_assert(offsetof(firehose_stream_state_u, fss_gate) == + offsetof(firehose_stream_state_u, fss_allocator), + "fss_gate and fss_allocator alias"); +_Static_assert(sizeof(struct firehose_buffer_header_s) == + FIREHOSE_BUFFER_CHUNK_SIZE, + "firehose buffer header must be 4k"); +_Static_assert(offsetof(struct firehose_buffer_header_s, fbh_unused) <= + FIREHOSE_BUFFER_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, + "we must have enough space for the libtrace header"); +_Static_assert(sizeof(struct firehose_buffer_chunk_s) == + FIREHOSE_BUFFER_CHUNK_SIZE, + "firehose buffer chunks must be 4k"); +_Static_assert(powerof2(FIREHOSE_BUFFER_CHUNK_COUNT), + "CHUNK_COUNT Must be a power of two"); +_Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, + "CHUNK_COUNT must be less than 64 (bitmap in uint64_t)"); +#ifdef FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT +_Static_assert(powerof2(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT), + "madvise chunk count must be a power of two"); +#endif +_Static_assert(howmany(sizeof(struct firehose_tracepoint_s), + sizeof(struct firehose_buffer_chunk_s)) < 255, + "refcount assumes that you cannot have more than 255 tracepoints"); +// FIXME: we should have an event-count instead here +_Static_assert(sizeof(struct firehose_buffer_stream_s) == 128, + "firehose buffer stream must 
be small (single cacheline if possible)"); +_Static_assert(offsetof(struct firehose_buffer_chunk_s, fbc_data) % 8 == 0, + "Page header is 8 byte aligned"); +_Static_assert(sizeof(struct firehose_tracepoint_s) == 24, + "tracepoint header should be exactly 24 bytes"); +#endif + +#ifdef KERNEL +static firehose_buffer_t kernel_firehose_buffer = NULL; +#endif + +#pragma mark - +#pragma mark Client IPC to the log daemon +#ifndef KERNEL + +static mach_port_t +firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) +{ + mach_port_t sendp = MACH_PORT_NULL; + mach_port_t mem_port = MACH_PORT_NULL, extra_info_port = MACH_PORT_NULL; + mach_vm_size_t extra_info_size = 0; + kern_return_t kr; + + dispatch_assert(fb->fb_header.fbh_logd_port); + dispatch_assert(fb->fb_header.fbh_recvp); + dispatch_assert(fb->fb_header.fbh_uniquepid != 0); + + _dispatch_unfair_lock_lock(&fb->fb_header.fbh_logd_lock); + sendp = fb->fb_header.fbh_sendp; + if (sendp != oldsendp || sendp == MACH_PORT_DEAD) { + // someone beat us to reconnecting or logd was unloaded, just go away + goto unlock; + } + + if (oldsendp) { + // same trick as _xpc_pipe_dispose: keeping a send right + // maintains the name, so that we can destroy the receive right + // in case we still have it. + (void)firehose_mach_port_recv_dispose(oldsendp, fb); + firehose_mach_port_send_release(oldsendp); + fb->fb_header.fbh_sendp = MACH_PORT_NULL; + } + + /* Create a memory port for the buffer VM region */ + vm_prot_t flags = VM_PROT_READ | MAP_MEM_VM_SHARE; + memory_object_size_t size = sizeof(union firehose_buffer_u); + mach_vm_address_t addr = (vm_address_t)fb; + + kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, + flags, &mem_port, MACH_PORT_NULL); + if (size < sizeof(union firehose_buffer_u)) { + DISPATCH_CLIENT_CRASH(size, "Invalid size for the firehose buffer"); + } + if (unlikely(kr)) { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); + } + + /* Create a communication port to the logging daemon */ + uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT; + sendp = firehose_mach_port_allocate(opts, fb); + + if (oldsendp && _voucher_libtrace_hooks->vah_version >= 3) { + if (_voucher_libtrace_hooks->vah_get_reconnect_info) { + kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); + if (likely(kr == KERN_SUCCESS) && addr && size) { + extra_info_size = size; + kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, + flags, &extra_info_port, MACH_PORT_NULL); + if (unlikely(kr)) { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); + } + kr = mach_vm_deallocate(mach_task_self(), addr, size); + (void)dispatch_assume_zero(kr); + } + } + } + + /* Call the firehose_register() MIG routine */ + kr = firehose_send_register(fb->fb_header.fbh_logd_port, mem_port, + sizeof(union firehose_buffer_u), sendp, fb->fb_header.fbh_recvp, + extra_info_port, extra_info_size); + if (likely(kr == KERN_SUCCESS)) { + fb->fb_header.fbh_sendp = sendp; + } else if (unlikely(kr == MACH_SEND_INVALID_DEST)) { + // MACH_SEND_INVALID_DEST here means that logd's boostrap port + // turned into a dead name, which in turn means that logd has been + // unloaded. The only option here, is to give up permanently. 
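	/*
	 * (Aside: for reference, the user-side entry points that MiG
	 * generates from firehose.defs, declared in the generated
	 * "firehose.h" included above. These are reconstructed from the
	 * serverprefix/userprefix directives and the call sites in this
	 * patch; treat the exact signatures as an approximation:
	 *
	 *	kern_return_t
	 *	firehose_send_register(mach_port_t server_port,
	 *			mach_port_t mem_port, mach_vm_size_t mem_size,
	 *			mach_port_t comm_recvp, mach_port_t comm_sendp,
	 *			mach_port_t extra_info_port,
	 *			mach_vm_size_t extra_info_size);
	 *
	 *	kern_return_t
	 *	firehose_send_push(mach_port_t comm_port, qos_class_t qos_class,
	 *			boolean_t for_io, firehose_push_reply_t *push_reply);
	 *
	 *	kern_return_t
	 *	firehose_send_push_async(mach_port_t comm_port,
	 *			qos_class_t qos_class, boolean_t for_io,
	 *			boolean_t expects_notify);
	 * )
	 */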
+ // + // same trick as _xpc_pipe_dispose: keeping a send right + // maintains the name, so that we can destroy the receive right + // in case we still have it. + (void)firehose_mach_port_recv_dispose(sendp, fb); + firehose_mach_port_send_release(sendp); + firehose_mach_port_send_release(mem_port); + if (extra_info_port) firehose_mach_port_send_release(extra_info_port); + sendp = fb->fb_header.fbh_sendp = MACH_PORT_DEAD; + } else { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to register with logd"); + } + +unlock: + _dispatch_unfair_lock_unlock(&fb->fb_header.fbh_logd_lock); + return sendp; +} + +static void +firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) +{ + firehose_bank_state_u old, new; + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + unsigned long fbb_flags = fbb->fbb_flags; + uint16_t io_streams = 0, mem_streams = 0; + uint16_t total = 0; + + for (size_t i = 0; i < countof(fb->fb_header.fbh_stream); i++) { + firehose_buffer_stream_t fbs = fb->fb_header.fbh_stream + i; + + if (fbs->fbs_state.fss_current == FIREHOSE_STREAM_STATE_PRISTINE) { + continue; + } + if ((1UL << i) & firehose_stream_uses_io_bank) { + io_streams++; + } else { + mem_streams++; + } + } + + if (fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY) { + if (fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_HIGH_RATE) { + total = 1 + 4 * mem_streams + io_streams; // usually 10 + } else { + total = 1 + 2 + mem_streams + io_streams; // usually 6 + } + } else { + if (fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_HIGH_RATE) { + total = 1 + 6 * mem_streams + 3 * io_streams; // usually 16 + } else { + total = 1 + 2 * (mem_streams + io_streams); // usually 7 + } + } + + uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_BUFFER_CHUNK_SIZE); + if (ratio > 1) { + total = roundup(total, ratio); + } + total = MAX(total, FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT); + if (!(fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY)) { + total = MAX(total, TARGET_OS_EMBEDDED ? 
8 : 12); + } + + new.fbs_max_ref = total; + new.fbs_mem_bank = FIREHOSE_BANK_UNAVAIL_BIT - (total - 1); + new.fbs_io_bank = FIREHOSE_BANK_UNAVAIL_BIT - + MAX(3 * total / 8, 2 * io_streams); + new.fbs_unused = 0; + + old = fbb->fbb_limits; + fbb->fbb_limits = new; + if (old.fbs_atomic_state == new.fbs_atomic_state) { + return; + } + os_atomic_add2o(&fb->fb_header, fbh_bank.fbb_state.fbs_atomic_state, + new.fbs_atomic_state - old.fbs_atomic_state, relaxed); +} +#endif // !KERNEL + +firehose_buffer_t +firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, + unsigned long bank_flags) +{ + firehose_buffer_header_t fbh; + firehose_buffer_t fb; + +#ifndef KERNEL + mach_vm_address_t vm_addr = 0; + kern_return_t kr; + + vm_addr = vm_page_size; + const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT * + FIREHOSE_BUFFER_CHUNK_SIZE; + if (slowpath(madvise_bytes % PAGE_SIZE)) { + DISPATCH_INTERNAL_CRASH(madvise_bytes, + "Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE"); + } + + kr = mach_vm_map(mach_task_self(), &vm_addr, sizeof(*fb), 0, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE | + VM_MAKE_TAG(VM_MEMORY_GENEALOGY), MEMORY_OBJECT_NULL, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_NONE); + if (slowpath(kr)) { + if (kr != KERN_NO_SPACE) dispatch_assume_zero(kr); + firehose_mach_port_send_release(logd_port); + return NULL; + } + + uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_STRICT | MPO_INSERT_SEND_RIGHT; +#else + vm_offset_t vm_addr = 0; + vm_size_t size; + + size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + __firehose_allocate(&vm_addr, size); + + (void)logd_port; (void)unique_pid; +#endif // KERNEL + + fb = (firehose_buffer_t)vm_addr; + fbh = &fb->fb_header; +#ifndef KERNEL + fbh->fbh_logd_port = logd_port; + fbh->fbh_pid = getpid(); + fbh->fbh_uniquepid = unique_pid; + fbh->fbh_recvp = firehose_mach_port_allocate(opts, fb); +#endif // !KERNEL + fbh->fbh_spi_version = OS_FIREHOSE_SPI_VERSION; + fbh->fbh_bank.fbb_flags = bank_flags; + +#ifndef KERNEL + for (size_t i = 0; i < countof(fbh->fbh_stream); i++) { + firehose_buffer_stream_t fbs = fbh->fbh_stream + i; + if (i != firehose_stream_metadata) { + fbs->fbs_state.fss_current = FIREHOSE_STREAM_STATE_PRISTINE; + } + } + firehose_buffer_update_limits_unlocked(fb); +#else + uint16_t total = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT + 1; + const uint16_t num_kernel_io_pages = 8; + uint16_t io_pages = num_kernel_io_pages; + fbh->fbh_bank.fbb_state = (firehose_bank_state_u){ + .fbs_max_ref = total, + .fbs_io_bank = FIREHOSE_BANK_UNAVAIL_BIT - io_pages, + .fbs_mem_bank = FIREHOSE_BANK_UNAVAIL_BIT - (total - io_pages - 1), + }; + fbh->fbh_bank.fbb_limits = fbh->fbh_bank.fbb_state; +#endif // KERNEL + + // now pre-allocate some chunks in the ring directly +#ifdef KERNEL + const uint16_t pre_allocated = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT - 1; +#else + const uint16_t pre_allocated = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT; +#endif + + fbh->fbh_bank.fbb_bitmap = (1U << (1 + pre_allocated)) - 1; + + for (uint16_t i = 0; i < pre_allocated; i++) { + fbh->fbh_mem_ring[i] = i + 1; + } + fbh->fbh_bank.fbb_mem_flushed = pre_allocated; + fbh->fbh_ring_mem_head = pre_allocated; + + +#ifdef KERNEL + // install the early boot page as the current one for persist + fbh->fbh_stream[firehose_stream_persist].fbs_state.fss_current = + FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT; + fbh->fbh_bank.fbb_state.fbs_io_bank += 1; +#endif + + fbh->fbh_ring_tail = (firehose_ring_tail_u){ + .frp_mem_flushed = pre_allocated, + }; + 
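	/*
	 * Note on the seeding above: page reference 0 is the buffer header
	 * and never a data chunk, so (1 + pre_allocated) low bits are
	 * marked used in fbb_bitmap while the mem ring is seeded with refs
	 * 1..pre_allocated for recyclers to find. In the userspace case
	 * (pre_allocated == 4): fbb_bitmap == 0x1f and fbh_mem_ring begins
	 * {1, 2, 3, 4}.
	 */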
return fb; +} + +#ifndef KERNEL +static void +firehose_notify_source_invoke(mach_msg_header_t *hdr) +{ + const size_t reply_size = + sizeof(union __ReplyUnion__firehose_client_firehoseReply_subsystem); + + firehose_mig_server(firehoseReply_server, reply_size, hdr); +} + +static void +firehose_client_register_for_notifications(firehose_buffer_t fb) +{ + static const struct dispatch_continuation_s dc = { + .dc_func = (void *)firehose_notify_source_invoke, + }; + firehose_buffer_header_t fbh = &fb->fb_header; + + dispatch_once(&fbh->fbh_notifs_pred, ^{ + dispatch_source_t ds = _dispatch_source_create_mach_msg_direct_recv( + fbh->fbh_recvp, &dc); + dispatch_set_context(ds, fb); + dispatch_activate(ds); + fbh->fbh_notifs_source = ds; + }); +} + +static void +firehose_client_send_push_async(firehose_buffer_t fb, qos_class_t qos, + bool for_io) +{ + bool ask_for_notifs = fb->fb_header.fbh_notifs_source != NULL; + mach_port_t sendp = fb->fb_header.fbh_sendp; + kern_return_t kr = KERN_FAILURE; + + if (!ask_for_notifs && _dispatch_is_multithreaded_inline()) { + firehose_client_register_for_notifications(fb); + ask_for_notifs = true; + } + + if (slowpath(sendp == MACH_PORT_DEAD)) { + return; + } + + if (fastpath(sendp)) { + kr = firehose_send_push_async(sendp, qos, for_io, ask_for_notifs); + if (likely(kr == KERN_SUCCESS)) { + return; + } + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } + + sendp = firehose_client_reconnect(fb, sendp); + if (fastpath(MACH_PORT_VALID(sendp))) { + kr = firehose_send_push_async(sendp, qos, for_io, ask_for_notifs); + if (likely(kr == KERN_SUCCESS)) { + return; + } + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } +} +#endif // !KERNEL + +static void +firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, + firehose_push_reply_t reply, firehose_bank_state_u *state_out) +{ + firehose_bank_state_u state; + firehose_ring_tail_u otail, ntail; + uint64_t old_flushed_pos, bank_updates; + uint16_t io_delta = 0; + uint16_t mem_delta = 0; + + if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_mem_flushed, + reply.fpr_mem_flushed_pos, &old_flushed_pos, relaxed)) { + mem_delta = (uint16_t)(reply.fpr_mem_flushed_pos - old_flushed_pos); + } + if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_io_flushed, + reply.fpr_io_flushed_pos, &old_flushed_pos, relaxed)) { + io_delta = (uint16_t)(reply.fpr_io_flushed_pos - old_flushed_pos); + } +#ifndef KERNEL + _dispatch_debug("client side: mem: +%d->%llx, io: +%d->%llx", + mem_delta, reply.fpr_mem_flushed_pos, + io_delta, reply.fpr_io_flushed_pos); +#endif + + if (!mem_delta && !io_delta) { + if (state_out) { + state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header, + fbh_bank.fbb_state.fbs_atomic_state, relaxed); + } + return; + } + + bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | + ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); + state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header, + fbh_bank.fbb_state.fbs_atomic_state, bank_updates, relaxed); + if (state_out) *state_out = state; + + os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, + otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, { + ntail = otail; + // overflow handles the generation wraps + ntail.frp_io_flushed += io_delta; + ntail.frp_mem_flushed += mem_delta; + }); + if (async_notif) { + if (io_delta) { + os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed); + } + if (mem_delta) { + 
os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_notifs, relaxed); + } + } +} + +#ifndef KERNEL +static void +firehose_client_send_push(firehose_buffer_t fb, bool for_io, + firehose_bank_state_u *state_out) +{ + mach_port_t sendp = fb->fb_header.fbh_sendp; + firehose_push_reply_t push_reply = { }; + qos_class_t qos = qos_class_self(); + kern_return_t kr; + + if (slowpath(sendp == MACH_PORT_DEAD)) { + return; + } + if (fastpath(sendp)) { + kr = firehose_send_push(sendp, qos, for_io, &push_reply); + if (likely(kr == KERN_SUCCESS)) { + goto success; + } + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } + + sendp = firehose_client_reconnect(fb, sendp); + if (fastpath(MACH_PORT_VALID(sendp))) { + kr = firehose_send_push(sendp, qos, for_io, &push_reply); + if (likely(kr == KERN_SUCCESS)) { + goto success; + } + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } + + if (state_out) { + state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header, + fbh_bank.fbb_state.fbs_atomic_state, relaxed); + } + return; + +success: + if (memcmp(&push_reply, &FIREHOSE_PUSH_REPLY_CORRUPTED, + sizeof(push_reply)) == 0) { + // TODO: find out the actual cause and log it + DISPATCH_CLIENT_CRASH(0, "Memory corruption in the logging buffers"); + } + + if (for_io) { + os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_sync_pushes, relaxed); + } else { + os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_sync_pushes, relaxed); + } + // TODO + // + // use fbb_*_flushes and fbb_*_sync_pushes to decide to dynamically + // allow using more buffers, if not under memory pressure. + // + // There only is a point for multithreaded clients if: + // - enough samples (total_flushes above some limits) + // - the ratio is really bad (a push per cycle is definitely a problem) + return firehose_client_merge_updates(fb, false, push_reply, state_out); +} + +kern_return_t +firehose_client_push_reply(mach_port_t req_port OS_UNUSED, + kern_return_t rtc, firehose_push_reply_t push_reply OS_UNUSED) +{ + DISPATCH_INTERNAL_CRASH(rtc, "firehose_push_reply should never be sent " + "to the buffer receive port"); +} + +kern_return_t +firehose_client_push_notify_async(mach_port_t server_port OS_UNUSED, + firehose_push_reply_t push_reply) +{ + // see _dispatch_source_merge_mach_msg_direct + dispatch_queue_t dq = _dispatch_queue_get_current(); + firehose_buffer_t fb = dispatch_get_context(dq); + firehose_client_merge_updates(fb, true, push_reply, NULL); + return KERN_SUCCESS; +} + +#endif // !KERNEL +#pragma mark - +#pragma mark Buffer handling + +#ifndef KERNEL +void +firehose_buffer_update_limits(firehose_buffer_t fb) +{ + dispatch_unfair_lock_t fbb_lock = &fb->fb_header.fbh_bank.fbb_lock; + _dispatch_unfair_lock_lock(fbb_lock); + firehose_buffer_update_limits_unlocked(fb); + _dispatch_unfair_lock_unlock(fbb_lock); +} +#endif // !KERNEL + +OS_ALWAYS_INLINE +static inline firehose_tracepoint_t +firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, + firehose_tracepoint_query_t ask, uint8_t **privptr) +{ + const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); + + uint16_t pub_offs = offsetof(struct firehose_buffer_chunk_s, fbc_data); + uint16_t priv_offs = FIREHOSE_BUFFER_CHUNK_SIZE; + + pub_offs += roundup(ft_size + ask->pubsize, 8); + priv_offs -= ask->privsize; + + if (fbc->fbc_pos.fbc_atomic_pos) { + // Needed for process death handling (recycle-reuse): + // No atomic fences required, we merely want to make sure the observers + // 
will see memory effects in program (asm) order. + // 1. the payload part of the chunk is cleared completely + // 2. the chunk is marked as reused + // This ensures that if we don't see a reference to a chunk in the ring + // and it is dirty, when crawling the chunk, we don't see remnants of + // other tracepoints + // + // We only do that when the fbc_pos is non zero, because zero means + // we just faulted the chunk, and the kernel already bzero-ed it. + bzero(fbc->fbc_data, sizeof(fbc->fbc_data)); + } + dispatch_compiler_barrier(); + // boot starts mach absolute time at 0, and + // wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP + // breaks firehose_buffer_stream_flush() assumptions + if (ask->stamp > FIREHOSE_STAMP_SLOP) { + fbc->fbc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; + } else { + fbc->fbc_timestamp = 0; + } + fbc->fbc_pos = (firehose_buffer_pos_u){ + .fbc_next_entry_offs = pub_offs, + .fbc_private_offs = priv_offs, + .fbc_refcnt = 1, + .fbc_qos_bits = firehose_buffer_qos_bits_propagate(), + .fbc_stream = ask->stream, + .fbc_flag_io = ask->for_io, + }; + + if (privptr) { + *privptr = fbc->fbc_start + priv_offs; + } + return (firehose_tracepoint_t)fbc->fbc_data; +} + +OS_NOINLINE +static firehose_tracepoint_t +firehose_buffer_stream_chunk_install(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) +{ + firehose_stream_state_u state, new_state; + firehose_tracepoint_t ft; + firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[ask->stream]; + uint64_t stamp_and_len; + + if (fastpath(ref)) { + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + ft = firehose_buffer_chunk_init(fbc, ask, privptr); + // Needed for process death handling (tracepoint-begin): + // write the length before making the chunk visible + stamp_and_len = ask->stamp - fbc->fbc_timestamp; + stamp_and_len |= (uint64_t)ask->pubsize << 48; + os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); + + if (ask->stream == firehose_stream_metadata) { + os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + 1ULL << ref, relaxed); + } + // release barrier to make the chunk init visible + os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, release, { + // We use a generation counter to prevent a theoretical ABA problem: + // a thread could try to acquire a tracepoint in a chunk, fail to + // do so mark it as to be pushed, enqueue it, and then be preempted + // + // It sleeps for a long time, and then tries to acquire the + // allocator bit and uninstalling the chunk. Succeeds in doing so, + // but because the chunk actually happened to have cycled all the + // way back to being installed. That thread would effectively hide + // that unflushed chunk and leak it. + // + // Having a generation counter prevents the uninstallation of the + // chunk to spuriously succeed when it was a re-incarnation of it. 
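			/*
			 * Aside: the os_atomic_rmw_loop2o() construct used here and
			 * throughout this patch is, roughly, a compare-and-swap
			 * retry loop; a sketch of its shape in C11 terms, where the
			 * { ... } body computes the new value from the old one:
			 *
			 *	old = atomic_load_explicit(p, memory_order_relaxed);
			 *	do {
			 *		new = <body>(old);
			 *	} while (!atomic_compare_exchange_weak_explicit(p,
			 *			&old, new, <order>, memory_order_relaxed));
			 *
			 * os_atomic_rmw_loop_give_up() breaks out of the loop
			 * without storing anything.
			 */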
+ new_state = (firehose_stream_state_u){ + .fss_current = ref, + .fss_generation = state.fss_generation + 1, + }; + }); + } else { + // the allocator gave up just clear the allocator + waiter bits + firehose_stream_state_u mask = { .fss_allocator = ~0u, }; + state.fss_atomic_state = os_atomic_and_orig2o(fbs, + fbs_state.fss_atomic_state, ~mask.fss_atomic_state, relaxed); + ft = NULL; + } + +#ifndef KERNEL + if (unlikely(state.fss_gate.dgl_lock != _dispatch_tid_self())) { + _dispatch_gate_broadcast_slow(&fbs->fbs_state.fss_gate, + state.fss_gate.dgl_lock); + } + + if (unlikely(state.fss_current == FIREHOSE_STREAM_STATE_PRISTINE)) { + firehose_buffer_update_limits(fb); + } +#endif // KERNEL + + // pairs with the one in firehose_buffer_tracepoint_reserve() + __firehose_critical_region_leave(); + return ft; +} + +#ifndef KERNEL +OS_ALWAYS_INLINE +static inline uint16_t +firehose_buffer_ring_try_grow(firehose_buffer_bank_t fbb, uint16_t limit) +{ + uint16_t ref = 0; + uint64_t bitmap; + + _dispatch_unfair_lock_lock(&fbb->fbb_lock); + bitmap = ~(fbb->fbb_bitmap | (~0ULL << limit)); + if (bitmap) { + ref = firehose_bitmap_first_set(bitmap); + fbb->fbb_bitmap |= 1U << ref; + } + _dispatch_unfair_lock_unlock(&fbb->fbb_lock); + return ref; +} + +OS_ALWAYS_INLINE +static inline uint16_t +firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) +{ + const size_t madv_size = + FIREHOSE_BUFFER_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; + const size_t madv_mask = + (1ULL << FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT) - 1; + + dispatch_unfair_lock_t fbb_lock = &fb->fb_header.fbh_bank.fbb_lock; + uint64_t bitmap; + + _dispatch_unfair_lock_lock(fbb_lock); + if (ref < fb->fb_header.fbh_bank.fbb_limits.fbs_max_ref) { + goto done; + } + + bitmap = (fb->fb_header.fbh_bank.fbb_bitmap &= ~(1UL << ref)); + ref &= ~madv_mask; + if ((bitmap & (madv_mask << ref)) == 0) { + // if MADVISE_WIDTH consecutive chunks are free, madvise them free + madvise(firehose_buffer_ref_to_chunk(fb, ref), madv_size, MADV_FREE); + } + ref = 0; +done: + _dispatch_unfair_lock_unlock(fbb_lock); + return ref; +} +#endif // !KERNEL + +OS_NOINLINE +void +firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) +{ + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t volatile *fbh_ring; + uint16_t volatile *fbh_ring_head; + uint16_t head, gen, dummy, idx; + firehose_buffer_pos_u fbc_pos = fbc->fbc_pos; + bool for_io = fbc_pos.fbc_flag_io; + + if (for_io) { + fbh_ring = fb->fb_header.fbh_io_ring; + fbh_ring_head = &fb->fb_header.fbh_ring_io_head; + } else { + fbh_ring = fb->fb_header.fbh_mem_ring; + fbh_ring_head = &fb->fb_header.fbh_ring_mem_head; + } + +#ifdef KERNEL + // The algorithm in the kernel is simpler: + // 1. reserve a write position for the head + // 2. store the new reference at that position + // Enqueuers can't starve each other that way. + // + // However, the dequeuers now have to sometimes wait for the value written + // in the ring to appear and have to spin, which is okay since the kernel + // disables preemption around these two consecutive atomic operations. + // See firehose_client_drain. + __firehose_critical_region_enter(); + head = os_atomic_inc_orig(fbh_ring_head, relaxed); + gen = head & FIREHOSE_RING_POS_GEN_MASK; + idx = head & FIREHOSE_RING_POS_IDX_MASK; + + while (unlikely(!os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + relaxed))) { + // can only ever happen if a recycler is slow, this requires having + // enough cores (>5 for I/O e.g.) 
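		/*
		 * Worked example of the position encoding, assuming
		 * FIREHOSE_BUFFER_CHUNK_COUNT == 64 (IDX_MASK == 0x3f,
		 * GEN_MASK == 0xffc0): head == 0x0085 decodes as slot idx 5
		 * with generation word gen == 0x0080, i.e. the head has
		 * wrapped the ring twice. The slot reads exactly gen (0x0080)
		 * while empty for that lap, and enqueuing stores gen | ref.
		 */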
+ _dispatch_wait_until(fbh_ring[idx] == gen); + } + __firehose_critical_region_leave(); + __firehose_buffer_push_to_logd(fb, for_io); +#else + // The algorithm is: + // 1. read the head position + // 2. cmpxchg head.gen with the (head.gen | ref) at head.idx + // 3. if it fails wait until either the head cursor moves, + // or the cell becomes free + // + // The most likely stall at (3) is because another enqueuer raced us + // and made the cell non empty. + // + // The alternative is to reserve the enqueue slot with an atomic inc. + // Then write the ref into the ring. This would be much simpler as the + // generation packing wouldn't be required (though setting the ring cell + // would still need a cmpxchg loop to avoid clobbering values of slow + // dequeuers) + // + // But then that means that flushers (logd) could be starved until that + // finishes, and logd cannot be held forever (that could even be a logd + // DoS from malicious programs). Meaning that logd would stop draining + // buffer queues when encountering that issue, leading the program to be + // stuck in firehose_client_push() apparently waiting on logd, while + // really it's waiting on itself. It's better for the scheduler if we + // make it clear that we're waiting on ourselves! + + head = os_atomic_load(fbh_ring_head, relaxed); + for (;;) { + gen = head & FIREHOSE_RING_POS_GEN_MASK; + idx = head & FIREHOSE_RING_POS_IDX_MASK; + + // a thread being preempted here for GEN_MASK worth of ring rotations, + // it could lead to the cmpxchg succeed, and have a bogus enqueue + // (confused enqueuer) + if (fastpath(os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + relaxed))) { + if (fastpath(os_atomic_cmpxchgv(fbh_ring_head, head, head + 1, + &head, release))) { + __firehose_critical_region_leave(); + break; + } + // this thread is a confused enqueuer, need to undo enqueue + os_atomic_store(&fbh_ring[idx], gen, relaxed); + continue; + } + + _dispatch_wait_until(({ + // wait until either the head moves (another enqueuer is done) + // or (not very likely) a recycler is very slow + // or (very unlikely) the confused thread undoes its enqueue + uint16_t old_head = head; + head = *fbh_ring_head; + head != old_head || fbh_ring[idx] == gen; + })); + } + + pthread_priority_t pp = fbc_pos.fbc_qos_bits; + pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; + firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL), + for_io); +#endif +} + +OS_ALWAYS_INLINE +static inline uint16_t +firehose_buffer_ring_try_recycle(firehose_buffer_t fb) +{ + firehose_ring_tail_u pos, old; + uint16_t volatile *fbh_ring; + uint16_t gen, ref, entry, tail; + firehose_buffer_chunk_t fbc; + bool for_io; + + os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, + old.frp_atomic_tail, pos.frp_atomic_tail, relaxed, { + pos = old; + if (fastpath(old.frp_mem_tail != old.frp_mem_flushed)) { + pos.frp_mem_tail++; + } else if (fastpath(old.frp_io_tail != old.frp_io_flushed)) { + pos.frp_io_tail++; + } else { + os_atomic_rmw_loop_give_up(return 0); + } + }); + + // there's virtually no chance that the lack of acquire barrier above + // lets us read a value from the ring so stale that it's still an Empty + // marker. For correctness purposes have a cheap loop that should never + // really loop, instead of an acquire barrier in the cmpxchg above. 
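	/*
	 * Whichever tail the rmw loop above advanced identifies the ring
	 * being dequeued from: if the I/O tail moved, this recycle is for
	 * the I/O ring, otherwise for the memory ring.
	 */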
+ for_io = (pos.frp_io_tail != old.frp_io_tail); + if (for_io) { + fbh_ring = fb->fb_header.fbh_io_ring; + tail = old.frp_io_tail & FIREHOSE_RING_POS_IDX_MASK; + } else { + fbh_ring = fb->fb_header.fbh_mem_ring; + tail = old.frp_mem_tail & FIREHOSE_RING_POS_IDX_MASK; + } + _dispatch_wait_until((entry = fbh_ring[tail]) & FIREHOSE_RING_POS_IDX_MASK); + + // Needed for process death handling (recycle-dequeue): + // No atomic fences required, we merely want to make sure the observers + // will see memory effects in program (asm) order. + // 1. the chunk is marked as "void&full" (clobbering the pos with FULL_BIT) + // 2. then we remove any reference to the chunk from the ring + // This ensures that if we don't see a reference to a chunk in the ring + // and it is dirty, it is a chunk being written to that needs a flush + gen = (entry & FIREHOSE_RING_POS_GEN_MASK) + FIREHOSE_RING_POS_GEN_INC; + ref = entry & FIREHOSE_RING_POS_IDX_MASK; + fbc = firehose_buffer_ref_to_chunk(fb, ref); + + if (!for_io && fbc->fbc_pos.fbc_stream == firehose_stream_metadata) { + os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + ~(1ULL << ref), relaxed); + } + os_atomic_store2o(fbc, fbc_pos.fbc_atomic_pos, + FIREHOSE_BUFFER_POS_FULL_BIT, relaxed); + dispatch_compiler_barrier(); + os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); + return ref; +} + +#ifndef KERNEL +OS_NOINLINE +static firehose_tracepoint_t +firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) +{ + const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io); + firehose_buffer_bank_t const fbb = &fb->fb_header.fbh_bank; + firehose_bank_state_u state; + uint16_t fbs_max_ref; + + // first wait for our bank to have space, if needed + if (!fastpath(ask->is_bank_ok)) { + state.fbs_atomic_state = + os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + while (state.fbs_atomic_state & bank_unavail_mask) { + firehose_client_send_push(fb, ask->for_io, &state); + if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { + // logd was unloaded, give up + return NULL; + } + } + ask->is_bank_ok = true; + fbs_max_ref = state.fbs_max_ref; + } else { + fbs_max_ref = fbb->fbb_state.fbs_max_ref; + } + + // second, if we were passed a chunk, we may need to shrink + if (slowpath(ref)) { + goto try_shrink; + } + + // third, wait for a chunk to come up, and if not, wait on the daemon + for (;;) { + if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { + try_shrink: + if (slowpath(ref >= fbs_max_ref)) { + ref = firehose_buffer_ring_shrink(fb, ref); + if (!ref) { + continue; + } + } + break; + } + if (fastpath(ref = firehose_buffer_ring_try_grow(fbb, fbs_max_ref))) { + break; + } + firehose_client_send_push(fb, ask->for_io, NULL); + if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { + // logd was unloaded, give up + break; + } + } + + return firehose_buffer_stream_chunk_install(fb, ask, privptr, ref); +} +#else +static inline dispatch_lock +_dispatch_gate_lock_load_seq_cst(dispatch_gate_t l) +{ + return os_atomic_load(&l->dgl_lock, seq_cst); +} +OS_NOINLINE +static void +_dispatch_gate_wait(dispatch_gate_t l, uint32_t flags) +{ + (void)flags; + _dispatch_wait_until(_dispatch_gate_lock_load_seq_cst(l) == 0); +} +#endif // KERNEL + +firehose_tracepoint_t +firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr) +{ + const unsigned for_io = ask->for_io; + const firehose_buffer_bank_t fbb = 
&fb->fb_header.fbh_bank; + firehose_bank_state_u state; + uint16_t ref = 0; + + uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io); +#ifndef KERNEL + state.fbs_atomic_state = os_atomic_add_orig2o(fbb, + fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); + if (fastpath(!(state.fbs_atomic_state & unavail_mask))) { + ask->is_bank_ok = true; + if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { + if (fastpath(ref < state.fbs_max_ref)) { + return firehose_buffer_stream_chunk_install(fb, ask, + privptr, ref); + } + } + } + return firehose_buffer_tracepoint_reserve_slow2(fb, ask, privptr, ref); +#else + firehose_bank_state_u value; + ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state, + state.fbs_atomic_state, value.fbs_atomic_state, relaxed, { + value = state; + if (slowpath((value.fbs_atomic_state & unavail_mask) != 0)) { + os_atomic_rmw_loop_give_up(break); + } + value.fbs_atomic_state += FIREHOSE_BANK_INC(for_io); + }); + if (ask->is_bank_ok) { + ref = firehose_buffer_ring_try_recycle(fb); + if (slowpath(ref == 0)) { + // the kernel has no overlap between I/O and memory chunks, + // having an available bank slot means we should be able to recycle + DISPATCH_INTERNAL_CRASH(0, "Unable to recycle a chunk"); + } + } + // rdar://25137005 installing `0` unlocks the allocator + return firehose_buffer_stream_chunk_install(fb, ask, privptr, ref); +#endif // KERNEL +} + +#ifdef KERNEL +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr) +{ + firehose_buffer_t fb = kernel_firehose_buffer; + if (!fastpath(fb)) { + return NULL; + } + return firehose_buffer_tracepoint_reserve(fb, stamp, stream, pubsize, + privsize, privptr); +} + +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, + uint64_t stamp, firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr) +{ + + firehose_tracepoint_t ft; + long result; + + result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, + pubsize, privsize, privptr); + if (fastpath(result > 0)) { + ft = (firehose_tracepoint_t)(fbc->fbc_start + result); + stamp -= fbc->fbc_timestamp; + stamp |= (uint64_t)pubsize << 48; + // Needed for process death handling (tracepoint-begin) + // see firehose_buffer_stream_chunk_install + os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); + dispatch_compiler_barrier(); + return ft; + } + else { + return NULL; + } +} + +firehose_buffer_t +__firehose_buffer_create(size_t *size) +{ + if (!kernel_firehose_buffer) { + kernel_firehose_buffer = firehose_buffer_create(MACH_PORT_NULL, 0, 0); + } + + if (size) { + *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + } + return kernel_firehose_buffer; +} + +void +__firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, + firehose_tracepoint_id_u ftid) +{ + return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); +} + +void +__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, + firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) +{ + firehose_buffer_pos_u pos; + + // Needed for process death handling (tracepoint-flush): + // We want to make sure the observers + // will see memory effects in program (asm) order. + // 1. write all the data to the tracepoint + // 2. 
write the tracepoint ID, so that seeing it means the tracepoint + // is valid + ft->ft_thread = thread_tid(current_thread()); + + // release barrier makes the log writes visible + os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); + pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, + FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); + return; +} + +void +__firehose_merge_updates(firehose_push_reply_t update) +{ + firehose_buffer_t fb = kernel_firehose_buffer; + if (fastpath(fb)) { + firehose_client_merge_updates(fb, true, update, NULL); + } +} +#endif // KERNEL + +#endif // OS_FIREHOSE_SPI diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h new file mode 100644 index 0000000..db8e026 --- /dev/null +++ b/src/firehose/firehose_buffer_internal.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_BUFFER_INTERNAL__ +#define __FIREHOSE_BUFFER_INTERNAL__ + +#if BYTE_ORDER != LITTLE_ENDIAN +#error unsupported byte order +#endif + +#ifndef KERNEL +#include +#endif + +// firehose buffer is CHUNK_COUNT * CHUNK_SIZE big == 256k +#define FIREHOSE_BUFFER_CHUNK_COUNT 64ul +#ifdef KERNEL +#define FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT 15 +#else +#define FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT 4 +#define FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT 4 +#endif + +static const unsigned long firehose_stream_uses_io_bank = + (1UL << firehose_stream_persist) | + (1UL << firehose_stream_special); + +typedef union { +#define FIREHOSE_BANK_SHIFT(bank) (16 * (bank)) +#define FIREHOSE_BANK_INC(bank) (1ULL << FIREHOSE_BANK_SHIFT(bank)) +#define FIREHOSE_BANK_UNAVAIL_BIT ((uint16_t)0x8000) +#define FIREHOSE_BANK_UNAVAIL_MASK(bank) (FIREHOSE_BANK_INC(bank) << 15) + uint64_t fbs_atomic_state; + struct { + uint16_t fbs_mem_bank; + uint16_t fbs_io_bank; + uint16_t fbs_max_ref; + uint16_t fbs_unused; + }; +} firehose_bank_state_u; + +#if __has_feature(c_static_assert) +_Static_assert(8 * offsetof(firehose_bank_state_u, fbs_mem_bank) + == FIREHOSE_BANK_SHIFT(0), "mem bank shift"); +_Static_assert(8 * offsetof(firehose_bank_state_u, fbs_io_bank) + == FIREHOSE_BANK_SHIFT(1), "mem bank shift"); +#endif + +typedef struct firehose_buffer_bank_s { + firehose_bank_state_u volatile fbb_state; + uint64_t volatile fbb_metadata_bitmap; + uint64_t volatile fbb_mem_flushed; + uint64_t volatile fbb_mem_notifs; + uint64_t volatile fbb_mem_sync_pushes; + uint64_t volatile fbb_io_flushed; + uint64_t volatile fbb_io_notifs; + uint64_t volatile fbb_io_sync_pushes; +#define FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY (1UL << 0) +#define FIREHOSE_BUFFER_BANK_FLAG_HIGH_RATE (1UL << 1) + unsigned long volatile fbb_flags; + + uint64_t fbb_bitmap; // protected by fbb_lock + firehose_bank_state_u fbb_limits; // protected by fbb_lock +#ifdef KERNEL + uint32_t _fbb_unused; +#else + dispatch_unfair_lock_s 
fbb_lock; +#endif +} OS_ALIGNED(64) *firehose_buffer_bank_t; + +typedef union { + uint64_t fss_atomic_state; + dispatch_gate_s fss_gate; + struct { + uint32_t fss_allocator; +#define FIREHOSE_STREAM_STATE_PRISTINE 0xffff + uint16_t fss_current; + uint16_t fss_generation; + }; +} firehose_stream_state_u; + +typedef struct firehose_buffer_stream_s { + firehose_stream_state_u fbs_state; +} OS_ALIGNED(128) *firehose_buffer_stream_t; + +typedef union { + uint64_t frp_atomic_tail; + struct { + uint16_t frp_mem_tail; + uint16_t frp_mem_flushed; + uint16_t frp_io_tail; + uint16_t frp_io_flushed; + }; +} firehose_ring_tail_u; + +#define FIREHOSE_RING_POS_GEN_INC ((uint16_t)(FIREHOSE_BUFFER_CHUNK_COUNT)) +#define FIREHOSE_RING_POS_IDX_MASK ((uint16_t)(FIREHOSE_RING_POS_GEN_INC - 1)) +#define FIREHOSE_RING_POS_GEN_MASK ((uint16_t)~FIREHOSE_RING_POS_IDX_MASK) + +/* + * Rings are circular buffers with CHUNK_COUNT entries, with 3 important markers + * + * +--------+-------------------------+------------+---------------------------+ + * |xxxxxxxx| |............|xxxxxxxxxxxxxxxxxxxxxxxxxxx| + * +--------+-------------------------+------------+---------------------------+ + * ^ ^ ^ + * head tail flushed + * + * A ring position is a uint16_t made of a generation (see GEN_MASK) and an + * index (see IDX_MASK). Slots of that ring hold tagged page references. These + * are made from a generation (see GEN_MASK) and a page reference. + * + * A generation is how many times the head wrapped around. + * + * These conditions hold: + * (uint16_t)(flushed - tail) < FIREHOSE_BUFFER_CHUNK_COUNT + * (uint16_t)(head - flushed) < FIREHOSE_BUFFER_CHUNK_COUNT + * which really means, on the circular buffer, tail <= flushed <= head. + * + * Page references span from 1 to (CHUNK_COUNT - 1). 0 is an invalid page + * (corresponds to the buffer header) and means "unused". + * + * + * - Entries situated between tail and flushed hold references to pages that + * the firehose consumer (logd) has flushed, and can be reused. + * + * - Entries situated between flushed and head are references to pages waiting + * to be flushed. + * + * - Entries not situated between tail and head are either slots being modified + * or that should be set to Empty. Empty is the 0 page reference associated + * with the generation count the head will have the next time it will go over + * that slot. 
+ */ +typedef struct firehose_buffer_header_s { + uint16_t volatile fbh_mem_ring[FIREHOSE_BUFFER_CHUNK_COUNT]; + uint16_t volatile fbh_io_ring[FIREHOSE_BUFFER_CHUNK_COUNT]; + + firehose_ring_tail_u volatile fbh_ring_tail OS_ALIGNED(64); + uint32_t fbh_spi_version; + uint16_t volatile fbh_ring_mem_head OS_ALIGNED(64); + uint16_t volatile fbh_ring_io_head OS_ALIGNED(64); + struct firehose_buffer_bank_s fbh_bank; + struct firehose_buffer_stream_s fbh_stream[_firehose_stream_max]; + + uint64_t fbh_uniquepid; + pid_t fbh_pid; + mach_port_t fbh_logd_port; + mach_port_t volatile fbh_sendp; + mach_port_t fbh_recvp; + + // past that point fields may be aligned differently between 32 and 64bits +#ifndef KERNEL + dispatch_once_t fbh_notifs_pred OS_ALIGNED(64); + dispatch_source_t fbh_notifs_source; + dispatch_unfair_lock_s fbh_logd_lock; +#endif + uint64_t fbh_unused[0]; +} OS_ALIGNED(FIREHOSE_BUFFER_CHUNK_SIZE) *firehose_buffer_header_t; + +union firehose_buffer_u { + struct firehose_buffer_header_s fb_header; + struct firehose_buffer_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; +}; + +// used to let the compiler pack these values in 1 or 2 registers +typedef struct firehose_tracepoint_query_s { + uint16_t pubsize; + uint16_t privsize; + firehose_stream_t stream; + bool is_bank_ok; + bool for_io; + uint64_t stamp; +} *firehose_tracepoint_query_t; + +#ifndef FIREHOSE_SERVER + +firehose_buffer_t +firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, + unsigned long bank_flags); + +firehose_tracepoint_t +firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr); + +void +firehose_buffer_update_limits(firehose_buffer_t fb); + +void +firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref); + +#endif + +#endif // __FIREHOSE_BUFFER_INTERNAL__ diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h new file mode 100644 index 0000000..9576882 --- /dev/null +++ b/src/firehose/firehose_inline_internal.h @@ -0,0 +1,502 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_INLINE_INTERNAL__ +#define __FIREHOSE_INLINE_INTERNAL__ + +#define firehose_atomic_maxv2o(p, f, v, o, m) \ + os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \ + if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \ + }) + +#define firehose_atomic_max2o(p, f, v, m) ({ \ + typeof((p)->f) _old; \ + firehose_atomic_maxv2o(p, f, v, &_old, m); \ + }) + +#ifndef KERNEL +// caller must test for non zero first +OS_ALWAYS_INLINE +static inline uint16_t +firehose_bitmap_first_set(uint64_t bitmap) +{ + dispatch_assert(bitmap != 0); + // this builtin returns 0 if bitmap is 0, or (first bit set + 1) + return (uint16_t)__builtin_ffsll((long long)bitmap) - 1; +} +#endif + +#pragma mark - +#pragma mark Mach Misc. 
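/*
 * The firehose_atomic_maxv2o() macro defined above implements an atomic
 * fetch-max on top of the rmw loop. A self-contained C11 sketch of the
 * same operation (atomic_max_sketch is a hypothetical name; the real
 * macro operates on a struct field and takes an explicit memory order):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
atomic_max_sketch(_Atomic uint64_t *p, uint64_t v, uint64_t *old_out)
{
	uint64_t old = atomic_load_explicit(p, memory_order_relaxed);
	while (old < v) {
		// on CAS failure, old is reloaded with the current value
		if (atomic_compare_exchange_weak_explicit(p, &old, v,
				memory_order_relaxed, memory_order_relaxed)) {
			*old_out = old;
			return true;	// we raised the stored value to v
		}
	}
	*old_out = old;
	return false;	// stored value was already >= v, nothing written
}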
+#ifndef KERNEL + +OS_ALWAYS_INLINE +static inline mach_port_t +firehose_mach_port_allocate(uint32_t flags, void *ctx) +{ + mach_port_t port = MACH_PORT_NULL; + mach_port_options_t opts = { + .flags = flags, + }; + kern_return_t kr; + + for (;;) { + kr = mach_port_construct(mach_task_self(), &opts, + (mach_port_context_t)ctx, &port); + if (fastpath(kr == KERN_SUCCESS)) { + break; + } + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + _dispatch_temporary_resource_shortage(); + } + return port; +} + +OS_ALWAYS_INLINE +static inline kern_return_t +firehose_mach_port_recv_dispose(mach_port_t port, void *ctx) +{ + kern_return_t kr; + kr = mach_port_destruct(mach_task_self(), port, 0, + (mach_port_context_t)ctx); + DISPATCH_VERIFY_MIG(kr); + return kr; +} + +OS_ALWAYS_INLINE +static inline void +firehose_mach_port_send_release(mach_port_t port) +{ + kern_return_t kr = mach_port_deallocate(mach_task_self(), port); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); +} + +OS_ALWAYS_INLINE +static inline void +firehose_mach_port_guard(mach_port_t port, bool strict, void *ctx) +{ + kern_return_t kr = mach_port_guard(mach_task_self(), port, + (mach_port_context_t)ctx, strict); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); +} + +OS_ALWAYS_INLINE +static inline void +firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, + mach_msg_header_t *hdr) +{ + mig_reply_error_t *msg_reply = (mig_reply_error_t *)alloca(maxmsgsz); + kern_return_t rc = KERN_SUCCESS; + bool expects_reply = false; + + if (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE) { + expects_reply = true; + } + + if (!fastpath(demux(hdr, &msg_reply->Head))) { + rc = MIG_BAD_ID; + } else if (msg_reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + rc = KERN_SUCCESS; + } else { + // if MACH_MSGH_BITS_COMPLEX is _not_ set, then msg_reply->RetCode + // is present + rc = msg_reply->RetCode; + } + + if (slowpath(rc == KERN_SUCCESS && expects_reply)) { + // if crashing here, some handler returned KERN_SUCCESS + // hoping for firehose_mig_server to perform the mach_msg() + // call to reply, and it doesn't know how to do that + DISPATCH_INTERNAL_CRASH(msg_reply->Head.msgh_id, + "firehose_mig_server doesn't handle replies"); + } + if (slowpath(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) { + // destroy the request - but not the reply port + hdr->msgh_remote_port = 0; + mach_msg_destroy(hdr); + } +} + +#endif // !KERNEL +#pragma mark - +#pragma mark firehose buffer + +OS_ALWAYS_INLINE +static inline firehose_buffer_chunk_t +firehose_buffer_chunk_for_address(void *addr) +{ + uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1); + return (firehose_buffer_chunk_t)chunk_addr; +} + +OS_ALWAYS_INLINE +static inline uint16_t +firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc) +{ + return (uint16_t)(fbc - fb->fb_chunks); +} + +OS_ALWAYS_INLINE +static inline firehose_buffer_chunk_t +firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref) +{ + return fb->fb_chunks + ref; +} + +#ifndef FIREHOSE_SERVER + +OS_ALWAYS_INLINE +static inline bool +firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size) +{ + return pos.fbc_next_entry_offs + size <= pos.fbc_private_offs; +} + +#if DISPATCH_PURE_C + +OS_ALWAYS_INLINE +static inline uint8_t +firehose_buffer_qos_bits_propagate(void) +{ +#ifndef KERNEL + pthread_priority_t pp = _dispatch_priority_propagate(); + + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return (uint8_t)(pp >> 
_PTHREAD_PRIORITY_QOS_CLASS_SHIFT);
+#else
+	return 0;
+#endif
+}
+
+OS_ALWAYS_INLINE
+static inline long
+firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp,
+		firehose_stream_t stream, uint16_t pubsize,
+		uint16_t privsize, uint8_t **privptr)
+{
+	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
+	firehose_buffer_pos_u orig, pos;
+	uint8_t qos_bits = firehose_buffer_qos_bits_propagate();
+	bool reservation_failed, stamp_delta_fits;
+
+	stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0;
+
+	// no acquire barrier needed because the returned space is only written to
+	os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos,
+			orig.fbc_atomic_pos, pos.fbc_atomic_pos, relaxed, {
+		if (unlikely(orig.fbc_atomic_pos == 0)) {
+			// we acquired a stale reference, and probably just faulted in
+			// a new page
+			// FIXME: if/when we hit this we should try to madvise it back FREE
+			os_atomic_rmw_loop_give_up(return 0);
+		}
+		if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) {
+			// nothing to do if the chunk is full, or the stream doesn't match,
+			// in which case the thread probably:
+			// - loaded the chunk ref,
+			// - was suspended for a long while, and
+			// - read the chunk only to find very old contents
+			os_atomic_rmw_loop_give_up(return 0);
+		}
+		pos = orig;
+		pos.fbc_qos_bits |= qos_bits;
+		if (unlikely(!firehose_buffer_pos_fits(orig,
+				ft_size + pubsize + privsize) || !stamp_delta_fits)) {
+			pos.fbc_flag_full = true;
+			reservation_failed = true;
+		} else {
+			// using these *_INC macros lets the compiler generate better
+			// assembly: updating the individual struct fields would force it
+			// to handle carry propagation, which we know cannot happen here
+			pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) *
+					FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC;
+			pos.fbc_atomic_pos -= privsize *
+					FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC;
+			pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC;
+			const uint16_t minimum_payload_size = 16;
+			if (!firehose_buffer_pos_fits(pos,
+					roundup(ft_size + minimum_payload_size, 8))) {
+				// if we can't even fit minimum_payload_size bytes of payload
+				// for the next tracepoint, just flush right away
+				pos.fbc_flag_full = true;
+			}
+			reservation_failed = false;
+		}
+	});
+
+	if (reservation_failed) {
+		if (pos.fbc_refcnt) {
+			// nothing to do: a thread is still writing and will pick up
+			// the "FULL" flag on flush, pushing the chunk as a consequence
+			return 0;
+		}
+		// caller must enqueue chunk
+		return -1;
+	}
+	if (privptr) {
+		*privptr = fbc->fbc_start + pos.fbc_private_offs;
+	}
+	return orig.fbc_next_entry_offs;
+}
+
+OS_ALWAYS_INLINE
+static inline void
+firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream)
+{
+	firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
+	firehose_stream_state_u old_state, new_state;
+	firehose_buffer_chunk_t fbc;
+	uint64_t stamp = UINT64_MAX; // will cause the reservation to fail
+	uint16_t ref;
+	long result;
+
+	old_state.fss_atomic_state =
+			os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed);
+	ref = old_state.fss_current;
+	if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) {
+		// there is no installed page, nothing to flush, go away
+		return;
+	}
+
+	fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
+	result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL);
+	if (likely(result < 0)) {
+		firehose_buffer_ring_enqueue(fb, old_state.fss_current);
+	}
+	if (unlikely(result > 0)) {
+		// because we pass a
silly stamp that requires a flush + DISPATCH_INTERNAL_CRASH(result, "Allocation should always fail"); + } + + // as a best effort try to uninstall the page we just flushed + // but failing is okay, let's not contend stupidly for something + // allocators know how to handle in the first place + new_state = old_state; + new_state.fss_current = 0; + (void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state, + old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed); +} + +/** + * @function firehose_buffer_tracepoint_reserve + * + * @abstract + * Reserves space in the firehose buffer for the tracepoint with specified + * characteristics. + * + * @discussion + * This returns a slot, with the length of the tracepoint already set, so + * that in case of a crash, we maximize our chance to be able to skip the + * tracepoint in case of a partial write. + * + * Once the tracepoint has been written, firehose_buffer_tracepoint_flush() + * must be called. + * + * @param fb + * The buffer to allocate from. + * + * @param stream + * The buffer stream to use. + * + * @param pubsize + * The size of the public data for this tracepoint, cannot be 0, doesn't + * take the size of the tracepoint header into account. + * + * @param privsize + * The size of the private data for this tracepoint, can be 0. + * + * @param privptr + * The pointer to the private buffer, can be NULL + * + * @result + * The pointer to the tracepoint. + */ +OS_ALWAYS_INLINE +static inline firehose_tracepoint_t +firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, + firehose_stream_t stream, uint16_t pubsize, + uint16_t privsize, uint8_t **privptr) +{ + firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; + firehose_stream_state_u old_state, new_state; + firehose_tracepoint_t ft; + firehose_buffer_chunk_t fbc; +#if KERNEL + bool failable = false; +#endif + bool success; + long result; + uint16_t ref; + + // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store + old_state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + for (;;) { + new_state = old_state; + + ref = old_state.fss_current; + if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) { + fbc = firehose_buffer_ref_to_chunk(fb, ref); + result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, + pubsize, privsize, privptr); + if (likely(result > 0)) { + ft = (firehose_tracepoint_t)(fbc->fbc_start + result); + stamp -= fbc->fbc_timestamp; + stamp |= (uint64_t)pubsize << 48; + // Needed for process death handling (tracepoint-begin) + // see firehose_buffer_stream_chunk_install + os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); + dispatch_compiler_barrier(); + return ft; + } + if (likely(result < 0)) { + firehose_buffer_ring_enqueue(fb, old_state.fss_current); + } + new_state.fss_current = 0; + } +#if KERNEL + if (failable) { + return NULL; + } +#endif + + if (unlikely(old_state.fss_allocator)) { + _dispatch_gate_wait(&fbs->fbs_state.fss_gate, + DLOCK_LOCK_DATA_CONTENTION); + old_state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); +#if KERNEL + failable = true; +#endif + continue; + } + + // if the thread doing the allocation is a low priority one + // we may starve high priority ones. 
+ // so disable preemption before we become an allocator + // the reenabling of the preemption is in + // firehose_buffer_stream_chunk_install + __firehose_critical_region_enter(); +#if KERNEL + new_state.fss_allocator = (uint32_t)cpu_number(); +#else + new_state.fss_allocator = _dispatch_tid_self(); +#endif + success = os_atomic_cmpxchgvw2o(fbs, fbs_state.fss_atomic_state, + old_state.fss_atomic_state, new_state.fss_atomic_state, + &old_state.fss_atomic_state, relaxed); + if (likely(success)) { + break; + } + __firehose_critical_region_leave(); + } + + struct firehose_tracepoint_query_s ask = { + .pubsize = pubsize, + .privsize = privsize, + .stream = stream, + .for_io = (firehose_stream_uses_io_bank & (1UL << stream)) != 0, + .stamp = stamp, + }; + return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr); +} + +/** + * @function firehose_buffer_tracepoint_flush + * + * @abstract + * Flushes a firehose tracepoint, and sends the chunk to the daemon when full + * and this was the last tracepoint writer for this chunk. + * + * @param fb + * The buffer the tracepoint belongs to. + * + * @param ft + * The tracepoint to flush. + * + * @param ftid + * The firehose tracepoint ID for that tracepoint. + * It is written last, preventing compiler reordering, so that its absence + * on crash recovery means the tracepoint is partial. + */ +OS_ALWAYS_INLINE +static inline void +firehose_buffer_tracepoint_flush(firehose_buffer_t fb, + firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) +{ + firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft); + firehose_buffer_pos_u pos; + + // Needed for process death handling (tracepoint-flush): + // We want to make sure the observers + // will see memory effects in program (asm) order. + // 1. write all the data to the tracepoint + // 2. write the tracepoint ID, so that seeing it means the tracepoint + // is valid +#ifdef KERNEL + ft->ft_thread = thread_tid(current_thread()); +#else + ft->ft_thread = _pthread_threadid_self_np_direct(); +#endif + // release barrier makes the log writes visible + os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); + pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, + FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); + if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) { + firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc)); + } +} + +#ifndef KERNEL +OS_ALWAYS_INLINE +static inline void +firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits) +{ + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + unsigned long orig_flags; + + orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed); + if (orig_flags != (orig_flags & ~bits)) { + firehose_buffer_update_limits(fb); + } +} + +OS_ALWAYS_INLINE +static inline void +firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits) +{ + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + unsigned long orig_flags; + + orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed); + if (orig_flags != (orig_flags | bits)) { + firehose_buffer_update_limits(fb); + } +} +#endif // !KERNEL + +#endif // !defined(FIREHOSE_SERVER) + +#endif // DISPATCH_PURE_C + +#endif // __FIREHOSE_INLINE_INTERNAL__ diff --git a/src/firehose/firehose_internal.h b/src/firehose/firehose_internal.h new file mode 100644 index 0000000..29d1ad2 --- /dev/null +++ b/src/firehose/firehose_internal.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_INTERNAL__ +#define __FIREHOSE_INTERNAL__ + +#if OS_FIREHOSE_SPI + +// make sure this is defined so that we get MIG_SERVER_DIED when a send once +// notification is sent back because of a crashed server +#ifndef __MigTypeCheck +#define __MigTypeCheck 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "os/firehose_server_private.h" +#include "firehose_buffer_internal.h" +#ifdef FIREHOSE_SERVER +#include "firehose_server_internal.h" +#endif +#include "firehose_inline_internal.h" + +#endif // OS_FIREHOSE_SPI + +#endif // __FIREHOSE_INTERNAL__ diff --git a/src/firehose/firehose_reply.defs b/src/firehose/firehose_reply.defs new file mode 100644 index 0000000..124defa --- /dev/null +++ b/src/firehose/firehose_reply.defs @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include + +#include "firehose_types.defs" + +subsystem firehoseReply 11700; + +serverprefix firehose_client_; +userprefix firehose_send_; + +skip; // firehose_register + +simpleroutine push_reply( +RequestPort req_port : mach_port_move_send_once_t; +in rtc : kern_return_t; +in push_reply : firehose_push_reply_t +); + +simpleroutine push_notify_async( +RequestPort comm_port : mach_port_t; +in push_reply : firehose_push_reply_t; +WaitTime timeout : natural_t +); diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c new file mode 100644 index 0000000..a6be2fa --- /dev/null +++ b/src/firehose/firehose_server.c @@ -0,0 +1,1137 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include "internal.h" +#include "firehoseServer.h" // MiG +#include "firehose_reply.h" // MiG + +#if __has_feature(c_static_assert) +_Static_assert(offsetof(struct firehose_client_s, fc_mem_sent_flushed_pos) + % 8 == 0, "Make sure atomic fields are properly aligned"); +#endif + +static struct firehose_server_s { + mach_port_t fs_bootstrap_port; + dispatch_mach_t fs_mach_channel; + dispatch_queue_t fs_ipc_queue; + dispatch_queue_t fs_snapshot_gate_queue; + dispatch_queue_t fs_io_drain_queue; + dispatch_queue_t fs_mem_drain_queue; + firehose_handler_t fs_handler; + + firehose_snapshot_t fs_snapshot; + + int fs_kernel_fd; + firehose_client_t fs_kernel_client; + + TAILQ_HEAD(, firehose_client_s) fs_clients; +} server_config = { + .fs_clients = TAILQ_HEAD_INITIALIZER(server_config.fs_clients), + .fs_kernel_fd = -1, +}; + +#pragma mark - +#pragma mark firehose client state machine + +static void firehose_server_demux(firehose_client_t fc, + mach_msg_header_t *msg_hdr); +static void firehose_client_cancel(firehose_client_t fc); +static void firehose_client_snapshot_finish(firehose_client_t fc, + firehose_snapshot_t snapshot, bool for_io); + +static void +firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) +{ + firehose_push_reply_t push_reply = { + .fpr_mem_flushed_pos = os_atomic_load2o(fc, fc_mem_flushed_pos,relaxed), + .fpr_io_flushed_pos = os_atomic_load2o(fc, fc_io_flushed_pos, relaxed), + }; + kern_return_t kr; + + firehose_atomic_max2o(fc, fc_mem_sent_flushed_pos, + push_reply.fpr_mem_flushed_pos, relaxed); + firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, + push_reply.fpr_io_flushed_pos, relaxed); + + if (fc->fc_is_kernel) { + if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) { + dispatch_assume_zero(errno); + } + } else { + if (reply_port == fc->fc_sendp) { + kr = firehose_send_push_notify_async(reply_port, push_reply, 0); + } else { + kr = firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply); + } + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } +} + +OS_ALWAYS_INLINE +static inline uint16_t +firehose_client_acquire_head(firehose_buffer_t fb, bool for_io) +{ + uint16_t head; + if (for_io) { + head = os_atomic_load2o(&fb->fb_header, fbh_ring_io_head, acquire); + } else { + head = os_atomic_load2o(&fb->fb_header, fbh_ring_mem_head, acquire); + } + return head; +} + +OS_ALWAYS_INLINE +static inline void +firehose_client_push_async_merge(firehose_client_t fc, pthread_priority_t pp, + bool for_io) +{ + if (for_io) { + _dispatch_source_merge_data(fc->fc_io_source, pp, 1); + } else { + _dispatch_source_merge_data(fc->fc_mem_source, pp, 1); + } +} + +OS_NOINLINE OS_COLD +static void +firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port) +{ + // this client is really confused, do *not* answer to asyncs anymore + fc->fc_memory_corrupted = true; + fc->fc_use_notifs = false; + + // XXX: do not cancel the data sources or a corrupted client could + // prevent snapshots from being taken if unlucky with ordering + + if (reply_port) { + kern_return_t kr = firehose_send_push_reply(reply_port, 0, + FIREHOSE_PUSH_REPLY_CORRUPTED); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } +} + +OS_ALWAYS_INLINE +static inline void +firehose_client_snapshot_mark_done(firehose_client_t fc, + firehose_snapshot_t snapshot, bool for_io) +{ + if (for_io) { + fc->fc_needs_io_snapshot = false; + } else { + 
fc->fc_needs_mem_snapshot = false;
+	}
+	dispatch_group_leave(snapshot->fs_group);
+}
+
+#define DRAIN_BATCH_SIZE  4
+#define FIREHOSE_DRAIN_FOR_IO  0x1
+#define FIREHOSE_DRAIN_POLL  0x2
+
+OS_NOINLINE
+static void
+firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
+{
+	firehose_buffer_t fb = fc->fc_buffer;
+	firehose_buffer_chunk_t fbc;
+	firehose_event_t evt;
+	uint16_t volatile *fbh_ring;
+	uint16_t flushed, ref, count = 0;
+	uint16_t client_head, client_flushed, sent_flushed;
+	firehose_snapshot_t snapshot = NULL;
+	bool for_io = (flags & FIREHOSE_DRAIN_FOR_IO);
+
+	if (for_io) {
+		evt = FIREHOSE_EVENT_IO_BUFFER_RECEIVED;
+		_Static_assert(FIREHOSE_EVENT_IO_BUFFER_RECEIVED ==
+				FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, "");
+		fbh_ring = fb->fb_header.fbh_io_ring;
+		sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos;
+		flushed = (uint16_t)fc->fc_io_flushed_pos;
+		if (fc->fc_needs_io_snapshot) {
+			snapshot = server_config.fs_snapshot;
+		}
+	} else {
+		evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED;
+		_Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED ==
+				FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, "");
+		fbh_ring = fb->fb_header.fbh_mem_ring;
+		sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos;
+		flushed = (uint16_t)fc->fc_mem_flushed_pos;
+		if (fc->fc_needs_mem_snapshot) {
+			snapshot = server_config.fs_snapshot;
+		}
+	}
+
+	if (slowpath(fc->fc_memory_corrupted)) {
+		goto corrupt;
+	}
+
+	client_head = flushed;
+	do {
+		if ((uint16_t)(flushed + count) == client_head) {
+			client_head = firehose_client_acquire_head(fb, for_io);
+			if ((uint16_t)(flushed + count) == client_head) {
+				break;
+			}
+			if ((uint16_t)(client_head - sent_flushed) >=
+					FIREHOSE_BUFFER_CHUNK_COUNT) {
+				goto corrupt;
+			}
+		}
+
+		// see firehose_buffer_ring_enqueue
+		do {
+			ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK;
+			ref = os_atomic_load(&fbh_ring[ref], relaxed);
+			ref &= FIREHOSE_RING_POS_IDX_MASK;
+		} while (fc->fc_is_kernel && !ref);
+		count++;
+		if (!ref) {
+			_dispatch_debug("Ignoring invalid page reference in ring: %d", ref);
+			continue;
+		}
+
+		fbc = firehose_buffer_ref_to_chunk(fb, ref);
+		server_config.fs_handler(fc, evt, fbc);
+		if (slowpath(snapshot)) {
+			snapshot->handler(fc, evt, fbc);
+		}
+		// clients not using notifications (single-threaded) always drain
+		// fully because they always use their full limit
+	} while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot);
+
+	if (count) {
+		// we don't load the full fbh_ring_tail because it is a 64-bit
+		// quantity and we only need 16 bits of it; on 32-bit ARM, there's
+		// no way to perform an atomic load of a 64-bit quantity from
+		// read-only memory.
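+		// the 16-bit truncation is safe: ring positions are only compared
+		// for equality or via (uint16_t) subtraction, so with e.g.
+		// flushed == 0xfffe and client_head == 0x0002, the delta
+		// (uint16_t)(0x0002 - 0xfffe) == 4 stays meaningful across wraparound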
+		if (for_io) {
+			os_atomic_add2o(fc, fc_io_flushed_pos, count, relaxed);
+			client_flushed = os_atomic_load2o(&fb->fb_header,
+					fbh_ring_tail.frp_io_flushed, relaxed);
+		} else {
+			os_atomic_add2o(fc, fc_mem_flushed_pos, count, relaxed);
+			client_flushed = os_atomic_load2o(&fb->fb_header,
+					fbh_ring_tail.frp_mem_flushed, relaxed);
+		}
+		if (fc->fc_is_kernel) {
+			// will fire firehose_client_notify() because port is MACH_PORT_DEAD
+			port = fc->fc_sendp;
+		} else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) {
+			port = fc->fc_sendp;
+		}
+	}
+
+	if (slowpath(snapshot)) {
+		firehose_client_snapshot_finish(fc, snapshot, for_io);
+		firehose_client_snapshot_mark_done(fc, snapshot, for_io);
+	}
+	if (port) {
+		firehose_client_notify(fc, port);
+	}
+	if (fc->fc_is_kernel) {
+		if (!(flags & FIREHOSE_DRAIN_POLL)) {
+			// see firehose_client_kernel_source_handle_event
+			dispatch_resume(fc->fc_kernel_source);
+		}
+	} else {
+		if (fc->fc_use_notifs && count >= DRAIN_BATCH_SIZE) {
+			// if we hit the drain batch size, the client probably logs a lot
+			// and there's more to drain, so optimistically schedule another
+			// drain; this is cheap since the queue is hot, and fair to other
+			// clients
+			firehose_client_push_async_merge(fc, 0, for_io);
+		}
+		if (count && server_config.fs_kernel_client) {
+			// the kernel is special because it can drop messages, so if we're
+			// draining, poll the kernel each time while we're bound to a thread
+			firehose_client_drain(server_config.fs_kernel_client,
+					MACH_PORT_NULL, flags | FIREHOSE_DRAIN_POLL);
+		}
+	}
+	return;
+
+corrupt:
+	if (snapshot) {
+		firehose_client_snapshot_mark_done(fc, snapshot, for_io);
+	}
+	firehose_client_mark_corrupted(fc, port);
+	// from now on all IO/mem drains depending on `for_io` will be no-ops
+	// (needs__snapshot: false, memory_corrupted: true). we can safely
+	// silence the corresponding source of drain wake-ups.
+	if (!fc->fc_is_kernel) {
+		dispatch_source_cancel(for_io ? fc->fc_io_source : fc->fc_mem_source);
+	}
+}
+
+static void
+firehose_client_drain_io_async(void *ctx)
+{
+	firehose_client_drain(ctx, MACH_PORT_NULL, FIREHOSE_DRAIN_FOR_IO);
+}
+
+static void
+firehose_client_drain_mem_async(void *ctx)
+{
+	firehose_client_drain(ctx, MACH_PORT_NULL, 0);
+}
+
+OS_NOINLINE
+static void
+firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED)
+{
+	firehose_snapshot_t snapshot = server_config.fs_snapshot;
+	firehose_buffer_t fb = fc->fc_buffer;
+
+	dispatch_assert_queue(server_config.fs_io_drain_queue);
+
+	// if a client dies between phase 1 and 2 of starting the snapshot
+	// (see firehose_snapshot_start()) there's no handler to call, but the
+	// dispatch group has to be adjusted for this client going away.
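+	// each dispatch_group_leave() below balances one of the two enters taken
+	// per client in firehose_snapshot_start() (done there as one atomic add)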
+ if (fc->fc_needs_io_snapshot) { + dispatch_group_leave(snapshot->fs_group); + fc->fc_needs_io_snapshot = false; + } + if (fc->fc_needs_mem_snapshot) { + dispatch_group_leave(snapshot->fs_group); + fc->fc_needs_mem_snapshot = false; + } + if (fc->fc_memory_corrupted) { + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CORRUPTED, + &fb->fb_chunks[0]); + } + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL); + + TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry); + fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; + fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; + _os_object_release(&fc->fc_as_os_object); +} + +OS_NOINLINE +static void +firehose_client_handle_death(void *ctxt) +{ + firehose_client_t fc = ctxt; + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + uint64_t mem_bitmap = 0, bitmap; + + if (fc->fc_memory_corrupted) { + return firehose_client_finalize(fc); + } + + dispatch_assert_queue(server_config.fs_io_drain_queue); + + // acquire to match release barriers from threads that died + os_atomic_thread_fence(acquire); + + bitmap = fbh->fbh_bank.fbb_bitmap & ~1ULL; + for (int for_io = 0; for_io < 2; for_io++) { + uint16_t volatile *fbh_ring; + uint16_t tail, flushed; + + if (for_io) { + fbh_ring = fbh->fbh_io_ring; + tail = fbh->fbh_ring_tail.frp_io_tail; + flushed = (uint16_t)fc->fc_io_flushed_pos; + } else { + fbh_ring = fbh->fbh_mem_ring; + tail = fbh->fbh_ring_tail.frp_mem_tail; + flushed = (uint16_t)fc->fc_mem_flushed_pos; + } + if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) { + fc->fc_memory_corrupted = true; + return firehose_client_finalize(fc); + } + + // remove the pages that we flushed already from the bitmap + for (; tail != flushed; tail++) { + uint16_t ring_pos = tail & FIREHOSE_RING_POS_IDX_MASK; + uint16_t ref = fbh_ring[ring_pos] & FIREHOSE_RING_POS_IDX_MASK; + + bitmap &= ~(1ULL << ref); + } + } + + firehose_snapshot_t snapshot = server_config.fs_snapshot; + + // Then look at all the allocated pages not seen in the ring + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + + bitmap &= ~(1ULL << ref); + if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (!fbc->fbc_pos.fbc_flag_io) { + mem_bitmap |= 1ULL << ref; + continue; + } + server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc); + if (fc->fc_needs_io_snapshot && snapshot) { + snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc); + } + } + + if (!mem_bitmap) { + return firehose_client_finalize(fc); + } + + dispatch_async(server_config.fs_mem_drain_queue, ^{ + uint64_t mem_bitmap_copy = mem_bitmap; + + while (mem_bitmap_copy) { + uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + + mem_bitmap_copy &= ~(1ULL << ref); + server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc); + if (fc->fc_needs_mem_snapshot && snapshot) { + snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc); + } + } + + dispatch_async_f(server_config.fs_io_drain_queue, fc, + 
(dispatch_function_t)firehose_client_finalize);
+	});
+}
+
+static void
+firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason,
+		dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED)
+{
+	mach_msg_header_t *msg_hdr;
+	firehose_client_t fc = ctx;
+	mach_port_t oldsendp, oldrecvp;
+
+	if (dmsg) {
+		msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
+		oldsendp = msg_hdr->msgh_remote_port;
+		oldrecvp = msg_hdr->msgh_local_port;
+	}
+
+	switch (reason) {
+	case DISPATCH_MACH_MESSAGE_RECEIVED:
+		if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) {
+			_dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)",
+					firehose_client_get_unique_pid(fc, NULL));
+			dispatch_mach_cancel(fc->fc_mach_channel);
+		} else {
+			firehose_server_demux(fc, msg_hdr);
+		}
+		break;
+
+	case DISPATCH_MACH_DISCONNECTED:
+		if (oldsendp) {
+			if (slowpath(oldsendp != fc->fc_sendp)) {
+				DISPATCH_INTERNAL_CRASH(oldsendp,
+						"disconnect event about unknown send-right");
+			}
+			firehose_mach_port_send_release(fc->fc_sendp);
+			fc->fc_sendp = MACH_PORT_NULL;
+		}
+		if (oldrecvp) {
+			if (slowpath(oldrecvp != fc->fc_recvp)) {
+				DISPATCH_INTERNAL_CRASH(oldrecvp,
+						"disconnect event about unknown receive-right");
+			}
+			firehose_mach_port_recv_dispose(fc->fc_recvp, fc);
+			fc->fc_recvp = MACH_PORT_NULL;
+		}
+		if (fc->fc_recvp == MACH_PORT_NULL && fc->fc_sendp == MACH_PORT_NULL) {
+			firehose_client_cancel(fc);
+		}
+		break;
+	}
+}
+
+#if !TARGET_OS_SIMULATOR
+static void
+firehose_client_kernel_source_handle_event(void *ctxt)
+{
+	firehose_client_t fc = ctxt;
+
+	// resumed in firehose_client_drain for both memory and I/O
+	dispatch_suspend(fc->fc_kernel_source);
+	dispatch_suspend(fc->fc_kernel_source);
+	dispatch_async_f(server_config.fs_mem_drain_queue,
+			fc, firehose_client_drain_mem_async);
+	dispatch_async_f(server_config.fs_io_drain_queue,
+			fc, firehose_client_drain_io_async);
+}
+#endif
+
+static inline void
+firehose_client_resume(firehose_client_t fc,
+		const struct firehose_client_connected_info_s *fcci)
+{
+	dispatch_assert_queue(server_config.fs_io_drain_queue);
+	TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry);
+	server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci);
+	if (fc->fc_is_kernel) {
+		dispatch_activate(fc->fc_kernel_source);
+	} else {
+		dispatch_mach_connect(fc->fc_mach_channel,
+				fc->fc_recvp, fc->fc_sendp, NULL);
+		dispatch_activate(fc->fc_io_source);
+		dispatch_activate(fc->fc_mem_source);
+	}
+}
+
+static void
+firehose_client_cancel(firehose_client_t fc)
+{
+	dispatch_mach_t dm;
+	dispatch_block_t block;
+
+	_dispatch_debug("client died (unique_pid: 0x%llx)",
+			firehose_client_get_unique_pid(fc, NULL));
+
+	dm = fc->fc_mach_channel;
+	fc->fc_mach_channel = NULL;
+	dispatch_release(dm);
+
+	fc->fc_use_notifs = false;
+	dispatch_source_cancel(fc->fc_io_source);
+	dispatch_source_cancel(fc->fc_mem_source);
+
+	block = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{
+		dispatch_async_f(server_config.fs_io_drain_queue, fc,
+				firehose_client_handle_death);
+	});
+	dispatch_async(server_config.fs_mem_drain_queue, block);
+	_Block_release(block);
+}
+
+static firehose_client_t
+_firehose_client_create(firehose_buffer_t fb)
+{
+	firehose_client_t fc;
+
+	fc = (firehose_client_t)_os_object_alloc_realized(FIREHOSE_CLIENT_CLASS,
+			sizeof(struct firehose_client_s));
+	fc->fc_buffer = fb;
+	fc->fc_mem_flushed_pos = fb->fb_header.fbh_bank.fbb_mem_flushed;
+	fc->fc_mem_sent_flushed_pos = fc->fc_mem_flushed_pos;
+	fc->fc_io_flushed_pos = fb->fb_header.fbh_bank.fbb_io_flushed;
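+	// like the mem pair above, a fresh client starts with its io "sent"
+	// watermark caught up to the flushed position, i.e. nothing in flight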
+ fc->fc_io_sent_flushed_pos = fc->fc_io_flushed_pos; + return fc; +} + +static firehose_client_t +firehose_client_create(firehose_buffer_t fb, + mach_port_t comm_recvp, mach_port_t comm_sendp) +{ + uint64_t unique_pid = fb->fb_header.fbh_uniquepid; + firehose_client_t fc = _firehose_client_create(fb); + dispatch_mach_t dm; + dispatch_source_t ds; + + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, + server_config.fs_mem_drain_queue); + _os_object_retain_internal_inline(&fc->fc_as_os_object); + dispatch_set_context(ds, fc); + dispatch_set_finalizer_f(ds, + (dispatch_function_t)_os_object_release_internal); + dispatch_source_set_event_handler_f(ds, firehose_client_drain_mem_async); + fc->fc_mem_source = ds; + + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, + server_config.fs_io_drain_queue); + _os_object_retain_internal_inline(&fc->fc_as_os_object); + dispatch_set_context(ds, fc); + dispatch_set_finalizer_f(ds, + (dispatch_function_t)_os_object_release_internal); + dispatch_source_set_event_handler_f(ds, firehose_client_drain_io_async); + fc->fc_io_source = ds; + + _dispatch_debug("FIREHOSE_REGISTER (unique_pid: 0x%llx)", unique_pid); + fc->fc_recvp = comm_recvp; + fc->fc_sendp = comm_sendp; + firehose_mach_port_guard(comm_recvp, true, fc); + dm = dispatch_mach_create_f("com.apple.firehose.peer", + server_config.fs_ipc_queue, + fc, firehose_client_handle_mach_event); + fc->fc_mach_channel = dm; + return fc; +} + +static void +firehose_kernel_client_create(void) +{ +#if !TARGET_OS_SIMULATOR + struct firehose_server_s *fs = &server_config; + firehose_buffer_map_info_t fb_map; + firehose_client_t fc; + dispatch_source_t ds; + int fd; + + while ((fd = open("/dev/oslog", O_RDWR)) < 0) { + if (errno == EINTR) { + continue; + } + if (errno == ENOENT) { + return; + } + DISPATCH_INTERNAL_CRASH(errno, "Unable to open /dev/oslog"); + } + + while (ioctl(fd, LOGBUFFERMAP, &fb_map) < 0) { + if (errno == EINTR) { + continue; + } + DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer"); + } + if (fb_map.fbmi_size != + FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE) { + DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size"); + } + + fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr); + fc->fc_is_kernel = true; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, + fs->fs_ipc_queue); + dispatch_set_context(ds, fc); + dispatch_source_set_event_handler_f(ds, + firehose_client_kernel_source_handle_event); + fc->fc_kernel_source = ds; + fc->fc_use_notifs = true; + fc->fc_sendp = MACH_PORT_DEAD; // causes drain() to call notify + + fs->fs_kernel_fd = fd; + fs->fs_kernel_client = fc; +#endif +} + +void +_firehose_client_dispose(firehose_client_t fc) +{ + vm_deallocate(mach_task_self(), (vm_address_t)fc->fc_buffer, + sizeof(*fc->fc_buffer)); + fc->fc_buffer = NULL; + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_FINALIZE, NULL); +} + +void +_firehose_client_xref_dispose(firehose_client_t fc) +{ + _dispatch_debug("Cleaning up client info for unique_pid 0x%llx", + firehose_client_get_unique_pid(fc, NULL)); + + dispatch_release(fc->fc_io_source); + fc->fc_io_source = NULL; + + dispatch_release(fc->fc_mem_source); + fc->fc_mem_source = NULL; +} + +uint64_t +firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out) +{ + firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header; + if (fc->fc_is_kernel) { + if (pid_out) *pid_out = 0; + return 0; + } + if (pid_out) *pid_out = 
fbh->fbh_pid ?: ~(pid_t)0; + return fbh->fbh_uniquepid ?: ~0ull; +} + +void * +firehose_client_get_metadata_buffer(firehose_client_t client, size_t *size) +{ + firehose_buffer_header_t fbh = &client->fc_buffer->fb_header; + + *size = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE; + return (void *)((uintptr_t)(fbh + 1) - *size); +} + +void * +firehose_client_get_context(firehose_client_t fc) +{ + return os_atomic_load2o(fc, fc_ctxt, relaxed); +} + +void * +firehose_client_set_context(firehose_client_t fc, void *ctxt) +{ + return os_atomic_xchg2o(fc, fc_ctxt, ctxt, relaxed); +} + +#pragma mark - +#pragma mark firehose server + +/* + * The current_message context stores the client info for the current message + * being handled. The only reason this works is because currently the message + * processing is serial. If that changes, this would not work. + */ +static firehose_client_t cur_client_info; + +static void +firehose_server_handle_mach_event(void *ctx OS_UNUSED, + dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, + mach_error_t error OS_UNUSED) +{ + mach_msg_header_t *msg_hdr = NULL; + + if (reason == DISPATCH_MACH_MESSAGE_RECEIVED) { + msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); + /* TODO: Assert this should be a register message */ + firehose_server_demux(NULL, msg_hdr); + } +} + +void +firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) +{ + struct firehose_server_s *fs = &server_config; + dispatch_queue_attr_t attr; + dispatch_mach_t dm; + + // just reference the string so that it's captured + (void)os_atomic_load(&__libfirehose_serverVersionString[0], relaxed); + + attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, + QOS_CLASS_USER_INITIATED, 0); + fs->fs_ipc_queue = dispatch_queue_create_with_target( + "com.apple.firehose.ipc", attr, NULL); + fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target( + "com.apple.firehose.snapshot-gate", DISPATCH_QUEUE_SERIAL, NULL); + fs->fs_io_drain_queue = dispatch_queue_create_with_target( + "com.apple.firehose.drain-io", DISPATCH_QUEUE_SERIAL, NULL); + fs->fs_mem_drain_queue = dispatch_queue_create_with_target( + "com.apple.firehose.drain-mem", DISPATCH_QUEUE_SERIAL, NULL); + + dm = dispatch_mach_create_f("com.apple.firehose.listener", + fs->fs_ipc_queue, NULL, firehose_server_handle_mach_event); + fs->fs_bootstrap_port = comm_port; + fs->fs_mach_channel = dm; + fs->fs_handler = _Block_copy(handler); + firehose_kernel_client_create(); +} + +void +firehose_server_assert_spi_version(uint32_t spi_version) +{ + if (spi_version != OS_FIREHOSE_SPI_VERSION) { + DISPATCH_CLIENT_CRASH(spi_version, "firehose server version mismatch (" + OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")"); + } + if (_firehose_spi_version != OS_FIREHOSE_SPI_VERSION) { + DISPATCH_CLIENT_CRASH(_firehose_spi_version, + "firehose libdispatch version mismatch (" + OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")"); + + } +} + +void +firehose_server_resume(void) +{ + struct firehose_server_s *fs = &server_config; + + if (fs->fs_kernel_client) { + dispatch_async(fs->fs_io_drain_queue, ^{ + struct firehose_client_connected_info_s fcci = { + .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION, + }; + firehose_client_resume(fs->fs_kernel_client, &fcci); + }); + } + dispatch_mach_connect(fs->fs_mach_channel, fs->fs_bootstrap_port, + MACH_PORT_NULL, NULL); +} + +#pragma mark - +#pragma mark firehose snapshot and peeking + +void +firehose_client_metadata_stream_peek(firehose_client_t fc, + firehose_event_t context, bool (^peek_should_start)(void), + 
bool (^peek)(firehose_buffer_chunk_t fbc)) +{ + if (context != FIREHOSE_EVENT_MEM_BUFFER_RECEIVED) { + return dispatch_sync(server_config.fs_mem_drain_queue, ^{ + firehose_client_metadata_stream_peek(fc, + FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, peek_should_start, peek); + }); + } + + if (peek_should_start && !peek_should_start()) { + return; + } + + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap; + + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + + bitmap &= ~(1ULL << ref); + if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (fbc->fbc_pos.fbc_stream != firehose_stream_metadata) { + continue; + } + if (!peek(fbc)) { + break; + } + } +} + +OS_NOINLINE OS_COLD +static void +firehose_client_snapshot_finish(firehose_client_t fc, + firehose_snapshot_t snapshot, bool for_io) +{ + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + firehose_snapshot_event_t evt; + uint16_t volatile *fbh_ring; + uint16_t tail, flushed; + uint64_t bitmap; + + bitmap = ~1ULL; + + if (for_io) { + fbh_ring = fbh->fbh_io_ring; + tail = fbh->fbh_ring_tail.frp_io_tail; + flushed = (uint16_t)fc->fc_io_flushed_pos; + evt = FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER; + } else { + fbh_ring = fbh->fbh_mem_ring; + tail = fbh->fbh_ring_tail.frp_mem_tail; + flushed = (uint16_t)fc->fc_mem_flushed_pos; + evt = FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER; + } + if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) { + fc->fc_memory_corrupted = true; + return; + } + + // remove the pages that we flushed already from the bitmap + for (; tail != flushed; tail++) { + uint16_t idx = tail & FIREHOSE_RING_POS_IDX_MASK; + uint16_t ref = fbh_ring[idx] & FIREHOSE_RING_POS_IDX_MASK; + + bitmap &= ~(1ULL << ref); + } + + // Remove pages that are free by AND-ing with the allocating bitmap. + // The load of fbb_bitmap may not be atomic, but it's ok because bits + // being flipped are pages we don't care about snapshotting. The worst thing + // that can happen is that we go peek at an unmapped page and we fault it in + bitmap &= fbh->fbh_bank.fbb_bitmap; + + // Then look at all the allocated pages not seen in the ring + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + + bitmap &= ~(1ULL << ref); + if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (fbc->fbc_pos.fbc_flag_io != for_io) { + continue; + } + snapshot->handler(fc, evt, fbc); + } +} + +static void +firehose_snapshot_start(void *ctxt) +{ + firehose_snapshot_t snapshot = ctxt; + firehose_client_t fci; + long n = 0; + + // 0. 
we need to be on the IO queue so that client connection and/or death + // cannot happen concurrently + dispatch_assert_queue(server_config.fs_io_drain_queue); + + // 1. mark all the clients participating in the current snapshot + // and enter the group for each bit set + TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) { + if (fci->fc_is_kernel) { +#if TARGET_OS_SIMULATOR + continue; +#endif + } + if (slowpath(fci->fc_memory_corrupted)) { + continue; + } + fci->fc_needs_io_snapshot = true; + fci->fc_needs_mem_snapshot = true; + n += 2; + } + if (n) { + // cheating: equivalent to dispatch_group_enter() n times + // without the acquire barriers that we don't need + os_atomic_add2o(snapshot->fs_group, dg_value, n, relaxed); + } + + dispatch_async(server_config.fs_mem_drain_queue, ^{ + // 2. make fs_snapshot visible, this is what triggers the snapshot + // logic from _drain() or handle_death(). until fs_snapshot is + // published, the bits set above are mostly ignored + server_config.fs_snapshot = snapshot; + + snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL); + + dispatch_async(server_config.fs_io_drain_queue, ^{ + firehose_client_t fcj; + + snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL); + + // match group_enter from firehose_snapshot() after MEM+IO_START + dispatch_group_leave(snapshot->fs_group); + + // 3. tickle all the clients. the list of clients may have changed + // since step 1, but worry not - new clients don't have + // fc_needs_*_snapshot set so drain is harmless; clients that + // were removed from the list have already left the group + // (see firehose_client_finalize()) + TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) { + if (fcj->fc_is_kernel) { +#if !TARGET_OS_SIMULATOR + firehose_client_kernel_source_handle_event(fcj); +#endif + } else { + dispatch_source_merge_data(fcj->fc_io_source, 1); + dispatch_source_merge_data(fcj->fc_mem_source, 1); + } + } + }); + }); +} + +static void +firehose_snapshot_finish(void *ctxt) +{ + firehose_snapshot_t fs = ctxt; + + fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL); + server_config.fs_snapshot = NULL; + + dispatch_release(fs->fs_group); + Block_release(fs->handler); + free(fs); + + // resume the snapshot gate queue to maybe handle the next snapshot + dispatch_resume(server_config.fs_snapshot_gate_queue); +} + +static void +firehose_snapshot_gate(void *ctxt) +{ + // prevent other snapshots from running until done + dispatch_suspend(server_config.fs_snapshot_gate_queue); + dispatch_async_f(server_config.fs_io_drain_queue, ctxt, + firehose_snapshot_start); +} + +void +firehose_snapshot(firehose_snapshot_handler_t handler) +{ + firehose_snapshot_t snapshot = malloc(sizeof(struct firehose_snapshot_s)); + + snapshot->handler = Block_copy(handler); + snapshot->fs_group = dispatch_group_create(); + + // keep the group entered until IO_START and MEM_START have been sent + // See firehose_snapshot_start() + dispatch_group_enter(snapshot->fs_group); + dispatch_group_notify_f(snapshot->fs_group, server_config.fs_io_drain_queue, + snapshot, firehose_snapshot_finish); + + dispatch_async_f(server_config.fs_snapshot_gate_queue, snapshot, + firehose_snapshot_gate); +} + +#pragma mark - +#pragma mark MiG handler routines + +kern_return_t +firehose_server_register(mach_port_t server_port OS_UNUSED, + mach_port_t mem_port, mach_vm_size_t mem_size, + mach_port_t comm_recvp, mach_port_t comm_sendp, + mach_port_t extra_info_port, mach_vm_size_t extra_info_size) +{ + mach_vm_address_t base_addr = 0; + 
firehose_client_t fc = NULL; + kern_return_t kr; + struct firehose_client_connected_info_s fcci = { + .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION, + }; + + if (mem_size != sizeof(union firehose_buffer_u)) { + return KERN_INVALID_VALUE; + } + + /* + * Request a MACH_NOTIFY_NO_SENDERS notification for recvp. That should + * indicate the client going away. + */ + mach_port_t previous = MACH_PORT_NULL; + kr = mach_port_request_notification(mach_task_self(), comm_recvp, + MACH_NOTIFY_NO_SENDERS, 0, comm_recvp, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + return KERN_FAILURE; + } + dispatch_assert(previous == MACH_PORT_NULL); + + /* Map the memory handle into the server address space */ + kr = mach_vm_map(mach_task_self(), &base_addr, mem_size, 0, + VM_FLAGS_ANYWHERE, mem_port, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + return KERN_NO_SPACE; + } + + if (extra_info_port && extra_info_size) { + mach_vm_address_t addr = 0; + kr = mach_vm_map(mach_task_self(), &addr, extra_info_size, 0, + VM_FLAGS_ANYWHERE, extra_info_port, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); + if (dispatch_assume_zero(kr)) { + mach_vm_deallocate(mach_task_self(), base_addr, mem_size); + return KERN_NO_SPACE; + } + fcci.fcci_data = (void *)(uintptr_t)addr; + fcci.fcci_size = (size_t)extra_info_size; + } + + fc = firehose_client_create((firehose_buffer_t)base_addr, + comm_recvp, comm_sendp); + dispatch_async(server_config.fs_io_drain_queue, ^{ + firehose_client_resume(fc, &fcci); + if (fcci.fcci_size) { + vm_deallocate(mach_task_self(), (vm_address_t)fcci.fcci_data, + fcci.fcci_size); + } + }); + + if (extra_info_port) firehose_mach_port_send_release(extra_info_port); + firehose_mach_port_send_release(mem_port); + return KERN_SUCCESS; +} + +kern_return_t +firehose_server_push_async(mach_port_t server_port OS_UNUSED, + qos_class_t qos, boolean_t for_io, boolean_t expects_notifs) +{ + firehose_client_t fc = cur_client_info; + pthread_priority_t pp = _pthread_qos_class_encode(qos, 0, + _PTHREAD_PRIORITY_ENFORCE_FLAG); + + _dispatch_debug("FIREHOSE_PUSH_ASYNC (unique_pid %llx)", + firehose_client_get_unique_pid(fc, NULL)); + if (!slowpath(fc->fc_memory_corrupted)) { + if (expects_notifs && !fc->fc_use_notifs) { + fc->fc_use_notifs = true; + } + firehose_client_push_async_merge(fc, pp, for_io); + } + return KERN_SUCCESS; +} + +kern_return_t +firehose_server_push(mach_port_t server_port OS_UNUSED, + mach_port_t reply_port, qos_class_t qos, boolean_t for_io, + firehose_push_reply_t *push_reply OS_UNUSED) +{ + firehose_client_t fc = cur_client_info; + dispatch_block_flags_t flags = DISPATCH_BLOCK_ENFORCE_QOS_CLASS; + dispatch_block_t block; + dispatch_queue_t q; + + _dispatch_debug("FIREHOSE_PUSH (unique_pid %llx)", + firehose_client_get_unique_pid(fc, NULL)); + + if (slowpath(fc->fc_memory_corrupted)) { + firehose_client_mark_corrupted(fc, reply_port); + return MIG_NO_REPLY; + } + + if (for_io) { + q = server_config.fs_io_drain_queue; + } else { + q = server_config.fs_mem_drain_queue; + } + + block = dispatch_block_create_with_qos_class(flags, qos, 0, ^{ + firehose_client_drain(fc, reply_port, + for_io ? 
FIREHOSE_DRAIN_FOR_IO : 0); + }); + dispatch_async(q, block); + _Block_release(block); + return MIG_NO_REPLY; +} + +static void +firehose_server_demux(firehose_client_t fc, mach_msg_header_t *msg_hdr) +{ + const size_t reply_size = + sizeof(union __ReplyUnion__firehose_server_firehose_subsystem); + + cur_client_info = fc; + firehose_mig_server(firehose_server, reply_size, msg_hdr); +} diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h new file mode 100644 index 0000000..7991721 --- /dev/null +++ b/src/firehose/firehose_server_internal.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_SERVER_INTERNAL__ +#define __FIREHOSE_SERVER_INTERNAL__ + +OS_OBJECT_CLASS_DECL(firehose_client, object); +#define FIREHOSE_CLIENT_CLASS OS_OBJECT_VTABLE(firehose_client) + +typedef struct firehose_snapshot_s *firehose_snapshot_t; +struct firehose_snapshot_s { + firehose_snapshot_handler_t handler; + dispatch_group_t fs_group; +}; + +struct firehose_client_s { + union { + _OS_OBJECT_HEADER(void *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); + struct _os_object_s fc_as_os_object; + }; + TAILQ_ENTRY(firehose_client_s) fc_entry; + + firehose_buffer_t fc_buffer; + uint64_t volatile fc_mem_sent_flushed_pos; + uint64_t volatile fc_mem_flushed_pos; + uint64_t volatile fc_io_sent_flushed_pos; + uint64_t volatile fc_io_flushed_pos; + + void *volatile fc_ctxt; + + union { + dispatch_mach_t fc_mach_channel; + dispatch_source_t fc_kernel_source; + }; + dispatch_source_t fc_io_source; + dispatch_source_t fc_mem_source; + mach_port_t fc_recvp; + mach_port_t fc_sendp; + bool fc_use_notifs; + bool fc_memory_corrupted; + bool fc_needs_io_snapshot; + bool fc_needs_mem_snapshot; + bool fc_is_kernel; +}; + +void +_firehose_client_xref_dispose(struct firehose_client_s *fc); +void +_firehose_client_dispose(struct firehose_client_s *fc); + +extern unsigned char __libfirehose_serverVersionString[]; +extern double __libfirehose_serverVersionNumber; + +#endif // __FIREHOSE_SERVER_INTERNAL__ diff --git a/src/firehose/firehose_server_object.m b/src/firehose/firehose_server_object.m new file mode 100644 index 0000000..6965ca0 --- /dev/null +++ b/src/firehose/firehose_server_object.m @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#if !USE_OBJC || _OS_OBJECT_OBJC_ARC +#error the firehose server requires the objc-runtime, no ARC +#endif + +@implementation OS_OBJECT_CLASS(firehose_client) +DISPATCH_UNAVAILABLE_INIT() ++ (void)load { } + +- (void)_xref_dispose +{ + _firehose_client_xref_dispose((struct firehose_client_s *)self); + [super _xref_dispose]; +} + +- (void)_dispose +{ + _firehose_client_dispose((struct firehose_client_s *)self); + [super _dispose]; +} + +- (NSString *)debugDescription +{ + return nil; +} +@end diff --git a/src/firehose/firehose_types.defs b/src/firehose/firehose_types.defs new file mode 100644 index 0000000..9462fd8 --- /dev/null +++ b/src/firehose/firehose_types.defs @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include + +import ; +import ; + +type firehose_push_reply_t = struct [2] of uint64_t; +type qos_class_t = unsigned; diff --git a/src/init.c b/src/init.c index 7cfa8dc..45cbff3 100644 --- a/src/init.c +++ b/src/init.c @@ -58,70 +58,110 @@ dispatch_atfork_parent(void) #pragma mark - #pragma mark dispatch_globals +DISPATCH_HIDE_SYMBOL(dispatch_assert_queue, 10.12, 10.0, 10.0, 3.0); +DISPATCH_HIDE_SYMBOL(dispatch_assert_queue_not, 10.12, 10.0, 10.0, 3.0); +DISPATCH_HIDE_SYMBOL(dispatch_queue_create_with_target, 10.12, 10.0, 10.0, 3.0); + #if DISPATCH_COCOA_COMPAT -void (*dispatch_begin_thread_4GC)(void); -void (*dispatch_end_thread_4GC)(void); void *(*_dispatch_begin_NSAutoReleasePool)(void); void (*_dispatch_end_NSAutoReleasePool)(void *); #endif -#if !DISPATCH_USE_DIRECT_TSD +#if DISPATCH_USE_THREAD_LOCAL_STORAGE +__thread struct dispatch_tsd __dispatch_tsd; +pthread_key_t __dispatch_tsd_key; +#elif !DISPATCH_USE_DIRECT_TSD pthread_key_t dispatch_queue_key; -pthread_key_t dispatch_sema4_key; +pthread_key_t dispatch_frame_key; pthread_key_t dispatch_cache_key; -pthread_key_t dispatch_io_key; -pthread_key_t dispatch_apply_key; +pthread_key_t dispatch_context_key; +pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; pthread_key_t dispatch_defaultpriority_key; #if DISPATCH_INTROSPECTION pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON pthread_key_t dispatch_bcounter_key; #endif -#endif // !DISPATCH_USE_DIRECT_TSD +pthread_key_t dispatch_sema4_key; +pthread_key_t dispatch_voucher_key; +pthread_key_t dispatch_deferred_items_key; +#endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE #if VOUCHER_USE_MACH_VOUCHER dispatch_once_t _voucher_task_mach_voucher_pred; mach_voucher_t _voucher_task_mach_voucher; -_voucher_atm_t _voucher_task_atm; -_voucher_activity_t _voucher_activity_default; +#if !VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +mach_voucher_t _voucher_default_task_mach_voucher; 
#endif -voucher_activity_mode_t _voucher_activity_mode; +dispatch_once_t _firehose_task_buffer_pred; +firehose_buffer_t _firehose_task_buffer; +const uint32_t _firehose_spi_version = OS_FIREHOSE_SPI_VERSION; +uint64_t _voucher_unique_pid; +voucher_activity_hooks_t _voucher_libtrace_hooks; +dispatch_mach_t _voucher_activity_debug_channel; +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_DEBUG int _dispatch_set_qos_class_enabled; +#endif +#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_MGR_THREAD +int _dispatch_kevent_workqueue_enabled; +#endif +#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT && \ + DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +int _dispatch_evfilt_machport_direct_enabled; +#endif +DISPATCH_HW_CONFIG(); +uint8_t _dispatch_unsafe_fork; +bool _dispatch_child_of_unsafe_fork; +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +bool _dispatch_memory_warn; +int _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; +#endif DISPATCH_NOINLINE -voucher_activity_mode_t -voucher_activity_get_mode(void) +bool +_dispatch_is_multithreaded(void) { - return _voucher_activity_mode; + return _dispatch_is_multithreaded_inline(); } -void -voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode) +DISPATCH_NOINLINE +bool +_dispatch_is_fork_of_multithreaded_parent(void) { - if (_voucher_activity_disabled()) return; - _voucher_activity_mode = mode; + return _dispatch_child_of_unsafe_fork; } -DISPATCH_HW_CONFIG(); -bool _dispatch_safe_fork = true, _dispatch_child_of_unsafe_fork; - DISPATCH_NOINLINE -bool -_dispatch_is_multithreaded(void) +void +_dispatch_fork_becomes_unsafe_slow(void) { - return !_dispatch_safe_fork; + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { + DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); + } } DISPATCH_NOINLINE -bool -_dispatch_is_fork_of_multithreaded_parent(void) +void +_dispatch_prohibit_transition_to_multithreaded(bool prohibit) { - return _dispatch_child_of_unsafe_fork; + if (prohibit) { + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { + DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); + } + } else { + os_atomic_and(&_dispatch_unsafe_fork, + (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + } } const struct dispatch_queue_offsets_s dispatch_queue_offsets = { - .dqo_version = 5, + .dqo_version = 6, .dqo_label = offsetof(struct dispatch_queue_s, dq_label), .dqo_label_size = sizeof(((dispatch_queue_t)NULL)->dq_label), .dqo_flags = 0, @@ -130,30 +170,16 @@ const struct dispatch_queue_offsets_s dispatch_queue_offsets = { .dqo_serialnum_size = sizeof(((dispatch_queue_t)NULL)->dq_serialnum), .dqo_width = offsetof(struct dispatch_queue_s, dq_width), .dqo_width_size = sizeof(((dispatch_queue_t)NULL)->dq_width), - .dqo_running = offsetof(struct dispatch_queue_s, dq_running), - .dqo_running_size = sizeof(((dispatch_queue_t)NULL)->dq_running), - .dqo_suspend_cnt = offsetof(struct dispatch_queue_s, do_suspend_cnt), - .dqo_suspend_cnt_size = sizeof(((dispatch_queue_t)NULL)->do_suspend_cnt), + .dqo_running = 0, + .dqo_running_size = 0, + .dqo_suspend_cnt = 0, + .dqo_suspend_cnt_size = 0, .dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq), .dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq), .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority), .dqo_priority_size = 
sizeof(((dispatch_queue_t)NULL)->dq_priority), }; -#if VOUCHER_USE_MACH_VOUCHER -const struct voucher_offsets_s voucher_offsets = { - .vo_version = 1, - .vo_activity_ids_count = offsetof(struct voucher_s, v_activities), - .vo_activity_ids_count_size = sizeof(((voucher_t)NULL)->v_activities), - .vo_activity_ids_array = (uint16_t)_voucher_activity_ids((voucher_t)(NULL)), - .vo_activity_ids_array_entry_size = sizeof(voucher_activity_id_t), -}; -#else // VOUCHER_USE_MACH_VOUCHER -const struct voucher_offsets_s voucher_offsets = { - .vo_version = 0, -}; -#endif // VOUCHER_USE_MACH_VOUCHER - #if DISPATCH_USE_DIRECT_TSD const struct dispatch_tsd_indexes_s dispatch_tsd_indexes = { .dti_version = 2, @@ -161,26 +187,21 @@ const struct dispatch_tsd_indexes_s dispatch_tsd_indexes = { .dti_voucher_index = dispatch_voucher_key, .dti_qos_class_index = dispatch_priority_key, }; -#else // DISPATCH_USE_DIRECT_TSD -#error Not implemented on this platform #endif // DISPATCH_USE_DIRECT_TSD // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_main_q = { - .do_vtable = DISPATCH_VTABLE(queue), + DISPATCH_GLOBAL_OBJECT_HEADER(queue_main), #if !DISPATCH_USE_RESOLVERS .do_targetq = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], #endif - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), .dq_label = "com.apple.main-thread", - .dq_running = 1, .dq_width = 1, - .dq_is_thread_bound = 1, + .dq_atomic_bits = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 1, }; @@ -188,36 +209,50 @@ struct dispatch_queue_s _dispatch_main_q = { #pragma mark - #pragma mark dispatch_queue_attr_t -#define DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, overcommit, concurrent) \ +#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, inactive) \ { \ - .do_vtable = DISPATCH_VTABLE(queue_attr), \ - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ - .do_next = DISPATCH_OBJECT_LISTLESS, \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \ .dqa_qos_class = (qos), \ .dqa_relative_priority = (qos) ? 
(prio) : 0, \ .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ + .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \ .dqa_concurrent = (concurrent), \ + .dqa_inactive = (inactive), \ + } + +#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, concurrent) \ + { \ + [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ + qos, prio, overcommit, freq, concurrent, false), \ + [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ + qos, prio, overcommit, freq, concurrent, true), \ } -#define DISPATCH_QUEUE_ATTR_KIND_INIT(qos, prio) \ +#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, prio, overcommit) \ { \ - [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 1), \ - [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 0), \ - [DQA_INDEX_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 1), \ - [DQA_INDEX_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 0), \ - [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 1),\ - [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 0),\ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 0), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 0), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 0), \ } #define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \ - [prio] = DISPATCH_QUEUE_ATTR_KIND_INIT(qos, -(prio)) + [prio] = { \ + [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified), \ + [DQA_INDEX_NON_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \ + [DQA_INDEX_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \ + } #define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \ { \ @@ -244,11 +279,13 @@ struct dispatch_queue_s _dispatch_main_q = { DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos) // DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased -// to array member [0][0][0][0] and their properties must match! +// to array member [0][0][0][0][0][0] and their properties must match! 
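+// (Illustrative note, reading off the declaration below: the six index
+// dimensions are, in order, QoS class, relative priority, overcommit,
+// autorelease frequency, concurrency, and inactivity, so a lookup is
+// conceptually &_dispatch_queue_attrs[qos][prio][oc][freq][conc][inactive].)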
const struct dispatch_queue_attr_s _dispatch_queue_attrs[] [DISPATCH_QUEUE_ATTR_PRIO_COUNT] [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] - [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] = { + [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT] + [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] + [DISPATCH_QUEUE_ATTR_INACTIVE_COUNT] = { DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UNSPECIFIED), DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(MAINTENANCE), DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(BACKGROUND), @@ -258,6 +295,20 @@ const struct dispatch_queue_attr_s _dispatch_queue_attrs[] DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INTERACTIVE), }; +#if DISPATCH_VARIANT_STATIC +// +struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = + DISPATCH_QUEUE_ATTR_INIT(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + unspecified, INHERIT, 1, false); +#endif // DISPATCH_VARIANT_STATIC + +// _dispatch_queue_attr_concurrent is aliased using libdispatch.aliases +// and the -alias_list linker option on Darwin but needs to be done manually +// for other platforms. +#ifndef __APPLE__ +extern struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent + __attribute__((__alias__("_dispatch_queue_attrs"))); +#endif #pragma mark - #pragma mark dispatch_vtables @@ -272,33 +323,72 @@ DISPATCH_VTABLE_INSTANCE(semaphore, DISPATCH_VTABLE_INSTANCE(group, .do_type = DISPATCH_GROUP_TYPE, .do_kind = "group", - .do_dispose = _dispatch_semaphore_dispose, - .do_debug = _dispatch_semaphore_debug, + .do_dispose = _dispatch_group_dispose, + .do_debug = _dispatch_group_debug, ); DISPATCH_VTABLE_INSTANCE(queue, - .do_type = DISPATCH_QUEUE_TYPE, + .do_type = DISPATCH_QUEUE_LEGACY_TYPE, .do_kind = "queue", .do_dispose = _dispatch_queue_dispose, + .do_suspend = _dispatch_queue_suspend, + .do_resume = _dispatch_queue_resume, .do_invoke = _dispatch_queue_invoke, - .do_probe = _dispatch_queue_probe, + .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, + .do_set_targetq = _dispatch_queue_set_target_queue, ); +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue, + .do_type = DISPATCH_QUEUE_SERIAL_TYPE, + .do_kind = "serial-queue", + .do_dispose = _dispatch_queue_dispose, + .do_suspend = _dispatch_queue_suspend, + .do_resume = _dispatch_queue_resume, + .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_invoke = _dispatch_queue_invoke, + .do_wakeup = _dispatch_queue_wakeup, + .do_debug = dispatch_queue_debug, + .do_set_targetq = _dispatch_queue_set_target_queue, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, queue, + .do_type = DISPATCH_QUEUE_CONCURRENT_TYPE, + .do_kind = "concurrent-queue", + .do_dispose = _dispatch_queue_dispose, + .do_suspend = _dispatch_queue_suspend, + .do_resume = _dispatch_queue_resume, + .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_invoke = _dispatch_queue_invoke, + .do_wakeup = _dispatch_queue_wakeup, + .do_debug = dispatch_queue_debug, + .do_set_targetq = _dispatch_queue_set_target_queue, +); + + DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, - .do_type = DISPATCH_QUEUE_ROOT_TYPE, + .do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE, .do_kind = "global-queue", .do_dispose = _dispatch_pthread_root_queue_dispose, - .do_probe = _dispatch_root_queue_probe, + .do_wakeup = _dispatch_root_queue_wakeup, + .do_debug = dispatch_queue_debug, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, queue, + .do_type = DISPATCH_QUEUE_SERIAL_TYPE, + .do_kind = "main-queue", + .do_dispose = _dispatch_queue_dispose, + .do_invoke = _dispatch_queue_invoke, + .do_wakeup = _dispatch_main_queue_wakeup, .do_debug = 
dispatch_queue_debug, ); DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, - .do_type = DISPATCH_QUEUE_ROOT_TYPE, + .do_type = DISPATCH_QUEUE_RUNLOOP_TYPE, .do_kind = "runloop-queue", .do_dispose = _dispatch_runloop_queue_dispose, .do_invoke = _dispatch_queue_invoke, - .do_probe = _dispatch_runloop_queue_probe, + .do_wakeup = _dispatch_runloop_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -306,7 +396,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, .do_type = DISPATCH_QUEUE_MGR_TYPE, .do_kind = "mgr-queue", .do_invoke = _dispatch_mgr_thread, - .do_probe = _dispatch_mgr_queue_probe, + .do_wakeup = _dispatch_mgr_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -314,8 +404,8 @@ DISPATCH_VTABLE_INSTANCE(queue_specific_queue, .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, .do_kind = "queue-context", .do_dispose = _dispatch_queue_specific_queue_dispose, - .do_invoke = (void*)_dispatch_queue_invoke, - .do_probe = (void *)_dispatch_queue_probe, + .do_invoke = (void *)_dispatch_queue_invoke, + .do_wakeup = (void *)_dispatch_queue_wakeup, .do_debug = (void *)dispatch_queue_debug, ); @@ -328,18 +418,27 @@ DISPATCH_VTABLE_INSTANCE(source, .do_type = DISPATCH_SOURCE_KEVENT_TYPE, .do_kind = "kevent-source", .do_dispose = _dispatch_source_dispose, + .do_suspend = (void *)_dispatch_queue_suspend, + .do_resume = (void *)_dispatch_queue_resume, + .do_finalize_activation = _dispatch_source_finalize_activation, .do_invoke = _dispatch_source_invoke, - .do_probe = _dispatch_source_probe, + .do_wakeup = _dispatch_source_wakeup, .do_debug = _dispatch_source_debug, + .do_set_targetq = (void *)_dispatch_queue_set_target_queue, ); +#if HAVE_MACH DISPATCH_VTABLE_INSTANCE(mach, .do_type = DISPATCH_MACH_CHANNEL_TYPE, .do_kind = "mach-channel", .do_dispose = _dispatch_mach_dispose, + .do_suspend = (void *)_dispatch_queue_suspend, + .do_resume = (void *)_dispatch_queue_resume, + .do_finalize_activation = _dispatch_mach_finalize_activation, .do_invoke = _dispatch_mach_invoke, - .do_probe = _dispatch_mach_probe, + .do_wakeup = _dispatch_mach_wakeup, .do_debug = _dispatch_mach_debug, + .do_set_targetq = (void *)_dispatch_queue_set_target_queue, ); DISPATCH_VTABLE_INSTANCE(mach_msg, @@ -349,8 +448,9 @@ DISPATCH_VTABLE_INSTANCE(mach_msg, .do_invoke = _dispatch_mach_msg_invoke, .do_debug = _dispatch_mach_msg_debug, ); +#endif // HAVE_MACH -#if !USE_OBJC +#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA DISPATCH_VTABLE_INSTANCE(data, .do_type = DISPATCH_DATA_TYPE, .do_kind = "data", @@ -364,6 +464,7 @@ DISPATCH_VTABLE_INSTANCE(io, .do_kind = "channel", .do_dispose = _dispatch_io_dispose, .do_debug = _dispatch_io_debug, + .do_set_targetq = _dispatch_io_set_target_queue, ); DISPATCH_VTABLE_INSTANCE(operation, @@ -379,18 +480,41 @@ DISPATCH_VTABLE_INSTANCE(disk, .do_dispose = _dispatch_disk_dispose, ); + +const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { + DC_VTABLE_ENTRY(ASYNC_REDIRECT, + .do_kind = "dc-redirect", + .do_invoke = _dispatch_async_redirect_invoke), +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, + .do_kind = "dc-mach-send-drain", + .do_invoke = _dispatch_mach_send_barrier_drain_invoke), + DC_VTABLE_ENTRY(MACH_SEND_BARRIER, + .do_kind = "dc-mach-send-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_RECV_BARRIER, + .do_kind = "dc-mach-recv-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + DC_VTABLE_ENTRY(OVERRIDE_STEALING, + .do_kind = "dc-override-stealing", + .do_invoke = 
_dispatch_queue_override_invoke), + DC_VTABLE_ENTRY(OVERRIDE_OWNING, + .do_kind = "dc-override-owning", + .do_invoke = _dispatch_queue_override_invoke), +#endif +}; + void _dispatch_vtable_init(void) { -#if USE_OBJC +#if OS_OBJECT_HAVE_OBJC2 // ObjC classes and dispatch vtables are co-located via linker order and // alias files, verify correct layout during initialization rdar://10640168 - DISPATCH_OBJC_CLASS_DECL(semaphore); - dispatch_assert((char*)DISPATCH_VTABLE(semaphore) - - (char*)DISPATCH_OBJC_CLASS(semaphore) == 0); dispatch_assert((char*)&DISPATCH_CONCAT(_,DISPATCH_CLASS(semaphore_vtable)) - - (char*)DISPATCH_OBJC_CLASS(semaphore) == - sizeof(_os_object_class_s)); + - (char*)DISPATCH_VTABLE(semaphore) == + offsetof(struct dispatch_semaphore_vtable_s, _os_obj_vtable)); #endif // USE_OBJC } @@ -407,12 +531,20 @@ _dispatch_build_init(void *context DISPATCH_UNUSED) size_t bufsz = sizeof(_dispatch_build); sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0); +#if TARGET_IPHONE_SIMULATOR + char *sim_version = getenv("SIMULATOR_RUNTIME_BUILD_VERSION"); + if (sim_version) { + (void)strlcat(_dispatch_build, " ", sizeof(_dispatch_build)); + (void)strlcat(_dispatch_build, sim_version, sizeof(_dispatch_build)); + } +#endif // TARGET_IPHONE_SIMULATOR + #else /* * XXXRW: What to do here for !Mac OS X? */ memset(_dispatch_build, 0, sizeof(_dispatch_build)); -#endif +#endif // __APPLE__ } static dispatch_once_t _dispatch_build_pred; @@ -447,19 +579,35 @@ _dispatch_bug_client(const char* msg) _dispatch_bug_log("BUG in libdispatch client: %s", msg); } +#if HAVE_MACH void _dispatch_bug_mach_client(const char* msg, mach_msg_return_t kr) { _dispatch_bug_log("BUG in libdispatch client: %s %s - 0x%x", msg, mach_error_string(kr), kr); } +#endif void _dispatch_bug_kevent_client(const char* msg, const char* filter, const char *operation, int err) { - _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s: \"%s\" - 0x%x", - msg, filter, operation, strerror(err), err); + if (operation && err) { + _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s: \"%s\" - 0x%x", + msg, filter, operation, strerror(err), err); + } else if (operation) { + _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s", + msg, filter, operation); + } else { + _dispatch_bug_log("BUG in libdispatch: %s[%s]: \"%s\" - 0x%x", + msg, filter, strerror(err), err); + } +} + +void +_dispatch_bug_deprecated(const char *msg) +{ + _dispatch_bug_log("DEPRECATED USE in libdispatch client: %s", msg); } void @@ -516,11 +664,11 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) struct timeval tv; gettimeofday(&tv, NULL); #if DISPATCH_DEBUG - dispatch_log_basetime = mach_absolute_time(); + dispatch_log_basetime = _dispatch_absolute_time(); #endif dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), - tv.tv_sec, tv.tv_usec); + tv.tv_sec, (int)tv.tv_usec); } } } @@ -548,7 +696,7 @@ _dispatch_logv_file(const char *msg, va_list ap) #if DISPATCH_DEBUG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t", - mach_absolute_time() - dispatch_log_basetime); + _dispatch_absolute_time() - dispatch_log_basetime); #endif r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap); if (r < 0) return; @@ -586,7 +734,7 @@ _dispatch_syslog(const char *msg) static inline void _dispatch_vsyslog(const char *msg, va_list ap) { - vsyslog(LOG_NOTICE, msg, *ap_ptr); + vsyslog(LOG_NOTICE, msg, ap); } #endif // DISPATCH_USE_SIMPLE_ASL @@ -630,7 +778,7 @@ static size_t _dispatch_object_debug2(dispatch_object_t dou, 
char* buf, size_t bufsiz) { DISPATCH_OBJECT_TFB(_dispatch_objc_debug, dou, buf, bufsiz); - if (dou._do->do_vtable->do_debug) { + if (dx_vtable(dou._do)->do_debug) { return dx_debug(dou._do, buf, bufsiz); } return strlcpy(buf, "NULL vtable slot: ", bufsiz); @@ -645,7 +793,7 @@ _dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) int r; #if DISPATCH_DEBUG && !DISPATCH_USE_OS_DEBUG_LOG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t\t%p\t", - mach_absolute_time() - dispatch_log_basetime, + _dispatch_absolute_time() - dispatch_log_basetime, (void *)_dispatch_thread_self()); #endif if (dou._do) { @@ -720,14 +868,35 @@ _dispatch_calloc(size_t num_items, size_t size) return buf; } +/** + * If the source string is mutable, allocates memory and copies the contents. + * Otherwise returns the source string. + */ +const char * +_dispatch_strdup_if_mutable(const char *str) +{ +#if HAVE_DYLD_IS_MEMORY_IMMUTABLE + size_t size = strlen(str) + 1; + if (slowpath(!_dyld_is_memory_immutable(str, size))) { + char *clone = (char *) malloc(size); + if (dispatch_assume(clone)) { + memcpy(clone, str, size); + } + return clone; + } + return str; +#else + return strdup(str); +#endif +} + #pragma mark - #pragma mark dispatch_block_t #ifdef __BLOCKS__ -#undef _dispatch_Block_copy -dispatch_block_t -_dispatch_Block_copy(dispatch_block_t db) +void * +(_dispatch_Block_copy)(void *db) { dispatch_block_t rval; @@ -737,7 +906,7 @@ _dispatch_Block_copy(dispatch_block_t db) } return rval; } - DISPATCH_CLIENT_CRASH("NULL was passed where a block should have been"); + DISPATCH_CLIENT_CRASH(0, "NULL was passed where a block should have been"); } void @@ -754,7 +923,8 @@ _dispatch_call_block_and_release(void *block) #pragma mark dispatch_client_callout // Abort on uncaught exceptions thrown from client callouts rdar://8577499 -#if DISPATCH_USE_CLIENT_CALLOUT && (__USING_SJLJ_EXCEPTIONS__ || !USE_OBJC) +#if DISPATCH_USE_CLIENT_CALLOUT && (__USING_SJLJ_EXCEPTIONS__ || !USE_OBJC || \ + OS_OBJECT_HAVE_OBJC1) // On platforms with SjLj exceptions, avoid the SjLj overhead on every callout // by clearing the unwinder's TSD pointer to the handler stack around callouts @@ -791,6 +961,7 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _dispatch_set_unwind_tsd(u); } +#if HAVE_MACH #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -805,6 +976,7 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, _dispatch_free_unwind_tsd(); _dispatch_set_unwind_tsd(u); } +#endif // HAVE_MACH #endif // DISPATCH_USE_CLIENT_CALLOUT @@ -813,7 +985,7 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, #if !USE_OBJC -static const _os_object_class_s _os_object_class; +static const _os_object_vtable_s _os_object_vtable; void _os_object_init(void) @@ -836,7 +1008,7 @@ _os_object_alloc_realized(const void *cls, size_t size) _os_object_t _os_object_alloc(const void *cls, size_t size) { - if (!cls) cls = &_os_object_class; + if (!cls) cls = &_os_object_vtable; return _os_object_alloc_realized(cls, size); } @@ -890,7 +1062,9 @@ os_release(void *obj) #if DISPATCH_COCOA_COMPAT -void *_dispatch_autorelease_pool_push(void) { +void* +_dispatch_autorelease_pool_push(void) +{ void *pool = NULL; if (_dispatch_begin_NSAutoReleasePool) { pool = _dispatch_begin_NSAutoReleasePool(); @@ -898,12 +1072,26 @@ void *_dispatch_autorelease_pool_push(void) { return pool; } -void _dispatch_autorelease_pool_pop(void *pool) { +void 
+_dispatch_autorelease_pool_pop(void *pool) +{ if (_dispatch_end_NSAutoReleasePool) { _dispatch_end_NSAutoReleasePool(pool); } } +void* +_dispatch_last_resort_autorelease_pool_push(void) +{ + return _dispatch_autorelease_pool_push(); +} + +void +_dispatch_last_resort_autorelease_pool_pop(void *pool) +{ + _dispatch_autorelease_pool_pop(pool); +} + #endif // DISPATCH_COCOA_COMPAT #endif // !USE_OBJC @@ -940,6 +1128,23 @@ const struct dispatch_source_type_s _dispatch_source_type_timer = { .init = dispatch_source_type_timer_init, }; +static void +dispatch_source_type_after_init(dispatch_source_t ds, + dispatch_source_type_t type, uintptr_t handle, unsigned long mask, + dispatch_queue_t q) +{ + dispatch_source_type_timer_init(ds, type, handle, mask, q); + ds->ds_needs_rearm = false; + ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER; +} + +const struct dispatch_source_type_s _dispatch_source_type_after = { + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + }, + .init = dispatch_source_type_after_init, +}; + static void dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds, dispatch_source_type_t type, uintptr_t handle, unsigned long mask, @@ -984,7 +1189,6 @@ const struct dispatch_source_type_s _dispatch_source_type_interval = { .init = dispatch_source_type_interval_init, }; -#if !DISPATCH_USE_SELECT_FALLBACK || DISPATCH_DYNAMIC_SELECT_FALLBACK static void dispatch_source_type_readwrite_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, @@ -992,20 +1196,18 @@ dispatch_source_type_readwrite_init(dispatch_source_t ds, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { - ds->ds_dkev->dk_kevent.flags |= EV_UDATA_SPECIFIC; - ds->ds_is_direct_kevent = true; + ds->ds_is_level = true; +#ifdef HAVE_DECL_NOTE_LOWAT // bypass kernel check for device kqueue support rdar://19004921 ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT; +#endif ds->ds_dkev->dk_kevent.data = 1; } -#else -#define dispatch_source_type_readwrite_init NULL -#endif const struct dispatch_source_type_s _dispatch_source_type_read = { .ke = { .filter = EVFILT_READ, - .flags = EV_DISPATCH, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, }, .init = dispatch_source_type_readwrite_init, }; @@ -1013,7 +1215,7 @@ const struct dispatch_source_type_s _dispatch_source_type_read = { const struct dispatch_source_type_s _dispatch_source_type_write = { .ke = { .filter = EVFILT_WRITE, - .flags = EV_DISPATCH, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, }, .init = dispatch_source_type_readwrite_init, }; @@ -1034,14 +1236,14 @@ _dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) } #endif +#if TARGET_IPHONE_SIMULATOR static void -dispatch_source_type_memorystatus_init(dispatch_source_t ds, +dispatch_source_type_memorypressure_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { -#if TARGET_IPHONE_SIMULATOR static dispatch_once_t pred; dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init); handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; @@ -1052,38 +1254,42 @@ dispatch_source_type_memorystatus_init(dispatch_source_t ds, ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; ds->ds_ident_hack = handle; ds->ds_pending_data_mask = mask; - ds->ds_memorystatus_override = 1; -#endif - ds->ds_is_level = false; + ds->ds_memorypressure_override = 1; } +#else +#define dispatch_source_type_memorypressure_init NULL +#endif 
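+
+/*
+ * Illustrative sketch (not part of this patch): the memorypressure source
+ * type below is consumed through the public
+ * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE API, roughly as follows (the handler
+ * body and QoS choice are example values):
+ *
+ *	dispatch_source_t ds = dispatch_source_create(
+ *			DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
+ *			DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL,
+ *			dispatch_get_global_queue(QOS_CLASS_UTILITY, 0));
+ *	dispatch_source_set_event_handler(ds, ^{
+ *		if (dispatch_source_get_data(ds) & DISPATCH_MEMORYPRESSURE_CRITICAL) {
+ *			// drop caches, trim heaps, etc.
+ *		}
+ *	});
+ *	dispatch_activate(ds);
+ */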
#ifndef NOTE_MEMORYSTATUS_LOW_SWAP #define NOTE_MEMORYSTATUS_LOW_SWAP 0x8 #endif -const struct dispatch_source_type_s _dispatch_source_type_memorystatus = { +const struct dispatch_source_type_s _dispatch_source_type_memorypressure = { .ke = { .filter = EVFILT_MEMORYSTATUS, .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, }, .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN - |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP, - .init = dispatch_source_type_memorystatus_init, + |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP + |NOTE_MEMORYSTATUS_PROC_LIMIT_WARN|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL, + .init = dispatch_source_type_memorypressure_init, }; static void dispatch_source_type_vm_init(dispatch_source_t ds, - dispatch_source_type_t type, - uintptr_t handle, - unsigned long mask, - dispatch_queue_t q) + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) { - // Map legacy vm pressure to memorystatus warning rdar://problem/15907505 + // Map legacy vm pressure to memorypressure warning rdar://problem/15907505 mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; ds->ds_pending_data_mask = mask; ds->ds_vmpressure_override = 1; - dispatch_source_type_memorystatus_init(ds, type, handle, mask, q); +#if TARGET_IPHONE_SIMULATOR + dispatch_source_type_memorypressure_init(ds, type, handle, mask, q); +#endif } const struct dispatch_source_type_s _dispatch_source_type_vm = { @@ -1097,27 +1303,24 @@ const struct dispatch_source_type_s _dispatch_source_type_vm = { #elif DISPATCH_USE_VM_PRESSURE -static void -dispatch_source_type_vm_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_is_level = false; -} - const struct dispatch_source_type_s _dispatch_source_type_vm = { .ke = { .filter = EVFILT_VM, .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, }, .mask = NOTE_VM_PRESSURE, - .init = dispatch_source_type_vm_init, }; #endif // DISPATCH_USE_VM_PRESSURE +const struct dispatch_source_type_s _dispatch_source_type_signal = { + .ke = { + .filter = EVFILT_SIGNAL, + .flags = EV_UDATA_SPECIFIC, + }, +}; + +#if !defined(__linux__) static void dispatch_source_type_proc_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, @@ -1144,20 +1347,16 @@ const struct dispatch_source_type_s _dispatch_source_type_proc = { .init = dispatch_source_type_proc_init, }; -const struct dispatch_source_type_s _dispatch_source_type_signal = { - .ke = { - .filter = EVFILT_SIGNAL, - .flags = EV_UDATA_SPECIFIC, - }, -}; - const struct dispatch_source_type_s _dispatch_source_type_vnode = { .ke = { .filter = EVFILT_VNODE, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, + .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, }, .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK| - NOTE_RENAME|NOTE_REVOKE + NOTE_RENAME|NOTE_FUNLOCK +#if HAVE_DECL_NOTE_REVOKE + |NOTE_REVOKE +#endif #if HAVE_DECL_NOTE_NONE |NOTE_NONE #endif @@ -1187,7 +1386,7 @@ const struct dispatch_source_type_s _dispatch_source_type_sock = { #ifdef EVFILT_SOCK .ke = { .filter = EVFILT_SOCK, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, + .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, }, .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | @@ -1197,12 +1396,15 @@ const 
struct dispatch_source_type_s _dispatch_source_type_sock = { #endif #ifdef NOTE_CONNECTED | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED +#endif +#ifdef NOTE_NOTIFY_ACK + | NOTE_NOTIFY_ACK #endif , #endif // EVFILT_SOCK }; +#endif // !defined(__linux__) -#if DISPATCH_USE_EV_UDATA_SPECIFIC static void dispatch_source_type_data_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, @@ -1210,11 +1412,12 @@ dispatch_source_type_data_init(dispatch_source_t ds, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { + ds->ds_is_installed = true; + ds->ds_is_custom_source = true; + ds->ds_is_direct_kevent = true; + ds->ds_pending_data_mask = ~0ul; ds->ds_needs_rearm = false; // not registered with kevent } -#else -#define dispatch_source_type_data_init NULL -#endif const struct dispatch_source_type_s _dispatch_source_type_data_add = { .ke = { @@ -1264,14 +1467,19 @@ dispatch_source_type_mach_recv_init(dispatch_source_t ds, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { - ds->ds_is_level = false; + ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE; +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (_dispatch_evfilt_machport_direct_enabled) return; + ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE; + ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + ds->ds_is_direct_kevent = false; +#endif } const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { .ke = { .filter = EVFILT_MACHPORT, - .flags = EV_DISPATCH, - .fflags = DISPATCH_MACH_RECV_MESSAGE, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, }, .init = dispatch_source_type_mach_recv_init, }; diff --git a/src/inline_internal.h b/src/inline_internal.h index 5cc4cd8..d1c73dd 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -38,10 +38,12 @@ DISPATCH_NOTHROW void _dispatch_client_callout(void *ctxt, dispatch_function_t f); DISPATCH_NOTHROW void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); +#if HAVE_MACH DISPATCH_NOTHROW void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error, dispatch_mach_handler_function_t f); +#endif // HAVE_MACH #else // !DISPATCH_USE_CLIENT_CALLOUT @@ -59,6 +61,7 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) return f(ctxt, i); } +#if HAVE_MACH DISPATCH_ALWAYS_INLINE static inline void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -67,39 +70,143 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, { return f(ctxt, reason, dmsg, error); } +#endif // HAVE_MACH #endif // !DISPATCH_USE_CLIENT_CALLOUT -#if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) - #pragma mark - #pragma mark _os_object_t & dispatch_object_t +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_has_vtable(dispatch_object_t dou) +{ + uintptr_t dc_flags = dou._dc->dc_flags; + + // vtables are pointers far away from the low page in memory + return dc_flags > 0xffful; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_continuation(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return dx_metatype(dou._do) == _DISPATCH_CONTINUATION_TYPE; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_has_type(dispatch_object_t dou, unsigned long type) +{ + return _dispatch_object_has_vtable(dou) && dx_type(dou._do) == type; +} + +DISPATCH_ALWAYS_INLINE +static 
inline bool +_dispatch_object_is_redirection(dispatch_object_t dou) +{ + return _dispatch_object_has_type(dou, + DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT)); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_barrier(dispatch_object_t dou) +{ + dispatch_queue_flags_t dq_flags; + + if (!_dispatch_object_has_vtable(dou)) { + return (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT); + } + switch (dx_metatype(dou._do)) { + case _DISPATCH_QUEUE_TYPE: + case _DISPATCH_SOURCE_TYPE: + dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed); + return dq_flags & DQF_BARRIER_BIT; + default: + return false; + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_slow_item(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_slow_non_barrier(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return ((dou._dc->dc_flags & + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == + (DISPATCH_OBJ_SYNC_SLOW_BIT)); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_slow_barrier(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return ((dou._dc->dc_flags & + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)); +} DISPATCH_ALWAYS_INLINE static inline _os_object_t _os_object_retain_internal_inline(_os_object_t obj) { int ref_cnt = _os_object_refcnt_inc(obj); - if (slowpath(ref_cnt <= 0)) { - DISPATCH_CRASH("Resurrection of an object"); + if (unlikely(ref_cnt <= 0)) { + _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } return obj; } +DISPATCH_ALWAYS_INLINE +static inline void +_os_object_release_internal_inline_no_dispose(_os_object_t obj) +{ + int ref_cnt = _os_object_refcnt_dec(obj); + if (likely(ref_cnt >= 0)) { + return; + } + if (ref_cnt == 0) { + _OS_OBJECT_CLIENT_CRASH("Unexpected release of an object"); + } + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); +} + DISPATCH_ALWAYS_INLINE static inline void _os_object_release_internal_inline(_os_object_t obj) { int ref_cnt = _os_object_refcnt_dec(obj); - if (fastpath(ref_cnt >= 0)) { + if (likely(ref_cnt >= 0)) { return; } - if (slowpath(ref_cnt < -1)) { - DISPATCH_CRASH("Over-release of an object"); + if (unlikely(ref_cnt < -1)) { + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } #if DISPATCH_DEBUG - if (slowpath(obj->os_obj_xref_cnt >= 0)) { - DISPATCH_CRASH("Release while external references exist"); + int xref_cnt = obj->os_obj_xref_cnt; + if (unlikely(xref_cnt >= 0)) { + DISPATCH_INTERNAL_CRASH(xref_cnt, + "Release while external references exist"); } #endif // _os_object_refcnt_dispose_barrier() is in _os_object_dispose() @@ -120,74 +227,1198 @@ _dispatch_release(dispatch_object_t dou) _os_object_release_internal_inline(dou._os_obj); } +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_release_tailcall(dispatch_object_t dou) +{ + _os_object_release_internal(dou._os_obj); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL +static inline void +_dispatch_object_set_target_queue_inline(dispatch_object_t dou, + dispatch_queue_t tq) +{ + _dispatch_retain(tq); + tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release); + if (tq) _dispatch_release(tq); + _dispatch_object_debug(dou._do, "%s", __func__); +} + +#endif // DISPATCH_PURE_C +#pragma mark - +#pragma mark 
dispatch_thread +#if DISPATCH_PURE_C + +#define DISPATCH_DEFERRED_ITEMS_MAGIC 0xdefe55edul /* deferred */ +#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8 +#ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN +_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= + DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + "our list should not be longer than the kernel's"); +#endif + +typedef struct dispatch_deferred_items_s { + uint32_t ddi_magic; + dispatch_queue_t ddi_stashed_dq; + struct dispatch_object_s *ddi_stashed_dou; + dispatch_priority_t ddi_stashed_pp; + int ddi_nevents; + int ddi_maxevents; + _dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; +} dispatch_deferred_items_s, *dispatch_deferred_items_t; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_deferred_items_set(dispatch_deferred_items_t ddi) +{ + _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_deferred_items_t +_dispatch_deferred_items_get(void) +{ + dispatch_deferred_items_t ddi = (dispatch_deferred_items_t) + _dispatch_thread_getspecific(dispatch_deferred_items_key); + if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) { + return ddi; + } + return NULL; +} + +#endif // DISPATCH_PURE_C +#pragma mark - +#pragma mark dispatch_thread +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline dispatch_thread_context_t +_dispatch_thread_context_find(const void *key) +{ + dispatch_thread_context_t dtc = + _dispatch_thread_getspecific(dispatch_context_key); + while (dtc) { + if (dtc->dtc_key == key) { + return dtc; + } + dtc = dtc->dtc_prev; + } + return NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_context_push(dispatch_thread_context_t ctxt) +{ + ctxt->dtc_prev = _dispatch_thread_getspecific(dispatch_context_key); + _dispatch_thread_setspecific(dispatch_context_key, ctxt); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_context_pop(dispatch_thread_context_t ctxt) +{ + dispatch_assert(_dispatch_thread_getspecific(dispatch_context_key) == ctxt); + _dispatch_thread_setspecific(dispatch_context_key, ctxt->dtc_prev); +} + +typedef struct dispatch_thread_frame_iterator_s { + dispatch_queue_t dtfi_queue; + dispatch_thread_frame_t dtfi_frame; +} *dispatch_thread_frame_iterator_t; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_iterate_start(dispatch_thread_frame_iterator_t it) +{ + _dispatch_thread_getspecific_pair( + dispatch_queue_key, (void **)&it->dtfi_queue, + dispatch_frame_key, (void **)&it->dtfi_frame); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it) +{ + dispatch_thread_frame_t dtf = it->dtfi_frame; + dispatch_queue_t dq = it->dtfi_queue; + + if (dtf) { + if (dq->do_targetq) { + // redirections and trysync_f may skip some frames, + // so we need to simulate seeing the missing links + // however the bottom root queue is always present + it->dtfi_queue = dq->do_targetq; + if (it->dtfi_queue == dtf->dtf_queue) { + it->dtfi_frame = dtf->dtf_prev; + } + } else { + it->dtfi_queue = dtf->dtf_queue; + it->dtfi_frame = dtf->dtf_prev; + } + } else if (dq) { + it->dtfi_queue = dq->do_targetq; + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_thread_frame_find_queue(dispatch_queue_t dq) +{ + struct dispatch_thread_frame_iterator_s it; + + _dispatch_thread_frame_iterate_start(&it); + while (it.dtfi_queue) { + if (it.dtfi_queue == dq) { + return true; + } + _dispatch_thread_frame_iterate_next(&it); + } + 
return false; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_thread_frame_t +_dispatch_thread_frame_get_current(void) +{ + return _dispatch_thread_getspecific(dispatch_frame_key); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_setspecific(dispatch_frame_key, dtf); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_getspecific_packed_pair( + dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq) +{ + _dispatch_thread_frame_save_state(dtf); + _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, + dispatch_frame_key, dtf); + dtf->dtf_deferred = NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf, + dispatch_queue_t dq, dispatch_thread_frame_t new_base) +{ + _dispatch_thread_frame_save_state(dtf); + _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, + dispatch_frame_key, new_base); + dtf->dtf_deferred = NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_pop(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_setspecific_packed_pair( + dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_thread_frame_stash(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_getspecific_pair( + dispatch_queue_key, (void **)&dtf->dtf_queue, + dispatch_frame_key, (void **)&dtf->dtf_prev); + _dispatch_thread_frame_pop(dtf->dtf_prev); + return dtf->dtf_queue; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_frame_pop(dtf); +} + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_wqthread_override_start_check_owner(mach_port_t thread, + pthread_priority_t pp, mach_port_t *ulock_addr) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return 0; + return _pthread_workqueue_override_start_direct_check_owner(thread, + pp, ulock_addr); +#else + (void)thread; (void)pp; (void)ulock_addr; + return 0; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wqthread_override_start(mach_port_t thread, + pthread_priority_t pp) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_workqueue_override_start_direct(thread, pp); +#else + (void)thread; (void)pp; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wqthread_override_reset(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_workqueue_override_reset(); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t pp, + void *resource) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_qos_override_start_direct(thread, pp, resource); +#else + (void)thread; (void)pp; (void)resource; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_override_end(mach_port_t thread, void *resource) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_qos_override_end_direct(thread, resource); +#else + (void)thread; (void)resource; +#endif +} + +#if DISPATCH_DEBUG_QOS && 
HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_qos_class_is_valid(pthread_priority_t pp) +{ + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT + + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) { + return false; + } + return true; +} +#define _dispatch_assert_is_valid_qos_class(pp) ({ typeof(pp) _pp = (pp); \ + if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \ + DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \ + } \ + }) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_qos_override_is_valid(pthread_priority_t pp) +{ + if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) { + return false; + } + return _dispatch_qos_class_is_valid(pp); +} +#define _dispatch_assert_is_valid_qos_override(pp) ({ typeof(pp) _pp = (pp); \ + if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \ + DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \ + } \ + }) +#else +#define _dispatch_assert_is_valid_qos_override(pp) (void)(pp) +#define _dispatch_assert_is_valid_qos_class(pp) (void)(pp) +#endif + +#endif // DISPATCH_PURE_C +#pragma mark - +#pragma mark dispatch_queue_t state accessors +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags(dispatch_queue_t dq) +{ + return os_atomic_load2o(dq, dq_atomic_flags, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_set(dispatch_queue_t dq, + dispatch_queue_flags_t bits) +{ + return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq, + dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) +{ + dispatch_queue_flags_t oflags, nflags; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, { + nflags = (oflags | add_bits) & ~clr_bits; + }); + return oflags; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq, + dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) +{ + dispatch_queue_flags_t oflags, nflags; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, { + nflags = (oflags | add_bits) & ~clr_bits; + }); + return nflags; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq, + dispatch_queue_flags_t bits) +{ + return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_clear(dispatch_queue_t dq, + dispatch_queue_flags_t bits) +{ + return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_is_thread_bound(dispatch_queue_t dq) +{ + return _dispatch_queue_atomic_flags(dq) & DQF_THREAD_BOUND; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_cannot_trysync(dispatch_queue_t dq) +{ + return _dispatch_queue_atomic_flags(dq) & DQF_CANNOT_TRYSYNC; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_label_needs_free(dispatch_queue_t dq) +{ + return _dispatch_queue_atomic_flags(dq) & DQF_LABEL_NEEDS_FREE; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_invoke_flags_t +_dispatch_queue_autorelease_frequency(dispatch_queue_t dq) +{ + const unsigned long factor = + DISPATCH_INVOKE_AUTORELEASE_ALWAYS / DQF_AUTORELEASE_ALWAYS; + 
dispatch_static_assert(factor > 0); + + dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dq); + + qaf &= _DQF_AUTORELEASE_MASK; + return (dispatch_invoke_flags_t)qaf * factor; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_invoke_flags_t +_dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq, + dispatch_invoke_flags_t flags) +{ + dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dq); + + if (qaf) { + flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; + flags |= qaf; + } + return flags; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_has_immutable_target(dispatch_queue_t dq) +{ + if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) { + return false; + } + return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE; +} + +#endif // DISPATCH_PURE_C +#ifndef __cplusplus + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_suspend_cnt(uint64_t dq_state) +{ + return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_side_suspend_cnt(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_extract_width_bits(uint64_t dq_state) +{ + dq_state &= DISPATCH_QUEUE_WIDTH_MASK; + return (uint32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT); +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_available_width(uint64_t dq_state) +{ + uint32_t full = DISPATCH_QUEUE_WIDTH_FULL; + if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) { + return full - _dq_state_extract_width_bits(dq_state); + } + return 0; +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_used_width(uint64_t dq_state, uint16_t dq_width) +{ + uint32_t full = DISPATCH_QUEUE_WIDTH_FULL; + uint32_t width = _dq_state_extract_width_bits(dq_state); + + if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) { + // DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width + // is pre-reservation that we want to ignore + return width - (full - dq_width) - (dq_width - 1); + } + return width - (full - dq_width); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_suspended(uint64_t dq_state) +{ + return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION; +} +#define DISPATCH_QUEUE_IS_SUSPENDED(x) _dq_state_is_suspended((x)->dq_state) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_inactive(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_INACTIVE; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_needs_activation(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_in_barrier(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_IN_BARRIER; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_available_width(uint64_t dq_state) +{ + return !(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_pending_barrier(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_PENDING_BARRIER; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_dirty(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_DIRTY; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_enqueued(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_ENQUEUED; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_override(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE; +} + +DISPATCH_ALWAYS_INLINE +static inline 
dispatch_lock_owner +_dq_state_drain_owner(uint64_t dq_state) +{ + return _dispatch_lock_owner((dispatch_lock)dq_state); +} +#define DISPATCH_QUEUE_DRAIN_OWNER(dq) \ + _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed)) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_drain_pended(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_DRAIN_PENDED); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_drain_locked_by(uint64_t dq_state, uint32_t owner) +{ + if (_dq_state_drain_pended(dq_state)) { + return false; + } + return _dq_state_drain_owner(dq_state) == owner; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_drain_locked(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_waiters(uint64_t dq_state) +{ + return _dispatch_lock_has_waiters((dispatch_lock)dq_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_sync_runnable(uint64_t dq_state) +{ + return dq_state < DISPATCH_QUEUE_IN_BARRIER; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_runnable(uint64_t dq_state) +{ + return dq_state < DISPATCH_QUEUE_WIDTH_FULL_BIT; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_should_wakeup(uint64_t dq_state) +{ + return _dq_state_is_runnable(dq_state) && + !_dq_state_is_enqueued(dq_state) && + !_dq_state_drain_locked(dq_state); +} + +#endif // __cplusplus +#pragma mark - +#pragma mark dispatch_queue_t state machine +#ifndef __cplusplus + +static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu, + pthread_priority_t pp); +static inline bool _dispatch_queue_need_override_retain( + dispatch_queue_class_t dqu, pthread_priority_t pp); +static inline dispatch_priority_t _dispatch_queue_reset_override_priority( + dispatch_queue_class_t dqu, bool qp_is_floor); +static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, + dispatch_priority_t new_op); +static inline pthread_priority_t _dispatch_get_defaultpriority(void); +static inline void _dispatch_set_defaultpriority_override(void); +static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp); +static inline pthread_priority_t _dispatch_get_priority(void); +static inline pthread_priority_t _dispatch_set_defaultpriority( + pthread_priority_t pp, pthread_priority_t *new_pp); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_xref_dispose(struct dispatch_queue_s *dq) +{ + if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object"); + } + os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); +} + +#endif +#if DISPATCH_PURE_C + +// Note to later developers: ensure that any initialization changes are +// made for statically allocated queues (i.e. _dispatch_main_q). 
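+
+/*
+ * Illustrative sketch (not part of this patch): the `inactive` path below
+ * backs the public initially-inactive attribute, roughly:
+ *
+ *	dispatch_queue_t q = dispatch_queue_create("com.example.q",
+ *			dispatch_queue_attr_make_initially_inactive(
+ *			DISPATCH_QUEUE_SERIAL));
+ *	dispatch_set_target_queue(q, tq);	// safe: nothing can run yet
+ *	dispatch_activate(q);
+ *
+ * (the queue label and target queue `tq` are example values)
+ */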
+static inline void +_dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf, + uint16_t width, bool inactive) +{ + uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width); + + if (inactive) { + dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; + dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume + } + dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT; + os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); + dq->dq_state = dq_state; + dq->dq_override_voucher = DISPATCH_NO_VOUCHER; + dq->dq_serialnum = + os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed); +} + +/* Used by: + * - _dispatch_queue_set_target_queue + * - changing dispatch source handlers + * + * Tries to prevent concurrent wakeup of an inactive queue by suspending it. + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) +{ + uint64_t dq_state, value; + + (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + if (!fastpath(_dq_state_is_inactive(dq_state))) { + os_atomic_rmw_loop_give_up(return false); + } + value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL; + }); + if (slowpath(!_dq_state_is_suspended(dq_state)) || + slowpath(_dq_state_has_side_suspend_cnt(dq_state))) { + // Crashing here means that 128+ dispatch_suspend() calls have been + // made on an inactive object and then dispatch_set_target_queue() or + // dispatch_set_*_handler() has been called. + // + // We don't want to handle the side suspend count in a codepath that + // needs to be fast. + DISPATCH_CLIENT_CRASH(dq, "Too many calls to dispatch_suspend() " + "prior to calling dispatch_set_target_queue() " + "or dispatch_set_*_handler()"); + } + return true; +} + +/* Must be used by any caller meaning to do a speculative wakeup when the caller + * was preventing other wakeups (for example dispatch_resume() or a drainer not + * doing a drain_try_unlock() and not observing DIRTY) + * + * In that case this call loads DIRTY with an acquire barrier so that when + * other threads have made changes (such as dispatch_source_cancel()) the + * caller can take these state machine changes into account in its decision to + * wake up the object. 
+ */ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state, + dispatch_wakeup_flags_t flags) +{ + if (_dq_state_should_wakeup(dq_state)) { + if (slowpath(_dq_state_is_dirty(dq_state))) { + // + // seq_cst wrt state changes that were flushed and not acted upon + os_atomic_thread_fence(acquire); + } + return dx_wakeup(dq, 0, flags); + } + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); + } +} + +/* Used by: + * - _dispatch_queue_class_invoke (normal path) + * - _dispatch_queue_override_invoke (stealer) + * + * Initial state must be { sc:0, ib:0, qf:0, dl:0 } + * Final state forces { dl:self, qf:1, d: 0 } + * ib:1 is forced when the width acquired is equivalent to the barrier width + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline uint64_t +_dispatch_queue_drain_try_lock(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *dq_state) +{ + uint64_t pending_barrier_width = + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t xor_owner_and_set_full_width = + _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT; + uint64_t clear_enqueued_bit, old_state, new_state; + + if (flags & DISPATCH_INVOKE_STEALING) { + clear_enqueued_bit = 0; + } else { + clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED; + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state; + new_state ^= clear_enqueued_bit; + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + // + // Only keep the HAS_WAITER bit (and ENQUEUED if stealing). + // In particular acquiring the drain lock clears the DIRTY bit + // + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + // + // For the NOWAITERS_BIT case, the thread identity + // has NOWAITERS_BIT set, and NOWAITERS_BIT was kept above, + // so the xor below flips the NOWAITERS_BIT to 0 as expected. + // + // For the non inverted WAITERS_BIT case, WAITERS_BIT is not set in + // the thread identity, and the xor leaves the bit alone. + // + new_state ^= xor_owner_and_set_full_width; + if (_dq_state_has_pending_barrier(old_state) || + old_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state |= DISPATCH_QUEUE_IN_BARRIER; + } + } else if (!clear_enqueued_bit) { + os_atomic_rmw_loop_give_up(break); + } + }); + + if (dq_state) *dq_state = new_state; + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT; + old_state &= DISPATCH_QUEUE_WIDTH_MASK; + return new_state - old_state; + } + return 0; +} + +/* Used by _dispatch_barrier_{try,}sync + * + * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a + * simple cmpxchg which is significantly faster on Intel, and makes a + * significant difference on the uncontended codepath. 
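+ *
+ * Concretely, `completely idle` is the single value
+ * DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), which is why the one
+ * os_atomic_cmpxchg2o() against it below is sufficient.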
+ *
+ * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
+ *
+ * Initial state must be `completely idle`
+ * Final state forces { ib:1, qf:1, w:0 }
+ */
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline bool
+_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq)
+{
+	uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
+	value |= _dispatch_tid_self();
+
+	return os_atomic_cmpxchg2o(dq, dq_state,
+			DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire);
+}
+
+/* Used by _dispatch_sync for root queues and some drain codepaths
+ *
+ * Root queues have no strict ordering and dispatch_sync() always goes through.
+ * Drain is the sole setter of `dl` hence can use this non-failing version of
+ * _dispatch_queue_try_acquire_sync().
+ *
+ * Final state: { w += 1 }
+ */
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_reserve_sync_width(dispatch_queue_t dq)
+{
+	(void)os_atomic_add2o(dq, dq_state,
+			DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
+}
+
+/* Used by _dispatch_sync on non-serial queues
+ *
+ * Initial state must be { sc:0, ib:0, pb:0, d:0 }
+ * Final state: { w += 1 }
+ */
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline bool
+_dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq)
+{
+	uint64_t dq_state, value;
+
+	return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
+		if (!fastpath(_dq_state_is_sync_runnable(dq_state)) ||
+				slowpath(_dq_state_is_dirty(dq_state)) ||
+				slowpath(_dq_state_has_pending_barrier(dq_state))) {
+			os_atomic_rmw_loop_give_up(return false);
+		}
+		value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
+	});
+}
+
+/* Used by _dispatch_apply_redirect
+ *
+ * Tries to acquire at most da_width and returns what could be acquired,
+ * possibly 0
+ */
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline uint32_t
+_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width)
+{
+	uint64_t dq_state, value;
+	uint32_t width;
+
+	(void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
+		width = _dq_state_available_width(dq_state);
+		if (!fastpath(width)) {
+			os_atomic_rmw_loop_give_up(return 0);
+		}
+		if (width > da_width) {
+			width = da_width;
+		}
+		value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+	});
+	return width;
+}
+
+/* Used by _dispatch_apply_redirect
+ *
+ * Release width acquired by _dispatch_queue_try_reserve_apply_width
+ */
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_relinquish_width(dispatch_queue_t dq, uint32_t da_width)
+{
+	(void)os_atomic_sub2o(dq, dq_state,
+			da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed);
+}
+
+/* Used by target-queue recursing code
+ *
+ * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 }
+ * Final state: { w += 1 }
+ */
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline bool
+_dispatch_queue_try_acquire_async(dispatch_queue_t dq)
+{
+	uint64_t dq_state, value;
+
+	return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, {
+		if (!fastpath(_dq_state_is_runnable(dq_state)) ||
+				slowpath(_dq_state_is_dirty(dq_state)) ||
+				slowpath(_dq_state_has_pending_barrier(dq_state))) {
+			os_atomic_rmw_loop_give_up(return false);
+		}
+		value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
+	});
+}
+
+/* Used at the end of Drainers
+ *
+ * This adjusts the `owned` width when the next continuation is already known
+ * to account for its barrierness.
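+ *
+ * Worked example (illustrative numbers): on a dq_width == 4 queue whose
+ * next item is a barrier, `owned` is reduced by
+ * DISPATCH_QUEUE_PENDING_BARRIER + (4 - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL,
+ * leaving that amount held in dq_state as the pre-reservation that
+ * _dq_state_used_width() ignores.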
+ */ +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned, + struct dispatch_object_s *next_dc) +{ + uint64_t reservation; + + if (slowpath(dq->dq_width > 1)) { + if (next_dc && _dispatch_object_is_barrier(next_dc)) { + reservation = DISPATCH_QUEUE_PENDING_BARRIER; + reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + owned -= reservation; + } + } + return owned; +} + +/* Used at the end of Drainers + * + * Unlocking fails if the DIRTY bit is seen (and the queue is not suspended). + * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used + * as a signal to renew the drain lock instead of releasing it. + * + * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned` + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned) +{ + uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t new_state; + dispatch_priority_t pp = 0, op; + + do { + if (unlikely(_dq_state_is_dirty(old_state) && + !_dq_state_is_suspended(old_state))) { + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire); + _dispatch_queue_reinstate_override_priority(dq, pp); + return false; + } + new_state = old_state - owned; + if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) || + _dq_state_is_suspended(old_state)) { + // the test for the WIDTH_FULL_BIT is about narrow concurrent queues + // releasing the drain lock while being at the width limit + // + // _non_barrier_complete() will set the DIRTY bit when going back + // under the limit which will cause the try_unlock to fail + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); + } else { + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + // This current owner is the only one that can clear HAS_OVERRIDE, + // so accumulating reset overrides here is valid. + if (unlikely(_dq_state_has_override(new_state))) { + new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE; + dispatch_assert(!_dispatch_queue_is_thread_bound(dq)); + op = _dispatch_queue_reset_override_priority(dq, false); + if (op > pp) pp = op; + } + } + } while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state, + old_state, new_state, &old_state, release))); + + if (_dq_state_has_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + return true; +} + +/* Used at the end of Drainers when the next work item is known + * and that the dirty-head check isn't needed. + * + * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen. + */ +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq, + uint64_t owned, mach_port_t next_owner, uint64_t *orig_state) +{ + uint64_t dq_state, value; + +#ifdef DLOCK_NOWAITERS_BIT + // The NOWAITERS_BIT state must not change through the transfer. It means + // that if next_owner is 0 the bit must be flipped in the rmw_loop below, + // and if next_owner is set, then the bit must be left unchanged. + // + // - when next_owner is 0, the xor below sets NOWAITERS_BIT in next_owner, + // which causes the second xor to flip the bit as expected. 
+ // - if next_owner is not 0, it has the NOWAITERS_BIT set, so we have to + // clear it so that the second xor leaves the NOWAITERS_BIT alone. + next_owner ^= DLOCK_NOWAITERS_BIT; +#endif + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, { + value = dq_state - owned; + // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT + // but we want to be more efficient wrt the WAITERS_BIT + value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; + value &= ~DISPATCH_QUEUE_DRAIN_PENDED; + value &= ~DISPATCH_QUEUE_DIRTY; + value ^= next_owner; + }); + + if (_dq_state_has_override(dq_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + if (orig_state) *orig_state = dq_state; + return value; +} +#define _dispatch_queue_drain_unlock(dq, owned, orig) \ + _dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, + uint64_t to_unlock, dispatch_object_t dou) +{ + mach_port_t th_next = 0; + if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) { + th_next = (mach_port_t)dou._dc->dc_data; + } + _dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL); +} + + #pragma mark - -#pragma mark dispatch_thread +#pragma mark os_mpsc_queue + +// type_t * {volatile,const,_Atomic,...} -> type_t * +// type_t[] -> type_t * +#define os_unqualified_pointer_type(expr) \ + typeof(typeof(*(expr)) *) + +#define os_mpsc_node_type(q, _ns) \ + os_unqualified_pointer_type((q)->_ns##_head) + +// +// Multi Producer calls, can be used safely concurrently +// + +// Returns true when the queue was empty and the head must be set +#define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next) ({ \ + os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \ + _tail->_o_next = NULL; \ + _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \ + if (fastpath(_prev)) { \ + os_atomic_store2o(_prev, _o_next, _head, relaxed); \ + } \ + (_prev == NULL); \ + }) + +// Returns true when the queue was empty and the head must be set +#define os_mpsc_push_update_tail(q, _ns, o, _o_next) ({ \ + os_mpsc_node_type(q, _ns) _o = (o); \ + os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \ + }) + +#define os_mpsc_push_update_head(q, _ns, o) ({ \ + os_atomic_store2o((q), _ns##_head, o, relaxed); \ + }) + +// +// Single Consumer calls, can NOT be used safely concurrently +// + +#define os_mpsc_get_head(q, _ns) ({ \ + os_mpsc_node_type(q, _ns) _head; \ + _dispatch_wait_until(_head = (q)->_ns##_head); \ + _head; \ + }) + +#define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \ + os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ + /* 22708742: set tail to NULL with release, so that NULL write */ \ + /* to head above doesn't clobber head from concurrent enqueuer */ \ + if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \ + _dispatch_wait_until(_n = fastpath(_head->_o_next)); \ + os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ + } \ + _n; \ + }) + +#define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \ + if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \ + _dispatch_wait_until(_n = _q->_ns##_head); \ + _head->_o_next = _n; \ + } \ + os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ + }) + +#define 
os_mpsc_capture_snapshot(q, _ns, tail) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head; \ + _dispatch_wait_until(_head = _q->_ns##_head); \ + os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \ + /* 22708742: set tail to NULL with release, so that NULL write */ \ + /* to head above doesn't clobber head from concurrent enqueuer */ \ + *(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \ + _head; \ + }) + +#define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ + os_unqualified_pointer_type(head) _head = (head), _n = NULL; \ + if (_head != (tail)) { \ + _dispatch_wait_until(_n = _head->_o_next); \ + }; \ + _n; }) + +#define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \ + _tail->_o_next = NULL; \ + if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \ + _dispatch_wait_until(_n = _q->_ns##_head); \ + _tail->_o_next = _n; \ + } \ + os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ + }) + +#pragma mark - +#pragma mark dispatch_queue_t tq lock DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_wqthread_override_start(mach_port_t thread, - pthread_priority_t priority) +static inline bool +_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_workqueue_override_start_direct(thread, priority); -#else - (void)thread; (void)priority; -#endif + dispatch_lock_owner owner; + if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) { + return true; + } + _dispatch_wqthread_override_start_check_owner(owner, pp, + &dq->dq_sidelock.dul_lock); + return false; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_wqthread_override_reset(void) +_dispatch_queue_sidelock_lock(dispatch_queue_t dq) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_workqueue_override_reset(); -#endif + return _dispatch_unfair_lock_lock(&dq->dq_sidelock); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t priority) +static inline bool +_dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_override_qos_class_start_direct(thread, priority); -#else - (void)thread; (void)priority; -#endif + if (_dispatch_unfair_lock_tryunlock(&dq->dq_sidelock)) { + return true; + } + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + return false; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_thread_override_end(mach_port_t thread) +_dispatch_queue_sidelock_unlock(dispatch_queue_t dq) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_override_qos_class_end_direct(thread); -#else - (void)thread; -#endif + if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); + } } #pragma mark - -#pragma mark dispatch_queue_t - -static inline bool _dispatch_queue_need_override(dispatch_queue_t dq, - pthread_priority_t pp); -static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq, - pthread_priority_t pp); -static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq, - pthread_priority_t pp); -static inline pthread_priority_t _dispatch_queue_get_override_priority( - dispatch_queue_t dq); -static inline pthread_priority_t _dispatch_queue_reset_override_priority( - dispatch_queue_t dq); -static inline pthread_priority_t _dispatch_get_defaultpriority(void); -static inline void _dispatch_set_defaultpriority_override(void); -static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority); -static inline pthread_priority_t _dispatch_get_priority(void); -static inline void _dispatch_set_priority(pthread_priority_t priority); +#pragma mark dispatch_queue_t misc DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t @@ -198,238 +1429,325 @@ _dispatch_queue_get_current(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_set_thread(dispatch_queue_t dq) +_dispatch_queue_set_current(dispatch_queue_t dq) { - // The manager queue uses dispatch_queue_drain but is thread bound - if (!dq->dq_is_thread_bound) { - dq->dq_thread = _dispatch_thread_port(); - } + _dispatch_thread_setspecific(dispatch_queue_key, dq); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_clear_thread(dispatch_queue_t dq) +static inline struct dispatch_object_s* +_dispatch_queue_head(dispatch_queue_t dq) { - if (!dq->dq_is_thread_bound) { - dq->dq_thread = MACH_PORT_NULL; - } + return os_mpsc_get_head(dq, dq_items); +} + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s* +_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) +{ + return os_mpsc_pop_head(dq, dq_items, dc, do_next); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head, +_dispatch_queue_push_update_tail(dispatch_queue_t dq, struct dispatch_object_s *tail) { - struct dispatch_object_s *prev; - tail->do_next = NULL; - prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release); - if (fastpath(prev)) { - // if we crash here with a value less than 0x1000, then we are at a - // known bug in client code for example, see _dispatch_queue_dispose - // or _dispatch_atfork_child - prev->do_next = head; - } - return (prev != NULL); + // if we crash here with a value less than 0x1000, then we are + // at a known bug in client code. for example, see + // _dispatch_queue_dispose or _dispatch_atfork_child + return os_mpsc_push_update_tail(dq, dq_items, tail, do_next); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) +static inline bool +_dispatch_queue_push_update_tail_list(dispatch_queue_t dq, + struct dispatch_object_s *head, struct dispatch_object_s *tail) { - struct dispatch_object_s *head = _head._do, *tail = _tail._do; - bool override = _dispatch_queue_need_override_retain(dq, pp); - if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) { - _dispatch_queue_push_list_slow(dq, pp, head, n, override); - } else if (override) { - _dispatch_queue_wakeup_with_qos_and_release(dq, pp); + // if we crash here with a value less than 0x1000, then we are + // at a known bug in client code. 
for example, see + // _dispatch_queue_dispose or _dispatch_atfork_child + return os_mpsc_push_update_tail_list(dq, dq_items, head, tail, do_next); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_update_head(dispatch_queue_t dq, + struct dispatch_object_s *head, bool retained) +{ + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + dispatch_assert(!retained); + // Lie about "retained" here, it generates better assembly in this + // hotpath, and _dispatch_root_queue_wakeup knows to ignore this + // fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH. + // + // We need to bypass the retain below because pthread root queues + // are not global and retaining them would be wrong. + // + // We should eventually have a typeflag for "POOL" kind of root queues. + retained = true; } + // The queue must be retained before dq_items_head is written in order + // to ensure that the reference is still valid when _dispatch_queue_wakeup + // is called. Otherwise, if preempted between the assignment to + // dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the + // queue may release the last reference to the queue when invoked by + // _dispatch_queue_drain. + if (!retained) _dispatch_retain(dq); + os_mpsc_push_update_head(dq, dq_items, head); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp) +_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) { - struct dispatch_object_s *tail = _tail._do; + struct dispatch_object_s *head = _head._do, *tail = _tail._do; bool override = _dispatch_queue_need_override_retain(dq, pp); - if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { - _dispatch_queue_push_slow(dq, pp, tail, override); + dispatch_queue_flags_t flags; + if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) { + _dispatch_queue_push_update_head(dq, head, override); + if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) { + return _dispatch_queue_push_list_slow(dq, n); + } + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; } else if (override) { - _dispatch_queue_wakeup_with_qos_and_release(dq, pp); + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; + } else { + return; } + dx_wakeup(dq, pp, flags); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, bool wakeup) +_dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) { - // caller assumed to have a reference on dq struct dispatch_object_s *tail = _tail._do; - if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { - _dispatch_queue_push_slow(dq, pp, tail, false); - } else if (_dispatch_queue_need_override(dq, pp)) { - _dispatch_queue_wakeup_with_qos(dq, pp); - } else if (slowpath(wakeup)) { - _dispatch_queue_wakeup(dq); + bool override = _dispatch_queue_need_override(dq, pp); + if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { + // when SLOW_WAITER is set, we borrow the reference of the caller + if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { + _dispatch_queue_push_update_head(dq, tail, true); + flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH; + } else if (override) { + flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING; + } else { + flags = DISPATCH_WAKEUP_SLOW_WAITER; + } + } else { + if 
(override) _dispatch_retain(dq); + if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { + _dispatch_queue_push_update_head(dq, tail, override); + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; + } else if (override) { + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; + } else { + return; + } } + return dx_wakeup(dq, pp, flags); } struct _dispatch_identity_s { - pthread_priority_t old_pri; pthread_priority_t old_pp; - dispatch_queue_t old_dq; }; DISPATCH_ALWAYS_INLINE static inline void _dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di, - dispatch_queue_t assumed_rq) + pthread_priority_t pp) { - di->old_dq = _dispatch_queue_get_current(); - di->old_pri = _dispatch_get_priority(); - di->old_pp = _dispatch_get_defaultpriority(); + // assumed_rq was set by the caller, we need to fake the priorities + dispatch_queue_t assumed_rq = _dispatch_queue_get_current(); + + dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - dispatch_assert(dx_type(di->old_dq) == DISPATCH_QUEUE_ROOT_TYPE); - dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_ROOT_TYPE); + di->old_pp = _dispatch_get_defaultpriority(); - _dispatch_wqthread_override_start(_dispatch_thread_port(), di->old_pri); - _dispatch_set_priority(assumed_rq->dq_priority); + if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { + if (!pp) { + pp = _dispatch_get_priority(); + // _dispatch_root_queue_drain_deferred_item() may turn a manager + // thread into a regular root queue, and we must never try to + // restore the manager flag once we became a regular work queue + // thread. + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } + if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) > + (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start(_dispatch_tid_self(), pp); + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + } _dispatch_reset_defaultpriority(assumed_rq->dq_priority); - _dispatch_thread_setspecific(dispatch_queue_key, assumed_rq); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di) { - _dispatch_thread_setspecific(dispatch_queue_key, di->old_dq); - _dispatch_set_priority(di->old_pri); _dispatch_reset_defaultpriority(di->old_pp); - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); } typedef dispatch_queue_t _dispatch_queue_class_invoke_handler_t(dispatch_object_t, - _dispatch_thread_semaphore_t*); + dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **); DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_class_invoke(dispatch_object_t dou, - dispatch_continuation_t dc, dispatch_invoke_flags_t flags, + dispatch_invoke_flags_t flags, _dispatch_queue_class_invoke_handler_t invoke) { - pthread_priority_t p = 0; dispatch_queue_t dq = dou._dq; + struct dispatch_object_s *dc = NULL; + dispatch_queue_t tq = NULL; + uint64_t dq_state, to_unlock = 0; bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING); bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING); - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && - fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){ - _dispatch_queue_set_thread(dq); + // When called from a plain _dispatch_queue_drain: + // overriding = false + // owning = true + // + // When called from an override continuation: + // overriding = true + // owning depends on whether the override embedded the queue or steals + DISPATCH_COMPILER_CAN_ASSUME(owning || overriding); - dispatch_queue_t tq = NULL; - _dispatch_thread_semaphore_t sema = 0; + if (owning) { + dq->do_next = DISPATCH_OBJECT_LISTLESS; + } + to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state); + if (likely(to_unlock)) { struct _dispatch_identity_s di; + pthread_priority_t old_dp; +drain_pending_barrier: if (overriding) { _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", - dq->dq_thread, _dispatch_get_defaultpriority()); - _dispatch_root_queue_identity_assume(&di, dc->dc_other); + _dispatch_tid_self(), _dispatch_get_defaultpriority()); + _dispatch_root_queue_identity_assume(&di, 0); } - tq = invoke(dq, &sema); - _dispatch_queue_clear_thread(dq); - - if (!owning && !sema && tq && tq != dq->do_targetq) { - /* - * When (tq && tq != dq->do_targetq) this is a source or mach - * channel asking to get to their manager queue. - * - * Since stealers cannot call _dispatch_queue_push_queue and - * retarget those, they need ot destroy the override so that - * when waking those sources or mach channels on their target queue - * we don't risk a stealer taking them over and not be able to - * retarget again, effectively live-locking them. - * - * Also, we're in the `overriding` case so the thread will be marked - * dirty by _dispatch_root_queue_identity_restore anyway - * so forgetting about p is fine. - */ - (void)_dispatch_queue_reset_override_priority(dq); - p = 0; - } else if (sema || tq || DISPATCH_OBJECT_SUSPENDED(dq)) { - p = _dispatch_queue_get_override_priority(dq); - } else { - p = _dispatch_queue_reset_override_priority(dq); - } - if (overriding) { - _dispatch_root_queue_identity_restore(&di); - } else { - if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { + pthread_priority_t op, dp; + + old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); + op = dq->dq_override; + if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start(_dispatch_tid_self(), op); // Ensure that the root queue sees that this thread was overridden. 
_dispatch_set_defaultpriority_override();
+			}
+		}
-		uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release);
-		if (sema) {
-			_dispatch_thread_semaphore_signal(sema);
-		} else if (owning && tq) {
-			_dispatch_introspection_queue_item_complete(dq);
-			return _dispatch_queue_push_queue(tq, dq, p);
+		flags = _dispatch_queue_merge_autorelease_frequency(dq, flags);
+attempt_running_slow_head:
+		tq = invoke(dq, flags, &to_unlock, &dc);
+		if (slowpath(tq)) {
+			// Either dc is set, which is a deferred invoke case
+			//
+			// or only tq is set and it means a reenqueue is required, because of:
+			// a retarget, a suspension, or a width change.
+			//
+			// In both cases, we want to bypass the check for DIRTY.
+			// That may cause us to leave DIRTY in place but all drain lock
+			// acquirers clear it
+		} else {
+			if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) {
+				goto attempt_running_slow_head;
+			}
+			to_unlock = 0;
+		}
+		if (overriding) {
+			_dispatch_root_queue_identity_restore(&di);
 		}
-		if (!owning && running == 0) {
-			_dispatch_introspection_queue_item_complete(dq);
-			return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
+		if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) {
+			_dispatch_reset_defaultpriority(old_dp);
 		}
 	} else if (overriding) {
-		mach_port_t th = dq->dq_thread;
-		if (th) {
-			p = _dispatch_queue_get_override_priority(dq);
+		uint32_t owner = _dq_state_drain_owner(dq_state);
+		pthread_priority_t p = dq->dq_override;
+		if (owner && p) {
 			_dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
-					th, p);
-			_dispatch_wqthread_override_start(th, p);
+					owner, p);
+			_dispatch_wqthread_override_start_check_owner(owner, p,
+					&dq->dq_state_lock);
 		}
 	}
-	_dispatch_introspection_queue_item_complete(dq);
 	if (owning) {
-		dq->do_next = DISPATCH_OBJECT_LISTLESS;
-		if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
-				DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) {
-			// seq_cst with atomic store to suspend_cnt
-			if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
-				// verify that the queue is idle
-				return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
+		_dispatch_introspection_queue_item_complete(dq);
+	}
+
+	if (tq && dc) {
+		return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc);
+	}
+
+	if (tq) {
+		bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current());
+		uint64_t old_state, new_state;
+
+		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+			new_state = old_state - to_unlock;
+			if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) &&
+					_dq_state_has_pending_barrier(new_state)) {
+				new_state += DISPATCH_QUEUE_IN_BARRIER;
+				new_state += DISPATCH_QUEUE_WIDTH_INTERVAL;
+				new_state -= DISPATCH_QUEUE_PENDING_BARRIER;
+				new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
+			} else {
+				new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state);
+				if (_dq_state_should_wakeup(new_state)) {
+					// drain was not interrupted for suspension
+					// we will reenqueue right away, just put ENQUEUED back
+					new_state |= DISPATCH_QUEUE_ENQUEUED;
+					new_state |= DISPATCH_QUEUE_DIRTY;
+				}
+			}
+		});
+		if (_dq_state_is_in_barrier(new_state)) {
+			// we did a "full width upgrade" and just added IN_BARRIER
+			// so adjust what we own and drain again
+			to_unlock &= DISPATCH_QUEUE_ENQUEUED;
+			to_unlock += DISPATCH_QUEUE_IN_BARRIER;
+			to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+			goto drain_pending_barrier;
+		}
+		if (_dq_state_has_override(old_state)) {
+			// Ensure that the root queue sees that this thread was overridden.
+ _dispatch_set_defaultpriority_override(); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + return _dispatch_queue_push(tq, dq, 0); } } - _dispatch_release(dq); // added when the queue is put on the list + + return _dispatch_release_tailcall(dq); } DISPATCH_ALWAYS_INLINE -static inline unsigned long -_dispatch_queue_class_probe(dispatch_object_t dou) +static inline bool +_dispatch_queue_class_probe(dispatch_queue_class_t dqu) { - dispatch_queue_t dq = dou._dq; struct dispatch_object_s *tail; - // seq_cst with atomic store to suspend_cnt - tail = dispatch_atomic_load2o(dq, dq_items_tail, seq_cst); - return (unsigned long)slowpath(tail != NULL); + // seq_cst wrt atomic store to dq_state + // seq_cst wrt atomic store to dq_flags + tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered); + return slowpath(tail != NULL); } -DISPATCH_ALWAYS_INLINE +DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline bool -_dispatch_object_suspended(dispatch_object_t dou) +_dispatch_is_in_root_queues_array(dispatch_queue_t dq) { - struct dispatch_object_s *obj = dou._do; - unsigned int suspend_cnt; - // seq_cst with atomic store to tail - suspend_cnt = dispatch_atomic_load2o(obj, do_suspend_cnt, seq_cst); - return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL); + return (dq >= _dispatch_root_queues) && + (dq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST @@ -474,43 +1792,72 @@ _dispatch_get_root_queue(qos_class_t priority, bool overcommit) return NULL; } -// Note to later developers: ensure that any initialization changes are -// made for statically allocated queues (i.e. _dispatch_main_q). -static inline void -_dispatch_queue_init(dispatch_queue_t dq) +#if HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +static inline dispatch_queue_t +_dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit) { - dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + uint32_t idx; - dq->dq_running = 0; - dq->dq_width = 1; - dq->dq_override_voucher = DISPATCH_NO_VOUCHER; - dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers, - relaxed); + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + idx = (uint32_t)__builtin_ffs((int)pp); + if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] + .dq_priority)) { + // If kernel doesn't support maintenance, bottom bit is background. + // Shift to our idea of where background bit is. 
+ idx++; + } + // ffs starts at 1, and account for the QOS_CLASS_SHIFT + // if pp is 0, idx is 0 or 1 and this will wrap to a value larger than + // DISPATCH_QOS_COUNT + idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1); + if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) { + DISPATCH_CLIENT_CRASH(pp, "Corrupted priority"); + } + return &_dispatch_root_queues[2 * idx + overcommit]; +} +#endif + +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +static inline dispatch_queue_t +_dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit) +{ + bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + // root queues in _dispatch_root_queues are not overcommit for even indices + // and overcommit for odd ones, so fixing overcommit is either returning + // the same queue, or picking its neighbour in _dispatch_root_queues + if (overcommit && !rq_overcommit) { + return rq + 1; + } + if (!overcommit && rq_overcommit) { + return rq - 1; + } + return rq; } DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_set_bound_thread(dispatch_queue_t dq) { - //Tag thread-bound queues with the owning thread - dispatch_assert(dq->dq_is_thread_bound); - dq->dq_thread = _dispatch_thread_port(); + // Tag thread-bound queues with the owning thread + dispatch_assert(_dispatch_queue_is_thread_bound(dq)); + mach_port_t old_owner, self = _dispatch_tid_self(); + uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed); + if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) { + DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice"); + } } DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_clear_bound_thread(dispatch_queue_t dq) { - dispatch_assert(dq->dq_is_thread_bound); - dq->dq_thread = MACH_PORT_NULL; -} + uint64_t dq_state, value; -DISPATCH_ALWAYS_INLINE -static inline mach_port_t -_dispatch_queue_get_bound_thread(dispatch_queue_t dq) -{ - dispatch_assert(dq->dq_is_thread_bound); - return dq->dq_thread; + dispatch_assert(_dispatch_queue_is_thread_bound(dq)); + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state); + }); } DISPATCH_ALWAYS_INLINE @@ -538,9 +1885,9 @@ static inline pthread_priority_t _dispatch_get_defaultpriority(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific( + pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific( dispatch_defaultpriority_key); - return priority; + return pp; #else return 0; #endif @@ -548,20 +1895,16 @@ _dispatch_get_defaultpriority(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_defaultpriority(pthread_priority_t priority) +_dispatch_reset_defaultpriority(pthread_priority_t pp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - // if an inner-loop or'd in the override flag to the per-thread priority, - // it needs to be propogated up the chain - priority |= old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG; - - if (slowpath(priority != old_priority)) { - _dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); - } + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + // If an inner-loop or'd in the override flag to the per-thread priority, + // it needs to be propagated up the chain. 
+ pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG; + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); #else - (void)priority; + (void)pp; #endif } @@ -570,14 +1913,10 @@ static inline void _dispatch_set_defaultpriority_override(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - pthread_priority_t priority = old_priority | - _PTHREAD_PRIORITY_OVERRIDE_FLAG; + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG; - if (slowpath(priority != old_priority)) { - _dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); - } + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); #endif } @@ -586,15 +1925,12 @@ static inline bool _dispatch_reset_defaultpriority_override(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - pthread_priority_t priority = old_priority & + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + pthread_priority_t pp = old_pp & ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG); - if (slowpath(priority != old_priority)) { - _dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); - return true; - } + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); + return unlikely(pp != old_pp); #endif return false; } @@ -605,12 +1941,18 @@ _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, dispatch_queue_t tq) { #if HAVE_PTHREAD_WORKQUEUE_QOS - const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; - const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; - pthread_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority; + const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; + const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; + const dispatch_priority_t defaultqueue_flag = + _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority; if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) && (tqp & rootqueue_flag)) { - dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; + if (tqp & defaultqueue_flag) { + dq->dq_priority = 0; + } else { + dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; + } } #else (void)dq; (void)tq; @@ -619,275 +1961,290 @@ _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_set_defaultpriority(pthread_priority_t priority) +_dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - if (old_priority) { + const pthread_priority_t default_priority_preserved_flags = + _PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + if (old_pp) { pthread_priority_t flags, defaultqueue, basepri; - flags = (priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - defaultqueue = (old_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - basepri = (old_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK); - priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!priority) { + flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); + defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); + basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK); + pp &= 
~_PTHREAD_PRIORITY_FLAGS_MASK; + if (!pp) { flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue; - priority = basepri; - } else if (priority < basepri && !defaultqueue) { // rdar://16349734 - priority = basepri; + pp = basepri; + } else if (pp < basepri && !defaultqueue) { // rdar://16349734 + pp = basepri; } - priority |= flags | (old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG); - } - if (slowpath(priority != old_priority)) { - _dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); + pp |= flags | (old_pp & default_priority_preserved_flags); } - return old_priority; + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); + if (new_pp) *new_pp = pp; + return old_pp; #else - (void)priority; + (void)pp; (void)new_pp; return 0; #endif } DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_priority_adopt(pthread_priority_t priority, unsigned long flags) +_dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS pthread_priority_t defaultpri = _dispatch_get_defaultpriority(); bool enforce, inherited, defaultqueue; enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || - (priority & _PTHREAD_PRIORITY_ENFORCE_FLAG); + (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG); inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG); defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!priority) { - enforce = false; - } else if (!enforce) { - if (priority < defaultpri) { - if (defaultqueue) enforce = true; // rdar://16349734 - } else if (inherited || defaultqueue) { - enforce = true; - } - } else if (priority < defaultpri && !defaultqueue) { // rdar://16349734 - enforce = false; + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + + if (!pp) { + return defaultpri; + } else if (defaultqueue) { // rdar://16349734 + return pp; + } else if (pp < defaultpri) { + return defaultpri; + } else if (enforce || inherited) { + return pp; + } else { + return defaultpri; } - return enforce ? 
priority : defaultpri; #else - (void)priority; (void)flags; + (void)pp; (void)flags; return 0; #endif } DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_get_priority(void) +_dispatch_priority_inherit_from_root_queue(pthread_priority_t pp, + dispatch_queue_t rq) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific( - dispatch_priority_key); - return (priority & ~_PTHREAD_PRIORITY_FLAGS_MASK); + pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK; + pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + pthread_priority_t defaultqueue = + rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + + if (!p || (!defaultqueue && p < rqp)) { + p = rqp | defaultqueue; + } + return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); #else + (void)rq; (void)pp; return 0; #endif } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_set_priority_and_mach_voucher(pthread_priority_t priority, - mach_voucher_t kv) +static inline pthread_priority_t +_dispatch_get_priority(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - _pthread_set_flags_t flags = 0; - if (priority && _dispatch_set_qos_class_enabled) { - pthread_priority_t old_priority = _dispatch_get_priority(); - if (priority != old_priority && old_priority) { - flags |= _PTHREAD_SET_SELF_QOS_FLAG; - } - } - if (kv != VOUCHER_NO_MACH_VOUCHER) { -#if VOUCHER_USE_MACH_VOUCHER - flags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; -#endif - } - if (!flags) return; - int r = _pthread_set_properties_self(flags, priority, kv); - (void)dispatch_assume_zero(r); -#elif VOUCHER_USE_MACH_VOUCHER -#error Invalid build configuration + pthread_priority_t pp = (uintptr_t) + _dispatch_thread_getspecific(dispatch_priority_key); + return pp; #else - (void)priority; (void)kv; + return 0; #endif } -DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT -static inline voucher_t -_dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority, - voucher_t voucher) -{ - pthread_priority_t p = (priority != DISPATCH_NO_PRIORITY) ? priority : 0; - voucher_t ov = DISPATCH_NO_VOUCHER; - mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; - if (voucher != DISPATCH_NO_VOUCHER) { - ov = _voucher_get(); - kv = _voucher_swap_and_get_mach_voucher(ov, voucher); +#if HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_compute_update(pthread_priority_t pp) +{ + dispatch_assert(pp != DISPATCH_NO_PRIORITY); + if (!_dispatch_set_qos_class_enabled) return 0; + // the priority in _dispatch_get_priority() only tracks manager-ness + // and overcommit, which is inherited from the current value for each update + // however if the priority had the NEEDS_UNBIND flag set we need to clear it + // the first chance we get + // + // the manager bit is invalid input, but we keep it to get meaningful + // assertions in _dispatch_set_priority_and_voucher_slow() + pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pthread_priority_t cur_priority = _dispatch_get_priority(); + pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + if (unlikely(cur_priority & unbind)) { + // else we always need an update if the NEEDS_UNBIND flag is set + // the slowpath in _dispatch_set_priority_and_voucher_slow() will + // adjust the priority further with the proper overcommitness + return pp ? 
pp : (cur_priority & ~unbind); + } else { + cur_priority &= ~overcommit; } - _dispatch_set_priority_and_mach_voucher(p, kv); - return ov; + if (unlikely(pp != cur_priority)) return pp; + return 0; } +#endif DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t -_dispatch_adopt_priority_and_voucher(pthread_priority_t priority, - voucher_t v, unsigned long flags) +_dispatch_set_priority_and_voucher(pthread_priority_t pp, + voucher_t v, _dispatch_thread_set_self_t flags) { - pthread_priority_t p = 0; - if (priority != DISPATCH_NO_PRIORITY) { - p = _dispatch_priority_adopt(priority, flags); - } - if (!(flags & DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE)) { - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (dq && dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { - if (v != DISPATCH_NO_VOUCHER && v) _voucher_release(v); - v = dq->dq_override_voucher; - if (v) _voucher_retain(v); +#if HAVE_PTHREAD_WORKQUEUE_QOS + pp = _dispatch_priority_compute_update(pp); + if (likely(!pp)) { + if (v == DISPATCH_NO_VOUCHER) { + return DISPATCH_NO_VOUCHER; + } + if (likely(v == _voucher_get())) { + bool retained = flags & DISPATCH_VOUCHER_CONSUME; + if (flags & DISPATCH_VOUCHER_REPLACE) { + if (retained && v) _voucher_release_no_dispose(v); + v = DISPATCH_NO_VOUCHER; + } else { + if (!retained && v) _voucher_retain(v); + } + return v; } } - return _dispatch_set_priority_and_adopt_voucher(p, v); + return _dispatch_set_priority_and_voucher_slow(pp, v, flags); +#else + (void)pp; (void)v; (void)flags; + return DISPATCH_NO_VOUCHER; +#endif } DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t -_dispatch_adopt_queue_override_voucher(dispatch_queue_t dq) -{ - voucher_t v = dq->dq_override_voucher; - if (v == DISPATCH_NO_VOUCHER) return DISPATCH_NO_VOUCHER; - if (v) _voucher_retain(v); - return _dispatch_set_priority_and_adopt_voucher(DISPATCH_NO_PRIORITY, v); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority, - voucher_t voucher, unsigned long flags) +_dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp, + voucher_t v, _dispatch_thread_set_self_t flags) { - voucher_t ov; - ov = _dispatch_adopt_priority_and_voucher(priority, voucher, flags); - if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_reset_priority_and_voucher(pthread_priority_t priority, - voucher_t voucher) -{ - voucher_t ov; - ov = _dispatch_set_priority_and_adopt_voucher(priority, voucher); - if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov); + pthread_priority_t p = 0; + if (pp != DISPATCH_NO_PRIORITY) { + p = _dispatch_priority_adopt(pp, flags); + } + return _dispatch_set_priority_and_voucher(p, v, flags); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_voucher(voucher_t voucher) +_dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v) { - return _dispatch_reset_priority_and_voucher(DISPATCH_NO_PRIORITY, voucher); + if (pp == DISPATCH_NO_PRIORITY) pp = 0; + (void)_dispatch_set_priority_and_voucher(pp, v, + DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_set_priority(pthread_priority_t priority) +_dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags) { - _dispatch_set_priority_and_mach_voucher(priority, VOUCHER_NO_MACH_VOUCHER); + flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE; + (void)_dispatch_set_priority_and_voucher(0, v, flags); } 
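
The fast path of _dispatch_set_priority_and_voucher() above rests on one idea: the value last applied to the thread is cached in TLS (the dispatch_priority_key value read by _dispatch_get_priority()), so the expensive kernel call in _dispatch_set_priority_and_voucher_slow() only happens when the requested priority or voucher actually differs. A minimal, self-contained C sketch of that update-elision pattern follows; every name in it (thread_qos_t, cached_qos, apply_qos_syscall, set_thread_qos) is hypothetical and merely stands in for the real QoS and voucher machinery:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long thread_qos_t;

    // Last value pushed to the kernel for this thread; plays the role of
    // the dispatch_priority_key thread-specific value in the code above.
    static _Thread_local thread_qos_t cached_qos;

    // Stand-in for the expensive kernel call (the "slow" path).
    static void
    apply_qos_syscall(thread_qos_t qos)
    {
        printf("syscall: applying qos 0x%lx\n", qos);
    }

    // Returns true when an update was actually performed.
    static bool
    set_thread_qos(thread_qos_t qos)
    {
        if (qos == cached_qos) {
            return false; // fast path: nothing changed, no kernel call
        }
        apply_qos_syscall(qos);
        cached_qos = qos;
        return true;
    }

    int
    main(void)
    {
        set_thread_qos(0x15); // pays for the "syscall"
        set_thread_qos(0x15); // elided: cached value already matches
        set_thread_qos(0x19); // pays again
        return 0;
    }

The real code additionally folds flags such as _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG and the voucher swap into the comparison, but returning early without touching the kernel when nothing changed is the same shape.
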
DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_priority_normalize(pthread_priority_t pp) +static inline bool +_dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp) { - dispatch_assert_zero(pp & ~(pthread_priority_t) - _PTHREAD_PRIORITY_QOS_CLASS_MASK); - unsigned int qosbits = (unsigned int)pp, idx; - if (!qosbits) return 0; - idx = (unsigned int)(sizeof(qosbits)*8) - - (unsigned int)__builtin_clz(qosbits) - 1; - return (1 << idx); + // global queues have their override set to DISPATCH_SATURATED_OVERRIDE + // which makes this test always return false for them. + return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_need_override(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_received_override(dispatch_queue_class_t dqu, + pthread_priority_t pp) { - if (!pp || dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE) return false; - uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - uint32_t o = dq->dq_override; - return (o < p); + dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE); + return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_need_override_retain(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_need_override_retain(dispatch_queue_class_t dqu, + pthread_priority_t pp) { - bool override = _dispatch_queue_need_override(dq, pp); - if (override) _dispatch_retain(dq); - return override; + if (_dispatch_queue_need_override(dqu, pp)) { + _os_object_retain_internal_inline(dqu._oq->_as_os_obj); + return true; + } + return false; } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t *pp, - bool *was_overridden) -{ - uint32_t o = dq->dq_override; - uint32_t p = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - if (o < p) { - o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed); - if (was_overridden) { - o = (uint32_t)_dispatch_priority_normalize(o); +_dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, + dispatch_priority_t new_op) +{ + dispatch_priority_t old_op; + new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (!new_op) return false; + os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, { + if (new_op <= old_op) { + os_atomic_rmw_loop_give_up(return false); } - *pp = _dispatch_priority_normalize(o | p); - } else { - o = (uint32_t)_dispatch_priority_normalize(o); - *pp = o; - } - if (was_overridden) { - *was_overridden = - (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) < o; - } - return (o < p); -} - -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_queue_get_override_priority(dispatch_queue_t dq) -{ - uint32_t p = (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - uint32_t o = dq->dq_override; - if (o == p) return o; - return _dispatch_priority_normalize(o); + }); + return true; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_set_override_priority(dispatch_queue_t dq) -{ - uint32_t p = 0; - if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { - p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; +_dispatch_queue_override_priority(dispatch_queue_class_t dqu, + pthread_priority_t *pp, dispatch_wakeup_flags_t *flags) +{ + os_mpsc_queue_t oq = dqu._oq; + dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + 
dispatch_priority_t o; + + _dispatch_assert_is_valid_qos_override(np); + if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) { + qp = 0; + } else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) { + // when a queue is used as a lock its priority doesn't count + } else if (np < qp) { + // for asynchronous workitems, queue priority is the floor for overrides + np = qp; + } + *flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS; + + // this optimizes for the case when no update of the override is required + // os_atomic_rmw_loop2o optimizes for the case when the update happens, + // and can't be used. + o = os_atomic_load2o(oq, oq_override, relaxed); + do { + if (likely(np <= o)) break; + } while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed))); + + if (np <= o) { + *pp = o; + } else { + *flags |= DISPATCH_WAKEUP_OVERRIDING; + *pp = np; + } + if (o > qp) { + *flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN; } - dispatch_atomic_store2o(dq, dq_override, p, relaxed); } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_queue_reset_override_priority(dispatch_queue_t dq) -{ - uint32_t p = 0; - if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { - p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; +static inline dispatch_priority_t +_dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu, + bool qp_is_floor) +{ + os_mpsc_queue_t oq = dqu._oq; + dispatch_priority_t p = 0; + if (qp_is_floor) { + // thread bound queues floor their dq_override to their + // priority to avoid receiving useless overrides + p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; } - uint32_t o = dispatch_atomic_xchg2o(dq, dq_override, p, relaxed); - if (o == p) return o; - return _dispatch_priority_normalize(o); + dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed); + dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE); + return (o > p) ? 
o : 0; } DISPATCH_ALWAYS_INLINE @@ -895,12 +2252,13 @@ static inline pthread_priority_t _dispatch_priority_propagate(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority = _dispatch_get_priority(); - if (priority > _dispatch_user_initiated_priority) { + pthread_priority_t pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp > _dispatch_user_initiated_priority) { // Cap QOS for propagation at user-initiated - priority = _dispatch_user_initiated_priority; + pp = _dispatch_user_initiated_priority; } - return priority; + return pp; #else return 0; #endif @@ -912,9 +2270,9 @@ static inline bool _dispatch_is_background_thread(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority; - priority = _dispatch_get_priority(); - return priority && (priority <= _dispatch_background_priority); + pthread_priority_t pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + return pp && (pp <= _dispatch_background_priority); #else return false; #endif @@ -933,6 +2291,18 @@ _dispatch_block_has_private_data(const dispatch_block_t block) return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags) +{ + /* + * Generates better assembly than the actual readable test: + * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_FLAGS) + */ + flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS; + return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS; +} + DISPATCH_ALWAYS_INLINE static inline dispatch_block_private_data_t _dispatch_block_get_data(const dispatch_block_t db) @@ -947,7 +2317,8 @@ _dispatch_block_get_data(const dispatch_block_t db) // x points to base of captured dispatch_block_private_data_s object dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x; if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) { - DISPATCH_CRASH("Corruption of dispatch block object"); + DISPATCH_CLIENT_CRASH(dbpd->dbpd_magic, + "Corruption of dispatch block object"); } return dbpd; } @@ -968,11 +2339,6 @@ _dispatch_block_get_flags(const dispatch_block_t db) return dbpd ? dbpd->dbpd_flags : 0; } -#define DISPATCH_BLOCK_HAS(flag, db) \ - ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0) -#define DISPATCH_BLOCK_IS(flag, db) \ - ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0) - #endif #pragma mark - @@ -983,8 +2349,8 @@ static inline dispatch_continuation_t _dispatch_continuation_alloc_cacheonly(void) { dispatch_continuation_t dc = (dispatch_continuation_t) - fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); - if (dc) { + _dispatch_thread_getspecific(dispatch_cache_key); + if (likely(dc)) { _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); } return dc; @@ -995,8 +2361,8 @@ static inline dispatch_continuation_t _dispatch_continuation_alloc(void) { dispatch_continuation_t dc = - fastpath(_dispatch_continuation_alloc_cacheonly()); - if(!dc) { + _dispatch_continuation_alloc_cacheonly(); + if (unlikely(!dc)) { return _dispatch_continuation_alloc_from_heap(); } return dc; @@ -1007,10 +2373,10 @@ static inline dispatch_continuation_t _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc) { dispatch_continuation_t prev_dc = (dispatch_continuation_t) - fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + _dispatch_thread_getspecific(dispatch_cache_key); int cnt = prev_dc ? 
prev_dc->dc_cache_cnt + 1 : 1; // Cap continuation cache - if (slowpath(cnt > _dispatch_continuation_cache_limit)) { + if (unlikely(cnt > _dispatch_continuation_cache_limit)) { return dc; } dc->do_next = prev_dc; @@ -1024,7 +2390,7 @@ static inline void _dispatch_continuation_free(dispatch_continuation_t dc) { dc = _dispatch_continuation_free_cacheonly(dc); - if (slowpath(dc)) { + if (unlikely(dc)) { _dispatch_continuation_free_to_cache_limit(dc); } } @@ -1033,72 +2399,102 @@ _dispatch_continuation_free(dispatch_continuation_t dc) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_invoke(dispatch_object_t dou, dispatch_queue_t dq) -{ - dispatch_continuation_t dc = dou._dc, dc1; - dispatch_group_t dg; - - _dispatch_trace_continuation_pop(dq, dou); - if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { - return dx_invoke(dou._do, NULL, DISPATCH_INVOKE_NONE); - } - - // Add the item back to the cache before calling the function. This - // allows the 'hot' continuation to be used for a quick callback. - // - // The ccache version is per-thread. - // Therefore, the object has not been reused yet. - // This generates better assembly. - if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { - _dispatch_continuation_voucher_adopt(dc); - dc1 = _dispatch_continuation_free_cacheonly(dc); - } else { - dc1 = NULL; - } - if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { - dg = dc->dc_data; +_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc) +{ + struct dispatch_object_s *dou = dc->dc_data; + unsigned long type = dx_type(dou); + if (type == DISPATCH_GROUP_TYPE) { + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_introspection_queue_item_complete(dou); + dispatch_group_leave((dispatch_group_t)dou); } else { - dg = NULL; - } - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - if (dg) { - dispatch_group_leave(dg); - _dispatch_release(dg); - } - _dispatch_introspection_queue_item_complete(dou); - if (slowpath(dc1)) { - _dispatch_continuation_free_to_cache_limit(dc1); + DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type"); } } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov, + dispatch_invoke_flags_t flags) +{ + dispatch_continuation_t dc = dou._dc, dc1; + dispatch_invoke_with_autoreleasepool(flags, { + uintptr_t dc_flags = dc->dc_flags; + // Add the item back to the cache before calling the function. This + // allows the 'hot' continuation to be used for a quick callback. + // + // The ccache version is per-thread. + // Therefore, the object has not been reused yet. + // This generates better assembly. 
+ _dispatch_continuation_voucher_adopt(dc, ov, dc_flags); + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + dc1 = _dispatch_continuation_free_cacheonly(dc); + } else { + dc1 = NULL; + } + if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) { + _dispatch_continuation_with_group_invoke(dc); + } else { + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_introspection_queue_item_complete(dou); + } + if (unlikely(dc1)) { + _dispatch_continuation_free_to_cache_limit(dc1); + } + }); +} + DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_continuation_pop(dispatch_object_t dou) +_dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq, + dispatch_invoke_flags_t flags) { - dispatch_queue_t dq = _dispatch_queue_get_current(); dispatch_pthread_root_queue_observer_hooks_t observer_hooks = _dispatch_get_pthread_root_queue_observer_hooks(); if (observer_hooks) observer_hooks->queue_will_execute(dq); - _dispatch_continuation_invoke(dou, dq); + _dispatch_trace_continuation_pop(dq, dou); + flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; + if (_dispatch_object_has_vtable(dou)) { + dx_invoke(dou._do, flags); + } else { + voucher_t ov = dq->dq_override_voucher; + _dispatch_continuation_invoke_inline(dou, ov, flags); + } if (observer_hooks) observer_hooks->queue_did_execute(dq); } +// used to forward the do_invoke of a continuation with a vtable to its real +// implementation. +#define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) \ + ({ \ + dispatch_continuation_t _dc = (dc), _dc1; \ + uintptr_t _dc_flags = (dc_flags); \ + _dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \ + if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \ + _dc1 = _dispatch_continuation_free_cacheonly(_dc); \ + } else { \ + _dc1 = NULL; \ + } \ + __VA_ARGS__; \ + _dispatch_introspection_queue_item_complete(_dc); \ + if (unlikely(_dc1)) { \ + _dispatch_continuation_free_to_cache_limit(_dc1); \ + } \ + }) + DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_priority_set(dispatch_continuation_t dc, pthread_priority_t pp, dispatch_block_flags_t flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t prio = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - prio = pp; - } else if (!(flags & DISPATCH_BLOCK_NO_QOS_CLASS)) { - prio = _dispatch_priority_propagate(); + if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) { + pp = _dispatch_priority_propagate(); } if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) { - prio |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; } - dc->dc_priority = prio; + dc->dc_priority = pp; #else (void)dc; (void)pp; (void)flags; #endif @@ -1114,21 +2510,55 @@ _dispatch_continuation_get_override_priority(dispatch_queue_t dq, bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG; pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - if (!p) { - enforce = false; - } else if (!enforce && (!dqp || defaultqueue)) { - enforce = true; - } - if (!enforce) { - p = dqp; + + dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY); + if (p && (enforce || !dqp || defaultqueue)) { + return p; } - return p; + return dqp; #else (void)dq; (void)dc; return 0; #endif } -#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_init_f(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t func, + pthread_priority_t pp, dispatch_block_flags_t 
flags, uintptr_t dc_flags) +{ + dc->dc_flags = dc_flags; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + _dispatch_continuation_voucher_set(dc, dqu, flags); + _dispatch_continuation_priority_set(dc, pp, flags); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_init(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, dispatch_block_t work, + pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags) +{ + dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT; + dc->dc_ctxt = _dispatch_Block_copy(work); + _dispatch_continuation_priority_set(dc, pp, flags); + + if (unlikely(_dispatch_block_has_private_data(work))) { + // always sets dc_func & dc_voucher + // may update dc_priority & do_vtable + return _dispatch_continuation_init_slow(dc, dqu, flags); + } + + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + dc->dc_func = _dispatch_call_block_and_release; + } else { + dc->dc_func = _dispatch_Block_invoke(work); + } + _dispatch_continuation_voucher_set(dc, dqu, flags); +} + +#endif // DISPATCH_PURE_C #endif /* __DISPATCH_INLINE_INTERNAL__ */ diff --git a/src/internal.h b/src/internal.h index 98626c6..a9aee11 100644 --- a/src/internal.h +++ b/src/internal.h @@ -27,7 +27,11 @@ #ifndef __DISPATCH_INTERNAL__ #define __DISPATCH_INTERNAL__ +#if __has_include() +#include +#else #include +#endif #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ @@ -35,8 +39,35 @@ #ifdef __APPLE__ #include #include + +#ifndef TARGET_OS_MAC_DESKTOP +#define TARGET_OS_MAC_DESKTOP (TARGET_OS_MAC && \ + !TARGET_OS_SIMULATOR && !TARGET_OS_IPHONE && !TARGET_OS_EMBEDDED) +#endif + +#if TARGET_OS_MAC_DESKTOP +# define DISPATCH_HOST_SUPPORTS_OSX(x) \ + (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) +# if !DISPATCH_HOST_SUPPORTS_OSX(101000) +# error "OS X hosts older than OS X 10.10 aren't supported anymore" +# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +#elif TARGET_OS_SIMULATOR +# define DISPATCH_HOST_SUPPORTS_OSX(x) \ + (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) +# if !DISPATCH_HOST_SUPPORTS_OSX(101000) +# error "Simulator hosts older than OS X 10.10 aren't supported anymore" +# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +#else +# define DISPATCH_HOST_SUPPORTS_OSX(x) 1 +# if __IPHONE_OS_VERSION_MIN_REQUIRED < 70000 +# error "iOS hosts older than iOS 7.0 aren't supported anymore" +# endif #endif +#else // !__APPLE__ +#define DISPATCH_HOST_SUPPORTS_OSX(x) 0 +#endif // !__APPLE__ + #if !defined(DISPATCH_MACH_SPI) && TARGET_OS_MAC #define DISPATCH_MACH_SPI 1 @@ -47,29 +78,35 @@ #if !defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC #define OS_VOUCHER_ACTIVITY_SPI 1 #endif -#if !defined(OS_VOUCHER_ACTIVITY_BUFFER_SPI) && TARGET_OS_MAC && \ - __has_include() -#define OS_VOUCHER_ACTIVITY_BUFFER_SPI 1 +#if !defined(OS_FIREHOSE_SPI) && TARGET_OS_MAC +#define OS_FIREHOSE_SPI 1 #endif #if !defined(DISPATCH_LAYOUT_SPI) && TARGET_OS_MAC #define DISPATCH_LAYOUT_SPI 1 #endif -#if !defined(USE_OBJC) && HAVE_OBJC -#define USE_OBJC 1 +#if __has_include() +#include +#if !defined(HAVE_DYLD_IS_MEMORY_IMMUTABLE) +#if defined(DYLD_MACOSX_VERSION_10_12) || defined(DYLD_IOS_VERSION_10_0) +#define HAVE_DYLD_IS_MEMORY_IMMUTABLE 1 +#else +#define HAVE_DYLD_IS_MEMORY_IMMUTABLE 0 #endif +#endif // !defined(HAVE_DYLD_IS_MEMORY_IMMUTABLE) +#endif // __has_include() -#if USE_OBJC && ((!TARGET_IPHONE_SIMULATOR && defined(__i386__)) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080)) -// Disable Objective-C support on platforms with legacy objc runtime -#undef USE_OBJC -#define USE_OBJC 0 
+#if !defined(USE_OBJC) && HAVE_OBJC +#define USE_OBJC 1 #endif #if USE_OBJC #define OS_OBJECT_HAVE_OBJC_SUPPORT 1 -#if __OBJC__ +#if defined(__OBJC__) #define OS_OBJECT_USE_OBJC 1 +// Force internal Objective-C sources to use class-visible headers +// even when not compiling in Swift. +#define OS_OBJECT_SWIFT3 1 #else #define OS_OBJECT_USE_OBJC 0 #endif // __OBJC__ @@ -80,6 +117,22 @@ #include #include +#define __DISPATCH_HIDE_SYMBOL(sym, version) \ + __asm__(".section __TEXT,__const\n\t" \ + ".globl $ld$hide$os" #version "$_" #sym "\n\t" \ + "$ld$hide$os" #version "$_" #sym ":\n\t" \ + " .byte 0\n\t" \ + ".previous") + + +#ifndef DISPATCH_HIDE_SYMBOL +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos) \ + __DISPATCH_HIDE_SYMBOL(sym, osx) +#else +#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos) +#endif +#endif #include #include @@ -95,47 +148,23 @@ #include #endif -#define DISPATCH_STRUCT_DECL(type, name, ...) \ - struct type __VA_ARGS__ name - -// Visual Studio C++ does not support C99 designated initializers. -// This means that static declarations should be zero initialized and cannot -// be const since we must fill in the values during DLL initialization. -#if !TARGET_OS_WIN32 -#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ -struct type name = { \ -__VA_ARGS__ \ -} -#else -#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ -struct type name = { 0 } -#endif - -#if !TARGET_OS_WIN32 -#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ - const DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) - -#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ - const DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#if defined(__OBJC__) || defined(__cplusplus) +#define DISPATCH_PURE_C 0 #else -#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ - DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) - -#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ - DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#define DISPATCH_PURE_C 1 #endif /* private.h must be included last to avoid picking up installed headers. 
*/ -#include "object_private.h" +#include "os/object_private.h" #include "queue_private.h" #include "source_private.h" #include "mach_private.h" #include "data_private.h" +#include "os/voucher_private.h" +#include "os/voucher_activity_private.h" #if !TARGET_OS_WIN32 #include "io_private.h" #endif -#include "voucher_private.h" -#include "voucher_activity_private.h" #include "layout_private.h" #include "benchmark.h" #include "private.h" @@ -174,6 +203,10 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_USE_DTRACE_INTROSPECTION 1 #endif +#ifndef DISPATCH_DEBUG_QOS +#define DISPATCH_DEBUG_QOS DISPATCH_DEBUG +#endif + #if HAVE_LIBKERN_OSCROSSENDIAN_H #include #endif @@ -202,10 +235,13 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #if HAVE_MALLOC_MALLOC_H #include #endif +#if __has_include() +#include +#endif // __has_include( -#if !TARGET_OS_WIN32 +#if !TARGET_OS_WIN32 #include #include #include @@ -214,8 +250,9 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #include #include -#else -#include "sys_queue.h" +#endif +#if defined(__linux__) +#include #endif #ifdef __BLOCKS__ @@ -244,25 +281,13 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #endif -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif -#ifndef __has_include -#define __has_include(x) 0 -#endif -#ifndef __has_feature -#define __has_feature(x) 0 -#endif -#ifndef __has_attribute -#define __has_attribute(x) 0 -#endif - #if __GNUC__ #define DISPATCH_NOINLINE __attribute__((__noinline__)) #define DISPATCH_USED __attribute__((__used__)) #define DISPATCH_UNUSED __attribute__((__unused__)) #define DISPATCH_WEAK __attribute__((__weak__)) #define DISPATCH_OVERLOADABLE __attribute__((__overloadable__)) +#define DISPATCH_PACKED __attribute__((__packed__)) #if DISPATCH_DEBUG #define DISPATCH_ALWAYS_INLINE_NDEBUG #else @@ -295,13 +320,36 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); /* I wish we had __builtin_expect_range() */ #if __GNUC__ -#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) -#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) +#define _safe_cast_to_long(x) \ + ({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \ + "__builtin_expect doesn't support types wider than long"); \ + (long)(x); }) +#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l)) +#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l)) +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) #else #define fastpath(x) (x) #define slowpath(x) (x) +#define likely(x) (!!(x)) +#define unlikely(x) (!!(x)) #endif // __GNUC__ +#if BYTE_ORDER == LITTLE_ENDIAN +#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { a; b; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { a; b; c; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { a; b; c; d; } +#else +#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { b; a; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { c; b; a; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { d; c; b; a; } +#endif + +#define _TAILQ_IS_ENQUEUED(elm, field) \ + ((elm)->field.tqe_prev != NULL) +#define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ + do { (elm)->field.tqe_prev = NULL; } while (0) + #if DISPATCH_DEBUG // sys/queue.h debugging #undef TRASHIT @@ -316,7 +364,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); 
TRASHIT((head)->tqh_last); \ } while (0) -DISPATCH_NOINLINE +DISPATCH_EXPORT DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); #if HAVE_MACH @@ -324,10 +372,13 @@ DISPATCH_NOINLINE void _dispatch_bug_client(const char* msg); DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); +#endif // HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_kevent_client(const char* msg, const char* filter, const char *operation, int err); -#endif + +DISPATCH_NOINLINE +void _dispatch_bug_deprecated(const char *msg); DISPATCH_NOINLINE DISPATCH_NORETURN void _dispatch_abort(size_t line, long val); @@ -353,12 +404,13 @@ void _dispatch_abort(size_t line, long val); #if DISPATCH_USE_OS_DEBUG_LOG #define _dispatch_log(msg, ...) os_debug_log("libdispatch", msg, ## __VA_ARGS__) #else -DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) +DISPATCH_EXPORT DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) void _dispatch_log(const char *msg, ...); #endif // DISPATCH_USE_OS_DEBUG_LOG -#define dsnprintf(...) \ - ({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; }) +#define dsnprintf(buf, siz, ...) \ + ({ size_t _siz = siz; int _r = snprintf(buf, _siz, __VA_ARGS__); \ + _r < 0 ? 0u : ((size_t)_r > _siz ? _siz : (size_t)_r); }) #if __GNUC__ #define dispatch_static_assert(e) ({ \ @@ -368,6 +420,9 @@ void _dispatch_log(const char *msg, ...); #define dispatch_static_assert(e) #endif +#define DISPATCH_BAD_INPUT ((void *_Nonnull)0) +#define DISPATCH_OUT_OF_MEMORY ((void *_Nonnull)0) + /* * For reporting bugs within libdispatch when using the "_debug" version of the * library. @@ -409,7 +464,7 @@ static inline void _dispatch_assert(long e, long line) { static inline void _dispatch_assert_zero(long e, long line) { if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); } -#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__) +#define dispatch_assert_zero(e) _dispatch_assert_zero((long)(e), __LINE__) #endif /* __GNUC__ */ /* @@ -513,31 +568,40 @@ _dispatch_object_debug(dispatch_object_t object, const char *message, ...); #ifdef __BLOCKS__ #define _dispatch_Block_invoke(bb) \ ((dispatch_function_t)((struct Block_layout *)bb)->invoke) +void *_dispatch_Block_copy(void *block); #if __GNUC__ -dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); #define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) -#else -dispatch_block_t _dispatch_Block_copy(const void *block); #endif void _dispatch_call_block_and_release(void *block); #endif /* __BLOCKS__ */ void _dispatch_temporary_resource_shortage(void); void *_dispatch_calloc(size_t num_items, size_t size); +const char *_dispatch_strdup_if_mutable(const char *str); void _dispatch_vtable_init(void); char *_dispatch_get_build(void); uint64_t _dispatch_timeout(dispatch_time_t when); - -extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; - -#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR) -// rdar://problem/15492045 -#if __has_include() -#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 -#include -#endif -#endif +uint64_t _dispatch_time_nanoseconds_since_epoch(dispatch_time_t when); + +#define _DISPATCH_UNSAFE_FORK_MULTITHREADED ((uint8_t)1) +#define _DISPATCH_UNSAFE_FORK_PROHIBIT ((uint8_t)2) +extern uint8_t _dispatch_unsafe_fork; +extern bool _dispatch_child_of_unsafe_fork; +void _dispatch_fork_becomes_unsafe_slow(void); + +#define _dispatch_is_multithreaded_inline() \ + ((_dispatch_unsafe_fork & _DISPATCH_UNSAFE_FORK_MULTITHREADED) != 0) + +DISPATCH_ALWAYS_INLINE 
+static inline void +_dispatch_fork_becomes_unsafe(void) +{ + if (!fastpath(_dispatch_is_multithreaded_inline())) { + _dispatch_fork_becomes_unsafe_slow(); + DISPATCH_COMPILER_CAN_ASSUME(_dispatch_is_multithreaded_inline()); + } +} /* #includes dependent on internal.h */ #include "shims.h" @@ -548,34 +612,32 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT #define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001 #endif -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1080 -#ifndef DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK -#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 -#endif -#endif -#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080 -#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0 -#endif -#if TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 -#ifndef DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK -#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1 -#endif -#endif -#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101000 -#undef HAVE__PTHREAD_WORKQUEUE_INIT -#define HAVE__PTHREAD_WORKQUEUE_INIT 0 -#endif #endif // HAVE_PTHREAD_WORKQUEUES #if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 \ && !defined(HAVE_PTHREAD_WORKQUEUE_QOS) #define HAVE_PTHREAD_WORKQUEUE_QOS 1 #endif +#if HAVE__PTHREAD_WORKQUEUE_INIT && (PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 \ + || (PTHREAD_WORKQUEUE_SPI_VERSION == 20140730 && \ + defined(WORKQ_FEATURE_KEVENT))) \ + && !defined(HAVE_PTHREAD_WORKQUEUE_KEVENT) +#if PTHREAD_WORKQUEUE_SPI_VERSION == 20140730 +// rdar://problem/20609877 +typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t; +#endif +#define HAVE_PTHREAD_WORKQUEUE_KEVENT 1 +#endif + +#ifndef PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK +#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_HOST_SUPPORTS_OSX(101200) +#define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 1 +#else +#define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 0 +#endif +#endif // PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK #if HAVE_MACH -#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070) +#if !defined(MACH_NOTIFY_SEND_POSSIBLE) #undef MACH_NOTIFY_SEND_POSSIBLE #define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME #endif @@ -593,20 +655,22 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #endif #endif // EVFILT_VM -#if TARGET_IPHONE_SIMULATOR -#undef DISPATCH_USE_MEMORYSTATUS_SOURCE -#define DISPATCH_USE_MEMORYSTATUS_SOURCE 0 +#if TARGET_OS_SIMULATOR +#undef DISPATCH_USE_MEMORYPRESSURE_SOURCE +#define DISPATCH_USE_MEMORYPRESSURE_SOURCE 0 #undef DISPATCH_USE_VM_PRESSURE_SOURCE #define DISPATCH_USE_VM_PRESSURE_SOURCE 0 -#endif // TARGET_IPHONE_SIMULATOR -#if !defined(DISPATCH_USE_MEMORYSTATUS_SOURCE) && DISPATCH_USE_MEMORYSTATUS -#define DISPATCH_USE_MEMORYSTATUS_SOURCE 1 +#endif // TARGET_OS_SIMULATOR +#if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS +#define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1 #elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE #define DISPATCH_USE_VM_PRESSURE_SOURCE 1 #endif +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +extern bool _dispatch_memory_warn; +#endif -#if !defined(NOTE_LEEWAY) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#if !defined(NOTE_LEEWAY) #undef NOTE_LEEWAY 
#define NOTE_LEEWAY 0 #undef NOTE_CRITICAL @@ -615,6 +679,22 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #define NOTE_BACKGROUND 0 #endif // NOTE_LEEWAY +#if !defined(NOTE_FUNLOCK) +#define NOTE_FUNLOCK 0x00000100 +#endif + +#if !defined(NOTE_MACH_CONTINUOUS_TIME) +#define NOTE_MACH_CONTINUOUS_TIME 0 +#endif // NOTE_MACH_CONTINUOUS_TIME + +#if !defined(HOST_NOTIFY_CALENDAR_SET) +#define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE +#endif // HOST_NOTIFY_CALENDAR_SET + +#if !defined(HOST_CALENDAR_SET_REPLYID) +#define HOST_CALENDAR_SET_REPLYID 951 +#endif // HOST_CALENDAR_SET_REPLYID + #if HAVE_DECL_NOTE_REAP #if defined(NOTE_REAP) && defined(__APPLE__) #undef NOTE_REAP @@ -622,9 +702,23 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #endif #endif // HAVE_DECL_NOTE_REAP -#if !defined(EV_UDATA_SPECIFIC) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100) +#ifndef VQ_QUOTA +#undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982 +#endif // VQ_QUOTA + +#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \ + !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN +#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0 +#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN + +#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \ + !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL +#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0 +#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL + +#if !defined(EV_UDATA_SPECIFIC) || !DISPATCH_HOST_SUPPORTS_OSX(101100) #undef DISPATCH_USE_EV_UDATA_SPECIFIC #define DISPATCH_USE_EV_UDATA_SPECIFIC 0 #elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC) @@ -634,23 +728,64 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #if !DISPATCH_USE_EV_UDATA_SPECIFIC #undef EV_UDATA_SPECIFIC #define EV_UDATA_SPECIFIC 0 -#undef DISPATCH_DYNAMIC_SELECT_FALLBACK -#define DISPATCH_DYNAMIC_SELECT_FALLBACK 0 -#undef DISPATCH_USE_SELECT_FALLBACK -#define DISPATCH_USE_SELECT_FALLBACK 1 +#undef EV_VANISHED +#define EV_VANISHED 0 #endif // !DISPATCH_USE_EV_UDATA_SPECIFIC -#if !defined(EV_SET_QOS) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100) +#ifndef EV_VANISHED +#define EV_VANISHED 0x0200 +#endif + +#ifndef DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS +#if TARGET_OS_MAC && !DISPATCH_HOST_SUPPORTS_OSX(101200) +// deferred delete can return bogus ENOENTs on older kernels +#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1 +#else +#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0 +#endif +#endif + +#if !defined(EV_SET_QOS) || !DISPATCH_HOST_SUPPORTS_OSX(101100) #undef DISPATCH_USE_KEVENT_QOS #define DISPATCH_USE_KEVENT_QOS 0 #elif !defined(DISPATCH_USE_KEVENT_QOS) #define DISPATCH_USE_KEVENT_QOS 1 #endif // EV_SET_QOS +#if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(KEVENT_FLAG_WORKQ) && \ + DISPATCH_USE_EV_UDATA_SPECIFIC && DISPATCH_USE_KEVENT_QOS && \ + DISPATCH_HOST_SUPPORTS_OSX(101200) && \ + !defined(DISPATCH_USE_KEVENT_WORKQUEUE) +#define DISPATCH_USE_KEVENT_WORKQUEUE 1 +#endif + + +#if (!DISPATCH_USE_KEVENT_WORKQUEUE || DISPATCH_DEBUG) && \ + !defined(DISPATCH_USE_MGR_THREAD) +#define DISPATCH_USE_MGR_THREAD 1 +#endif + +#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_EV_UDATA_SPECIFIC && \ + DISPATCH_HOST_SUPPORTS_OSX(101200) && \ + 
!defined(DISPATCH_USE_EVFILT_MACHPORT_DIRECT) +#define DISPATCH_USE_EVFILT_MACHPORT_DIRECT 1 +#endif + +#ifndef MACH_SEND_OVERRIDE +#define MACH_SEND_OVERRIDE 0x00000020 +typedef unsigned int mach_msg_priority_t; +#define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0) +#endif // MACH_SEND_OVERRIDE + + +#if (!DISPATCH_USE_EVFILT_MACHPORT_DIRECT || DISPATCH_DEBUG) && \ + !defined(DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK) +#define DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK 1 +#endif + #if DISPATCH_USE_KEVENT_QOS typedef struct kevent_qos_s _dispatch_kevent_qos_s; +typedef typeof(((struct kevent_qos_s*)NULL)->qos) _dispatch_kevent_priority_t; #else // DISPATCH_USE_KEVENT_QOS #ifndef KEVENT_FLAG_IMMEDIATE #define KEVENT_FLAG_NONE 0x00 @@ -673,10 +808,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #endif // DISPATCH_USE_KEVENT_QOS #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 -#undef DISPATCH_USE_SETNOSIGPIPE -#define DISPATCH_USE_SETNOSIGPIPE 0 -#endif #ifndef DISPATCH_USE_SETNOSIGPIPE #define DISPATCH_USE_SETNOSIGPIPE 1 #endif @@ -695,14 +826,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #if HAVE_LIBPROC_INTERNAL_H #include #include -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 -#undef DISPATCH_USE_IMPORTANCE_ASSERTION -#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 -#endif -#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 -#undef DISPATCH_USE_IMPORTANCE_ASSERTION -#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 -#endif #ifndef DISPATCH_USE_IMPORTANCE_ASSERTION #define DISPATCH_USE_IMPORTANCE_ASSERTION 1 #endif @@ -710,10 +833,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #if HAVE_SYS_GUARDED_H #include -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 -#undef DISPATCH_USE_GUARDED_FD -#define DISPATCH_USE_GUARDED_FD 0 -#endif #ifndef DISPATCH_USE_GUARDED_FD #define DISPATCH_USE_GUARDED_FD 1 #endif @@ -724,6 +843,68 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #endif // HAVE_SYS_GUARDED_H +#if __has_include() +#include +#ifndef DBG_DISPATCH +#define DBG_DISPATCH 46 +#endif +#ifndef KDBG_CODE +#define KDBG_CODE(...) 
0 +#endif +#define DISPATCH_CODE(subclass, code) \ + KDBG_CODE(DBG_DISPATCH, DISPATCH_TRACE_SUBCLASS_##subclass, code) +#ifdef ARIADNEDBG_CODE +#define ARIADNE_ENTER_DISPATCH_MAIN_CODE ARIADNEDBG_CODE(220, 2) +#else +#define ARIADNE_ENTER_DISPATCH_MAIN_CODE 0 +#endif +#if !defined(DISPATCH_USE_VOUCHER_KDEBUG_TRACE) && DISPATCH_INTROSPECTION +#define DISPATCH_USE_VOUCHER_KDEBUG_TRACE 1 +#endif + +#define DISPATCH_TRACE_SUBCLASS_DEFAULT 0 +#define DISPATCH_TRACE_SUBCLASS_VOUCHER 1 +#define DISPATCH_TRACE_SUBCLASS_PERF 2 +#define DISPATCH_TRACE_SUBCLASS_MACH_MSG 3 + +#define DISPATCH_PERF_non_leaf_retarget DISPATCH_CODE(PERF, 1) +#define DISPATCH_PERF_post_activate_retarget DISPATCH_CODE(PERF, 2) +#define DISPATCH_PERF_post_activate_mutation DISPATCH_CODE(PERF, 3) +#define DISPATCH_PERF_delayed_registration DISPATCH_CODE(PERF, 4) +#define DISPATCH_PERF_mutable_target DISPATCH_CODE(PERF, 5) + +#define DISPATCH_MACH_MSG_hdr_move DISPATCH_CODE(MACH_MSG, 1) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, + uint64_t c, uint64_t d) +{ + if (!code) return; +#ifdef _COMM_PAGE_KDEBUG_ENABLE + if (likely(*(volatile uint32_t *)_COMM_PAGE_KDEBUG_ENABLE == 0)) return; +#endif + kdebug_trace(code, a, b, c, d); +} +#define _dispatch_cast_to_uint64(e) \ + __builtin_choose_expr(sizeof(e) > 4, \ + ((uint64_t)(e)), ((uint64_t)(uintptr_t)(e))) +#define _dispatch_ktrace(code, a, b, c, d) _dispatch_ktrace_impl(code, \ + _dispatch_cast_to_uint64(a), _dispatch_cast_to_uint64(b), \ + _dispatch_cast_to_uint64(c), _dispatch_cast_to_uint64(d)) + +#else // __has_include() +#define DISPATCH_CODE(subclass, code) 0 +#define ARIADNE_ENTER_DISPATCH_MAIN_CODE 0 +#define DISPATCH_USE_VOUCHER_KDEBUG_TRACE 0 +#define _dispatch_ktrace(code, a, b, c, d) +#endif // !__has_include() +#define _dispatch_ktrace4(code, a, b, c, d) _dispatch_ktrace(code, a, b, c, d) +#define _dispatch_ktrace3(code, a, b, c) _dispatch_ktrace(code, a, b, c, 0) +#define _dispatch_ktrace2(code, a, b) _dispatch_ktrace(code, a, b, 0, 0) +#define _dispatch_ktrace1(code, a) _dispatch_ktrace(code, a, 0, 0, 0) +#define _dispatch_ktrace0(code) _dispatch_ktrace(code, 0, 0, 0, 0) + #ifndef MACH_MSGH_BITS_VOUCHER_MASK #define MACH_MSGH_BITS_VOUCHER_MASK 0x001f0000 #define MACH_MSGH_BITS_SET_PORTS(remote, local, voucher) \ @@ -740,9 +921,66 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #define MACH_SEND_INVALID_VOUCHER 0x10000005 #endif +#if TARGET_OS_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100 +#undef VOUCHER_USE_MACH_VOUCHER +#define VOUCHER_USE_MACH_VOUCHER 0 +#endif +#ifndef VOUCHER_USE_MACH_VOUCHER +#if __has_include() +#define VOUCHER_USE_MACH_VOUCHER 1 +#endif +#endif + +#if RDAR_24272659 // FIXME: +#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 +#elif !defined(VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER) +#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 1 +#endif +#else // RDAR_24272659 +#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 +#endif // RDAR_24272659 + +#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef VOUCHER_USE_BANK_AUTOREDEEM +#define VOUCHER_USE_BANK_AUTOREDEEM 0 +#elif !defined(VOUCHER_USE_BANK_AUTOREDEEM) +#define VOUCHER_USE_BANK_AUTOREDEEM 1 +#endif + +#if !VOUCHER_USE_MACH_VOUCHER || \ + !__has_include() || \ + !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef VOUCHER_USE_MACH_VOUCHER_PRIORITY 
+#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 0 +#elif !defined(VOUCHER_USE_MACH_VOUCHER_PRIORITY) +#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 1 +#endif + +#ifndef VOUCHER_USE_PERSONA +#if VOUCHER_USE_MACH_VOUCHER && defined(BANK_PERSONA_TOKEN) && \ + TARGET_OS_IOS && !TARGET_OS_SIMULATOR +#define VOUCHER_USE_PERSONA 1 +#else +#define VOUCHER_USE_PERSONA 0 +#endif +#endif // VOUCHER_USE_PERSONA + +#if VOUCHER_USE_MACH_VOUCHER +#undef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 +#else +#undef MACH_RCV_VOUCHER +#define MACH_RCV_VOUCHER 0 +#define VOUCHER_USE_PERSONA 0 +#endif // VOUCHER_USE_MACH_VOUCHER + #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // +#define _dispatch_set_crash_log_cause_and_message(ac, msg) #define _dispatch_set_crash_log_message(msg) #define _dispatch_set_crash_log_message_dynamic(msg) @@ -753,19 +991,22 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; // 2) A hand crafted call to mach_msg*() screwed up. Use MIG. #define DISPATCH_VERIFY_MIG(x) do { \ if ((x) == MIG_REPLY_MISMATCH) { \ - _dispatch_set_crash_log_message("MIG_REPLY_MISMATCH"); \ + _dispatch_set_crash_log_cause_and_message((x), \ + "MIG_REPLY_MISMATCH"); \ _dispatch_hardware_crash(); \ } \ } while (0) #endif -#define DISPATCH_CRASH(x) do { \ - _dispatch_set_crash_log_message("BUG IN LIBDISPATCH: " x); \ +#define DISPATCH_INTERNAL_CRASH(c, x) do { \ + _dispatch_set_crash_log_cause_and_message((c), \ + "BUG IN LIBDISPATCH: " x); \ _dispatch_hardware_crash(); \ } while (0) -#define DISPATCH_CLIENT_CRASH(x) do { \ - _dispatch_set_crash_log_message("BUG IN CLIENT OF LIBDISPATCH: " x); \ +#define DISPATCH_CLIENT_CRASH(c, x) do { \ + _dispatch_set_crash_log_cause_and_message((c), \ + "BUG IN CLIENT OF LIBDISPATCH: " x); \ _dispatch_hardware_crash(); \ } while (0) @@ -774,19 +1015,72 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; _dispatch_hardware_crash(); \ } while (0) -extern int _dispatch_set_qos_class_enabled; +#define DISPATCH_ASSERTION_FAILED_MESSAGE \ + "BUG IN CLIENT OF LIBDISPATCH: Assertion failed: " + +#define _dispatch_assert_crash(msg) do { \ + const char *__msg = (msg); \ + _dispatch_log("%s", __msg); \ + _dispatch_set_crash_log_message_dynamic(__msg); \ + _dispatch_hardware_crash(); \ + } while (0) + +#define _dispatch_client_assert_fail(fmt, ...) 
do { \ + char *_msg = NULL; \ + asprintf(&_msg, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, \ + ##__VA_ARGS__); \ + _dispatch_assert_crash(_msg); \ + free(_msg); \ + } while (0) + #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) -#define DISPATCH_PRIORITY_ENFORCE 0x1 -#define DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE 0x2 -static inline void _dispatch_adopt_priority_and_replace_voucher( - pthread_priority_t priority, voucher_t voucher, unsigned long flags); +DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long, + DISPATCH_PRIORITY_ENFORCE = 0x1, + DISPATCH_VOUCHER_REPLACE = 0x2, + DISPATCH_VOUCHER_CONSUME = 0x4, + DISPATCH_THREAD_PARK = 0x8, +); +DISPATCH_WARN_RESULT +static inline voucher_t _dispatch_adopt_priority_and_set_voucher( + pthread_priority_t priority, voucher_t voucher, + _dispatch_thread_set_self_t flags); #if HAVE_MACH -static inline void _dispatch_set_priority_and_mach_voucher( - pthread_priority_t priority, mach_voucher_t kv); mach_port_t _dispatch_get_mach_host_port(void); #endif +#if HAVE_PTHREAD_WORKQUEUE_QOS +#if DISPATCH_DEBUG +extern int _dispatch_set_qos_class_enabled; +#else +#define _dispatch_set_qos_class_enabled (1) +#endif +#endif // HAVE_PTHREAD_WORKQUEUE_QOS +#if DISPATCH_USE_KEVENT_WORKQUEUE +#if !HAVE_PTHREAD_WORKQUEUE_QOS || !DISPATCH_USE_KEVENT_QOS || \ + !DISPATCH_USE_EV_UDATA_SPECIFIC +#error Invalid build configuration +#endif +#if DISPATCH_USE_MGR_THREAD +extern int _dispatch_kevent_workqueue_enabled; +#else +#define _dispatch_kevent_workqueue_enabled (1) +#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE + +#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT +#if !DISPATCH_USE_KEVENT_WORKQUEUE || !DISPATCH_USE_EV_UDATA_SPECIFIC +#error Invalid build configuration +#endif +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +extern int _dispatch_evfilt_machport_direct_enabled; +#else +#define _dispatch_evfilt_machport_direct_enabled (1) +#endif +#else +#define _dispatch_evfilt_machport_direct_enabled (0) +#endif // DISPATCH_USE_EVFILT_MACHPORT_DIRECT + /* #includes dependent on internal.h */ #include "object_internal.h" @@ -800,5 +1094,6 @@ mach_port_t _dispatch_get_mach_host_port(void); #include "io_internal.h" #endif #include "inline_internal.h" +#include "firehose/firehose_internal.h" #endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/introspection.c b/src/introspection.c index 35b0b57..d847cb9 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -23,6 +23,7 @@ #if DISPATCH_INTROSPECTION +#include #include "internal.h" #include "dispatch/introspection.h" #include "introspection_private.h" @@ -35,38 +36,50 @@ typedef struct dispatch_introspection_thread_s { } dispatch_introspection_thread_s; typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t; -static TAILQ_HEAD(, dispatch_introspection_thread_s) - _dispatch_introspection_threads = - TAILQ_HEAD_INITIALIZER(_dispatch_introspection_threads); -static volatile OSSpinLock _dispatch_introspection_threads_lock; +struct dispatch_introspection_state_s _dispatch_introspection = { + .threads = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.threads), + .queues = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.queues), +}; static void _dispatch_introspection_thread_remove(void *ctxt); -static TAILQ_HEAD(, dispatch_queue_s) _dispatch_introspection_queues = - TAILQ_HEAD_INITIALIZER(_dispatch_introspection_queues); -static volatile OSSpinLock _dispatch_introspection_queues_lock; - -static ptrdiff_t 
_dispatch_introspection_thread_queue_offset; +static void _dispatch_introspection_queue_order_dispose(dispatch_queue_t dq); #pragma mark - #pragma mark dispatch_introspection_init +DISPATCH_NOINLINE +static bool +_dispatch_getenv_bool(const char *env, bool default_v) +{ + const char *v = getenv(env); + + if (v) { + return strcasecmp(v, "YES") == 0 || strcasecmp(v, "Y") == 0 || + strcasecmp(v, "TRUE") == 0 || atoi(v); + } + return default_v; +} + void _dispatch_introspection_init(void) { - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, &_dispatch_main_q, diq_list); - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, &_dispatch_mgr_q, diq_list); #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, _dispatch_mgr_q.do_targetq, diq_list); #endif for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, &_dispatch_root_queues[i], diq_list); } + _dispatch_introspection.debug_queue_inversions = + _dispatch_getenv_bool("LIBDISPATCH_DEBUG_QUEUE_INVERSIONS", false); + // Hack to determine queue TSD offset from start of pthread structure uintptr_t thread = _dispatch_thread_self(); thread_identifier_info_data_t tiid; @@ -74,7 +87,7 @@ _dispatch_introspection_init(void) kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread), THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt); if (!dispatch_assume_zero(kr)) { - _dispatch_introspection_thread_queue_offset = + _dispatch_introspection.thread_queue_offset = (void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread; } _dispatch_thread_key_create(&dispatch_introspection_key, @@ -116,21 +129,21 @@ _dispatch_introspection_thread_add(void) dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc(); dit->dit_isa = (void*)0x41; dit->thread = (void*)thread; - dit->queue = !_dispatch_introspection_thread_queue_offset ? NULL : - (void*)thread + _dispatch_introspection_thread_queue_offset; + dit->queue = !_dispatch_introspection.thread_queue_offset ? 
NULL : + (void*)thread + _dispatch_introspection.thread_queue_offset; _dispatch_thread_setspecific(dispatch_introspection_key, dit); - OSSpinLockLock(&_dispatch_introspection_threads_lock); - TAILQ_INSERT_TAIL(&_dispatch_introspection_threads, dit, dit_list); - OSSpinLockUnlock(&_dispatch_introspection_threads_lock); + _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection.threads, dit, dit_list); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock); } static void _dispatch_introspection_thread_remove(void *ctxt) { dispatch_introspection_thread_t dit = ctxt; - OSSpinLockLock(&_dispatch_introspection_threads_lock); - TAILQ_REMOVE(&_dispatch_introspection_threads, dit, dit_list); - OSSpinLockUnlock(&_dispatch_introspection_threads_lock); + _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock); + TAILQ_REMOVE(&_dispatch_introspection.threads, dit, dit_list); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock); _dispatch_continuation_free((void*)dit); _dispatch_thread_setspecific(dispatch_introspection_key, NULL); } @@ -138,71 +151,116 @@ _dispatch_introspection_thread_remove(void *ctxt) #pragma mark - #pragma mark dispatch_introspection_info -static inline -dispatch_introspection_queue_function_s +DISPATCH_USED inline +dispatch_introspection_queue_s +dispatch_introspection_queue_get_info(dispatch_queue_t dq) +{ + bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + dispatch_introspection_queue_s diq = { + .queue = dq, + .target_queue = dq->do_targetq, + .label = dq->dq_label, + .serialnum = dq->dq_serialnum, + .width = dq->dq_width, + .suspend_count = _dq_state_suspend_cnt(dq_state) + dq->dq_side_suspend_cnt, + .enqueued = _dq_state_is_enqueued(dq_state) && !global, + .barrier = _dq_state_is_in_barrier(dq_state) && !global, + .draining = (dq->dq_items_head == (void*)~0ul) || + (!dq->dq_items_head && dq->dq_items_tail), + .global = global, + .main = (dq == &_dispatch_main_q), + }; + return diq; +} + +static inline void _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, - dispatch_continuation_t dc, unsigned long *type) + dispatch_continuation_t dc, dispatch_introspection_queue_item_t diqi) { void *ctxt = dc->dc_ctxt; dispatch_function_t func = dc->dc_func; pthread_t waiter = NULL; bool apply = false; - long flags = (long)dc->do_vtable; - if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { - waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data); - if (flags & DISPATCH_OBJ_BARRIER_BIT) { - dc = dc->dc_ctxt; - dq = dc->dc_data; + uintptr_t flags = dc->dc_flags; + + if (_dispatch_object_has_vtable(dc)) { + flags = 0; + switch (dc_type(dc)) { +#if HAVE_PTHREAD_WORKQUEUE_QOS + case DC_OVERRIDE_STEALING_TYPE: + case DC_OVERRIDE_OWNING_TYPE: + dc = dc->dc_data; + if (_dispatch_object_has_vtable(dc)) { + // these really wrap queues so we should hide the continuation type + dq = (dispatch_queue_t)dc; + diqi->type = dispatch_introspection_queue_item_type_queue; + diqi->queue = dispatch_introspection_queue_get_info(dq); + return; + } + return _dispatch_introspection_continuation_get_info(dq, dc, diqi); +#endif + case DC_ASYNC_REDIRECT_TYPE: + DISPATCH_INTERNAL_CRASH(0, "Handled by the caller"); + case DC_MACH_SEND_BARRRIER_DRAIN_TYPE: + break; + case DC_MACH_SEND_BARRIER_TYPE: + case DC_MACH_RECV_BARRIER_TYPE: + flags = (uintptr_t)dc->dc_data; + dq = 
dq->do_targetq; + break; } - ctxt = dc->dc_ctxt; - func = dc->dc_func; - } - if (func == _dispatch_sync_recurse_invoke) { - dc = dc->dc_ctxt; - dq = dc->dc_data; - ctxt = dc->dc_ctxt; - func = dc->dc_func; - } else if (func == _dispatch_async_redirect_invoke) { - dq = dc->dc_data; - dc = dc->dc_other; - ctxt = dc->dc_ctxt; - func = dc->dc_func; - flags = (long)dc->do_vtable; - } else if (func == _dispatch_mach_barrier_invoke) { - dq = dq->do_targetq; - ctxt = dc->dc_data; - func = dc->dc_other; - } else if (func == _dispatch_apply_invoke || - func == _dispatch_apply_redirect_invoke) { - dispatch_apply_t da = ctxt; - if (da->da_todo) { - dc = da->da_dc; - if (func == _dispatch_apply_redirect_invoke) { + } else { + if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { + waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data); + if (flags & DISPATCH_OBJ_BARRIER_BIT) { + dc = dc->dc_ctxt; dq = dc->dc_data; } ctxt = dc->dc_ctxt; func = dc->dc_func; - apply = true; + } + if (func == _dispatch_sync_recurse_invoke) { + dc = dc->dc_ctxt; + dq = dc->dc_data; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + } else if (func == _dispatch_apply_invoke || + func == _dispatch_apply_redirect_invoke) { + dispatch_apply_t da = ctxt; + if (da->da_todo) { + dc = da->da_dc; + dq = dc->dc_data; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + apply = true; + } } } - if (func == _dispatch_call_block_and_release) { - *type = dispatch_introspection_queue_item_type_block; + if (flags & DISPATCH_OBJ_BLOCK_BIT) { + diqi->type = dispatch_introspection_queue_item_type_block; func = _dispatch_Block_invoke(ctxt); } else { - *type = dispatch_introspection_queue_item_type_function; + diqi->type = dispatch_introspection_queue_item_type_function; } - dispatch_introspection_queue_function_s diqf= { + diqi->function = (dispatch_introspection_queue_function_s){ .continuation = dc, .target_queue = dq, .context = ctxt, .function = func, - .group = flags & DISPATCH_OBJ_GROUP_BIT ? 
dc->dc_data : NULL, .waiter = waiter, - .barrier = flags & DISPATCH_OBJ_BARRIER_BIT, + .barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1, .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT, .apply = apply, }; - return diqf; + if (flags & DISPATCH_OBJ_GROUP_BIT) { + dispatch_group_t group = dc->dc_data; + if (dx_type(group) == DISPATCH_GROUP_TYPE) { + diqi->function.group = group; + } + } } static inline @@ -218,31 +276,6 @@ _dispatch_introspection_object_get_info(dispatch_object_t dou) return dio; } -DISPATCH_USED inline -dispatch_introspection_queue_s -dispatch_introspection_queue_get_info(dispatch_queue_t dq) -{ - bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); - uint16_t width = dq->dq_width; - if (width > 1 && width != DISPATCH_QUEUE_WIDTH_MAX) width /= 2; - dispatch_introspection_queue_s diq = { - .queue = dq, - .target_queue = dq->do_targetq, - .label = dq->dq_label, - .serialnum = dq->dq_serialnum, - .width = width, - .suspend_count = dq->do_suspend_cnt / 2, - .enqueued = (dq->do_suspend_cnt & 1) && !global, - .barrier = (dq->dq_running & 1) && !global, - .draining = (dq->dq_items_head == (void*)~0ul) || - (!dq->dq_items_head && dq->dq_items_tail), - .global = global, - .main = (dq == &_dispatch_main_q), - }; - return diq; -} - static inline dispatch_introspection_source_s _dispatch_introspection_source_get_info(dispatch_source_t ds) @@ -255,31 +288,28 @@ _dispatch_introspection_source_get_info(dispatch_source_t ds) if (dc) { ctxt = dc->dc_ctxt; handler = dc->dc_func; - hdlr_is_block = ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT); - } - bool after = (handler == _dispatch_after_timer_callback); - if (after && !(ds->ds_atomic_flags & DSF_CANCELED)) { - dc = ctxt; - ctxt = dc->dc_ctxt; - handler = dc->dc_func; - hdlr_is_block = (handler == _dispatch_call_block_and_release); - if (hdlr_is_block) { - handler = _dispatch_Block_invoke(ctxt); - } + hdlr_is_block = (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT); } + + uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed); dispatch_introspection_source_s dis = { .source = ds, .target_queue = ds->do_targetq, - .type = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.filter : 0, - .handle = ds->ds_dkev ? 
(unsigned long)ds->ds_dkev->dk_kevent.ident : 0, .context = ctxt, .handler = handler, - .suspend_count = ds->do_suspend_cnt / 2, - .enqueued = (ds->do_suspend_cnt & 1), + .suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt, + .enqueued = _dq_state_is_enqueued(dq_state), .handler_is_block = hdlr_is_block, .timer = ds->ds_is_timer, - .after = after, + .after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER), }; + dispatch_kevent_t dk = ds->ds_dkev; + if (ds->ds_is_custom_source) { + dis.type = (unsigned long)dk; + } else if (dk) { + dis.type = (unsigned long)dk->dk_kevent.filter; + dis.handle = (unsigned long)dk->dk_kevent.ident; + } return dis; } @@ -303,11 +333,21 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, dispatch_continuation_t dc) { dispatch_introspection_queue_item_s diqi; - if (DISPATCH_OBJ_IS_VTABLE(dc)) { - dispatch_object_t dou = (dispatch_object_t)dc; + dispatch_object_t dou; + +again: + dou._dc = dc; + if (_dispatch_object_has_vtable(dou._do)) { unsigned long type = dx_type(dou._do); unsigned long metatype = type & _DISPATCH_META_TYPE_MASK; - if (metatype == _DISPATCH_QUEUE_TYPE && + if (type == DC_ASYNC_REDIRECT_TYPE) { + dq = dc->dc_data; + dc = dc->dc_other; + goto again; + } + if (metatype == _DISPATCH_CONTINUATION_TYPE) { + _dispatch_introspection_continuation_get_info(dq, dc, &diqi); + } else if (metatype == _DISPATCH_QUEUE_TYPE && type != DISPATCH_QUEUE_SPECIFIC_TYPE) { diqi.type = dispatch_introspection_queue_item_type_queue; diqi.queue = dispatch_introspection_queue_get_info(dou._dq); @@ -320,8 +360,7 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, diqi.object = _dispatch_introspection_object_get_info(dou._do); } } else { - diqi.function = _dispatch_introspection_continuation_get_info(dq, dc, - &diqi.type); + _dispatch_introspection_continuation_get_info(dq, dc, &diqi); } return diqi; } @@ -335,7 +374,7 @@ dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, dispatch_introspection_queue_t queues) { dispatch_queue_t next; - next = start ? start : TAILQ_FIRST(&_dispatch_introspection_queues); + next = start ? start : TAILQ_FIRST(&_dispatch_introspection.queues); while (count--) { if (!next) { queues->queue = NULL; @@ -353,7 +392,7 @@ dispatch_introspection_get_queue_threads(dispatch_continuation_t start, size_t count, dispatch_introspection_queue_thread_t threads) { dispatch_introspection_thread_t next = start ? 
(void*)start : - TAILQ_FIRST(&_dispatch_introspection_threads); + TAILQ_FIRST(&_dispatch_introspection.threads); while (count--) { if (!next) { threads->object = NULL; @@ -488,9 +527,11 @@ _dispatch_introspection_queue_create_hook(dispatch_queue_t dq) dispatch_queue_t _dispatch_introspection_queue_create(dispatch_queue_t dq) { - OSSpinLockLock(&_dispatch_introspection_queues_lock); - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, dq, diq_list); - OSSpinLockUnlock(&_dispatch_introspection_queues_lock); + TAILQ_INIT(&dq->diq_order_top_head); + TAILQ_INIT(&dq->diq_order_bottom_head); + _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq); if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) { @@ -524,9 +565,10 @@ _dispatch_introspection_queue_dispose(dispatch_queue_t dq) _dispatch_introspection_queue_dispose_hook(dq); } - OSSpinLockLock(&_dispatch_introspection_queues_lock); - TAILQ_REMOVE(&_dispatch_introspection_queues, dq, diq_list); - OSSpinLockUnlock(&_dispatch_introspection_queues_lock); + _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); + TAILQ_REMOVE(&_dispatch_introspection.queues, dq, diq_list); + _dispatch_introspection_queue_order_dispose(dq); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); } DISPATCH_NOINLINE @@ -612,17 +654,267 @@ _dispatch_introspection_queue_item_complete(dispatch_object_t dou) } void -_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) { +_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) +{ dispatch_queue_t dq = _dispatch_queue_get_current(); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( queue_callout_begin, dq, ctxt, f); } void -_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) { +_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) +{ dispatch_queue_t dq = _dispatch_queue_get_current(); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( queue_callout_end, dq, ctxt, f); } +#pragma mark - +#pragma mark dispatch introspection deadlock detection + +typedef struct dispatch_queue_order_entry_s *dispatch_queue_order_entry_t; +struct dispatch_queue_order_entry_s { + TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_top_list; + TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_bottom_list; + const char *dqoe_top_label; + const char *dqoe_bottom_label; + dispatch_queue_t dqoe_top_tq; + dispatch_queue_t dqoe_bottom_tq; + int dqoe_pcs_n; + void *dqoe_pcs[]; +}; + +static void +_dispatch_introspection_queue_order_dispose(dispatch_queue_t dq) +{ + dispatch_queue_order_entry_t e, te; + dispatch_queue_t otherq; + TAILQ_HEAD(, dispatch_queue_order_entry_s) head; + + // this whole thing happens with _dispatch_introspection.queues_lock locked + + _dispatch_unfair_lock_lock(&dq->diq_order_top_head_lock); + head.tqh_first = dq->diq_order_top_head.tqh_first; + head.tqh_last = dq->diq_order_top_head.tqh_last; + TAILQ_INIT(&dq->diq_order_top_head); + _dispatch_unfair_lock_unlock(&dq->diq_order_top_head_lock); + + TAILQ_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) { + otherq = e->dqoe_bottom_tq; + _dispatch_unfair_lock_lock(&otherq->diq_order_bottom_head_lock); + TAILQ_REMOVE(&otherq->diq_order_bottom_head, e, dqoe_order_bottom_list); + _dispatch_unfair_lock_unlock(&otherq->diq_order_bottom_head_lock); + free(e); + } + + 
_dispatch_unfair_lock_lock(&dq->diq_order_bottom_head_lock); + head.tqh_first = dq->diq_order_bottom_head.tqh_first; + head.tqh_last = dq->diq_order_bottom_head.tqh_last; + TAILQ_INIT(&dq->diq_order_bottom_head); + _dispatch_unfair_lock_unlock(&dq->diq_order_bottom_head_lock); + + TAILQ_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) { + otherq = e->dqoe_top_tq; + _dispatch_unfair_lock_lock(&otherq->diq_order_top_head_lock); + TAILQ_REMOVE(&otherq->diq_order_top_head, e, dqoe_order_top_list); + _dispatch_unfair_lock_unlock(&otherq->diq_order_top_head_lock); + free(e); + } +} + +// caller must make sure dq is not a root queue +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_queue_bottom_target_queue(dispatch_queue_t dq) +{ + while (dq->do_targetq->do_targetq) { + dq = dq->do_targetq; + } + return dq; +} + +typedef struct dispatch_order_frame_s *dispatch_order_frame_t; +struct dispatch_order_frame_s { + dispatch_order_frame_t dof_prev; + dispatch_queue_order_entry_t dof_e; +}; + +DISPATCH_NOINLINE +static void +_dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof, + dispatch_queue_t top_q, dispatch_queue_t bottom_q) +{ + _SIMPLE_STRING buf = _simple_salloc(); + const char *leading_word = "with"; + + _simple_sprintf(buf, "%s Lock inversion detected\n" + "queue [%s] trying to sync onto queue [%s] conflicts\n", + DISPATCH_ASSERTION_FAILED_MESSAGE, + bottom_q->dq_label ?: "", top_q->dq_label ?: ""); + + while (dof) { + dispatch_queue_order_entry_t e = dof->dof_e; + char **symbols; + + _simple_sprintf(buf, + "%s queue [%s] syncing onto queue [%s] at:\n", leading_word, + dof->dof_e->dqoe_bottom_label, dof->dof_e->dqoe_top_label); + + symbols = backtrace_symbols(e->dqoe_pcs, e->dqoe_pcs_n); + if (symbols) { + for (int i = 0; i < e->dqoe_pcs_n; i++) { + _simple_sprintf(buf, "%s\n", symbols[i]); + } + free(symbols); + } else { + _simple_sappend(buf, "\n"); + } + + leading_word = "and"; + dof = dof->dof_prev; + } + + // turn off the feature for crash handlers + _dispatch_introspection.debug_queue_inversions = false; + _dispatch_assert_crash(_simple_string(buf)); + _simple_sfree(buf); +} + +static void +_dispatch_introspection_order_check(dispatch_order_frame_t dof_prev, + dispatch_queue_t top_q, dispatch_queue_t top_tq, + dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq) +{ + struct dispatch_order_frame_s dof = { .dof_prev = dof_prev }; + + // has anyone above bottom_tq ever sync()ed onto top_tq ? 
+ _dispatch_unfair_lock_lock(&bottom_tq->diq_order_top_head_lock); + TAILQ_FOREACH(dof.dof_e, &bottom_tq->diq_order_top_head, dqoe_order_top_list) { + if (slowpath(dof.dof_e->dqoe_bottom_tq == top_tq)) { + _dispatch_introspection_lock_inversion_fail(&dof, top_q, bottom_q); + } + _dispatch_introspection_order_check(&dof, top_q, top_tq, + bottom_q, dof.dof_e->dqoe_bottom_tq); + } + _dispatch_unfair_lock_unlock(&bottom_tq->diq_order_top_head_lock); +} + +void +_dispatch_introspection_order_record(dispatch_queue_t top_q, + dispatch_queue_t bottom_q) +{ + dispatch_queue_order_entry_t e, it; + const int pcs_skip = 1, pcs_n_max = 128; + void *pcs[pcs_n_max]; + int pcs_n; + + if (!bottom_q || !bottom_q->do_targetq || !top_q->do_targetq) { + return; + } + + dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q); + dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q); + + _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock); + TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) { + if (it->dqoe_bottom_tq == bottom_tq) { + // that dispatch_sync() is known and validated + // move on + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + return; + } + } + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + + _dispatch_introspection_order_check(NULL, top_q, top_tq, bottom_q, bottom_tq); + pcs_n = MAX(backtrace(pcs, pcs_n_max) - pcs_skip, 0); + + bool copy_top_label = false, copy_bottom_label = false; + size_t size = sizeof(struct dispatch_queue_order_entry_s) + + (size_t)pcs_n * sizeof(void *); + + if (_dispatch_queue_label_needs_free(top_q)) { + size += strlen(top_q->dq_label) + 1; + copy_top_label = true; + } + if (_dispatch_queue_label_needs_free(bottom_q)) { + size += strlen(bottom_q->dq_label) + 1; + copy_bottom_label = true; + } + + e = _dispatch_calloc(1, size); + e->dqoe_top_tq = top_tq; + e->dqoe_bottom_tq = bottom_tq; + e->dqoe_pcs_n = pcs_n; + memcpy(e->dqoe_pcs, pcs + pcs_skip, (size_t)pcs_n * sizeof(void *)); + // and then lay out the names of the queues at the end + char *p = (char *)(e->dqoe_pcs + pcs_n); + if (copy_top_label) { + e->dqoe_top_label = strcpy(p, top_q->dq_label); + p += strlen(p) + 1; + } else { + e->dqoe_top_label = top_q->dq_label ?: ""; + } + if (copy_bottom_label) { + e->dqoe_bottom_label = strcpy(p, bottom_q->dq_label); + } else { + e->dqoe_bottom_label = bottom_q->dq_label ?: ""; + } + + _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock); + TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) { + if (slowpath(it->dqoe_bottom_tq == bottom_tq)) { + // someone else validated it at the same time + // go away quickly + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + free(e); + return; + } + } + TAILQ_INSERT_HEAD(&top_tq->diq_order_top_head, e, dqoe_order_top_list); + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + + _dispatch_unfair_lock_lock(&bottom_tq->diq_order_bottom_head_lock); + TAILQ_INSERT_HEAD(&bottom_tq->diq_order_bottom_head, e, dqoe_order_bottom_list); + _dispatch_unfair_lock_unlock(&bottom_tq->diq_order_bottom_head_lock); +} + +void +_dispatch_introspection_target_queue_changed(dispatch_queue_t dq) +{ + if (!_dispatch_introspection.debug_queue_inversions) return; + + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_log( + "BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging " + "cannot be used with code that changes the target " + "of a queue already targeted by other dispatch objects\n" + 
"queue %p[%s] was already targeted by other dispatch objects", + dq, dq->dq_label ?: ""); + _dispatch_introspection.debug_queue_inversions = false; + return; + } + + static char const * const reasons[] = { + [1] = "an initiator", + [2] = "a recipient", + [3] = "both an initiator and a recipient" + }; + bool as_top = !TAILQ_EMPTY(&dq->diq_order_top_head); + bool as_bottom = !TAILQ_EMPTY(&dq->diq_order_top_head); + + if (as_top || as_bottom) { + _dispatch_log( + "BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging " + "expects queues to not participate in dispatch_sync() " + "before their setup is complete\n" + "forgetting that queue 0x%p[%s] participated as %s of " + "a dispatch_sync", dq, dq->dq_label ?: "", + reasons[(int)as_top + 2 * (int)as_bottom]); + _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); + _dispatch_introspection_queue_order_dispose(dq); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); + } +} + #endif // DISPATCH_INTROSPECTION diff --git a/src/introspection_internal.h b/src/introspection_internal.h index 4ed951e..06504a8 100644 --- a/src/introspection_internal.h +++ b/src/introspection_internal.h @@ -29,10 +29,28 @@ #if DISPATCH_INTROSPECTION -#define DISPATCH_INTROSPECTION_QUEUE_LIST \ - TAILQ_ENTRY(dispatch_queue_s) diq_list -#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE \ - sizeof(TAILQ_ENTRY(dispatch_queue_s)) +#define DISPATCH_INTROSPECTION_QUEUE_HEADER \ + TAILQ_ENTRY(dispatch_queue_s) diq_list; \ + dispatch_unfair_lock_s diq_order_top_head_lock; \ + dispatch_unfair_lock_s diq_order_bottom_head_lock; \ + TAILQ_HEAD(, dispatch_queue_order_entry_s) diq_order_top_head; \ + TAILQ_HEAD(, dispatch_queue_order_entry_s) diq_order_bottom_head +#define DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE \ + sizeof(struct { DISPATCH_INTROSPECTION_QUEUE_HEADER; }) + +struct dispatch_introspection_state_s { + TAILQ_HEAD(, dispatch_introspection_thread_s) threads; + TAILQ_HEAD(, dispatch_queue_s) queues; + dispatch_unfair_lock_s threads_lock; + dispatch_unfair_lock_s queues_lock; + + ptrdiff_t thread_queue_offset; + + // dispatch introspection features + bool debug_queue_inversions; // DISPATCH_DEBUG_QUEUE_INVERSIONS +}; + +extern struct dispatch_introspection_state_s _dispatch_introspection; void _dispatch_introspection_init(void); void _dispatch_introspection_thread_add(void); @@ -46,7 +64,10 @@ void _dispatch_introspection_queue_item_complete(dispatch_object_t dou); void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f); void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); -#if !__OBJC2__ && !defined(__cplusplus) +#if DISPATCH_PURE_C + +void _dispatch_sync_recurse_invoke(void *ctxt); +static dispatch_queue_t _dispatch_queue_get_current(void); DISPATCH_ALWAYS_INLINE static inline void @@ -70,12 +91,41 @@ _dispatch_introspection_queue_pop(dispatch_queue_t dq, dispatch_object_t dou) { _dispatch_introspection_queue_item_dequeue(dq, dou); }; -#endif // !__OBJC2__ && !defined(__cplusplus) +void +_dispatch_introspection_order_record(dispatch_queue_t top_q, + dispatch_queue_t bottom_q); + +void +_dispatch_introspection_target_queue_changed(dispatch_queue_t dq); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq, + dispatch_function_t func) +{ + if (!_dispatch_introspection.debug_queue_inversions) return; + if (func != _dispatch_sync_recurse_invoke) { + _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); + } +} + 
+DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq, + dispatch_function_t func) +{ + if (!_dispatch_introspection.debug_queue_inversions) return; + if (func != _dispatch_sync_recurse_invoke) { + _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); + } +} + +#endif // DISPATCH_PURE_C #else // DISPATCH_INTROSPECTION -#define DISPATCH_INTROSPECTION_QUEUE_LIST -#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE 0 +#define DISPATCH_INTROSPECTION_QUEUE_HEADER +#define DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE 0 #define _dispatch_introspection_init() #define _dispatch_introspection_thread_add() @@ -120,6 +170,21 @@ static inline void _dispatch_introspection_callout_return(void *ctxt DISPATCH_UNUSED, dispatch_function_t f DISPATCH_UNUSED) {} +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_target_queue_changed( + dispatch_queue_t dq DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_function_t func DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_function_t func DISPATCH_UNUSED) {} + #endif // DISPATCH_INTROSPECTION #endif // __DISPATCH_INTROSPECTION_INTERNAL__ diff --git a/src/io.c b/src/io.c index 0ad5b53..e4f05ae 100644 --- a/src/io.c +++ b/src/io.c @@ -24,14 +24,7 @@ #define DISPATCH_IO_DEBUG DISPATCH_DEBUG #endif -#if DISPATCH_IO_DEBUG -#define _dispatch_fd_debug(msg, fd, args...) \ - _dispatch_debug("fd[0x%x]: " msg, (fd), ##args) -#else -#define _dispatch_fd_debug(msg, fd, args...) -#endif - -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA #define _dispatch_io_data_retain(x) _dispatch_objc_retain(x) #define _dispatch_io_data_release(x) _dispatch_objc_release(x) #else @@ -75,7 +68,7 @@ static void _dispatch_disk_enqueue_operation(dispatch_disk_t dsk, dispatch_operation_t operation, dispatch_data_t data); static void _dispatch_stream_cleanup_operations(dispatch_stream_t stream, dispatch_io_t channel); -static void _dispatch_disk_cleanup_operations(dispatch_disk_t disk, +static void _dispatch_disk_cleanup_inactive_operations(dispatch_disk_t disk, dispatch_io_t channel); static void _dispatch_stream_source_handler(void *ctx); static void _dispatch_stream_queue_handler(void *ctx); @@ -119,6 +112,38 @@ enum { #define _dispatch_io_Block_copy(x) \ ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) +#pragma mark - +#pragma mark dispatch_io_debug + +#if DISPATCH_IO_DEBUG +#if !DISPATCH_DEBUG +#define _dispatch_io_log(x, ...) do { \ + _dispatch_log("%llu\t%p\t" x, _dispatch_absolute_time(), \ + (void *)_dispatch_thread_self(), ##__VA_ARGS__); \ + } while (0) +#ifdef _dispatch_object_debug +#undef _dispatch_object_debug +#define _dispatch_object_debug dispatch_debug +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif +#else +#define _dispatch_io_log(x, ...) _dispatch_debug(x, ##__VA_ARGS__) +#endif // DISPATCH_DEBUG +#else +#define _dispatch_io_log(x, ...) +#endif // DISPATCH_IO_DEBUG + +#define _dispatch_fd_debug(msg, fd, ...) \ + _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) +#define _dispatch_op_debug(msg, op, ...) \ + _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) +#define _dispatch_channel_debug(msg, channel, ...) \ + _dispatch_io_log("channel[%p]: " msg, channel, ##__VA_ARGS__) +#define _dispatch_fd_entry_debug(msg, fd_entry, ...) 
\ + _dispatch_io_log("fd_entry[%p]: " msg, fd_entry, ##__VA_ARGS__) +#define _dispatch_disk_debug(msg, disk, ...) \ + _dispatch_io_log("disk[%p]: " msg, disk, ##__VA_ARGS__) + #pragma mark - #pragma mark dispatch_io_hashtables @@ -133,6 +158,8 @@ static dispatch_once_t _dispatch_io_devs_lockq_pred; static dispatch_queue_t _dispatch_io_devs_lockq; static dispatch_queue_t _dispatch_io_fds_lockq; +static char const * const _dispatch_io_key = "io"; + static void _dispatch_io_fds_lockq_init(void *context DISPATCH_UNUSED) { @@ -225,7 +252,8 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, _dispatch_retain(queue); dispatch_async(!err ? fd_entry->close_queue : channel->queue, ^{ dispatch_async(queue, ^{ - _dispatch_fd_debug("cleanup handler invoke", -1); + _dispatch_channel_debug("cleanup handler invoke: err %d", + channel, err); cleanup_handler(err); }); _dispatch_release(queue); @@ -314,11 +342,11 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, dispatch_queue_t queue, void (^cleanup_handler)(int)) { if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { - return NULL; + return DISPATCH_BAD_INPUT; } - _dispatch_fd_debug("io create", fd); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = fd; + _dispatch_channel_debug("create", channel); channel->fd_actual = fd; dispatch_suspend(channel->queue); _dispatch_retain(queue); @@ -365,16 +393,16 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, { if ((type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) || !(*path == '/')) { - return NULL; + return DISPATCH_BAD_INPUT; } size_t pathlen = strlen(path); dispatch_io_path_data_t path_data = malloc(sizeof(*path_data) + pathlen+1); if (!path_data) { - return NULL; + return DISPATCH_OUT_OF_MEMORY; } - _dispatch_fd_debug("io create with path %s", -1, path); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = -1; + _dispatch_channel_debug("create with path %s", channel, path); channel->fd_actual = -1; path_data->channel = channel; path_data->oflag = oflag; @@ -387,9 +415,11 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int err = 0; struct stat st; _dispatch_io_syscall_switch_noerr(err, - (path_data->oflag & O_NOFOLLOW) == O_NOFOLLOW || - (path_data->oflag & O_SYMLINK) == O_SYMLINK ? - lstat(path_data->path, &st) : stat(path_data->path, &st), + (path_data->oflag & O_NOFOLLOW) == O_NOFOLLOW +#ifndef __linux__ + || (path_data->oflag & O_SYMLINK) == O_SYMLINK +#endif + ? 
lstat(path_data->path, &st) : stat(path_data->path, &st), case 0: err = _dispatch_io_validate_type(channel, st.st_mode); break; @@ -455,10 +485,10 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, dispatch_queue_t queue, void (^cleanup_handler)(int error)) { if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { - return NULL; + return DISPATCH_BAD_INPUT; } - _dispatch_fd_debug("io create with io %p", -1, in_channel); dispatch_io_t channel = _dispatch_io_create(type); + _dispatch_channel_debug("create with channel %p", channel, in_channel); dispatch_suspend(channel->queue); _dispatch_retain(queue); _dispatch_retain(channel); @@ -565,7 +595,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_fd_debug("io set high water", channel->fd); + _dispatch_channel_debug("set high water: %zu", channel, high_water); if (channel->params.low > high_water) { channel->params.low = high_water; } @@ -579,7 +609,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_fd_debug("io set low water", channel->fd); + _dispatch_channel_debug("set low water: %zu", channel, low_water); if (channel->params.high < low_water) { channel->params.high = low_water ? low_water : 1; } @@ -594,7 +624,7 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_fd_debug("io set interval", channel->fd); + _dispatch_channel_debug("set interval: %llu", channel, interval); channel->params.interval = interval < INT64_MAX ? interval : INT64_MAX; channel->params.interval_flags = flags; _dispatch_release(channel); @@ -622,10 +652,12 @@ dispatch_io_get_descriptor(dispatch_io_t channel) return -1; } dispatch_fd_t fd = channel->fd_actual; - if (fd == -1 && _dispatch_thread_getspecific(dispatch_io_key) == channel && - !_dispatch_io_get_error(NULL, channel, false)) { - dispatch_fd_entry_t fd_entry = channel->fd_entry; - (void)_dispatch_fd_entry_open(fd_entry, channel); + if (fd == -1 && !_dispatch_io_get_error(NULL, channel, false)) { + dispatch_thread_context_t ctxt = + _dispatch_thread_context_find(_dispatch_io_key); + if (ctxt && ctxt->dtc_io_in_barrier == channel) { + (void)_dispatch_fd_entry_open(channel->fd_entry, channel); + } } return channel->fd_actual; } @@ -636,15 +668,15 @@ dispatch_io_get_descriptor(dispatch_io_t channel) static void _dispatch_io_stop(dispatch_io_t channel) { - _dispatch_fd_debug("io stop", channel->fd); - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); + _dispatch_channel_debug("stop", channel); + (void)os_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_object_debug(channel, "%s", __func__); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - _dispatch_fd_debug("io stop cleanup", channel->fd); + _dispatch_channel_debug("stop cleanup", channel); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { channel->fd_entry = NULL; @@ -655,8 +687,8 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_retain(channel); dispatch_async(_dispatch_io_fds_lockq, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_fd_debug("io stop after close cleanup", - channel->fd); + 
_dispatch_channel_debug("stop cleanup after close", + channel); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); TAILQ_FOREACH(fdi, &_dispatch_io_fds[hash], fd_list) { @@ -691,9 +723,9 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_fd_debug("io close", channel->fd); + _dispatch_channel_debug("close", channel); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED, + (void)os_atomic_or2o(channel, atomic_flags, DIO_CLOSED, relaxed); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { @@ -719,10 +751,15 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier) dispatch_async(barrier_queue, ^{ dispatch_suspend(barrier_queue); dispatch_group_notify(barrier_group, io_q, ^{ + dispatch_thread_context_s io_ctxt = { + .dtc_key = _dispatch_io_key, + .dtc_io_in_barrier = channel, + }; + _dispatch_object_debug(channel, "%s", __func__); - _dispatch_thread_setspecific(dispatch_io_key, channel); + _dispatch_thread_context_push(&io_ctxt); barrier(); - _dispatch_thread_setspecific(dispatch_io_key, NULL); + _dispatch_thread_context_pop(&io_ctxt); dispatch_resume(barrier_queue); _dispatch_release(channel); }); @@ -956,10 +993,6 @@ _dispatch_operation_create(dispatch_op_direction_t direction, { // On channel queue dispatch_assert(direction < DOP_DIR_MAX); - _dispatch_fd_debug("operation create", channel->fd); -#if DISPATCH_IO_DEBUG - int fd = channel->fd; -#endif // Safe to call _dispatch_io_get_error() with channel->fd_entry since // that can only be NULL if atomic_flags are set rdar://problem/8362514 int err = _dispatch_io_get_error(NULL, channel, false); @@ -974,7 +1007,8 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } else if (direction == DOP_DIR_WRITE && !err) { d = NULL; } - _dispatch_fd_debug("IO handler invoke", fd); + _dispatch_channel_debug("IO handler invoke: err %d", channel, + err); handler(true, d, err); _dispatch_io_data_release(data); }); @@ -984,6 +1018,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } dispatch_operation_t op = _dispatch_alloc(DISPATCH_VTABLE(operation), sizeof(struct dispatch_operation_s)); + _dispatch_channel_debug("operation create: %p", channel, op); op->do_next = DISPATCH_OBJECT_LISTLESS; op->do_xref_cnt = -1; // operation object is not exposed externally op->op_q = dispatch_queue_create("com.apple.libdispatch-io.opq", NULL); @@ -1012,6 +1047,7 @@ void _dispatch_operation_dispose(dispatch_operation_t op) { _dispatch_object_debug(op, "%s", __func__); + _dispatch_op_debug("dispose", op); // Deliver the data if there's any if (op->fd_entry) { _dispatch_operation_deliver_data(op, DOP_DONE); @@ -1038,6 +1074,7 @@ _dispatch_operation_dispose(dispatch_operation_t op) dispatch_release(op->op_q); } Block_release(op->handler); + _dispatch_op_debug("disposed", op); } static void @@ -1060,6 +1097,7 @@ _dispatch_operation_enqueue(dispatch_operation_t op, handler(true, d, err); _dispatch_io_data_release(data); }); + _dispatch_op_debug("release -> %d, err %d", op, op->do_ref_cnt, err); _dispatch_release(op); return; } @@ -1087,13 +1125,14 @@ _dispatch_operation_should_enqueue(dispatch_operation_t op, dispatch_queue_t tq, dispatch_data_t data) { // On stream queue or disk queue - _dispatch_fd_debug("enqueue operation", op->fd_entry->fd); + _dispatch_op_debug("enqueue", op); 
_dispatch_io_data_retain(data); op->data = data; int err = _dispatch_io_get_error(op, NULL, true); if (err) { op->err = err; // Final release + _dispatch_op_debug("release -> %d, err %d", op, op->do_ref_cnt, err); _dispatch_release(op); return false; } @@ -1230,7 +1269,6 @@ _dispatch_fd_entry_init_async(dispatch_fd_t fd, dispatch_once_f(&_dispatch_io_fds_lockq_pred, NULL, _dispatch_io_fds_lockq_init); dispatch_async(_dispatch_io_fds_lockq, ^{ - _dispatch_fd_debug("fd entry init", fd); dispatch_fd_entry_t fd_entry = NULL; // Check to see if there is an existing entry for the given fd uintptr_t hash = DIO_HASH(fd); @@ -1246,8 +1284,9 @@ _dispatch_fd_entry_init_async(dispatch_fd_t fd, // If we did not find an existing entry, create one fd_entry = _dispatch_fd_entry_create_with_fd(fd, hash); } + _dispatch_fd_entry_debug("init", fd_entry); dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_fd_debug("fd entry init completion", fd); + _dispatch_fd_entry_debug("init completion", fd_entry); completion_callback(fd_entry); // stat() is complete, release reference to fd_entry _dispatch_fd_entry_release(fd_entry); @@ -1275,16 +1314,16 @@ static dispatch_fd_entry_t _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) { // On fds lock queue - _dispatch_fd_debug("fd entry create", fd); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); + _dispatch_fd_entry_debug("create: fd %d", fd_entry, fd); fd_entry->fd = fd; TAILQ_INSERT_TAIL(&_dispatch_io_fds[hash], fd_entry, fd_list); fd_entry->barrier_queue = dispatch_queue_create( "com.apple.libdispatch-io.barrierq", NULL); fd_entry->barrier_group = dispatch_group_create(); dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_fd_debug("fd entry stat", fd); + _dispatch_fd_entry_debug("stat", fd_entry); int err, orig_flags, orig_nosigpipe = -1; struct stat st; _dispatch_io_syscall_switch(err, @@ -1356,7 +1395,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // all operations associated with this entry have been freed dispatch_async(fd_entry->close_queue, ^{ if (!fd_entry->disk) { - _dispatch_fd_debug("close queue fd_entry cleanup", fd); + _dispatch_fd_entry_debug("close queue cleanup", fd_entry); dispatch_op_direction_t dir; for (dir = 0; dir < DOP_DIR_MAX; dir++) { _dispatch_stream_dispose(fd_entry, dir); @@ -1374,11 +1413,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // source cancels it and suspends the close queue. 
Freeing the fd_entry // structure must happen after the source cancel handler has finished dispatch_async(fd_entry->close_queue, ^{ - _dispatch_fd_debug("close queue release", fd); + _dispatch_fd_entry_debug("close queue release", fd_entry); dispatch_release(fd_entry->close_queue); - _dispatch_fd_debug("barrier queue release", fd); + _dispatch_fd_entry_debug("barrier queue release", fd_entry); dispatch_release(fd_entry->barrier_queue); - _dispatch_fd_debug("barrier group release", fd); + _dispatch_fd_entry_debug("barrier group release", fd_entry); dispatch_release(fd_entry->barrier_group); if (fd_entry->orig_flags != -1) { _dispatch_io_syscall( @@ -1407,9 +1446,9 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, dev_t dev, mode_t mode) { // On devs lock queue - _dispatch_fd_debug("fd entry create with path %s", -1, path_data->path); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( path_data->channel->queue); + _dispatch_fd_entry_debug("create: path %s", fd_entry, path_data->path); if (S_ISREG(mode)) { _dispatch_disk_init(fd_entry, major(dev)); } else { @@ -1428,7 +1467,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, // that the channel associated with this entry has been closed and that // all operations associated with this entry have been freed dispatch_async(fd_entry->close_queue, ^{ - _dispatch_fd_debug("close queue fd_entry cleanup", -1); + _dispatch_fd_entry_debug("close queue cleanup", fd_entry); if (!fd_entry->disk) { dispatch_op_direction_t dir; for (dir = 0; dir < DOP_DIR_MAX; dir++) { @@ -1447,7 +1486,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, } }); dispatch_async(fd_entry->close_queue, ^{ - _dispatch_fd_debug("close queue release", -1); + _dispatch_fd_entry_debug("close queue release", fd_entry); dispatch_release(fd_entry->close_queue); dispatch_release(fd_entry->barrier_queue); dispatch_release(fd_entry->barrier_group); @@ -1477,10 +1516,10 @@ open: if (err == EINTR) { goto open; } - (void)dispatch_atomic_cmpxchg2o(fd_entry, err, 0, err, relaxed); + (void)os_atomic_cmpxchg2o(fd_entry, err, 0, err, relaxed); return err; } - if (!dispatch_atomic_cmpxchg2o(fd_entry, fd, -1, fd, relaxed)) { + if (!os_atomic_cmpxchg2o(fd_entry, fd, -1, fd, relaxed)) { // Lost the race with another open _dispatch_fd_entry_guarded_close(fd_entry, fd); } else { @@ -1500,7 +1539,7 @@ _dispatch_fd_entry_cleanup_operations(dispatch_fd_entry_t fd_entry, } _dispatch_fd_entry_retain(fd_entry); dispatch_async(fd_entry->disk->pick_queue, ^{ - _dispatch_disk_cleanup_operations(fd_entry->disk, channel); + _dispatch_disk_cleanup_inactive_operations(fd_entry->disk, channel); _dispatch_fd_entry_release(fd_entry); if (channel) { _dispatch_release(channel); @@ -1599,7 +1638,8 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) TAILQ_INIT(&disk->operations); disk->cur_rq = TAILQ_FIRST(&disk->operations); char label[45]; - snprintf(label, sizeof(label), "com.apple.libdispatch-io.deviceq.%d", dev); + snprintf(label, sizeof(label), "com.apple.libdispatch-io.deviceq.%d", + (int)dev); disk->pick_queue = dispatch_queue_create(label, NULL); TAILQ_INSERT_TAIL(&_dispatch_io_devs[hash], disk, disk_list); out: @@ -1671,7 +1711,7 @@ _dispatch_stream_complete_operation(dispatch_stream_t stream, { // On stream queue _dispatch_object_debug(op, "%s", __func__); - _dispatch_fd_debug("complete operation", op->fd_entry->fd); + _dispatch_op_debug("complete: stream %p", op, stream); TAILQ_REMOVE(&stream->operations[op->params.type], 
op, operation_list); if (op == stream->op) { stream->op = NULL; @@ -1680,6 +1720,7 @@ _dispatch_stream_complete_operation(dispatch_stream_t stream, dispatch_source_cancel(op->timer); } // Final release will deliver any pending data + _dispatch_op_debug("release -> %d (stream complete)", op, op->do_ref_cnt); _dispatch_release(op); } @@ -1688,7 +1729,7 @@ _dispatch_disk_complete_operation(dispatch_disk_t disk, dispatch_operation_t op) { // On pick queue _dispatch_object_debug(op, "%s", __func__); - _dispatch_fd_debug("complete operation", op->fd_entry->fd); + _dispatch_op_debug("complete: disk %p", op, disk); // Current request is always the last op returned if (disk->cur_rq == op) { disk->cur_rq = TAILQ_PREV(op, dispatch_disk_operations_s, @@ -1707,6 +1748,7 @@ _dispatch_disk_complete_operation(dispatch_disk_t disk, dispatch_operation_t op) dispatch_source_cancel(op->timer); } // Final release will deliver any pending data + _dispatch_op_debug("release -> %d (disk complete)", op, op->do_ref_cnt); _dispatch_release(op); } @@ -1794,18 +1836,34 @@ _dispatch_stream_cleanup_operations(dispatch_stream_t stream, } } -static void -_dispatch_disk_cleanup_operations(dispatch_disk_t disk, dispatch_io_t channel) +static inline void +_dispatch_disk_cleanup_specified_operations(dispatch_disk_t disk, + dispatch_io_t channel, bool inactive_only) { // On pick queue dispatch_operation_t op, tmp; TAILQ_FOREACH_SAFE(op, &disk->operations, operation_list, tmp) { + if (inactive_only && op->active) continue; if (!channel || op->channel == channel) { + _dispatch_op_debug("cleanup: disk %p", op, disk); _dispatch_disk_complete_operation(disk, op); } } } +static void +_dispatch_disk_cleanup_operations(dispatch_disk_t disk, dispatch_io_t channel) +{ + _dispatch_disk_cleanup_specified_operations(disk, channel, false); +} + +static void +_dispatch_disk_cleanup_inactive_operations(dispatch_disk_t disk, + dispatch_io_t channel) +{ + _dispatch_disk_cleanup_specified_operations(disk, channel, true); +} + #pragma mark - #pragma mark dispatch_stream_handler/dispatch_disk_handler @@ -1817,7 +1875,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) return stream->source; } dispatch_fd_t fd = op->fd_entry->fd; - _dispatch_fd_debug("stream source create", fd); + _dispatch_op_debug("stream source create", op); dispatch_source_t source = NULL; if (op->direction == DOP_DIR_READ) { source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, @@ -1836,7 +1894,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) // unregistered dispatch_queue_t close_queue = op->fd_entry->close_queue; dispatch_source_set_cancel_handler(source, ^{ - _dispatch_fd_debug("stream source cancel", fd); + _dispatch_op_debug("stream source cancel", op); dispatch_resume(close_queue); }); stream->source = source; @@ -1884,13 +1942,13 @@ pick: goto pick; } stream->op = op; - _dispatch_fd_debug("stream handler", op->fd_entry->fd); + _dispatch_op_debug("stream handler", op); dispatch_fd_entry_t fd_entry = op->fd_entry; _dispatch_fd_entry_retain(fd_entry); // For performance analysis if (!op->total && dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of the operation - _dispatch_fd_debug("initial delivery", op->fd_entry->fd); + _dispatch_op_debug("initial delivery", op); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // TODO: perform on the operation target queue to get correct priority @@ -1948,7 +2006,7 @@ _dispatch_disk_handler(void *ctx) if (disk->io_active) { return; } - 
_dispatch_fd_debug("disk handler", -1); + _dispatch_disk_debug("disk handler", disk); dispatch_operation_t op; size_t i = disk->free_idx, j = disk->req_idx; if (j <= i) { @@ -1964,8 +2022,10 @@ _dispatch_disk_handler(void *ctx) continue; } _dispatch_retain(op); + _dispatch_op_debug("retain -> %d", op, op->do_ref_cnt + 1); disk->advise_list[i%disk->advise_list_depth] = op; op->active = true; + _dispatch_op_debug("activate: disk %p", op, disk); _dispatch_object_debug(op, "%s", __func__); } else { // No more operations to get @@ -1977,6 +2037,7 @@ _dispatch_disk_handler(void *ctx) op = disk->advise_list[disk->req_idx]; if (op) { disk->io_active = true; + _dispatch_op_debug("async perform: disk %p", op, disk); dispatch_async_f(op->do_targetq, disk, _dispatch_disk_perform); } } @@ -1985,8 +2046,8 @@ static void _dispatch_disk_perform(void *ctxt) { dispatch_disk_t disk = ctxt; + _dispatch_disk_debug("disk perform", disk); size_t chunk_size = dispatch_io_defaults.chunk_size; - _dispatch_fd_debug("disk perform", -1); dispatch_operation_t op; size_t i = disk->advise_idx, j = disk->free_idx; if (j <= i) { @@ -2010,7 +2071,7 @@ _dispatch_disk_perform(void *ctxt) // For performance analysis if (!op->total && dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of the operation - _dispatch_fd_debug("initial delivery", op->fd_entry->fd); + _dispatch_op_debug("initial delivery", op); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // Advise two chunks if the list only has one element and this is the @@ -2026,7 +2087,9 @@ _dispatch_disk_perform(void *ctxt) int result = _dispatch_operation_perform(op); disk->advise_list[disk->req_idx] = NULL; disk->req_idx = (++disk->req_idx)%disk->advise_list_depth; + _dispatch_op_debug("async perform completion: disk %p", op, disk); dispatch_async(disk->pick_queue, ^{ + _dispatch_op_debug("perform completion", op); switch (result) { case DISPATCH_OP_DELIVER: _dispatch_operation_deliver_data(op, DOP_DEFAULT); @@ -2048,12 +2111,15 @@ _dispatch_disk_perform(void *ctxt) dispatch_assert(result); break; } + _dispatch_op_debug("deactivate: disk %p", op, disk); op->active = false; disk->io_active = false; _dispatch_disk_handler(disk); // Balancing the retain in _dispatch_disk_handler. 
Note that op must be
 		// released at the very end, since it might hold the last reference to
 		// the disk
+		_dispatch_op_debug("release -> %d (disk perform complete)", op,
+				op->do_ref_cnt);
 		_dispatch_release(op);
 	});
 }
@@ -2064,6 +2130,16 @@ _dispatch_disk_perform(void *ctxt)
 static void
 _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size)
 {
+	_dispatch_op_debug("advise", op);
+	if (_dispatch_io_get_error(op, NULL, true)) return;
+#ifdef __linux__
+	// Linux does not support fcntl(F_RDADVISE);
+	// define the necessary data structure and use readahead(2) instead
+	struct radvisory {
+		off_t ra_offset;
+		int ra_count;
+	};
+#endif
 	int err;
 	struct radvisory advise;
 	// No point in issuing a read advise for the next chunk if we are already
@@ -2083,6 +2159,13 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size)
 	}
 	advise.ra_offset = op->advise_offset;
 	op->advise_offset += advise.ra_count;
+#ifdef __linux__
+	_dispatch_io_syscall_switch(err,
+		readahead(op->fd_entry->fd, advise.ra_offset, advise.ra_count),
+		case EINVAL: break; // fd refers to an unsupported file type
+		default: (void)dispatch_assume_zero(err); break;
+	);
+#else
 	_dispatch_io_syscall_switch(err,
 		fcntl(op->fd_entry->fd, F_RDADVISE, &advise),
 		case EFBIG: break; // advised past the end of the file rdar://10415691
@@ -2090,11 +2173,13 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size)
 		// TODO: set disk status on error
 		default: (void)dispatch_assume_zero(err); break;
 	);
+#endif
 }
 
 static int
 _dispatch_operation_perform(dispatch_operation_t op)
 {
+	_dispatch_op_debug("perform", op);
 	int err = _dispatch_io_get_error(op, NULL, true);
 	if (err) {
 		goto error;
@@ -2123,7 +2208,7 @@ _dispatch_operation_perform(dispatch_operation_t op)
 			op->buf_siz = max_buf_siz;
 		}
 		op->buf = valloc(op->buf_siz);
-		_dispatch_fd_debug("buffer allocated", op->fd_entry->fd);
+		_dispatch_op_debug("buffer allocated", op);
 	} else if (op->direction == DOP_DIR_WRITE) {
 		// Always write the first data piece, if that is smaller than a
 		// chunk, accumulate further data pieces until chunk size is reached
@@ -2149,7 +2234,7 @@ _dispatch_operation_perform(dispatch_operation_t op)
 			op->buf_data = dispatch_data_create_map(d, (const void**)&op->buf,
 					NULL);
 			_dispatch_io_data_release(d);
-			_dispatch_fd_debug("buffer mapped", op->fd_entry->fd);
+			_dispatch_op_debug("buffer mapped", op);
 		}
 	}
 	if (op->fd_entry->fd == -1) {
@@ -2186,7 +2271,7 @@ syscall:
 	}
 	// EOF is indicated by two handler invocations
 	if (processed == 0) {
-		_dispatch_fd_debug("EOF", op->fd_entry->fd);
+		_dispatch_op_debug("performed: EOF", op);
 		return DISPATCH_OP_DELIVER_AND_COMPLETE;
 	}
 	op->buf_len += (size_t)processed;
@@ -2202,7 +2287,7 @@ error:
 	if (err == EAGAIN) {
 		// For disk based files with blocking I/O we should never get EAGAIN
 		dispatch_assert(!op->fd_entry->disk);
-		_dispatch_fd_debug("EAGAIN %d", op->fd_entry->fd, err);
+		_dispatch_op_debug("performed: EAGAIN", op);
 		if (op->direction == DOP_DIR_READ && op->total &&
 				op->channel == op->fd_entry->convenience_channel) {
 			// Convenience read with available data completes on EAGAIN
@@ -2210,12 +2295,13 @@ error:
 		}
 		return DISPATCH_OP_RESUME;
 	}
+	_dispatch_op_debug("performed: err %d", op, err);
 	op->err = err;
 	switch (err) {
 	case ECANCELED:
 		return DISPATCH_OP_ERR;
 	case EBADF:
-		(void)dispatch_atomic_cmpxchg2o(op->fd_entry, err, 0, err, relaxed);
+		(void)os_atomic_cmpxchg2o(op->fd_entry, err, 0, err, relaxed);
 		return DISPATCH_OP_FD_ERR;
 	default:
 		return DISPATCH_OP_COMPLETE;
@@ -2239,7 +2325,7 @@
_dispatch_operation_deliver_data(dispatch_operation_t op, deliver = true; } else if (op->buf_len < op->buf_siz) { // Request buffer is not yet used up - _dispatch_fd_debug("buffer data", op->fd_entry->fd); + _dispatch_op_debug("buffer data: undelivered %zu", op, undelivered); return; } } else { @@ -2293,17 +2379,14 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, } if (!deliver || ((flags & DOP_NO_EMPTY) && !dispatch_data_get_size(data))) { op->undelivered = undelivered; - _dispatch_fd_debug("buffer data", op->fd_entry->fd); + _dispatch_op_debug("buffer data: undelivered %zu", op, undelivered); return; } op->undelivered = 0; _dispatch_object_debug(op, "%s", __func__); - _dispatch_fd_debug("deliver data", op->fd_entry->fd); + _dispatch_op_debug("deliver data", op); dispatch_op_direction_t direction = op->direction; dispatch_io_handler_t handler = op->handler; -#if DISPATCH_IO_DEBUG - int fd = op->fd_entry->fd; -#endif dispatch_fd_entry_t fd_entry = op->fd_entry; _dispatch_fd_entry_retain(fd_entry); dispatch_io_t channel = op->channel; @@ -2315,7 +2398,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, if (done) { if (direction == DOP_DIR_READ && err) { if (dispatch_data_get_size(d)) { - _dispatch_fd_debug("IO handler invoke", fd); + _dispatch_op_debug("IO handler invoke", op); handler(false, d, 0); } d = NULL; @@ -2323,7 +2406,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, d = NULL; } } - _dispatch_fd_debug("IO handler invoke", fd); + _dispatch_op_debug("IO handler invoke: err %d", op, err); handler(done, d, err); _dispatch_release(channel); _dispatch_fd_entry_release(fd_entry); @@ -2349,7 +2432,7 @@ _dispatch_io_debug_attr(dispatch_io_t channel, char* buf, size_t bufsiz) channel->barrier_group, channel->err, channel->params.low, channel->params.high, channel->params.interval_flags & DISPATCH_IO_STRICT_INTERVAL ? "(strict)" : "", - channel->params.interval); + (unsigned long long) channel->params.interval); } size_t @@ -2380,10 +2463,11 @@ _dispatch_operation_debug_attr(dispatch_operation_t op, char* buf, "write", op->fd_entry ? op->fd_entry->fd : -1, op->fd_entry, op->channel, op->op_q, oqtarget && oqtarget->dq_label ? oqtarget->dq_label : "", oqtarget, target && target->dq_label ? - target->dq_label : "", target, op->offset, op->length, op->total, - op->undelivered + op->buf_len, op->flags, op->err, op->params.low, - op->params.high, op->params.interval_flags & - DISPATCH_IO_STRICT_INTERVAL ? "(strict)" : "", op->params.interval); + target->dq_label : "", target, (long long)op->offset, op->length, + op->total, op->undelivered + op->buf_len, op->flags, op->err, + op->params.low, op->params.high, op->params.interval_flags & + DISPATCH_IO_STRICT_INTERVAL ? 
"(strict)" : "", + (unsigned long long)op->params.interval); } size_t diff --git a/src/io_internal.h b/src/io_internal.h index ecdc775..ad8259a 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -66,8 +66,8 @@ typedef unsigned int dispatch_op_flags_t; #define DIO_CLOSED 1u // channel has been closed #define DIO_STOPPED 2u // channel has been stopped (implies closed) -DISPATCH_DECL_INTERNAL(dispatch_operation); -DISPATCH_DECL_INTERNAL(dispatch_disk); +DISPATCH_INTERNAL_CLASS_DECL(operation); +DISPATCH_INTERNAL_CLASS_DECL(disk); struct dispatch_stream_s { dispatch_queue_t dq; @@ -94,9 +94,8 @@ struct dispatch_stat_s { mode_t mode; }; -DISPATCH_CLASS_DECL(disk); struct dispatch_disk_s { - DISPATCH_STRUCT_HEADER(disk); + DISPATCH_OBJECT_HEADER(disk); TAILQ_HEAD(dispatch_disk_operations_s, dispatch_operation_s) operations; dispatch_operation_t cur_rq; dispatch_queue_t pick_queue; @@ -141,9 +140,8 @@ typedef struct dispatch_io_param_s { unsigned long interval_flags; } dispatch_io_param_s; -DISPATCH_CLASS_DECL(operation); struct dispatch_operation_s { - DISPATCH_STRUCT_HEADER(operation); + DISPATCH_OBJECT_HEADER(operation); dispatch_queue_t op_q; dispatch_op_direction_t direction; // READ OR WRITE dispatch_io_param_s params; @@ -167,7 +165,7 @@ struct dispatch_operation_s { DISPATCH_CLASS_DECL(io); struct dispatch_io_s { - DISPATCH_STRUCT_HEADER(io); + DISPATCH_OBJECT_HEADER(io); dispatch_queue_t queue, barrier_queue; dispatch_group_t barrier_group; dispatch_io_param_s params; diff --git a/src/libdispatch.codes b/src/libdispatch.codes new file mode 100644 index 0000000..9aca7e1 --- /dev/null +++ b/src/libdispatch.codes @@ -0,0 +1,13 @@ +0x2bdc0008 DISPATCH_ARIADNE_dispatch_main + +0x2e010004 DISPATCH_VOUCHER_dc_push +0x2e010008 DISPATCH_VOUCHER_dc_pop +0x2e01000c DISPATCH_VOUCHER_dmsg_push +0x2e010010 DISPATCH_VOUCHER_dmsg_pop +0x2e010018 DISPATCH_VOUCHER_activity_adopt + +0x2e020004 DISPATCH_PERF_non_leaf_retarget +0x2e020008 DISPATCH_PERF_post_activate_mutation +0x2e02000c DISPATCH_PERF_post_activate_mutation +0x2e020010 DISPATCH_PERF_delayed_registration +0x2e020014 DISPATCH_PERF_mutable_target diff --git a/src/object.c b/src/object.c index 4089ba0..1928df5 100644 --- a/src/object.c +++ b/src/object.c @@ -64,7 +64,7 @@ _os_object_retain_with_resurrect(_os_object_t obj) { int xref_cnt = _os_object_xrefcnt_inc(obj); if (slowpath(xref_cnt < 0)) { - _OS_OBJECT_CLIENT_CRASH("Resurrection of an overreleased object"); + _OS_OBJECT_CLIENT_CRASH("Resurrection of an over-released object"); } if (slowpath(xref_cnt == 0)) { _os_object_retain_internal(obj); @@ -100,7 +100,7 @@ retry: if (slowpath(xref_cnt < -1)) { goto overrelease; } - if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, + if (slowpath(!os_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, xref_cnt + 1, &xref_cnt, relaxed))) { goto retry; } @@ -128,7 +128,15 @@ _os_object_allows_weak_reference(_os_object_t obj) void * _dispatch_alloc(const void *vtable, size_t size) { +#if OS_OBJECT_HAVE_OBJC1 + const struct dispatch_object_vtable_s *_vtable = vtable; + dispatch_object_t dou; + dou._os_obj = _os_object_alloc_realized(_vtable->_os_obj_objc_isa, size); + dou._do->do_vtable = vtable; + return dou._do; +#else return _os_object_alloc_realized(vtable, size); +#endif } void @@ -151,37 +159,40 @@ _dispatch_dealloc(dispatch_object_t dou) dispatch_queue_t tq = dou._do->do_targetq; dispatch_function_t func = dou._do->do_finalizer; void *ctxt = dou._do->do_ctxt; - +#if OS_OBJECT_HAVE_OBJC1 + // so that ddt doesn't pick up 
bad objects when malloc reuses this memory + dou._do->do_vtable = NULL; +#endif _os_object_dealloc(dou._os_obj); if (func && ctxt) { dispatch_async_f(tq, ctxt, func); } - _dispatch_release(tq); + _dispatch_release_tailcall(tq); } +#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou) { - if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { - // Arguments for and against this assert are within 6705399 - DISPATCH_CLIENT_CRASH("Release of a suspended object"); + unsigned long metatype = dx_metatype(dou._do); + if (metatype == _DISPATCH_QUEUE_TYPE || metatype == _DISPATCH_SOURCE_TYPE) { + _dispatch_queue_xref_dispose(dou._dq); } -#if !USE_OBJC if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { _dispatch_source_xref_dispose(dou._ds); - } else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) { + } else if (dx_type(dou._do) == DISPATCH_QUEUE_RUNLOOP_TYPE) { _dispatch_runloop_queue_xref_dispose(dou._dq); } - return _dispatch_release(dou._os_obj); -#endif + return _dispatch_release_tailcall(dou._os_obj); } +#endif void _dispatch_dispose(dispatch_object_t dou) { if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { - DISPATCH_CRASH("Release while enqueued"); + DISPATCH_INTERNAL_CRASH(dou._do->do_next, "Release while enqueued"); } dx_dispose(dou._do); return _dispatch_dealloc(dou); @@ -192,7 +203,7 @@ dispatch_get_context(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou); if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { return NULL; } return dou._do->do_ctxt; @@ -203,7 +214,7 @@ dispatch_set_context(dispatch_object_t dou, void *context) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context); if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { return; } dou._do->do_ctxt = context; @@ -214,69 +225,57 @@ dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer); if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { return; } dou._do->do_finalizer = finalizer; } void -dispatch_suspend(dispatch_object_t dou) +dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) { - DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { - return; + DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, tq); + if (dx_vtable(dou._do)->do_set_targetq) { + dx_vtable(dou._do)->do_set_targetq(dou._do, tq); + } else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && + !slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + if (slowpath(!tq)) { + tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); + } + _dispatch_object_set_target_queue_inline(dou._do, tq); } - // rdar://8181908 explains why we need to do an internal retain at every - // suspension. 
- (void)dispatch_atomic_add2o(dou._do, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, acquire); - _dispatch_retain(dou._do); } -DISPATCH_NOINLINE -static void -_dispatch_resume_slow(dispatch_object_t dou) +void +dispatch_activate(dispatch_object_t dou) { - _dispatch_wakeup(dou._do); - // Balancing the retain() done in suspend() for rdar://8181908 - _dispatch_release(dou._do); + DISPATCH_OBJECT_TFB(_dispatch_objc_activate, dou); + if (dx_vtable(dou._do)->do_resume) { + dx_vtable(dou._do)->do_resume(dou._do, true); + } +} + +void +dispatch_suspend(dispatch_object_t dou) +{ + DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou); + if (dx_vtable(dou._do)->do_suspend) { + dx_vtable(dou._do)->do_suspend(dou._do); + } } void dispatch_resume(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou); - // Global objects cannot be suspended or resumed. This also has the - // side effect of saturating the suspend count of an object and - // guarding against resuming due to overflow. - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { - return; - } - // Check the previous value of the suspend count. If the previous - // value was a single suspend interval, the object should be resumed. - // If the previous value was less than the suspend interval, the object - // has been over-resumed. - unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do, - do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, release); - if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) { - // Balancing the retain() done in suspend() for rdar://8181908 - return _dispatch_release(dou._do); - } - if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) { - return _dispatch_resume_slow(dou); + if (dx_vtable(dou._do)->do_resume) { + dx_vtable(dou._do)->do_resume(dou._do, false); } - DISPATCH_CLIENT_CRASH("Over-resume of an object"); } size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) { - return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " - "suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1, - dou._do->do_ref_cnt + 1, - dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL, - dou._do->do_suspend_cnt & 1); + return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, ", + dou._do->do_xref_cnt + 1, dou._do->do_ref_cnt + 1); } diff --git a/src/object.m b/src/object.m index 1a98d7e..323c98b 100644 --- a/src/object.m +++ b/src/object.m @@ -22,104 +22,16 @@ #if USE_OBJC -#if !__OBJC2__ -#error "Cannot build with legacy ObjC runtime" -#endif #if _OS_OBJECT_OBJC_ARC #error "Cannot build with ARC" #endif +#if defined(__OBJC_GC__) +#error Objective C GC isn't supported anymore +#endif #include #include - -#pragma mark - -#pragma mark _os_object_gc - -#if __OBJC_GC__ -#include -#include - -static bool _os_object_have_gc; -static malloc_zone_t *_os_object_gc_zone; - -static void -_os_object_gc_init(void) -{ - _os_object_have_gc = objc_collectingEnabled(); - if (slowpath(_os_object_have_gc)) { - _os_object_gc_zone = objc_collectableZone(); - (void)[OS_OBJECT_CLASS(object) class]; // OS_object class realization - } -} - -static _os_object_t -_os_object_make_uncollectable(_os_object_t obj) -{ - if (slowpath(_os_object_have_gc)) { - auto_zone_retain(_os_object_gc_zone, obj); - } - return obj; -} - -static _os_object_t -_os_object_make_collectable(_os_object_t obj) -{ - if (slowpath(_os_object_have_gc)) { - auto_zone_release(_os_object_gc_zone, obj); - } - return obj; -} - 
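The object.c changes above replace the inline suspend-count arithmetic with
per-class do_suspend/do_resume vtable hooks and add the one-shot
dispatch_activate(). A minimal sketch of the client-visible lifecycle under
this model (illustrative; the timer source and handler are hypothetical):

	#include <dispatch/dispatch.h>

	static void
	source_lifecycle(void)
	{
		dispatch_source_t ds = dispatch_source_create(
				DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue());
		dispatch_source_set_event_handler(ds, ^{ /* timer fired */ });
		dispatch_activate(ds); // dx_vtable(ds)->do_resume(ds, true): one-shot
		dispatch_suspend(ds);  // dx_vtable(ds)->do_suspend(ds)
		dispatch_resume(ds);   // dx_vtable(ds)->do_resume(ds, false)
	}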
-DISPATCH_NOINLINE -static id -_os_objc_gc_retain(id obj) -{ - if (fastpath(obj)) { - auto_zone_retain(_os_object_gc_zone, obj); - } - return obj; -} - -DISPATCH_NOINLINE -static void -_os_objc_gc_release(id obj) -{ - if (fastpath(obj)) { - (void)auto_zone_release(_os_object_gc_zone, obj); - } - asm(""); // prevent tailcall -} - -DISPATCH_NOINLINE -static id -_os_object_gc_retain(id obj) -{ - if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) { - return _os_object_retain(obj); - } else { - return _os_objc_gc_retain(obj); - } -} - -DISPATCH_NOINLINE -static void -_os_object_gc_release(id obj) -{ - if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) { - return _os_object_release(obj); - } else { - return _os_objc_gc_release(obj); - } -} - -#else // __OBJC_GC__ -#define _os_object_gc_init() -#define _os_object_make_uncollectable(obj) (obj) -#define _os_object_make_collectable(obj) (obj) -#define _os_object_have_gc 0 -#define _os_object_gc_retain(obj) (obj) -#define _os_object_gc_release(obj) -#endif // __OBJC_GC__ +#include #pragma mark - #pragma mark _os_object_t @@ -150,8 +62,6 @@ void _os_object_init(void) { _objc_init(); - _os_object_gc_init(); - if (slowpath(_os_object_have_gc)) return; Block_callbacks_RR callbacks = { sizeof(Block_callbacks_RR), (void (*)(const void *))&objc_retain, @@ -169,7 +79,7 @@ _os_object_t _os_object_alloc_realized(const void *cls, size_t size) { dispatch_assert(size >= sizeof(struct _os_object_s)); - return _os_object_make_uncollectable(_os_objc_alloc(cls, size)); + return _os_objc_alloc(cls, size); } _os_object_t @@ -177,13 +87,13 @@ _os_object_alloc(const void *_cls, size_t size) { dispatch_assert(size >= sizeof(struct _os_object_s)); Class cls = _cls ? [(id)_cls class] : [OS_OBJECT_CLASS(object) class]; - return _os_object_make_uncollectable(_os_objc_alloc(cls, size)); + return _os_objc_alloc(cls, size); } void _os_object_dealloc(_os_object_t obj) { - [_os_object_make_collectable(obj) dealloc]; + [obj dealloc]; } void @@ -206,7 +116,6 @@ _os_object_dispose(_os_object_t obj) void* os_retain(void *obj) { - if (slowpath(_os_object_have_gc)) return _os_object_gc_retain(obj); return objc_retain(obj); } @@ -214,7 +123,6 @@ os_retain(void *obj) void os_release(void *obj) { - if (slowpath(_os_object_have_gc)) return _os_object_gc_release(obj); return objc_release(obj); } @@ -222,6 +130,7 @@ os_release(void *obj) #pragma mark _os_object @implementation OS_OBJECT_CLASS(object) +DISPATCH_UNAVAILABLE_INIT() -(id)retain { return _os_object_retain(self); @@ -255,8 +164,7 @@ os_release(void *obj) #pragma mark - #pragma mark _dispatch_objc - -#include +#if OS_OBJECT_HAVE_OBJC2 id _dispatch_objc_alloc(Class cls, size_t size) @@ -313,6 +221,12 @@ _dispatch_objc_resume(dispatch_object_t dou) return [dou _resume]; } +void +_dispatch_objc_activate(dispatch_object_t dou) +{ + return [dou _activate]; +} + size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) { @@ -325,6 +239,7 @@ _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) return offset; } +#endif #pragma mark - #pragma mark _dispatch_object @@ -332,18 +247,7 @@ _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) #define DISPATCH_OBJC_LOAD() + (void)load {} @implementation DISPATCH_CLASS(object) - -- (id)init { - self = [super init]; - [self release]; - self = nil; - return self; -} - -- (void)_xref_dispose { - _dispatch_xref_dispose(self); - [super _xref_dispose]; -} +DISPATCH_UNAVAILABLE_INIT() - (void)_dispose { return _dispatch_dispose(self); // calls 
_os_object_dealloc() @@ -354,7 +258,7 @@ _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) if (!nsstring) return nil; char buf[2048]; struct dispatch_object_s *obj = (struct dispatch_object_s *)self; - if (obj->do_vtable->do_debug) { + if (dx_vtable(obj)->do_debug) { dx_debug(obj, buf, sizeof(buf)); } else { strlcpy(buf, dx_kind(obj), sizeof(buf)); @@ -368,6 +272,7 @@ _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) @implementation DISPATCH_CLASS(queue) DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() - (NSString *)description { Class nsstring = objc_lookUpClass("NSString"); @@ -377,22 +282,42 @@ DISPATCH_OBJC_LOAD() class_getName([self class]), dispatch_queue_get_label(self), self]; } +- (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + [super _xref_dispose]; +} + @end @implementation DISPATCH_CLASS(source) DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() - (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); _dispatch_source_xref_dispose(self); [super _xref_dispose]; } @end +@implementation DISPATCH_CLASS(mach) +DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() + +- (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + [super _xref_dispose]; +} + +@end + @implementation DISPATCH_CLASS(queue_runloop) DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() - (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); _dispatch_runloop_queue_xref_dispose(self); [super _xref_dispose]; } @@ -402,30 +327,30 @@ DISPATCH_OBJC_LOAD() #define DISPATCH_CLASS_IMPL(name) \ @implementation DISPATCH_CLASS(name) \ DISPATCH_OBJC_LOAD() \ + DISPATCH_UNAVAILABLE_INIT() \ @end +#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA +DISPATCH_CLASS_IMPL(data) +#endif DISPATCH_CLASS_IMPL(semaphore) DISPATCH_CLASS_IMPL(group) +DISPATCH_CLASS_IMPL(queue_serial) +DISPATCH_CLASS_IMPL(queue_concurrent) +DISPATCH_CLASS_IMPL(queue_main) DISPATCH_CLASS_IMPL(queue_root) DISPATCH_CLASS_IMPL(queue_mgr) DISPATCH_CLASS_IMPL(queue_specific_queue) DISPATCH_CLASS_IMPL(queue_attr) -DISPATCH_CLASS_IMPL(mach) DISPATCH_CLASS_IMPL(mach_msg) DISPATCH_CLASS_IMPL(io) DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) @implementation OS_OBJECT_CLASS(voucher) +DISPATCH_UNAVAILABLE_INIT() DISPATCH_OBJC_LOAD() -- (id)init { - self = [super init]; - [self release]; - self = nil; - return self; -} - - (void)_xref_dispose { return _voucher_xref_dispose(self); // calls _os_object_release_internal() } @@ -448,15 +373,9 @@ DISPATCH_OBJC_LOAD() #if VOUCHER_ENABLE_RECIPE_OBJECTS @implementation OS_OBJECT_CLASS(voucher_recipe) +DISPATCH_UNAVAILABLE_INIT() DISPATCH_OBJC_LOAD() -- (id)init { - self = [super init]; - [self release]; - self = nil; - return self; -} - - (void)_dispose { } @@ -468,23 +387,26 @@ DISPATCH_OBJC_LOAD() @end #endif + #pragma mark - -#pragma mark dispatch_autorelease_pool +#pragma mark dispatch_last_resort_autorelease_pool #if DISPATCH_COCOA_COMPAT void * -_dispatch_autorelease_pool_push(void) { +_dispatch_last_resort_autorelease_pool_push(void) +{ if (!slowpath(_os_object_debug_missing_pools)) { - return objc_autoreleasePoolPush(); + return _dispatch_autorelease_pool_push(); } return NULL; } void -_dispatch_autorelease_pool_pop(void *context) { +_dispatch_last_resort_autorelease_pool_pop(void *context) +{ if (!slowpath(_os_object_debug_missing_pools)) { - return objc_autoreleasePoolPop(context); + return _dispatch_autorelease_pool_pop(context); } } @@ -494,7 
+416,8 @@ _dispatch_autorelease_pool_pop(void *context) { #pragma mark dispatch_client_callout // Abort on uncaught exceptions thrown from client callouts rdar://8577499 -#if DISPATCH_USE_CLIENT_CALLOUT && !__USING_SJLJ_EXCEPTIONS__ +#if DISPATCH_USE_CLIENT_CALLOUT && !__USING_SJLJ_EXCEPTIONS__ && \ + OS_OBJECT_HAVE_OBJC2 // On platforms with zero-cost exceptions, use a compiler-generated catch-all // exception handler. @@ -524,6 +447,7 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } } +#if HAVE_MACH #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -537,6 +461,7 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, objc_terminate(); } } +#endif // HAVE_MACH #endif // DISPATCH_USE_CLIENT_CALLOUT diff --git a/src/object_internal.h b/src/object_internal.h index 4778f4c..80bb102 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -27,116 +27,200 @@ #ifndef __DISPATCH_OBJECT_INTERNAL__ #define __DISPATCH_OBJECT_INTERNAL__ -#if OS_OBJECT_USE_OBJC -#define DISPATCH_DECL_INTERNAL_SUBCLASS(name, super) \ - OS_OBJECT_DECL_SUBCLASS(name, super) -#define DISPATCH_DECL_INTERNAL(name) \ - DISPATCH_DECL_INTERNAL_SUBCLASS(name, dispatch_object) -#define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super) \ - _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) +#if !OS_OBJECT_USE_OBJC +#define OS_OBJECT_DECL(name) DISPATCH_DECL(name) +#define OS_OBJECT_DECL_SUBCLASS(name, super) DISPATCH_DECL(name) +#endif + +#if USE_OBJC +#define OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) _OS_##name##_vtable +#define DISPATCH_CLASS_SYMBOL(name) OS_dispatch_##name##_class +#define DISPATCH_CLASS_RAW_SYMBOL_NAME(name) \ + OS_OBJC_CLASS_RAW_SYMBOL_NAME(DISPATCH_CLASS(name)) #else -#define DISPATCH_DECL_INTERNAL_SUBCLASS(name, super) DISPATCH_DECL(name) -#define DISPATCH_DECL_INTERNAL(name) DISPATCH_DECL(name) -#define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super) -#endif // OS_OBJECT_USE_OBJC - -DISPATCH_ENUM(dispatch_invoke_flags, unsigned long, - DISPATCH_INVOKE_NONE = 0x00, - /* This invoke is a stealer, meaning that it doesn't own the - * enqueue lock, and is not allowed to requeue elsewhere - */ - DISPATCH_INVOKE_STEALING = 0x01, - /* The `dc` argument is a dispatch continuation wrapper - * created by _dispatch_queue_push_override - */ - DISPATCH_INVOKE_OVERRIDING = 0x02, -); +#define OS_OBJECT_CLASS_SYMBOL(name) _##name##_vtable +#define OS_OBJC_CLASS_RAW_SYMBOL_NAME(name) \ + "__" OS_STRINGIFY(name) "_vtable" +#define DISPATCH_CLASS_SYMBOL(name) _dispatch_##name##_vtable +#define DISPATCH_CLASS_RAW_SYMBOL_NAME(name) \ + "__dispatch_" OS_STRINGIFY(name) "_vtable" +#endif + +#define DISPATCH_CLASS(name) OS_dispatch_##name +#if USE_OBJC +#define DISPATCH_OBJC_CLASS_DECL(name) \ + extern void *DISPATCH_CLASS_SYMBOL(name) \ + asm(DISPATCH_CLASS_RAW_SYMBOL_NAME(name)) +#endif + +// define a new proper class +#define OS_OBJECT_CLASS_DECL(name, super, ...) \ + struct name##_s; \ + struct name##_extra_vtable_s { \ + __VA_ARGS__; \ + }; \ + struct name##_vtable_s { \ + _OS_OBJECT_CLASS_HEADER(); \ + struct name##_extra_vtable_s _os_obj_vtable; \ + }; \ + OS_OBJECT_EXTRA_VTABLE_DECL(name, name) \ + extern const struct name##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ + asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) + +#if OS_OBJECT_SWIFT3 +#define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) 
\ + OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)); \ + OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) +#elif OS_OBJECT_USE_OBJC +#define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \ + OS_OBJECT_DECL(name); \ + OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) +#else +#define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \ + typedef struct name##_s *name##_t; \ + OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) +#endif + +#define DISPATCH_CLASS_DECL_BARE(name) \ + OS_OBJECT_CLASS_DECL(dispatch_##name, dispatch_object, \ + DISPATCH_OBJECT_VTABLE_HEADER(dispatch_##name)) +#define DISPATCH_CLASS_DECL(name) \ + _OS_OBJECT_DECL_PROTOCOL(dispatch_##name, dispatch_object) \ + _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_##name, dispatch_##name) \ + DISPATCH_CLASS_DECL_BARE(name) + +#define DISPATCH_INTERNAL_CLASS_DECL(name) \ + DISPATCH_DECL(dispatch_##name); \ + DISPATCH_CLASS_DECL(name) + +// define a new subclass used in a cluster +#define OS_OBJECT_SUBCLASS_DECL(name, super) \ + _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ + struct name##_s; \ + OS_OBJECT_EXTRA_VTABLE_DECL(name, super) \ + extern const struct super##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ + asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) + +#define DISPATCH_SUBCLASS_DECL(name, super) \ + OS_OBJECT_SUBCLASS_DECL(dispatch_##name, super) + +#if OS_OBJECT_SWIFT3 +// define a new internal subclass used in a class cluster +#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super) \ + _OS_OBJECT_DECL_PROTOCOL(name, super); \ + OS_OBJECT_SUBCLASS_DECL(name, super) + +#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ + _OS_OBJECT_DECL_PROTOCOL(dispatch_##name, dispatch_##super) \ + DISPATCH_SUBCLASS_DECL(name, dispatch_##super) +#else +// define a new internal subclass used in a class cluster +#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super) \ + OS_OBJECT_DECL_SUBCLASS(name, super); \ + OS_OBJECT_SUBCLASS_DECL(name, super) + +#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ + OS_OBJECT_DECL_SUBCLASS(dispatch_##name, dispatch_##super); \ + DISPATCH_SUBCLASS_DECL(name, dispatch_##super) +#endif + +// vtable symbols +#define OS_OBJECT_VTABLE(name) (&OS_OBJECT_CLASS_SYMBOL(name)) +#define DISPATCH_OBJC_CLASS(name) (&DISPATCH_CLASS_SYMBOL(name)) + +// vtables for subclasses used in a class cluster #if USE_OBJC -#define DISPATCH_CLASS(name) OS_OBJECT_CLASS(dispatch_##name) // ObjC classes and dispatch vtables are co-located via linker order and alias // files rdar://10640168 -#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ +#if OS_OBJECT_HAVE_OBJC2 +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) \ __attribute__((section("__DATA,__objc_data"), used)) \ - static const struct { \ - DISPATCH_VTABLE_HEADER(super); \ - } DISPATCH_CONCAT(_,DISPATCH_CLASS(name##_vtable)) = { \ - __VA_ARGS__ \ + const struct super##_extra_vtable_s \ + OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) = { __VA_ARGS__ } +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) +#define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name) +#else +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) 
\ + const struct super##_vtable_s \ + OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) = { \ + ._os_obj_objc_isa = &OS_OBJECT_CLASS_SYMBOL(name), \ + ._os_obj_vtable = { __VA_ARGS__ }, \ } +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) \ + extern const struct super##_vtable_s \ + OS_OBJECT_EXTRA_VTABLE_SYMBOL(name); +#define DISPATCH_VTABLE(name) &OS_OBJECT_EXTRA_VTABLE_SYMBOL(dispatch_##name) +#endif #else -#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ - DISPATCH_CONST_STRUCT_INSTANCE(dispatch_##super##_vtable_s, \ - _dispatch_##name##_vtable, \ - ._os_obj_xref_dispose = _dispatch_xref_dispose, \ - ._os_obj_dispose = _dispatch_dispose, \ - __VA_ARGS__) +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) \ + const struct super##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) = { \ + ._os_obj_xref_dispose = xdispose, \ + ._os_obj_dispose = dispose, \ + ._os_obj_vtable = { __VA_ARGS__ }, \ + } +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) +#define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name) #endif // USE_OBJC -#define DISPATCH_SUBCLASS_DECL(name, super) \ - DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, super) \ - struct dispatch_##name##_s; \ - extern DISPATCH_CONST_STRUCT_DECL(dispatch_##name##_vtable_s, \ - _dispatch_##name##_vtable, \ - { \ - _OS_OBJECT_CLASS_HEADER(); \ - DISPATCH_VTABLE_HEADER(name); \ - }) -#define DISPATCH_CLASS_DECL(name) DISPATCH_SUBCLASS_DECL(name, dispatch_object) -#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ - DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_##name, dispatch_##super); \ - DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, dispatch_##super) \ - extern DISPATCH_CONST_STRUCT_DECL(dispatch_##super##_vtable_s, \ - _dispatch_##name##_vtable) +#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ + OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##super, \ + _dispatch_xref_dispose, _dispatch_dispose, __VA_ARGS__) + +// vtables for proper classes +#define OS_OBJECT_VTABLE_INSTANCE(name, xdispose, dispose, ...) \ + OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, name, \ + xdispose, dispose, __VA_ARGS__) + #define DISPATCH_VTABLE_INSTANCE(name, ...) 
\ DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, name, __VA_ARGS__) -#define DISPATCH_VTABLE(name) &_dispatch_##name##_vtable -#if !TARGET_OS_WIN32 -#define DISPATCH_VTABLE_HEADER(x) \ +#define DISPATCH_INVOKABLE_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - size_t (*const do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - void (*const do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \ - dispatch_invoke_flags_t); \ - unsigned long (*const do_probe)(struct dispatch_##x##_s *); \ - void (*const do_dispose)(struct dispatch_##x##_s *); + void (*const do_invoke)(struct x##_s *, dispatch_invoke_flags_t) + +#define DISPATCH_QUEUEABLE_VTABLE_HEADER(x) \ + DISPATCH_INVOKABLE_VTABLE_HEADER(x); \ + void (*const do_wakeup)(struct x##_s *, \ + pthread_priority_t, dispatch_wakeup_flags_t); \ + void (*const do_dispose)(struct x##_s *) + +#define DISPATCH_OBJECT_VTABLE_HEADER(x) \ + DISPATCH_QUEUEABLE_VTABLE_HEADER(x); \ + void (*const do_set_targetq)(struct x##_s *, dispatch_queue_t); \ + void (*const do_suspend)(struct x##_s *); \ + void (*const do_resume)(struct x##_s *, bool activate); \ + void (*const do_finalize_activation)(struct x##_s *); \ + size_t (*const do_debug)(struct x##_s *, char *, size_t) + +#define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable) +#define dx_type(x) dx_vtable(x)->do_type +#define dx_subtype(x) (dx_vtable(x)->do_type & _DISPATCH_SUB_TYPE_MASK) +#define dx_metatype(x) (dx_vtable(x)->do_type & _DISPATCH_META_TYPE_MASK) +#define dx_hastypeflag(x, f) (dx_vtable(x)->do_type & _DISPATCH_##f##_TYPEFLAG) +#define dx_kind(x) dx_vtable(x)->do_kind +#define dx_debug(x, y, z) dx_vtable(x)->do_debug((x), (y), (z)) +#define dx_dispose(x) dx_vtable(x)->do_dispose(x) +#define dx_invoke(x, z) dx_vtable(x)->do_invoke(x, z) +#define dx_wakeup(x, y, z) dx_vtable(x)->do_wakeup(x, y, z) + +#define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT + +#if OS_OBJECT_HAVE_OBJC1 +#define DISPATCH_GLOBAL_OBJECT_HEADER(name) \ + .do_vtable = DISPATCH_VTABLE(name), \ + ._objc_isa = DISPATCH_OBJC_CLASS(name), \ + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT #else -// Cannot be const on Win32 because we initialize at runtime. 
-#define DISPATCH_VTABLE_HEADER(x) \ - unsigned long do_type; \ - const char *do_kind; \ - size_t (*do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - void (*do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \ - dispatch_invoke_flags_t); \ - unsigned long (*do_probe)(struct dispatch_##x##_s *); \ - void (*do_dispose)(struct dispatch_##x##_s *); +#define DISPATCH_GLOBAL_OBJECT_HEADER(name) \ + .do_vtable = DISPATCH_VTABLE(name), \ + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT #endif -#define dx_type(x) (x)->do_vtable->do_type -#define dx_metatype(x) ((x)->do_vtable->do_type & _DISPATCH_META_TYPE_MASK) -#define dx_kind(x) (x)->do_vtable->do_kind -#define dx_debug(x, y, z) (x)->do_vtable->do_debug((x), (y), (z)) -#define dx_dispose(x) (x)->do_vtable->do_dispose(x) -#define dx_invoke(x, y, z) (x)->do_vtable->do_invoke(x, y, z) -#define dx_probe(x) (x)->do_vtable->do_probe(x) - -#define DISPATCH_STRUCT_HEADER(x) \ - _OS_OBJECT_HEADER( \ - const struct dispatch_##x##_vtable_s *do_vtable, \ - do_ref_cnt, \ - do_xref_cnt); \ - struct dispatch_##x##_s *volatile do_next; \ - struct dispatch_queue_s *do_targetq; \ - void *do_ctxt; \ - void *do_finalizer; \ - unsigned int volatile do_suspend_cnt; - -#define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT -// "word and bit" must be a power of two to be safely subtracted -#define DISPATCH_OBJECT_SUSPEND_LOCK 1u -#define DISPATCH_OBJECT_SUSPEND_INTERVAL 2u -#define DISPATCH_OBJECT_SUSPENDED(x) \ - ((x)->do_suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL) #ifdef __LP64__ // the bottom nibble must not be zero, the rest of the bits should be random // we sign extend the 64-bit version so that a better instruction encoding is @@ -146,84 +230,285 @@ DISPATCH_ENUM(dispatch_invoke_flags, unsigned long, #define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) #endif +DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, + // The caller of dx_wakeup owns an internal refcount on the object being + // woken up + DISPATCH_WAKEUP_CONSUME = 0x00000001, + + // Some change to the object needs to be published to drainers. + // If the drainer isn't the same thread, some scheme such as the dispatch + // queue DIRTY bit must be used and a release barrier likely has to be + // involved before dx_wakeup returns + DISPATCH_WAKEUP_FLUSH = 0x00000002, + + // A slow waiter was just enqueued + DISPATCH_WAKEUP_SLOW_WAITER = 0x00000004, + + // The caller desires to apply an override on the object being woken up + // and has already adjusted the `oq_override` field. When this flag is + // passed, the priority passed to dx_wakeup() should not be 0 + DISPATCH_WAKEUP_OVERRIDING = 0x00000008, + + // At the time this queue was woken up it had an override that must be + // preserved (used to solve a race with _dispatch_queue_drain_try_unlock()) + DISPATCH_WAKEUP_WAS_OVERRIDDEN = 0x00000010, + +#define _DISPATCH_WAKEUP_OVERRIDE_BITS \ + ((dispatch_wakeup_flags_t)(DISPATCH_WAKEUP_OVERRIDING | \ + DISPATCH_WAKEUP_WAS_OVERRIDDEN)) +); + +DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, + DISPATCH_INVOKE_NONE = 0x00000000, + + // Invoke modes + // + // @const DISPATCH_INVOKE_STEALING + // This invoke is a stealer, meaning that it doesn't own the + // enqueue lock at drain lock time. + // + // @const DISPATCH_INVOKE_OVERRIDING + // This invoke is draining the hierarchy on another root queue and needs + // to fake the identity of the original one. 
+ // + DISPATCH_INVOKE_STEALING = 0x00000001, + DISPATCH_INVOKE_OVERRIDING = 0x00000002, + + // Below this point flags are propagated to recursive calls to drain(), + // continuation pop() or dx_invoke(). +#define _DISPATCH_INVOKE_PROPAGATE_MASK 0xffff0000u + + // Drain modes + // + // @const DISPATCH_INVOKE_WORKER_DRAIN + // Invoke has been issued by a worker thread (work queue thread, or + // pthread root queue) drain. This flag is NOT set when the main queue, + // manager queue or runloop queues are drained + // + // @const DISPATCH_INVOKE_REDIRECTING_DRAIN + // Has only been draining concurrent queues so far + // Implies DISPATCH_INVOKE_WORKER_DRAIN + // + // @const DISPATCH_INVOKE_MANAGER_DRAIN + // We're draining from a manager context + // + DISPATCH_INVOKE_WORKER_DRAIN = 0x00010000, + DISPATCH_INVOKE_REDIRECTING_DRAIN = 0x00020000, + DISPATCH_INVOKE_MANAGER_DRAIN = 0x00040000, +#define _DISPATCH_INVOKE_DRAIN_MODE_MASK 0x000f0000u + + // Autoreleasing modes + // + // @const DISPATCH_INVOKE_AUTORELEASE_ALWAYS + // Always use autoreleasepools around callouts + // + // @const DISPATCH_INVOKE_AUTORELEASE_NEVER + // Never use autoreleasepools around callouts + // + DISPATCH_INVOKE_AUTORELEASE_ALWAYS = 0x00100000, + DISPATCH_INVOKE_AUTORELEASE_NEVER = 0x00200000, +#define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x00300000u +); + enum { - _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations + _DISPATCH_META_TYPE_MASK = 0xffff0000, // mask for object meta-types + _DISPATCH_TYPEFLAGS_MASK = 0x0000ff00, // mask for object typeflags + _DISPATCH_SUB_TYPE_MASK = 0x000000ff, // mask for object sub-types + + _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations _DISPATCH_QUEUE_TYPE = 0x10000, // meta-type for queues _DISPATCH_SOURCE_TYPE = 0x20000, // meta-type for sources _DISPATCH_SEMAPHORE_TYPE = 0x30000, // meta-type for semaphores - _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node + _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node _DISPATCH_IO_TYPE = 0x50000, // meta-type for io channels _DISPATCH_OPERATION_TYPE = 0x60000, // meta-type for io operations - _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks - _DISPATCH_META_TYPE_MASK = 0xfff0000, // mask for object meta-types - _DISPATCH_ATTR_TYPE = 0x10000000, // meta-type for attributes + _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks - DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, + _DISPATCH_QUEUE_ROOT_TYPEFLAG = 0x0100, // bit set for any root queues +#define DISPATCH_CONTINUATION_TYPE(name) \ + (_DISPATCH_CONTINUATION_TYPE | DC_##name##_TYPE) DISPATCH_DATA_TYPE = 1 | _DISPATCH_NODE_TYPE, - DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, + DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, + DISPATCH_QUEUE_ATTR_TYPE = 3 | _DISPATCH_NODE_TYPE, - DISPATCH_IO_TYPE = _DISPATCH_IO_TYPE, - DISPATCH_OPERATION_TYPE = _DISPATCH_OPERATION_TYPE, - DISPATCH_DISK_TYPE = _DISPATCH_DISK_TYPE, + DISPATCH_IO_TYPE = 0 | _DISPATCH_IO_TYPE, + DISPATCH_OPERATION_TYPE = 0 | _DISPATCH_OPERATION_TYPE, + DISPATCH_DISK_TYPE = 0 | _DISPATCH_DISK_TYPE, - DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE |_DISPATCH_ATTR_TYPE, + DISPATCH_QUEUE_LEGACY_TYPE = 1 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_SERIAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_CONCURRENT_TYPE = 3 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = 4 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_QUEUE_ROOT_TYPEFLAG, + DISPATCH_QUEUE_RUNLOOP_TYPE = 5 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_QUEUE_ROOT_TYPEFLAG, + 
DISPATCH_QUEUE_MGR_TYPE = 6 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_SPECIFIC_TYPE = 7 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_ROOT_TYPE = 2 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_MGR_TYPE = 3 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_SPECIFIC_TYPE = 4 | _DISPATCH_QUEUE_TYPE, + DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, + DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, - DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, - DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, + DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, + DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, - DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, - DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, }; -DISPATCH_SUBCLASS_DECL(object, object); +typedef struct _os_object_vtable_s { + _OS_OBJECT_CLASS_HEADER(); +} _os_object_vtable_s; + +typedef struct _os_object_s { + _OS_OBJECT_HEADER( + const _os_object_vtable_s *os_obj_isa, + os_obj_ref_cnt, + os_obj_xref_cnt); +} _os_object_s; + +#if OS_OBJECT_HAVE_OBJC1 +#define OS_OBJECT_STRUCT_HEADER(x) \ + _OS_OBJECT_HEADER(\ + const void *_objc_isa, \ + do_ref_cnt, \ + do_xref_cnt); \ + const struct x##_vtable_s *do_vtable +#else +#define OS_OBJECT_STRUCT_HEADER(x) \ + _OS_OBJECT_HEADER(\ + const struct x##_vtable_s *do_vtable, \ + do_ref_cnt, \ + do_xref_cnt) +#endif + +#define _DISPATCH_OBJECT_HEADER(x) \ + struct _os_object_s _as_os_obj[0]; \ + OS_OBJECT_STRUCT_HEADER(dispatch_##x); \ + struct dispatch_##x##_s *volatile do_next; \ + struct dispatch_queue_s *do_targetq; \ + void *do_ctxt; \ + void *do_finalizer + +#define DISPATCH_OBJECT_HEADER(x) \ + struct dispatch_object_s _as_do[0]; \ + _DISPATCH_OBJECT_HEADER(x) + +// Swift-unavailable -init requires a method in each class.
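+// A minimal sketch of the intended use (illustrative; the class name is
+// just an example): each Objective-C class in the hierarchy expands the
+// macro inside its @implementation, so that a direct -init crashes:
+//
+//   @implementation OS_OBJECT_CLASS(dispatch_queue)
+//   DISPATCH_UNAVAILABLE_INIT()
+//   @end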
+#define DISPATCH_UNAVAILABLE_INIT() \ + - (instancetype)init { \ + DISPATCH_CLIENT_CRASH(0, "-init called directly"); \ + return [super init]; \ + } + +_OS_OBJECT_DECL_PROTOCOL(dispatch_object, object); + +OS_OBJECT_CLASS_DECL(dispatch_object, object, + DISPATCH_OBJECT_VTABLE_HEADER(dispatch_object)); + struct dispatch_object_s { - DISPATCH_STRUCT_HEADER(object); + _DISPATCH_OBJECT_HEADER(object); +}; + +#if OS_OBJECT_HAVE_OBJC1 +#define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ + struct dispatch_object_s *volatile ns##_items_head; \ + unsigned long ns##_serialnum; \ + union { \ + uint64_t volatile __state_field__; \ + DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + dispatch_lock __state_field__##_lock, \ + uint32_t __state_field__##_bits \ + ); \ + }; /* needs to be 64-bit aligned */ \ + /* LP64 global queue cacheline boundary */ \ + const char *ns##_label; \ + voucher_t ns##_override_voucher; \ + dispatch_priority_t ns##_priority; \ + dispatch_priority_t volatile ns##_override; \ + struct dispatch_object_s *volatile ns##_items_tail +#else +#define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ + struct dispatch_object_s *volatile ns##_items_head; \ + union { \ + uint64_t volatile __state_field__; \ + DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + dispatch_lock __state_field__##_lock, \ + uint32_t __state_field__##_bits \ + ); \ + }; /* needs to be 64-bit aligned */ \ + /* LP64 global queue cacheline boundary */ \ + unsigned long ns##_serialnum; \ + const char *ns##_label; \ + voucher_t ns##_override_voucher; \ + dispatch_priority_t ns##_priority; \ + dispatch_priority_t volatile ns##_override; \ + struct dispatch_object_s *volatile ns##_items_tail +#endif + +OS_OBJECT_INTERNAL_CLASS_DECL(os_mpsc_queue, object, + DISPATCH_QUEUEABLE_VTABLE_HEADER(os_mpsc_queue)); + +struct os_mpsc_queue_s { + struct _os_object_s _as_os_obj[0]; + OS_OBJECT_STRUCT_HEADER(os_mpsc_queue); + struct dispatch_object_s *volatile oq_next; + void *oq_opaque1; // do_targetq + void *oq_opaque2; // do_ctxt + void *oq_opaque3; // do_finalizer + _OS_MPSC_QUEUE_FIELDS(oq, __oq_state_do_not_use); }; size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); void *_dispatch_alloc(const void *vtable, size_t size); +#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou); +#endif void _dispatch_dispose(dispatch_object_t dou); #if DISPATCH_COCOA_COMPAT +#if USE_OBJC +#include +#include +#define _dispatch_autorelease_pool_push() \ + objc_autoreleasePoolPush() +#define _dispatch_autorelease_pool_pop(context) \ + objc_autoreleasePoolPop(context) +#else void *_dispatch_autorelease_pool_push(void); void _dispatch_autorelease_pool_pop(void *context); #endif +void *_dispatch_last_resort_autorelease_pool_push(void); +void _dispatch_last_resort_autorelease_pool_pop(void *context); + +#define dispatch_invoke_with_autoreleasepool(flags, ...) ({ \ + void *pool = NULL; \ + if ((flags) & DISPATCH_INVOKE_AUTORELEASE_ALWAYS) { \ + pool = _dispatch_autorelease_pool_push(); \ + DISPATCH_COMPILER_CAN_ASSUME(pool); \ + }; \ + __VA_ARGS__; \ + if (pool) _dispatch_autorelease_pool_pop(pool); \ + }) +#else +#define dispatch_invoke_with_autoreleasepool(flags, ...) 
\ + do { (void)flags; __VA_ARGS__; } while (0) +#endif -#if USE_OBJC -#include - -#define OS_OBJC_CLASS_SYMBOL(name) \ - DISPATCH_CONCAT(OBJC_CLASS_$_,name) -#define OS_OBJC_CLASS_DECL(name) \ - extern void *OS_OBJC_CLASS_SYMBOL(name) -#define OS_OBJC_CLASS(name) \ - ((Class)&OS_OBJC_CLASS_SYMBOL(name)) -#define OS_OBJECT_OBJC_CLASS_DECL(name) \ - OS_OBJC_CLASS_DECL(OS_OBJECT_CLASS(name)) -#define OS_OBJECT_OBJC_CLASS(name) \ - OS_OBJC_CLASS(OS_OBJECT_CLASS(name)) -#define DISPATCH_OBJC_CLASS_DECL(name) \ - OS_OBJC_CLASS_DECL(DISPATCH_CLASS(name)) -#define DISPATCH_OBJC_CLASS(name) \ - OS_OBJC_CLASS(DISPATCH_CLASS(name)) +#if USE_OBJC OS_OBJECT_OBJC_CLASS_DECL(object); -DISPATCH_OBJC_CLASS_DECL(object); +#endif +#if OS_OBJECT_HAVE_OBJC2 // ObjC toll-free bridging, keep in sync with libdispatch.order file +// +// This is required by the dispatch_data_t/NSData bridging, which is not +// supported on the old runtime. #define DISPATCH_OBJECT_TFB(f, o, ...) \ if (slowpath((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ slowpath((Class)((o)._os_obj->os_obj_isa) < \ - DISPATCH_OBJC_CLASS(object)) || \ + (Class)OS_OBJECT_VTABLE(dispatch_object)) || \ slowpath((Class)((o)._os_obj->os_obj_isa) >= \ - OS_OBJECT_OBJC_CLASS(object))) { \ + (Class)OS_OBJECT_VTABLE(object))) { \ return f((o), ##__VA_ARGS__); \ } @@ -238,6 +523,7 @@ void _dispatch_objc_set_target_queue(dispatch_object_t dou, dispatch_queue_t queue); void _dispatch_objc_suspend(dispatch_object_t dou); void _dispatch_objc_resume(dispatch_object_t dou); +void _dispatch_objc_activate(dispatch_object_t dou); size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #if __OBJC2__ @@ -248,11 +534,12 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); - (void)_setTargetQueue:(dispatch_queue_t)queue; - (void)_suspend; - (void)_resume; +- (void)_activate; @end #endif // __OBJC2__ -#else // USE_OBJC +#else #define DISPATCH_OBJECT_TFB(f, o, ...) 
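// A sketch of how this macro pair is meant to be used (illustrative, and
// assuming the _dispatch_objc_retain helper declared alongside the other
// _dispatch_objc_* entry points): a bridged API diverts ObjC instances
// first, then falls through to the plain dispatch implementation. With
// the empty definition above, the wrapper compiles down to the dispatch
// path alone.
//
//   void
//   dispatch_retain(dispatch_object_t dou)
//   {
//       DISPATCH_OBJECT_TFB(_dispatch_objc_retain, dou);
//       // non-ObjC refcounting follows
//   }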
-#endif // USE_OBJC +#endif // OS_OBJECT_HAVE_OBJC2 #pragma mark - #pragma mark _os_object_s @@ -274,7 +561,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); typeof(o) _o = (o); \ int _ref_cnt = _o->f; \ if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ - _ref_cnt = dispatch_atomic_##op##2o(_o, f, m); \ + _ref_cnt = os_atomic_##op##2o(_o, f, m); \ } \ _ref_cnt; \ }) @@ -286,7 +573,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); _os_atomic_refcnt_perform2o(o, m, dec, release) #define _os_atomic_refcnt_dispose_barrier2o(o, m) \ - (void)dispatch_atomic_load2o(o, m, acquire) + (void)os_atomic_load2o(o, m, acquire) /* @@ -323,17 +610,6 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #define _os_object_refcnt_dispose_barrier(o) \ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt) -typedef struct _os_object_class_s { - _OS_OBJECT_CLASS_HEADER(); -} _os_object_class_s; - -typedef struct _os_object_s { - _OS_OBJECT_HEADER( - const _os_object_class_s *os_obj_isa, - os_obj_ref_cnt, - os_obj_xref_cnt); -} _os_object_s; - void _os_object_init(void); unsigned long _os_object_retain_count(_os_object_t obj); bool _os_object_retain_weak(_os_object_t obj); diff --git a/src/once.c b/src/once.c index 86b1a03..d7d6a8e 100644 --- a/src/once.c +++ b/src/once.c @@ -26,7 +26,7 @@ typedef struct _dispatch_once_waiter_s { volatile struct _dispatch_once_waiter_s *volatile dow_next; - _dispatch_thread_semaphore_t dow_sema; + dispatch_thread_event_s dow_event; mach_port_t dow_thread; } *_dispatch_once_waiter_t; @@ -44,13 +44,23 @@ DISPATCH_NOINLINE void dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) { +#if DISPATCH_GATE_USE_FOR_DISPATCH_ONCE + dispatch_once_gate_t l = (dispatch_once_gate_t)val; + + if (_dispatch_once_gate_tryenter(l)) { + _dispatch_client_callout(ctxt, func); + _dispatch_once_gate_broadcast(l); + } else { + _dispatch_once_gate_wait(l); + } +#else _dispatch_once_waiter_t volatile *vval = (_dispatch_once_waiter_t*)val; - struct _dispatch_once_waiter_s dow = { NULL, 0, MACH_PORT_NULL }; + struct _dispatch_once_waiter_s dow = { }; _dispatch_once_waiter_t tail = &dow, next, tmp; - _dispatch_thread_semaphore_t sema; + dispatch_thread_event_t event; - if (dispatch_atomic_cmpxchg(vval, NULL, tail, acquire)) { - dow.dow_thread = _dispatch_thread_port(); + if (os_atomic_cmpxchg(vval, NULL, tail, acquire)) { + dow.dow_thread = _dispatch_tid_self(); _dispatch_client_callout(ctxt, func); // The next barrier must be long and strong. @@ -103,36 +113,37 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) // On some CPUs, the most fully synchronizing instruction might // need to be issued. 
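	// (Why "maximally synchronizing": the dispatch_once fast path reads
	// the predicate without an acquire barrier, so the DISPATCH_ONCE_DONE
	// store below may only become visible after any loads that other CPUs
	// speculatively issued beforehand have drained; a plain release
	// barrier does not guarantee that on every architecture.)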
- dispatch_atomic_maximally_synchronizing_barrier(); + os_atomic_maximally_synchronizing_barrier(); // above assumed to contain release barrier - next = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); + next = os_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); while (next != tail) { _dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next); - sema = next->dow_sema; + event = &next->dow_event; next = tmp; - _dispatch_thread_semaphore_signal(sema); + _dispatch_thread_event_signal(event); } } else { - dow.dow_sema = _dispatch_get_thread_semaphore(); + _dispatch_thread_event_init(&dow.dow_event); next = *vval; for (;;) { if (next == DISPATCH_ONCE_DONE) { break; } - if (dispatch_atomic_cmpxchgvw(vval, next, tail, &next, release)) { + if (os_atomic_cmpxchgvw(vval, next, tail, &next, release)) { dow.dow_thread = next->dow_thread; dow.dow_next = next; if (dow.dow_thread) { pthread_priority_t pp = _dispatch_get_priority(); - _dispatch_thread_override_start(dow.dow_thread, pp); + _dispatch_thread_override_start(dow.dow_thread, pp, val); } - _dispatch_thread_semaphore_wait(dow.dow_sema); + _dispatch_thread_event_wait(&dow.dow_event); if (dow.dow_thread) { - _dispatch_thread_override_end(dow.dow_thread); + _dispatch_thread_override_end(dow.dow_thread, val); } break; } } - _dispatch_put_thread_semaphore(dow.dow_sema); + _dispatch_thread_event_destroy(&dow.dow_event); } +#endif } diff --git a/src/queue.c b/src/queue.c index 5868e87..58c545b 100644 --- a/src/queue.c +++ b/src/queue.c @@ -48,19 +48,17 @@ #define pthread_workqueue_t void* #endif +static void _dispatch_sig_thread(void *ctxt); static void _dispatch_cache_cleanup(void *value); -static void _dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_continuation_t dc, pthread_priority_t pp); +static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp); +static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc); static void _dispatch_queue_cleanup(void *ctxt); -static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, - unsigned int n); -static inline void _dispatch_queue_wakeup_global(dispatch_queue_t dq); -static inline _dispatch_thread_semaphore_t - _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq); -static inline bool _dispatch_queue_prepare_override(dispatch_queue_t dq, - dispatch_queue_t tq, pthread_priority_t p); -static inline void _dispatch_queue_push_override(dispatch_queue_t dq, - dispatch_queue_t tq, pthread_priority_t p, bool owning); +static void _dispatch_deferred_items_cleanup(void *ctxt); +static void _dispatch_frame_cleanup(void *ctxt); +static void _dispatch_context_cleanup(void *ctxt); +static void _dispatch_non_barrier_complete(dispatch_queue_t dq); +static inline void _dispatch_global_queue_poke(dispatch_queue_t dq); #if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread4(void *context); #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -76,14 +74,14 @@ static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); #endif #if DISPATCH_COCOA_COMPAT -static dispatch_once_t _dispatch_main_q_port_pred; -static dispatch_queue_t _dispatch_main_queue_wakeup(void); -unsigned long _dispatch_runloop_queue_wakeup(dispatch_queue_t dq); -static void _dispatch_runloop_queue_port_init(void *ctxt); -static void _dispatch_runloop_queue_port_dispose(dispatch_queue_t dq); +static dispatch_once_t _dispatch_main_q_handle_pred; +static void _dispatch_runloop_queue_poke(dispatch_queue_t dq, + pthread_priority_t pp, 
dispatch_wakeup_flags_t flags); +static void _dispatch_runloop_queue_handle_init(void *ctxt); +static void _dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq); #endif -static void _dispatch_root_queues_init(void *context); +static void _dispatch_root_queues_init_once(void *context); static dispatch_once_t _dispatch_root_queues_pred; #pragma mark - @@ -103,75 +101,51 @@ static struct dispatch_pthread_root_queue_context_s _dispatch_pthread_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = 
DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, }; #endif @@ -199,6 +173,14 @@ struct dispatch_root_queue_context_s { }; typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; +#define WORKQ_PRIO_INVALID (-1) +#ifndef WORKQ_BG_PRIOQUEUE_CONDITIONAL +#define WORKQ_BG_PRIOQUEUE_CONDITIONAL WORKQ_PRIO_INVALID +#endif +#ifndef WORKQ_HIGH_PRIOQUEUE_CONDITIONAL +#define WORKQ_HIGH_PRIOQUEUE_CONDITIONAL WORKQ_PRIO_INVALID +#endif + DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ @@ -226,7 +208,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -237,7 +219,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -314,7 +296,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -325,7 +307,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -337,165 +319,67 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol -// dq_running is set to 2 so that barrier operations go through the slow path DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_root_queues[] = { - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS], +#define _DISPATCH_ROOT_QUEUE_ENTRY(n, ...) 
\ + [DISPATCH_ROOT_QUEUE_IDX_##n] = { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \ + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ + .do_ctxt = &_dispatch_root_queue_contexts[ \ + DISPATCH_ROOT_QUEUE_IDX_##n], \ + .dq_width = DISPATCH_QUEUE_WIDTH_POOL, \ + .dq_override_voucher = DISPATCH_NO_VOUCHER, \ + .dq_override = DISPATCH_SATURATED_OVERRIDE, \ + __VA_ARGS__ \ + } + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS, .dq_label = "com.apple.root.maintenance-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 4, - }, - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS_OVERCOMMIT, .dq_label = "com.apple.root.maintenance-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 5, - }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS, .dq_label = "com.apple.root.background-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 6, - }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS_OVERCOMMIT, .dq_label = "com.apple.root.background-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 7, - }, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS, .dq_label = "com.apple.root.utility-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 8, - }, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS_OVERCOMMIT, .dq_label = "com.apple.root.utility-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 9, - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - 
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS, .dq_label = "com.apple.root.default-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 10, - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS_OVERCOMMIT, .dq_label = "com.apple.root.default-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 11, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS, .dq_label = "com.apple.root.user-initiated-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 12, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS_OVERCOMMIT, .dq_label = "com.apple.root.user-initiated-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 13, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS, .dq_label = "com.apple.root.user-interactive-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 14, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS_OVERCOMMIT, .dq_label = "com.apple.root.user-interactive-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 15, - }, + ), }; #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP @@ -553,26 +437,24 @@ static const int _dispatch_priority2wq[] = { }; #endif -#if 
DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_queue_s _dispatch_mgr_root_queue; #else -#define _dispatch_mgr_root_queue \ - _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] +#define _dispatch_mgr_root_queue _dispatch_root_queues[\ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] #endif // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_mgr_q = { - .do_vtable = DISPATCH_VTABLE(queue_mgr), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + DISPATCH_GLOBAL_OBJECT_HEADER(queue_mgr), + .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), .do_targetq = &_dispatch_mgr_root_queue, .dq_label = "com.apple.libdispatch-manager", .dq_width = 1, - .dq_is_thread_bound = 1, .dq_override_voucher = DISPATCH_NO_VOUCHER, + .dq_override = DISPATCH_SATURATED_OVERRIDE, .dq_serialnum = 2, }; @@ -580,10 +462,10 @@ dispatch_queue_t dispatch_get_global_queue(long priority, unsigned long flags) { if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { - return NULL; + return DISPATCH_BAD_INPUT; } dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init); + _dispatch_root_queues_init_once); qos_class_t qos; switch (priority) { #if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK @@ -642,61 +524,94 @@ dispatch_get_current_queue(void) return _dispatch_get_current_queue(); } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_targets_queue(dispatch_queue_t dq1, dispatch_queue_t dq2) +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) { - while (dq1) { - if (dq1 == dq2) { - return true; - } - dq1 = dq1->do_targetq; - } - return false; + _dispatch_client_assert_fail( + "Block was %sexpected to execute on queue [%s]", + expected ? "" : "not ", dq->dq_label ?: ""); } -#define DISPATCH_ASSERT_QUEUE_MESSAGE "BUG in client of libdispatch: " \ - "Assertion failed: Block was run on an unexpected queue" - -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_NORETURN static void -_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) +_dispatch_assert_queue_barrier_fail(dispatch_queue_t dq) { - char *msg; - asprintf(&msg, "%s\n%s queue: 0x%p[%s]", DISPATCH_ASSERT_QUEUE_MESSAGE, - expected ? "Expected" : "Unexpected", dq, dq->dq_label ? 
- dq->dq_label : ""); - _dispatch_log("%s", msg); - _dispatch_set_crash_log_message_dynamic(msg); - _dispatch_hardware_crash(); - free(msg); + _dispatch_client_assert_fail( + "Block was expected to act as a barrier on queue [%s]", + dq->dq_label ?: ""); } void dispatch_assert_queue(dispatch_queue_t dq) { - if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { - DISPATCH_CLIENT_CRASH("invalid queue passed to " + unsigned long metatype = dx_metatype(dq); + if (unlikely(metatype != _DISPATCH_QUEUE_TYPE)) { + DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue()"); } - dispatch_queue_t cq = _dispatch_queue_get_current(); - if (fastpath(cq) && fastpath(_dispatch_queue_targets_queue(cq, dq))) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_drain_pended(dq_state))) { + goto fail; + } + if (likely(_dq_state_drain_owner(dq_state) == _dispatch_tid_self())) { return; } + if (likely(dq->dq_width > 1)) { + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. + if (fastpath(_dispatch_thread_frame_find_queue(dq))) { + return; + } + } +fail: _dispatch_assert_queue_fail(dq, true); } void dispatch_assert_queue_not(dispatch_queue_t dq) { - if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { - DISPATCH_CLIENT_CRASH("invalid queue passed to " + unsigned long metatype = dx_metatype(dq); + if (unlikely(metatype != _DISPATCH_QUEUE_TYPE)) { + DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue_not()"); } - dispatch_queue_t cq = _dispatch_queue_get_current(); - if (slowpath(cq) && slowpath(_dispatch_queue_targets_queue(cq, dq))) { - _dispatch_assert_queue_fail(dq, false); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (_dq_state_drain_pended(dq_state)) { + return; + } + if (likely(_dq_state_drain_owner(dq_state) != _dispatch_tid_self())) { + if (likely(dq->dq_width == 1)) { + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. 
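+			// (Equivalently: a width-1 queue is serial, so only the
+			// drain-lock owner can be executing on it, and we verified
+			// just above that the current thread is not that owner.)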
+ return; + } + if (likely(!_dispatch_thread_frame_find_queue(dq))) { + return; + } + } + _dispatch_assert_queue_fail(dq, false); +} + +void +dispatch_assert_queue_barrier(dispatch_queue_t dq) +{ + dispatch_assert_queue(dq); + + if (likely(dq->dq_width == 1)) { + return; + } + + if (likely(dq->do_targetq)) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (likely(_dq_state_is_in_barrier(dq_state))) { + return; + } } + + _dispatch_assert_queue_barrier_fail(dq); } #if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG @@ -711,7 +626,6 @@ dispatch_assert_queue_not(dispatch_queue_t dq) #pragma mark dispatch_init #if HAVE_PTHREAD_WORKQUEUE_QOS -int _dispatch_set_qos_class_enabled; pthread_priority_t _dispatch_background_priority; pthread_priority_t _dispatch_user_initiated_priority; @@ -740,55 +654,64 @@ _dispatch_root_queues_init_qos(int supported) flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; } p = _pthread_qos_class_encode(qos, 0, flags); - _dispatch_root_queues[i].dq_priority = p; - } - p = _pthread_qos_class_encode(qos_class_main(), 0, 0); - _dispatch_main_q.dq_priority = p; - _dispatch_queue_set_override_priority(&_dispatch_main_q); - _dispatch_background_priority = _dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS].dq_priority & - ~_PTHREAD_PRIORITY_FLAGS_MASK; - _dispatch_user_initiated_priority = _dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS].dq_priority & - ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { - _dispatch_set_qos_class_enabled = 1; + _dispatch_root_queues[i].dq_priority = (dispatch_priority_t)p; } } -#endif +#endif // HAVE_PTHREAD_WORKQUEUE_QOS static inline bool -_dispatch_root_queues_init_workq(void) +_dispatch_root_queues_init_workq(int *wq_supported) { + int r; bool result = false; + *wq_supported = 0; #if HAVE_PTHREAD_WORKQUEUES bool disable_wq = false; #if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif - int r; -#if HAVE_PTHREAD_WORKQUEUE_QOS +#if DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS bool disable_qos = false; #if DISPATCH_DEBUG disable_qos = slowpath(getenv("LIBDISPATCH_DISABLE_QOS")); #endif - if (!disable_qos && !disable_wq) { - r = _pthread_workqueue_supported(); - int supported = r; - if (r & WORKQ_FEATURE_FINEPRIO) { +#if DISPATCH_USE_KEVENT_WORKQUEUE + bool disable_kevent_wq = false; +#if DISPATCH_DEBUG + disable_kevent_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")); +#endif +#endif + if (!disable_wq && !disable_qos) { + *wq_supported = _pthread_workqueue_supported(); +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!disable_kevent_wq && (*wq_supported & WORKQ_FEATURE_KEVENT)) { + r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread3, + (pthread_workqueue_function_kevent_t) + _dispatch_kevent_worker_thread, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#if DISPATCH_USE_MGR_THREAD + _dispatch_kevent_workqueue_enabled = !r; +#endif +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + _dispatch_evfilt_machport_direct_enabled = !r; +#endif + result = !r; + } else +#endif + if (*wq_supported & WORKQ_FEATURE_FINEPRIO) { +#if DISPATCH_USE_MGR_THREAD r = _pthread_workqueue_init(_dispatch_worker_thread3, offsetof(struct dispatch_queue_s, dq_serialnum), 0); result = !r; - if (result) _dispatch_root_queues_init_qos(supported); +#endif } + if (result) _dispatch_root_queues_init_qos(*wq_supported); } -#endif // HAVE_PTHREAD_WORKQUEUE_QOS +#endif // DISPATCH_USE_KEVENT_WORKQUEUE || 
HAVE_PTHREAD_WORKQUEUE_QOS #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP if (!result && !disable_wq) { -#if PTHREAD_WORKQUEUE_SPI_VERSION >= 20121218 pthread_workqueue_setdispatchoffset_np( offsetof(struct dispatch_queue_s, dq_serialnum)); -#endif r = pthread_workqueue_setdispatch_np(_dispatch_worker_thread2); #if !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK (void)dispatch_assume_zero(r); @@ -811,7 +734,7 @@ _dispatch_root_queues_init_workq(void) dispatch_root_queue_context_t qc; qc = &_dispatch_root_queue_contexts[i]; #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - if (!disable_wq) { + if (!disable_wq && qc->dgq_wq_priority != WORKQ_PRIO_INVALID) { r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, qc->dgq_wq_priority); (void)dispatch_assume_zero(r); @@ -850,6 +773,7 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, thread_pool_size = pool_size; } qc->dgq_thread_pool_size = thread_pool_size; +#if HAVE_PTHREAD_WORKQUEUES if (qc->dgq_qos) { (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); (void)dispatch_assume_zero(pthread_attr_setdetachstate( @@ -859,6 +783,7 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, &pqc->dpq_thread_attr, qc->dgq_qos, 0)); #endif } +#endif // HAVE_PTHREAD_WORKQUEUES #if USE_MACH_SEM // override the default FIFO behavior for the pool semaphores kern_return_t kr = semaphore_create(mach_task_self(), @@ -868,7 +793,7 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port); #elif USE_POSIX_SEM /* XXXRW: POSIX semaphores don't support LIFO? */ - int ret = sem_init(&pqc->dpq_thread_mediator.dsema_sem), 0, 0); + int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0); (void)dispatch_assume_zero(ret); #endif } @@ -876,11 +801,19 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, static dispatch_once_t _dispatch_root_queues_pred; +void +_dispatch_root_queues_init(void) +{ + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); +} + static void -_dispatch_root_queues_init(void *context DISPATCH_UNUSED) +_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) { - _dispatch_safe_fork = false; - if (!_dispatch_root_queues_init_workq()) { + int wq_supported; + _dispatch_fork_becomes_unsafe(); + if (!_dispatch_root_queues_init_workq(&wq_supported)) { #if DISPATCH_ENABLE_THREAD_POOL int i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { @@ -897,13 +830,12 @@ _dispatch_root_queues_init(void *context DISPATCH_UNUSED) &_dispatch_root_queue_contexts[i], 0, overcommit); } #else - DISPATCH_CRASH("Root queue initialization failed"); + DISPATCH_INTERNAL_CRASH((errno << 16) | wq_supported, + "Root queue initialization failed"); #endif // DISPATCH_ENABLE_THREAD_POOL } } -#define countof(x) (sizeof(x) / sizeof(x[0])) - DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void) @@ -935,27 +867,56 @@ libdispatch_init(void) dispatch_assert(offsetof(struct dispatch_continuation_s, do_next) == offsetof(struct dispatch_object_s, do_next)); + dispatch_assert(offsetof(struct dispatch_continuation_s, do_vtable) == + offsetof(struct dispatch_object_s, do_vtable)); dispatch_assert(sizeof(struct dispatch_apply_s) <= DISPATCH_CONTINUATION_SIZE); dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE == 0); + dispatch_assert(offsetof(struct dispatch_queue_s, dq_state) % _Alignof(uint64_t) == 0); dispatch_assert(sizeof(struct dispatch_root_queue_context_s) % 
DISPATCH_CACHELINE_SIZE == 0); + +#if HAVE_PTHREAD_WORKQUEUE_QOS + // 26497968 _dispatch_user_initiated_priority should be set for qos + // propagation to work properly + pthread_priority_t p = _pthread_qos_class_encode(qos_class_main(), 0, 0); + _dispatch_main_q.dq_priority = (dispatch_priority_t)p; + _dispatch_main_q.dq_override = p & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_USER_INITIATED, 0, 0); + _dispatch_user_initiated_priority = p; + p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_BACKGROUND, 0, 0); + _dispatch_background_priority = p; +#if DISPATCH_DEBUG + if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { + _dispatch_set_qos_class_enabled = 1; + } +#endif +#endif + +#if DISPATCH_USE_THREAD_LOCAL_STORAGE + _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); +#else _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); + _dispatch_thread_key_create(&dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); + _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); - _dispatch_thread_key_create(&dispatch_io_key, NULL); - _dispatch_thread_key_create(&dispatch_apply_key, NULL); + _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL); _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, NULL); #if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); #endif -#if !DISPATCH_USE_OS_SEMAPHORE_CACHE - _dispatch_thread_key_create(&dispatch_sema4_key, - (void (*)(void *))_dispatch_thread_semaphore_dispose); +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_thread_key_create(&dispatch_sema4_key, + _dispatch_thread_semaphore_dispose); + } +#endif #endif #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 @@ -963,14 +924,13 @@ libdispatch_init(void) DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]; #endif - _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); + _dispatch_queue_set_current(&_dispatch_main_q); _dispatch_queue_set_bound_thread(&_dispatch_main_q); #if DISPATCH_USE_PTHREAD_ATFORK (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, dispatch_atfork_parent, dispatch_atfork_child)); #endif - _dispatch_hw_config_init(); _dispatch_vtable_init(); _os_object_init(); @@ -989,17 +949,16 @@ _dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED) mach_port_t mp, mhp = mach_host_self(); kr = host_get_host_port(mhp, &mp); DISPATCH_VERIFY_MIG(kr); - if (!kr) { + if (fastpath(!kr)) { // mach_host_self returned the HOST_PRIV port kr = mach_port_deallocate(mach_task_self(), mhp); DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); mhp = mp; } else if (kr != KERN_INVALID_ARGUMENT) { (void)dispatch_assume_zero(kr); } - if (!dispatch_assume(mhp)) { - DISPATCH_CRASH("Could not get unprivileged host port"); + if (!fastpath(mhp)) { + DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port"); } _dispatch_mach_host_port = mhp; } @@ -1013,6 +972,59 @@ _dispatch_get_mach_host_port(void) } #endif +#if DISPATCH_USE_THREAD_LOCAL_STORAGE +#include +#include + +#ifdef SYS_gettid +DISPATCH_ALWAYS_INLINE +static inline pid_t +gettid(void) +{ + return (pid_t) syscall(SYS_gettid); +} +#else 
+#error "SYS_gettid unavailable on this system" +#endif + +#define _tsd_call_cleanup(k, f) do { \ + if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ + } while (0) + +void +_libdispatch_tsd_cleanup(void *ctx) +{ + struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; + + _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); + _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); + _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); + _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup); + _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, + NULL); + _tsd_call_cleanup(dispatch_defaultpriority_key, NULL); +#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_bcounter_key, NULL); +#endif +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + _tsd_call_cleanup(dispatch_sema4_key, _dispatch_thread_semaphore_dispose); +#endif + _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); + _tsd_call_cleanup(dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); + tsd->tid = 0; +} + +DISPATCH_NOINLINE +void +libdispatch_tsd_init(void) +{ + pthread_setspecific(__dispatch_tsd_key, &__dispatch_tsd); + __dispatch_tsd.tid = gettid(); +} +#endif + DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void) @@ -1025,9 +1037,12 @@ dispatch_atfork_child(void) _dispatch_mach_host_port = MACH_VOUCHER_NULL; #endif _voucher_atfork_child(); - if (_dispatch_safe_fork) { + if (!_dispatch_is_multithreaded_inline()) { + // clear the _PROHIBIT bit if set + _dispatch_unsafe_fork = 0; return; } + _dispatch_unsafe_fork = 0; _dispatch_child_of_unsafe_fork = true; _dispatch_main_q.dq_items_head = crash; @@ -1091,34 +1106,67 @@ _dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { #define DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent) \ ((concurrent) ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL) +#define DISPATCH_QUEUE_ATTR_INACTIVE2IDX(inactive) \ + ((inactive) ? 
DQA_INDEX_INACTIVE : DQA_INDEX_ACTIVE) + +#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY2IDX(frequency) \ + (frequency) + #define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio)) #define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)]) static inline dispatch_queue_attr_t _dispatch_get_queue_attr(qos_class_t qos, int prio, - _dispatch_queue_attr_overcommit_t overcommit, bool concurrent) + _dispatch_queue_attr_overcommit_t overcommit, + dispatch_autorelease_frequency_t frequency, + bool concurrent, bool inactive) { return (dispatch_queue_attr_t)&_dispatch_queue_attrs [DISPATCH_QUEUE_ATTR_QOS2IDX(qos)] [DISPATCH_QUEUE_ATTR_PRIO2IDX(prio)] [DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit)] - [DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent)]; + [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY2IDX(frequency)] + [DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent)] + [DISPATCH_QUEUE_ATTR_INACTIVE2IDX(inactive)]; +} + +dispatch_queue_attr_t +_dispatch_get_default_queue_attr(void) +{ + return _dispatch_get_queue_attr(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + _dispatch_queue_attr_overcommit_unspecified, + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false); } dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, dispatch_qos_class_t qos_class, int relative_priority) { - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + if (!_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, - _dispatch_queue_attr_overcommit_unspecified, false); + dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } return _dispatch_get_queue_attr(qos_class, relative_priority, - dqa->dqa_overcommit, dqa->dqa_concurrent); + dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, + dqa->dqa_concurrent, dqa->dqa_inactive); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_initially_inactive(dispatch_queue_attr_t dqa) +{ + if (!slowpath(dqa)) { + dqa = _dispatch_get_default_queue_attr(); + } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + } + return _dispatch_get_queue_attr(dqa->dqa_qos_class, + dqa->dqa_relative_priority, dqa->dqa_overcommit, + dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, true); } dispatch_queue_attr_t @@ -1126,15 +1174,38 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, bool overcommit) { if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, - _dispatch_queue_attr_overcommit_unspecified, false); + dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } return _dispatch_get_queue_attr(dqa->dqa_qos_class, dqa->dqa_relative_priority, overcommit ? 
_dispatch_queue_attr_overcommit_enabled : - _dispatch_queue_attr_overcommit_disabled, dqa->dqa_concurrent); + _dispatch_queue_attr_overcommit_disabled, + dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, + dqa->dqa_inactive); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa, + dispatch_autorelease_frequency_t frequency) +{ + switch (frequency) { + case DISPATCH_AUTORELEASE_FREQUENCY_INHERIT: + case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: + case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: + break; + default: + return DISPATCH_BAD_INPUT; + } + if (!slowpath(dqa)) { + dqa = _dispatch_get_default_queue_attr(); + } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + } + return _dispatch_get_queue_attr(dqa->dqa_qos_class, + dqa->dqa_relative_priority, dqa->dqa_overcommit, + frequency, dqa->dqa_concurrent, dqa->dqa_inactive); } #pragma mark - @@ -1148,92 +1219,173 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, // we use 'xadd' on Intel, so the initial value == next assigned unsigned long volatile _dispatch_queue_serial_numbers = 16; -dispatch_queue_t -dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, - dispatch_queue_t tq) +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, + dispatch_queue_t tq, bool legacy) { #if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK // Be sure the root queue priorities are set dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init); + _dispatch_root_queues_init_once); #endif - bool disallow_tq = (slowpath(dqa) && dqa != DISPATCH_QUEUE_CONCURRENT); if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, - _dispatch_queue_attr_overcommit_unspecified, false); + dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH("Invalid queue attribute"); - } - dispatch_queue_t dq = _dispatch_alloc(DISPATCH_VTABLE(queue), - sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); - _dispatch_queue_init(dq); - if (label) { - dq->dq_label = strdup(label); + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } + + // + // Step 1: Normalize arguments (qos, overcommit, tq) + // + qos_class_t qos = dqa->dqa_qos_class; - _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; - if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { - // Serial queues default to overcommit! - overcommit = dqa->dqa_concurrent ? - _dispatch_queue_attr_overcommit_disabled : - _dispatch_queue_attr_overcommit_enabled; +#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE && + !_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) { + qos = _DISPATCH_QOS_CLASS_USER_INITIATED; } -#if HAVE_PTHREAD_WORKQUEUE_QOS - dq->dq_priority = _pthread_qos_class_encode(qos, dqa->dqa_relative_priority, - overcommit == _dispatch_queue_attr_overcommit_enabled ? 
- _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0); #endif - if (dqa->dqa_concurrent) { - dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; - } - if (!tq) { - if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { - qos = _DISPATCH_QOS_CLASS_DEFAULT; - } + bool maintenance_fallback = false; #if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE && + maintenance_fallback = true; +#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + if (maintenance_fallback) { + if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_USER_INITIATED; + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) { + qos = _DISPATCH_QOS_CLASS_BACKGROUND; } -#endif - bool maintenance_fallback = false; -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - maintenance_fallback = true; -#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (maintenance_fallback) { - if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && - !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_BACKGROUND; - } + } + + _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; + if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) { + if (tq->do_targetq) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and " + "a non-global target queue"); } + } - tq = _dispatch_get_root_queue(qos, overcommit == + if (tq && !tq->do_targetq && + tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + // Handle discrepancies between attr and target queue, attributes win + if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { + if (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { + overcommit = _dispatch_queue_attr_overcommit_enabled; + } else { + overcommit = _dispatch_queue_attr_overcommit_disabled; + } + } + if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { + tq = _dispatch_get_root_queue_with_overcommit(tq, + overcommit == _dispatch_queue_attr_overcommit_enabled); + } else { + tq = NULL; + } + } else if (tq && !tq->do_targetq) { + // target is a pthread or runloop root queue, setting QoS or overcommit + // is disallowed + if (overcommit != _dispatch_queue_attr_overcommit_unspecified) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute " + "and use this kind of target queue"); + } + if (qos != _DISPATCH_QOS_CLASS_UNSPECIFIED) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify a QoS attribute " + "and use this kind of target queue"); + } + } else { + if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { + // Serial queues default to overcommit! + overcommit = dqa->dqa_concurrent ? + _dispatch_queue_attr_overcommit_disabled : + _dispatch_queue_attr_overcommit_enabled; + } + } + if (!tq) { + qos_class_t tq_qos = qos == _DISPATCH_QOS_CLASS_UNSPECIFIED ? 
+			_DISPATCH_QOS_CLASS_DEFAULT : qos;
+		tq = _dispatch_get_root_queue(tq_qos, overcommit ==
 				_dispatch_queue_attr_overcommit_enabled);
 		if (slowpath(!tq)) {
-			DISPATCH_CLIENT_CRASH("Invalid queue attribute");
+			DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
 		}
+	}
+
+	//
+	// Step 2: Initialize the queue
+	//
+
+	if (legacy) {
+		// if any of these attributes is specified, use non-legacy classes
+		if (dqa->dqa_inactive || dqa->dqa_autorelease_frequency) {
+			legacy = false;
+		}
+	}
+
+	const void *vtable;
+	dispatch_queue_flags_t dqf = 0;
+	if (legacy) {
+		vtable = DISPATCH_VTABLE(queue);
+	} else if (dqa->dqa_concurrent) {
+		vtable = DISPATCH_VTABLE(queue_concurrent);
 	} else {
-		_dispatch_retain(tq);
-		if (disallow_tq) {
-			// TODO: override target queue's qos/overcommit ?
-			DISPATCH_CLIENT_CRASH("Invalid combination of target queue & "
-					"queue attribute");
+		vtable = DISPATCH_VTABLE(queue_serial);
+	}
+	switch (dqa->dqa_autorelease_frequency) {
+	case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
+		dqf |= DQF_AUTORELEASE_NEVER;
+		break;
+	case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
+		dqf |= DQF_AUTORELEASE_ALWAYS;
+		break;
+	}
+	if (label) {
+		const char *tmp = _dispatch_strdup_if_mutable(label);
+		if (tmp != label) {
+			dqf |= DQF_LABEL_NEEDS_FREE;
+			label = tmp;
 		}
+	}
+
+	dispatch_queue_t dq = _dispatch_alloc(vtable,
+			sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD);
+	_dispatch_queue_init(dq, dqf, dqa->dqa_concurrent ?
+			DISPATCH_QUEUE_WIDTH_MAX : 1, dqa->dqa_inactive);
+
+	dq->dq_label = label;
+
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	dq->dq_priority = (dispatch_priority_t)_pthread_qos_class_encode(qos,
+			dqa->dqa_relative_priority,
+			overcommit == _dispatch_queue_attr_overcommit_enabled ?
+			_PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0);
+#endif
+	_dispatch_retain(tq);
+	if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) {
+		// legacy way of inheriting the QoS from the target
 		_dispatch_queue_priority_inherit_from_target(dq, tq);
 	}
-	_dispatch_queue_set_override_priority(dq);
+	if (!dqa->dqa_inactive) {
+		_dispatch_queue_atomic_flags_set(tq, DQF_TARGETED);
+	}
 	dq->do_targetq = tq;
 	_dispatch_object_debug(dq, "%s", __func__);
 	return _dispatch_introspection_queue_create(dq);
 }
 
+dispatch_queue_t
+dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
+		dispatch_queue_t tq)
+{
+	return _dispatch_queue_create_with_target(label, dqa, tq, false);
+}
+
 dispatch_queue_t
 dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
 {
-	return dispatch_queue_create_with_target(label, attr,
-			DISPATCH_TARGET_QUEUE_DEFAULT);
+	return _dispatch_queue_create_with_target(label, attr,
+			DISPATCH_TARGET_QUEUE_DEFAULT, true);
 }
 
 dispatch_queue_t
@@ -1247,20 +1399,45 @@ dispatch_queue_create_with_accounting_override_voucher(const char *label,
 }
 
 void
-_dispatch_queue_destroy(dispatch_object_t dou)
+_dispatch_queue_destroy(dispatch_queue_t dq)
 {
-	dispatch_queue_t dq = dou._dq;
+	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
+
+	if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
+		initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE;
+	}
+	if (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE) {
+		// dispatch_cancel_and_wait may apply overrides in a racy way with
+		// the source cancellation finishing. This race is expensive and not
+		// really worthwhile to resolve since the source becomes dead anyway.
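+		// (A sketch of the check below, in terms of the helpers used in
+		// this function; the override bit is the only state a source may
+		// leak at destruction, everything else must match the pristine
+		// state:
+		//     expected = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
+		//     ok = (dq_state & ~DISPATCH_QUEUE_HAS_OVERRIDE) == expected;
+		// where the mask is only applied for sources.)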
+		dq_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE;
+	}
+	if (slowpath(dq_state != initial_state)) {
+		if (_dq_state_drain_locked(dq_state)) {
+			DISPATCH_CLIENT_CRASH(dq, "Release of a locked queue");
+		}
+#ifndef __LP64__
+		dq_state >>= 32;
+#endif
+		DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
+				"Release of a queue with corrupt state");
+	}
 	if (slowpath(dq == _dispatch_queue_get_current())) {
-		DISPATCH_CRASH("Release of a queue by itself");
+		DISPATCH_CLIENT_CRASH(dq, "Release of a queue by itself");
 	}
 	if (slowpath(dq->dq_items_tail)) {
-		DISPATCH_CRASH("Release of a queue while items are enqueued");
+		DISPATCH_CLIENT_CRASH(dq->dq_items_tail,
+				"Release of a queue while items are enqueued");
 	}
 
-	// trash the tail queue so that use after free will crash
+	// trash the queue so that use after free will crash
+	dq->dq_items_head = (void *)0x200;
 	dq->dq_items_tail = (void *)0x200;
+	// poison the state with something that is suspended and is easy to spot
+	dq->dq_state = 0xdead000000000000;
 
-	dispatch_queue_t dqsq = dispatch_atomic_xchg2o(dq, dq_specific_q,
+	dispatch_queue_t dqsq = os_atomic_xchg2o(dq, dq_specific_q,
 			(void *)0x200, relaxed);
 	if (dqsq) {
 		_dispatch_release(dqsq);
@@ -1278,30 +1455,276 @@ _dispatch_queue_dispose(dispatch_queue_t dq)
 {
 	_dispatch_object_debug(dq, "%s", __func__);
 	_dispatch_introspection_queue_dispose(dq);
-	if (dq->dq_label) {
+	if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) {
 		free((void*)dq->dq_label);
 	}
 	_dispatch_queue_destroy(dq);
 }
 
-const char *
-dispatch_queue_get_label(dispatch_queue_t dq)
+DISPATCH_NOINLINE
+static void
+_dispatch_queue_suspend_slow(dispatch_queue_t dq)
 {
-	if (slowpath(dq == DISPATCH_CURRENT_QUEUE_LABEL)) {
-		dq = _dispatch_get_current_queue();
+	uint64_t dq_state, value, delta;
+
+	_dispatch_queue_sidelock_lock(dq);
+
+	// what we want to transfer (remove from dq_state)
+	delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL;
+	// but this is a suspend so add a suspend count at the same time
+	delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL;
+	if (dq->dq_side_suspend_cnt == 0) {
+		// we subtract delta from dq_state, and we want to set this bit
+		delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
 	}
-	return dq->dq_label ? dq->dq_label : "";
+
+	os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, {
+		// unsigned underflow of the subtraction can happen because other
+		// threads could have touched this value while we were trying to
+		// acquire the lock, or because another thread raced us to do the
+		// same operation and got to the lock first.
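+		// For reference, the net effect of a successful slow suspend is,
+		// symbolically (HALF = DISPATCH_QUEUE_SUSPEND_HALF, INTERVAL =
+		// DISPATCH_QUEUE_SUSPEND_INTERVAL):
+		//     dq_state            -= (HALF - 1) * INTERVAL;
+		//     dq_side_suspend_cnt += HALF;
+		// plus setting HAS_SIDE_SUSPEND_CNT the first time, so that HALF
+		// suspensions move to the side count while the suspend being
+		// requested is applied in the same transaction.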
+ if (slowpath(os_sub_overflow(dq_state, delta, &value))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + if (slowpath(os_add_overflow(dq->dq_side_suspend_cnt, + DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { + DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); + } + return _dispatch_queue_sidelock_unlock(dq); + +retry: + _dispatch_queue_sidelock_unlock(dq); + return dx_vtable(dq)->do_suspend(dq); } -qos_class_t -dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr) +void +_dispatch_queue_suspend(dispatch_queue_t dq) { - qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED; - int relative_priority = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t dqp = dq->dq_priority; - if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0; - qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL); + dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT); + + uint64_t dq_state, value; + + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + value = DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (slowpath(os_add_overflow(dq_state, value, &value))) { + os_atomic_rmw_loop_give_up({ + return _dispatch_queue_suspend_slow(dq); + }); + } + }); + + if (!_dq_state_is_suspended(dq_state)) { + // rdar://8181908 we need to extend the queue life for the duration + // of the call to wakeup at _dispatch_queue_resume() time. + _dispatch_retain(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_resume_slow(dispatch_queue_t dq) +{ + uint64_t dq_state, value, delta; + + _dispatch_queue_sidelock_lock(dq); + + // what we want to transfer + delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; + // but this is a resume so consume a suspend count at the same time + delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; + switch (dq->dq_side_suspend_cnt) { + case 0: + goto retry; + case DISPATCH_QUEUE_SUSPEND_HALF: + // we will transition the side count to 0, so we want to clear this bit + delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; + break; + } + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + // unsigned overflow of the addition can happen because other + // threads could have touched this value while we were trying to acquire + // the lock, or because another thread raced us to do the same operation + // and got to the lock first. 
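+		// For reference, a successful slow resume is the mirror image of
+		// _dispatch_queue_suspend_slow (same symbolic shorthand):
+		//     dq_state            += (HALF - 1) * INTERVAL;
+		//     dq_side_suspend_cnt -= HALF;
+		// clearing HAS_SIDE_SUSPEND_CNT once the side count drains, so the
+		// resume being requested is consumed in the same transaction.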
+ if (slowpath(os_add_overflow(dq_state, delta, &value))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + dq->dq_side_suspend_cnt -= DISPATCH_QUEUE_SUSPEND_HALF; + return _dispatch_queue_sidelock_unlock(dq); + +retry: + _dispatch_queue_sidelock_unlock(dq); + return dx_vtable(dq)->do_resume(dq, false); +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_resume_finalize_activation(dispatch_queue_t dq) +{ + // Step 2: run the activation finalizer + if (dx_vtable(dq)->do_finalize_activation) { + dx_vtable(dq)->do_finalize_activation(dq); + } + // Step 3: consume the suspend count + return dx_vtable(dq)->do_resume(dq, false); +} + +void +_dispatch_queue_resume(dispatch_queue_t dq, bool activate) +{ + // covers all suspend and inactive bits, including side suspend bit + const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; + // backward compatibility: only dispatch sources can abuse + // dispatch_resume() to really mean dispatch_activate() + bool resume_can_activate = (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE); + uint64_t dq_state, value; + + dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT); + + // Activation is a bit tricky as it needs to finalize before the wakeup. + // + // If after doing its updates to the suspend count and/or inactive bit, + // the last suspension related bit that would remain is the + // NEEDS_ACTIVATION one, then this function: + // + // 1. moves the state to { sc:1 i:0 na:0 } (converts the needs-activate into + // a suspend count) + // 2. runs the activation finalizer + // 3. consumes the suspend count set in (1), and finishes the resume flow + // + // Concurrently, some property setters such as setting dispatch source + // handlers or _dispatch_queue_set_target_queue try to do in-place changes + // before activation. These protect their action by taking a suspend count. + // Step (1) above cannot happen if such a setter has locked the object. 
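+	// Minimal client-side sketch of the two flows that end up here, using
+	// only public API (the label and work items are illustrative):
+	//
+	//     dispatch_queue_t q = dispatch_queue_create("com.example.q",
+	//             dispatch_queue_attr_make_initially_inactive(
+	//             DISPATCH_QUEUE_SERIAL));
+	//     dispatch_async(q, ^{ /* held until activation */ });
+	//     dispatch_activate(q);   // reaches here with activate = true
+	//
+	//     dispatch_suspend(q);    // subsequent suspensions pair with
+	//     dispatch_resume(q);     // calls here with activate = false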
+ if (activate) { + // relaxed atomic because this doesn't publish anything, this is only + // about picking the thread that gets to finalize the activation + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + if ((dq_state & suspend_bits) == + DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { + // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } + value = dq_state - DISPATCH_QUEUE_INACTIVE + - DISPATCH_QUEUE_NEEDS_ACTIVATION + + DISPATCH_QUEUE_SUSPEND_INTERVAL; + } else if (_dq_state_is_inactive(dq_state)) { + // { sc:>0 i:1 na:1 } -> { i:0 na:1 } + // simple activation because sc is not 0 + // resume will deal with na:1 later + value = dq_state - DISPATCH_QUEUE_INACTIVE; + } else { + // object already active, this is a no-op, just exit + os_atomic_rmw_loop_give_up(return); + } + }); + } else { + // release barrier needed to publish the effect of + // - dispatch_set_target_queue() + // - dispatch_set_*_handler() + // - do_finalize_activation() + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, { + if ((dq_state & suspend_bits) == DISPATCH_QUEUE_SUSPEND_INTERVAL + + DISPATCH_QUEUE_NEEDS_ACTIVATION) { + // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } + value = dq_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; + } else if (resume_can_activate && (dq_state & suspend_bits) == + DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { + // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } + value = dq_state - DISPATCH_QUEUE_INACTIVE + - DISPATCH_QUEUE_NEEDS_ACTIVATION + + DISPATCH_QUEUE_SUSPEND_INTERVAL; + } else { + value = DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (slowpath(os_sub_overflow(dq_state, value, &value))) { + // underflow means over-resume or a suspend count transfer + // to the side count is needed + os_atomic_rmw_loop_give_up({ + if (!(dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) { + goto over_resume; + } + return _dispatch_queue_resume_slow(dq); + }); + } + if (_dq_state_is_runnable(value) && + !_dq_state_drain_locked(value)) { + uint64_t full_width = value; + if (_dq_state_has_pending_barrier(value)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + value = full_width; + value &= ~DISPATCH_QUEUE_DIRTY; + value |= _dispatch_tid_self(); + } + } + } + }); + } + + if ((dq_state ^ value) & DISPATCH_QUEUE_NEEDS_ACTIVATION) { + // we cleared the NEEDS_ACTIVATION bit and we have a valid suspend count + return _dispatch_queue_resume_finalize_activation(dq); + } + + if (activate) { + // if we're still in an activate codepath here we should have + // { sc:>0 na:1 }, if not we've got a corrupt state + if (!fastpath(_dq_state_is_suspended(value))) { + DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); + } + return; + } + + if (_dq_state_is_suspended(value)) { + return; + } + + if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) { + _dispatch_release(dq); + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + + if (_dq_state_should_wakeup(value)) { + // + // seq_cst wrt state changes that were flushed and not acted upon + os_atomic_thread_fence(acquire); + pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, + _dispatch_queue_is_thread_bound(dq)); + return dx_wakeup(dq, pp, DISPATCH_WAKEUP_CONSUME); + } + return _dispatch_release_tailcall(dq); + +over_resume: + if 
(slowpath(_dq_state_is_inactive(dq_state))) { + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); + } + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); +} + +const char * +dispatch_queue_get_label(dispatch_queue_t dq) +{ + if (slowpath(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { + dq = _dispatch_get_current_queue(); + } + return dq->dq_label ? dq->dq_label : ""; +} + +qos_class_t +dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr) +{ + qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED; + int relative_priority = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t dqp = dq->dq_priority; + if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0; + qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL); #else (void)dq; #endif @@ -1316,14 +1739,12 @@ _dispatch_queue_set_width2(void *ctxt) uint32_t tmp; dispatch_queue_t dq = _dispatch_queue_get_current(); - if (w == 1 || w == 0) { - dq->dq_width = 1; - _dispatch_object_debug(dq, "%s", __func__); - return; - } if (w > 0) { tmp = (unsigned int)w; } else switch (w) { + case 0: + tmp = 1; + break; case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: tmp = dispatch_hw_config(physical_cpus); break; @@ -1336,12 +1757,15 @@ _dispatch_queue_set_width2(void *ctxt) tmp = dispatch_hw_config(logical_cpus); break; } - if (tmp > DISPATCH_QUEUE_WIDTH_MAX / 2) { - tmp = DISPATCH_QUEUE_WIDTH_MAX / 2; + if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { + tmp = DISPATCH_QUEUE_WIDTH_MAX; } - // multiply by two since the running count is inc/dec by two - // (the low bit == barrier) - dq->dq_width = (typeof(dq->dq_width))(tmp * 2); + + dispatch_queue_flags_t old_dqf, new_dqf; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + new_dqf = old_dqf & ~DQF_WIDTH_MASK; + new_dqf |= (tmp << DQF_WIDTH_SHIFT); + }); _dispatch_object_debug(dq, "%s", __func__); } @@ -1349,77 +1773,110 @@ void dispatch_queue_set_width(dispatch_queue_t dq, long width) { if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dq, QUEUE_ROOT))) { return; } - _dispatch_barrier_trysync_f(dq, (void*)(intptr_t)width, + + unsigned long type = dx_type(dq); + switch (type) { + case DISPATCH_QUEUE_LEGACY_TYPE: + case DISPATCH_QUEUE_CONCURRENT_TYPE: + break; + case DISPATCH_QUEUE_SERIAL_TYPE: + DISPATCH_CLIENT_CRASH(type, "Cannot set width of a serial queue"); + default: + DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); + } + + _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width, _dispatch_queue_set_width2); } -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol static void -_dispatch_set_target_queue2(void *ctxt) +_dispatch_queue_legacy_set_target_queue(void *ctxt) { - dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(), tq = ctxt; -#if HAVE_PTHREAD_WORKQUEUE_QOS - // see _dispatch_queue_wakeup_with_qos_slow - mach_msg_timeout_t timeout = 1; - mach_port_t th; + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_queue_t tq = ctxt; + dispatch_queue_t otq = dq->do_targetq; - while (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, MACH_PORT_NULL, - _dispatch_thread_port(), &th, acquire)) { - _dispatch_thread_switch(th, DISPATCH_YIELD_THREAD_SWITCH_OPTION, - timeout++); + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq); + _dispatch_bug_deprecated("Changing the target of a queue " + "already targeted by other 
dispatch objects"); } -#endif + _dispatch_queue_priority_inherit_from_target(dq, tq); - prev_dq = dq->do_targetq; + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_class_wakeup() + _dispatch_queue_sidelock_lock(dq); +#endif dq->do_targetq = tq; - _dispatch_release(prev_dq); +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_class_wakeup() + _dispatch_queue_sidelock_unlock(dq); +#endif + _dispatch_object_debug(dq, "%s", __func__); - dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, release); + _dispatch_introspection_target_queue_changed(dq); + _dispatch_release_tailcall(otq); } void -dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) +_dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) { - DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, dq); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { - return; - } - unsigned long type = dx_metatype(dou._do); - if (slowpath(!dq)) { - bool is_concurrent_q = (type == _DISPATCH_QUEUE_TYPE && - slowpath(dou._dq->dq_width > 1)); - dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, + dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && + dq->do_targetq); + + if (slowpath(!tq)) { + bool is_concurrent_q = (dq->dq_width > 1); + tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, !is_concurrent_q); } - // TODO: put into the vtable - switch(type) { - case _DISPATCH_QUEUE_TYPE: - case _DISPATCH_SOURCE_TYPE: - _dispatch_retain(dq); - return _dispatch_barrier_trysync_f(dou._dq, dq, - _dispatch_set_target_queue2); - case _DISPATCH_IO_TYPE: - return _dispatch_io_set_target_queue(dou._dchannel, dq); - default: { - dispatch_queue_t prev_dq; - _dispatch_retain(dq); - prev_dq = dispatch_atomic_xchg2o(dou._do, do_targetq, dq, release); - if (prev_dq) _dispatch_release(prev_dq); - _dispatch_object_debug(dou._do, "%s", __func__); - return; + + if (_dispatch_queue_try_inactive_suspend(dq)) { + _dispatch_object_set_target_queue_inline(dq, tq); + return dx_vtable(dq)->do_resume(dq, false); + } + + if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { + DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of a queue or " + "source with an accounting override voucher " + "after it has been activated"); + } + + unsigned long type = dx_type(dq); + switch (type) { + case DISPATCH_QUEUE_LEGACY_TYPE: + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_bug_deprecated("Changing the target of a queue " + "already targeted by other dispatch objects"); } + break; + case DISPATCH_SOURCE_KEVENT_TYPE: + case DISPATCH_MACH_CHANNEL_TYPE: + _dispatch_ktrace1(DISPATCH_PERF_post_activate_retarget, dq); + _dispatch_bug_deprecated("Changing the target of a source " + "after it has been activated"); + break; + + case DISPATCH_QUEUE_SERIAL_TYPE: + case DISPATCH_QUEUE_CONCURRENT_TYPE: + DISPATCH_CLIENT_CRASH(type, "Cannot change the target of this queue " + "after it has been activated"); + default: + DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); } + + _dispatch_retain(tq); + return _dispatch_barrier_trysync_or_async_f(dq, tq, + _dispatch_queue_legacy_set_target_queue); } #pragma mark - -#pragma mark dispatch_pthread_root_queue +#pragma mark dispatch_mgr_queue -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_pthread_root_queue_context_s 
_dispatch_mgr_root_queue_pthread_context; static struct dispatch_root_queue_context_s @@ -1432,18 +1889,18 @@ static struct dispatch_root_queue_context_s }}}; static struct dispatch_queue_s _dispatch_mgr_root_queue = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, .do_ctxt = &_dispatch_mgr_root_queue_context, .dq_label = "com.apple.root.libdispatch-manager", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_POOL, + .dq_override = DISPATCH_SATURATED_OVERRIDE, .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 3, }; +#endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE static struct { volatile int prio; volatile qos_class_t qos; @@ -1456,6 +1913,7 @@ static dispatch_once_t _dispatch_mgr_sched_pred; // TODO: switch to "event-reflector thread" property +#if HAVE_PTHREAD_WORKQUEUE_QOS // Must be kept in sync with list of qos classes in sys/qos.h static const int _dispatch_mgr_sched_qos2prio[] = { [_DISPATCH_QOS_CLASS_MAINTENANCE] = 4, @@ -1465,13 +1923,18 @@ static const int _dispatch_mgr_sched_qos2prio[] = { [_DISPATCH_QOS_CLASS_USER_INITIATED] = 37, [_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47, }; +#endif // HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) { struct sched_param param; +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES pthread_attr_t *attr; attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; +#else + pthread_attr_t a, *attr = &a; +#endif (void)dispatch_assume_zero(pthread_attr_init(attr)); (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, &_dispatch_mgr_sched.policy)); @@ -1489,7 +1952,9 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) _dispatch_mgr_sched.default_prio = param.sched_priority; _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; } +#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES DISPATCH_NOINLINE static pthread_t * _dispatch_mgr_root_queue_init(void) @@ -1510,8 +1975,8 @@ _dispatch_mgr_root_queue_init(void) (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0)); } - _dispatch_mgr_q.dq_priority = _pthread_qos_class_encode(qos, 0, 0); - _dispatch_queue_set_override_priority(&_dispatch_mgr_q); + _dispatch_mgr_q.dq_priority = + (dispatch_priority_t)_pthread_qos_class_encode(qos, 0, 0); } #endif param.sched_priority = _dispatch_mgr_sched.prio; @@ -1558,7 +2023,9 @@ _dispatch_mgr_priority_init(void) return _dispatch_mgr_priority_apply(); } } +#endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES DISPATCH_NOINLINE static void _dispatch_mgr_priority_raise(const pthread_attr_t *attr) @@ -1567,27 +2034,80 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) struct sched_param param; (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); #if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = 0; + qos_class_t q, qos = 0; (void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL); if (qos) { param.sched_priority = _dispatch_mgr_sched_qos2prio[qos]; - qos_class_t q = _dispatch_mgr_sched.qos; - do if 
(q >= qos) {
-			break;
-		} while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched,
-				qos, q, qos, &q, relaxed)));
+		os_atomic_rmw_loop2o(&_dispatch_mgr_sched, qos, q, qos, relaxed, {
+			if (q >= qos) os_atomic_rmw_loop_give_up(break);
+		});
 	}
 #endif
-	int p = _dispatch_mgr_sched.prio;
-	do if (p >= param.sched_priority) {
+	int p, prio = param.sched_priority;
+	os_atomic_rmw_loop2o(&_dispatch_mgr_sched, prio, p, prio, relaxed, {
+		if (p >= prio) os_atomic_rmw_loop_give_up(return);
+	});
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+	dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+			_dispatch_root_queues_init_once);
+	if (_dispatch_kevent_workqueue_enabled) {
+		pthread_priority_t pp = 0;
+		if (prio > _dispatch_mgr_sched.default_prio) {
+			// The values of _PTHREAD_PRIORITY_SCHED_PRI_FLAG and
+			// _PTHREAD_PRIORITY_ROOTQUEUE_FLAG overlap, but that is not
+			// problematic in this case, since the second one is only ever
+			// used on dq_priority fields.
+			// We never pass the _PTHREAD_PRIORITY_ROOTQUEUE_FLAG to a
+			// syscall; it is meaningful to libdispatch only.
+			pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG;
+		} else if (qos) {
+			pp = _pthread_qos_class_encode(qos, 0, 0);
+		}
+		if (pp) {
+			int r = _pthread_workqueue_set_event_manager_priority(pp);
+			(void)dispatch_assume_zero(r);
+		}
 		return;
-	} while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched, prio,
-			p, param.sched_priority, &p, relaxed)));
+	}
+#endif
+#if DISPATCH_USE_MGR_THREAD
 	if (_dispatch_mgr_sched.tid) {
 		return _dispatch_mgr_priority_apply();
 	}
+#endif
+}
+#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
+
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+void
+_dispatch_kevent_workqueue_init(void)
+{
+	// Initialize kevent workqueue support
+	dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+			_dispatch_root_queues_init_once);
+	if (!_dispatch_kevent_workqueue_enabled) return;
+	dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init);
+	qos_class_t qos = _dispatch_mgr_sched.qos;
+	int prio = _dispatch_mgr_sched.prio;
+	pthread_priority_t pp = 0;
+	if (qos) {
+		pp = _pthread_qos_class_encode(qos, 0, 0);
+		_dispatch_mgr_q.dq_priority = (dispatch_priority_t)pp;
+	}
+	if (prio > _dispatch_mgr_sched.default_prio) {
+		pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG;
+	}
+	if (pp) {
+		int r = _pthread_workqueue_set_event_manager_priority(pp);
+		(void)dispatch_assume_zero(r);
+	}
 }
+#endif
+
+#pragma mark -
+#pragma mark dispatch_pthread_root_queue
 
+#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
 static dispatch_queue_t
 _dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
 		const pthread_attr_t *attr, dispatch_block_t configure,
@@ -1596,27 +2116,34 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
 	dispatch_queue_t dq;
 	dispatch_root_queue_context_t qc;
 	dispatch_pthread_root_queue_context_t pqc;
+	dispatch_queue_flags_t dqf = 0;
 	size_t dqs;
 	uint8_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ?
(uint8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0;
 
 	dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD;
+	dqs = roundup(dqs, _Alignof(struct dispatch_root_queue_context_s));
 	dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs +
 			sizeof(struct dispatch_root_queue_context_s) +
 			sizeof(struct dispatch_pthread_root_queue_context_s));
 	qc = (void*)dq + dqs;
+	dispatch_assert((uintptr_t)qc % _Alignof(typeof(*qc)) == 0);
 	pqc = (void*)qc + sizeof(struct dispatch_root_queue_context_s);
-
-	_dispatch_queue_init(dq);
+	dispatch_assert((uintptr_t)pqc % _Alignof(typeof(*pqc)) == 0);
 	if (label) {
-		dq->dq_label = strdup(label);
+		const char *tmp = _dispatch_strdup_if_mutable(label);
+		if (tmp != label) {
+			dqf |= DQF_LABEL_NEEDS_FREE;
+			label = tmp;
+		}
 	}
 
-	dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK;
+	_dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, false);
+	dq->dq_label = label;
+	dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE;
+	dq->dq_override = DISPATCH_SATURATED_OVERRIDE;
 	dq->do_ctxt = qc;
 	dq->do_targetq = NULL;
-	dq->dq_running = 2;
-	dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX;
 
 	pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore);
 
 	qc->dgq_ctxt = pqc;
@@ -1651,6 +2178,36 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
 			NULL);
 }
 
+#if DISPATCH_IOHID_SPI
+dispatch_queue_t
+_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(const char *label,
+		unsigned long flags, const pthread_attr_t *attr,
+		dispatch_pthread_root_queue_observer_hooks_t observer_hooks,
+		dispatch_block_t configure)
+{
+	if (!observer_hooks->queue_will_execute ||
+			!observer_hooks->queue_did_execute) {
+		DISPATCH_CLIENT_CRASH(0, "Invalid pthread root queue observer hooks");
+	}
+	return _dispatch_pthread_root_queue_create(label, flags, attr, configure,
+			observer_hooks);
+}
+#endif
+
+dispatch_queue_t
+dispatch_pthread_root_queue_copy_current(void)
+{
+	dispatch_queue_t dq = _dispatch_queue_get_current();
+	if (!dq) return NULL;
+	while (slowpath(dq->do_targetq)) {
+		dq = dq->do_targetq;
+	}
+	if (dx_type(dq) != DISPATCH_QUEUE_GLOBAL_ROOT_TYPE ||
+			dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) {
+		return NULL;
+	}
+	return (dispatch_queue_t)_os_object_retain_with_resurrect(dq->_as_os_obj);
+}
 
 #endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
 
@@ -1658,7 +2215,7 @@
 void
 _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq)
 {
 	if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
-		DISPATCH_CRASH("Global root queue disposed");
+		DISPATCH_INTERNAL_CRASH(dq, "Global root queue disposed");
 	}
 	_dispatch_object_debug(dq, "%s", __func__);
 	_dispatch_introspection_queue_dispose(dq);
@@ -1674,7 +2231,7 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq)
 	dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
 			false);
 #endif
-	if (dq->dq_label) {
+	if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) {
 		free((void*)dq->dq_label);
 	}
 	_dispatch_queue_destroy(dq);
@@ -1684,11 +2241,10 @@
 #pragma mark dispatch_queue_specific
 
 struct dispatch_queue_specific_queue_s {
-	DISPATCH_STRUCT_HEADER(queue_specific_queue);
-	DISPATCH_QUEUE_HEADER;
+	DISPATCH_QUEUE_HEADER(queue_specific_queue);
 	TAILQ_HEAD(dispatch_queue_specific_head_s,
 			dispatch_queue_specific_s) dqsq_contexts;
-};
+} DISPATCH_QUEUE_ALIGN;
 
 struct dispatch_queue_specific_s {
 	const void *dqs_key;
@@ -1711,7 +2267,7 @@ _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq)
 	}
free(dqs); } - _dispatch_queue_destroy((dispatch_queue_t)dqsq); + _dispatch_queue_destroy(dqsq->_as_dq); } static void @@ -1721,16 +2277,16 @@ _dispatch_queue_init_specific(dispatch_queue_t dq) dqsq = _dispatch_alloc(DISPATCH_VTABLE(queue_specific_queue), sizeof(struct dispatch_queue_specific_queue_s)); - _dispatch_queue_init((dispatch_queue_t)dqsq); + _dispatch_queue_init(dqsq->_as_dq, DQF_NONE, + DISPATCH_QUEUE_WIDTH_MAX, false); dqsq->do_xref_cnt = -1; dqsq->do_targetq = _dispatch_get_root_queue( _DISPATCH_QOS_CLASS_USER_INITIATED, true); - dqsq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; dqsq->dq_label = "queue-specific"; TAILQ_INIT(&dqsq->dqsq_contexts); - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, - (dispatch_queue_t)dqsq, release))) { - _dispatch_release((dispatch_queue_t)dqsq); + if (slowpath(!os_atomic_cmpxchg2o(dq, dq_specific_q, NULL, + dqsq->_as_dq, release))) { + _dispatch_release(dqsq->_as_dq); } } @@ -1782,7 +2338,7 @@ dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, if (slowpath(!dq->dq_specific_q)) { _dispatch_queue_init_specific(dq); } - _dispatch_barrier_trysync_f(dq->dq_specific_q, dqs, + _dispatch_barrier_trysync_or_async_f(dq->dq_specific_q, dqs, _dispatch_queue_set_specific); } @@ -1842,6 +2398,18 @@ dispatch_get_specific(const void *key) return ctxt; } +#if DISPATCH_IOHID_SPI +bool +_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( + dispatch_queue_t dq) // rdar://problem/18033810 +{ + if (dq->dq_width != 1) { + DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); + } + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + return _dq_state_drain_locked_by(dq_state, _dispatch_tid_self()); +} +#endif #pragma mark - #pragma mark dispatch_queue_debug @@ -1851,13 +2419,47 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) { size_t offset = 0; dispatch_queue_t target = dq->do_targetq; - offset += dsnprintf(&buf[offset], bufsiz - offset, "target = %s[%p], " - "width = 0x%x, running = 0x%x, barrier = %d ", + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + offset += dsnprintf(&buf[offset], bufsiz - offset, + "target = %s[%p], width = 0x%x, state = 0x%016llx", target && target->dq_label ? 
target->dq_label : "", target,
-			dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1);
-	if (dq->dq_is_thread_bound) {
+			dq->dq_width, (unsigned long long)dq_state);
+	if (_dq_state_is_suspended(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d",
+				_dq_state_suspend_cnt(dq_state));
+	}
+	if (_dq_state_is_inactive(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive");
+	} else if (_dq_state_needs_activation(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", needs-activation");
+	}
+	if (_dq_state_is_enqueued(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued");
+	}
+	if (_dq_state_is_dirty(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty");
+	}
+	if (_dq_state_has_override(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", async-override");
+	}
+	mach_port_t owner = _dq_state_drain_owner(dq_state);
+	if (!_dispatch_queue_is_thread_bound(dq) && owner) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", draining on 0x%x",
+				owner);
+	}
+	if (_dq_state_is_in_barrier(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-barrier");
+	} else {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-flight = %d",
+				_dq_state_used_width(dq_state, dq->dq_width));
+	}
+	if (_dq_state_has_pending_barrier(dq_state)) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ", pending-barrier");
+	}
+	if (_dispatch_queue_is_thread_bound(dq)) {
 		offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ",
-				_dispatch_queue_get_bound_thread(dq));
+				owner);
 	}
 	return offset;
 }
@@ -1915,6 +2517,90 @@ _dispatch_queue_merge_stats(uint64_t start)
 }
 #endif
 
+#pragma mark -
+#pragma mark _dispatch_set_priority_and_mach_voucher
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+
+DISPATCH_NOINLINE
+void
+_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp,
+		mach_voucher_t kv)
+{
+	_pthread_set_flags_t pflags = 0;
+	if (pp && _dispatch_set_qos_class_enabled) {
+		pthread_priority_t old_pri = _dispatch_get_priority();
+		if (pp != old_pri) {
+			if (old_pri & _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG) {
+				pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND;
+				// when we unbind, the overcommit flag can flip, so we need
+				// to learn it from the defaultpri, see
+				// _dispatch_priority_compute_update
+				pp |= (_dispatch_get_defaultpriority() &
+						_PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+			} else {
+				// else we need to keep the one that is set in the current pri
+				pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+			}
+			if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) {
+				pflags |= _PTHREAD_SET_SELF_QOS_FLAG;
+			}
+			if (unlikely(DISPATCH_QUEUE_DRAIN_OWNER(&_dispatch_mgr_q) ==
+					_dispatch_tid_self())) {
+				DISPATCH_INTERNAL_CRASH(pp,
+						"Changing the QoS while on the manager queue");
+			}
+			if (unlikely(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) {
+				DISPATCH_INTERNAL_CRASH(pp, "Cannot raise oneself to manager");
+			}
+			if (old_pri & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) {
+				DISPATCH_INTERNAL_CRASH(old_pri,
						"Cannot turn a manager thread into a normal one");
+			}
+		}
+	}
+	if (kv != VOUCHER_NO_MACH_VOUCHER) {
+#if VOUCHER_USE_MACH_VOUCHER
+		pflags |= _PTHREAD_SET_SELF_VOUCHER_FLAG;
+#endif
+	}
+	if (!pflags) return;
+	int r = _pthread_set_properties_self(pflags, pp, kv);
+	if (r == EINVAL) {
+		DISPATCH_INTERNAL_CRASH(pp, "_pthread_set_properties_self failed");
+	}
+	(void)dispatch_assume_zero(r);
+}
+
+DISPATCH_NOINLINE
+voucher_t
+_dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, + voucher_t v, _dispatch_thread_set_self_t flags) +{ + voucher_t ov = DISPATCH_NO_VOUCHER; + mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; + if (v != DISPATCH_NO_VOUCHER) { + bool retained = flags & DISPATCH_VOUCHER_CONSUME; + ov = _voucher_get(); + if (ov == v && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (retained && v) _voucher_release_no_dispose(v); + ov = DISPATCH_NO_VOUCHER; + } else { + if (!retained && v) _voucher_retain(v); + kv = _voucher_swap_and_get_mach_voucher(ov, v); + } + } +#if !PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK + flags &= ~(_dispatch_thread_set_self_t)DISPATCH_THREAD_PARK; +#endif + if (!(flags & DISPATCH_THREAD_PARK)) { + _dispatch_set_priority_and_mach_voucher_slow(priority, kv); + } + if (ov != DISPATCH_NO_VOUCHER && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (ov) _voucher_release(ov); + ov = DISPATCH_NO_VOUCHER; + } + return ov; +} +#endif #pragma mark - #pragma mark dispatch_continuation_t @@ -1941,9 +2627,7 @@ _dispatch_cache_cleanup(void *value) } } -#if DISPATCH_USE_MEMORYSTATUS_SOURCE -int _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; - +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE DISPATCH_NOINLINE void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) @@ -1966,24 +2650,55 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) +_dispatch_continuation_slow_item_signal(dispatch_queue_t dq, + dispatch_object_t dou) { dispatch_continuation_t dc = dou._dc; + pthread_priority_t pp = dq->dq_override; - (void)dispatch_atomic_add2o(dq, dq_running, 2, acquire); - if (!DISPATCH_OBJ_IS_VTABLE(dc) && - (long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { - _dispatch_trace_continuation_pop(dq, dou); - _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, - _dispatch_queue_get_override_priority(dq)); - _dispatch_thread_semaphore_signal( - (_dispatch_thread_semaphore_t)dc->dc_other); - _dispatch_introspection_queue_item_complete(dou); - } else { - _dispatch_async_f_redirect(dq, dc, - _dispatch_queue_get_override_priority(dq)); + _dispatch_trace_continuation_pop(dq, dc); + if (pp > (dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, pp); } - _dispatch_perfmon_workitem_inc(); + _dispatch_thread_event_signal((dispatch_thread_event_t)dc->dc_other); + _dispatch_introspection_queue_item_complete(dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc) +{ + _dispatch_queue_push(dq, dc, + _dispatch_continuation_get_override_priority(dq, dc)); +} + +DISPATCH_NOINLINE +static void +_dispatch_continuation_push_sync_slow(dispatch_queue_t dq, + dispatch_continuation_t dc) +{ + _dispatch_queue_push_inline(dq, dc, + _dispatch_continuation_get_override_priority(dq, dc), + DISPATCH_WAKEUP_SLOW_WAITER); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_async2(dispatch_queue_t dq, dispatch_continuation_t dc, + bool barrier) +{ + if (fastpath(barrier || !DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) { + return _dispatch_continuation_push(dq, dc); + } + return _dispatch_async_f2(dq, dc); +} + +DISPATCH_NOINLINE +void +_dispatch_continuation_async(dispatch_queue_t dq, dispatch_continuation_t dc) +{ + _dispatch_continuation_async2(dq, dc, + dc->dc_flags & 
DISPATCH_OBJ_BARRIER_BIT); } #pragma mark - @@ -2016,18 +2731,20 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) { flags = _dispatch_block_normalize_flags(flags); - voucher_t cv = NULL; bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); + if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { - voucher = cv = voucher_copy(); + voucher = VOUCHER_CURRENT; flags |= DISPATCH_BLOCK_HAS_VOUCHER; } + if (voucher == VOUCHER_CURRENT) { + voucher = _voucher_get(); + } if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { pri = _dispatch_priority_propagate(); flags |= DISPATCH_BLOCK_HAS_PRIORITY; } dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); - if (cv) _voucher_release(cv); #if DISPATCH_DEBUG dispatch_assert(_dispatch_block_get_data(db)); #endif @@ -2037,7 +2754,7 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, dispatch_block_t dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0, block); } @@ -2047,8 +2764,10 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos_class, int relative_priority, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } flags |= DISPATCH_BLOCK_HAS_PRIORITY; pthread_priority_t pri = 0; #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -2062,7 +2781,7 @@ dispatch_block_t dispatch_block_create_with_voucher(dispatch_block_flags_t flags, voucher_t voucher, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; flags |= DISPATCH_BLOCK_HAS_VOUCHER; return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0, block); @@ -2073,8 +2792,10 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, voucher_t voucher, dispatch_qos_class_t qos_class, int relative_priority, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY); pthread_priority_t pri = 0; #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -2088,57 +2809,56 @@ void dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) { if (!_dispatch_block_flags_valid(flags)) { - DISPATCH_CLIENT_CRASH("Invalid flags passed to " + DISPATCH_CLIENT_CRASH(flags, "Invalid flags passed to " "dispatch_block_perform()"); } flags = _dispatch_block_normalize_flags(flags); struct dispatch_block_private_data_s dbpds = DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block); - return _dispatch_block_invoke(&dbpds); + return _dispatch_block_invoke_direct(&dbpds); } #define _dbpd_group(dbpd) ((dbpd)->dbpd_group) void -_dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd) +_dispatch_block_invoke_direct(const struct 
dispatch_block_private_data_s *dbcpd) { dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd; dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } if (atomic_flags & DBF_CANCELED) goto out; pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - unsigned long override = DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE; + _dispatch_thread_set_self_t adopt_flags = 0; if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { op = _dispatch_get_priority(); p = dbpd->dbpd_priority; - override |= (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || - !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ? - DISPATCH_PRIORITY_ENFORCE : 0; + if (_dispatch_block_sync_should_enforce_qos_class(flags)) { + adopt_flags |= DISPATCH_PRIORITY_ENFORCE; + } } voucher_t ov, v = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { v = dbpd->dbpd_voucher; - if (v) _voucher_retain(v); } - ov = _dispatch_adopt_priority_and_voucher(p, v, override); - dbpd->dbpd_thread = _dispatch_thread_port(); + ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); + dbpd->dbpd_thread = _dispatch_tid_self(); _dispatch_client_callout(dbpd->dbpd_block, _dispatch_Block_invoke(dbpd->dbpd_block)); _dispatch_reset_priority_and_voucher(op, ov); out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { dispatch_group_leave(_dbpd_group(dbpd)); } } } -static void +void _dispatch_block_sync_invoke(void *block) { dispatch_block_t b = block; @@ -2146,92 +2866,92 @@ _dispatch_block_sync_invoke(void *block) dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } if (atomic_flags & DBF_CANCELED) goto out; pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - unsigned long override = 0; + _dispatch_thread_set_self_t adopt_flags = 0; if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { op = _dispatch_get_priority(); p = dbpd->dbpd_priority; - override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || - !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ? 
- DISPATCH_PRIORITY_ENFORCE : 0; + if (_dispatch_block_sync_should_enforce_qos_class(flags)) { + adopt_flags |= DISPATCH_PRIORITY_ENFORCE; + } } voucher_t ov, v = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { v = dbpd->dbpd_voucher; - if (v) _voucher_retain(v); } - ov = _dispatch_adopt_priority_and_voucher(p, v, override); + ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); dbpd->dbpd_block(); _dispatch_reset_priority_and_voucher(op, ov); out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { dispatch_group_leave(_dbpd_group(dbpd)); } } - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) { + os_mpsc_queue_t oq; + oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (oq) { // balances dispatch_{,barrier_,}sync - _dispatch_release(dq); + _os_object_release_internal(oq->_as_os_obj); } } +DISPATCH_ALWAYS_INLINE static void -_dispatch_block_async_invoke_and_release(void *block) +_dispatch_block_async_invoke2(dispatch_block_t b, bool release) { - dispatch_block_t b = block; dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); - dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); - } - if (atomic_flags & DBF_CANCELED) goto out; - - pthread_priority_t p = DISPATCH_NO_PRIORITY; - unsigned long override = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ? - DISPATCH_PRIORITY_ENFORCE : 0; - p = dbpd->dbpd_priority; + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } - voucher_t v = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; - if (v) _voucher_retain(v); + if (!slowpath(atomic_flags & DBF_CANCELED)) { + dbpd->dbpd_block(); } - _dispatch_adopt_priority_and_replace_voucher(p, v, override); - dbpd->dbpd_block(); -out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { dispatch_group_leave(_dbpd_group(dbpd)); } } - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) { + os_mpsc_queue_t oq; + oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (oq) { // balances dispatch_{,barrier_,group_}async - _dispatch_release(dq); + _os_object_release_internal_inline(oq->_as_os_obj); + } + if (release) { + Block_release(b); } - Block_release(b); } -void -dispatch_block_cancel(dispatch_block_t db) +static void +_dispatch_block_async_invoke(void *block) +{ + _dispatch_block_async_invoke2(block, false); +} + +static void +_dispatch_block_async_invoke_and_release(void *block) +{ + _dispatch_block_async_invoke2(block, true); +} + +void +dispatch_block_cancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_cancel()"); } - (void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); + (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, 
DBF_CANCELED, relaxed); } long @@ -2239,7 +2959,7 @@ dispatch_block_testcancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_testcancel()"); } return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); @@ -2250,14 +2970,14 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_wait()"); } - unsigned int flags = dispatch_atomic_or_orig2o(dbpd, dbpd_atomic_flags, + unsigned int flags = os_atomic_or_orig2o(dbpd, dbpd_atomic_flags, DBF_WAITING, relaxed); if (slowpath(flags & (DBF_WAITED | DBF_WAITING))) { - DISPATCH_CLIENT_CRASH("A block object may not be waited for " + DISPATCH_CLIENT_CRASH(flags, "A block object may not be waited for " "more than once"); } @@ -2267,41 +2987,42 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) pthread_priority_t pp = _dispatch_get_priority(); - dispatch_queue_t boost_dq; - boost_dq = dispatch_atomic_xchg2o(dbpd, dbpd_queue, NULL, acquire); - if (boost_dq) { + os_mpsc_queue_t boost_oq; + boost_oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_oq) { // release balances dispatch_{,barrier_,group_}async. // Can't put the queue back in the timeout case: the block might // finish after we fell out of group_wait and see our NULL, so // neither of us would ever release. Side effect: After a _wait // that times out, subsequent waits will not boost the qos of the // still-running block. 
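 	// Usage sketch for the wait-once semantics enforced here (public API
 	// only; the queue and the work item are illustrative):
 	//     dispatch_block_t b = dispatch_block_create(0, ^{ /* work */ });
 	//     dispatch_async(q, b);
 	//     if (dispatch_block_wait(b, DISPATCH_TIME_FOREVER) == 0) {
 	//         // b finished; waiting on b again would crash (DBF_WAITED)
 	//     }
 	//     Block_release(b);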
- _dispatch_queue_wakeup_with_qos_and_release(boost_dq, pp); + dx_wakeup(boost_oq, pp, DISPATCH_WAKEUP_OVERRIDING | + DISPATCH_WAKEUP_CONSUME); } mach_port_t boost_th = dbpd->dbpd_thread; if (boost_th) { - _dispatch_thread_override_start(boost_th, pp); + _dispatch_thread_override_start(boost_th, pp, dbpd); } - int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed); - if (slowpath(performed > 1 || (boost_th && boost_dq))) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (slowpath(performed > 1 || (boost_th && boost_oq))) { + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + "run more than once and waited for"); } long ret = dispatch_group_wait(_dbpd_group(dbpd), timeout); if (boost_th) { - _dispatch_thread_override_end(boost_th); + _dispatch_thread_override_end(boost_th, dbpd); } if (ret) { // timed out: reverse our changes - (void)dispatch_atomic_and2o(dbpd, dbpd_atomic_flags, + (void)os_atomic_and2o(dbpd, dbpd_atomic_flags, ~DBF_WAITING, relaxed); } else { - (void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags, + (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_WAITED, relaxed); // don't need to re-test here: the second call would see // the first call's WAITING @@ -2316,18 +3037,81 @@ dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_notify()"); } - int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed); + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); if (slowpath(performed > 1)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and observed"); + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + "run more than once and observed"); } return dispatch_group_notify(_dbpd_group(dbpd), queue, notification_block); } +DISPATCH_NOINLINE +void +_dispatch_continuation_init_slow(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, dispatch_block_flags_t flags) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(dc->dc_ctxt); + dispatch_block_flags_t block_flags = dbpd->dbpd_flags; + uintptr_t dc_flags = dc->dc_flags; + os_mpsc_queue_t oq = dqu._oq; + + // balanced in d_block_async_invoke_and_release or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, oq, relaxed)) { + _os_object_retain_internal_inline(oq->_as_os_obj); + } + + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + dc->dc_func = _dispatch_block_async_invoke_and_release; + } else { + dc->dc_func = _dispatch_block_async_invoke; + } + + flags |= block_flags; + if (block_flags & DISPATCH_BLOCK_HAS_PRIORITY) { + _dispatch_continuation_priority_set(dc, dbpd->dbpd_priority, flags); + } else { + _dispatch_continuation_priority_set(dc, dc->dc_priority, flags); + } + if (block_flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DISPATCH_OBJ_BARRIER_BIT; + } + if (block_flags & DISPATCH_BLOCK_HAS_VOUCHER) { + voucher_t v = dbpd->dbpd_voucher; + dc->dc_voucher = v ? 
_voucher_retain(v) : NULL; + dc_flags |= DISPATCH_OBJ_ENFORCE_VOUCHER; + _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); + _dispatch_voucher_ktrace_dc_push(dc); + } else { + _dispatch_continuation_voucher_set(dc, oq, flags); + } + dc_flags |= DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT; + dc->dc_flags = dc_flags; +} + +void +_dispatch_continuation_update_bits(dispatch_continuation_t dc, + uintptr_t dc_flags) +{ + dc->dc_flags = dc_flags; + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { + dc->dc_func = _dispatch_block_async_invoke_and_release; + } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { + dc->dc_func = _dispatch_call_block_and_release; + } + } else { + if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { + dc->dc_func = _dispatch_block_async_invoke; + } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { + dc->dc_func = _dispatch_Block_invoke(dc->dc_ctxt); + } + } +} + #endif // __BLOCKS__ #pragma mark - @@ -2335,21 +3119,13 @@ dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, DISPATCH_NOINLINE static void -_dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *ctxt, +_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) + dispatch_block_flags_t flags, uintptr_t dc_flags) { dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); - - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - _dispatch_queue_push(dq, dc, pp); + _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); + _dispatch_continuation_async(dq, dc); } DISPATCH_ALWAYS_INLINE @@ -2358,31 +3134,15 @@ _dispatch_barrier_async_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp, dispatch_block_flags_t flags) { - dispatch_continuation_t dc; + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; - dc = fastpath(_dispatch_continuation_alloc_cacheonly()); - if (!dc) { - return _dispatch_barrier_async_f_slow(dq, ctxt, func, pp, flags); + if (!fastpath(dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, pp, flags, dc_flags); } - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - _dispatch_queue_push(dq, dc, pp); -} - -DISPATCH_NOINLINE -static void -_dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) -{ - return _dispatch_barrier_async_f2(dq, ctxt, func, pp, flags); + _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); + _dispatch_continuation_push(dq, dc); } DISPATCH_NOINLINE @@ -2390,7 +3150,7 @@ void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_barrier_async_f2(dq, ctxt, func, 0, 0); + _dispatch_barrier_async_f2(dq, ctxt, func, 0, 0); } DISPATCH_NOINLINE @@ -2398,28 +3158,24 @@ void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { 
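	// "Detached" here means the continuation deliberately carries
	// DISPATCH_NO_VOUCHER and DISPATCH_NO_PRIORITY: it captures neither the
	// submitting thread's voucher nor its QoS, which is what the old
	// DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_NO_VOUCHER flags encoded.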
- return _dispatch_barrier_async_f2(dq, ctxt, func, 0, - DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_NO_VOUCHER); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_queue_push(dq, dc, 0); } #ifdef __BLOCKS__ void dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) { - dispatch_function_t func = _dispatch_call_block_and_release; - pthread_priority_t pp = 0; - dispatch_block_flags_t flags = 0; - if (slowpath(_dispatch_block_has_private_data(work))) { - func = _dispatch_block_async_invoke_and_release; - pp = _dispatch_block_get_priority(work); - flags = _dispatch_block_get_flags(work); - // balanced in d_block_async_invoke_and_release or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } - } - _dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), func, pp, flags); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; + + _dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags); + _dispatch_continuation_push(dq, dc); } #endif @@ -2427,130 +3183,144 @@ dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) #pragma mark dispatch_async void -_dispatch_async_redirect_invoke(void *ctxt) +_dispatch_async_redirect_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) { - struct dispatch_continuation_s *dc = ctxt; + dispatch_thread_frame_s dtf; struct dispatch_continuation_s *other_dc = dc->dc_other; - dispatch_queue_t old_dq, dq = dc->dc_data, rq; + dispatch_invoke_flags_t ctxt_flags = (dispatch_invoke_flags_t)dc->dc_ctxt; + // if we went through _dispatch_root_queue_push_override, + // the "right" root queue was stuffed into dc_func + dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func; + dispatch_queue_t dq = dc->dc_data, rq, old_dq; + struct _dispatch_identity_s di; + + pthread_priority_t op, dp, old_dp; + + if (ctxt_flags) { + flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; + flags |= ctxt_flags; + } + old_dq = _dispatch_get_current_queue(); + if (assumed_rq) { + _dispatch_queue_set_current(assumed_rq); + _dispatch_root_queue_identity_assume(&di, 0); + } + + old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); + op = dq->dq_override; + if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start(_dispatch_tid_self(), op); + // Ensure that the root queue sees that this thread was overridden. 
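	// (Recording the override in the thread's default-priority state lets
	// the workqueue logic reset the boosted priority once this drain ends,
	// instead of leaking the override into unrelated work items.)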
+ _dispatch_set_defaultpriority_override(); + } - old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority); - _dispatch_continuation_pop(other_dc); + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, + DISPATCH_OBJ_CONSUME_BIT, { + _dispatch_continuation_pop(other_dc, dq, flags); + }); + _dispatch_thread_frame_pop(&dtf); + if (assumed_rq) { + _dispatch_root_queue_identity_restore(&di); + _dispatch_queue_set_current(old_dq); + } _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); rq = dq->do_targetq; while (slowpath(rq->do_targetq) && rq != old_dq) { - if (dispatch_atomic_sub2o(rq, dq_running, 2, relaxed) == 0) { - _dispatch_queue_wakeup(rq); - } + _dispatch_non_barrier_complete(rq); rq = rq->do_targetq; } - if (dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0) { - _dispatch_queue_wakeup(dq); - } - _dispatch_release(dq); -} - -static inline void -_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc, - pthread_priority_t pp) -{ - uint32_t running = 2; + _dispatch_non_barrier_complete(dq); - // Find the queue to redirect to - do { - if (slowpath(dq->dq_items_tail) || - slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) || - slowpath(dq->dq_width == 1)) { - break; - } - running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - if (slowpath(running & 1) || slowpath(running > dq->dq_width)) { - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); - break; - } - dq = dq->do_targetq; - } while (slowpath(dq->do_targetq)); + if (dtf.dtf_deferred) { + struct dispatch_object_s *dou = dtf.dtf_deferred; + return _dispatch_queue_drain_deferred_invoke(dq, flags, 0, dou); + } - _dispatch_queue_push_wakeup(dq, dc, pp, running == 0); + _dispatch_release_tailcall(dq); } -DISPATCH_NOINLINE -static void -_dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_continuation_t other_dc, pthread_priority_t pp) +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_async_redirect_wrap(dispatch_queue_t dq, dispatch_object_t dou) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = _dispatch_async_redirect_invoke; - dc->dc_ctxt = dc; + dou._do->do_next = NULL; + dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); + dc->dc_func = NULL; + dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq); dc->dc_data = dq; - dc->dc_other = other_dc; - dc->dc_priority = 0; - dc->dc_voucher = NULL; - + dc->dc_other = dou._do; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; _dispatch_retain(dq); - dq = dq->do_targetq; - if (slowpath(dq->do_targetq)) { - return _dispatch_async_f_redirect2(dq, dc, pp); - } - - _dispatch_queue_push(dq, dc, pp); + return dc; } DISPATCH_NOINLINE static void -_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc, - pthread_priority_t pp) +_dispatch_async_f_redirect(dispatch_queue_t dq, + dispatch_object_t dou, pthread_priority_t pp) { - uint32_t running = 2; + if (!slowpath(_dispatch_object_is_redirection(dou))) { + dou._dc = _dispatch_async_redirect_wrap(dq, dou); + } + dq = dq->do_targetq; - do { - if (slowpath(dq->dq_items_tail) - || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { - break; - } - running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - if (slowpath(running > 
dq->dq_width)) { - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); + // Find the queue to redirect to + while (slowpath(DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) { + if (!fastpath(_dispatch_queue_try_acquire_async(dq))) { break; } - if (!slowpath(running & 1)) { - return _dispatch_async_f_redirect(dq, dc, pp); + if (!dou._dc->dc_ctxt) { + // find first queue in descending target queue order that has + // an autorelease frequency set, and use that as the frequency for + // this continuation. + dou._dc->dc_ctxt = (void *) + (uintptr_t)_dispatch_queue_autorelease_frequency(dq); } - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); - // We might get lucky and find that the barrier has ended by now - } while (!(running & 1)); + dq = dq->do_targetq; + } + + _dispatch_queue_push(dq, dou, pp); +} - _dispatch_queue_push_wakeup(dq, dc, pp, running == 0); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_redirect(dispatch_queue_t dq, + struct dispatch_object_s *dc) +{ + _dispatch_trace_continuation_pop(dq, dc); + // This is a re-redirect, overrides have already been applied + // by _dispatch_async_f2. + // However, we want to end up on the root queue matching `dc` qos, so pick up + // the current override of `dq` which includes dc's override (and maybe more) + _dispatch_async_f_redirect(dq, dc, dq->dq_override); + _dispatch_introspection_queue_item_complete(dc); } DISPATCH_NOINLINE static void -_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) +_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) { - dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); - - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); + // reserving non barrier width + // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width + // equivalent), so we have to check that this thread hasn't enqueued + // anything ahead of this call or we can break ordering + if (slowpath(dq->dq_items_tail)) { + return _dispatch_continuation_push(dq, dc); + } - // No fastpath/slowpath hint because we simply don't know - if (dq->do_targetq) { - return _dispatch_async_f2(dq, dc, pp); + if (slowpath(!_dispatch_queue_try_acquire_async(dq))) { + return _dispatch_continuation_push(dq, dc); } - _dispatch_queue_push(dq, dc, pp); + return _dispatch_async_f_redirect(dq, dc, + _dispatch_continuation_get_override_priority(dq, dc)); } DISPATCH_ALWAYS_INLINE @@ -2558,39 +3328,22 @@ static inline void _dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp, dispatch_block_flags_t flags) { - dispatch_continuation_t dc; - - // No fastpath/slowpath hint because we simply don't know - if (dq->dq_width == 1 || flags & DISPATCH_BLOCK_BARRIER) { - return _dispatch_barrier_async_f(dq, ctxt, func, pp, flags); - } - - dc = fastpath(_dispatch_continuation_alloc_cacheonly()); - if (!dc) { - return _dispatch_async_f_slow(dq, ctxt, func, pp, flags); - } + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); -
_dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - // No fastpath/slowpath hint because we simply don't know - if (dq->do_targetq) { - return _dispatch_async_f2(dq, dc, pp); + if (!fastpath(dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, pp, flags, dc_flags); } - _dispatch_queue_push(dq, dc, pp); + _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); + _dispatch_continuation_async2(dq, dc, false); } DISPATCH_NOINLINE void dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_async_f(dq, ctxt, func, 0, 0); + _dispatch_async_f(dq, ctxt, func, 0, 0); } DISPATCH_NOINLINE @@ -2598,28 +3351,18 @@ void dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_async_f(dq, ctxt, func, 0, - DISPATCH_BLOCK_ENFORCE_QOS_CLASS); + _dispatch_async_f(dq, ctxt, func, 0, DISPATCH_BLOCK_ENFORCE_QOS_CLASS); } #ifdef __BLOCKS__ void dispatch_async(dispatch_queue_t dq, void (^work)(void)) { - dispatch_function_t func = _dispatch_call_block_and_release; - dispatch_block_flags_t flags = 0; - pthread_priority_t pp = 0; - if (slowpath(_dispatch_block_has_private_data(work))) { - func = _dispatch_block_async_invoke_and_release; - pp = _dispatch_block_get_priority(work); - flags = _dispatch_block_get_flags(work); - // balanced in d_block_async_invoke_and_release or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } - } - _dispatch_async_f(dq, _dispatch_Block_copy(work), func, pp, flags); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + _dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags); + _dispatch_continuation_async(dq, dc); } #endif @@ -2628,35 +3371,12 @@ dispatch_async(dispatch_queue_t dq, void (^work)(void)) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) +_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_continuation_t dc) { - dispatch_continuation_t dc; - - _dispatch_retain(dg); dispatch_group_enter(dg); - - dc = _dispatch_continuation_alloc(); - - unsigned long barrier = (flags & DISPATCH_BLOCK_BARRIER) ? 
- DISPATCH_OBJ_BARRIER_BIT : 0; - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT | - barrier); - dc->dc_func = func; - dc->dc_ctxt = ctxt; dc->dc_data = dg; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - // No fastpath/slowpath hint because we simply don't know - if (dq->dq_width != 1 && !barrier && dq->do_targetq) { - return _dispatch_async_f2(dq, dc, pp); - } - - _dispatch_queue_push(dq, dc, pp); + _dispatch_continuation_async(dq, dc); } DISPATCH_NOINLINE @@ -2664,7 +3384,11 @@ void dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_group_async_f(dg, dq, ctxt, func, 0, 0); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_GROUP_BIT; + + _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, 0, dc_flags); + _dispatch_continuation_group_async(dg, dq, dc); } #ifdef __BLOCKS__ @@ -2672,68 +3396,65 @@ void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) { - dispatch_function_t func = _dispatch_call_block_and_release; - dispatch_block_flags_t flags = 0; - pthread_priority_t pp = 0; - if (slowpath(_dispatch_block_has_private_data(db))) { - func = _dispatch_block_async_invoke_and_release; - pp = _dispatch_block_get_priority(db); - flags = _dispatch_block_get_flags(db); - // balanced in d_block_async_invoke_and_release or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(db), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } - } - _dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), func, pp, flags); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_GROUP_BIT; + + _dispatch_continuation_init(dc, dq, db, 0, 0, dc_flags); + _dispatch_continuation_group_async(dg, dq, dc); } #endif #pragma mark - -#pragma mark dispatch_function_invoke - -static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp); +#pragma mark dispatch_sync / dispatch_barrier_sync recurse and invoke DISPATCH_NOINLINE static void -_dispatch_function_invoke_slow(dispatch_queue_t dq, void *ctxt, +_dispatch_sync_function_invoke_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - voucher_t ov = _dispatch_adopt_queue_override_voucher(dq); + voucher_t ov; + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); + ov = _dispatch_set_priority_and_voucher(0, dq->dq_override_voucher, 0); _dispatch_client_callout(ctxt, func); _dispatch_perfmon_workitem_inc(); - _dispatch_reset_voucher(ov); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_reset_voucher(ov, 0); + _dispatch_thread_frame_pop(&dtf); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_function_invoke(dispatch_queue_t dq, void *ctxt, +_dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { if (slowpath(dq->dq_override_voucher != DISPATCH_NO_VOUCHER)) { - return _dispatch_function_invoke_slow(dq, ctxt, func); + return _dispatch_sync_function_invoke_slow(dq, ctxt, func); } - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - 
_dispatch_thread_setspecific(dispatch_queue_key, dq); + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); _dispatch_client_callout(ctxt, func); _dispatch_perfmon_workitem_inc(); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_thread_frame_pop(&dtf); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_function_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); } void _dispatch_sync_recurse_invoke(void *ctxt) { dispatch_continuation_t dc = ctxt; - _dispatch_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); + _dispatch_sync_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, +_dispatch_sync_function_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { struct dispatch_continuation_s dc = { @@ -2744,70 +3465,118 @@ _dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, _dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke, pp); } +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_non_barrier_complete(dq); +} + +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_sync_function_recurse(dq, ctxt, func, pp); + _dispatch_non_barrier_complete(dq); +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_non_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_introspection_non_barrier_sync_begin(dq, func); + if (slowpath(dq->do_targetq->do_targetq)) { + return _dispatch_non_barrier_sync_f_recurse(dq, ctxt, func, pp); + } + _dispatch_non_barrier_sync_f_invoke(dq, ctxt, func); +} + #pragma mark - #pragma mark dispatch_barrier_sync -static void _dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); - -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline _dispatch_thread_semaphore_t -_dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, - bool lock) +DISPATCH_NOINLINE +static void +_dispatch_barrier_complete(dispatch_queue_t dq) { - _dispatch_thread_semaphore_t sema; - dispatch_continuation_t dc = dou._dc; - mach_port_t th; + uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + + dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + + if (slowpath(dq->dq_items_tail)) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } - if (DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) != - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) { - return 0; + if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { + // someone enqueued a slow item at the head + // looping may be its last chance + return _dispatch_try_lock_transfer_or_wakeup(dq); } - _dispatch_trace_continuation_pop(dq, dc); - _dispatch_perfmon_workitem_inc(); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_sync_function_recurse(dq, ctxt, func, pp); + _dispatch_barrier_complete(dq); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t 
func) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_barrier_complete(dq); +} - th = (mach_port_t)dc->dc_data; - dc = dc->dc_ctxt; - dq = dc->dc_data; - sema = (_dispatch_thread_semaphore_t)dc->dc_other; - if (lock) { - (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); - // rdar://problem/9032024 running lock must be held until sync_f_slow - // returns - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); +DISPATCH_ALWAYS_INLINE +static void +_dispatch_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_introspection_barrier_sync_begin(dq, func); + if (slowpath(dq->do_targetq->do_targetq)) { + return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp); } - _dispatch_introspection_queue_item_complete(dou); - _dispatch_wqthread_override_start(th, - _dispatch_queue_get_override_priority(dq)); - return sema ? sema : MACH_PORT_DEAD; + _dispatch_barrier_sync_f_invoke(dq, ctxt, func); } +typedef struct dispatch_barrier_sync_context_s { + struct dispatch_continuation_s dbsc_dc; + dispatch_thread_frame_s dbsc_dtf; +} *dispatch_barrier_sync_context_t; + static void _dispatch_barrier_sync_f_slow_invoke(void *ctxt) { - dispatch_continuation_t dc = ctxt; + dispatch_barrier_sync_context_t dbsc = ctxt; + dispatch_continuation_t dc = &dbsc->dbsc_dc; dispatch_queue_t dq = dc->dc_data; - _dispatch_thread_semaphore_t sema; - sema = (_dispatch_thread_semaphore_t)dc->dc_other; + dispatch_thread_event_t event = (dispatch_thread_event_t)dc->dc_other; dispatch_assert(dq == _dispatch_queue_get_current()); #if DISPATCH_COCOA_COMPAT - if (slowpath(dq->dq_is_thread_bound)) { + if (slowpath(_dispatch_queue_is_thread_bound(dq))) { + dispatch_assert(_dispatch_thread_frame_get_current() == NULL); + + // the block runs on the thread the queue is bound to and not + // on the calling thread, but we mean to see the calling thread's + // dispatch thread frames, so we fake the link, and then undo it + _dispatch_thread_frame_set_current(&dbsc->dbsc_dtf); // The queue is bound to a non-dispatch thread (e.g.
main thread) - _dispatch_continuation_voucher_adopt(dc); + _dispatch_continuation_voucher_adopt(dc, DISPATCH_NO_VOUCHER, + DISPATCH_OBJ_CONSUME_BIT); _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - dispatch_atomic_store2o(dc, dc_func, NULL, release); - _dispatch_thread_semaphore_signal(sema); // release - return; + os_atomic_store2o(dc, dc_func, NULL, release); + _dispatch_thread_frame_set_current(NULL); } #endif - (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); - // rdar://9032024 running lock must be held until sync_f_slow returns - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - _dispatch_thread_semaphore_signal(sema); // release + _dispatch_thread_event_signal(event); // release } DISPATCH_NOINLINE @@ -2816,337 +3585,283 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, ctxt, func); + // see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + return _dispatch_sync_function_invoke(dq, ctxt, func); } - if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG); - _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - struct dispatch_continuation_s dc = { - .dc_data = dq, + + if (!pp) { + pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + dispatch_thread_event_s event; + _dispatch_thread_event_init(&event); + struct dispatch_barrier_sync_context_s dbsc = { + .dbsc_dc = { + .dc_data = dq, #if DISPATCH_COCOA_COMPAT - .dc_func = func, - .dc_ctxt = ctxt, + .dc_func = func, + .dc_ctxt = ctxt, #endif - .dc_other = (void*)sema, + .dc_other = &event, + } }; #if DISPATCH_COCOA_COMPAT // It's preferred to execute synchronous blocks on the current thread - // due to thread-local side effects, garbage collection, etc. However, - // blocks submitted to the main thread MUST be run on the main thread - if (slowpath(dq->dq_is_thread_bound)) { - _dispatch_continuation_voucher_set(&dc, 0); + // due to thread-local side effects, etc. 
However, blocks submitted + // to the main thread MUST be run on the main thread + if (slowpath(_dispatch_queue_is_thread_bound(dq))) { + // consumed by _dispatch_barrier_sync_f_slow_invoke + // or in the DISPATCH_COCOA_COMPAT hunk below + _dispatch_continuation_voucher_set(&dbsc.dbsc_dc, dq, 0); + // save frame linkage for _dispatch_barrier_sync_f_slow_invoke + _dispatch_thread_frame_save_state(&dbsc.dbsc_dtf); + // thread bound queues cannot mutate their target queue hierarchy + // so it's fine to look now + _dispatch_introspection_barrier_sync_begin(dq, func); } #endif + uint32_t th_self = _dispatch_tid_self(); struct dispatch_continuation_s dbss = { - .do_vtable = (void *)(DISPATCH_OBJ_BARRIER_BIT | - DISPATCH_OBJ_SYNC_SLOW_BIT), + .dc_flags = DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT, .dc_func = _dispatch_barrier_sync_f_slow_invoke, - .dc_ctxt = &dc, - .dc_data = (void*)(uintptr_t)_dispatch_thread_port(), + .dc_ctxt = &dbsc, + .dc_data = (void*)(uintptr_t)th_self, .dc_priority = pp, + .dc_other = &event, + .dc_voucher = DISPATCH_NO_VOUCHER, }; - _dispatch_queue_push(dq, &dbss, - _dispatch_continuation_get_override_priority(dq, &dbss)); - _dispatch_thread_semaphore_wait(sema); // acquire - _dispatch_put_thread_semaphore(sema); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { + DISPATCH_CLIENT_CRASH(dq, "dispatch_barrier_sync called on queue " + "already owned by current thread"); + } - pthread_priority_t p = _dispatch_queue_get_override_priority(dq); - if (p > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_continuation_push_sync_slow(dq, &dbss); + _dispatch_thread_event_wait(&event); // acquire + _dispatch_thread_event_destroy(&event); + if (_dispatch_queue_received_override(dq, pp)) { // Ensure that the root queue sees that this thread was overridden. + // pairs with the _dispatch_wqthread_override_start in + // _dispatch_continuation_slow_item_signal _dispatch_set_defaultpriority_override(); } #if DISPATCH_COCOA_COMPAT // Queue bound to a non-dispatch thread - if (dc.dc_func == NULL) { + if (dbsc.dbsc_dc.dc_func == NULL) { return; + } else if (dbsc.dbsc_dc.dc_voucher) { + // this almost never happens, unless a dispatch_sync() onto a thread + // bound queue went to the slow path at the same time dispatch_main() + // is called, or the queue is detached from the runloop. 
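	// (On this path the bound thread never invoked the continuation, so the
	// voucher retained before the push was never adopted; it is released
	// here to keep the reference count balanced before invoking func below.)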
+ _voucher_release(dbsc.dbsc_dc.dc_voucher); } #endif - _dispatch_queue_set_thread(dq); - if (slowpath(dq->do_targetq->do_targetq)) { - _dispatch_function_recurse(dq, ctxt, func, pp); - } else { - _dispatch_function_invoke(dq, ctxt, func); - } - _dispatch_queue_clear_thread(dq); + _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); +} - if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL) && - dq->dq_running == 2) { - // rdar://problem/8290662 "lock transfer" - sema = _dispatch_queue_drain_one_barrier_sync(dq); - if (sema) { - _dispatch_thread_semaphore_signal(sema); // release - return; - } - } - (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, release); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_barrier_sync_f2(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { + // global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case + return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); } + // + // TODO: the more correct thing to do would be to set dq_override to the qos + // of the thread that just acquired the barrier lock here. Unwinding that + // would slow down the uncontended fastpath however. + // + // The chosen tradeoff is that if an enqueue on a lower priority thread + // contends with this fastpath, this thread may receive a useless override. + // Improving this requires the override level to be part of the atomic + // dq_state + // + _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); } DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f2(dispatch_queue_t dq) -{ - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { - // rdar://problem/8290662 "lock transfer" - _dispatch_thread_semaphore_t sema; - sema = _dispatch_queue_drain_one_barrier_sync(dq); - if (sema) { - (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); - // rdar://9032024 running lock must be held until sync_f_slow - // returns: increment by 2 and decrement by 1 - (void)dispatch_atomic_inc2o(dq, dq_running, relaxed); - _dispatch_thread_semaphore_signal(sema); - return; - } - } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } +_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_barrier_sync_f2(dq, ctxt, func, pp); } DISPATCH_NOINLINE -static void -_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, +void +dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - _dispatch_queue_set_thread(dq); - _dispatch_function_invoke(dq, ctxt, func); - _dispatch_queue_clear_thread(dq); - if (slowpath(dq->dq_items_tail)) { - return _dispatch_barrier_sync_f2(dq); - } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } + _dispatch_barrier_sync_f2(dq, ctxt, func, 0); } +#ifdef __BLOCKS__ DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_block_with_private_data(dispatch_queue_t dq, + void (^work)(void), dispatch_block_flags_t flags) { - _dispatch_queue_set_thread(dq); - _dispatch_function_recurse(dq, ctxt, func, pp); - 
_dispatch_queue_clear_thread(dq); - if (slowpath(dq->dq_items_tail)) { - return _dispatch_barrier_sync_f2(dq); - } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } -} + pthread_priority_t pp = _dispatch_block_get_priority(work); -DISPATCH_NOINLINE -static void -_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) -{ - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); - } - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { - // global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); - } - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp); - } - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); -} - -DISPATCH_NOINLINE -void -dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0); - } - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { - // global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0); + flags |= _dispatch_block_get_flags(work); + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + pthread_priority_t tp = _dispatch_get_priority(); + tp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp < tp) { + pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; + } else if (_dispatch_block_sync_should_enforce_qos_class(flags)) { + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } } - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, 0); + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(_dispatch_block_get_data(work), + dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain(dq); } - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); -} - -#ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_barrier_sync_slow(dispatch_queue_t dq, void (^work)(void)) -{ - bool has_pd = _dispatch_block_has_private_data(work); - dispatch_function_t func = _dispatch_Block_invoke(work); - pthread_priority_t pp = 0; - if (has_pd) { - func = _dispatch_block_sync_invoke; - pp = _dispatch_block_get_priority(work); - dispatch_block_flags_t flags = _dispatch_block_get_flags(work); - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pthread_priority_t tp = _dispatch_get_priority(); - if (pp < tp) { - pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; - } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) { - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - } - // balanced in d_block_sync_invoke or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } -#if DISPATCH_COCOA_COMPAT - } else if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) { - // Blocks submitted to the main queue MUST be run on the main thread, - // under GC we must Block_copy in order to notify the thread-local - // garbage collector that the 
objects are transferring to another thread - // rdar://problem/7176237&7181849&7458685 - work = _dispatch_Block_copy(work); - func = _dispatch_call_block_and_release; + if (flags & DISPATCH_BLOCK_BARRIER) { + _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, pp); + } else { + _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, pp); } -#endif - _dispatch_barrier_sync_f(dq, work, func, pp); } void dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) { - if (slowpath(dq->dq_is_thread_bound) || - slowpath(_dispatch_block_has_private_data(work))) { - return _dispatch_barrier_sync_slow(dq, work); + if (slowpath(_dispatch_block_has_private_data(work))) { + dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER; + return _dispatch_sync_block_with_private_data(dq, work, flags); } dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif -DISPATCH_NOINLINE -static void -_dispatch_barrier_trysync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_queue_set_thread(dq); - _dispatch_function_invoke(dq, ctxt, func); - _dispatch_queue_clear_thread(dq); - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } -} - DISPATCH_NOINLINE void -_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, +_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { // Use for mutation of queue-/source-internal state only, ignores target // queue hierarchy! - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) - || slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, - acquire))) { + if (!fastpath(_dispatch_queue_try_acquire_barrier_sync(dq))) { return _dispatch_barrier_async_detached_f(dq, ctxt, func); } - _dispatch_barrier_trysync_f_invoke(dq, ctxt, func); + // skip the recursion because it's about the queue state only + _dispatch_barrier_sync_f_invoke(dq, ctxt, func); } #pragma mark - #pragma mark dispatch_sync +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_complete(dispatch_queue_t dq) +{ + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; + if (_dq_state_is_runnable(new_state)) { + if (!_dq_state_is_runnable(old_state)) { + // we're making a FULL -> non FULL transition + new_state |= DISPATCH_QUEUE_DIRTY; + } + if (!_dq_state_drain_locked(new_state)) { + uint64_t full_width = new_state; + if (_dq_state_has_pending_barrier(new_state)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state = full_width; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= _dispatch_tid_self(); + } + } + } + }); + + if (_dq_state_is_in_barrier(new_state)) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + if (!_dq_state_is_runnable(old_state)) { + _dispatch_queue_try_wakeup(dq, new_state, 0); + } +} + DISPATCH_NOINLINE static void _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp, bool wakeup) + pthread_priority_t pp) { - if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG); - _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); + 
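/*
 * Minimal sketch, not part of this patch: together with the equivalent check
 * in _dispatch_barrier_sync_f_slow above, the drain-lock ownership test in
 * this slow path turns the classic same-queue deadlock into a deterministic
 * DISPATCH_CLIENT_CRASH instead of a silent hang. With cq created as
 * DISPATCH_QUEUE_CONCURRENT (example_sync_self_deadlock is illustrative only):
 */
#include <dispatch/dispatch.h>

static void
example_sync_self_deadlock(dispatch_queue_t cq)
{
	dispatch_barrier_sync(cq, ^{
		// The calling thread owns cq's drain lock for the duration of the
		// barrier, so this inner sync can never make progress; the slow
		// path reports "dispatch_sync called on queue already owned by
		// current thread" instead of deadlocking.
		dispatch_sync(cq, ^{ });
	});
}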
dispatch_assert(dq->do_targetq); + if (!pp) { + pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + dispatch_thread_event_s event; + _dispatch_thread_event_init(&event); + uint32_t th_self = _dispatch_tid_self(); struct dispatch_continuation_s dc = { - .do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT, + .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, #if DISPATCH_INTROSPECTION .dc_func = func, .dc_ctxt = ctxt, - .dc_data = (void*)(uintptr_t)_dispatch_thread_port(), #endif - .dc_other = (void*)sema, + .dc_data = (void*)(uintptr_t)th_self, + .dc_other = &event, .dc_priority = pp, + .dc_voucher = DISPATCH_NO_VOUCHER, }; - _dispatch_queue_push_wakeup(dq, &dc, - _dispatch_continuation_get_override_priority(dq, &dc), wakeup); - _dispatch_thread_semaphore_wait(sema); - _dispatch_put_thread_semaphore(sema); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { + DISPATCH_CLIENT_CRASH(dq, "dispatch_sync called on queue " + "already owned by current thread"); + } - pthread_priority_t p = _dispatch_queue_get_override_priority(dq); - if (p > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_continuation_push_sync_slow(dq, &dc); + _dispatch_thread_event_wait(&event); // acquire + _dispatch_thread_event_destroy(&event); + if (_dispatch_queue_received_override(dq, pp)) { // Ensure that the root queue sees that this thread was overridden. + // pairs with the _dispatch_wqthread_override_start in + // _dispatch_continuation_slow_item_signal _dispatch_set_defaultpriority_override(); } - - if (slowpath(dq->do_targetq->do_targetq)) { - _dispatch_function_recurse(dq, ctxt, func, pp); - } else { - _dispatch_function_invoke(dq, ctxt, func); - } - - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_function_invoke(dq, ctxt, func); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) -{ - _dispatch_function_recurse(dq, ctxt, func, pp); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); - } + _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); } +DISPATCH_ALWAYS_INLINE static inline void _dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_sync_f_slow(dq, ctxt, func, pp, false); - } - uint32_t running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - // re-check suspension after barrier check - if (slowpath(running & 1) || _dispatch_object_suspended(dq)) { - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_slow(dq, ctxt, func, pp, running == 0); + // reserving non barrier width + // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width + // equivalent), so we have to check that this thread hasn't enqueued + // anything ahead of this call or we can break ordering + if (slowpath(dq->dq_items_tail)) { + return 
_dispatch_sync_f_slow(dq, ctxt, func, pp); } - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_sync_f_recurse(dq, ctxt, func, pp); + // concurrent queues do not respect width on sync + if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { + return _dispatch_sync_f_slow(dq, ctxt, func, pp); } - _dispatch_sync_f_invoke(dq, ctxt, func); + _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); } DISPATCH_NOINLINE @@ -3154,148 +3869,182 @@ static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { - if (fastpath(dq->dq_width == 1)) { - return _dispatch_barrier_sync_f(dq, ctxt, func, pp); + if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { + return _dispatch_sync_f2(dq, ctxt, func, pp); } - if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, ctxt, func); - } - _dispatch_sync_f2(dq, ctxt, func, pp); + return _dispatch_barrier_sync_f(dq, ctxt, func, pp); } DISPATCH_NOINLINE void dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - if (fastpath(dq->dq_width == 1)) { - return dispatch_barrier_sync_f(dq, ctxt, func); - } - if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, ctxt, func); + if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { + return _dispatch_sync_f2(dq, ctxt, func, 0); } - _dispatch_sync_f2(dq, ctxt, func, 0); + return dispatch_barrier_sync_f(dq, ctxt, func); } #ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_sync_slow(dispatch_queue_t dq, void (^work)(void)) +void +dispatch_sync(dispatch_queue_t dq, void (^work)(void)) { - bool has_pd = _dispatch_block_has_private_data(work); - if (has_pd && (_dispatch_block_get_flags(work) & DISPATCH_BLOCK_BARRIER)) { - return _dispatch_barrier_sync_slow(dq, work); + if (slowpath(_dispatch_block_has_private_data(work))) { + return _dispatch_sync_block_with_private_data(dq, work, 0); } - dispatch_function_t func = _dispatch_Block_invoke(work); - pthread_priority_t pp = 0; - if (has_pd) { - func = _dispatch_block_sync_invoke; - pp = _dispatch_block_get_priority(work); - dispatch_block_flags_t flags = _dispatch_block_get_flags(work); - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pthread_priority_t tp = _dispatch_get_priority(); - if (pp < tp) { - pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; - } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) { - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } + dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); +} +#endif + +#pragma mark - +#pragma mark dispatch_trysync + +struct trysync_context { + dispatch_queue_t tc_dq; + void *tc_ctxt; + dispatch_function_t tc_func; +}; + +DISPATCH_NOINLINE +static int +_dispatch_trysync_recurse(dispatch_queue_t dq, + struct trysync_context *tc, bool barrier) +{ + dispatch_queue_t tq = dq->do_targetq; + + if (barrier) { + if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { + return EWOULDBLOCK; } - // balanced in d_block_sync_invoke or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); + } else { + // check nothing was queued by the current + // thread ahead of this call. 
_dispatch_queue_try_reserve_sync_width + // ignores the ENQUEUED bit which could cause it to miss a barrier_async + // made by the same thread just before. + if (slowpath(dq->dq_items_tail)) { + return EWOULDBLOCK; + } + // concurrent queues do not respect width on sync + if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { + return EWOULDBLOCK; } -#if DISPATCH_COCOA_COMPAT - } else if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) { - // Blocks submitted to the main queue MUST be run on the main thread, - // under GC we must Block_copy in order to notify the thread-local - // garbage collector that the objects are transferring to another thread - // rdar://problem/7176237&7181849&7458685 - work = _dispatch_Block_copy(work); - func = _dispatch_call_block_and_release; -#endif } - if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, work, func); + + int rc = 0; + if (_dispatch_queue_cannot_trysync(tq)) { + _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); + rc = ENOTSUP; + } else if (tq->do_targetq) { + rc = _dispatch_trysync_recurse(tq, tc, tq->dq_width == 1); + if (rc == ENOTSUP) { + _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); + } + } else { + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, tq); + _dispatch_sync_function_invoke(tc->tc_dq, tc->tc_ctxt, tc->tc_func); + _dispatch_thread_frame_pop(&dtf); + } + if (barrier) { + _dispatch_barrier_complete(dq); + } else { + _dispatch_non_barrier_complete(dq); } - _dispatch_sync_f2(dq, work, func, pp); + return rc; } -void -dispatch_sync(dispatch_queue_t dq, void (^work)(void)) +DISPATCH_NOINLINE +bool +_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t f) { - if (fastpath(dq->dq_width == 1)) { - return dispatch_barrier_sync(dq, work); + if (slowpath(!dq->do_targetq)) { + _dispatch_sync_function_invoke(dq, ctxt, f); + return true; } - if (slowpath(dq->dq_is_thread_bound) || - slowpath(_dispatch_block_has_private_data(work)) ) { - return _dispatch_sync_slow(dq, work); + if (slowpath(_dispatch_queue_cannot_trysync(dq))) { + return false; } - dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); + struct trysync_context tc = { + .tc_dq = dq, + .tc_func = f, + .tc_ctxt = ctxt, + }; + return _dispatch_trysync_recurse(dq, &tc, true) == 0; } -#endif - -#pragma mark - -#pragma mark dispatch_after -void -_dispatch_after_timer_callback(void *ctxt) +DISPATCH_NOINLINE +bool +_dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f) { - dispatch_continuation_t dc = ctxt, dc1; - dispatch_source_t ds = dc->dc_data; - dc1 = _dispatch_continuation_free_cacheonly(dc); - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - dispatch_source_cancel(ds); - dispatch_release(ds); - if (slowpath(dc1)) { - _dispatch_continuation_free_to_cache_limit(dc1); + if (slowpath(!dq->do_targetq)) { + _dispatch_sync_function_invoke(dq, ctxt, f); + return true; } + if (slowpath(_dispatch_queue_cannot_trysync(dq))) { + return false; + } + struct trysync_context tc = { + .tc_dq = dq, + .tc_func = f, + .tc_ctxt = ctxt, + }; + return _dispatch_trysync_recurse(dq, &tc, dq->dq_width == 1) == 0; } -DISPATCH_NOINLINE -void -dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, - dispatch_function_t func) +#pragma mark - +#pragma mark dispatch_after + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_after(dispatch_time_t when, 
dispatch_queue_t queue, + void *ctxt, void *handler, bool block) { - uint64_t delta, leeway; dispatch_source_t ds; + uint64_t leeway, delta; if (when == DISPATCH_TIME_FOREVER) { #if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH( - "dispatch_after_f() called with 'when' == infinity"); + DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity"); #endif return; } delta = _dispatch_timeout(when); if (delta == 0) { - return dispatch_async_f(queue, ctxt, func); + if (block) { + return dispatch_async(queue, handler); + } + return dispatch_async_f(queue, ctxt, handler); } leeway = delta / 10; // + if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; // this function can and should be optimized to not use a dispatch source - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); + ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue); dispatch_assert(ds); - // TODO: don't use a separate continuation & voucher dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); - dc->dc_func = func; - dc->dc_ctxt = ctxt; + if (block) { + _dispatch_continuation_init(dc, ds, handler, 0, 0, 0); + } else { + _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0); + } + // reference `ds` so that it doesn't show up as a leak dc->dc_data = ds; - - dispatch_set_context(ds, dc); - dispatch_source_set_event_handler_f(ds, _dispatch_after_timer_callback); + _dispatch_source_set_event_handler_continuation(ds, dc); dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway); - dispatch_resume(ds); + dispatch_activate(ds); +} + +DISPATCH_NOINLINE +void +dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, + dispatch_function_t func) +{ + _dispatch_after(when, queue, ctxt, func, false); } #ifdef __BLOCKS__ @@ -3303,138 +4052,144 @@ void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t work) { - // test before the copy of the block - if (when == DISPATCH_TIME_FOREVER) { -#if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH( - "dispatch_after() called with 'when' == infinity"); -#endif - return; - } - dispatch_after_f(when, queue, _dispatch_Block_copy(work), - _dispatch_call_block_and_release); + _dispatch_after(when, queue, NULL, work, true); } #endif #pragma mark - -#pragma mark dispatch_queue_push - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_list_slow2(dispatch_queue_t dq, pthread_priority_t pp, - struct dispatch_object_s *obj, bool retained) -{ - // The queue must be retained before dq_items_head is written in order - // to ensure that the reference is still valid when _dispatch_wakeup is - // called. Otherwise, if preempted between the assignment to - // dq_items_head and _dispatch_wakeup, the blocks submitted to the - // queue may release the last reference to the queue when invoked by - // _dispatch_queue_drain. 
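	// (The rewritten wakeup path handles this lifetime concern through the
	// dq_state machinery instead: when DISPATCH_WAKEUP_CONSUME is set, a +1
	// reference on the queue travels with the wakeup itself, as can be seen
	// in _dispatch_queue_wakeup below.)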
- if (!retained) _dispatch_retain(dq); - dq->dq_items_head = obj; - return _dispatch_queue_wakeup_with_qos_and_release(dq, pp); -} +#pragma mark dispatch_queue_wakeup DISPATCH_NOINLINE void -_dispatch_queue_push_list_slow(dispatch_queue_t dq, pthread_priority_t pp, - struct dispatch_object_s *obj, unsigned int n, bool retained) +_dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { - if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { - dispatch_assert(!retained); - dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); - return _dispatch_queue_wakeup_global2(dq, n); + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + + if (_dispatch_queue_class_probe(dq)) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + if (target) { + return _dispatch_queue_class_wakeup(dq, pp, flags, target); + } else if (pp) { + return _dispatch_queue_class_override_drainer(dq, pp, flags); + } else if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } - _dispatch_queue_push_list_slow2(dq, pp, obj, retained); } -DISPATCH_NOINLINE -void -_dispatch_queue_push_slow(dispatch_queue_t dq, pthread_priority_t pp, - struct dispatch_object_s *obj, bool retained) +#if DISPATCH_COCOA_COMPAT +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) { - if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { - dispatch_assert(!retained); - dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); - return _dispatch_queue_wakeup_global(dq); - } - _dispatch_queue_push_list_slow2(dq, pp, obj, retained); +#if TARGET_OS_MAC + return MACH_PORT_VALID(handle); +#elif defined(__linux__) + return handle >= 0; +#else +#error "runloop support not implemented on this platform" +#endif } -#pragma mark - -#pragma mark dispatch_queue_probe +DISPATCH_ALWAYS_INLINE +static inline dispatch_runloop_handle_t +_dispatch_runloop_queue_get_handle(dispatch_queue_t dq) +{ +#if TARGET_OS_MAC + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); +#elif defined(__linux__) + // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; +#else +#error "runloop support not implemented on this platform" +#endif +} -unsigned long -_dispatch_queue_probe(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_runloop_queue_set_handle(dispatch_queue_t dq, dispatch_runloop_handle_t handle) { - return _dispatch_queue_class_probe(dq); +#if TARGET_OS_MAC + dq->do_ctxt = (void *)(uintptr_t)handle; +#elif defined(__linux__) + // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL + dq->do_ctxt = (void *)(uintptr_t)(handle + 1); +#else +#error "runloop support not implemented on this platform" +#endif } +#endif // DISPATCH_COCOA_COMPAT -#if DISPATCH_COCOA_COMPAT -unsigned long -_dispatch_runloop_queue_probe(dispatch_queue_t dq) +void +_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { - if (_dispatch_queue_class_probe(dq)) { - if (dq->do_xref_cnt == -1) return true; // - return _dispatch_runloop_queue_wakeup(dq); +#if DISPATCH_COCOA_COMPAT + if (slowpath(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { + // + return _dispatch_queue_wakeup(dq, pp, flags); } - return false; -} -#endif -unsigned long -_dispatch_mgr_queue_probe(dispatch_queue_t dq) -{ if (_dispatch_queue_class_probe(dq)) { - return 
_dispatch_mgr_wakeup(dq); } - return false; + + pp = _dispatch_queue_reset_override_priority(dq, true); + if (pp) { + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + if (_dispatch_queue_class_probe(dq)) { + _dispatch_runloop_queue_poke(dq, pp, flags); + } + _dispatch_thread_override_end(owner, dq); + return; + } + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); + } +#else + return _dispatch_queue_wakeup(dq, pp, flags); +#endif } -unsigned long -_dispatch_root_queue_probe(dispatch_queue_t dq) +void +_dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { - _dispatch_queue_wakeup_global(dq); - return false; +#if DISPATCH_COCOA_COMPAT + if (_dispatch_queue_is_thread_bound(dq)) { + return _dispatch_runloop_queue_wakeup(dq, pp, flags); + } +#endif + return _dispatch_queue_wakeup(dq, pp, flags); } -#pragma mark - -#pragma mark dispatch_wakeup - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -dispatch_queue_t -_dispatch_wakeup(dispatch_object_t dou) +void +_dispatch_root_queue_wakeup(dispatch_queue_t dq, + pthread_priority_t pp DISPATCH_UNUSED, + dispatch_wakeup_flags_t flags) { - unsigned long type = dx_metatype(dou._do); - if (type == _DISPATCH_QUEUE_TYPE || type == _DISPATCH_SOURCE_TYPE) { - return _dispatch_queue_wakeup(dou._dq); - } - if (_dispatch_object_suspended(dou)) { - return NULL; - } - if (!dx_probe(dou._do)) { - return NULL; - } - if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0, - DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) { - return NULL; + if (flags & DISPATCH_WAKEUP_CONSUME) { + // see _dispatch_queue_push_set_head + dispatch_assert(flags & DISPATCH_WAKEUP_FLUSH); } - _dispatch_retain(dou._do); - dispatch_queue_t tq = dou._do->do_targetq; - _dispatch_queue_push(tq, dou._do, 0); - return tq; // libdispatch does not need this, but the Instrument DTrace - // probe does + _dispatch_global_queue_poke(dq); } +#pragma mark - +#pragma mark dispatch root queues poke + #if DISPATCH_COCOA_COMPAT static inline void -_dispatch_runloop_queue_wakeup_thread(dispatch_queue_t dq) +_dispatch_runloop_queue_class_poke(dispatch_queue_t dq) { - mach_port_t mp = (mach_port_t)dq->do_ctxt; - if (!mp) { + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { return; } + +#if TARGET_OS_MAC + mach_port_t mp = handle; kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); switch (kr) { case MACH_SEND_TIMEOUT: @@ -3445,43 +4200,56 @@ _dispatch_runloop_queue_wakeup_thread(dispatch_queue_t dq) (void)dispatch_assume_zero(kr); break; } -} - -DISPATCH_NOINLINE DISPATCH_WEAK -unsigned long -_dispatch_runloop_queue_wakeup(dispatch_queue_t dq) -{ - _dispatch_runloop_queue_wakeup_thread(dq); - return false; +#elif defined(__linux__) + int result; + do { + result = eventfd_write(handle, 1); + } while (result == -1 && errno == EINTR); + (void)dispatch_assume_zero(result); +#else +#error "runloop support not implemented on this platform" +#endif } DISPATCH_NOINLINE -static dispatch_queue_t -_dispatch_main_queue_wakeup(void) -{ - dispatch_queue_t dq = &_dispatch_main_q; - if (!dq->dq_is_thread_bound) { - return NULL; +static void +_dispatch_runloop_queue_poke(dispatch_queue_t dq, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) +{ + // it's not useful to handle WAKEUP_FLUSH because mach_msg() will have + // a release barrier, and because
when runloop queues stop being thread-bound + they have a non-optional wake-up to start being a "normal" queue + either in _dispatch_runloop_queue_xref_dispose, + or in _dispatch_queue_cleanup2() for the main thread. + + if (dq == &_dispatch_main_q) { + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + } + _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); + if (flags & DISPATCH_WAKEUP_OVERRIDING) { + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + _dispatch_thread_override_start(owner, pp, dq); + if (flags & DISPATCH_WAKEUP_WAS_OVERRIDDEN) { + _dispatch_thread_override_end(owner, dq); + } + } + _dispatch_runloop_queue_class_poke(dq); + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } - dispatch_once_f(&_dispatch_main_q_port_pred, dq, - _dispatch_runloop_queue_port_init); - _dispatch_runloop_queue_wakeup_thread(dq); - return NULL; } #endif DISPATCH_NOINLINE static void -_dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) +_dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) { dispatch_root_queue_context_t qc = dq->do_ctxt; uint32_t i = n; int r; _dispatch_debug_root_queue(dq, __func__); - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init); - #if HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL if (qc->dgq_kworkqueue != (void*)(~0ul)) @@ -3527,7 +4295,7 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) } uint32_t j, t_count; // seq_cst with atomic store to tail - t_count = dispatch_atomic_load2o(qc, dgq_thread_pool_size, seq_cst); + t_count = os_atomic_load2o(qc, dgq_thread_pool_size, ordered); do { if (!t_count) { _dispatch_root_queue_debug("pthread pool is full for root queue: " @@ -3535,12 +4303,12 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) return; } j = i > t_count ?
t_count : i; - } while (!dispatch_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, + } while (!os_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, t_count - j, &t_count, acquire)); pthread_attr_t *attr = &pqc->dpq_thread_attr; pthread_t tid, *pthr = &tid; -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES if (slowpath(dq == &_dispatch_mgr_root_queue)) { pthr = _dispatch_mgr_root_queue_init(); } @@ -3558,7 +4326,7 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) } static inline void -_dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) +_dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n) { if (!_dispatch_queue_class_probe(dq)) { return; @@ -3569,148 +4337,209 @@ _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) #if DISPATCH_USE_PTHREAD_POOL (qc->dgq_kworkqueue != (void*)(~0ul)) && #endif - !dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, n, relaxed)) { + !os_atomic_cmpxchg2o(qc, dgq_pending, 0, n, relaxed)) { _dispatch_root_queue_debug("worker thread request still pending for " "global queue: %p", dq); return; } #endif // HAVE_PTHREAD_WORKQUEUES - return _dispatch_queue_wakeup_global_slow(dq, n); + return _dispatch_global_queue_poke_slow(dq, n); } static inline void -_dispatch_queue_wakeup_global(dispatch_queue_t dq) +_dispatch_global_queue_poke(dispatch_queue_t dq) { - return _dispatch_queue_wakeup_global2(dq, 1); + return _dispatch_global_queue_poke_n(dq, 1); } -#pragma mark - -#pragma mark dispatch_queue_invoke - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -dispatch_queue_invoke2(dispatch_object_t dou, - _dispatch_thread_semaphore_t *sema_ptr) -{ - dispatch_queue_t dq = dou._dq; - dispatch_queue_t otq = dq->do_targetq; - dispatch_queue_t cq = _dispatch_queue_get_current(); - - if (slowpath(cq != otq)) { - return otq; - } - - *sema_ptr = _dispatch_queue_drain(dq); - - if (slowpath(otq != dq->do_targetq)) { - // An item on the queue changed the target queue - return dq->do_targetq; - } - return NULL; -} - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol DISPATCH_NOINLINE void -_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou, - dispatch_invoke_flags_t flags) +_dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n) { - _dispatch_queue_class_invoke(dq, dou._dc, flags, dispatch_queue_invoke2); + return _dispatch_global_queue_poke_n(dq, n); } #pragma mark - #pragma mark dispatch_queue_drain -DISPATCH_ALWAYS_INLINE -static inline struct dispatch_object_s* -_dispatch_queue_head(dispatch_queue_t dq) +void +_dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, + dispatch_invoke_flags_t flags) { - struct dispatch_object_s *dc; - _dispatch_wait_until(dc = fastpath(dq->dq_items_head)); - return dc; + _dispatch_continuation_pop_inline(dou, dq, flags); } -DISPATCH_ALWAYS_INLINE -static inline struct dispatch_object_s* -_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) +void +_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, + dispatch_invoke_flags_t flags) { - struct dispatch_object_s *next_dc; - next_dc = fastpath(dc->do_next); - dq->dq_items_head = next_dc; - if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL, - relaxed)) { - _dispatch_wait_until(next_dc = fastpath(dc->do_next)); - dq->dq_items_head = next_dc; - } - return next_dc; + _dispatch_continuation_invoke_inline(dou, 
override_voucher, flags); } -_dispatch_thread_semaphore_t -_dispatch_queue_drain(dispatch_object_t dou) +/* + * Drain comes in 2 flavours (serial/concurrent) and 2 modes + * (redirecting or not). + * + * Serial + * ~~~~~~ + * Serial drain is about serial queues (width == 1). It doesn't support + * the redirecting mode (which makes no sense for a width-1 queue) and treats + * all continuations as barriers. Bookkeeping is minimal in the serial + * flavour; most of the loop is optimized away. + * + * Serial drain stops if the width of the queue grows beyond 1. + * Going through a serial drain prevents any recursive drain from running + * in redirecting mode. + * + * Concurrent + * ~~~~~~~~~~ + * When in non-redirecting mode (meaning one of the target queues is serial), + * non-barriers and barriers alike run in the context of the drain thread. + * Slow non-barrier items are still all signaled so that they can make progress + * toward the dispatch_sync() that will serialize them all. + * + * In redirecting mode, non-barrier work items are redirected downward. + * + * Concurrent drain stops if the width of the queue becomes 1, so that the + * queue drain moves to the more efficient serial mode. + */ DISPATCH_ALWAYS_INLINE static dispatch_queue_t _dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, + uint64_t *owned_ptr, struct dispatch_object_s **dc_out, + bool serial_drain) { - dispatch_queue_t dq = dou._dq, orig_tq, old_dq; - old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - struct dispatch_object_s *dc, *next_dc; - _dispatch_thread_semaphore_t sema = 0; + dispatch_queue_t orig_tq = dq->do_targetq; + dispatch_thread_frame_s dtf; + struct dispatch_object_s *dc = NULL, *next_dc; + uint64_t owned = *owned_ptr; - // Continue draining sources after target queue change rdar://8928171 - bool check_tq = (dx_type(dq) != DISPATCH_SOURCE_KEVENT_TYPE); - - orig_tq = dq->do_targetq; - - _dispatch_thread_setspecific(dispatch_queue_key, dq); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority); - - pthread_priority_t op = _dispatch_queue_get_override_priority(dq); - pthread_priority_t dp = _dispatch_get_defaultpriority(); - dp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (op > dp) { - _dispatch_wqthread_override_start(dq->dq_thread, op); + _dispatch_thread_frame_push(&dtf, dq); + if (_dq_state_is_in_barrier(owned)) { + // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` + // but width can change while draining barrier work items, so we only + // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` + owned = DISPATCH_QUEUE_IN_BARRIER; } - //dispatch_debug_queue(dq, __func__); - while (dq->dq_items_tail) { dc = _dispatch_queue_head(dq); do { - if (DISPATCH_OBJECT_SUSPENDED(dq)) { + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { goto out; } - if (dq->dq_running > dq->dq_width) { + if (unlikely(orig_tq != dq->do_targetq)) { goto out; } - if (slowpath(orig_tq != dq->do_targetq) && check_tq) { + if (unlikely(serial_drain != (dq->dq_width == 1))) { goto out; } - bool redirect = false; - if (!fastpath(dq->dq_width == 1)) { - if (!DISPATCH_OBJ_IS_VTABLE(dc) && - (long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { - if (dq->dq_running > 1) { - goto out + if (serial_drain || _dispatch_object_is_barrier(dc)) { + if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { + goto out; + } + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_slow_item(dc)) { + owned = 0; + goto out_with_deferred; + } + } else { + if (owned ==
DISPATCH_QUEUE_IN_BARRIER) { + // we just ran barrier work items, we have to make their + // effect visible to other sync work items on other threads + // that may start coming in after this point, hence the + // release barrier + os_atomic_and2o(dq, dq_state, ~owned, release); + owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } else if (unlikely(owned == 0)) { + if (_dispatch_object_is_slow_item(dc)) { + // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + goto out_with_no_width; } - } else { - redirect = true; + owned = DISPATCH_QUEUE_WIDTH_INTERVAL; + } + + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_slow_item(dc)) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_continuation_slow_item_signal(dq, dc); + continue; + } + + if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_continuation_redirect(dq, dc); + continue; } } - next_dc = _dispatch_queue_next(dq, dc); - if (redirect) { - _dispatch_continuation_redirect(dq, dc); - continue; - } - if ((sema = _dispatch_barrier_sync_f_pop(dq, dc, true))) { - goto out; - } - _dispatch_continuation_pop(dc); + + _dispatch_continuation_pop_inline(dc, dq, flags); _dispatch_perfmon_workitem_inc(); + if (unlikely(dtf.dtf_deferred)) { + goto out_with_deferred_compute_owned; + } } while ((dc = next_dc)); } out: - _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); - return sema; + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // if we're IN_BARRIER we really own the full width too + owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } + if (dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, dc); + } + *owned_ptr = owned; + _dispatch_thread_frame_pop(&dtf); + return dc ? 
dq->do_targetq : NULL; + +out_with_no_width: + *owned_ptr = 0; + _dispatch_thread_frame_pop(&dtf); + return NULL; + +out_with_deferred_compute_owned: + if (serial_drain) { + owned = DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // if we're IN_BARRIER we really own the full width too + owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } + if (next_dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, next_dc); + } + } +out_with_deferred: + *owned_ptr = owned; + if (unlikely(!dc_out)) { + DISPATCH_INTERNAL_CRASH(dc, + "Deferred continuation on source, mach channel or mgr"); + } + *dc_out = dc; + _dispatch_thread_frame_pop(&dtf); + return dq->do_targetq; +} + +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_queue_concurrent_drain(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *owned, + struct dispatch_object_s **dc_ptr) +{ + return _dispatch_queue_drain(dq, flags, owned, dc_ptr, false); +} + +DISPATCH_NOINLINE +dispatch_queue_t +_dispatch_queue_serial_drain(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *owned, + struct dispatch_object_s **dc_ptr) +{ + flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; + return _dispatch_queue_drain(dq, flags, owned, dc_ptr, true); } #if DISPATCH_COCOA_COMPAT @@ -3718,48 +4547,49 @@ static void _dispatch_main_queue_drain(void) { dispatch_queue_t dq = &_dispatch_main_q; + dispatch_thread_frame_s dtf; + if (!dq->dq_items_tail) { return; } - struct dispatch_continuation_s marker = { - .do_vtable = NULL, - }; - struct dispatch_object_s *dmarker = (void*)▮ - _dispatch_queue_push_notrace(dq, dmarker, 0); + + if (!fastpath(_dispatch_queue_is_thread_bound(dq))) { + DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" + " after dispatch_main()"); + } + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + if (slowpath(owner != _dispatch_tid_self())) { + DISPATCH_CLIENT_CRASH(owner, "_dispatch_main_queue_callback_4CF called" + " from the wrong thread"); + } + + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); _dispatch_perfmon_start(); - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); + // hide the frame chaining when CFRunLoop + // drains the main runloop, as this should not be observable that way + _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); + pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); voucher_t voucher = _voucher_copy(); - struct dispatch_object_s *dc, *next_dc; - dc = _dispatch_queue_head(dq); + struct dispatch_object_s *dc, *next_dc, *tail; + dc = os_mpsc_capture_snapshot(dq, dq_items, &tail); do { - next_dc = _dispatch_queue_next(dq, dc); - if (dc == dmarker) { - goto out; - } - _dispatch_continuation_pop(dc); + next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); + _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); _dispatch_perfmon_workitem_inc(); } while ((dc = next_dc)); - DISPATCH_CRASH("Main queue corruption"); - -out: - if (next_dc) { - _dispatch_main_queue_wakeup(); - } else { - pthread_priority_t p = _dispatch_queue_reset_override_priority(dq); - if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_thread_override_end(dq->dq_thread); - } - } + // runloop based queues use their 
port for the queue PUBLISH pattern + // so this raw call to dx_wakeup(0) is valid + dx_wakeup(dq, 0, 0); _dispatch_voucher_debug("main queue restore", voucher); - _dispatch_reset_priority_and_voucher(old_pri, voucher); _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_thread_frame_pop(&dtf); _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); } @@ -3770,269 +4600,729 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq) if (!dq->dq_items_tail) { return false; } + dispatch_thread_frame_s dtf; _dispatch_perfmon_start(); - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); + _dispatch_thread_frame_push(&dtf, dq); pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); voucher_t voucher = _voucher_copy(); struct dispatch_object_s *dc, *next_dc; dc = _dispatch_queue_head(dq); next_dc = _dispatch_queue_next(dq, dc); - _dispatch_continuation_pop(dc); + _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); _dispatch_perfmon_workitem_inc(); + if (!next_dc) { + // runloop based queues use their port for the queue PUBLISH pattern + // so this raw call to dx_wakeup(0) is valid + dx_wakeup(dq, 0, 0); + } + _dispatch_voucher_debug("runloop queue restore", voucher); - _dispatch_reset_priority_and_voucher(old_pri, voucher); _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_thread_frame_pop(&dtf); _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); return next_dc; } #endif -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline _dispatch_thread_semaphore_t -_dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) +DISPATCH_NOINLINE +void +_dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq) { - // rdar://problem/8290662 "lock transfer" - struct dispatch_object_s *dc; - _dispatch_thread_semaphore_t sema; + dispatch_continuation_t dc_tmp, dc_start, dc_end; + struct dispatch_object_s *dc = NULL; + uint64_t dq_state, owned; + size_t count = 0; + + owned = DISPATCH_QUEUE_IN_BARRIER; + owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; +attempt_running_slow_head: + if (slowpath(dq->dq_items_tail) && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { + dc = _dispatch_queue_head(dq); + if (!_dispatch_object_is_slow_item(dc)) { + // not a slow item, needs to wake up + } else if (fastpath(dq->dq_width == 1) || + _dispatch_object_is_barrier(dc)) { + // rdar://problem/8290662 "barrier/writer lock transfer" + dc_start = dc_end = (dispatch_continuation_t)dc; + owned = 0; + count = 1; + dc = _dispatch_queue_next(dq, dc); + } else { + // "reader lock transfer" + // we must not signal semaphores immediately because our right + // for dequeuing is granted through holding the full "barrier" width + // which a signaled work item could relinquish out from our feet + dc_start = (dispatch_continuation_t)dc; + do { + // no check on width here because concurrent queues + // do not respect width for blocked readers, the thread + // is already spent anyway + dc_end = (dispatch_continuation_t)dc; + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + count++; + dc = _dispatch_queue_next(dq, dc); + } while (dc && _dispatch_object_is_slow_non_barrier(dc)); + } - // queue 
is locked, or suspended and not being drained - dc = dq->dq_items_head; - if (slowpath(!dc) || !(sema = _dispatch_barrier_sync_f_pop(dq, dc, false))){ - return 0; + if (count) { + _dispatch_queue_drain_transfer_lock(dq, owned, dc_start); + do { + // signaled job will release the continuation + dc_tmp = dc_start; + dc_start = dc_start->do_next; + _dispatch_continuation_slow_item_signal(dq, dc_tmp); + } while (dc_tmp != dc_end); + return; + } + } + + if (dc || dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) { + // the following wakeup is needed for sources + // or mach channels: when ds_pending_data is set at the same time + // as a trysync_f happens, lock transfer code above doesn't know about + // ds_pending_data or the wakeup logic, but lock transfer is useless + // for sources and mach channels in the first place. + owned = _dispatch_queue_adjust_owned(dq, owned, dc); + dq_state = _dispatch_queue_drain_unlock(dq, owned, NULL); + return _dispatch_queue_try_wakeup(dq, dq_state, 0); + } else if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { + // someone enqueued a slow item at the head + // looping may be its last chance + goto attempt_running_slow_head; } - // dequeue dc, it is a barrier sync - (void)_dispatch_queue_next(dq, dc); - return sema; } void _dispatch_mgr_queue_drain(void) { + const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; dispatch_queue_t dq = &_dispatch_mgr_q; - if (!dq->dq_items_tail) { - return _dispatch_force_cache_cleanup(); + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + if (dq->dq_items_tail) { + _dispatch_perfmon_start(); + if (slowpath(_dispatch_queue_serial_drain(dq, flags, &owned, NULL))) { + DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue"); + } + _dispatch_voucher_debug("mgr queue clear", NULL); + _voucher_clear(); + _dispatch_reset_defaultpriority_override(); + _dispatch_perfmon_end(); } - _dispatch_perfmon_start(); - if (slowpath(_dispatch_queue_drain(dq))) { - DISPATCH_CRASH("Sync onto manager queue"); + +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!_dispatch_kevent_workqueue_enabled) +#endif + { + _dispatch_force_cache_cleanup(); } - _dispatch_voucher_debug("mgr queue clear", NULL); - _voucher_clear(); - _dispatch_queue_reset_override_priority(dq); - _dispatch_reset_defaultpriority_override(); - _dispatch_perfmon_end(); - _dispatch_force_cache_cleanup(); } #pragma mark - -#pragma mark _dispatch_queue_wakeup_with_qos +#pragma mark dispatch_queue_invoke -DISPATCH_NOINLINE -static dispatch_queue_t -_dispatch_queue_wakeup_with_qos_slow(dispatch_queue_t dq, pthread_priority_t pp, - bool retained) -{ - if (!dx_probe(dq) && (dq->dq_is_thread_bound || !dq->dq_thread)) { - if (retained) _dispatch_release(dq); - return NULL; +void +_dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t to_unlock, + struct dispatch_object_s *dc) +{ + if (_dispatch_object_is_slow_item(dc)) { + dispatch_assert(to_unlock == 0); + _dispatch_queue_drain_transfer_lock(dq, to_unlock, dc); + _dispatch_continuation_slow_item_signal(dq, dc); + return _dispatch_release_tailcall(dq); + } + + bool should_defer_again = false, should_pend_queue = true; + uint64_t old_state, new_state; + + if (_dispatch_get_current_queue()->do_targetq) { + _dispatch_thread_frame_get_current()->dtf_deferred = dc; + should_defer_again = true; + should_pend_queue = false; + } + + if (dq->dq_width > 1) { + should_pend_queue = false; + } else if (should_pend_queue) { + dispatch_assert(to_unlock == + DISPATCH_QUEUE_WIDTH_INTERVAL + 
DISPATCH_QUEUE_IN_BARRIER); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ + new_state = old_state; + if (_dq_state_has_waiters(old_state) || + _dq_state_is_enqueued(old_state)) { + os_atomic_rmw_loop_give_up(break); + } + new_state += DISPATCH_QUEUE_DRAIN_PENDED; + new_state -= DISPATCH_QUEUE_IN_BARRIER; + new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL; + }); + should_pend_queue = (new_state & DISPATCH_QUEUE_DRAIN_PENDED); + } + + if (!should_pend_queue) { + if (to_unlock & DISPATCH_QUEUE_IN_BARRIER) { + _dispatch_try_lock_transfer_or_wakeup(dq); + _dispatch_release(dq); + } else if (to_unlock) { + uint64_t dq_state = _dispatch_queue_drain_unlock(dq, to_unlock, NULL); + _dispatch_queue_try_wakeup(dq, dq_state, DISPATCH_WAKEUP_CONSUME); + } else { + _dispatch_release(dq); + } + dq = NULL; } - if (!dispatch_atomic_cmpxchg2o(dq, do_suspend_cnt, 0, - DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) { - bool was_overridden, override; - override = _dispatch_queue_override_priority(dq, &pp, &was_overridden); - if (override && dq->dq_running > 1) { - override = false; - } + if (!should_defer_again) { + dx_invoke(dc, flags & _DISPATCH_INVOKE_PROPAGATE_MASK); + } -#if DISPATCH_COCOA_COMPAT - if (dq == &_dispatch_main_q && dq->dq_is_thread_bound) { - if (override) { - _dispatch_thread_override_start(dq->dq_thread, pp); - if (was_overridden) { - _dispatch_thread_override_end(dq->dq_thread); - } - } - return _dispatch_main_queue_wakeup(); - } -#endif - if (override) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - mach_port_t th; - // to traverse the tq chain safely we must - // lock it to ensure it cannot change, unless the queue is running - // and we can just override the thread itself - if (dq->dq_thread) { - _dispatch_wqthread_override_start(dq->dq_thread, pp); - } else if (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, - MACH_PORT_NULL, _dispatch_thread_port(), &th, acquire)) { - // already locked, override the owner, trysync will do a queue - // wakeup when it returns, see _dispatch_set_target_queue2 - _dispatch_wqthread_override_start(th, pp); - } else { - dispatch_queue_t tq = dq->do_targetq; - if (_dispatch_queue_prepare_override(dq, tq, pp)) { - _dispatch_queue_push_override(dq, tq, pp, false); - } else { - _dispatch_queue_wakeup_with_qos(tq, pp); - } - dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, - release); + if (dq) { + uint32_t self = _dispatch_tid_self(); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ + new_state = old_state; + if (!_dq_state_drain_pended(old_state) || + _dq_state_drain_owner(old_state) != self) { + os_atomic_rmw_loop_give_up({ + // We may have been overridden, so inform the root queue + _dispatch_set_defaultpriority_override(); + return _dispatch_release_tailcall(dq); + }); } -#endif + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); + }); + if (_dq_state_has_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
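/*
 * Editorial sketch: the os_atomic_rmw_loop2o() construct used in the state
 * transitions above is, at its core, a compare-and-swap loop over dq_state.
 * A C11 approximation of the pattern (illustrative names only; the real
 * macro also supports early exit via os_atomic_rmw_loop_give_up()):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
rmw_loop_sketch(_Atomic uint64_t *state, uint64_t bit_to_set)
{
	uint64_t old_state = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new_state;
	do {
		if (old_state & bit_to_set) {
			return false; // the "give up" path: no transition to publish
		}
		new_state = old_state | bit_to_set;
	} while (!atomic_compare_exchange_weak_explicit(state, &old_state,
			new_state, memory_order_release, memory_order_relaxed));
	return true;
}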
+ _dispatch_set_defaultpriority_override(); } - if (retained) _dispatch_release(dq); - return NULL; + return dx_invoke(dq, flags | DISPATCH_INVOKE_STEALING); } +} +void +_dispatch_queue_finalize_activation(dispatch_queue_t dq) +{ dispatch_queue_t tq = dq->do_targetq; - if (!retained) _dispatch_retain(dq); - _dispatch_queue_push_queue(tq, dq, pp); - return tq; // libdispatch does not need this, but the Instrument DTrace - // probe does + _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); + if (dq->dq_override_voucher == DISPATCH_NO_VOUCHER) { + voucher_t v = tq->dq_override_voucher; + if (v != DISPATCH_NO_VOUCHER) { + if (v) _voucher_retain(v); + dq->dq_override_voucher = v; + } + } } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t -_dispatch_queue_wakeup_with_qos2(dispatch_queue_t dq, pthread_priority_t pp, - bool retained) +dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags, + uint64_t *owned, struct dispatch_object_s **dc_ptr) { - if (_dispatch_object_suspended(dq)) { - _dispatch_queue_override_priority(dq, &pp, NULL); - if (retained) _dispatch_release(dq); - return NULL; + dispatch_queue_t otq = dq->do_targetq; + dispatch_queue_t cq = _dispatch_queue_get_current(); + + if (slowpath(cq != otq)) { + return otq; } - return _dispatch_queue_wakeup_with_qos_slow(dq, pp, retained); + if (dq->dq_width == 1) { + return _dispatch_queue_serial_drain(dq, flags, owned, dc_ptr); + } + return _dispatch_queue_concurrent_drain(dq, flags, owned, dc_ptr); } +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol DISPATCH_NOINLINE void -_dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq, - pthread_priority_t pp) +_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags) { - (void)_dispatch_queue_wakeup_with_qos2(dq, pp, true); + _dispatch_queue_class_invoke(dq, flags, dispatch_queue_invoke2); } -DISPATCH_NOINLINE +#pragma mark - +#pragma mark dispatch_queue_class_wakeup + +#if HAVE_PTHREAD_WORKQUEUE_QOS void -_dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_override_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) { - (void)_dispatch_queue_wakeup_with_qos2(dq, pp, false); + dispatch_queue_t old_rq = _dispatch_queue_get_current(); + dispatch_queue_t assumed_rq = dc->dc_other; + voucher_t ov = DISPATCH_NO_VOUCHER; + dispatch_object_t dou; + + dou._do = dc->dc_data; + _dispatch_queue_set_current(assumed_rq); + flags |= DISPATCH_INVOKE_OVERRIDING; + if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) { + flags |= DISPATCH_INVOKE_STEALING; + } else { + // balance the fake continuation push in + // _dispatch_root_queue_push_override + _dispatch_trace_continuation_pop(assumed_rq, dou._do); + } + _dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, { + if (_dispatch_object_has_vtable(dou._do)) { + dx_invoke(dou._do, flags); + } else { + _dispatch_continuation_invoke_inline(dou, ov, flags); + } + }); + _dispatch_queue_set_current(old_rq); } -DISPATCH_NOINLINE -void -_dispatch_queue_wakeup_and_release(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_need_global_root_queue_push_override(dispatch_queue_t rq, + pthread_priority_t pp) { - (void)_dispatch_queue_wakeup_with_qos2(dq, - _dispatch_queue_get_override_priority(dq), true); + pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + bool defaultqueue = 
rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + + if (unlikely(!rqp)) return false; + + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return defaultqueue ? pp && pp != rqp : pp > rqp; } -DISPATCH_NOINLINE -dispatch_queue_t -_dispatch_queue_wakeup(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_need_global_root_queue_push_override_stealer(dispatch_queue_t rq, + pthread_priority_t pp) { - return _dispatch_queue_wakeup_with_qos2(dq, - _dispatch_queue_get_override_priority(dq), false); + pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + + if (unlikely(!rqp)) return false; + + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return defaultqueue || pp > rqp; } -#if HAVE_PTHREAD_WORKQUEUE_QOS DISPATCH_NOINLINE static void -_dispatch_queue_override_invoke_stealing(void *ctxt) +_dispatch_root_queue_push_override(dispatch_queue_t orig_rq, + dispatch_object_t dou, pthread_priority_t pp) { - dispatch_continuation_t dc = (dispatch_continuation_t)ctxt; - dispatch_queue_t dq = dc->dc_data; + bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + dispatch_continuation_t dc = dou._dc; + + if (_dispatch_object_is_redirection(dc)) { + // no double-wrap is needed, _dispatch_async_redirect_invoke will do + // the right thing + dc->dc_func = (void *)orig_rq; + } else { + dc = _dispatch_continuation_alloc(); + dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING); + // fake that we queued `dou` on `orig_rq` for introspection purposes + _dispatch_trace_continuation_push(orig_rq, dou); + dc->dc_ctxt = dc; + dc->dc_other = orig_rq; + dc->dc_data = dou._do; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + } - dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING | DISPATCH_INVOKE_STEALING); + DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); + _dispatch_queue_push_inline(rq, dc, 0, 0); } DISPATCH_NOINLINE static void -_dispatch_queue_override_invoke_owning(void *ctxt) +_dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, + dispatch_queue_t dq, pthread_priority_t pp) { - dispatch_continuation_t dc = (dispatch_continuation_t)ctxt; - dispatch_queue_t dq = dc->dc_data; + bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); + _dispatch_retain(dq); + dc->dc_func = NULL; + dc->dc_ctxt = dc; + dc->dc_other = orig_rq; + dc->dc_data = dq; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; - // balance the fake continuation push in _dispatch_queue_push_override - _dispatch_trace_continuation_pop(dc->dc_other, dc->dc_data); - dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING); + DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); + _dispatch_queue_push_inline(rq, dc, 0, 0); } -#endif -static inline bool -_dispatch_queue_prepare_override(dispatch_queue_t dq, dispatch_queue_t tq, - pthread_priority_t p) +DISPATCH_NOINLINE +static void +_dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, + pthread_priority_t pp, dispatch_wakeup_flags_t flags, uint64_t dq_state) +{ + mach_port_t owner = _dq_state_drain_owner(dq_state); + pthread_priority_t pp2; + dispatch_queue_t tq; + 
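// (`tq` caches dq's target queue; `locked`, declared just below, records
// whether this path had to take dq's side lock to walk that target safely)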
bool locked; + + if (owner) { + int rc = _dispatch_wqthread_override_start_check_owner(owner, pp, + &dq->dq_state_lock); + // EPERM means the target of the override is not a work queue thread + // and could be a thread-bound queue such as the main queue. + // When that happens we must get to that queue and wake it up if we + // want the override to be applied and take effect. + if (rc != EPERM) { + goto out; + } + } + + if (_dq_state_is_suspended(dq_state)) { + goto out; + } + + tq = dq->do_targetq; + + if (_dispatch_queue_has_immutable_target(dq)) { + locked = false; + } else if (_dispatch_is_in_root_queues_array(tq)) { + // avoid locking when we recognize the target queue as a global root + // queue; it is gross, but it is a very common case. The locking isn't + // needed because these target queues cannot go away. + locked = false; + } else if (_dispatch_queue_sidelock_trylock(dq, pp)) { + // to traverse the tq chain safely we must + // lock it to ensure it cannot change + locked = true; + tq = dq->do_targetq; + _dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq); + } else { + // + // To get here, the current thread has: + // 1. enqueued an object on `dq` + // 2. raised the dq_override value of `dq` + // 3. set the HAS_OVERRIDE bit and not seen an owner + // 4. tried and failed to acquire the side lock + // + // + // The side lock owner can only be one of three things: + // + // - The suspend/resume side count code. Besides being unlikely, + // it means that at this moment the queue is actually suspended, + // which transfers the responsibility of applying the override to + // the eventual dispatch_resume(). + // + // - A dispatch_set_target_queue() call. The fact that we saw no `owner` + // means that the trysync it does wasn't being drained when (3) + // happened, which can only be explained by one of these interleavings: + // + // o `dq` became idle between when the object queued in (1) ran and + // the set_target_queue call and we were unlucky enough that our + // step (3) happened while this queue was idle. There is no reason + // to override anything anymore; the queue drained to completion + // while we were preempted, and our job is done. + // + // o `dq` is queued but not draining during (1-3), then when we try + // to lock at (4) the queue is now draining a set_target_queue. + // Since we set HAS_OVERRIDE with a release barrier, the effect of + // (2) was visible to the drainer when it acquired the drain lock, + // and that drainer has applied our override. Our job is done. + // + // - Another instance of _dispatch_queue_class_wakeup_with_override(), + // which is fine because a failed trylock leaves a hint, causing the + // tryunlock below to fail and reassess whether a better override + // needs to be applied. + // + _dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq); + goto out; + } + +apply_again: + if (dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + if (_dispatch_need_global_root_queue_push_override_stealer(tq, pp)) { + _dispatch_root_queue_push_override_stealer(tq, dq, pp); + } + } else if (_dispatch_queue_need_override(tq, pp)) { + dx_wakeup(tq, pp, DISPATCH_WAKEUP_OVERRIDING); + } + while (unlikely(locked && !_dispatch_queue_sidelock_tryunlock(dq))) { + // rdar://problem/24081326 + // + // Another instance of _dispatch_queue_class_wakeup_with_override() + // tried to acquire the side lock while we were running, and could have + // had a better override than ours to apply.
+ // + pp2 = dq->dq_override; + if (pp2 > pp) { + pp = pp2; + // The other instance had a better priority than ours, override + // our thread, and apply the override that wasn't applied to `dq` + // because of us. + goto apply_again; + } + } + +out: + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); + } +} +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + +DISPATCH_NOINLINE +void +_dispatch_queue_class_override_drainer(dispatch_queue_t dq, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - if (dx_type(tq) != DISPATCH_QUEUE_ROOT_TYPE || !tq->dq_priority) { - return false; + uint64_t dq_state, value; + + // + // Someone is trying to override the last work item of the queue. + // Do not remember this override on the queue because we know the precise + // duration the override is required for: until the current drain unlocks. + // + // That is why this function only tries to set HAS_OVERRIDE if we can + // still observe a drainer, and doesn't need to set the DIRTY bit + // because oq_override wasn't touched and there is no race to resolve + // + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + if (!_dq_state_drain_locked(dq_state)) { + os_atomic_rmw_loop_give_up(break); + } + value = dq_state | DISPATCH_QUEUE_HAS_OVERRIDE; + }); + if (_dq_state_drain_locked(dq_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, pp, + flags, dq_state); } - if (p <= (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - return false; +#else + (void)pp; +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } - if (p <= (tq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - return false; +} + +#if DISPATCH_USE_KEVENT_WORKQUEUE +DISPATCH_NOINLINE +static void +_dispatch_trystash_to_deferred_items(dispatch_queue_t dq, dispatch_object_t dou, + pthread_priority_t pp, dispatch_deferred_items_t ddi) +{ + dispatch_priority_t old_pp = ddi->ddi_stashed_pp; + dispatch_queue_t old_dq = ddi->ddi_stashed_dq; + struct dispatch_object_s *old_dou = ddi->ddi_stashed_dou; + dispatch_priority_t rq_overcommit; + + rq_overcommit = dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + if (likely(!old_pp || rq_overcommit)) { + ddi->ddi_stashed_dq = dq; + ddi->ddi_stashed_dou = dou._do; + ddi->ddi_stashed_pp = (dispatch_priority_t)pp | rq_overcommit | + _PTHREAD_PRIORITY_PRIORITY_MASK; + if (likely(!old_pp)) { + return; + } + // push the previously stashed item + pp = old_pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dq = old_dq; + dou._do = old_dou; } - return true; -#else - (void)dq; (void)tq; (void)p; - return false; + if (_dispatch_need_global_root_queue_push_override(dq, pp)) { + return _dispatch_root_queue_push_override(dq, dou, pp); + } + // bit of cheating: we should really pass `pp` but we know that we are + // pushing onto a global queue at this point, and we just checked that + // `pp` doesn't matter. 
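/*
 * Editorial sketch of the deferred-items stash above (illustrative, not the
 * dispatch API): the kevent worker keeps the most recently pushed work item
 * in a one-slot, per-thread stash and only hands the previously stashed item
 * to the actual root queue.
 */
struct stash_sketch { void *item; };

static void
stash_push_sketch(struct stash_sketch *s, void *item,
		void (*push_to_queue)(void *))
{
	void *prev = s->item;
	s->item = item;          // the newest item stays local to this thread
	if (prev) {
		push_to_queue(prev); // evict the older item to the real queue
	}
}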
+ DISPATCH_COMPILER_CAN_ASSUME(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); + _dispatch_queue_push_inline(dq, dou, 0, 0); +} #endif + +DISPATCH_NOINLINE +static void +_dispatch_queue_push_slow(dispatch_queue_t dq, dispatch_object_t dou, + pthread_priority_t pp) +{ + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); + _dispatch_queue_push(dq, dou, pp); } -static inline void -_dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq, - pthread_priority_t p, bool owning) +DISPATCH_NOINLINE +void +_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + pthread_priority_t pp) { + _dispatch_assert_is_valid_qos_override(pp); + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (unlikely(ddi && !(ddi->ddi_stashed_pp & + (dispatch_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK))) { + dispatch_assert(_dispatch_root_queues_pred == DLOCK_ONCE_DONE); + return _dispatch_trystash_to_deferred_items(dq, dou, pp, ddi); + } +#endif #if HAVE_PTHREAD_WORKQUEUE_QOS - unsigned int qosbit, idx, overcommit; - overcommit = (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 1 : 0; - qosbit = (p & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> - _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; - idx = (unsigned int)__builtin_ffs((int)qosbit); - if (!idx || idx > DISPATCH_QUEUE_QOS_COUNT) { - DISPATCH_CRASH("Corrupted override priority"); + // can't use dispatch_once_f() as it would create a frame + if (unlikely(_dispatch_root_queues_pred != DLOCK_ONCE_DONE)) { + return _dispatch_queue_push_slow(dq, dou, pp); + } + if (_dispatch_need_global_root_queue_push_override(dq, pp)) { + return _dispatch_root_queue_push_override(dq, dou, pp); + } +#endif } - dispatch_queue_t rq = &_dispatch_root_queues[((idx-1) << 1) | overcommit]; + _dispatch_queue_push_inline(dq, dou, pp, 0); +} - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - if (owning) { - // fake that we queued `dq` on `tq` for introspection purposes - _dispatch_trace_continuation_push(tq, dq); - dc->dc_func = _dispatch_queue_override_invoke_owning; - } else { - dc->dc_func = _dispatch_queue_override_invoke_stealing; +DISPATCH_NOINLINE +static void +_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) +{ + dispatch_queue_t tq; + + if (flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAS_OVERRIDDEN)) { + // _dispatch_queue_drain_try_unlock may have reset the override while + // we were becoming the enqueuer + _dispatch_queue_reinstate_override_priority(dq, (dispatch_priority_t)pp); + } + if (!(flags & DISPATCH_WAKEUP_CONSUME)) { _dispatch_retain(dq); } - dc->dc_ctxt = dc; - dc->dc_priority = 0; - dc->dc_other = tq; - dc->dc_voucher = NULL; - dc->dc_data = dq; - - _dispatch_queue_push(rq, dc, 0); -#else - (void)dq; (void)tq; (void)p; -#endif + if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { + // try_become_enqueuer has no acquire barrier, as the last block + // of a queue asyncing to that queue is not an uncommon pattern + // and in that case the acquire is completely useless + // + // so instead use a thread fence here when we will read the targetq + // pointer because that is the only thing that really requires + // that barrier. 
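	// (in C11 terms: the enqueued state was published by a release RMW, so an
	// acquire thread fence followed by a plain load of do_targetq gives that
	// load acquire semantics without strengthening the RMW itself)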
+ os_atomic_thread_fence(acquire); + tq = dq->do_targetq; + } else { + dispatch_assert(target == DISPATCH_QUEUE_WAKEUP_MGR); + tq = &_dispatch_mgr_q; + } + return _dispatch_queue_push(tq, dq, pp); } DISPATCH_NOINLINE void _dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { uint64_t old_state, new_state, bits = 0; #if HAVE_PTHREAD_WORKQUEUE_QOS _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); #endif if (flags & DISPATCH_WAKEUP_FLUSH) { bits = DISPATCH_QUEUE_DIRTY; } if (flags & DISPATCH_WAKEUP_OVERRIDING) { // // Setting the dirty bit here is about forcing callers of // _dispatch_queue_drain_try_unlock() to loop again when an override // has just been set to close the following race: // // Drainer (in drain_try_unlock()): // override_reset(); // preempted.... // // Enqueuer: // atomic_or(oq_override, override, relaxed); // atomic_or(dq_state, HAS_OVERRIDE, release); // // Drainer: // ... resumes // successful drain_unlock() and leaks `oq_override` // bits = DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_HAS_OVERRIDE; } if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { uint64_t pending_barrier_width = (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; uint64_t xor_owner_and_set_full_width_and_in_barrier = _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; #ifdef DLOCK_NOWAITERS_BIT bits |= DLOCK_NOWAITERS_BIT; #else bits |= DLOCK_WAITERS_BIT; #endif flags ^= DISPATCH_WAKEUP_SLOW_WAITER; dispatch_assert(!(flags & DISPATCH_WAKEUP_CONSUME)); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = old_state | bits; if (_dq_state_drain_pended(old_state)) { // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT // but we want to be more efficient wrt the WAITERS_BIT new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED; } if (unlikely(_dq_state_drain_locked(new_state))) { #ifdef DLOCK_NOWAITERS_BIT new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT; #endif } else if (unlikely(!_dq_state_is_runnable(new_state) || !(flags & DISPATCH_WAKEUP_FLUSH))) { // either not runnable, or was not for the first item (26700358) // so we should not try to lock and handle overrides instead } else if (_dq_state_has_pending_barrier(old_state) || new_state + pending_barrier_width < DISPATCH_QUEUE_WIDTH_FULL_BIT) { // see _dispatch_queue_drain_try_lock new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; new_state ^= xor_owner_and_set_full_width_and_in_barrier; } else { new_state |= DISPATCH_QUEUE_ENQUEUED; } }); if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { return _dispatch_try_lock_transfer_or_wakeup(dq); } } else if (bits) { os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ new_state = old_state | bits; if (likely(_dq_state_should_wakeup(old_state))) { new_state |= DISPATCH_QUEUE_ENQUEUED; } }); } else { os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed,{ new_state = old_state; if (likely(_dq_state_should_wakeup(old_state))) { +
new_state |= DISPATCH_QUEUE_ENQUEUED; + } else { + os_atomic_rmw_loop_give_up(break); + } + }); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + return _dispatch_queue_class_wakeup_enqueue(dq, pp, flags, target); + } + +#if HAVE_PTHREAD_WORKQUEUE_QOS + if ((flags & DISPATCH_WAKEUP_OVERRIDING) + && target == DISPATCH_QUEUE_WAKEUP_TARGET) { + return _dispatch_queue_class_wakeup_with_override(dq, pp, + flags, new_state); + } +#endif + + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } } @@ -4041,7 +5331,7 @@ _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, DISPATCH_NOINLINE static bool -_dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) +_dispatch_root_queue_drain_one_slow(dispatch_queue_t dq) { dispatch_root_queue_context_t qc = dq->do_ctxt; struct dispatch_object_s *const mediator = (void *)~0ul; @@ -4058,7 +5348,7 @@ _dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) // Since we have serious contention, we need to back off. if (!pending) { // Mark this queue as pending to avoid requests for further threads - (void)dispatch_atomic_inc2o(qc, dgq_pending, relaxed); + (void)os_atomic_inc2o(qc, dgq_pending, relaxed); pending = true; } _dispatch_contention_usleep(sleep_time); @@ -4074,17 +5364,17 @@ _dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) available = false; out: if (pending) { - (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + (void)os_atomic_dec2o(qc, dgq_pending, relaxed); } if (!available) { - _dispatch_queue_wakeup_global(dq); + _dispatch_global_queue_poke(dq); } return available; } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_concurrent_drain_one2(dispatch_queue_t dq) +_dispatch_root_queue_drain_one2(dispatch_queue_t dq) { // Wait for queue head and tail to be both non-empty or both empty bool available; // @@ -4095,24 +5385,24 @@ _dispatch_queue_concurrent_drain_one2(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE_NDEBUG static inline struct dispatch_object_s * -_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) +_dispatch_root_queue_drain_one(dispatch_queue_t dq) { struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; start: // The mediator value acts both as a "lock" and a signal - head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator, relaxed); + head = os_atomic_xchg2o(dq, dq_items_head, mediator, relaxed); if (slowpath(head == NULL)) { // The first xchg on the tail will tell the enqueueing thread that it // is safe to blindly write out to the head pointer. A cmpxchg honors // the algorithm. - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, + if (slowpath(!os_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL, relaxed))) { goto start; } if (slowpath(dq->dq_items_tail) && // - _dispatch_queue_concurrent_drain_one2(dq)) { + _dispatch_root_queue_drain_one2(dq)) { goto start; } _dispatch_root_queue_debug("no work on global queue: %p", dq); @@ -4121,7 +5411,7 @@ start: if (slowpath(head == mediator)) { // This thread lost the race for ownership of the queue. 
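/*
 * Editorial sketch of the "mediator" trick above (illustrative C11, not the
 * dispatch implementation): the consumer XCHGs a sentinel into the MPSC head,
 * which both claims the queue and tells competing consumers that someone
 * currently owns it.
 */
#include <stdatomic.h>

#define MEDIATOR_SKETCH ((void *)~0ul)

static void *
drain_one_head_sketch(_Atomic(void *) *head)
{
	void *h = atomic_exchange_explicit(head, MEDIATOR_SKETCH,
			memory_order_relaxed);
	if (h == NULL || h == MEDIATOR_SKETCH) {
		// empty, or another consumer holds the head right now
		return NULL;
	}
	// ... consume `h`, then store its successor back into *head ...
	return h;
}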
- if (fastpath(_dispatch_queue_concurrent_drain_one_slow(dq))) { + if (fastpath(_dispatch_root_queue_drain_one_slow(dq))) { goto start; } return NULL; @@ -4132,9 +5422,10 @@ start: next = fastpath(head->do_next); if (slowpath(!next)) { - dispatch_atomic_store2o(dq, dq_items_head, NULL, relaxed); - - if (dispatch_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, relaxed)) { + os_atomic_store2o(dq, dq_items_head, NULL, relaxed); + // 22708742: set tail to NULL with release, so that NULL write to head + // above doesn't clobber head from concurrent enqueuer + if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { // both head and tail are NULL now goto out; } @@ -4142,54 +5433,78 @@ start: _dispatch_wait_until(next = head->do_next); } - dispatch_atomic_store2o(dq, dq_items_head, next, relaxed); - _dispatch_queue_wakeup_global(dq); + os_atomic_store2o(dq, dq_items_head, next, relaxed); + _dispatch_global_queue_poke(dq); out: return head; } +void +_dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, + struct dispatch_object_s *dou, pthread_priority_t pp) +{ + struct _dispatch_identity_s di; + + // fake that we queued `dou` on `dq` for introspection purposes + _dispatch_trace_continuation_push(dq, dou); + + pp = _dispatch_priority_inherit_from_root_queue(pp, dq); + _dispatch_queue_set_current(dq); + _dispatch_root_queue_identity_assume(&di, pp); +#if DISPATCH_COCOA_COMPAT + void *pool = _dispatch_last_resort_autorelease_pool_push(); +#endif // DISPATCH_COCOA_COMPAT + + _dispatch_perfmon_start(); + _dispatch_continuation_pop_inline(dou, dq, + DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_perfmon_workitem_inc(); + _dispatch_perfmon_end(); + +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_pop(pool); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_reset_defaultpriority(di.old_pp); + _dispatch_queue_set_current(NULL); + + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +} + +DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) static void -_dispatch_root_queue_drain(dispatch_queue_t dq) +_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri) { #if DISPATCH_DEBUG - if (_dispatch_thread_getspecific(dispatch_queue_key)) { - DISPATCH_CRASH("Premature thread recycling"); + dispatch_queue_t cq; + if (slowpath(cq = _dispatch_queue_get_current())) { + DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling"); } #endif - _dispatch_thread_setspecific(dispatch_queue_key, dq); - pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t pri = dq->dq_priority ? 
dq->dq_priority : old_pri; - pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri); - + _dispatch_queue_set_current(dq); + if (dq->dq_priority) pri = dq->dq_priority; + pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri, NULL); #if DISPATCH_COCOA_COMPAT - // ensure that high-level memory management techniques do not leak/crash - if (dispatch_begin_thread_4GC) { - dispatch_begin_thread_4GC(); - } - void *pool = _dispatch_autorelease_pool_push(); + void *pool = _dispatch_last_resort_autorelease_pool_push(); #endif // DISPATCH_COCOA_COMPAT _dispatch_perfmon_start(); struct dispatch_object_s *item; bool reset = false; - while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { + while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) { if (reset) _dispatch_wqthread_override_reset(); - _dispatch_continuation_pop(item); + _dispatch_continuation_pop_inline(item, dq, + DISPATCH_INVOKE_WORKER_DRAIN|DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_perfmon_workitem_inc(); reset = _dispatch_reset_defaultpriority_override(); } - _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_reset_priority_and_voucher(old_pri, NULL); - _dispatch_reset_defaultpriority(old_dp); _dispatch_perfmon_end(); #if DISPATCH_COCOA_COMPAT - _dispatch_autorelease_pool_pop(pool); - if (dispatch_end_thread_4GC) { - dispatch_end_thread_4GC(); - } + _dispatch_last_resort_autorelease_pool_pop(pool); #endif // DISPATCH_COCOA_COMPAT - - _dispatch_thread_setspecific(dispatch_queue_key, NULL); + _dispatch_reset_defaultpriority(old_dp); + _dispatch_queue_set_current(NULL); } #pragma mark - @@ -4203,32 +5518,22 @@ _dispatch_worker_thread4(void *context) dispatch_root_queue_context_t qc = dq->do_ctxt; _dispatch_introspection_thread_add(); - int pending = (int)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + int pending = (int)os_atomic_dec2o(qc, dgq_pending, relaxed); dispatch_assert(pending >= 0); - _dispatch_root_queue_drain(dq); - __asm__(""); // prevent tailcall (for Instrument DTrace probe) + _dispatch_root_queue_drain(dq, _dispatch_get_priority()); + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); } #if HAVE_PTHREAD_WORKQUEUE_QOS static void -_dispatch_worker_thread3(pthread_priority_t priority) -{ - // Reset priority TSD to workaround - _dispatch_thread_setspecific(dispatch_priority_key, - (void*)(uintptr_t)(priority & ~_PTHREAD_PRIORITY_FLAGS_MASK)); - unsigned int overcommit, qosbit, idx; - overcommit = (priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 1 : 0; - qosbit = (priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> - _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; - if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]. - dq_priority) { - // If kernel doesn't support maintenance, bottom bit is background. - // Shift to our idea of where background bit is. 
- qosbit <<= 1; - } - idx = (unsigned int)__builtin_ffs((int)qosbit); - dispatch_assert(idx > 0 && idx < DISPATCH_QUEUE_QOS_COUNT+1); - dispatch_queue_t dq = &_dispatch_root_queues[((idx-1) << 1) | overcommit]; +_dispatch_worker_thread3(pthread_priority_t pp) +{ + bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_t dq; + pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); + dq = _dispatch_get_root_queue_for_priority(pp, overcommit); return _dispatch_worker_thread4(dq); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -4277,13 +5582,15 @@ _dispatch_worker_thread(void *context) _dispatch_introspection_thread_add(); const int64_t timeout = 5ull * NSEC_PER_SEC; + pthread_priority_t old_pri = _dispatch_get_priority(); do { - _dispatch_root_queue_drain(dq); + _dispatch_root_queue_drain(dq, old_pri); + _dispatch_reset_priority_and_voucher(old_pri, NULL); } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, dispatch_time(0, timeout)) == 0); - (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size, release); - _dispatch_queue_wakeup_global(dq); + (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release); + _dispatch_global_queue_poke(dq); _dispatch_release(dq); return NULL; @@ -4333,17 +5640,14 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) size_t dqs; if (slowpath(flags)) { - return NULL; + return DISPATCH_BAD_INPUT; } dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs); - _dispatch_queue_init(dq); + _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, false); dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true); dq->dq_label = label ? 
label : "runloop-queue"; // no-copy contract - dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; - dq->dq_running = 1; - dq->dq_is_thread_bound = 1; - _dispatch_runloop_queue_port_init(dq); + _dispatch_runloop_queue_handle_init(dq); _dispatch_queue_set_bound_thread(dq); _dispatch_object_debug(dq, "%s", __func__); return _dispatch_introspection_queue_create(dq); @@ -4353,13 +5657,11 @@ void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) { _dispatch_object_debug(dq, "%s", __func__); - (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); - unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK, release); + + pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, true); _dispatch_queue_clear_bound_thread(dq); - if (suspend_cnt == 0) { - _dispatch_queue_wakeup(dq); - } + dx_wakeup(dq, pp, DISPATCH_WAKEUP_FLUSH); + if (pp) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); } void @@ -4367,7 +5669,7 @@ _dispatch_runloop_queue_dispose(dispatch_queue_t dq) { _dispatch_object_debug(dq, "%s", __func__); _dispatch_introspection_queue_dispose(dq); - _dispatch_runloop_queue_port_dispose(dq); + _dispatch_runloop_queue_handle_dispose(dq); _dispatch_queue_destroy(dq); } @@ -4375,7 +5677,7 @@ bool _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) { if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH("Not a runloop queue"); + DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); } dispatch_retain(dq); bool r = _dispatch_runloop_queue_drain_one(dq); @@ -4387,28 +5689,31 @@ void _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) { if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH("Not a runloop queue"); + DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); } - _dispatch_runloop_queue_probe(dq); + _dispatch_runloop_queue_wakeup(dq, 0, false); } -mach_port_t +dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) { if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH("Not a runloop queue"); + DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); } - return (mach_port_t)dq->do_ctxt; + return _dispatch_runloop_queue_get_handle(dq); } static void -_dispatch_runloop_queue_port_init(void *ctxt) +_dispatch_runloop_queue_handle_init(void *ctxt) { dispatch_queue_t dq = (dispatch_queue_t)ctxt; + dispatch_runloop_handle_t handle; + + _dispatch_fork_becomes_unsafe(); + +#if TARGET_OS_MAC mach_port_t mp; kern_return_t kr; - - _dispatch_safe_fork = false; kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &mp); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); @@ -4426,38 +5731,81 @@ _dispatch_runloop_queue_port_init(void *ctxt) DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); } - dq->do_ctxt = (void*)(uintptr_t)mp; + handle = mp; +#elif defined(__linux__) + int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (fd == -1) { + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "system is out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "kernel is out of memory"); + break; + default: + DISPATCH_INTERNAL_CRASH(err, "eventfd() failure"); + break; + } + } + handle = fd; +#else +#error "runloop support not 
implemented on this platform" +#endif + _dispatch_runloop_queue_set_handle(dq, handle); _dispatch_program_is_probably_callback_driven = true; } static void -_dispatch_runloop_queue_port_dispose(dispatch_queue_t dq) +_dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq) { - mach_port_t mp = (mach_port_t)dq->do_ctxt; - if (!mp) { + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { return; } dq->do_ctxt = NULL; +#if TARGET_OS_MAC + mach_port_t mp = handle; kern_return_t kr = mach_port_deallocate(mach_task_self(), mp); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); +#elif defined(__linux__) + int rc = close(handle); + (void)dispatch_assume_zero(rc); +#else +#error "runloop support not implemented on this platform" +#endif } #pragma mark - #pragma mark dispatch_main_queue -mach_port_t -_dispatch_get_main_queue_port_4CF(void) +dispatch_runloop_handle_t +_dispatch_get_main_queue_handle_4CF(void) { dispatch_queue_t dq = &_dispatch_main_q; - dispatch_once_f(&_dispatch_main_q_port_pred, dq, - _dispatch_runloop_queue_port_init); - return (mach_port_t)dq->do_ctxt; + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + return _dispatch_runloop_queue_get_handle(dq); +} + +#if TARGET_OS_MAC +dispatch_runloop_handle_t +_dispatch_get_main_queue_port_4CF(void) +{ + return _dispatch_get_main_queue_handle_4CF(); } +#endif static bool main_q_is_draining; @@ -4471,7 +5819,13 @@ _dispatch_queue_set_mainq_drain_state(bool arg) } void -_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg DISPATCH_UNUSED) +_dispatch_main_queue_callback_4CF( +#if TARGET_OS_MAC + mach_msg_header_t *_Null_unspecified msg +#else + void *ignored +#endif + DISPATCH_UNUSED) { if (main_q_is_draining) { return; @@ -4491,11 +5845,23 @@ dispatch_main(void) #endif _dispatch_object_debug(&_dispatch_main_q, "%s", __func__); _dispatch_program_is_probably_callback_driven = true; + _dispatch_ktrace0(ARIADNE_ENTER_DISPATCH_MAIN_CODE); +#ifdef __linux__ + // On Linux, if the main thread calls pthread_exit, the process becomes a zombie. + // To avoid that, just before calling pthread_exit we register a TSD destructor + // that will call _dispatch_sig_thread -- thus capturing the main thread in sigsuspend. + // This relies on an implementation detail (currently true in glibc) that TSD destructors + // will be called in the order of creation to cause all the TSD cleanup functions to + // run before the thread becomes trapped in sigsuspend. 
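Reduced to a standalone program, the trick this comment describes (and that the next few added lines implement) looks like the sketch below. `park_forever` is an illustrative stand-in for `_dispatch_sig_thread`, and the key name is arbitrary:

#include <pthread.h>
#include <signal.h>

/* Stand-in for _dispatch_sig_thread: park the exiting main thread in
 * sigsuspend() so the process does not become a zombie. */
static void
park_forever(void *ctxt)
{
	(void)ctxt;
	sigset_t mask;
	sigemptyset(&mask);
	for (;;) sigsuspend(&mask);
}

int
main(void)
{
	pthread_key_t key;
	// The destructor runs during pthread_exit() TSD cleanup; glibc
	// currently runs destructors in key-creation order, so this one
	// fires after the other TSD cleanup functions have run.
	pthread_key_create(&key, park_forever);
	pthread_setspecific(key, &key); // non-NULL value => destructor runs
	pthread_exit(NULL);             // never returns; the thread parks
}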
+ pthread_key_t dispatch_main_key; + pthread_key_create(&dispatch_main_key, _dispatch_sig_thread); + pthread_setspecific(dispatch_main_key, &dispatch_main_key); +#endif pthread_exit(NULL); - DISPATCH_CRASH("pthread_exit() returned"); + DISPATCH_INTERNAL_CRASH(errno, "pthread_exit() returned"); #if HAVE_PTHREAD_MAIN_NP } - DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread"); + DISPATCH_CLIENT_CRASH(0, "dispatch_main() must be called on the main thread"); #endif } @@ -4524,28 +5890,69 @@ static void _dispatch_queue_cleanup2(void) { dispatch_queue_t dq = &_dispatch_main_q; - (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); - (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK, release); _dispatch_queue_clear_bound_thread(dq); - dq->dq_is_thread_bound = 0; + + // + // Here is what happens when both this cleanup happens because of + // dispatch_main() being called, and a concurrent enqueuer makes the queue + // non empty. + // + // _dispatch_queue_cleanup2: + // atomic_and(dq_is_thread_bound, ~DQF_THREAD_BOUND, relaxed); + // maximal_barrier(); + // if (load(dq_items_tail, seq_cst)) { + // // do the wake up the normal serial queue way + // } else { + // // do no wake up <---- + // } + // + // enqueuer: + // store(dq_items_tail, new_tail, release); + // if (load(dq_is_thread_bound, relaxed)) { + // // do the wake up the runloop way <---- + // } else { + // // do the wake up the normal serial way + // } + // + // what would be bad is to take both paths marked <---- because the queue + // wouldn't be woken up until the next time it's used (which may never + // happen) + // + // An enqueuer that speculates the load of the old value of thread_bound + // and then does the store may wake up the main queue the runloop way. + // But then, the cleanup thread will see that store because the load + // of dq_items_tail is sequentially consistent, and we have just thrown away + // our pipeline. + // + // By the time cleanup2() is out of the maximally synchronizing barrier, + // no other thread can speculate the wrong load anymore, and both cleanup2() + // and a concurrent enqueuer would treat the queue in the standard non + // thread bound way + + _dispatch_queue_atomic_flags_clear(dq, + DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC); + os_atomic_maximally_synchronizing_barrier(); // no need to drop the override, the thread will die anyway - _dispatch_queue_wakeup_with_qos(dq, - _dispatch_queue_reset_override_priority(dq)); + // the barrier above includes an acquire, so it's ok to do this raw + // call to dx_wakeup(0) + dx_wakeup(dq, 0, 0); // overload the "probably" variable to mean that dispatch_main() or // similar non-POSIX API was called // this has to run before the DISPATCH_COCOA_COMPAT below + // See dispatch_main for call to _dispatch_sig_thread on linux. 
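The interleaving argument in the cleanup2 comment above can be modeled with C11 atomics. The field names below are illustrative stand-ins, not the real dq_state machinery:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool q_thread_bound = true;
static _Atomic(void *) q_items_tail;

/* cleanup side: clear the flag, then issue a full barrier. The fence plus
 * the seq_cst load below guarantee that an enqueuer who raced past the
 * cleared flag has its tail store visible here, so at most one of the two
 * sides can skip the real wakeup. */
void
cleanup_side(void)
{
	atomic_store_explicit(&q_thread_bound, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst); // "maximally synchronizing"
	if (atomic_load_explicit(&q_items_tail, memory_order_seq_cst)) {
		/* wake up the queue the normal serial way */
	}
	/* else: provably empty, no wakeup needed */
}

/* enqueuer side: publish the item first, then pick a wakeup strategy. If a
 * stale load still sees the flag set, the useless runloop wakeup is
 * harmless, because cleanup_side() is then guaranteed to see the tail. */
void
enqueuer_side(void *new_tail)
{
	atomic_store_explicit(&q_items_tail, new_tail, memory_order_release);
	if (atomic_load_explicit(&q_thread_bound, memory_order_relaxed)) {
		/* wake up the runloop way */
	} else {
		/* wake up the normal serial way */
	}
}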
+#ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { _dispatch_barrier_async_detached_f(_dispatch_get_root_queue( _DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread); sleep(1); // workaround 6778970 } +#endif #if DISPATCH_COCOA_COMPAT - dispatch_once_f(&_dispatch_main_q_port_pred, dq, - _dispatch_runloop_queue_port_init); - _dispatch_runloop_queue_port_dispose(dq); + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + _dispatch_runloop_queue_handle_dispose(dq); #endif } @@ -4556,5 +5963,30 @@ _dispatch_queue_cleanup(void *ctxt) return _dispatch_queue_cleanup2(); } // POSIX defines that destructors are only called if 'ctxt' is non-null - DISPATCH_CRASH("Premature thread exit while a dispatch queue is running"); + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch queue is running"); +} + +static void +_dispatch_deferred_items_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit with unhandled deferred items"); +} + +static void +_dispatch_frame_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch frame is active"); +} + +static void +_dispatch_context_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch context is set"); } diff --git a/src/queue_internal.h b/src/queue_internal.h index 143ab1e..1bff7b0 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -48,95 +48,567 @@ #pragma mark - #pragma mark dispatch_queue_t -#define DISPATCH_QUEUE_HEADER \ - uint32_t volatile dq_running; \ - struct dispatch_object_s *volatile dq_items_head; \ - /* LP64 global queue cacheline boundary */ \ - struct dispatch_object_s *volatile dq_items_tail; \ +DISPATCH_ENUM(dispatch_queue_flags, uint32_t, + DQF_NONE = 0x0000, + DQF_AUTORELEASE_ALWAYS = 0x0001, + DQF_AUTORELEASE_NEVER = 0x0002, +#define _DQF_AUTORELEASE_MASK 0x0003 + DQF_THREAD_BOUND = 0x0004, // queue is bound to a thread + DQF_BARRIER_BIT = 0x0008, // queue is a barrier on its target + DQF_TARGETED = 0x0010, // queue is targeted by another object + DQF_LABEL_NEEDS_FREE = 0x0020, // queue label was strduped; need to free it + DQF_CANNOT_TRYSYNC = 0x0040, + DQF_RELEASED = 0x0080, // xref_cnt == -1 + + // only applies to sources + // + // Assuming DSF_ARMED (a), DSF_DEFERRED_DELETE (p), DSF_DELETED (d): + // + // --- + // a-- + // source states for regular operations + // (delivering event / waiting for event) + // + // ap- + // Either armed for deferred deletion delivery, waiting for an EV_DELETE, + // and the next state will be -pd (EV_DELETE delivered), + // Or, a cancellation raced with an event delivery and failed + // (EINPROGRESS), and when the event delivery happens, the next state + // will be -p-. + // + // -pd + // Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is + // gone from the kernel, but ds_dkev lives. Next state will be --d. + // + // -p- + // Received an EV_ONESHOT event (from a--), or the delivery of an event + // causing the cancellation to fail with EINPROGRESS was delivered + // (from ap-). The knote still lives, next state will be --d. + // + // --d + // Final state of the source, the knote is gone from the kernel and + // ds_dkev is freed. The source can safely be released. 
+ // + // a-d (INVALID) + // apd (INVALID) + // Setting DSF_DELETED should also always atomically clear DSF_ARMED. If + // the knote is gone from the kernel, it makes no sense whatsoever to + // have it armed. And generally speaking, once `d` or `p` has been set, + // `a` cannot do a cleared -> set transition anymore + // (see _dispatch_source_try_set_armed). + // + DSF_CANCEL_WAITER = 0x0800, // synchronous waiters for cancel + DSF_CANCELED = 0x1000, // cancellation has been requested + DSF_ARMED = 0x2000, // source is armed + DSF_DEFERRED_DELETE = 0x4000, // source is pending delete + DSF_DELETED = 0x8000, // source knote is deleted +#define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED) + + DQF_WIDTH_MASK = 0xffff0000, +#define DQF_WIDTH_SHIFT 16 +); + +#define _DISPATCH_QUEUE_HEADER(x) \ + struct os_mpsc_queue_s _as_oq[0]; \ + DISPATCH_OBJECT_HEADER(x); \ + _OS_MPSC_QUEUE_FIELDS(dq, dq_state); \ dispatch_queue_t dq_specific_q; \ - uint16_t dq_width; \ - uint16_t dq_is_thread_bound:1; \ - uint32_t volatile dq_override; \ - pthread_priority_t dq_priority; \ - mach_port_t dq_thread; \ - mach_port_t volatile dq_tqthread; \ - voucher_t dq_override_voucher; \ - unsigned long dq_serialnum; \ - const char *dq_label; \ - DISPATCH_INTROSPECTION_QUEUE_LIST; - -#define DISPATCH_QUEUE_WIDTH_MAX UINT16_MAX + union { \ + uint32_t volatile dq_atomic_flags; \ + DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + uint16_t dq_atomic_bits, \ + uint16_t dq_width \ + ); \ + }; \ + uint32_t dq_side_suspend_cnt; \ + DISPATCH_INTROSPECTION_QUEUE_HEADER; \ + dispatch_unfair_lock_s dq_sidelock + /* LP64: 32bit hole on LP64 */ + +#define DISPATCH_QUEUE_HEADER(x) \ + struct dispatch_queue_s _as_dq[0]; \ + _DISPATCH_QUEUE_HEADER(x) + +#define DISPATCH_QUEUE_ALIGN __attribute__((aligned(8))) + +#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff +#define DISPATCH_QUEUE_WIDTH_MAX 0x7ffe +#define DISPATCH_QUEUE_USES_REDIRECTION(width) \ + ({ uint16_t _width = (width); \ + _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; }) #define DISPATCH_QUEUE_CACHELINE_PADDING \ char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] #ifdef __LP64__ #define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + (sizeof(uint32_t) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ + + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) +#elif OS_OBJECT_HAVE_OBJC1 +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (11*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) #else #define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) #endif +/* + * dispatch queues `dq_state` demystified + * + ******************************************************************************* + * + * Most Significant 32 bit Word + * ---------------------------- + * + * sc: suspend count (bits 63 - 57) + * The suspend count unsurprisingly holds the suspend count of the queue + * Only 7 bits are stored inline. Extra counts are transfered in a side + * suspend count and when that has happened, the ssc: bit is set. 
+ */ +#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0200000000000000ull +#define DISPATCH_QUEUE_SUSPEND_HALF 0x40u +/* + * ssc: side suspend count (bit 56) + * This bit means that the total suspend count didn't fit in the inline + * suspend count, and that there are additional suspend counts stored in the + * `dq_side_suspend_cnt` field. + */ +#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0100000000000000ull +/* + * i: inactive bit (bit 55) + * This bit means that the object is inactive (see dispatch_activate) + */ +#define DISPATCH_QUEUE_INACTIVE 0x0080000000000000ull +/* + * na: needs activation (bit 54) + * This bit is set if the object is created inactive. It tells + * dispatch_queue_wakeup to perform various tasks at first wakeup. + * + * This bit is cleared as part of the first wakeup. Having that bit prevents + * the object from being woken up (because _dq_state_should_wakeup will say + * no), except in the dispatch_activate/dispatch_resume codepath. + */ +#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0040000000000000ull +/* + * This mask covers the suspend count (sc), side suspend count bit (ssc), + * inactive (i) and needs activation (na) bits + */ +#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xffc0000000000000ull +/* + * ib: in barrier (bit 53) + * This bit is set when the queue is currently executing a barrier + */ +#define DISPATCH_QUEUE_IN_BARRIER 0x0020000000000000ull +/* + * qf: queue full (bit 52) + * This bit is a subtle hack that allows to check for any queue width whether + * the full width of the queue is used or reserved (depending on the context) + * In other words that the queue has reached or overflown its capacity. + */ +#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull +#define DISPATCH_QUEUE_WIDTH_FULL 0x8000ull +/* + * w: width (bits 51 - 37) + * This encodes how many work items are in flight. Barriers hold `dq_width` + * of them while they run. This is encoded as a signed offset with respect, + * to full use, where the negative values represent how many available slots + * are left, and the positive values how many work items are exceeding our + * capacity. + * + * When this value is positive, then `wo` is always set to 1. + */ +#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000002000000000ull +#define DISPATCH_QUEUE_WIDTH_MASK 0x001fffe000000000ull +#define DISPATCH_QUEUE_WIDTH_SHIFT 37 +/* + * pb: pending barrier (bit 36) + * Drainers set this bit when they couldn't run the next work item and it is + * a barrier. When this bit is set, `dq_width - 1` work item slots are + * reserved so that no wakeup happens until the last work item in flight + * completes. + */ +#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000001000000000ull +/* + * d: dirty bit (bit 35) + * This bit is set when a queue transitions from empty to not empty. + * This bit is set before dq_items_head is set, with appropriate barriers. + * Any thread looking at a queue head is responsible for unblocking any + * dispatch_*_sync that could be enqueued at the beginning. + * + * Drainer perspective + * =================== + * + * When done, any "Drainer", in particular for dispatch_*_sync() handoff + * paths, exits in 3 steps, and the point of the DIRTY bit is to make + * the Drainers take the slowpath at step 2 to take into account enqueuers + * that could have made the queue non idle concurrently. 
+ * + * + * // drainer-exit step 1 + * if (slowpath(dq->dq_items_tail)) { // speculative test + * return handle_non_empty_queue_or_wakeup(dq); + * } + * // drainer-exit step 2 + * if (!_dispatch_queue_drain_try_unlock(dq, ${owned}, ...)) { + * return handle_non_empty_queue_or_wakeup(dq); + * } + * // drainer-exit step 3 + * // no need to wake up the queue, it's really empty for sure + * return; + * + * + * The crux is _dispatch_queue_drain_try_unlock(), it is a function whose + * contract is to release everything the current thread owns from the queue + * state, so that when it's successful, any other thread can acquire + * width from that queue. + * + * But, that function must fail if it sees the DIRTY bit set, leaving + * the state untouched. Leaving the state untouched is vital as it ensures + * that no other Slayer^WDrainer can rise at the same time, because the + * resource stays locked. + * + * + * Note that releasing the DRAIN_LOCK or ENQUEUE_LOCK (see below) currently + * doesn't use that pattern, and always tries to requeue. It isn't a problem + * because while holding either of these locks prevents *some* sync (the + * barrier one) codepaths to acquire the resource, the retry they perform + * at their step D (see just below) isn't affected by the state of these bits + * at all. + * + * + * Sync items perspective + * ====================== + * + * On the dispatch_*_sync() acquire side, the code must look like this: + * + * + * // step A + * if (try_acquire_sync(dq)) { + * return sync_operation_fastpath(dq, item); + * } + * + * // step B + * if (queue_push_and_inline(dq, item)) { + * atomic_store(dq->dq_items_head, item, relaxed); + * // step C + * atomic_or(dq->dq_state, DIRTY, release); + * + * // step D + * if (try_acquire_sync(dq)) { + * try_lock_transfer_or_wakeup(dq); + * } + * } + * + * // step E + * wait_for_lock_transfer(dq); + * + * + * A. If this code can acquire the resource it needs at step A, we're good. + * + * B. If the item isn't the first at enqueue time, then there is no issue + * At least another thread went through C, this thread isn't interesting + * for the possible races, responsibility to make progress is transfered + * to the thread which went through C-D. + * + * C. The DIRTY bit is set with a release barrier, after the head/tail + * has been set, so that seeing the DIRTY bit means that head/tail + * will be visible to any drainer that has the matching acquire barrier. + * + * Drainers may see the head/tail and fail to see DIRTY, in which + * case, their _dispatch_queue_drain_try_unlock() will clear the DIRTY + * bit, and fail, causing the caller to retry exactly once. + * + * D. At this stage, there's two possible outcomes: + * + * - either the acquire works this time, in which case this thread + * successfuly becomes a drainer. That's obviously the happy path. + * It means all drainers are after Step 2 (or there is no Drainer) + * + * - or the acquire fails, which means that another drainer is before + * its Step 2. Since we set the DIRTY bit on the dq_state by now, + * and that drainers manipulate the state atomically, at least one + * drainer that is still before its step 2 will fail its step 2, and + * be responsible for making progress. + * + * + * Async items perspective + * ====================== + * + * On the async codepath, when the queue becomes non empty, the queue + * is always woken up. 
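A runnable miniature of the crux described above, with the whole dq_state word reduced to a lock bit plus the DIRTY bit (everything else — width, suspend count, owner identity — is elided):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { LOCKED = 1u << 0, DIRTY = 1u << 1 };
static _Atomic unsigned state;

static bool
try_acquire(void)
{
	unsigned cur = atomic_load_explicit(&state, memory_order_relaxed);
	do {
		if (cur & LOCKED) return false;
	} while (!atomic_compare_exchange_weak_explicit(&state, &cur,
			(cur | LOCKED) & ~DIRTY, // taking the lock consumes the hint
			memory_order_acquire, memory_order_relaxed));
	return true;
}

/* drainer-exit step 2: succeeds only if the state is exactly "locked and
 * clean". A concurrent step C leaves DIRTY set, the CAS fails with the
 * state untouched, and this drainer must handle the new items. */
static bool
try_unlock(void)
{
	unsigned expected = LOCKED;
	return atomic_compare_exchange_strong_explicit(&state, &expected, 0,
			memory_order_release, memory_order_relaxed);
}

int
main(void)
{
	bool owned = try_acquire();                                    // step A
	atomic_fetch_or_explicit(&state, DIRTY, memory_order_release); // step C
	printf("acquired=%d unlock=%d\n", owned, try_unlock()); // prints 1, 0
}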
There is no point in trying to avoid that wake up + * for the async case, because it's required for the async()ed item to make + * progress: a drain of the queue must happen. + * + * So on the async "acquire" side, there is no subtlety at all. + */ +#define DISPATCH_QUEUE_DIRTY 0x0000000800000000ull +/* + * qo: (bit 34) + * Set when a queue has a useful override set. + * This bit is only cleared when the final drain_try_unlock() succeeds. + * + * When the queue dq_override is touched (overrides or-ed in), usually with + * _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set + * with a release barrier and one of these three things happen next: + * + * - the queue is enqueued, which will cause it to be drained, and the + * override to be handled by _dispatch_queue_drain_try_unlock(). + * In rare cases it could cause the queue to be queued while empty though. + * + * - the DIRTY bit is also set with a release barrier, which pairs with + * the handling of these bits by _dispatch_queue_drain_try_unlock(), + * so that dq_override is reset properly. + * + * - the queue was suspended, and _dispatch_queue_resume() will handle the + * override as part of its wakeup sequence. + */ +#define DISPATCH_QUEUE_HAS_OVERRIDE 0x0000000400000000ull +/* + * p: pended bit (bit 33) + * Set when a drain lock has been pended. When this bit is set, + * the drain lock is taken and ENQUEUED is never set. + * + * This bit marks a queue that needs further processing but was kept pended + * by an async drainer (not reenqueued) in the hope of being able to drain + * it further later. + */ +#define DISPATCH_QUEUE_DRAIN_PENDED 0x0000000200000000ull +/* + * e: enqueued bit (bit 32) + * Set when a queue is enqueued on its target queue + */ +#define DISPATCH_QUEUE_ENQUEUED 0x0000000100000000ull +/* + * dl: drain lock (bits 31-0) + * This is used by the normal drain to drain exlusively relative to other + * drain stealers (like the QoS Override codepath). It holds the identity + * (thread port) of the current drainer. + */ +#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK 0x00000002ffffffffull +#ifdef DLOCK_NOWAITERS_BIT +#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ + ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT)) +#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ + (((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\ + ^ DLOCK_NOWAITERS_BIT) +#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ + (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ + DLOCK_NOWAITERS_BIT) +#else +#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ + ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT)) +#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ + ((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK)) +#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ + (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ + DLOCK_WAITERS_BIT) +#endif +/* + ******************************************************************************* + * + * `Drainers` + * + * Drainers are parts of the code that hold the drain lock by setting its value + * to their thread port. There are two kinds: + * 1. async drainers, + * 2. lock transfer handlers. + * + * Drainers from the first category are _dispatch_queue_class_invoke and its + * stealers. Those drainers always try to reserve width at the same time they + * acquire the drain lock, to make sure they can make progress, and else exit + * quickly. + * + * Drainers from the second category are `slow` work items. 
Those run on the + * calling thread, and when done, try to transfer the width they own to the + * possible next `slow` work item, and if there is no such item, they reliquish + * that right. To do so, prior to taking any decision, they also try to own + * the full "barrier" width on the given queue. + * + * see _dispatch_try_lock_transfer_or_wakeup + * + ******************************************************************************* + * + * Enqueuing and wakeup rules + * + * Nobody should enqueue any dispatch object if it has no chance to make any + * progress. That means that queues that: + * - are suspended + * - have reached or overflown their capacity + * - are currently draining + * - are already enqueued + * + * should not try to be enqueued. + * + ******************************************************************************* + * + * Lock transfer + * + * The point of the lock transfer code is to allow pure dispatch_*_sync() + * callers to make progress without requiring the bring up of a drainer. + * There are two reason for that: + * + * - performance, as draining has to give up for dispatch_*_sync() work items, + * so waking up a queue for this is wasteful. + * + * - liveness, as with dispatch_*_sync() you burn threads waiting, you're more + * likely to hit various thread limits and may not have any drain being + * brought up if the process hits a limit. + * + * + * Lock transfer happens at the end on the dispatch_*_sync() codepaths: + * + * - obviously once a dispatch_*_sync() work item finishes, it owns queue + * width and it should try to transfer that ownership to the possible next + * queued item if it is a dispatch_*_sync() item + * + * - just before such a work item blocks to make sure that that work item + * itself isn't its own last chance to be woken up. That can happen when + * a Drainer pops up everything from the queue, and that a dispatch_*_sync() + * work item has taken the slow path then was preempted for a long time. + * + * That's why such work items, if first in the queue, must try a lock + * transfer procedure. + * + * + * For transfers where a partial width is owned, we give back that width. + * If the queue state is "idle" again, we attempt to acquire the full width. + * If that succeeds, this falls back to the full barrier lock + * transfer, else it wakes up the queue according to its state. + * + * For full barrier transfers, if items eligible for lock transfer are found, + * then they are woken up and the lock transfer is successful. + * + * If none are found, the full barrier width is released. If by doing so the + * DIRTY bit is found, releasing the full barrier width fails and transferring + * the lock is retried from scratch. 
+ */ + +#define DISPATCH_QUEUE_STATE_INIT_VALUE(width) \ + ((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT) + +/* Magic dq_state values for global queues: they have QUEUE_FULL and IN_BARRIER + * set to force the slowpath in both dispatch_barrier_sync() and dispatch_sync() + */ +#define DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE \ + (DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER) + +#define DISPATCH_QUEUE_SERIAL_DRAIN_OWNED \ + (DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL) + DISPATCH_CLASS_DECL(queue); #if !(defined(__cplusplus) && DISPATCH_INTROSPECTION) struct dispatch_queue_s { - DISPATCH_STRUCT_HEADER(queue); - DISPATCH_QUEUE_HEADER; + _DISPATCH_QUEUE_HEADER(queue); DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only -}; +} DISPATCH_QUEUE_ALIGN; #endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION) +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_main, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue); -DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue); -DISPATCH_CLASS_DECL(queue_specific_queue); +OS_OBJECT_INTERNAL_CLASS_DECL(dispatch_queue_specific_queue, dispatch_queue, + DISPATCH_OBJECT_VTABLE_HEADER(dispatch_queue_specific_queue)); + +typedef union { + struct os_mpsc_queue_s *_oq; + struct dispatch_queue_s *_dq; + struct dispatch_source_s *_ds; + struct dispatch_mach_s *_dm; + struct dispatch_queue_specific_queue_s *_dqsq; + struct dispatch_timer_aggregate_s *_dta; +#if USE_OBJC + os_mpsc_queue_t _ojbc_oq; + dispatch_queue_t _objc_dq; + dispatch_source_t _objc_ds; + dispatch_mach_t _objc_dm; + dispatch_queue_specific_queue_t _objc_dqsq; + dispatch_timer_aggregate_t _objc_dta; +#endif +} dispatch_queue_class_t __attribute__((__transparent_union__)); -void _dispatch_queue_destroy(dispatch_object_t dou); +typedef struct dispatch_thread_context_s *dispatch_thread_context_t; +typedef struct dispatch_thread_context_s { + dispatch_thread_context_t dtc_prev; + const void *dtc_key; + union { + size_t dtc_apply_nesting; + dispatch_io_t dtc_io_in_barrier; + }; +} dispatch_thread_context_s; + +typedef struct dispatch_thread_frame_s *dispatch_thread_frame_t; +typedef struct dispatch_thread_frame_s { + // must be in the same order as our TSD keys! 
+ dispatch_queue_t dtf_queue; + dispatch_thread_frame_t dtf_prev; + struct dispatch_object_s *dtf_deferred; +} dispatch_thread_frame_s; + +DISPATCH_ENUM(dispatch_queue_wakeup_target, long, + DISPATCH_QUEUE_WAKEUP_NONE = 0, + DISPATCH_QUEUE_WAKEUP_TARGET, + DISPATCH_QUEUE_WAKEUP_MGR, +); + +void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu, + pthread_priority_t pp, dispatch_wakeup_flags_t flags); +void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target); + +void _dispatch_queue_destroy(dispatch_queue_t dq); void _dispatch_queue_dispose(dispatch_queue_t dq); -void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou, - dispatch_invoke_flags_t flags); -void _dispatch_queue_push_list_slow(dispatch_queue_t dq, - pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n, - bool retained); -void _dispatch_queue_push_slow(dispatch_queue_t dq, - pthread_priority_t pp, struct dispatch_object_s *obj, bool retained); -unsigned long _dispatch_queue_probe(dispatch_queue_t dq); -dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); -dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq); -void _dispatch_queue_wakeup_and_release(dispatch_queue_t dq); -void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, +void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq); +void _dispatch_queue_suspend(dispatch_queue_t dq); +void _dispatch_queue_resume(dispatch_queue_t dq, bool activate); +void _dispatch_queue_finalize_activation(dispatch_queue_t dq); +void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags); +void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n); +void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, pthread_priority_t pp); -void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq, - pthread_priority_t pp); -void _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, - pthread_priority_t pp); -_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou); +void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq); +void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *owned, + struct dispatch_object_s **dc_ptr); +void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t to_unlock, + struct dispatch_object_s *dc); void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); -unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq); +void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, + struct dispatch_object_s *dou, pthread_priority_t pp); void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq); -unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq); +void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq); void _dispatch_runloop_queue_dispose(dispatch_queue_t dq); void _dispatch_mgr_queue_drain(void); -unsigned long 
_dispatch_mgr_queue_probe(dispatch_queue_t dq); -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES void _dispatch_mgr_priority_init(void); #else static inline void _dispatch_mgr_priority_init(void) {} #endif -void _dispatch_after_timer_callback(void *ctxt); -void _dispatch_async_redirect_invoke(void *ctxt); +#if DISPATCH_USE_KEVENT_WORKQUEUE +void _dispatch_kevent_workqueue_init(void); +#else +static inline void _dispatch_kevent_workqueue_init(void) {} +#endif void _dispatch_sync_recurse_invoke(void *ctxt); void _dispatch_apply_invoke(void *ctxt); void _dispatch_apply_redirect_invoke(void *ctxt); void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); -void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, +void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); #if DISPATCH_DEBUG @@ -168,17 +640,21 @@ enum { DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_IDX_COUNT, }; extern unsigned long volatile _dispatch_queue_serial_numbers; extern struct dispatch_queue_s _dispatch_root_queues[]; extern struct dispatch_queue_s _dispatch_mgr_q; +void _dispatch_root_queues_init(void); #if HAVE_PTHREAD_WORKQUEUE_QOS extern pthread_priority_t _dispatch_background_priority; extern pthread_priority_t _dispatch_user_initiated_priority; #endif +typedef uint8_t _dispatch_qos_class_t; + #pragma mark - #pragma mark dispatch_queue_attr_t @@ -190,20 +666,34 @@ typedef enum { DISPATCH_CLASS_DECL(queue_attr); struct dispatch_queue_attr_s { - DISPATCH_STRUCT_HEADER(queue_attr); - qos_class_t dqa_qos_class; - int dqa_relative_priority; - unsigned int dqa_overcommit:2, dqa_concurrent:1; + OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr); + _dispatch_qos_class_t dqa_qos_class; + int8_t dqa_relative_priority; + uint16_t dqa_overcommit:2; + uint16_t dqa_autorelease_frequency:2; + uint16_t dqa_concurrent:1; + uint16_t dqa_inactive:1; }; enum { - DQA_INDEX_NON_OVERCOMMIT = 0, + DQA_INDEX_UNSPECIFIED_OVERCOMMIT = 0, + DQA_INDEX_NON_OVERCOMMIT, DQA_INDEX_OVERCOMMIT, - DQA_INDEX_UNSPECIFIED_OVERCOMMIT, }; #define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3 +enum { + DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT = + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, + DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM = + DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM, + DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER = + DISPATCH_AUTORELEASE_FREQUENCY_NEVER, +}; + +#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT 3 + enum { DQA_INDEX_CONCURRENT = 0, DQA_INDEX_SERIAL, @@ -211,6 +701,13 @@ enum { #define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2 +enum { + DQA_INDEX_ACTIVE = 0, + DQA_INDEX_INACTIVE, +}; + +#define DISPATCH_QUEUE_ATTR_INACTIVE_COUNT 2 + typedef enum { DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0, DQA_INDEX_QOS_CLASS_MAINTENANCE, @@ -226,17 +723,24 @@ typedef enum { extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] [DISPATCH_QUEUE_ATTR_PRIO_COUNT] [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] - [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT]; + [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT] + [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] + [DISPATCH_QUEUE_ATTR_INACTIVE_COUNT]; + +dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); #pragma mark - #pragma mark dispatch_continuation_t -// If dc_vtable is less than 127, then the object is a continuation. 
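Both the old threshold (127) and the new one (0x1000, just below) rely on the same invariant: vtable pointers live far above the low pages of the address space, while continuation flag words are small bit sets, so a single union field can be classified by magnitude alone. An illustrative predicate in the new style:

#include <stdint.h>
#include <stdbool.h>

static inline bool
object_is_continuation(const void *do_vtable_or_flags)
{
	/* below the threshold: cannot be a mapped vtable address, so this is
	 * a dc_flags word and the object is a continuation */
	return (uintptr_t)do_vtable_or_flags < 0x1000ul;
}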
+// If dc_flags is less than 0x1000, then the object is a continuation. // Otherwise, the object has a private layout and memory management rules. The // layout until after 'do_next' must align with normal objects. #if __LP64__ #define DISPATCH_CONTINUATION_HEADER(x) \ - const void *do_vtable; \ + union { \ + const void *do_vtable; \ + uintptr_t dc_flags; \ + }; \ union { \ pthread_priority_t dc_priority; \ int dc_cache_cnt; \ @@ -247,11 +751,32 @@ extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] dispatch_function_t dc_func; \ void *dc_ctxt; \ void *dc_data; \ - void *dc_other; + void *dc_other #define _DISPATCH_SIZEOF_PTR 8 +#elif OS_OBJECT_HAVE_OBJC1 +#define DISPATCH_CONTINUATION_HEADER(x) \ + dispatch_function_t dc_func; \ + union { \ + pthread_priority_t dc_priority; \ + int dc_cache_cnt; \ + uintptr_t dc_pad; \ + }; \ + struct voucher_s *dc_voucher; \ + union { \ + const void *do_vtable; \ + uintptr_t dc_flags; \ + }; \ + struct dispatch_##x##_s *volatile do_next; \ + void *dc_ctxt; \ + void *dc_data; \ + void *dc_other +#define _DISPATCH_SIZEOF_PTR 4 #else #define DISPATCH_CONTINUATION_HEADER(x) \ - const void *do_vtable; \ + union { \ + const void *do_vtable; \ + uintptr_t dc_flags; \ + }; \ union { \ pthread_priority_t dc_priority; \ int dc_cache_cnt; \ @@ -262,7 +787,7 @@ extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] dispatch_function_t dc_func; \ void *dc_ctxt; \ void *dc_data; \ - void *dc_other; + void *dc_other #define _DISPATCH_SIZEOF_PTR 4 #endif #define _DISPATCH_CONTINUATION_PTRS 8 @@ -279,35 +804,55 @@ extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \ ~(DISPATCH_CONTINUATION_SIZE - 1u)) -#define DISPATCH_OBJ_ASYNC_BIT 0x1 -#define DISPATCH_OBJ_BARRIER_BIT 0x2 -#define DISPATCH_OBJ_GROUP_BIT 0x4 -#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8 -#define DISPATCH_OBJ_BLOCK_RELEASE_BIT 0x10 -#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x20 -#define DISPATCH_OBJ_HAS_VOUCHER_BIT 0x80 -// vtables are pointers far away from the low page in memory -#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 0xfful) +// continuation is a dispatch_sync or dispatch_barrier_sync +#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x001ul +// continuation acts as a barrier +#define DISPATCH_OBJ_BARRIER_BIT 0x002ul +// continuation resources are freed on run +// this is set on async or for non event_handler source handlers +#define DISPATCH_OBJ_CONSUME_BIT 0x004ul +// continuation has a group in dc_data +#define DISPATCH_OBJ_GROUP_BIT 0x008ul +// continuation function is a block (copied in dc_ctxt) +#define DISPATCH_OBJ_BLOCK_BIT 0x010ul +// continuation function is a block with private data, implies BLOCK_BIT +#define DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT 0x020ul +// source handler requires fetching context from source +#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul +// use the voucher from the continuation even if the queue has voucher set +#define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul struct dispatch_continuation_s { + struct dispatch_object_s _as_do[0]; DISPATCH_CONTINUATION_HEADER(continuation); }; typedef struct dispatch_continuation_s *dispatch_continuation_t; +typedef struct dispatch_continuation_vtable_s { + _OS_OBJECT_CLASS_HEADER(); + DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation); +} *dispatch_continuation_vtable_t; + #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT #if TARGET_OS_EMBEDDED #define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads -#define 
DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16 +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16 #else #define DISPATCH_CONTINUATION_CACHE_LIMIT 1024 -#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128 +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 128 #endif #endif dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); void _dispatch_continuation_free_to_heap(dispatch_continuation_t c); +void _dispatch_continuation_async(dispatch_queue_t dq, + dispatch_continuation_t dc); +void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, + dispatch_invoke_flags_t flags); +void _dispatch_continuation_invoke(dispatch_object_t dou, + voucher_t override_voucher, dispatch_invoke_flags_t flags); -#if DISPATCH_USE_MEMORYSTATUS_SOURCE +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE extern int _dispatch_continuation_cache_limit; void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c); #else @@ -316,6 +861,67 @@ void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c); _dispatch_continuation_free_to_heap(c) #endif +#pragma mark - +#pragma mark dispatch_continuation vtables + +enum { + _DC_USER_TYPE = 0, + DC_ASYNC_REDIRECT_TYPE, + DC_MACH_SEND_BARRRIER_DRAIN_TYPE, + DC_MACH_SEND_BARRIER_TYPE, + DC_MACH_RECV_BARRIER_TYPE, +#if HAVE_PTHREAD_WORKQUEUE_QOS + DC_OVERRIDE_STEALING_TYPE, + DC_OVERRIDE_OWNING_TYPE, +#endif + _DC_MAX_TYPE, +}; + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +dc_type(dispatch_continuation_t dc) +{ + return dx_type(dc->_as_do); +} + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +dc_subtype(dispatch_continuation_t dc) +{ + return dx_subtype(dc->_as_do); +} + +extern const struct dispatch_continuation_vtable_s + _dispatch_continuation_vtables[_DC_MAX_TYPE]; + +void +_dispatch_async_redirect_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags); + +#if HAVE_PTHREAD_WORKQUEUE_QOS +void +_dispatch_queue_override_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags); +#endif + +#define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE]) + +#define DC_VTABLE_ENTRY(name, ...) 
\ + [DC_##name##_TYPE] = { \ + .do_type = DISPATCH_CONTINUATION_TYPE(name), \ + __VA_ARGS__ \ + } + +#pragma mark - +#pragma mark _dispatch_set_priority_and_voucher +#if HAVE_PTHREAD_WORKQUEUE_QOS + +void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri, + mach_voucher_t kv); +voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri, + voucher_t voucher, _dispatch_thread_set_self_t flags); + +#endif #pragma mark - #pragma mark dispatch_apply_t @@ -323,7 +929,8 @@ struct dispatch_apply_s { size_t volatile da_index, da_todo; size_t da_iterations, da_nested; dispatch_continuation_t da_dc; - _dispatch_thread_semaphore_t da_sema; + dispatch_thread_event_s da_event; + dispatch_invoke_flags_t da_flags; uint32_t da_thr_cnt; }; typedef struct dispatch_apply_s *dispatch_apply_t; @@ -346,7 +953,7 @@ typedef struct dispatch_apply_s *dispatch_apply_t; voucher_t dbpd_voucher; \ dispatch_block_t dbpd_block; \ dispatch_group_t dbpd_group; \ - dispatch_queue_t volatile dbpd_queue; \ + os_mpsc_queue_t volatile dbpd_queue; \ mach_port_t dbpd_thread; #if !defined(__cplusplus) @@ -375,8 +982,47 @@ typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t; dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, pthread_priority_t priority, dispatch_block_t block); -void _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd); +void _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd); +void _dispatch_block_sync_invoke(void *block); + +void _dispatch_continuation_init_slow(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, dispatch_block_flags_t flags); +void _dispatch_continuation_update_bits(dispatch_continuation_t dc, + uintptr_t dc_flags); + +bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func); + +/* exported for tests in dispatch_trysync.c */ +DISPATCH_EXPORT DISPATCH_NOTHROW +bool _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t f); #endif /* __BLOCKS__ */ +typedef struct dispatch_pthread_root_queue_observer_hooks_s { + void (*queue_will_execute)(dispatch_queue_t queue); + void (*queue_did_execute)(dispatch_queue_t queue); +} dispatch_pthread_root_queue_observer_hooks_s; +typedef dispatch_pthread_root_queue_observer_hooks_s + *dispatch_pthread_root_queue_observer_hooks_t; + +#ifdef __APPLE__ +#define DISPATCH_IOHID_SPI 1 + +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW DISPATCH_NONNULL4 +dispatch_queue_t +_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID( + const char *label, unsigned long flags, const pthread_attr_t *attr, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks, + dispatch_block_t configure); + +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( + dispatch_queue_t queue); + +#endif // __APPLE__ + #endif diff --git a/src/semaphore.c b/src/semaphore.c index b8c8971..4d232b7 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -21,29 +21,6 @@ #include "internal.h" // semaphores are too fundamental to use the dispatch_assume*() macros -#if USE_MACH_SEM -#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ - if (slowpath((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_semaphore_t"); \ - } else if (slowpath(x)) { \ - DISPATCH_CRASH("mach semaphore API failure"); \ - } \ - } while (0) -#define 
DISPATCH_GROUP_VERIFY_KR(x) do { \ - if (slowpath((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_group_t"); \ - } else if (slowpath(x)) { \ - DISPATCH_CRASH("mach semaphore API failure"); \ - } \ - } while (0) -#elif USE_POSIX_SEM -#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ - if (slowpath((x) == -1)) { \ - DISPATCH_CRASH("POSIX semaphore API failure"); \ - } \ - } while (0) -#endif - #if USE_WIN32_SEM // rdar://problem/8428132 static DWORD best_resolution = 1; // 1ms @@ -94,24 +71,49 @@ DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); #pragma mark - -#pragma mark dispatch_semaphore_t +#pragma mark dispatch_semaphore_class_t static void -_dispatch_semaphore_init(long value, dispatch_object_t dou) +_dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau) { - dispatch_semaphore_t dsema = dou._dsema; + struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; - dsema->do_next = (dispatch_semaphore_t)DISPATCH_OBJECT_LISTLESS; + dsema->do_next = DISPATCH_OBJECT_LISTLESS; dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); dsema->dsema_value = value; - dsema->dsema_orig = value; #if USE_POSIX_SEM int ret = sem_init(&dsema->dsema_sem, 0, 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); #endif } +static void +_dispatch_semaphore_class_dispose(dispatch_semaphore_class_t dsemau) +{ + struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; + +#if USE_MACH_SEM + kern_return_t kr; + if (dsema->dsema_port) { + kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); + DISPATCH_VERIFY_MIG(kr); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } + dsema->dsema_port = MACH_PORT_DEAD; +#elif USE_POSIX_SEM + int ret = sem_destroy(&dsema->dsema_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + if (dsema->dsema_handle) { + CloseHandle(dsema->dsema_handle); + } +#endif +} + +#pragma mark - +#pragma mark dispatch_semaphore_t + dispatch_semaphore_t dispatch_semaphore_create(long value) { @@ -121,14 +123,13 @@ dispatch_semaphore_create(long value) // equal to the number of waiting threads. Therefore it is bogus to // initialize the semaphore with a negative value. 
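For context, a usage sketch (not part of the patch) of the initializations this comment allows: a positive count models a resource pool, zero models pure signaling, and a negative count is rejected because waits past zero must be balanced by signals before the semaphore may be destroyed.

#include <dispatch/dispatch.h>
#include <assert.h>

void
semaphore_create_examples(void)
{
	// resource pool: up to 4 concurrent holders before waiters block
	dispatch_semaphore_t pool = dispatch_semaphore_create(4);
	// pure signaling: every wait must be paired with a signal
	dispatch_semaphore_t gate = dispatch_semaphore_create(0);
	// bogus: rejected (NULL per the documented contract, see below)
	assert(dispatch_semaphore_create(-1) == NULL);

	dispatch_semaphore_signal(gate);
	dispatch_semaphore_wait(gate, DISPATCH_TIME_FOREVER);
	dispatch_release(pool);
	dispatch_release(gate);
}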
if (value < 0) { - return NULL; + return DISPATCH_BAD_INPUT; } dsema = (dispatch_semaphore_t)_dispatch_alloc(DISPATCH_VTABLE(semaphore), - sizeof(struct dispatch_semaphore_s) - - sizeof(dsema->dsema_notify_head) - - sizeof(dsema->dsema_notify_tail)); - _dispatch_semaphore_init(value, dsema); + sizeof(struct dispatch_semaphore_s)); + _dispatch_semaphore_class_init(value, dsema); + dsema->dsema_orig = value; return dsema; } @@ -142,7 +143,7 @@ _dispatch_semaphore_create_port(semaphore_t *s4) if (*s4) { return; } - _dispatch_safe_fork = false; + _dispatch_fork_becomes_unsafe(); // lazily allocate the semaphore port @@ -157,7 +158,7 @@ _dispatch_semaphore_create_port(semaphore_t *s4) _dispatch_temporary_resource_shortage(); } - if (!dispatch_atomic_cmpxchg(s4, 0, tmp, relaxed)) { + if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { kr = semaphore_destroy(mach_task_self(), tmp); DISPATCH_VERIFY_MIG(kr); DISPATCH_SEMAPHORE_VERIFY_KR(kr); @@ -179,7 +180,7 @@ _dispatch_semaphore_create_handle(HANDLE *s4) _dispatch_temporary_resource_shortage(); } - if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { + if (!os_atomic_cmpxchg(s4, 0, tmp)) { CloseHandle(tmp); } } @@ -191,26 +192,11 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) dispatch_semaphore_t dsema = dou._dsema; if (dsema->dsema_value < dsema->dsema_orig) { - DISPATCH_CLIENT_CRASH( - "Semaphore/group object deallocated while in use"); + DISPATCH_CLIENT_CRASH(dsema->dsema_orig - dsema->dsema_value, + "Semaphore object deallocated while in use"); } -#if USE_MACH_SEM - kern_return_t kr; - if (dsema->dsema_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } - dsema->dsema_port = MACH_PORT_DEAD; -#elif USE_POSIX_SEM - int ret = sem_destroy(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - if (dsema->dsema_handle) { - CloseHandle(dsema->dsema_handle); - } -#endif + _dispatch_semaphore_class_dispose(dsema); } size_t @@ -235,17 +221,6 @@ DISPATCH_NOINLINE long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) { - // Before dsema_sent_ksignals is incremented we can rely on the reference - // held by the waiter. However, once this value is incremented the waiter - // may return between the atomic increment and the semaphore_signal(), - // therefore an explicit reference must be held in order to safely access - // dsema after the atomic increment. 
- _dispatch_retain(dsema); - -#if USE_MACH_SEM || USE_POSIX_SEM - (void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals, relaxed); -#endif - #if USE_MACH_SEM _dispatch_semaphore_create_port(&dsema->dsema_port); kern_return_t kr = semaphore_signal(dsema->dsema_port); @@ -258,20 +233,19 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); dispatch_assume(ret); #endif - - _dispatch_release(dsema); return 1; } long dispatch_semaphore_signal(dispatch_semaphore_t dsema) { - long value = dispatch_atomic_inc2o(dsema, dsema_value, release); + long value = os_atomic_inc2o(dsema, dsema_value, release); if (fastpath(value > 0)) { return 0; } if (slowpath(value == LONG_MIN)) { - DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()"); + DISPATCH_CLIENT_CRASH(value, + "Unbalanced call to dispatch_semaphore_signal()"); } return _dispatch_semaphore_signal_slow(dsema); } @@ -296,34 +270,12 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, DWORD wait_result; #endif -#if USE_MACH_SEM || USE_POSIX_SEM -again: - // Mach semaphores appear to sometimes spuriously wake up. Therefore, - // we keep a parallel count of the number of times a Mach semaphore is - // signaled (6880961). - orig = dsema->dsema_sent_ksignals; - while (orig) { - if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_sent_ksignals, orig, - orig - 1, &orig, relaxed)) { - return 0; - } - } -#endif - #if USE_MACH_SEM _dispatch_semaphore_create_port(&dsema->dsema_port); #elif USE_WIN32_SEM _dispatch_semaphore_create_handle(&dsema->dsema_handle); #endif - // From xnu/osfmk/kern/sync_sema.c: - // wait_semaphore->count = -1; /* we don't keep an actual count */ - // - // The code above does not match the documentation, and that fact is - // not surprising. The documented semantics are clumsy to use in any - // practical way. The above hack effectively tricks the rest of the - // Mach semaphore logic to behave like the libdispatch algorithm. 
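What the deleted `again:` loop implemented, in general form (a POSIX-semaphore sketch, not the original code): the kernel wakeup is treated only as a hint and a parallel count of real signals is re-checked, so a spurious wakeup can never be mistaken for a signal.

#include <semaphore.h>
#include <stdatomic.h>

static sem_t sem;                  // assume sem_init(&sem, 0, 0) was done
static _Atomic long sent_signals;  // parallel count of real signals

void
signal_checked(void)
{
	atomic_fetch_add_explicit(&sent_signals, 1, memory_order_relaxed);
	sem_post(&sem);
}

void
wait_checked(void)
{
	for (;;) {
		long n = atomic_load_explicit(&sent_signals, memory_order_relaxed);
		while (n > 0) { // consume a recorded signal if one exists
			if (atomic_compare_exchange_weak_explicit(&sent_signals,
					&n, n - 1, memory_order_relaxed,
					memory_order_relaxed)) {
				return;
			}
		}
		sem_wait(&sem); // wakeup is only a hint; loop and re-check
	}
}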
- switch (timeout) { default: #if USE_MACH_SEM @@ -340,13 +292,13 @@ again: } #elif USE_POSIX_SEM do { - uint64_t nsec = _dispatch_timeout(timeout); + uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); } while (ret == -1 && errno == EINTR); - if (ret == -1 && errno != ETIMEDOUT) { + if (!(ret == -1 && errno == ETIMEDOUT)) { DISPATCH_SEMAPHORE_VERIFY_RET(ret); break; } @@ -365,7 +317,7 @@ again: case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { - if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, + if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; @@ -393,17 +345,13 @@ again: #endif break; } -#if USE_MACH_SEM || USE_POSIX_SEM - goto again; -#else return 0; -#endif } long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { - long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire); + long value = os_atomic_dec2o(dsema, dsema_value, acquire); if (fastpath(value >= 0)) { return 0; } @@ -418,11 +366,10 @@ static inline dispatch_group_t _dispatch_group_create_with_count(long count) { dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc( - DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s)); - _dispatch_semaphore_init(LONG_MAX - count, dg); + DISPATCH_VTABLE(group), sizeof(struct dispatch_group_s)); + _dispatch_semaphore_class_init(count, dg); if (count) { - dispatch_atomic_store2o((dispatch_semaphore_t)dg, do_ref_cnt, 1, - relaxed); // + os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // } return dg; } @@ -442,47 +389,48 @@ _dispatch_group_create_and_enter(void) void dispatch_group_enter(dispatch_group_t dg) { - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - long value = dispatch_atomic_dec_orig2o(dsema, dsema_value, acquire); - if (value == LONG_MAX) { - return _dispatch_retain(dg); // - } - if (slowpath(value <= 0)) { - DISPATCH_CLIENT_CRASH( + long value = os_atomic_inc_orig2o(dg, dg_value, acquire); + if (slowpath((unsigned long)value >= (unsigned long)LONG_MAX)) { + DISPATCH_CLIENT_CRASH(value, "Too many nested calls to dispatch_group_enter()"); } + if (value == 0) { + _dispatch_retain(dg); // + } } DISPATCH_NOINLINE static long -_dispatch_group_wake(dispatch_semaphore_t dsema, bool needs_release) +_dispatch_group_wake(dispatch_group_t dg, bool needs_release) { - dispatch_continuation_t next, head, tail = NULL, dc; + dispatch_continuation_t next, head, tail = NULL; long rval; - head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL, relaxed); + // cannot use os_mpsc_capture_snapshot() because we can have concurrent + // _dispatch_group_wake() calls + head = os_atomic_xchg2o(dg, dg_notify_head, NULL, relaxed); if (head) { // snapshot before anything is notified/woken - tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL, relaxed); + tail = os_atomic_xchg2o(dg, dg_notify_tail, NULL, release); } - rval = (long)dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0, relaxed); + rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed); if (rval) { // wake group waiters #if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); + _dispatch_semaphore_create_port(&dg->dg_port); do { - kern_return_t kr = semaphore_signal(dsema->dsema_port); + kern_return_t kr = semaphore_signal(dg->dg_port); 
DISPATCH_GROUP_VERIFY_KR(kr); } while (--rval); #elif USE_POSIX_SEM do { - int ret = sem_post(&dsema->dsema_sem); + int ret = sem_post(&dg->dg_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); } while (--rval); #elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); + _dispatch_semaphore_create_handle(&dg->dg_handle); int ret; - ret = ReleaseSemaphore(dsema->dsema_handle, rval, NULL); + ret = ReleaseSemaphore(dg->dg_handle, rval, NULL); dispatch_assume(ret); #else #error "No supported semaphore type" @@ -491,22 +439,15 @@ _dispatch_group_wake(dispatch_semaphore_t dsema, bool needs_release) if (head) { // async group notify blocks do { - next = fastpath(head->do_next); - if (!next && head != tail) { - _dispatch_wait_until(next = fastpath(head->do_next)); - } + next = os_mpsc_pop_snapshot_head(head, tail, do_next); dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data; - dc = _dispatch_continuation_free_cacheonly(head); - dispatch_async_f(dsn_queue, head->dc_ctxt, head->dc_func); + _dispatch_continuation_async(dsn_queue, head); _dispatch_release(dsn_queue); - if (slowpath(dc)) { - _dispatch_continuation_free_to_cache_limit(dc); - } } while ((head = next)); - _dispatch_release(dsema); + _dispatch_release(dg); } if (needs_release) { - _dispatch_release(dsema); // + _dispatch_release(dg); // } return 0; } @@ -514,21 +455,53 @@ _dispatch_group_wake(dispatch_semaphore_t dsema, bool needs_release) void dispatch_group_leave(dispatch_group_t dg) { - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - long value = dispatch_atomic_inc2o(dsema, dsema_value, release); + long value = os_atomic_dec2o(dg, dg_value, release); + if (slowpath(value == 0)) { + return (void)_dispatch_group_wake(dg, true); + } if (slowpath(value < 0)) { - DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()"); + DISPATCH_CLIENT_CRASH(value, + "Unbalanced call to dispatch_group_leave()"); } - if (slowpath(value == LONG_MAX)) { - return (void)_dispatch_group_wake(dsema, true); +} + +void +_dispatch_group_dispose(dispatch_object_t dou) +{ + dispatch_group_t dg = dou._dg; + + if (dg->dg_value) { + DISPATCH_CLIENT_CRASH(dg->dg_value, + "Group object deallocated while in use"); } + + _dispatch_semaphore_class_dispose(dg); +} + +size_t +_dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) +{ + dispatch_group_t dg = dou._dg; + + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(dg), dg); + offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); +#if USE_MACH_SEM + offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + dg->dg_port); +#endif + offset += dsnprintf(&buf[offset], bufsiz - offset, + "count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters); + return offset; } DISPATCH_NOINLINE static long -_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) +_dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) { - long orig, value; + long value; + int orig_waiters; #if USE_MACH_SEM mach_timespec_t _timeout; @@ -543,37 +516,29 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) DWORD wait_result; #endif -again: // check before we cause another signal to be sent by incrementing - // dsema->dsema_group_waiters - value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 - if (value == LONG_MAX) { - return _dispatch_group_wake(dsema, false); - } - // Mach semaphores appear to sometimes spuriously wake up. 
Therefore, - // we keep a parallel count of the number of times a Mach semaphore is - // signaled (6880961). - (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters, relaxed); + // dg->dg_waiters + value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 + if (value == 0) { + return _dispatch_group_wake(dg, false); + } + + (void)os_atomic_inc2o(dg, dg_waiters, relaxed); // check the values again in case we need to wake any threads - value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 - if (value == LONG_MAX) { - return _dispatch_group_wake(dsema, false); + value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 + if (value == 0) { + _dispatch_group_wake(dg, false); + // Fall through to consume the extra signal, forcing timeout to avoid + // useless setups as it won't block + timeout = DISPATCH_TIME_FOREVER; } #if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); + _dispatch_semaphore_create_port(&dg->dg_port); #elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); + _dispatch_semaphore_create_handle(&dg->dg_handle); #endif - // From xnu/osfmk/kern/sync_sema.c: - // wait_semaphore->count = -1; /* we don't keep an actual count */ - // - // The code above does not match the documentation, and that fact is - // not surprising. The documented semantics are clumsy to use in any - // practical way. The above hack effectively tricks the rest of the - // Mach semaphore logic to behave like the libdispatch algorithm. - switch (timeout) { default: #if USE_MACH_SEM @@ -581,7 +546,7 @@ again: uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); + kr = slowpath(semaphore_timedwait(dg->dg_port, _timeout)); } while (kr == KERN_ABORTED); if (kr != KERN_OPERATION_TIMED_OUT) { @@ -590,10 +555,10 @@ again: } #elif USE_POSIX_SEM do { - uint64_t nsec = _dispatch_timeout(timeout); + uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); + ret = slowpath(sem_timedwait(&dg->dg_sem, &_timeout)); } while (ret == -1 && errno == EINTR); if (!(ret == -1 && errno == ETIMEDOUT)) { @@ -604,19 +569,19 @@ again: nsec = _dispatch_timeout(timeout); msec = (DWORD)(nsec / (uint64_t)1000000); resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + wait_result = WaitForSingleObject(dg->dg_handle, msec); _pop_timer_resolution(resolution); if (wait_result != WAIT_TIMEOUT) { break; } #endif // Fall through and try to undo the earlier change to - // dsema->dsema_group_waiters + // dg->dg_waiters case DISPATCH_TIME_NOW: - orig = dsema->dsema_group_waiters; - while (orig) { - if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_group_waiters, orig, - orig - 1, &orig, relaxed)) { + orig_waiters = dg->dg_waiters; + while (orig_waiters) { + if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters, + orig_waiters - 1, &orig_waiters, relaxed)) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; #elif USE_POSIX_SEM || USE_WIN32_SEM @@ -630,28 +595,26 @@ again: case DISPATCH_TIME_FOREVER: #if USE_MACH_SEM do { - kr = semaphore_wait(dsema->dsema_port); + kr = semaphore_wait(dg->dg_port); } while (kr == KERN_ABORTED); 
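/*
 * [editor's note, illustrative only] The timeout switch above serves
 * both the bounded and the DISPATCH_TIME_FOREVER waits; a caller
 * reaches the bounded arm by passing a computed dispatch_time_t
 * (the helper name below is ours, not libdispatch's):
 */
#include <dispatch/dispatch.h>
#include <stdbool.h>

static bool
group_idle_within_one_second(dispatch_group_t g)
{
	dispatch_time_t t = dispatch_time(DISPATCH_TIME_NOW,
			(int64_t)NSEC_PER_SEC);
	// dispatch_group_wait() returns zero on success and nonzero when
	// _dispatch_group_wait_slow() timed out
	return dispatch_group_wait(g, t) == 0;
}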
DISPATCH_GROUP_VERIFY_KR(kr); #elif USE_POSIX_SEM do { - ret = sem_wait(&dsema->dsema_sem); + ret = sem_wait(&dg->dg_sem); } while (ret == -1 && errno == EINTR); DISPATCH_SEMAPHORE_VERIFY_RET(ret); #elif USE_WIN32_SEM - WaitForSingleObject(dsema->dsema_handle, INFINITE); + WaitForSingleObject(dg->dg_handle, INFINITE); #endif break; } - goto again; - } + return 0; +} long dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) { - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - - if (dsema->dsema_value == LONG_MAX) { + if (dg->dg_value == 0) { return 0; } if (timeout == 0) { @@ -662,152 +625,45 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) return (-1); #endif } - return _dispatch_group_wait_slow(dsema, timeout); + return _dispatch_group_wait_slow(dg, timeout); } -DISPATCH_NOINLINE -void -dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, - void (*func)(void *)) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_continuation_t dsn) { - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - dispatch_continuation_t prev, dsn = _dispatch_continuation_alloc(); - dsn->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; dsn->dc_data = dq; - dsn->dc_ctxt = ctxt; - dsn->dc_func = func; dsn->do_next = NULL; _dispatch_retain(dq); - prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn, release); - if (fastpath(prev)) { - prev->do_next = dsn; - } else { + if (os_mpsc_push_update_tail(dg, dg_notify, dsn, do_next)) { _dispatch_retain(dg); - dispatch_atomic_store2o(dsema, dsema_notify_head, dsn, seq_cst); + os_atomic_store2o(dg, dg_notify_head, dsn, ordered); // seq_cst with atomic store to notify_head - if (dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) { - _dispatch_group_wake(dsema, false); + if (os_atomic_load2o(dg, dg_value, ordered) == 0) { + _dispatch_group_wake(dg, false); } } } -#ifdef __BLOCKS__ -void -dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, - dispatch_block_t db) -{ - dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db), - _dispatch_call_block_and_release); -} -#endif - -#pragma mark - -#pragma mark _dispatch_thread_semaphore_t - -_dispatch_thread_semaphore_t -_dispatch_thread_semaphore_create(void) -{ - _dispatch_safe_fork = false; -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_create(); -#elif USE_MACH_SEM - semaphore_t s4; - kern_return_t kr; - while (slowpath(kr = semaphore_create(mach_task_self(), &s4, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - return s4; -#elif USE_POSIX_SEM - sem_t s4; - int ret = sem_init(&s4, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - return s4; -#elif USE_WIN32_SEM - HANDLE tmp; - while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { - _dispatch_temporary_resource_shortage(); - } - return (_dispatch_thread_semaphore_t)tmp; -#else -#error "No supported semaphore type" -#endif -} - +DISPATCH_NOINLINE void -_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) +dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_dispose(sema); -#elif USE_MACH_SEM - semaphore_t s4 = (semaphore_t)sema; - kern_return_t kr = semaphore_destroy(mach_task_self(), s4); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - sem_t s4 = (sem_t)sema; - int ret = 
sem_destroy(&s4); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - // XXX: signal the semaphore? - WINBOOL success; - success = CloseHandle((HANDLE)sema); - dispatch_assume(success); -#else -#error "No supported semaphore type" -#endif + dispatch_continuation_t dsn = _dispatch_continuation_alloc(); + _dispatch_continuation_init_f(dsn, dq, ctxt, func, 0, 0, + DISPATCH_OBJ_CONSUME_BIT); + _dispatch_group_notify(dg, dq, dsn); } +#ifdef __BLOCKS__ void -_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) +dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_block_t db) { - // assumed to contain a release barrier -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_signal(sema); -#elif USE_MACH_SEM - semaphore_t s4 = (semaphore_t)sema; - kern_return_t kr = semaphore_signal(s4); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - sem_t s4 = (sem_t)sema; - int ret = sem_post(&s4); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - int ret; - ret = ReleaseSemaphore((HANDLE)sema, 1, NULL); - dispatch_assume(ret); -#else -#error "No supported semaphore type" -#endif + dispatch_continuation_t dsn = _dispatch_continuation_alloc(); + _dispatch_continuation_init(dsn, dq, db, 0, 0, DISPATCH_OBJ_CONSUME_BIT); + _dispatch_group_notify(dg, dq, dsn); } - -void -_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) -{ - // assumed to contain an acquire barrier -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_wait(sema); -#elif USE_MACH_SEM - semaphore_t s4 = (semaphore_t)sema; - kern_return_t kr; - do { - kr = semaphore_wait(s4); - } while (slowpath(kr == KERN_ABORTED)); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - sem_t s4 = (sem_t)sema; - int ret; - do { - ret = sem_wait(&s4); - } while (slowpath(ret != 0)); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - DWORD wait_result; - do { - wait_result = WaitForSingleObject((HANDLE)sema, INFINITE); - } while (wait_result != WAIT_OBJECT_0); -#else -#error "No supported semaphore type" #endif -} diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index 11261c3..dceda6d 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -29,65 +29,56 @@ struct dispatch_queue_s; -DISPATCH_CLASS_DECL(semaphore); -struct dispatch_semaphore_s { - DISPATCH_STRUCT_HEADER(semaphore); #if USE_MACH_SEM - semaphore_t dsema_port; +#define DISPATCH_OS_SEMA_FIELD(base) semaphore_t base##_port #elif USE_POSIX_SEM - sem_t dsema_sem; +#define DISPATCH_OS_SEMA_FIELD(base) sem_t base##_sem #elif USE_WIN32_SEM - HANDLE dsema_handle; +#define DISPATCH_OS_SEMA_FIELD(base) HANDLE base##_handle #else #error "No supported semaphore type" #endif + +#define DISPATCH_SEMAPHORE_HEADER(cls, ns) \ + DISPATCH_OBJECT_HEADER(cls); \ + long volatile ns##_value; \ + DISPATCH_OS_SEMA_FIELD(ns) + +struct dispatch_semaphore_header_s { + DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); +}; + +DISPATCH_CLASS_DECL(semaphore); +struct dispatch_semaphore_s { + DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); long dsema_orig; - long volatile dsema_value; - union { - long volatile dsema_sent_ksignals; - long volatile dsema_group_waiters; - }; - struct dispatch_continuation_s *volatile dsema_notify_head; - struct dispatch_continuation_s *volatile dsema_notify_tail; }; DISPATCH_CLASS_DECL(group); +struct dispatch_group_s { + DISPATCH_SEMAPHORE_HEADER(group, dg); + int volatile dg_waiters; + struct dispatch_continuation_s *volatile dg_notify_head; + struct dispatch_continuation_s 
*volatile dg_notify_tail; +}; + +typedef union { + struct dispatch_semaphore_header_s *_dsema_hdr; + struct dispatch_semaphore_s *_dsema; + struct dispatch_group_s *_dg; +#if USE_OBJC + dispatch_semaphore_t _objc_dsema; + dispatch_group_t _objc_dg; +#endif +} dispatch_semaphore_class_t __attribute__((__transparent_union__)); dispatch_group_t _dispatch_group_create_and_enter(void); +void _dispatch_group_dispose(dispatch_object_t dou); +size_t _dispatch_group_debug(dispatch_object_t dou, char *buf, + size_t bufsiz); + void _dispatch_semaphore_dispose(dispatch_object_t dou); size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); -typedef uintptr_t _dispatch_thread_semaphore_t; - -_dispatch_thread_semaphore_t _dispatch_thread_semaphore_create(void); -void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t); -void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t); -void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t); - -DISPATCH_ALWAYS_INLINE -static inline _dispatch_thread_semaphore_t -_dispatch_get_thread_semaphore(void) -{ - _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - if (slowpath(!sema)) { - return _dispatch_thread_semaphore_create(); - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return sema; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema) -{ - _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema); - if (slowpath(old_sema)) { - return _dispatch_thread_semaphore_dispose(old_sema); - } -} - #endif diff --git a/src/shims.h b/src/shims.h index ae7f1c3..db28822 100644 --- a/src/shims.h +++ b/src/shims.h @@ -34,11 +34,7 @@ #include #define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE #define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED -#ifndef QOS_CLASS_LEGACY -#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_LEGACY -#else #define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT -#endif #define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY #define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND #define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED @@ -51,9 +47,18 @@ typedef unsigned long pthread_priority_t; #else // sys/qos_private.h #define _DISPATCH_QOS_CLASS_MAINTENANCE 0x05 #endif // sys/qos_private.h +#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#endif +#ifndef _PTHREAD_PRIORITY_INHERIT_FLAG +#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 +#endif #ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG #define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 #endif +#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 +#endif #ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 #endif @@ -63,16 +68,38 @@ typedef unsigned long pthread_priority_t; #ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG #define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 #endif +#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#endif +#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#endif + #else // HAVE_PTHREAD_QOS_H typedef unsigned int qos_class_t; typedef unsigned long pthread_priority_t; #define 
QOS_MIN_RELATIVE_PRIORITY (-15) +#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) #define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 +#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) +#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 #define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 #define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 #define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #endif // HAVE_PTHREAD_QOS_H + +#ifdef __linux__ +#include "shims/linux_stubs.h" +#endif + +typedef uint32_t dispatch_priority_t; +#define DISPATCH_SATURATED_OVERRIDE ((dispatch_priority_t)UINT32_MAX) + #ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE enum { _DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21, @@ -99,6 +126,10 @@ enum { #include #endif +#if __has_include() +#include +#endif + #if !HAVE_DECL_FD_COPY #define FD_COPY(f, t) (void)(*(t) = *(f)) #endif @@ -128,9 +159,19 @@ _pthread_workqueue_override_start_direct(mach_port_t thread, } #endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140716 +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20150319 +static inline int +_pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, + pthread_priority_t priority, mach_port_t *ulock_addr) +{ + (void)ulock_addr; + return _pthread_workqueue_override_start_direct(thread, priority); +} +#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20150319 + #if PTHREAD_WORKQUEUE_SPI_VERSION < 20140707 static inline int -_pthread_override_qos_class_start_direct(pthread_t thread, +_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority) { (void)thread; (void)priority; @@ -145,6 +186,27 @@ _pthread_override_qos_class_end_direct(mach_port_t thread) } #endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140707 +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20150325 +static inline int +_pthread_qos_override_start_direct(mach_port_t thread, + pthread_priority_t priority, void *resource) +{ + (void)resource; + return _pthread_override_qos_class_start_direct(thread, priority); +} + +static inline int +_pthread_qos_override_end_direct(mach_port_t thread, void *resource) +{ + (void)resource; + return _pthread_override_qos_class_end_direct(thread); +} +#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20150325 + +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20160427 +#define _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND 0 +#endif + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -156,13 +218,19 @@ void __builtin_trap(void); #endif #if DISPATCH_HW_CONFIG_UP -#define DISPATCH_ATOMIC_UP 1 +#define OS_ATOMIC_UP 1 +#else +#define OS_ATOMIC_UP 0 #endif + +#ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" +#endif #include "shims/atomic_sfb.h" #include "shims/tsd.h" #include "shims/yield.h" +#include "shims/lock.h" #include "shims/hw_config.h" #include "shims/perfmon.h" @@ -170,6 +238,47 @@ void __builtin_trap(void); #include "shims/getprogname.h" #include "shims/time.h" +#if __has_include() +#include +#elif __has_builtin(__builtin_add_overflow) +#define os_add_overflow(a, b, c) __builtin_add_overflow(a, b, c) +#define os_sub_overflow(a, b, c) __builtin_sub_overflow(a, b, c) +#define os_mul_overflow(a, b, c) __builtin_mul_overflow(a, b, c) +#else +#error unsupported compiler +#endif + +#ifndef os_mul_and_add_overflow +#define 
os_mul_and_add_overflow(a, x, b, res) __extension__({ \ + __typeof(*(res)) _tmp; \ + bool _s, _t; \ + _s = os_mul_overflow((a), (x), &_tmp); \ + _t = os_add_overflow((b), _tmp, (res)); \ + _s | _t; \ +}) +#endif + + +#if __has_feature(c_static_assert) +#define __dispatch_is_array(x) \ + _Static_assert(!__builtin_types_compatible_p(typeof((x)[0]) *, typeof(x)), \ + #x " isn't an array") +#define countof(x) \ + ({ __dispatch_is_array(x); sizeof(x) / sizeof((x)[0]); }) +#else +#define countof(x) (sizeof(x) / sizeof(x[0])) +#endif + +DISPATCH_ALWAYS_INLINE +static inline void * +_dispatch_mempcpy(void *ptr, const void *data, size_t len) +{ + memcpy(ptr, data, len); + return (char *)ptr + len; +} +#define _dispatch_memappend(ptr, e) \ + _dispatch_mempcpy(ptr, e, sizeof(*(e))) + #ifdef __APPLE__ // Clear the stack before calling long-running thread-handler functions that // never return (and don't take arguments), to facilitate leak detection and diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 24c113b..5199477 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -27,54 +27,28 @@ #ifndef __DISPATCH_SHIMS_ATOMIC__ #define __DISPATCH_SHIMS_ATOMIC__ -// generate error during codegen -#define _dispatch_atomic_unimplemented() \ - ({ __asm__(".err unimplemented"); }) - -#pragma mark - -#pragma mark memory_order - -typedef enum _dispatch_atomic_memory_order -{ - _dispatch_atomic_memory_order_relaxed, - _dispatch_atomic_memory_order_consume, - _dispatch_atomic_memory_order_acquire, - _dispatch_atomic_memory_order_release, - _dispatch_atomic_memory_order_acq_rel, - _dispatch_atomic_memory_order_seq_cst, -} _dispatch_atomic_memory_order; - -#if !DISPATCH_ATOMIC_UP - -#define dispatch_atomic_memory_order_relaxed \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_acquire \ - _dispatch_atomic_memory_order_acquire -#define dispatch_atomic_memory_order_release \ - _dispatch_atomic_memory_order_release -#define dispatch_atomic_memory_order_acq_rel \ - _dispatch_atomic_memory_order_acq_rel -#define dispatch_atomic_memory_order_seq_cst \ - _dispatch_atomic_memory_order_seq_cst - -#else // DISPATCH_ATOMIC_UP +#if !__has_extension(c_atomic) || \ + !__has_extension(c_generic_selections) || \ + !__has_include() +#error libdispatch requires C11 with and generic selections +#endif -#define dispatch_atomic_memory_order_relaxed \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_acquire \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_release \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_acq_rel \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_seq_cst \ - _dispatch_atomic_memory_order_relaxed +#include -#endif // DISPATCH_ATOMIC_UP +#define memory_order_ordered memory_order_seq_cst -#if __has_extension(c_generic_selections) -#define _dispatch_atomic_basetypeof(p) \ +#define _os_atomic_basetypeof(p) \ typeof(*_Generic((p), \ + char*: (char*)(p), \ + volatile char*: (char*)(p), \ + signed char*: (signed char*)(p), \ + volatile signed char*: (signed char*)(p), \ + unsigned char*: (unsigned char*)(p), \ + volatile unsigned char*: (unsigned char*)(p), \ + short*: (short*)(p), \ + volatile short*: (short*)(p), \ + unsigned short*: (unsigned short*)(p), \ + volatile unsigned short*: 
(unsigned short*)(p), \ int*: (int*)(p), \ volatile int*: (int*)(p), \ unsigned int*: (unsigned int*)(p), \ @@ -87,15 +61,22 @@ typedef enum _dispatch_atomic_memory_order volatile long long*: (long long*)(p), \ unsigned long long*: (unsigned long long*)(p), \ volatile unsigned long long*: (unsigned long long*)(p), \ + const void**: (const void**)(p), \ + const void*volatile*: (const void**)(p), \ default: (void**)(p))) -#endif - -#if __has_extension(c_atomic) && __has_extension(c_generic_selections) -#pragma mark - -#pragma mark c11 -#define _dispatch_atomic_c11_atomic(p) \ +#define _os_atomic_c11_atomic(p) \ _Generic((p), \ + char*: (_Atomic(char)*)(p), \ + volatile char*: (volatile _Atomic(char)*)(p), \ + signed char*: (_Atomic(signed char)*)(p), \ + volatile signed char*: (volatile _Atomic(signed char)*)(p), \ + unsigned char*: (_Atomic(unsigned char)*)(p), \ + volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \ + short*: (_Atomic(short)*)(p), \ + volatile short*: (volatile _Atomic(short)*)(p), \ + unsigned short*: (_Atomic(unsigned short)*)(p), \ + volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \ int*: (_Atomic(int)*)(p), \ volatile int*: (volatile _Atomic(int)*)(p), \ unsigned int*: (_Atomic(unsigned int)*)(p), \ @@ -109,263 +90,154 @@ typedef enum _dispatch_atomic_memory_order unsigned long long*: (_Atomic(unsigned long long)*)(p), \ volatile unsigned long long*: \ (volatile _Atomic(unsigned long long)*)(p), \ + const void**: (_Atomic(const void*)*)(p), \ + const void*volatile*: (volatile _Atomic(const void*)*)(p), \ default: (volatile _Atomic(void*)*)(p)) -#define _dispatch_atomic_barrier(m) \ - ({ __c11_atomic_thread_fence(dispatch_atomic_memory_order_##m); }) -#define dispatch_atomic_load(p, m) \ - ({ _dispatch_atomic_basetypeof(p) _r = \ - __c11_atomic_load(_dispatch_atomic_c11_atomic(p), \ - dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) -#define dispatch_atomic_store(p, v, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v); \ - __c11_atomic_store(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); }) -#define dispatch_atomic_xchg(p, v, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ - __c11_atomic_exchange(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) -#define dispatch_atomic_cmpxchg(p, e, v, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); \ - __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ - &_r, _v, dispatch_atomic_memory_order_##m, \ - dispatch_atomic_memory_order_relaxed); }) -#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ - __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ - &_r, _v, dispatch_atomic_memory_order_##m, \ - dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) -#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ - __c11_atomic_compare_exchange_weak(_dispatch_atomic_c11_atomic(p), \ - &_r, _v, dispatch_atomic_memory_order_##m, \ - dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) -#define _dispatch_atomic_c11_op(p, v, m, o, op) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ - __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); (typeof(*(p)))(_r op _v); }) -#define _dispatch_atomic_c11_op_orig(p, v, m, o, op) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), 
_r = \ - __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) - -#define dispatch_atomic_add(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, add, +) -#define dispatch_atomic_add_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, add, +) -#define dispatch_atomic_sub(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, sub, -) -#define dispatch_atomic_sub_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, sub, -) -#define dispatch_atomic_and(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, and, &) -#define dispatch_atomic_and_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, and, &) -#define dispatch_atomic_or(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, or, |) -#define dispatch_atomic_or_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, or, |) -#define dispatch_atomic_xor(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, xor, ^) -#define dispatch_atomic_xor_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, xor, ^) - -#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) -#pragma mark - -#pragma mark gnu99 - -#define _dispatch_atomic_full_barrier() \ - __sync_synchronize() -#define _dispatch_atomic_barrier(m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_relaxed: \ - break; \ - default: \ - _dispatch_atomic_full_barrier(); break; \ - } }) -// seq_cst: only emulate explicit store(seq_cst) -> load(seq_cst) -#define dispatch_atomic_load(p, m) \ - ({ typeof(*(p)) _r = *(p); \ - switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_seq_cst: \ - _dispatch_atomic_barrier(m); /* fallthrough */ \ - case _dispatch_atomic_memory_order_relaxed: \ - break; \ - default: \ - _dispatch_atomic_unimplemented(); break; \ - } _r; }) -#define dispatch_atomic_store(p, v, m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_release: \ - case _dispatch_atomic_memory_order_seq_cst: \ - _dispatch_atomic_barrier(m); /* fallthrough */ \ - case _dispatch_atomic_memory_order_relaxed: \ - *(p) = (v); break; \ - default: \ - _dispatch_atomic_unimplemented(); break; \ - } switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_seq_cst: \ - _dispatch_atomic_barrier(m); break; \ - default: \ - break; \ - } }) -#if __has_builtin(__sync_swap) -#define dispatch_atomic_xchg(p, v, m) \ - ((typeof(*(p)))__sync_swap((p), (v))) -#else -#define dispatch_atomic_xchg(p, v, m) \ - ((typeof(*(p)))__sync_lock_test_and_set((p), (v))) -#endif -#define dispatch_atomic_cmpxchg(p, e, v, m) \ - __sync_bool_compare_and_swap((p), (e), (v)) -#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ - ({ typeof(*(g)) _e = (e), _r = \ - __sync_val_compare_and_swap((p), _e, (v)); \ - bool _b = (_e == _r); *(g) = _r; _b; }) -#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ - dispatch_atomic_cmpxchgv((p), (e), (v), (g), m) - -#define dispatch_atomic_add(p, v, m) \ - __sync_add_and_fetch((p), (v)) -#define dispatch_atomic_add_orig(p, v, m) \ - __sync_fetch_and_add((p), (v)) -#define dispatch_atomic_sub(p, v, m) \ - __sync_sub_and_fetch((p), (v)) -#define dispatch_atomic_sub_orig(p, v, m) \ - __sync_fetch_and_sub((p), (v)) -#define dispatch_atomic_and(p, v, m) \ - __sync_and_and_fetch((p), (v)) -#define dispatch_atomic_and_orig(p, v, m) \ - __sync_fetch_and_and((p), (v)) -#define dispatch_atomic_or(p, v, m) \ - __sync_or_and_fetch((p), (v)) -#define dispatch_atomic_or_orig(p, v, m) \ - 
__sync_fetch_and_or((p), (v)) -#define dispatch_atomic_xor(p, v, m) \ - __sync_xor_and_fetch((p), (v)) -#define dispatch_atomic_xor_orig(p, v, m) \ - __sync_fetch_and_xor((p), (v)) - -#if defined(__x86_64__) || defined(__i386__) -// GCC emits nothing for __sync_synchronize() on x86_64 & i386 -#undef _dispatch_atomic_full_barrier -#define _dispatch_atomic_full_barrier() \ - ({ __asm__ __volatile__( \ - "mfence" \ - : : : "memory"); }) -#undef dispatch_atomic_load -#define dispatch_atomic_load(p, m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_seq_cst: \ - case _dispatch_atomic_memory_order_relaxed: \ - break; \ - default: \ - _dispatch_atomic_unimplemented(); break; \ - } *(p); }) -// xchg is faster than store + mfence -#undef dispatch_atomic_store -#define dispatch_atomic_store(p, v, m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_relaxed: \ - case _dispatch_atomic_memory_order_release: \ - *(p) = (v); break; \ - case _dispatch_atomic_memory_order_seq_cst: \ - (void)dispatch_atomic_xchg((p), (v), m); break; \ - default:\ - _dispatch_atomic_unimplemented(); break; \ - } }) -#endif - -#else -#error "Please upgrade to GCC 4.2 or newer." -#endif - -#pragma mark - -#pragma mark generic - -// assume atomic builtins provide barriers -#define dispatch_atomic_barrier(m) +#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) // see comment in dispatch_once.c -#define dispatch_atomic_maximally_synchronizing_barrier() \ - _dispatch_atomic_barrier(seq_cst) - -#define dispatch_atomic_load2o(p, f, m) \ - dispatch_atomic_load(&(p)->f, m) -#define dispatch_atomic_store2o(p, f, v, m) \ - dispatch_atomic_store(&(p)->f, (v), m) -#define dispatch_atomic_xchg2o(p, f, v, m) \ - dispatch_atomic_xchg(&(p)->f, (v), m) -#define dispatch_atomic_cmpxchg2o(p, f, e, v, m) \ - dispatch_atomic_cmpxchg(&(p)->f, (e), (v), m) -#define dispatch_atomic_cmpxchgv2o(p, f, e, v, g, m) \ - dispatch_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) -#define dispatch_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ - dispatch_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) -#define dispatch_atomic_add2o(p, f, v, m) \ - dispatch_atomic_add(&(p)->f, (v), m) -#define dispatch_atomic_add_orig2o(p, f, v, m) \ - dispatch_atomic_add_orig(&(p)->f, (v), m) -#define dispatch_atomic_sub2o(p, f, v, m) \ - dispatch_atomic_sub(&(p)->f, (v), m) -#define dispatch_atomic_sub_orig2o(p, f, v, m) \ - dispatch_atomic_sub_orig(&(p)->f, (v), m) -#define dispatch_atomic_and2o(p, f, v, m) \ - dispatch_atomic_and(&(p)->f, (v), m) -#define dispatch_atomic_and_orig2o(p, f, v, m) \ - dispatch_atomic_and_orig(&(p)->f, (v), m) -#define dispatch_atomic_or2o(p, f, v, m) \ - dispatch_atomic_or(&(p)->f, (v), m) -#define dispatch_atomic_or_orig2o(p, f, v, m) \ - dispatch_atomic_or_orig(&(p)->f, (v), m) -#define dispatch_atomic_xor2o(p, f, v, m) \ - dispatch_atomic_xor(&(p)->f, (v), m) -#define dispatch_atomic_xor_orig2o(p, f, v, m) \ - dispatch_atomic_xor_orig(&(p)->f, (v), m) - -#define dispatch_atomic_inc(p, m) \ - dispatch_atomic_add((p), 1, m) -#define dispatch_atomic_inc_orig(p, m) \ - dispatch_atomic_add_orig((p), 1, m) -#define dispatch_atomic_inc2o(p, f, m) \ - dispatch_atomic_add2o(p, f, 1, m) -#define dispatch_atomic_inc_orig2o(p, f, m) \ - dispatch_atomic_add_orig2o(p, f, 1, m) -#define dispatch_atomic_dec(p, m) \ - dispatch_atomic_sub((p), 1, m) -#define dispatch_atomic_dec_orig(p, m) \ - dispatch_atomic_sub_orig((p), 1, m) -#define dispatch_atomic_dec2o(p, f, m) \ - 
dispatch_atomic_sub2o(p, f, 1, m) -#define dispatch_atomic_dec_orig2o(p, f, m) \ - dispatch_atomic_sub_orig2o(p, f, 1, m) - -#define dispatch_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \ - dispatch_atomic_cmpxchgv((p), (e), (v), (g), acquire) -#define dispatch_atomic_tsx_xrel_store(p, v) \ - dispatch_atomic_store(p, v, release) -#define dispatch_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \ - dispatch_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g)) -#define dispatch_atomic_tsx_xrel_store2o(p, f, v) \ - dispatch_atomic_tsx_xrel_store(&(p)->f, (v)) +#define os_atomic_maximally_synchronizing_barrier() \ + atomic_thread_fence(memory_order_seq_cst) + +#define os_atomic_load(p, m) \ + ({ _os_atomic_basetypeof(p) _r = \ + atomic_load_explicit(_os_atomic_c11_atomic(p), \ + memory_order_##m); (typeof(*(p)))_r; }) +#define os_atomic_store(p, v, m) \ + ({ _os_atomic_basetypeof(p) _v = (v); \ + atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \ + memory_order_##m); }) +#define os_atomic_xchg(p, v, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = \ + atomic_exchange_explicit(_os_atomic_c11_atomic(p), _v, \ + memory_order_##m); (typeof(*(p)))_r; }) +#define os_atomic_cmpxchg(p, e, v, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \ + atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ + &_r, _v, memory_order_##m, \ + memory_order_relaxed); }) +#define os_atomic_cmpxchgv(p, e, v, g, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ + &_r, _v, memory_order_##m, \ + memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) +#define os_atomic_cmpxchgvw(p, e, v, g, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \ + &_r, _v, memory_order_##m, \ + memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) + +#define _os_atomic_c11_op(p, v, m, o, op) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = \ + atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ + memory_order_##m); (typeof(*(p)))(_r op _v); }) +#define _os_atomic_c11_op_orig(p, v, m, o, op) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = \ + atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ + memory_order_##m); (typeof(*(p)))_r; }) +#define os_atomic_add(p, v, m) \ + _os_atomic_c11_op((p), (v), m, add, +) +#define os_atomic_add_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, add, +) +#define os_atomic_sub(p, v, m) \ + _os_atomic_c11_op((p), (v), m, sub, -) +#define os_atomic_sub_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, sub, -) +#define os_atomic_and(p, v, m) \ + _os_atomic_c11_op((p), (v), m, and, &) +#define os_atomic_and_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, and, &) +#define os_atomic_or(p, v, m) \ + _os_atomic_c11_op((p), (v), m, or, |) +#define os_atomic_or_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, or, |) +#define os_atomic_xor(p, v, m) \ + _os_atomic_c11_op((p), (v), m, xor, ^) +#define os_atomic_xor_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, xor, ^) + +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + bool _result = false; \ + typeof(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (os_unlikely(!_result)); \ + _result; \ + }) +#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) 
\ + os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) +#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ + ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) +#define os_atomic_rmw_loop_give_up(expr) \ + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) + +#define os_atomic_load2o(p, f, m) \ + os_atomic_load(&(p)->f, m) +#define os_atomic_store2o(p, f, v, m) \ + os_atomic_store(&(p)->f, (v), m) +#define os_atomic_xchg2o(p, f, v, m) \ + os_atomic_xchg(&(p)->f, (v), m) +#define os_atomic_cmpxchg2o(p, f, e, v, m) \ + os_atomic_cmpxchg(&(p)->f, (e), (v), m) +#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \ + os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) +#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ + os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) +#define os_atomic_add2o(p, f, v, m) \ + os_atomic_add(&(p)->f, (v), m) +#define os_atomic_add_orig2o(p, f, v, m) \ + os_atomic_add_orig(&(p)->f, (v), m) +#define os_atomic_sub2o(p, f, v, m) \ + os_atomic_sub(&(p)->f, (v), m) +#define os_atomic_sub_orig2o(p, f, v, m) \ + os_atomic_sub_orig(&(p)->f, (v), m) +#define os_atomic_and2o(p, f, v, m) \ + os_atomic_and(&(p)->f, (v), m) +#define os_atomic_and_orig2o(p, f, v, m) \ + os_atomic_and_orig(&(p)->f, (v), m) +#define os_atomic_or2o(p, f, v, m) \ + os_atomic_or(&(p)->f, (v), m) +#define os_atomic_or_orig2o(p, f, v, m) \ + os_atomic_or_orig(&(p)->f, (v), m) +#define os_atomic_xor2o(p, f, v, m) \ + os_atomic_xor(&(p)->f, (v), m) +#define os_atomic_xor_orig2o(p, f, v, m) \ + os_atomic_xor_orig(&(p)->f, (v), m) + +#define os_atomic_inc(p, m) \ + os_atomic_add((p), 1, m) +#define os_atomic_inc_orig(p, m) \ + os_atomic_add_orig((p), 1, m) +#define os_atomic_inc2o(p, f, m) \ + os_atomic_add2o(p, f, 1, m) +#define os_atomic_inc_orig2o(p, f, m) \ + os_atomic_add_orig2o(p, f, 1, m) +#define os_atomic_dec(p, m) \ + os_atomic_sub((p), 1, m) +#define os_atomic_dec_orig(p, m) \ + os_atomic_sub_orig((p), 1, m) +#define os_atomic_dec2o(p, f, m) \ + os_atomic_sub2o(p, f, 1, m) +#define os_atomic_dec_orig2o(p, f, m) \ + os_atomic_sub_orig2o(p, f, 1, m) #if defined(__x86_64__) || defined(__i386__) -#pragma mark - -#pragma mark x86 - -#undef dispatch_atomic_maximally_synchronizing_barrier +#undef os_atomic_maximally_synchronizing_barrier #ifdef __LP64__ -#define dispatch_atomic_maximally_synchronizing_barrier() \ +#define os_atomic_maximally_synchronizing_barrier() \ ({ unsigned long _clbr; __asm__ __volatile__( \ "cpuid" \ : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); }) #else #ifdef __llvm__ -#define dispatch_atomic_maximally_synchronizing_barrier() \ +#define os_atomic_maximally_synchronizing_barrier() \ ({ unsigned long _clbr; __asm__ __volatile__( \ "cpuid" \ : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); }) #else // gcc does not allow inline i386 asm to clobber ebx -#define dispatch_atomic_maximally_synchronizing_barrier() \ +#define os_atomic_maximally_synchronizing_barrier() \ ({ unsigned long _clbr; __asm__ __volatile__( \ "pushl %%ebx\n\t" \ "cpuid\n\t" \ @@ -373,9 +245,6 @@ typedef enum _dispatch_atomic_memory_order : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); }) #endif #endif - - -#endif - +#endif // defined(__x86_64__) || defined(__i386__) #endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h index 087d98c..5f972b4 100644 --- a/src/shims/atomic_sfb.h +++ b/src/shims/atomic_sfb.h @@ -32,11 +32,11 @@ #endif // Returns UINT_MAX if all the bits in p were already set. 
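/*
 * [editor's note, not part of the patch] Usage sketch for the
 * os_atomic_rmw_loop() macro defined above: it loads the old value,
 * runs the statement body to compute the new one, and retries the
 * weak compare-and-swap until it sticks, while
 * os_atomic_rmw_loop_give_up() exits the loop early. The saturating
 * counter below is an invented example, not code from libdispatch.
 */
#include <stdint.h>

static inline uint32_t
sketch_saturating_inc(uint32_t *counter)
{
	uint32_t ov, nv;
	os_atomic_rmw_loop(counter, ov, nv, relaxed, {
		if (ov == UINT32_MAX) {
			// saturated: leave the value alone and bail out
			os_atomic_rmw_loop_give_up(return ov);
		}
		nv = ov + 1;
	});
	return nv;
}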
-#define dispatch_atomic_set_first_bit(p,m) _dispatch_atomic_set_first_bit(p,m) +#define os_atomic_set_first_bit(p,m) _os_atomic_set_first_bit(p,m) DISPATCH_ALWAYS_INLINE static inline unsigned int -_dispatch_atomic_set_first_bit(volatile unsigned long *p, +_os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max_index) { unsigned int index; @@ -63,10 +63,10 @@ _dispatch_atomic_set_first_bit(volatile unsigned long *p, #if defined(__x86_64__) || defined(__i386__) -#undef dispatch_atomic_set_first_bit +#undef os_atomic_set_first_bit DISPATCH_ALWAYS_INLINE static inline unsigned int -dispatch_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) +os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) { unsigned long val, bit; if (max > (sizeof(val) * 8)) { diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 2b85d4a..cad211d 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -81,6 +81,15 @@ static inline uint32_t _dispatch_hw_get_config(_dispatch_hw_config_t c) { uint32_t val = 1; +#if defined(__linux__) && HAVE_SYSCONF + switch (c) { + case _dispatch_hw_config_logical_cpus: + case _dispatch_hw_config_physical_cpus: + return sysconf(_SC_NPROCESSORS_CONF); + case _dispatch_hw_config_active_cpus: + return sysconf(_SC_NPROCESSORS_ONLN); + } +#else const char *name = NULL; int r; #if defined(__APPLE__) @@ -106,6 +115,7 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) if (r > 0) val = (uint32_t)r; #endif } +#endif return val; } diff --git a/src/shims/linux_stubs.c b/src/shims/linux_stubs.c new file mode 100644 index 0000000..07ee8bc --- /dev/null +++ b/src/shims/linux_stubs.c @@ -0,0 +1,53 @@ +/* + * This source file is part of the Swift.org open source project + * + * Copyright (c) 2015 Apple Inc. and the Swift project authors + * + * Licensed under Apache License v2.0 with Runtime Library Exception + * + * See http://swift.org/LICENSE.txt for license information + * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * + */ + +/* + * This file contains stubbed out functions we are using during + * the initial linux port. When the port is complete, this file + * should be empty (and thus removed). + */ + +#include +#include + +#if __has_include() +#include +#else +#include +#endif + +#include "pthread.h" +#include "os/linux_base.h" +#include "internal.h" + + +#undef LINUX_PORT_ERROR +#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); abort(); } while (0) + + +/* + * Stubbed out static data + */ + +pthread_key_t dispatch_voucher_key; +pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; + +unsigned short dispatch_timer__program_semaphore; +unsigned short dispatch_timer__wake_semaphore; +unsigned short dispatch_timer__fire_semaphore; +unsigned short dispatch_timer__configure_semaphore; +unsigned short dispatch_queue__pop_semaphore; +unsigned short dispatch_callout__entry_semaphore; +unsigned short dispatch_callout__return_semaphore; +unsigned short dispatch_queue__push_semaphore; +void (*_dispatch_block_special_invoke)(void*); +struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h new file mode 100644 index 0000000..6a70c0b --- /dev/null +++ b/src/shims/linux_stubs.h @@ -0,0 +1,101 @@ +/* + * This source file is part of the Swift.org open source project + * + * Copyright (c) 2015 Apple Inc. 
and the Swift project authors + * + * Licensed under Apache License v2.0 with Runtime Library Exception + * + * See http://swift.org/LICENSE.txt for license information + * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * + */ + +// forward declarations for functions we are stubbing out +// in the intial linux port. + +#ifndef __DISPATCH__STUBS__INTERNAL +#define __DISPATCH__STUBS__INTERNAL + +// marker for hacks we have made to make progress +#define __LINUX_PORT_HDD__ 1 + +/* + * Stub out defines for some mach types and related macros + */ + +typedef uint32_t mach_port_t; + +#define MACH_PORT_NULL (0) +#define MACH_PORT_DEAD (-1) + +typedef uint32_t mach_error_t; + +typedef uint32_t mach_vm_size_t; + +typedef uint32_t mach_msg_return_t; + +typedef uint32_t mach_msg_bits_t; + +typedef uintptr_t mach_vm_address_t; + +typedef uint32_t dispatch_mach_msg_t; + +typedef uint32_t dispatch_mach_t; + +typedef uint32_t dispatch_mach_reason_t; + +typedef uint32_t voucher_activity_mode_t; + +typedef uint32_t voucher_activity_trace_id_t; + +typedef uint32_t voucher_activity_id_t; + +typedef uint32_t _voucher_activity_buffer_hook_t;; + +typedef uint32_t voucher_activity_flag_t; + +typedef struct { } mach_msg_header_t; + + +typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t, + dispatch_mach_msg_t, mach_error_t); + +typedef void (*dispatch_mach_msg_destructor_t)(void*); + +// Print a warning when an unported code path executes. +#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); } while (0) + +/* + * Stub out defines for other missing types + */ + +#if __linux__ +// we fall back to use kevent +#define kevent64_s kevent +#define kevent64(kq,cl,nc,el,ne,f,to) kevent(kq,cl,nc,el,ne,to) +#endif + +// SIZE_T_MAX should not be hardcoded like this here. +#define SIZE_T_MAX (0x7fffffff) + +// Define to 0 the NOTE_ values that are not present on Linux. +// Revisit this...would it be better to ifdef out the uses instead?? + +// The following values are passed as part of the EVFILT_TIMER requests + +#define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */ + +#define NOTE_SECONDS 0x01 +#define NOTE_USECONDS 0x02 +#define NOTE_NSECONDS 0x04 +#define NOTE_ABSOLUTE 0x08 +#define NOTE_CRITICAL 0x10 +#define NOTE_BACKGROUND 0x20 +#define NOTE_LEEWAY 0x40 + +// need to catch the following usage if it happens .. +// we simply return '0' as a value probably not correct + +#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;}) + +#endif diff --git a/src/shims/lock.c b/src/shims/lock.c new file mode 100644 index 0000000..2fab691 --- /dev/null +++ b/src/shims/lock.c @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#define _dlock_syscall_switch(err, syscall, ...) 
\ + for (;;) { \ + int err; \ + switch ((err = ((syscall) < 0 ? errno : 0))) { \ + case EINTR: continue; \ + __VA_ARGS__ \ + } \ + break; \ + } + +#if TARGET_OS_MAC +_Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION, + "values should be the same"); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, + uint32_t timeout) +{ + int option; + if (flags & DLOCK_LOCK_DATA_CONTENTION) { + option = SWITCH_OPTION_OSLOCK_DEPRESS; + } else { + option = SWITCH_OPTION_DEPRESS; + } + thread_switch(_dispatch_lock_owner(value), option, timeout); +} +#endif + +#pragma mark - ulock wrappers +#if HAVE_UL_COMPARE_AND_WAIT + +static int +_dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, + uint32_t flags) +{ + dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK); + int rc; + _dlock_syscall_switch(err, + rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout), + case 0: return rc > 0 ? ENOTEMPTY : 0; + case ETIMEDOUT: case EFAULT: return err; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); + ); +} + +static void +_dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags) +{ + dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK); + _dlock_syscall_switch(err, + __ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0), + case 0: case ENOENT: break; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed"); + ); +} + +#endif +#if HAVE_UL_UNFAIR_LOCK + +// returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT +static int +_dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, + dispatch_lock_options_t flags) +{ + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + // + timeout = timeout < 1000 ? 1 : timeout / 1000; + _dispatch_thread_switch(val, flags, timeout); + return 0; + } + int rc; + _dlock_syscall_switch(err, + rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout), + case 0: return rc > 0 ? 
ENOTEMPTY : 0; + case ETIMEDOUT: case EFAULT: return err; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); + ); +} + +static void +_dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags) +{ + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + // + return; + } + _dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0), + case 0: case ENOENT: break; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed"); + ); +} + +#endif +#pragma mark - futex wrappers +#if HAVE_FUTEX +#include +#include + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_futex(uint32_t *uaddr, int op, uint32_t val, + const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3, + int opflags) +{ + return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3); +} + +static int +_dispatch_futex_wait(uint32_t *uaddr, uint32_t val, + const struct timespec *timeout, int opflags) +{ + _dlock_syscall_switch(err, + _dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags), + case 0: case EWOULDBLOCK: case ETIMEDOUT: return err; + default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed"); + ); +} + +static void +_dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags) +{ + int rc; + _dlock_syscall_switch(err, + rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags), + case 0: return; + default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed"); + ); +} + +static void +_dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect, + int opflags) +{ + _dlock_syscall_switch(err, + _dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout, + NULL, 0, opflags), + case 0: return; + default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed"); + ); +} + +static void +_dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags) +{ + _dlock_syscall_switch(err, + _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags), + case 0: return; + default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed"); + ); +} + +#endif +#pragma mark - wait for address + +void +_dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, + dispatch_lock_options_t flags) +{ +#if HAVE_UL_COMPARE_AND_WAIT + _dispatch_ulock_wait((uint32_t *)address, value, 0, flags); +#elif HAVE_FUTEX + _dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG); +#else + mach_msg_timeout_t timeout = 1; + while (os_atomic_load(address, relaxed) == value) { + thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, timeout++); + } +#endif + (void)flags; +} + +void +_dispatch_wake_by_address(uint32_t volatile *address) +{ +#if HAVE_UL_COMPARE_AND_WAIT + _dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL); +#elif HAVE_FUTEX + _dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG); +#else + (void)address; +#endif +} + +#pragma mark - thread event + +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +semaphore_t +_dispatch_thread_semaphore_create(void) +{ + semaphore_t s4; + kern_return_t kr; + while (unlikely(kr = semaphore_create(mach_task_self(), &s4, + SYNC_POLICY_FIFO, 0))) { + DISPATCH_VERIFY_MIG(kr); + _dispatch_temporary_resource_shortage(); + } + return s4; +} + +void +_dispatch_thread_semaphore_dispose(void *ctxt) +{ + semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt; + kern_return_t kr = semaphore_destroy(mach_task_self(), s4); + DISPATCH_VERIFY_MIG(kr); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} +#endif + +void +_dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if 
(DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + kern_return_t kr = semaphore_signal(dte->dte_semaphore); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT + _dispatch_ulock_wake(&dte->dte_value, 0); +#elif HAVE_FUTEX + _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG); +#elif USE_POSIX_SEM + int rc = sem_post(&dte->dte_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif +} + +void +_dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + kern_return_t kr; + do { + kr = semaphore_wait(dte->dte_semaphore); + } while (unlikely(kr == KERN_ABORTED)); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + for (;;) { + uint32_t value = os_atomic_load(&dte->dte_value, acquire); + if (likely(value == 0)) return; + if (unlikely(value != UINT32_MAX)) { + DISPATCH_CLIENT_CRASH(value, "Corrupt thread event value"); + } +#if HAVE_UL_COMPARE_AND_WAIT + int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0); + dispatch_assert(rc == 0 || rc == EFAULT); +#elif HAVE_FUTEX + _dispatch_futex_wait(&dte->dte_value, UINT32_MAX, + NULL, FUTEX_PRIVATE_FLAG); +#endif + } +#elif USE_POSIX_SEM + int rc; + do { + rc = sem_wait(&dte->dte_sem); + } while (unlikely(rc != 0)); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#endif +} + +#pragma mark - unfair lock + +#if HAVE_UL_UNFAIR_LOCK +void +_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, + dispatch_lock_options_t flags) +{ + dispatch_lock tid_self = _dispatch_tid_self(), next = tid_self; + dispatch_lock tid_old, tid_new; + int rc; + + for (;;) { + os_atomic_rmw_loop(&dul->dul_lock, tid_old, tid_new, acquire, { + if (likely(!_dispatch_lock_is_locked(tid_old))) { + tid_new = next; + } else { + tid_new = tid_old & ~DLOCK_NOWAITERS_BIT; + if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break); + } + }); + if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) { + DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); + } + if (tid_new == next) { + return; + } + rc = _dispatch_unfair_lock_wait(&dul->dul_lock, tid_new, 0, flags); + if (rc == ENOTEMPTY) { + next = tid_self & ~DLOCK_NOWAITERS_BIT; + } else { + next = tid_self; + } + } +} +#elif HAVE_FUTEX +void +_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, + dispatch_lock_options_t flags) +{ + (void)flags; + _dispatch_futex_lock_pi(&dul->dul_lock, NULL, 1, FUTEX_PRIVATE_FLAG); +} +#else +void +_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, + dispatch_lock_options_t flags) +{ + dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); + uint32_t timeout = 1; + + while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock, + DLOCK_OWNER_NULL, tid_self, &tid_cur, acquire))) { + if (unlikely(_dispatch_lock_is_locked_by(tid_cur, tid_self))) { + DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); + } + _dispatch_thread_switch(tid_cur, flags, timeout++); + } +} +#endif + +void +_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, + dispatch_lock tid_cur) +{ + dispatch_lock_owner tid_self = _dispatch_tid_self(); + if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) { + DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread"); + } + +#if HAVE_UL_UNFAIR_LOCK + if (!(tid_cur & DLOCK_NOWAITERS_BIT)) { + _dispatch_unfair_lock_wake(&dul->dul_lock, 0); + } +#elif HAVE_FUTEX + // futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS + 
_dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_PRIVATE_FLAG); +#else + (void)dul; +#endif +} + +#pragma mark - gate lock + +void +_dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value, + dispatch_lock_options_t flags) +{ + dispatch_lock tid_self = _dispatch_tid_self(), tid_old, tid_new; + uint32_t timeout = 1; + + for (;;) { + os_atomic_rmw_loop(&dgl->dgl_lock, tid_old, tid_new, acquire, { + if (likely(tid_old == value)) { + os_atomic_rmw_loop_give_up_with_fence(acquire, return); + } +#ifdef DLOCK_NOWAITERS_BIT + tid_new = tid_old & ~DLOCK_NOWAITERS_BIT; +#else + tid_new = tid_old | DLOCK_WAITERS_BIT; +#endif + if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break); + }); + if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) { + DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); + } +#if HAVE_UL_UNFAIR_LOCK + _dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 0, flags); +#elif HAVE_FUTEX + _dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG); +#else + _dispatch_thread_switch(tid_new, flags, timeout++); +#endif + (void)timeout; + } +} + +void +_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur) +{ + dispatch_lock_owner tid_self = _dispatch_tid_self(); + if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) { + DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread"); + } + +#if HAVE_UL_UNFAIR_LOCK + _dispatch_unfair_lock_wake(&dgl->dgl_lock, ULF_WAKE_ALL); +#elif HAVE_FUTEX + _dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_PRIVATE_FLAG); +#else + (void)dgl; +#endif +} diff --git a/src/shims/lock.h b/src/shims/lock.h new file mode 100644 index 0000000..246c807 --- /dev/null +++ b/src/shims/lock.h @@ -0,0 +1,539 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
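The gate primitives above are what back dispatch_once on platforms with ulock or futex support (see DISPATCH_GATE_USE_FOR_DISPATCH_ONCE in the header below). A condensed usage sketch, with a hypothetical initialize_once() caller built on the once-gate API declared further down; the real fast path in once.c is more involved:

	static dispatch_once_gate_s once_gate; // zero-init == DLOCK_ONCE_UNLOCKED

	static void
	initialize_once(void (*init)(void))
	{
		if (os_atomic_load(&once_gate.dgo_once, acquire) == DLOCK_ONCE_DONE) {
			return; // fast path: initializer already ran
		}
		if (_dispatch_once_gate_tryenter(&once_gate)) {
			init(); // we own the gate: runs exactly once
			_dispatch_once_gate_broadcast(&once_gate); // publish DONE, wake all
		} else {
			_dispatch_once_gate_wait(&once_gate); // block until DONE is published
		}
	}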
+ */ + +#ifndef __DISPATCH_SHIMS_LOCK__ +#define __DISPATCH_SHIMS_LOCK__ + +#pragma mark - platform macros + +DISPATCH_ENUM(dispatch_lock_options, uint32_t, + DLOCK_LOCK_NONE = 0x00000000, + DLOCK_LOCK_DATA_CONTENTION = 0x00010000, +); + +#if TARGET_OS_MAC + +typedef mach_port_t dispatch_lock_owner; +typedef uint32_t dispatch_lock; + +#define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL) +#define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc) +#define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001) +#define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002) +#define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port()) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_OWNER_MASK) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_lock_owner +_dispatch_lock_owner(dispatch_lock lock_value) +{ + lock_value &= DLOCK_OWNER_MASK; + if (lock_value) { + lock_value |= DLOCK_NOWAITERS_BIT | DLOCK_NOFAILED_TRYLOCK_BIT; + } + return lock_value; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid) +{ + // equivalent to _dispatch_lock_owner(lock_value) == tid + return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_waiters(dispatch_lock lock_value) +{ + bool nowaiters_bit = (lock_value & DLOCK_NOWAITERS_BIT); + return _dispatch_lock_is_locked(lock_value) != nowaiters_bit; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_failed_trylock(dispatch_lock lock_value) +{ + return !(lock_value & DLOCK_NOFAILED_TRYLOCK_BIT); +} + +#elif defined(__linux__) +#include <linux/futex.h> +#include <unistd.h> +#include <sys/syscall.h> /* For SYS_xxx definitions */ + +typedef uint32_t dispatch_lock; +typedef pid_t dispatch_lock_owner; + +#define DLOCK_OWNER_NULL ((dispatch_lock_owner)0) +#define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK) +#define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS) +#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED) +#define _dispatch_tid_self() \ + ((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid)) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_OWNER_MASK) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_lock_owner +_dispatch_lock_owner(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_OWNER_MASK); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid) +{ + return _dispatch_lock_owner(lock_value) == tid; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_waiters(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_WAITERS_BIT); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_failed_trylock(dispatch_lock lock_value) +{ + return !(lock_value & DLOCK_FAILED_TRYLOCK_BIT); +} + +#else +# error define _dispatch_lock encoding scheme for your platform here +#endif + +#if __has_include(<sys/ulock.h>) +#include <sys/ulock.h> +#endif + +#ifndef HAVE_UL_COMPARE_AND_WAIT +#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200) +# define HAVE_UL_COMPARE_AND_WAIT 1 +#else +# define HAVE_UL_COMPARE_AND_WAIT 0 +#endif +#endif // HAVE_UL_COMPARE_AND_WAIT + +#ifndef HAVE_UL_UNFAIR_LOCK +#if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200) +# define HAVE_UL_UNFAIR_LOCK 1 +#else +# define HAVE_UL_UNFAIR_LOCK 0 +#endif +#endif //
HAVE_UL_UNFAIR_LOCK + +#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX) +#endif + +#ifndef HAVE_FUTEX +#ifdef __linux__ +#define HAVE_FUTEX 1 +#else +#define HAVE_FUTEX 0 +#endif +#endif // HAVE_FUTEX + +#if USE_MACH_SEM +#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ + if (unlikely((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \ + } else if (unlikely(x)) { \ + DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ + } \ + } while (0) +#define DISPATCH_GROUP_VERIFY_KR(x) do { \ + if (unlikely((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \ + } else if (unlikely(x)) { \ + DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ + } \ + } while (0) +#elif USE_POSIX_SEM +#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ + if (unlikely((x) == -1)) { \ + DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \ + } \ + } while (0) +#endif + +#pragma mark - compare and wait + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, + dispatch_lock_options_t flags); +void _dispatch_wake_by_address(uint32_t volatile *address); + +#pragma mark - thread event +/** + * @typedef dispatch_thread_event_t + * + * @abstract + * Dispatch Thread Events are used for one-time synchronization between threads. + * + * @discussion + * Dispatch Thread Events are cheap synchronization points used when a thread + * needs to block until a certain event has happened. Dispatch Thread Event + * must be initialized and destroyed with _dispatch_thread_event_init() and + * _dispatch_thread_event_destroy(). + * + * A Dispatch Thread Event must be waited on and signaled exactly once between + * initialization and destruction. These objects are simpler than semaphores + * and do not support being signaled and waited on an arbitrary number of times. 
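A minimal usage sketch of this internal API, assuming a hypothetical caller handing one result between two threads; compute() and spawn_thread() are stand-ins, not libdispatch functions:

	static struct dispatch_thread_event_s handoff_event; // hypothetical
	static int handoff_result;
	extern int compute(void);                    // stand-in
	extern void spawn_thread(void (*fn)(void));  // stand-in

	static void producer(void)
	{
		handoff_result = compute();                    // publish first
		_dispatch_thread_event_signal(&handoff_event); // exactly one signal
	}

	static int consumer(void)
	{
		_dispatch_thread_event_init(&handoff_event);
		spawn_thread(producer);
		_dispatch_thread_event_wait(&handoff_event);   // exactly one wait
		_dispatch_thread_event_destroy(&handoff_event);
		return handoff_result;
	}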
+ * + * This locking primitive has no notion of ownership + */ +typedef struct dispatch_thread_event_s { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + union { + semaphore_t dte_semaphore; + uint32_t dte_value; + }; +#elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + // 1 means signalled but not waited on yet + // UINT32_MAX means waited on, but not signalled yet + // 0 is the initial and final state + uint32_t dte_value; +#elif USE_POSIX_SEM + sem_t dte_sem; +#else +# error define dispatch_thread_event_s for your platform +#endif +} dispatch_thread_event_s, *dispatch_thread_event_t; + +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +semaphore_t _dispatch_thread_semaphore_create(void); +void _dispatch_thread_semaphore_dispose(void *); + +DISPATCH_ALWAYS_INLINE +static inline semaphore_t +_dispatch_get_thread_semaphore(void) +{ + semaphore_t sema = (semaphore_t)(uintptr_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + if (unlikely(!sema)) { + return _dispatch_thread_semaphore_create(); + } + _dispatch_thread_setspecific(dispatch_sema4_key, NULL); + return sema; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_put_thread_semaphore(semaphore_t sema) +{ + semaphore_t old_sema = (semaphore_t)(uintptr_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema); + if (unlikely(old_sema)) { + return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema); + } +} +#endif + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_thread_event_wait_slow(dispatch_thread_event_t); +void _dispatch_thread_event_signal_slow(dispatch_thread_event_t); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_init(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + dte->dte_semaphore = _dispatch_get_thread_semaphore(); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + dte->dte_value = 0; +#elif USE_POSIX_SEM + int rc = sem_init(&dte->dte_sem, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_signal(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_thread_event_signal_slow(dte); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + if (os_atomic_inc_orig(&dte->dte_value, release) == 0) { + // 0 -> 1 transition doesn't need a signal + // force a wake even when the value is corrupt, + // waiters do the validation + return; + } +#elif USE_POSIX_SEM + // fallthrough +#endif + _dispatch_thread_event_signal_slow(dte); +} + + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_wait(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_thread_event_wait_slow(dte); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + if (os_atomic_dec(&dte->dte_value, acquire) == 0) { + // 1 -> 0 is always a valid transition, so we can return + // for any other value, go to the slowpath which checks it's not corrupt + return; + } +#elif USE_POSIX_SEM + // fallthrough +#endif + _dispatch_thread_event_wait_slow(dte); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_destroy(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_put_thread_semaphore(dte->dte_semaphore); + return; + } +#endif +#if 
HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + // nothing to do + dispatch_assert(dte->dte_value == 0); +#elif USE_POSIX_SEM + int rc = sem_destroy(&dte->dte_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#endif +} + +#pragma mark - unfair lock + +typedef struct dispatch_unfair_lock_s { + dispatch_lock dul_lock; +} dispatch_unfair_lock_s, *dispatch_unfair_lock_t; + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t l, + dispatch_lock_options_t options); +void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l, + dispatch_lock tid_cur); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_unfair_lock_lock(dispatch_unfair_lock_t l) +{ + dispatch_lock tid_self = _dispatch_tid_self(); + if (likely(os_atomic_cmpxchg(&l->dul_lock, + DLOCK_OWNER_NULL, tid_self, acquire))) { + return; + } + return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l, + dispatch_lock_owner *owner) +{ + dispatch_lock tid_old, tid_new, tid_self = _dispatch_tid_self(); + + os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, acquire, { + if (likely(!_dispatch_lock_is_locked(tid_old))) { + tid_new = tid_self; + } else { +#ifdef DLOCK_NOFAILED_TRYLOCK_BIT + tid_new = tid_old & ~DLOCK_NOFAILED_TRYLOCK_BIT; +#else + tid_new = tid_old | DLOCK_FAILED_TRYLOCK_BIT; +#endif + } + }); + if (owner) *owner = _dispatch_lock_owner(tid_new); + return !_dispatch_lock_is_locked(tid_old); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l) +{ + dispatch_lock tid_old, tid_new; + + os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, release, { +#ifdef DLOCK_NOFAILED_TRYLOCK_BIT + if (likely(tid_old & DLOCK_NOFAILED_TRYLOCK_BIT)) { + tid_new = DLOCK_OWNER_NULL; + } else { + tid_new = tid_old | DLOCK_NOFAILED_TRYLOCK_BIT; + } +#else + if (likely(!(tid_old & DLOCK_FAILED_TRYLOCK_BIT))) { + tid_new = DLOCK_OWNER_NULL; + } else { + tid_new = tid_old & ~DLOCK_FAILED_TRYLOCK_BIT; + } +#endif + }); + if (unlikely(tid_new)) { + // unlock failed, renew the lock, which needs an acquire barrier + os_atomic_thread_fence(acquire); + return false; + } + if (unlikely(_dispatch_lock_has_waiters(tid_old))) { + _dispatch_unfair_lock_unlock_slow(l, tid_old); + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l) +{ + dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); +#if HAVE_FUTEX + if (likely(os_atomic_cmpxchgv(&l->dul_lock, + tid_self, DLOCK_OWNER_NULL, &tid_cur, release))) { + return false; + } +#else + tid_cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release); + if (likely(tid_cur == tid_self)) return false; +#endif + _dispatch_unfair_lock_unlock_slow(l, tid_cur); + return _dispatch_lock_has_failed_trylock(tid_cur); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l) +{ + (void)_dispatch_unfair_lock_unlock_had_failed_trylock(l); +} + +#pragma mark - gate lock + +#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX +#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1 +#else +#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0 +#endif + +#define DLOCK_GATE_UNLOCKED ((dispatch_lock)0) + +#define DLOCK_ONCE_UNLOCKED ((dispatch_once_t)0) +#define DLOCK_ONCE_DONE (~(dispatch_once_t)0) + +typedef struct dispatch_gate_s { + dispatch_lock dgl_lock; +} dispatch_gate_s, *dispatch_gate_t; + +typedef struct dispatch_once_gate_s { + union { + 
dispatch_gate_s dgo_gate; + dispatch_once_t dgo_once; + }; +} dispatch_once_gate_s, *dispatch_once_gate_t; + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_gate_wait_slow(dispatch_gate_t l, dispatch_lock value, + uint32_t flags); +void _dispatch_gate_broadcast_slow(dispatch_gate_t l, dispatch_lock tid_cur); + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_gate_tryenter(dispatch_gate_t l) +{ + dispatch_lock tid_self = _dispatch_tid_self(); + return likely(os_atomic_cmpxchg(&l->dgl_lock, + DLOCK_GATE_UNLOCKED, tid_self, acquire)); +} + +#define _dispatch_gate_wait(l, flags) \ + _dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_gate_broadcast(dispatch_gate_t l) +{ + dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); + tid_cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release); + if (likely(tid_cur == tid_self)) return; + _dispatch_gate_broadcast_slow(l, tid_cur); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_once_gate_tryenter(dispatch_once_gate_t l) +{ + dispatch_once_t tid_self = (dispatch_once_t)_dispatch_tid_self(); + return likely(os_atomic_cmpxchg(&l->dgo_once, + DLOCK_ONCE_UNLOCKED, tid_self, acquire)); +} + +#define _dispatch_once_gate_wait(l) \ + _dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \ + DLOCK_LOCK_NONE) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_once_gate_broadcast(dispatch_once_gate_t l) +{ + dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self(); + // see once.c for explanation about this trick + os_atomic_maximally_synchronizing_barrier(); + // above assumed to contain release barrier + tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed); + if (likely(tid_cur == tid_self)) return; + _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur); +} + +#endif // __DISPATCH_SHIMS_LOCK__ diff --git a/src/shims/time.h b/src/shims/time.h index 7b34bc7..7b29771 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -132,5 +132,10 @@ _dispatch_absolute_time(void) #endif // HAVE_MACH_ABSOLUTE_TIME } +static inline uint64_t +_dispatch_approximate_time(void) +{ + return _dispatch_absolute_time(); +} #endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index cf52385..2e3ece8 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -39,32 +39,51 @@ #if __has_include(<os/tsd.h>) #include <os/tsd.h> #endif + +#if !defined(OS_GS_RELATIVE) && (defined(__i386__) || defined(__x86_64__)) +#define OS_GS_RELATIVE __attribute__((address_space(256))) +#endif + +#ifdef _os_tsd_get_base +#ifdef OS_GS_RELATIVE +typedef long dispatch_tsd_pair_t \ + __attribute__((vector_size(sizeof(long) * 2), aligned(sizeof(long)))); +#define _os_tsd_get_pair_address(k) \ + (dispatch_tsd_pair_t OS_GS_RELATIVE *)((k) * sizeof(long)) +#else +typedef struct { void *a; void *b; } dispatch_tsd_pair_t; +#define _os_tsd_get_pair_address(k) \ + (dispatch_tsd_pair_t *)(_os_tsd_get_base() + (k)) +#endif +#endif // _os_tsd_get_base #endif #if DISPATCH_USE_DIRECT_TSD +// dispatch_queue_key & dispatch_frame_key need to be contiguous +// in that order, and queue_key to be an even number static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; -static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY1; +static const unsigned long dispatch_frame_key = __PTK_LIBDISPATCH_KEY1; static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; -static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; -static
const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; +static const unsigned long dispatch_context_key = __PTK_LIBDISPATCH_KEY3; +static const unsigned long dispatch_pthread_root_queue_observer_hooks_key = + __PTK_LIBDISPATCH_KEY4; static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5; #if DISPATCH_INTROSPECTION static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; #elif DISPATCH_PERF_MON static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif -#if DISPATCH_USE_OS_SEMAPHORE_CACHE -static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE; -#else static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY7; -#endif -static const unsigned long dispatch_pthread_root_queue_observer_hooks_key = - __PTK_LIBDISPATCH_KEY8; #ifndef __TSD_THREAD_QOS_CLASS #define __TSD_THREAD_QOS_CLASS 4 #endif +#ifndef __TSD_THREAD_VOUCHER +#define __TSD_THREAD_VOUCHER 6 +#endif static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; +static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY8; +static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9; DISPATCH_TSD_INLINE static inline void @@ -73,24 +92,89 @@ _dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) if (!*k || !d) return; dispatch_assert_zero(pthread_key_init_np((int)*k, d)); } +#elif DISPATCH_USE_THREAD_LOCAL_STORAGE + +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_key_create(pthread_key_t *k, void (*d)(void *)) +{ + dispatch_assert_zero(pthread_key_create(k, d)); +} + +struct dispatch_tsd { + pid_t tid; + void *dispatch_queue_key; + void *dispatch_frame_key; + void *dispatch_cache_key; + void *dispatch_context_key; + void *dispatch_pthread_root_queue_observer_hooks_key; + void *dispatch_defaultpriority_key; +#if DISPATCH_INTROSPECTION + void *dispatch_introspection_key; +#elif DISPATCH_PERF_MON + void *dispatch_bcounter_key; +#endif +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + void *dispatch_sema4_key; +#endif + void *dispatch_priority_key; + void *dispatch_voucher_key; + void *dispatch_deferred_items_key; +}; + +extern __thread struct dispatch_tsd __dispatch_tsd; +extern pthread_key_t __dispatch_tsd_key; +extern void libdispatch_tsd_init(void); +extern void _libdispatch_tsd_cleanup(void *ctx); + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_tsd * +_dispatch_get_tsd_base(void) +{ + if (unlikely(__dispatch_tsd.tid == 0)) { + libdispatch_tsd_init(); + } + OS_COMPILER_CAN_ASSUME(__dispatch_tsd.tid != 0); + return &__dispatch_tsd; +} + +#define _dispatch_thread_getspecific(key) \ + (_dispatch_get_tsd_base()->key) +#define _dispatch_thread_setspecific(key, value) \ + (void)(_dispatch_get_tsd_base()->key = (value)) + +#define _dispatch_thread_getspecific_pair(k1, p1, k2, p2) \ + ( *(p1) = _dispatch_thread_getspecific(k1), \ + *(p2) = _dispatch_thread_getspecific(k2) ) + +#define _dispatch_thread_getspecific_packed_pair(k1, k2, p) \ + ( (p)[0] = _dispatch_thread_getspecific(k1), \ + (p)[1] = _dispatch_thread_getspecific(k2) ) + +#define _dispatch_thread_setspecific_pair(k1, p1, k2, p2) \ + ( _dispatch_thread_setspecific(k1,p1), \ + _dispatch_thread_setspecific(k2,p2) ) + +#define _dispatch_thread_setspecific_packed_pair(k1, k2, p) \ + ( _dispatch_thread_setspecific(k1,(p)[0]), \ + _dispatch_thread_setspecific(k2,(p)[1]) ) + #else extern pthread_key_t dispatch_queue_key; -extern pthread_key_t dispatch_voucher_key; -#if DISPATCH_USE_OS_SEMAPHORE_CACHE -#error 
"Invalid DISPATCH_USE_OS_SEMAPHORE_CACHE configuration" -#else -extern pthread_key_t dispatch_sema4_key; -#endif +extern pthread_key_t dispatch_frame_key; extern pthread_key_t dispatch_cache_key; -extern pthread_key_t dispatch_io_key; -extern pthread_key_t dispatch_apply_key; +extern pthread_key_t dispatch_context_key; +extern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; extern pthread_key_t dispatch_defaultpriority_key; #if DISPATCH_INTROSPECTION extern pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON extern pthread_key_t dispatch_bcounter_key; #endif -exern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; +extern pthread_key_t dispatch_sema4_key; +extern pthread_key_t dispatch_priority_key; +extern pthread_key_t dispatch_voucher_key; +extern pthread_key_t dispatch_deferred_items_key; DISPATCH_TSD_INLINE static inline void @@ -100,8 +184,7 @@ _dispatch_thread_key_create(pthread_key_t *k, void (*d)(void *)) } #endif -#if DISPATCH_USE_TSD_BASE && !DISPATCH_DEBUG -#else // DISPATCH_USE_TSD_BASE +#ifndef DISPATCH_USE_THREAD_LOCAL_STORAGE DISPATCH_TSD_INLINE static inline void _dispatch_thread_setspecific(pthread_key_t k, void *v) @@ -109,8 +192,14 @@ _dispatch_thread_setspecific(pthread_key_t k, void *v) #if DISPATCH_USE_DIRECT_TSD if (_pthread_has_direct_tsd()) { (void)_pthread_setspecific_direct(k, v); - return; + } else { +#if TARGET_IPHONE_SIMULATOR + (void)_pthread_setspecific_static(k, v); // rdar://26058142 +#else + __builtin_trap(); // unreachable +#endif } + return; #endif dispatch_assert_zero(pthread_setspecific(k, v)); } @@ -126,7 +215,70 @@ _dispatch_thread_getspecific(pthread_key_t k) #endif return pthread_getspecific(k); } -#endif // DISPATCH_USE_TSD_BASE + +// this is used when loading a pair at once and the caller will want to +// look at each component individually. 
+// some platforms can load a pair of pointers efficiently that way (like arm) +// intel doesn't, hence this degrades to two loads on intel +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_getspecific_pair(pthread_key_t k1, void **p1, + pthread_key_t k2, void **p2) +{ + *p1 = _dispatch_thread_getspecific(k1); + *p2 = _dispatch_thread_getspecific(k2); +} + +// this is used for save/restore purposes +// and the caller doesn't need to look at a specific component +// this does SSE on intel, and SSE is bad at breaking/assembling components +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_getspecific_packed_pair(pthread_key_t k1, pthread_key_t k2, + void **p) +{ +#if DISPATCH_USE_DIRECT_TSD && defined(_os_tsd_get_pair_address) + dispatch_assert(k2 == k1 + 1); + if (_pthread_has_direct_tsd()) { + *(dispatch_tsd_pair_t *)p = *_os_tsd_get_pair_address(k1); + return; + } +#endif + p[0] = _dispatch_thread_getspecific(k1); + p[1] = _dispatch_thread_getspecific(k2); +} + +// this is used when storing a pair at once from separated components +// some platforms can store a pair of pointers efficiently that way (like arm) +// intel doesn't, hence this degrades to two stores on intel +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_setspecific_pair(pthread_key_t k1, void *p1, + pthread_key_t k2, void *p2) +{ + _dispatch_thread_setspecific(k1, p1); + _dispatch_thread_setspecific(k2, p2); +} + +// this is used for save/restore purposes +// and the caller doesn't need to look at a specific component +// this does SSE on intel, and SSE is bad at breaking/assembling components +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_setspecific_packed_pair(pthread_key_t k1, pthread_key_t k2, + void **p) +{ +#if DISPATCH_USE_DIRECT_TSD && defined(_os_tsd_get_pair_address) + dispatch_assert(k2 == k1 + 1); + if (_pthread_has_direct_tsd()) { + *_os_tsd_get_pair_address(k1) = *(dispatch_tsd_pair_t *)p; + return; + } +#endif + _dispatch_thread_setspecific(k1, p[0]); + _dispatch_thread_setspecific(k2, p[1]); +} +#endif #if TARGET_OS_WIN32 #define _dispatch_thread_self() ((uintptr_t)GetCurrentThreadId()) @@ -141,22 +293,28 @@ _dispatch_thread_getspecific(pthread_key_t k) #if TARGET_OS_WIN32 #define _dispatch_thread_port() ((mach_port_t)0) -#else +#elif !DISPATCH_USE_THREAD_LOCAL_STORAGE #if DISPATCH_USE_DIRECT_TSD -#define _dispatch_thread_port() ((mach_port_t)_dispatch_thread_getspecific(\ - _PTHREAD_TSD_SLOT_MACH_THREAD_SELF)) +#define _dispatch_thread_port() ((mach_port_t)(uintptr_t)\ + _dispatch_thread_getspecific(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF)) #else -#define _dispatch_thread_port() (pthread_mach_thread_np(_dispatch_thread_self())) +#define _dispatch_thread_port() pthread_mach_thread_np(_dispatch_thread_self()) #endif #endif +#if HAVE_MACH +#define _dispatch_get_thread_mig_reply_port() ((mach_port_t)(uintptr_t) \ + _dispatch_thread_getspecific(_PTHREAD_TSD_SLOT_MIG_REPLY)) +#define _dispatch_set_thread_mig_reply_port(p) ( \ + _dispatch_thread_setspecific(_PTHREAD_TSD_SLOT_MIG_REPLY, \ + (void*)(uintptr_t)(p))) +#endif + DISPATCH_TSD_INLINE DISPATCH_CONST static inline unsigned int _dispatch_cpu_number(void) { -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 - return 0; -#elif __has_include(<os/tsd.h>) +#if __has_include(<os/tsd.h>) return _os_cpu_number(); #elif defined(__x86_64__) || defined(__i386__) struct { uintptr_t p1, p2; } p; diff --git a/src/shims/yield.h b/src/shims/yield.h index 2a884d6..1850aee 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@
-33,7 +33,7 @@ #if DISPATCH_HW_CONFIG_UP #define _dispatch_wait_until(c) do { \ int _spins = 0; \ - while (!(c)) { \ + while (!fastpath(c)) { \ _spins++; \ _dispatch_preemption_yield(_spins); \ } } while (0) @@ -44,7 +44,7 @@ #endif #define _dispatch_wait_until(c) do { \ int _spins = -(DISPATCH_WAIT_SPINS); \ - while (!(c)) { \ + while (!fastpath(c)) { \ if (slowpath(_spins++ >= 0)) { \ _dispatch_preemption_yield(_spins); \ } else { \ @@ -53,7 +53,7 @@ } } while (0) #else #define _dispatch_wait_until(c) do { \ - while (!(c)) { \ + while (!fastpath(c)) { \ dispatch_hardware_pause(); \ } } while (0) #endif @@ -109,16 +109,18 @@ #pragma mark _dispatch_preemption_yield #if HAVE_MACH -#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) && !(TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS #else #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS #endif -#define _dispatch_preemption_yield(n) _dispatch_thread_switch(MACH_PORT_NULL, \ +#define _dispatch_preemption_yield(n) thread_switch(MACH_PORT_NULL, \ + DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) +#define _dispatch_preemption_yield_to(th, n) thread_switch(th, \ DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) #else #define _dispatch_preemption_yield(n) pthread_yield_np() +#define _dispatch_preemption_yield_to(th, n) pthread_yield_np() #endif // HAVE_MACH #pragma mark - @@ -132,25 +134,15 @@ #endif #if HAVE_MACH -#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) -#define _dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \ +#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) +#define _dispatch_contention_usleep(u) thread_switch(MACH_PORT_NULL, \ SWITCH_OPTION_DISPATCH_CONTENTION, (u)) #else -#define _dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \ +#define _dispatch_contention_usleep(u) thread_switch(MACH_PORT_NULL, \ SWITCH_OPTION_WAIT, (((u)-1)/1000)+1) #endif #else #define _dispatch_contention_usleep(u) usleep((u)) #endif // HAVE_MACH -#pragma mark - -#pragma mark _dispatch_thread_switch - -#if HAVE_MACH -#define _dispatch_thread_switch(thread_name, option, option_time) \ - thread_switch((thread_name), (option), (option_time)) - -#endif // HAVE_MACH - #endif // __DISPATCH_SHIMS_YIELD__ diff --git a/src/source.c b/src/source.c index 430c7af..a5a2c94 100644 --- a/src/source.c +++ b/src/source.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. 
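Before the source.c hunks: a portable sketch of the spin-then-yield pattern behind _dispatch_wait_until in the yield.h changes above (fastpath/slowpath are __builtin_expect hints; WAIT_SPINS is an illustrative bound, not libdispatch's DISPATCH_WAIT_SPINS tuning):

	#include <sched.h>
	#include <stdatomic.h>

	#define WAIT_SPINS 1024 // illustrative

	static void
	wait_until_set(_Atomic _Bool *cond)
	{
		int spins = -WAIT_SPINS;
		while (!atomic_load_explicit(cond, memory_order_acquire)) {
			if (spins++ >= 0) {
				sched_yield(); // cede the CPU, like _dispatch_preemption_yield()
			} else {
	#if defined(__x86_64__) || defined(__i386__)
				__builtin_ia32_pause(); // dispatch_hardware_pause() equivalent
	#else
				sched_yield();
	#endif
			}
		}
	}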
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -26,13 +26,17 @@ #include #define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1 -#define DKEV_DISPOSE_IGNORE_ENOENT 0x2 +#define DKEV_UNREGISTER_DISCONNECTED 0x2 +#define DKEV_UNREGISTER_REPLY_REMOVE 0x4 +#define DKEV_UNREGISTER_WAKEUP 0x8 +static void _dispatch_source_handler_free(dispatch_source_t ds, long kind); static void _dispatch_source_merge_kevent(dispatch_source_t ds, const _dispatch_kevent_qos_s *ke); -static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp); +static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, + pthread_priority_t pp, uint32_t *flgp); static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, - int options); + unsigned int options); static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke); @@ -49,33 +53,46 @@ static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx); static inline unsigned long _dispatch_source_timer_data( dispatch_source_refs_t dr, unsigned long prev); -static long _dispatch_kq_update(const _dispatch_kevent_qos_s *); -static void _dispatch_memorystatus_init(void); +static void _dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke); +static long _dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke); +static void _dispatch_memorypressure_init(void); #if HAVE_MACH static void _dispatch_mach_host_calendar_change_register(void); +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK static void _dispatch_mach_recv_msg_buf_init(void); static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); +#endif static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static inline void _dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke); +static void _dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke); +static mach_msg_size_t _dispatch_kevent_mach_msg_size( + _dispatch_kevent_qos_s *ke); #else static inline void _dispatch_mach_host_calendar_change_register(void) {} static inline void _dispatch_mach_recv_msg_buf_init(void) {} #endif static const char * _evfiltstr(short filt); #if DISPATCH_DEBUG -static void _dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev, - const char* str); +static void dispatch_kevent_debug(const char *verb, + const _dispatch_kevent_qos_s *kev, int i, int n, + const char *function, unsigned int line); static void _dispatch_kevent_debugger(void *context); #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) #else static inline void -_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev DISPATCH_UNUSED, - const char* str DISPATCH_UNUSED) {} +dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, + int i, int n, const char *function, unsigned int line) +{ + (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line; +} #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif +#define _dispatch_kevent_debug(verb, _kev) \ + dispatch_kevent_debug(verb, _kev, 0, 1, __FUNCTION__, __LINE__) +#define _dispatch_kevent_debug_n(verb, _kev, i, n) \ + dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__) #ifndef DISPATCH_MGR_QUEUE_DEBUG #define DISPATCH_MGR_QUEUE_DEBUG 0 #endif @@ -83,32 +100,47 @@ _dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev DISPATCH_UNUSED, #define _dispatch_kevent_mgr_debug 
_dispatch_kevent_debug #else static inline void -_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED, - const char* str DISPATCH_UNUSED) {} +_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED) {} #endif #pragma mark - #pragma mark dispatch_source_t dispatch_source_t -dispatch_source_create(dispatch_source_type_t type, - uintptr_t handle, - unsigned long mask, - dispatch_queue_t dq) +dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, + unsigned long mask, dispatch_queue_t dq) { + // ensure _dispatch_evfilt_machport_direct_enabled is initialized + _dispatch_root_queues_init(); const _dispatch_kevent_qos_s *proto_kev = &type->ke; dispatch_source_t ds; dispatch_kevent_t dk; // input validation if (type == NULL || (mask & ~type->mask)) { - return NULL; + return DISPATCH_BAD_INPUT; + } + if (type->mask && !mask) { + // expect a non-zero mask when the type declares one ... except + switch (type->ke.filter) { + case DISPATCH_EVFILT_TIMER: + break; // timers don't need masks +#if DISPATCH_USE_VM_PRESSURE + case EVFILT_VM: + break; // type->init forces the only acceptable mask +#endif + case DISPATCH_EVFILT_MACH_NOTIFICATION: + break; // type->init handles zero mask as a legacy case + default: + // otherwise reject as invalid input + return DISPATCH_BAD_INPUT; + } } switch (type->ke.filter) { case EVFILT_SIGNAL: if (handle >= NSIG) { - return NULL; + return DISPATCH_BAD_INPUT; } break; case EVFILT_FS: @@ -121,12 +153,12 @@ dispatch_source_create(dispatch_source_type_t type, case DISPATCH_EVFILT_CUSTOM_ADD: case DISPATCH_EVFILT_CUSTOM_OR: if (handle) { - return NULL; + return DISPATCH_BAD_INPUT; } break; case DISPATCH_EVFILT_TIMER: - if (!!handle ^ !!type->ke.ident) { - return NULL; + if ((handle == 0) != (type->ke.ident == 0)) { + return DISPATCH_BAD_INPUT; } break; default: @@ -136,42 +168,54 @@ dispatch_source_create(dispatch_source_type_t type, ds = _dispatch_alloc(DISPATCH_VTABLE(source), sizeof(struct dispatch_source_s)); // Initialize as a queue first, then override some settings below. 
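For orientation, the public-API view of the object this constructor builds: a source is created suspended, configured, then resumed onto its target queue (a sketch; the queue label is hypothetical):

	#include <dispatch/dispatch.h>
	#include <stdio.h>

	static dispatch_source_t
	make_tick_timer(void)
	{
		dispatch_queue_t q = dispatch_queue_create("com.example.tick", NULL);
		dispatch_source_t t = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
				0 /* handle */, 0 /* mask: timers don't need one */, q);
		if (!t) return NULL; // DISPATCH_BAD_INPUT surfaces as NULL to callers
		dispatch_source_set_timer(t, dispatch_time(DISPATCH_TIME_NOW, 0),
				NSEC_PER_SEC, NSEC_PER_MSEC); // fire every second, 1ms leeway
		dispatch_source_set_event_handler(t, ^{ printf("tick\n"); });
		dispatch_resume(t); // sources are created suspended
		return t;
	}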
- _dispatch_queue_init((dispatch_queue_t)ds); + _dispatch_queue_init(ds->_as_dq, DQF_NONE, 1, true); ds->dq_label = "source"; - ds->do_ref_cnt++; // the reference the manager queue holds - ds->do_ref_cnt++; // since source is created suspended - ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = *proto_kev; - dk->dk_kevent.ident = handle; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags |= (uint32_t)mask; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); + switch (type->ke.filter) { + case DISPATCH_EVFILT_CUSTOM_OR: + dk = DISPATCH_KEV_CUSTOM_OR; + break; + case DISPATCH_EVFILT_CUSTOM_ADD: + dk = DISPATCH_KEV_CUSTOM_ADD; + break; + default: + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = *proto_kev; + dk->dk_kevent.ident = handle; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags |= (uint32_t)mask; + dk->dk_kevent.udata = (_dispatch_kevent_qos_udata_t)dk; + TAILQ_INIT(&dk->dk_sources); + ds->ds_pending_data_mask = dk->dk_kevent.fflags; + ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; + if (EV_UDATA_SPECIFIC & proto_kev->flags) { + dk->dk_kevent.flags |= EV_DISPATCH; + ds->ds_is_direct_kevent = true; + ds->ds_needs_rearm = true; + } + break; + } ds->ds_dkev = dk; - ds->ds_pending_data_mask = dk->dk_kevent.fflags; - ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; + if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { - ds->ds_is_level = true; ds->ds_needs_rearm = true; } else if (!(EV_CLEAR & proto_kev->flags)) { // we cheat and use EV_CLEAR to mean a "flag thingy" ds->ds_is_adder = true; } - if (EV_UDATA_SPECIFIC & proto_kev->flags) { - dispatch_assert(!(EV_ONESHOT & proto_kev->flags)); - dk->dk_kevent.flags |= EV_DISPATCH; - ds->ds_is_direct_kevent = true; - ds->ds_needs_rearm = true; - } // Some sources require special processing if (type->init != NULL) { type->init(ds, type, handle, mask, dq); } dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); + if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) { + // see _dispatch_source_merge_kevent + dispatch_assert(!(dk->dk_kevent.flags & EV_ONESHOT)); + dispatch_assert(dk->dk_kevent.flags & EV_DISPATCH); + dispatch_assert(dk->dk_kevent.flags & EV_UDATA_SPECIFIC); + } if (fastpath(!ds->ds_refs)) { ds->ds_refs = _dispatch_calloc(1ul, @@ -179,69 +223,37 @@ dispatch_source_create(dispatch_source_type_t type, } ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds); - if (!ds->ds_is_direct_kevent) { - // The initial target queue is the manager queue, in order to get - // the source installed. 
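The DISPATCH_KEV_CUSTOM_ADD/OR setup above is the internal face of the public data-source API; a sketch of the ADD flavor (dispatch_source_merge_data appears later in this hunk):

	#include <dispatch/dispatch.h>
	#include <stdio.h>

	static dispatch_source_t
	make_event_counter(dispatch_queue_t q)
	{
		dispatch_source_t s = dispatch_source_create(
				DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, q);
		dispatch_source_set_event_handler(s, ^{
			// callouts are coalesced; get_data() returns the sum of all
			// values merged since the previous callout
			printf("%lu new events\n", dispatch_source_get_data(s));
		});
		dispatch_resume(s);
		return s;
	}
	// producers, from any thread: dispatch_source_merge_data(s, 1);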
- ds->do_targetq = &_dispatch_mgr_q; - // First item on the queue sets the user-specified target queue - dispatch_set_target_queue(ds, dq); + if (slowpath(!dq)) { + dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); } else { - if (slowpath(!dq)) { - dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); - } else { - _dispatch_retain(dq); - } - ds->do_targetq = dq; - _dispatch_queue_priority_inherit_from_target((dispatch_queue_t)ds, dq); - _dispatch_queue_set_override_priority(dq); + _dispatch_retain(dq); } + ds->do_targetq = dq; _dispatch_object_debug(ds, "%s", __func__); return ds; } -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_source_get_kevent_queue(dispatch_source_t ds) -{ - if (ds->ds_is_direct_kevent) { - return ds->do_targetq; - } - return &_dispatch_mgr_q; -} - void _dispatch_source_dispose(dispatch_source_t ds) { _dispatch_object_debug(ds, "%s", __func__); + _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER); + _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); + _dispatch_source_handler_free(ds, DS_CANCEL_HANDLER); free(ds->ds_refs); - _dispatch_queue_destroy(ds); + _dispatch_queue_destroy(ds->_as_dq); } void _dispatch_source_xref_dispose(dispatch_source_t ds) { - _dispatch_wakeup(ds); -} - -void -dispatch_source_cancel(dispatch_source_t ds) -{ - _dispatch_object_debug(ds, "%s", __func__); - // Right after we set the cancel flag, someone else - // could potentially invoke the source, do the cancelation, - // unregister the source, and deallocate it. We would - // need to therefore retain/release before setting the bit - - _dispatch_retain(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_CANCELED, relaxed); - _dispatch_wakeup(ds); - _dispatch_release(ds); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); } long dispatch_source_testcancel(dispatch_source_t ds) { - return (bool)(ds->ds_atomic_flags & DSF_CANCELED); + return (bool)(ds->dq_atomic_flags & DSF_CANCELED); } unsigned long @@ -252,7 +264,7 @@ dispatch_source_get_mask(dispatch_source_t ds) mask = NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorystatus_override) { + else if (ds->ds_memorypressure_override) { mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif @@ -264,7 +276,7 @@ dispatch_source_get_handle(dispatch_source_t ds) { unsigned int handle = (unsigned int)ds->ds_ident_hack; #if TARGET_IPHONE_SIMULATOR - if (ds->ds_memorystatus_override) { + if (ds->ds_memorypressure_override) { handle = 0; } #endif @@ -279,118 +291,173 @@ dispatch_source_get_data(dispatch_source_t ds) data = NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorystatus_override) { + else if (ds->ds_memorypressure_override) { data = NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif return data; } -void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_merge_data2(dispatch_source_t ds, + pthread_priority_t pp, unsigned long val) { _dispatch_kevent_qos_s kev = { .fflags = (typeof(kev.fflags))val, .data = (typeof(kev.data))val, +#if DISPATCH_USE_KEVENT_QOS + .qos = (_dispatch_kevent_priority_t)pp, +#endif }; +#if !DISPATCH_USE_KEVENT_QOS + (void)pp; +#endif - dispatch_assert( - ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_ADD || - ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_OR); - + dispatch_assert(ds->ds_dkev == DISPATCH_KEV_CUSTOM_OR || + ds->ds_dkev == DISPATCH_KEV_CUSTOM_ADD); + _dispatch_kevent_debug("synthetic data", &kev); _dispatch_source_merge_kevent(ds, 
&kev); } +void +dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +{ + _dispatch_source_merge_data2(ds, 0, val); +} + +void +_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, + unsigned long val) +{ + _dispatch_source_merge_data2(ds, pp, val); +} + #pragma mark - #pragma mark dispatch_source_handler DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t -_dispatch_source_handler_alloc(dispatch_source_t ds, void *handler, long kind, +_dispatch_source_get_handler(dispatch_source_refs_t dr, long kind) +{ + return os_atomic_load(&dr->ds_handler[kind], relaxed); +} +#define _dispatch_source_get_event_handler(dr) \ + _dispatch_source_get_handler(dr, DS_EVENT_HANDLER) +#define _dispatch_source_get_cancel_handler(dr) \ + _dispatch_source_get_handler(dr, DS_CANCEL_HANDLER) +#define _dispatch_source_get_registration_handler(dr) \ + _dispatch_source_get_handler(dr, DS_REGISTN_HANDLER) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_source_handler_alloc(dispatch_source_t ds, void *func, long kind, bool block) { + // sources don't propagate priority by default + const dispatch_block_flags_t flags = + DISPATCH_BLOCK_HAS_PRIORITY | DISPATCH_BLOCK_NO_VOUCHER; dispatch_continuation_t dc = _dispatch_continuation_alloc(); - if (handler) { - dc->do_vtable = (void *)((block ? DISPATCH_OBJ_BLOCK_RELEASE_BIT : - DISPATCH_OBJ_CTXT_FETCH_BIT) | (kind != DS_EVENT_HANDLER ? - DISPATCH_OBJ_ASYNC_BIT : 0l)); - dc->dc_priority = 0; - dc->dc_voucher = NULL; + if (func) { + uintptr_t dc_flags = 0; + + if (kind != DS_EVENT_HANDLER) { + dc_flags |= DISPATCH_OBJ_CONSUME_BIT; + } if (block) { #ifdef __BLOCKS__ - if (slowpath(_dispatch_block_has_private_data(handler))) { - // sources don't propagate priority by default - dispatch_block_flags_t flags = DISPATCH_BLOCK_NO_QOS_CLASS; - flags |= _dispatch_block_get_flags(handler); - _dispatch_continuation_priority_set(dc, - _dispatch_block_get_priority(handler), flags); - } - if (kind != DS_EVENT_HANDLER) { - dc->dc_func = _dispatch_call_block_and_release; - } else { - dc->dc_func = _dispatch_Block_invoke(handler); - } - dc->dc_ctxt = _dispatch_Block_copy(handler); + _dispatch_continuation_init(dc, ds, func, 0, flags, dc_flags); #endif /* __BLOCKS__ */ } else { - dc->dc_func = handler; - dc->dc_ctxt = ds->do_ctxt; + dc_flags |= DISPATCH_OBJ_CTXT_FETCH_BIT; + _dispatch_continuation_init_f(dc, ds, ds->do_ctxt, func, + 0, flags, dc_flags); } - _dispatch_trace_continuation_push((dispatch_queue_t)ds, dc); + _dispatch_trace_continuation_push(ds->_as_dq, dc); } else { + dc->dc_flags = 0; dc->dc_func = NULL; } - dc->dc_data = (void*)kind; return dc; } -static inline void -_dispatch_source_handler_replace(dispatch_source_refs_t dr, long kind, - dispatch_continuation_t dc_new) +DISPATCH_NOINLINE +static void +_dispatch_source_handler_dispose(dispatch_continuation_t dc) { - dispatch_continuation_t dc = dr->ds_handler[kind]; - if (dc) { #ifdef __BLOCKS__ - if ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT) { - Block_release(dc->dc_ctxt); - } + if (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { + Block_release(dc->dc_ctxt); + } #endif /* __BLOCKS__ */ - if (dc->dc_voucher) { - _voucher_release(dc->dc_voucher); - dc->dc_voucher = NULL; - } - _dispatch_continuation_free(dc); + if (dc->dc_voucher) { + _voucher_release(dc->dc_voucher); + dc->dc_voucher = VOUCHER_INVALID; } - dr->ds_handler[kind] = dc_new; + _dispatch_continuation_free(dc); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t 
+_dispatch_source_handler_take(dispatch_source_t ds, long kind) +{ + return os_atomic_xchg(&ds->ds_refs->ds_handler[kind], NULL, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_handler_free(dispatch_source_t ds, long kind) +{ + dispatch_continuation_t dc = _dispatch_source_handler_take(ds, kind); + if (dc) _dispatch_source_handler_dispose(dc); } +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_source_handler_free(dispatch_source_refs_t dr, long kind) +_dispatch_source_handler_replace(dispatch_source_t ds, long kind, + dispatch_continuation_t dc) { - _dispatch_source_handler_replace(dr, kind, NULL); + if (!dc->dc_func) { + _dispatch_continuation_free(dc); + dc = NULL; + } else if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { + dc->dc_ctxt = ds->do_ctxt; + } + dc = os_atomic_xchg(&ds->ds_refs->ds_handler[kind], dc, release); + if (dc) _dispatch_source_handler_dispose(dc); } +DISPATCH_NOINLINE static void -_dispatch_source_set_handler(void *context) +_dispatch_source_set_handler_slow(void *context) { dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); + dispatch_continuation_t dc = context; long kind = (long)dc->dc_data; - dc->dc_data = 0; - if (!dc->dc_func) { - _dispatch_continuation_free(dc); - dc = NULL; - } else if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { - dc->dc_ctxt = ds->do_ctxt; + dc->dc_data = NULL; + _dispatch_source_handler_replace(ds, kind, dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_source_set_handler(dispatch_source_t ds, long kind, + dispatch_continuation_t dc) +{ + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); + if (_dispatch_queue_try_inactive_suspend(ds->_as_dq)) { + _dispatch_source_handler_replace(ds, kind, dc); + return dx_vtable(ds)->do_resume(ds, false); } - _dispatch_source_handler_replace(ds->ds_refs, kind, dc); - if (kind == DS_EVENT_HANDLER && dc && dc->dc_priority) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - _dispatch_queue_set_override_priority((dispatch_queue_t)ds); -#endif + _dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds); + if (kind == DS_REGISTN_HANDLER) { + _dispatch_bug_deprecated("Setting registration handler after " + "the source has been activated"); } + dc->dc_data = (void *)kind; + _dispatch_barrier_trysync_or_async_f(ds->_as_dq, dc, + _dispatch_source_set_handler_slow); } #ifdef __BLOCKS__ @@ -400,8 +467,7 @@ dispatch_source_set_event_handler(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, true); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } #endif /* __BLOCKS__ */ @@ -411,21 +477,15 @@ dispatch_source_set_event_handler_f(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } void -_dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds, - void *ctxt, dispatch_function_t handler) +_dispatch_source_set_event_handler_continuation(dispatch_source_t ds, + dispatch_continuation_t dc) { - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false); - dc->do_vtable = (void 
*)((long)dc->do_vtable &~DISPATCH_OBJ_CTXT_FETCH_BIT); - dc->dc_other = dc->dc_ctxt; - dc->dc_ctxt = ctxt; - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_trace_continuation_push(ds->_as_dq, dc); + _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } #ifdef __BLOCKS__ @@ -435,8 +495,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } #endif /* __BLOCKS__ */ @@ -446,8 +505,7 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, false); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } #ifdef __BLOCKS__ @@ -457,8 +515,7 @@ dispatch_source_set_registration_handler(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, true); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_REGISTN_HANDLER, dc); } #endif /* __BLOCKS__ */ @@ -468,67 +525,62 @@ dispatch_source_set_registration_handler_f(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, false); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_REGISTN_HANDLER, dc); } #pragma mark - #pragma mark dispatch_source_invoke static void -_dispatch_source_registration_callout(dispatch_source_t ds) +_dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, + dispatch_invoke_flags_t flags) { - dispatch_source_refs_t dr = ds->ds_refs; - dispatch_continuation_t dc = dr->ds_handler[DS_REGISTN_HANDLER]; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { + dispatch_continuation_t dc; + + dc = _dispatch_source_handler_take(ds, DS_REGISTN_HANDLER); + if (ds->dq_atomic_flags & (DSF_CANCELED | DQF_RELEASED)) { // no registration callout if source is canceled rdar://problem/8955246 - return _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + return _dispatch_source_handler_dispose(dc); } - pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); - if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { + if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc); - dr->ds_handler[DS_REGISTN_HANDLER] = NULL; - _dispatch_reset_defaultpriority(old_dp); + _dispatch_continuation_pop(dc, cq, flags); } static void -_dispatch_source_cancel_callout(dispatch_source_t ds) +_dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, + dispatch_invoke_flags_t flags) { - dispatch_source_refs_t dr = ds->ds_refs; - dispatch_continuation_t dc = dr->ds_handler[DS_CANCEL_HANDLER]; + dispatch_continuation_t dc; + + dc = _dispatch_source_handler_take(ds, DS_CANCEL_HANDLER); ds->ds_pending_data_mask = 0; ds->ds_pending_data = 0; ds->ds_data = 0; - _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); - _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); + _dispatch_source_handler_free(ds, 
DS_REGISTN_HANDLER); if (!dc) { return; } - if (!(ds->ds_atomic_flags & DSF_CANCELED)) { - return _dispatch_source_handler_free(dr, DS_CANCEL_HANDLER); + if (!(ds->dq_atomic_flags & DSF_CANCELED)) { + return _dispatch_source_handler_dispose(dc); } - pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); - if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { + if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc); - dr->ds_handler[DS_CANCEL_HANDLER] = NULL; - _dispatch_reset_defaultpriority(old_dp); + _dispatch_continuation_pop(dc, cq, flags); } static void -_dispatch_source_latch_and_call(dispatch_source_t ds) +_dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, + dispatch_invoke_flags_t flags) { unsigned long prev; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { - return; - } dispatch_source_refs_t dr = ds->ds_refs; - dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER]; - prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); + dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER); + prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); if (ds->ds_is_level) { ds->ds_data = ~prev; } else if (ds->ds_is_timer && ds_timer(dr).target && prev) { @@ -539,12 +591,11 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) if (!dispatch_assume(prev) || !dc) { return; } - pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); - voucher_t voucher = dc->dc_voucher ? _voucher_retain(dc->dc_voucher) : NULL; - _dispatch_continuation_voucher_adopt(dc); // consumes voucher reference - _dispatch_continuation_pop(dc); - if (voucher) dc->dc_voucher = voucher; - _dispatch_reset_defaultpriority(old_dp); + _dispatch_continuation_pop(dc, cq, flags); + if (ds->ds_is_timer && (ds_timer(dr).flags & DISPATCH_TIMER_AFTER)) { + _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); + dispatch_release(ds); // dispatch_after sources are one-shot + } } static void @@ -553,101 +604,237 @@ _dispatch_source_kevent_unregister(dispatch_source_t ds) _dispatch_object_debug(ds, "%s", __func__); uint32_t flags = (uint32_t)ds->ds_pending_data_mask; dispatch_kevent_t dk = ds->ds_dkev; - if (ds->ds_atomic_flags & DSF_DELETED) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (ds->ds_is_custom_source) { + ds->ds_dkev = NULL; + goto done; + } + + if (ds->ds_is_direct_kevent && + ((dqf & DSF_DELETED) || !(ds->ds_is_installed))) { dk->dk_kevent.flags |= EV_DELETE; // already deleted - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); } if (dk->dk_kevent.filter == DISPATCH_EVFILT_TIMER) { ds->ds_dkev = NULL; - _dispatch_timers_unregister(ds, dk); + if (ds->ds_is_installed) { + _dispatch_timers_unregister(ds, dk); + } } else if (!ds->ds_is_direct_kevent) { ds->ds_dkev = NULL; + dispatch_assert((bool)ds->ds_is_installed); TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); _dispatch_kevent_unregister(dk, flags, 0); } else { - int dkev_dispose_options = 0; - if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { + unsigned int dkev_dispose_options = 0; + if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; - } - if (ds->ds_needs_mgr) { - dkev_dispose_options |= DKEV_DISPOSE_IGNORE_ENOENT; - ds->ds_needs_mgr = false; + } else if (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { + if (!ds->ds_is_direct_kevent) { 
+ dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; + } } long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options); if (r == EINPROGRESS) { _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]", ds, dk); - ds->ds_pending_delete = true; + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); return; // deferred unregistration +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS } else if (r == ENOENT) { _dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]", ds, dk); - ds->ds_needs_mgr = true; + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); return; // potential concurrent EV_DELETE delivery rdar://22047283 +#endif + } else { + dispatch_assume_zero(r); } ds->ds_dkev = NULL; _TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list); } - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, ds->ds_dkev); +done: + dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, + DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); + if (dqf & DSF_CANCEL_WAITER) { + _dispatch_wake_by_address(&ds->dq_atomic_flags); + } + ds->ds_is_installed = true; ds->ds_needs_rearm = false; // re-arm is pointless and bad now + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dk); _dispatch_release(ds); // the retain is done at creation time } -static void +DISPATCH_ALWAYS_INLINE +static bool +_dispatch_source_tryarm(dispatch_source_t ds) +{ + dispatch_queue_flags_t oqf, nqf; + return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { + if (oqf & (DSF_DEFERRED_DELETE | DSF_DELETED)) { + // the test is inside the loop because it's convenient but the + // result should not change for the duration of the rmw_loop + os_atomic_rmw_loop_give_up(break); + } + nqf = oqf | DSF_ARMED; + }); +} + +static bool _dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) { switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: _dispatch_timers_update(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); - return; + return true; +#if HAVE_MACH case EVFILT_MACHPORT: - if (ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) { + if ((ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) && + !ds->ds_is_direct_kevent) { new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH } break; +#endif } - if ((ds->ds_atomic_flags & DSF_DELETED) || - _dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) { - _dispatch_source_kevent_unregister(ds); + if (unlikely(!_dispatch_source_tryarm(ds))) { + return false; } + if (unlikely(_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0))) { + _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED, + DSF_ARMED); + return false; + } + _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + return true; } static void -_dispatch_source_kevent_register(dispatch_source_t ds) +_dispatch_source_kevent_register(dispatch_source_t ds, pthread_priority_t pp) { - dispatch_assert_zero(ds->ds_is_installed); + dispatch_assert_zero((bool)ds->ds_is_installed); switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: _dispatch_timers_update(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: armed 
kevent[%p]", ds, ds->ds_dkev); return; } uint32_t flags; - bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, &flags); + bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, pp, &flags); TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + ds->ds_is_installed = true; if (do_resume || ds->ds_needs_rearm) { - _dispatch_source_kevent_resume(ds, flags); + if (unlikely(!_dispatch_source_kevent_resume(ds, flags))) { + _dispatch_source_kevent_unregister(ds); + } + } else { + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); } _dispatch_object_debug(ds, "%s", __func__); } +static void +_dispatch_source_set_event_handler_context(void *ctxt) +{ + dispatch_source_t ds = ctxt; + dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs); + + if (dc && (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT)) { + dc->dc_ctxt = ds->do_ctxt; + } +} + +static pthread_priority_t +_dispatch_source_compute_kevent_priority(dispatch_source_t ds) +{ + pthread_priority_t p = ds->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + dispatch_queue_t tq = ds->do_targetq; + pthread_priority_t tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + + while (unlikely(tq->do_targetq)) { + if (unlikely(tq == &_dispatch_mgr_q)) { + return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } + if (unlikely(_dispatch_queue_is_thread_bound(tq))) { + // thread bound hierarchies are weird, we need to install + // from the context of the thread this hierarchy is bound to + return 0; + } + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { + // this queue may not be activated yet, so the queue graph may not + // have stabilized yet + _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); + return 0; + } + if (unlikely(!_dispatch_queue_has_immutable_target(tq))) { + if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) { + // we're not allowed to dereference tq->do_targetq + _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); + return 0; + } + } + if (!(tq->dq_priority & _PTHREAD_PRIORITY_INHERIT_FLAG)) { + if (p < tqp) p = tqp; + } + tq = tq->do_targetq; + tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + } + + if (unlikely(!tqp)) { + // pthread root queues opt out of QoS + return 0; + } + return _dispatch_priority_inherit_from_root_queue(p, tq); +} + +void +_dispatch_source_finalize_activation(dispatch_source_t ds) +{ + dispatch_continuation_t dc; + + if (unlikely(ds->ds_is_direct_kevent && + (_dispatch_queue_atomic_flags(ds->_as_dq) & DSF_CANCELED))) { + return _dispatch_source_kevent_unregister(ds); + } + + dc = _dispatch_source_get_event_handler(ds->ds_refs); + if (dc) { + if (_dispatch_object_is_barrier(dc)) { + _dispatch_queue_atomic_flags_set(ds->_as_dq, DQF_BARRIER_BIT); + } + ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { + _dispatch_barrier_async_detached_f(ds->_as_dq, ds, + _dispatch_source_set_event_handler_context); + } + } + + // call "super" + _dispatch_queue_finalize_activation(ds->_as_dq); + + if (ds->ds_is_direct_kevent && !ds->ds_is_installed) { + pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); + if (pp) _dispatch_source_kevent_register(ds, pp); + } +} + DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t -_dispatch_source_invoke2(dispatch_object_t dou, - _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) 
+_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, + uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) { dispatch_source_t ds = dou._ds; + dispatch_queue_t retq = NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (_dispatch_queue_class_probe(ds)) { - if (slowpath(_dispatch_queue_drain(ds))) { - DISPATCH_CLIENT_CRASH("Sync onto source"); - } + // Intentionally always drain even when on the manager queue + // and not the source's regular target queue: we need to be able + // to drain timer setting and the like there. + retq = _dispatch_queue_serial_drain(ds->_as_dq, flags, owned, NULL); } // This function performs all source actions. Each action is responsible @@ -655,164 +842,319 @@ _dispatch_source_invoke2(dispatch_object_t dou, // current queue is not the correct queue for this action, the correct queue // will be returned and the invoke will be re-driven on that queue. - // The order of tests here in invoke and in probe should be consistent. + // The order of tests here in invoke and in wakeup should be consistent. - dispatch_queue_t dq = _dispatch_queue_get_current(); - dispatch_queue_t dkq = _dispatch_source_get_kevent_queue(ds); dispatch_source_refs_t dr = ds->ds_refs; + dispatch_queue_t dkq = &_dispatch_mgr_q; + + if (ds->ds_is_direct_kevent) { + dkq = ds->do_targetq; + } if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. if (dq != dkq) { return dkq; } - _dispatch_source_kevent_register(ds); - ds->ds_is_installed = true; - if (dr->ds_handler[DS_REGISTN_HANDLER]) { - return ds->do_targetq; - } - if (slowpath(ds->do_xref_cnt == -1)) { - return dkq; // rdar://problem/9558246 - } - } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { + _dispatch_source_kevent_register(ds, _dispatch_get_defaultpriority()); + } + + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { // Source suspended by an item drained from the source queue. - return NULL; - } else if (dr->ds_handler[DS_REGISTN_HANDLER]) { + return ds->do_targetq; + } + + if (_dispatch_source_get_registration_handler(dr)) { // The source has been registered and the registration handler needs // to be delivered on the target queue. 
if (dq != ds->do_targetq) { return ds->do_targetq; } // clears ds_registration_handler - _dispatch_source_registration_callout(ds); - if (slowpath(ds->do_xref_cnt == -1)) { - return dkq; // rdar://problem/9558246 - } - } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete || - (ds->ds_atomic_flags & DSF_ONESHOT))) { - // Pending source kevent unregistration has been completed - if (ds->ds_needs_mgr) { - dkq = &_dispatch_mgr_q; - } + _dispatch_source_registration_callout(ds, dq, flags); + } + + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + bool prevent_starvation = false; + + if ((dqf & DSF_DEFERRED_DELETE) && + ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { +unregister_event: + // DSF_DELETE: Pending source kevent unregistration has been completed + // !DSF_ARMED: event was delivered and can safely be unregistered if (dq != dkq) { return dkq; } - ds->ds_pending_delete = false; - if (ds->ds_atomic_flags & DSF_ONESHOT) { - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ONESHOT, - relaxed); - } - if (ds->ds_dkev) { - _dispatch_source_kevent_unregister(ds); - if (ds->ds_needs_mgr) { - return &_dispatch_mgr_q; + _dispatch_source_kevent_unregister(ds); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + } + + if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + // The source has pending data to deliver via the event handler callback + // on the target queue. Some sources need to be rearmed on the kevent + // queue after event delivery. + if (dq == ds->do_targetq) { + _dispatch_source_latch_and_call(ds, dq, flags); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + + // starvation avoidance: if the source triggers itself then force a + // re-queue to give other things already queued on the target queue + // a chance to run. + // + // however, if the source is directly targeting an overcommit root + // queue, this would requeue the source and ask for a new overcommit + // thread right away. + prevent_starvation = dq->do_targetq || + !(dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + if (prevent_starvation && ds->ds_pending_data) { + retq = ds->do_targetq; } - } - if (dr->ds_handler[DS_EVENT_HANDLER] || - dr->ds_handler[DS_CANCEL_HANDLER] || - dr->ds_handler[DS_REGISTN_HANDLER]) { + } else { + // there is no point trying to be eager, the next thing to do is + // to deliver the event return ds->do_targetq; } - } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) - && !ds->ds_pending_delete) { + } + + if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !(dqf & DSF_DEFERRED_DELETE)) { // The source has been cancelled and needs to be uninstalled from the // kevent queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue.
- if (ds->ds_dkev) { - if (ds->ds_needs_mgr) { - dkq = &_dispatch_mgr_q; - } + if (!(dqf & DSF_DELETED)) { if (dq != dkq) { return dkq; } _dispatch_source_kevent_unregister(ds); - if (ds->ds_needs_mgr) { - return &_dispatch_mgr_q; - } - if (ds->ds_pending_delete) { - // deferred unregistration - if (ds->ds_needs_rearm) { - return dkq; + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (unlikely(dqf & DSF_DEFERRED_DELETE)) { + if (!(dqf & DSF_ARMED)) { + goto unregister_event; } - return NULL; + // we need to wait for the EV_DELETE + return retq; } } - if (dr->ds_handler[DS_EVENT_HANDLER] || - dr->ds_handler[DS_CANCEL_HANDLER] || - dr->ds_handler[DS_REGISTN_HANDLER]) { - if (dq != ds->do_targetq) { - return ds->do_targetq; - } - } - _dispatch_source_cancel_callout(ds); - } else if (ds->ds_pending_data && !ds->ds_pending_delete) { - // The source has pending data to deliver via the event handler callback - // on the target queue. Some sources need to be rearmed on the kevent - // queue after event delivery. - if (dq != ds->do_targetq) { - return ds->do_targetq; - } - _dispatch_source_latch_and_call(ds); - if (ds->ds_needs_rearm) { - return dkq; + if (dq != ds->do_targetq && (_dispatch_source_get_event_handler(dr) || + _dispatch_source_get_cancel_handler(dr) || + _dispatch_source_get_registration_handler(dr))) { + retq = ds->do_targetq; + } else { + _dispatch_source_cancel_callout(ds, dq, flags); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { + prevent_starvation = false; + } + + if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { // The source needs to be rearmed on the kevent queue. if (dq != dkq) { return dkq; } - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, - ds->ds_dkev); - _dispatch_source_kevent_resume(ds, 0); - } - - return NULL; + if (unlikely(dqf & DSF_DEFERRED_DELETE)) { + // no need for resume when we can directly unregister the kevent + goto unregister_event; + } + if (prevent_starvation) { + // keep the old behavior to force re-enqueue to our target queue + // for the rearm. It is inefficient, though, and we should + // improve this. + // + // if the handler didn't run, or this is a pending delete + // or our target queue is a global queue, then starvation is + // not a concern and we can rearm right away. + return ds->do_targetq; + } + if (unlikely(!_dispatch_source_kevent_resume(ds, 0))) { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + goto unregister_event; + } + } + + return retq; } DISPATCH_NOINLINE void -_dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou, - dispatch_invoke_flags_t flags) +_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(ds, dou._dc, flags, _dispatch_source_invoke2); + _dispatch_queue_class_invoke(ds->_as_dq, flags, _dispatch_source_invoke2); } -unsigned long -_dispatch_source_probe(dispatch_source_t ds) +void +_dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { // This function determines whether the source needs to be invoked. - // The order of tests here in probe and in invoke should be consistent. + // The order of tests here in wakeup and in invoke should be consistent.
dispatch_source_refs_t dr = ds->ds_refs; + dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + bool deferred_delete = (dqf & DSF_DEFERRED_DELETE); + + if (ds->ds_is_direct_kevent) { + dkq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. - return true; - } else if (dr->ds_handler[DS_REGISTN_HANDLER]) { + tq = dkq; + } else if (_dispatch_source_get_registration_handler(dr)) { // The registration handler needs to be delivered to the target queue. - return true; - } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete || - (ds->ds_atomic_flags & DSF_ONESHOT))) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if (deferred_delete && ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { // Pending source kevent unregistration has been completed - return true; - } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) - && !ds->ds_pending_delete) { + // or EV_ONESHOT event can be acknowledged + tq = dkq; + } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + // The source has pending data to deliver to the target queue. + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !deferred_delete) { // The source needs to be uninstalled from the kevent queue, or the // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. - if (ds->ds_dkev || dr->ds_handler[DS_EVENT_HANDLER] || - dr->ds_handler[DS_CANCEL_HANDLER] || - dr->ds_handler[DS_REGISTN_HANDLER]) { - return true; + if (!(dqf & DSF_DELETED)) { + tq = dkq; + } else if (_dispatch_source_get_event_handler(dr) || + _dispatch_source_get_cancel_handler(dr) || + _dispatch_source_get_registration_handler(dr)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; } - } else if (ds->ds_pending_data && !ds->ds_pending_delete) { - // The source has pending data to deliver to the target queue. - return true; - } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { + } else if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { // The source needs to be rearmed on the kevent queue. - return true; + tq = dkq; + } + if (!tq && _dispatch_queue_class_probe(ds)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + + if (tq) { + return _dispatch_queue_class_wakeup(ds->_as_dq, pp, flags, tq); + } else if (pp) { + return _dispatch_queue_class_override_drainer(ds->_as_dq, pp, flags); + } else if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(ds); + } +} + +void +dispatch_source_cancel(dispatch_source_t ds) +{ + _dispatch_object_debug(ds, "%s", __func__); + // Right after we set the cancel flag, someone else + // could potentially invoke the source, do the cancellation, + // unregister the source, and deallocate it. 
We therefore + // need to retain/release before setting the bit + _dispatch_retain(ds); + + dispatch_queue_t q = ds->_as_dq; + if (_dispatch_queue_atomic_flags_set_orig(q, DSF_CANCELED) & DSF_CANCELED) { + _dispatch_release_tailcall(ds); + } else { + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME); + } +} + +void +dispatch_source_cancel_and_wait(dispatch_source_t ds) +{ + dispatch_queue_flags_t old_dqf, dqf, new_dqf; + pthread_priority_t pp; + + if (unlikely(_dispatch_source_get_cancel_handler(ds->ds_refs))) { + DISPATCH_CLIENT_CRASH(ds, "Source has a cancel handler"); + } + + _dispatch_object_debug(ds, "%s", __func__); + os_atomic_rmw_loop2o(ds, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + new_dqf = old_dqf | DSF_CANCELED; + if (old_dqf & DSF_CANCEL_WAITER) { + os_atomic_rmw_loop_give_up(break); + } + if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { + // just add DSF_CANCELED + } else if ((old_dqf & DSF_DEFERRED_DELETE) || !ds->ds_is_direct_kevent){ + new_dqf |= DSF_CANCEL_WAITER; + } + }); + dqf = new_dqf; + + if (old_dqf & DQF_RELEASED) { + DISPATCH_CLIENT_CRASH(ds, "Dispatch source used after last release"); + } + if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { + return; + } + if (dqf & DSF_CANCEL_WAITER) { + goto override; + } + + // simplified version of _dispatch_queue_drain_try_lock + // that also sets the DIRTY bit on failure to lock + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t xor_owner_and_set_full_width = tid_self | + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(ds, dq_state, old_state, new_state, seq_cst, { + new_state = old_state; + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state ^= xor_owner_and_set_full_width; + } else if (old_dqf & DSF_CANCELED) { + os_atomic_rmw_loop_give_up(break); + } else { + // this case needs a release barrier, hence the seq_cst above + new_state |= DISPATCH_QUEUE_DIRTY; + } + }); + + if (unlikely(_dq_state_is_suspended(old_state))) { + if (unlikely(_dq_state_suspend_cnt(old_state))) { + DISPATCH_CLIENT_CRASH(ds, "Source is suspended"); + } + // inactive sources have never been registered and there is no need + // to wait here because activation will notice and mark the source + // as deleted without ever trying to use the fd or mach port.
+ return dispatch_activate(ds); + } + + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + // same thing _dispatch_source_invoke2() does when handling cancellation + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (!(dqf & (DSF_DEFERRED_DELETE | DSF_DELETED))) { + _dispatch_source_kevent_unregister(ds); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (likely((dqf & DSF_STATE_MASK) == DSF_DELETED)) { + _dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE); + } + } + _dispatch_try_lock_transfer_or_wakeup(ds->_as_dq); + } else if (unlikely(_dq_state_drain_locked_by(old_state, tid_self))) { + DISPATCH_CLIENT_CRASH(ds, "dispatch_source_cancel_and_wait " + "called from a source handler"); + } else { +override: + pp = _dispatch_get_priority() & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (pp) dx_wakeup(ds, pp, DISPATCH_WAKEUP_OVERRIDING); + dispatch_activate(ds); + } + + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + while (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { + if (unlikely(!(dqf & DSF_CANCEL_WAITER))) { + if (!os_atomic_cmpxchgvw2o(ds, dq_atomic_flags, + dqf, dqf | DSF_CANCEL_WAITER, &dqf, relaxed)) { + continue; + } + dqf |= DSF_CANCEL_WAITER; + } + _dispatch_wait_on_address(&ds->dq_atomic_flags, dqf, DLOCK_LOCK_NONE); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - return _dispatch_queue_class_probe(ds); } static void @@ -820,53 +1162,90 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, const _dispatch_kevent_qos_s *ke) { _dispatch_object_debug(ds, "%s", __func__); - bool retained = false; + dispatch_wakeup_flags_t flags = 0; + dispatch_queue_flags_t dqf; + pthread_priority_t pp = 0; + + if (ds->ds_needs_rearm || (ke->flags & (EV_DELETE | EV_ONESHOT))) { + // once we modify the queue atomic flags below, it will allow concurrent + // threads running _dispatch_source_invoke2 to dispose of the source, + // so we can't safely borrow the reference we get from the knote udata + // anymore, and need our own + flags = DISPATCH_WAKEUP_CONSUME; + _dispatch_retain(ds); // rdar://20382435 + } + if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && !(ke->flags & EV_DELETE)) { - _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]", - ds, (void*)ke->udata); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ONESHOT, relaxed); + dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, + DSF_DEFERRED_DELETE, DSF_ARMED); + if (ke->flags & EV_VANISHED) { + _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + "monitored resource vanished before the source " + "cancel handler was invoked", 0); + } + _dispatch_debug("kevent-source[%p]: %s kevent[%p]", ds, + (ke->flags & EV_VANISHED) ? 
"vanished" : + "deferred delete oneshot", (void*)ke->udata); } else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) { + dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, + DSF_DELETED, DSF_ARMED); _dispatch_debug("kevent-source[%p]: delete kevent[%p]", ds, (void*)ke->udata); - retained = true; - _dispatch_retain(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_DELETED, relaxed); if (ke->flags & EV_DELETE) goto done; + } else if (ds->ds_needs_rearm) { + dqf = _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ", + ds, (void*)ke->udata); + } else { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { + + if (dqf & (DSF_CANCELED | DQF_RELEASED)) { goto done; // rdar://20204025 } - if (ds->ds_is_level) { +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT && + dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { + DISPATCH_INTERNAL_CRASH(ke->flags,"Unexpected kevent for mach channel"); + } +#endif + + unsigned long data; + if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && + (ke->flags & EV_VANISHED)) { + // if the resource behind the ident vanished, the event handler can't + // do anything useful anymore, so do not try to call it at all + // + // Note: if the kernel doesn't support EV_VANISHED we always get it + // back unchanged from the flags passed at EV_ADD (registration) time + // Since we never ask for both EV_ONESHOT and EV_VANISHED for sources, + // if we get both bits it was a real EV_VANISHED delivery + os_atomic_store2o(ds, ds_pending_data, 0, relaxed); +#if HAVE_MACH + } else if (ke->filter == EVFILT_MACHPORT) { + data = DISPATCH_MACH_RECV_MESSAGE; + os_atomic_store2o(ds, ds_pending_data, data, relaxed); +#endif + } else if (ds->ds_is_level) { // ke->data is signed and "negative available data" makes no sense // zero bytes happens when EV_EOF is set - // 10A268 does not fail this assert with EVFILT_READ and a 10 GB file dispatch_assert(ke->data >= 0l); - dispatch_atomic_store2o(ds, ds_pending_data, ~(unsigned long)ke->data, - relaxed); + data = ~(unsigned long)ke->data; + os_atomic_store2o(ds, ds_pending_data, data, relaxed); } else if (ds->ds_is_adder) { - (void)dispatch_atomic_add2o(ds, ds_pending_data, - (unsigned long)ke->data, relaxed); + data = (unsigned long)ke->data; + os_atomic_add2o(ds, ds_pending_data, data, relaxed); } else if (ke->fflags & ds->ds_pending_data_mask) { - (void)dispatch_atomic_or2o(ds, ds_pending_data, - ke->fflags & ds->ds_pending_data_mask, relaxed); + data = ke->fflags & ds->ds_pending_data_mask; + os_atomic_or2o(ds, ds_pending_data, data, relaxed); } + done: - // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery - if (ds->ds_needs_rearm) { - if (!retained) { - retained = true; - _dispatch_retain(ds); // rdar://20382435 - } - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ", - ds, (void*)ke->udata); - } - if (retained) { - _dispatch_queue_wakeup_and_release((dispatch_queue_t)ds); - } else { - _dispatch_queue_wakeup((dispatch_queue_t)ds); - } +#if DISPATCH_USE_KEVENT_QOS + pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; +#endif + dx_wakeup(ds, pp, flags | DISPATCH_WAKEUP_FLUSH); } #pragma mark - @@ -915,9 +1294,9 @@ _dispatch_kevent_init() TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_add, dk_list); _dispatch_kevent_data_or.dk_kevent.udata = - 
(uintptr_t)&_dispatch_kevent_data_or; + (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_or; _dispatch_kevent_data_add.dk_kevent.udata = - (uintptr_t)&_dispatch_kevent_data_add; + (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_add; #endif // !DISPATCH_USE_EV_UDATA_SPECIFIC } @@ -931,6 +1310,7 @@ _dispatch_kevent_hash(uint64_t ident, short filter) MACH_PORT_INDEX(ident) : ident); #else value = ident; + (void)filter; #endif return DSL_HASH((uintptr_t)value); } @@ -961,7 +1341,8 @@ _dispatch_kevent_insert(dispatch_kevent_t dk) // Find existing kevents, and merge any new flags if necessary static bool -_dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) +_dispatch_kevent_register(dispatch_kevent_t *dkp, pthread_priority_t pp, + uint32_t *flgp) { dispatch_kevent_t dk = NULL, ds_dkev = *dkp; uint32_t new_flags; @@ -981,6 +1362,21 @@ _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) do_resume = new_flags; } else { dk = ds_dkev; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!_dispatch_kevent_workqueue_enabled) { + // do nothing + } else if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + dk->dk_kevent.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } else { + pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | + _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + if (!pp) pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + _dispatch_assert_is_valid_qos_class(pp); + dk->dk_kevent.qos = (_dispatch_kevent_priority_t)pp; + } +#else + (void)pp; +#endif _dispatch_kevent_insert(dk); new_flags = dk->dk_kevent.fflags; do_resume = true; @@ -999,6 +1395,10 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) { long r; + bool oneshot; + if (dk->dk_kevent.flags & EV_DELETE) { + return 0; + } switch (dk->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: case DISPATCH_EVFILT_CUSTOM_ADD: @@ -1006,29 +1406,39 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, // these types not registered with kevent return 0; #if HAVE_MACH - case EVFILT_MACHPORT: - return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); case DISPATCH_EVFILT_MACH_NOTIFICATION: return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags); +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + case EVFILT_MACHPORT: + if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); + } + // fall through #endif +#endif // HAVE_MACH default: - if (dk->dk_kevent.flags & EV_DELETE) { - return 0; - } - r = _dispatch_kq_update(&dk->dk_kevent); + // oneshot dk may be freed by the time we return from + // _dispatch_kq_immediate_update if the event was delivered (and then + // unregistered) concurrently. + oneshot = (dk->dk_kevent.flags & EV_ONESHOT); + r = _dispatch_kq_immediate_update(&dk->dk_kevent); if (r && (dk->dk_kevent.flags & EV_ADD) && (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); - } else if (dk->dk_kevent.flags & EV_DISPATCH) { - dk->dk_kevent.flags &= ~EV_ADD; + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); + } else if (!oneshot && (dk->dk_kevent.flags & EV_DISPATCH)) { + // we can safely skip doing this for ONESHOT events because + // the next kq update we will do is _dispatch_kevent_dispose() + // which also clears EV_ADD. 
+ dk->dk_kevent.flags &= ~(EV_ADD|EV_VANISHED); } return r; } + (void)new_flags; (void)del_flags; } static long -_dispatch_kevent_dispose(dispatch_kevent_t dk, int options) +_dispatch_kevent_dispose(dispatch_kevent_t dk, unsigned int options) { long r = 0; switch (dk->dk_kevent.filter) { @@ -1041,56 +1451,63 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk, int options) // these sources live on statically allocated lists } return r; + } + if (!(dk->dk_kevent.flags & EV_DELETE)) { + dk->dk_kevent.flags |= EV_DELETE; + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); + if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { + dk->dk_kevent.flags |= EV_ENABLE; + } + switch (dk->dk_kevent.filter) { #if HAVE_MACH - case EVFILT_MACHPORT: - _dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags); - break; - case DISPATCH_EVFILT_MACH_NOTIFICATION: - _dispatch_kevent_mach_notify_resume(dk, 0, dk->dk_kevent.fflags); - break; -#endif - default: - if (~dk->dk_kevent.flags & EV_DELETE) { - dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); - if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags |= EV_ENABLE; - } - r = _dispatch_kq_update(&dk->dk_kevent); - if (r == ENOENT && (options & DKEV_DISPOSE_IGNORE_ENOENT)) { - r = 0; + case DISPATCH_EVFILT_MACH_NOTIFICATION: + r = _dispatch_kevent_mach_notify_resume(dk, 0,dk->dk_kevent.fflags); + break; +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + case EVFILT_MACHPORT: + if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + r = _dispatch_kevent_machport_resume(dk,0,dk->dk_kevent.fflags); + break; } + // fall through +#endif +#endif + default: if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags &= ~EV_ENABLE; + _dispatch_kq_deferred_update(&dk->dk_kevent); + } else { + r = _dispatch_kq_immediate_update(&dk->dk_kevent); } + break; + } + if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { + dk->dk_kevent.flags &= ~EV_ENABLE; } - break; } - if ((r == EINPROGRESS || r == ENOENT) && - (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - // deferred EV_DELETE or concurrent concurrent EV_DELETE delivery - dk->dk_kevent.flags &= ~EV_DELETE; - dk->dk_kevent.flags |= EV_ENABLE; - } else { - if ((dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { -#if DISPATCH_DEBUG - // zero/trash dr linkage - dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources); - TAILQ_REMOVE(&dk->dk_sources, dr, dr_list); + if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) { + bool deferred_delete = (r == EINPROGRESS); +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS + if (r == ENOENT) deferred_delete = true; #endif - } else { - uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, - dk->dk_kevent.filter); - TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); + if (deferred_delete) { + // deferred EV_DELETE or concurrent EV_DELETE delivery + dk->dk_kevent.flags &= ~EV_DELETE; + dk->dk_kevent.flags |= EV_ENABLE; + return r; } - _dispatch_kevent_unguard(dk); - free(dk); + } else { + uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, + dk->dk_kevent.filter); + TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); } + _dispatch_kevent_unguard(dk); + free(dk); return r; } static long -_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, int options) +_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, + unsigned int options) { dispatch_source_refs_t dri; uint32_t del_flags, fflags = 0; @@ -1125,19 +1542,52 @@ _dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke) _dispatch_kevent_qos_s fake; fake = *ke;
fake.flags &= ~EV_ERROR; + fake.flags |= EV_ONESHOT; fake.fflags = NOTE_EXIT; fake.data = 0; - _dispatch_kevent_drain(&fake); + _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); + _dispatch_kevent_merge(&fake); } DISPATCH_NOINLINE static void _dispatch_kevent_error(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); + _dispatch_kevent_qos_s *kev = NULL; + + if (ke->flags & EV_DELETE) { + if (ke->flags & EV_UDATA_SPECIFIC) { + if (ke->data == EINPROGRESS) { + // deferred EV_DELETE + return; + } +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS + if (ke->data == ENOENT) { + // deferred EV_DELETE + return; + } +#endif + } + // for EV_DELETE if the update was deferred we may have reclaimed + // our dispatch_kevent_t, and it is unsafe to dereference it now. + } else if (ke->udata) { + kev = &((dispatch_kevent_t)ke->udata)->dk_kevent; + ke->flags |= kev->flags; + } + +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP && + (ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled && + kev && (kev->fflags & MACH_RCV_MSG)) { + DISPATCH_INTERNAL_CRASH(ke->ident, + "Missing EVFILT_MACHPORT support for ports"); + } +#endif + if (ke->data) { // log the unexpected error _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + !ke->udata ? NULL : ke->flags & EV_DELETE ? "delete" : ke->flags & EV_ADD ? "add" : ke->flags & EV_ENABLE ? "enable" : "monitor", @@ -1153,29 +1603,29 @@ _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke) dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); #endif if (ke->filter == EVFILT_USER) { - _dispatch_kevent_mgr_debug(ke, __func__); + _dispatch_kevent_mgr_debug(ke); return; } if (slowpath(ke->flags & EV_ERROR)) { if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - ke->data = 0; // don't return error from caller - if (ke->flags & EV_DELETE) { - _dispatch_debug("kevent[0x%llx]: ignoring ESRCH from " - "EVFILT_PROC EV_DELETE", ke->udata); - return; - } _dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: " - "generating fake NOTE_EXIT", ke->udata); + "generating fake NOTE_EXIT", (unsigned long long)ke->udata); return _dispatch_kevent_proc_exit(ke); } + _dispatch_debug("kevent[0x%llx]: handling error", + (unsigned long long)ke->udata); return _dispatch_kevent_error(ke); } if (ke->filter == EVFILT_TIMER) { + _dispatch_debug("kevent[0x%llx]: handling timer", + (unsigned long long)ke->udata); return _dispatch_timers_kevent(ke); } #if HAVE_MACH if (ke->filter == EVFILT_MACHPORT) { - return _dispatch_kevent_mach_portset(ke); + _dispatch_debug("kevent[0x%llx]: handling mach port", + (unsigned long long)ke->udata); + return _dispatch_mach_kevent_merge(ke); } #endif return _dispatch_kevent_merge(ke); @@ -1185,13 +1635,9 @@ DISPATCH_NOINLINE static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); - dispatch_kevent_t dk; + dispatch_kevent_t dk = (void*)ke->udata; dispatch_source_refs_t dri, dr_next; - dk = (void*)ke->udata; - dispatch_assert(dk); - TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); } @@ -1302,7 +1748,7 @@ static inline uint64_t _dispatch_source_timer_now(uint64_t nows[], unsigned int tidx) { unsigned int tk = DISPATCH_TIMER_KIND(tidx); - if (nows && fastpath(nows[tk])) { + if (nows && fastpath(nows[tk] != 0)) { return nows[tk]; } uint64_t now; @@ -1353,7 +1799,7 @@ _dispatch_source_set_timer3(void *context) // older timer params ds->ds_pending_data = 0; // Re-arm in case we got 
disarmed because of pending set_timer suspension - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, release); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); dispatch_resume(ds); // Must happen after resume to avoid getting disarmed due to suspension @@ -1435,7 +1881,7 @@ _dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, { if (slowpath(!ds->ds_is_timer) || slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) { - DISPATCH_CLIENT_CRASH("Attempt to set timer on a non-timer source"); + DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source"); } struct dispatch_set_timer_params *params; @@ -1446,7 +1892,7 @@ _dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, // The use of suspend/resume requires the external retain/release dispatch_retain(ds); if (source_sync) { - return _dispatch_barrier_trysync_f((dispatch_queue_t)ds, params, + return _dispatch_barrier_trysync_or_async_f(ds->_as_dq, params, _dispatch_source_set_timer2); } else { return _dispatch_source_set_timer2(params); @@ -1525,8 +1971,13 @@ struct dispatch_timer_s _dispatch_timer[] = { #define DISPATCH_TIMER_COUNT \ ((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0]))) +#if __linux__ +#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ + (void*)&_dispatch_kevent_timer[tidx] +#else #define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ (uintptr_t)&_dispatch_kevent_timer[tidx] +#endif #ifdef __LP64__ #define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ .udata = DISPATCH_KEVENT_TIMER_UDATA(tidx) @@ -1562,21 +2013,29 @@ struct dispatch_kevent_s _dispatch_kevent_timer[] = { ((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0]))) #define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8) -#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(qos, note) \ - [qos] = { \ - .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(qos), \ +#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(tidx, note) \ + [tidx] = { \ + .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(tidx), \ .filter = EVFILT_TIMER, \ .flags = EV_ONESHOT, \ .fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \ } -#define DISPATCH_KEVENT_TIMEOUT_INIT(qos, note) \ - DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_QOS_##qos, note) +#define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \ + DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos), note) _dispatch_kevent_qos_s _dispatch_kevent_timeout[] = { - DISPATCH_KEVENT_TIMEOUT_INIT(NORMAL, 0), - DISPATCH_KEVENT_TIMEOUT_INIT(CRITICAL, NOTE_CRITICAL), - DISPATCH_KEVENT_TIMEOUT_INIT(BACKGROUND, NOTE_BACKGROUND), + DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME), + DISPATCH_KEVENT_TIMEOUT_INIT(WALL, CRITICAL, NOTE_MACH_CONTINUOUS_TIME | NOTE_CRITICAL), + DISPATCH_KEVENT_TIMEOUT_INIT(WALL, BACKGROUND, NOTE_MACH_CONTINUOUS_TIME | NOTE_BACKGROUND), + DISPATCH_KEVENT_TIMEOUT_INIT(MACH, NORMAL, 0), + DISPATCH_KEVENT_TIMEOUT_INIT(MACH, CRITICAL, NOTE_CRITICAL), + DISPATCH_KEVENT_TIMEOUT_INIT(MACH, BACKGROUND, NOTE_BACKGROUND), }; +#define DISPATCH_KEVENT_TIMEOUT_COUNT \ + ((sizeof(_dispatch_kevent_timeout) / sizeof(_dispatch_kevent_timeout[0]))) +static_assert(DISPATCH_KEVENT_TIMEOUT_COUNT == DISPATCH_TIMER_INDEX_COUNT - 1, + "should have a kevent for everything but disarm (ddt assumes this)"); #define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC @@ -1622,11 
+2081,11 @@ static const uint64_t _dispatch_kevent_coalescing_window[] = { dr_list); }) #define _dispatch_timers_check(dra, dta) ({ \ - unsigned int qosm = _dispatch_timers_qos_mask; \ + unsigned int timerm = _dispatch_timers_mask; \ bool update = false; \ unsigned int tidx; \ for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \ - if (!(qosm & 1 << DISPATCH_TIMER_QOS(tidx))){ \ + if (!(timerm & (1 << tidx))){ \ continue; \ } \ dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \ @@ -1647,7 +2106,7 @@ static const uint64_t _dispatch_kevent_coalescing_window[] = { update; }) static bool _dispatch_timers_reconfigure, _dispatch_timer_expired; -static unsigned int _dispatch_timers_qos_mask; +static unsigned int _dispatch_timers_mask; static bool _dispatch_timers_force_max_leeway; static void @@ -1656,7 +2115,7 @@ _dispatch_timers_init(void) { #ifndef __LP64__ unsigned int tidx; for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - _dispatch_kevent_timer[tidx].dk_kevent.udata = \ + _dispatch_kevent_timer[tidx].dk_kevent.udata = DISPATCH_KEVENT_TIMER_UDATA(tidx); } #endif // __LP64__ @@ -1678,7 +2137,7 @@ _dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk) _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); if (tidx != DISPATCH_TIMER_INDEX_DISARM) { _dispatch_timers_reconfigure = true; - _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + _dispatch_timers_mask |= 1 << tidx; } } @@ -1699,10 +2158,10 @@ _dispatch_timers_update(dispatch_source_t ds) } // Move timers that are disabled, suspended or have missed intervals to the // disarmed list; a rearm after resume or source invoke will re-enable them - if (!ds_timer(dr).target || DISPATCH_OBJECT_SUSPENDED(ds) || + if (!ds_timer(dr).target || DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) { tidx = DISPATCH_TIMER_INDEX_DISARM; - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, ds->ds_dkev); } else { @@ -1714,7 +2173,7 @@ _dispatch_timers_update(dispatch_source_t ds) if (slowpath(!ds->ds_is_installed)) { ds->ds_is_installed = true; if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); } @@ -1726,7 +2185,7 @@ _dispatch_timers_update(dispatch_source_t ds) } if (tidx != DISPATCH_TIMER_INDEX_DISARM) { _dispatch_timers_reconfigure = true; - _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + _dispatch_timers_mask |= 1 << tidx; } if (dk != &_dispatch_kevent_timer[tidx]){ ds->ds_dkev = &_dispatch_kevent_timer[tidx]; @@ -1764,7 +2223,7 @@ _dispatch_timers_run2(uint64_t nows[], unsigned int tidx) } // Remove timers that are suspended or have missed intervals from the // list; a rearm after resume or
source invoke will re-enable them - if (DISPATCH_OBJECT_SUSPENDED(ds) || ds->ds_pending_data) { + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) { _dispatch_timers_update(ds); continue; } @@ -1784,10 +2243,13 @@ _dispatch_timers_run2(uint64_t nows[], unsigned int tidx) ds_timer(dr).last_fire = now; unsigned long data; - data = dispatch_atomic_add2o(ds, ds_pending_data, + data = os_atomic_add2o(ds, ds_pending_data, (unsigned long)missed, relaxed); _dispatch_trace_timer_fire(dr, data, (unsigned long)missed); - _dispatch_wakeup(ds); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); + if (ds_timer(dr).flags & DISPATCH_TIMER_AFTER) { + _dispatch_source_kevent_unregister(ds); + } } } @@ -1805,7 +2267,7 @@ _dispatch_timers_run(uint64_t nows[]) static inline unsigned int _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], - uint64_t *delay, uint64_t *leeway, int qos) + uint64_t *delay, uint64_t *leeway, int qos, int kind) { unsigned int tidx, ridx = DISPATCH_TIMER_COUNT; uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX; @@ -1814,6 +2276,9 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){ continue; } + if (kind >= 0 && kind != DISPATCH_TIMER_KIND(tidx)){ + continue; + } uint64_t target = timer[tidx].target; if (target == UINT64_MAX) { continue; } @@ -1858,19 +2323,58 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], return ridx; } + +#ifdef __linux__ +// on Linux we map the _dispatch_kevent_qos_s to struct kevent instead +// of struct kevent64. We lose the kevent.ext[] members and the timeout +// is based on relative msec-based time vs. absolute nsec-based time. +// For now we make the adjustments right here, until we either extend +// libkqueue with a proper kevent64 API or remove kevent altogether and +// move to a lower-level API (e.g. epoll or a kernel module). +// Also, leeway is ignored.
+ +static void +_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, + uint64_t leeway, uint64_t nows[]) +{ + // call to update nows[] + _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + // adjust nsec based delay to msec based and ignore leeway + delay /= 1000000L; + if ((int64_t)(delay) <= 0) { + delay = 1; // if value <= 0 the dispatch will stop + } + ke->data = (int64_t)delay; +} + +#else +static void +_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, + uint64_t leeway, uint64_t nows[]) +{ + delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + if (slowpath(_dispatch_timers_force_max_leeway)) { + ke->data = (int64_t)(delay + leeway); + ke->ext[1] = 0; + } else { + ke->data = (int64_t)delay; + ke->ext[1] = leeway; + } +} +#endif // __linux__ + static bool _dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, - unsigned int qos) + unsigned int tidx) { - unsigned int tidx; bool poll; uint64_t delay, leeway; - tidx = _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, - (int)qos); + _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, + (int)DISPATCH_TIMER_QOS(tidx), (int)DISPATCH_TIMER_KIND(tidx)); poll = (delay == 0); if (poll || delay == UINT64_MAX) { - _dispatch_trace_next_timer_set(NULL, qos); + _dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx)); if (!ke->data) { return poll; } @@ -1879,20 +2383,18 @@ _dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, ke->flags &= ~(EV_ADD|EV_ENABLE); } else { _dispatch_trace_next_timer_set( - TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), qos); - _dispatch_trace_next_timer_program(delay, qos); - delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); - if (slowpath(_dispatch_timers_force_max_leeway)) { - ke->data = (int64_t)(delay + leeway); - ke->ext[1] = 0; - } else { - ke->data = (int64_t)delay; - ke->ext[1] = leeway; - } + TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), DISPATCH_TIMER_QOS(tidx)); + _dispatch_trace_next_timer_program(delay, DISPATCH_TIMER_QOS(tidx)); + _dispatch_kevent_timer_set_delay(ke, delay, leeway, nows); ke->flags |= EV_ADD|EV_ENABLE; ke->flags &= ~EV_DELETE; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } +#endif } - _dispatch_kq_update(ke); + _dispatch_kq_deferred_update(ke); return poll; } @@ -1901,13 +2403,13 @@ static bool _dispatch_timers_program(uint64_t nows[]) { bool poll = false; - unsigned int qos, qosm = _dispatch_timers_qos_mask; - for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { - if (!(qosm & 1 << qos)){ + unsigned int tidx, timerm = _dispatch_timers_mask; + for (tidx = 0; tidx < DISPATCH_KEVENT_TIMEOUT_COUNT; tidx++) { + if (!(timerm & 1 << tidx)){ continue; } - poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[qos], - qos); + poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[tidx], + tidx); } return poll; } @@ -1921,28 +2423,34 @@ _dispatch_timers_configure(void) return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer); } +#if HAVE_MACH static void _dispatch_timers_calendar_change(void) { + unsigned int qos; + // calendar change may have gone past the wallclock deadline _dispatch_timer_expired = true; - _dispatch_timers_qos_mask = ~0u; + for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { + _dispatch_timers_mask |= + 1 << DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_WALL, qos); + } } +#endif 
static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); dispatch_assert(ke->data > 0); dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); - unsigned int qos = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; - dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); - dispatch_assert(_dispatch_kevent_timeout[qos].data); - _dispatch_kevent_timeout[qos].data = 0; // kevent deleted via EV_ONESHOT + unsigned int tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; + dispatch_assert(tidx < DISPATCH_KEVENT_TIMEOUT_COUNT); + dispatch_assert(_dispatch_kevent_timeout[tidx].data != 0); + _dispatch_kevent_timeout[tidx].data = 0; // kevent deleted via EV_ONESHOT _dispatch_timer_expired = true; - _dispatch_timers_qos_mask |= 1 << qos; - _dispatch_trace_next_timer_wake(qos); + _dispatch_timers_mask |= 1 << tidx; + _dispatch_trace_next_timer_wake(DISPATCH_TIMER_QOS(tidx)); } static inline bool @@ -1963,7 +2471,7 @@ _dispatch_mgr_timers(void) expired = _dispatch_timer_expired = _dispatch_timers_program(nows); expired = expired || _dispatch_mgr_q.dq_items_tail; } - _dispatch_timers_qos_mask = 0; + _dispatch_timers_mask = 0; } return expired; } @@ -1976,8 +2484,7 @@ typedef struct { } dispatch_timer_aggregate_refs_s; typedef struct dispatch_timer_aggregate_s { - DISPATCH_STRUCT_HEADER(queue); - DISPATCH_QUEUE_HEADER; + DISPATCH_QUEUE_HEADER(queue); TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list; dispatch_timer_aggregate_refs_s dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT]; @@ -1986,7 +2493,7 @@ typedef struct dispatch_timer_aggregate_s { } dta_timer[DISPATCH_TIMER_COUNT]; struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT]; unsigned int dta_refcount; -} dispatch_timer_aggregate_s; +} DISPATCH_QUEUE_ALIGN dispatch_timer_aggregate_s; typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s; static dispatch_timer_aggregates_s _dispatch_timer_aggregates = @@ -1998,10 +2505,10 @@ dispatch_timer_aggregate_create(void) unsigned int tidx; dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue), sizeof(struct dispatch_timer_aggregate_s)); - _dispatch_queue_init((dispatch_queue_t)dta); + _dispatch_queue_init(dta->_as_dq, DQF_NONE, + DISPATCH_QUEUE_WIDTH_MAX, false); dta->do_targetq = _dispatch_get_root_queue( _DISPATCH_QOS_CLASS_USER_INITIATED, true); - dta->dq_width = DISPATCH_QUEUE_WIDTH_MAX; //FIXME: aggregates need custom vtable //dta->dq_label = "timer-aggregate"; for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) { @@ -2015,7 +2522,7 @@ dispatch_timer_aggregate_create(void) dta->dta_timer_data[tidx].deadline = UINT64_MAX; } return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create( - (dispatch_queue_t)dta); + dta->_as_dq); } typedef struct dispatch_timer_delay_s { @@ -2029,7 +2536,7 @@ _dispatch_timer_aggregate_get_delay(void *ctxt) dispatch_timer_delay_t dtd = ctxt; struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {}; _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway, - -1); + -1, -1); } uint64_t @@ -2039,8 +2546,7 @@ dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta, struct dispatch_timer_delay_s dtd = { .timer = dta->dta_timer_data, }; - dispatch_sync_f((dispatch_queue_t)dta, &dtd, - _dispatch_timer_aggregate_get_delay); + dispatch_sync_f(dta->_as_dq, &dtd, _dispatch_timer_aggregate_get_delay); if (leeway_ptr) { *leeway_ptr = dtd.leeway; } @@ -2072,7 +2578,7 @@ 
_dispatch_timer_aggregates_configure(void) } dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau)); memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer)); - _dispatch_barrier_async_detached_f((dispatch_queue_t)dta, dtau, + _dispatch_barrier_async_detached_f(dta->_as_dq, dtau, _dispatch_timer_aggregate_update); } } @@ -2121,340 +2627,262 @@ _dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx) } #pragma mark - -#pragma mark dispatch_select +#pragma mark dispatch_kqueue static int _dispatch_kq; -#if DISPATCH_USE_SELECT_FALLBACK +#if DISPATCH_DEBUG_QOS && DISPATCH_USE_KEVENT_WORKQUEUE +#define _dispatch_kevent_assert_valid_qos(ke) ({ \ + if (_dispatch_kevent_workqueue_enabled) { \ + const _dispatch_kevent_qos_s *_ke = (ke); \ + if (_ke->flags & (EV_ADD|EV_ENABLE)) { \ + _dispatch_assert_is_valid_qos_class(\ + (pthread_priority_t)_ke->qos); \ + dispatch_assert(_ke->qos); \ + } \ + } \ + }) +#else +#define _dispatch_kevent_assert_valid_qos(ke) ((void)ke) +#endif -static unsigned int _dispatch_select_workaround; -static fd_set _dispatch_rfds; -static fd_set _dispatch_wfds; -static uint64_t*_dispatch_rfd_ptrs; -static uint64_t*_dispatch_wfd_ptrs; -DISPATCH_NOINLINE -static bool -_dispatch_select_register(const _dispatch_kevent_qos_s *kev) +static void +_dispatch_kq_init(void *context DISPATCH_UNUSED) { - // Must execute on manager queue - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - - // If an EINVAL or ENOENT error occurred while adding/enabling a read or - // write kevent, assume it was due to a type of filedescriptor not - // supported by kqueue and fall back to select - switch (kev->filter) { - case EVFILT_READ: - if ((kev->data == EINVAL || kev->data == ENOENT) && - dispatch_assume(kev->ident < FD_SETSIZE)) { - FD_SET((int)kev->ident, &_dispatch_rfds); - if (slowpath(!_dispatch_rfd_ptrs)) { - _dispatch_rfd_ptrs = _dispatch_calloc(FD_SETSIZE, - sizeof(*_dispatch_rfd_ptrs)); - } - if (!_dispatch_rfd_ptrs[kev->ident]) { - _dispatch_rfd_ptrs[kev->ident] = kev->udata; - _dispatch_select_workaround++; - _dispatch_debug("select workaround used to read fd %d: 0x%lx", - (int)kev->ident, (long)kev->data); + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_KEVENT_WORKQUEUE + _dispatch_kevent_workqueue_init(); + if (_dispatch_kevent_workqueue_enabled) { + int r; + const _dispatch_kevent_qos_s kev[] = { + [0] = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, + }, + [1] = { + .ident = 1, + .filter = EVFILT_USER, + .fflags = NOTE_TRIGGER, + }, + }; + _dispatch_kq = -1; +retry: + r = kevent_qos(-1, kev, 2, NULL, 0, NULL, NULL, + KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE); + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + default: + DISPATCH_CLIENT_CRASH(err, + "Failed to initialize workqueue kevent"); + break; } - return true; } - break; - case EVFILT_WRITE: - if ((kev->data == EINVAL || kev->data == ENOENT) && - dispatch_assume(kev->ident < FD_SETSIZE)) { - FD_SET((int)kev->ident, &_dispatch_wfds); - if (slowpath(!_dispatch_wfd_ptrs)) { - _dispatch_wfd_ptrs = _dispatch_calloc(FD_SETSIZE, - sizeof(*_dispatch_wfd_ptrs)); - } - if (!_dispatch_wfd_ptrs[kev->ident]) { - _dispatch_wfd_ptrs[kev->ident] = kev->udata; - _dispatch_select_workaround++; - _dispatch_debug("select workaround used to write fd %d: 0x%lx", - (int)kev->ident, (long)kev->data); - } - return true; + return; + } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE +#if DISPATCH_USE_MGR_THREAD + static const
_dispatch_kevent_qos_s kev = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + }; + + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_GUARDED_FD + guardid_t guard = (uintptr_t)&kev; + _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); +#else + _dispatch_kq = kqueue(); +#endif + if (_dispatch_kq == -1) { + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "system is out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "kernel is out of memory"); + break; + default: + DISPATCH_INTERNAL_CRASH(err, "kqueue() failure"); + break; } - break; } - return false; + (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL, + NULL, 0)); + _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); +#endif // DISPATCH_USE_MGR_THREAD } DISPATCH_NOINLINE -static bool -_dispatch_select_unregister(const _dispatch_kevent_qos_s *kev) +static long +_dispatch_kq_update(const _dispatch_kevent_qos_s *ke, int n) { - // Must execute on manager queue - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + int i, r; + _dispatch_kevent_qos_s kev_error[n]; + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_kq_init); - switch (kev->filter) { - case EVFILT_READ: - if (_dispatch_rfd_ptrs && kev->ident < FD_SETSIZE && - _dispatch_rfd_ptrs[kev->ident]) { - FD_CLR((int)kev->ident, &_dispatch_rfds); - _dispatch_rfd_ptrs[kev->ident] = 0; - _dispatch_select_workaround--; - return true; - } - break; - case EVFILT_WRITE: - if (_dispatch_wfd_ptrs && kev->ident < FD_SETSIZE && - _dispatch_wfd_ptrs[kev->ident]) { - FD_CLR((int)kev->ident, &_dispatch_wfds); - _dispatch_wfd_ptrs[kev->ident] = 0; - _dispatch_select_workaround--; - return true; + for (i = 0; i < n; i++) { + if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { + _dispatch_kevent_debug_n("updating", ke + i, i, n); } - break; } - return false; -} -DISPATCH_NOINLINE -static bool -_dispatch_mgr_select(bool poll) -{ - static const struct timeval timeout_immediately = { 0, 0 }; - fd_set tmp_rfds, tmp_wfds; - int err, i, r; - bool kevent_avail = false; - - FD_COPY(&_dispatch_rfds, &tmp_rfds); - FD_COPY(&_dispatch_wfds, &tmp_wfds); - - r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, - poll ? 
(struct timeval*)&timeout_immediately : NULL); - if (slowpath(r == -1)) { - err = errno; - if (err != EBADF) { - if (err != EINTR) { - (void)dispatch_assume_zero(err); - } - return false; - } - for (i = 0; i < FD_SETSIZE; i++) { - if (i == _dispatch_kq) { - continue; - } - if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, &_dispatch_wfds)){ - continue; - } - r = dup(i); - if (dispatch_assume(r != -1)) { - close(r); - } else { - if (_dispatch_rfd_ptrs && _dispatch_rfd_ptrs[i]) { - FD_CLR(i, &_dispatch_rfds); - _dispatch_rfd_ptrs[i] = 0; - _dispatch_select_workaround--; - } - if (_dispatch_wfd_ptrs && _dispatch_wfd_ptrs[i]) { - FD_CLR(i, &_dispatch_wfds); - _dispatch_wfd_ptrs[i] = 0; - _dispatch_select_workaround--; - } - } - } - return false; + unsigned int flags = KEVENT_FLAG_ERROR_EVENTS; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + flags |= KEVENT_FLAG_WORKQ; } - if (r > 0) { - for (i = 0; i < FD_SETSIZE; i++) { - if (FD_ISSET(i, &tmp_rfds)) { - if (i == _dispatch_kq) { - kevent_avail = true; - continue; - } - FD_CLR(i, &_dispatch_rfds); // emulate EV_DISPATCH - _dispatch_kevent_qos_s kev = { - .ident = (uint64_t)i, - .filter = EVFILT_READ, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .data = 1, - .udata = _dispatch_rfd_ptrs[i], - }; - _dispatch_kevent_drain(&kev); - } - if (FD_ISSET(i, &tmp_wfds)) { - FD_CLR(i, &_dispatch_wfds); // emulate EV_DISPATCH - _dispatch_kevent_qos_s kev = { - .ident = (uint64_t)i, - .filter = EVFILT_WRITE, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .data = 1, - .udata = _dispatch_wfd_ptrs[i], - }; - _dispatch_kevent_drain(&kev); - } - } - } - return kevent_avail; -} - -#endif // DISPATCH_USE_SELECT_FALLBACK - -#pragma mark - -#pragma mark dispatch_kqueue - -static void -_dispatch_kq_init(void *context DISPATCH_UNUSED) -{ - static const _dispatch_kevent_qos_s kev = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - }; - - _dispatch_safe_fork = false; -#if DISPATCH_USE_GUARDED_FD - guardid_t guard = (uintptr_t)&kev; - _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); -#else - _dispatch_kq = kqueue(); #endif - if (_dispatch_kq == -1) { + +retry: + r = kevent_qos(_dispatch_kq, ke, n, kev_error, n, NULL, NULL, flags); + if (slowpath(r == -1)) { int err = errno; switch (err) { - case EMFILE: - DISPATCH_CLIENT_CRASH("kqueue() failure: " - "process is out of file descriptors"); - break; - case ENFILE: - DISPATCH_CLIENT_CRASH("kqueue() failure: " - "system is out of file descriptors"); - break; - case ENOMEM: - DISPATCH_CLIENT_CRASH("kqueue() failure: " - "kernel is out of memory"); + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); break; default: (void)dispatch_assume_zero(err); - DISPATCH_CRASH("kqueue() failure"); break; } + return err; } -#if DISPATCH_USE_SELECT_FALLBACK - else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { - // in case we fall back to select() - FD_SET(_dispatch_kq, &_dispatch_rfds); + for (i = 0, n = r; i < n; i++) { + if (kev_error[i].flags & EV_ERROR) { + _dispatch_kevent_debug("returned error", &kev_error[i]); + _dispatch_kevent_drain(&kev_error[i]); + r = (int)kev_error[i].data; + } else { + _dispatch_kevent_mgr_debug(&kev_error[i]); + r = 0; + } } -#endif // DISPATCH_USE_SELECT_FALLBACK - - (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL, - NULL, 0)); - _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); + return r; } -static int -_dispatch_get_kq(void) 
+DISPATCH_ALWAYS_INLINE +static void +_dispatch_kq_update_all(const _dispatch_kevent_qos_s *kev, int n) { - static dispatch_once_t pred; + (void)_dispatch_kq_update(kev, n); +} - dispatch_once_f(&pred, NULL, _dispatch_kq_init); +DISPATCH_ALWAYS_INLINE +static long +_dispatch_kq_update_one(const _dispatch_kevent_qos_s *kev) +{ + return _dispatch_kq_update(kev, 1); +} - return _dispatch_kq; +static inline bool +_dispatch_kevent_maps_to_same_knote(const _dispatch_kevent_qos_s *e1, + const _dispatch_kevent_qos_s *e2) +{ + return e1->filter == e2->filter && + e1->ident == e2->ident && + e1->udata == e2->udata; } -DISPATCH_NOINLINE -static long -_dispatch_kq_update(const _dispatch_kevent_qos_s *kev) +static inline int +_dispatch_deferred_event_find_slot(dispatch_deferred_items_t ddi, + const _dispatch_kevent_qos_s *ke) { - int r; - _dispatch_kevent_qos_s kev_error; + _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; + int i; -#if DISPATCH_USE_SELECT_FALLBACK - if (slowpath(_dispatch_select_workaround) && (kev->flags & EV_DELETE)) { - if (_dispatch_select_unregister(kev)) { - return 0; - } - } -#endif // DISPATCH_USE_SELECT_FALLBACK - if (kev->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { - _dispatch_kevent_debug(kev, __func__); - } -retry: - r = kevent_qos(_dispatch_get_kq(), kev, 1, &kev_error, - 1, NULL, NULL, KEVENT_FLAG_ERROR_EVENTS); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); + for (i = 0; i < ddi->ddi_nevents; i++) { + if (_dispatch_kevent_maps_to_same_knote(&events[i], ke)) { break; } - return err; - } - if (r == 0) { - return 0; - } - if (kev_error.flags & EV_ERROR && kev_error.data) { - _dispatch_kevent_debug(&kev_error, __func__); } - r = (int)kev_error.data; - switch (r) { - case 0: - _dispatch_kevent_mgr_debug(&kev_error, __func__); - break; - case EINPROGRESS: - // deferred EV_DELETE - break; - case ENOENT: - if ((kev->flags & EV_DELETE) && (kev->flags & EV_UDATA_SPECIFIC)) { - // potential concurrent EV_DELETE delivery - break; + return i; +} + +static void +_dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke) +{ + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + int slot; + + _dispatch_kevent_assert_valid_qos(ke); + if (ddi) { + if (unlikely(ddi->ddi_nevents == ddi->ddi_maxevents)) { + _dispatch_deferred_items_set(NULL); + _dispatch_kq_update_all(ddi->ddi_eventlist, ddi->ddi_nevents); + ddi->ddi_nevents = 0; + _dispatch_deferred_items_set(ddi); } - // fall through - case EINVAL: - if ((kev->flags & (EV_ADD|EV_ENABLE)) && !(kev->flags & EV_DELETE)) { -#if DISPATCH_USE_SELECT_FALLBACK - if (_dispatch_select_register(&kev_error)) { - r = 0; - break; + if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { + _dispatch_kevent_debug("deferred", ke); + } + bool needs_enable = false; + slot = _dispatch_deferred_event_find_slot(ddi, ke); + if (slot == ddi->ddi_nevents) { + ddi->ddi_nevents++; + } else if (ke->flags & EV_DELETE) { + // when deleting and an enable is pending, + // we must merge EV_ENABLE to do an immediate deletion + needs_enable = (ddi->ddi_eventlist[slot].flags & EV_ENABLE); + } + ddi->ddi_eventlist[slot] = *ke; + if (needs_enable) { + ddi->ddi_eventlist[slot].flags |= EV_ENABLE; + } + } else { + _dispatch_kq_update_one(ke); + } +} + +static long +_dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke) +{ + dispatch_deferred_items_t ddi = 
_dispatch_deferred_items_get(); + int slot, last; + + _dispatch_kevent_assert_valid_qos(ke); + if (ddi) { + _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; + slot = _dispatch_deferred_event_find_slot(ddi, ke); + if (slot < ddi->ddi_nevents) { + // when deleting and an enable is pending, + // we must merge EV_ENABLE to do an immediate deletion + if ((ke->flags & EV_DELETE) && (events[slot].flags & EV_ENABLE)) { + ke->flags |= EV_ENABLE; } -#elif DISPATCH_DEBUG - if (kev->filter == EVFILT_READ || kev->filter == EVFILT_WRITE) { - DISPATCH_CRASH("Unsupported fd for EVFILT_READ or EVFILT_WRITE " - "kevent"); + last = --ddi->ddi_nevents; + if (slot != last) { + events[slot] = events[last]; } -#endif // DISPATCH_USE_SELECT_FALLBACK } - // fall through - case EBADF: - case EPERM: - default: - kev_error.flags |= kev->flags; - _dispatch_kevent_drain(&kev_error); - r = (int)kev_error.data; - break; } - return r; + return _dispatch_kq_update_one(ke); } #pragma mark - #pragma mark dispatch_mgr -static _dispatch_kevent_qos_s *_dispatch_kevent_enable; - -static void inline -_dispatch_mgr_kevent_reenable(_dispatch_kevent_qos_s *ke) -{ - dispatch_assert(!_dispatch_kevent_enable || _dispatch_kevent_enable == ke); - _dispatch_kevent_enable = ke; -} - -unsigned long -_dispatch_mgr_wakeup(dispatch_queue_t dq DISPATCH_UNUSED) +DISPATCH_NOINLINE +static void +_dispatch_mgr_queue_poke(dispatch_queue_t dq DISPATCH_UNUSED, + pthread_priority_t pp DISPATCH_UNUSED) { - if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { - return false; - } - static const _dispatch_kevent_qos_s kev = { .ident = 1, .filter = EVFILT_USER, @@ -2464,126 +2892,345 @@ _dispatch_mgr_wakeup(dispatch_queue_t dq DISPATCH_UNUSED) #if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG _dispatch_debug("waking up the dispatch manager queue: %p", dq); #endif + _dispatch_kq_deferred_update(&kev); +} - _dispatch_kq_update(&kev); +void +_dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) +{ + if (flags & DISPATCH_WAKEUP_FLUSH) { + os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + } - return false; + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + return; + } + + if (!_dispatch_queue_class_probe(&_dispatch_mgr_q)) { + return; + } + + _dispatch_mgr_queue_poke(dq, pp); } DISPATCH_NOINLINE static void -_dispatch_mgr_init(void) +_dispatch_event_init(void) { - (void)dispatch_atomic_inc2o(&_dispatch_mgr_q, dq_running, relaxed); - _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_mgr_q); - _dispatch_queue_set_bound_thread(&_dispatch_mgr_q); - _dispatch_mgr_priority_init(); _dispatch_kevent_init(); _dispatch_timers_init(); +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK _dispatch_mach_recv_msg_buf_init(); - _dispatch_memorystatus_init(); +#endif + _dispatch_memorypressure_init(); + _voucher_activity_debug_channel_init(); +} + +#if DISPATCH_USE_MGR_THREAD +DISPATCH_NOINLINE +static void +_dispatch_mgr_init(void) +{ + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + _dispatch_queue_set_current(&_dispatch_mgr_q); + if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, + DISPATCH_INVOKE_STEALING, NULL) != owned) { + DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); + } + _dispatch_mgr_priority_init(); + _dispatch_event_init(); +} + +DISPATCH_NOINLINE +static bool +_dispatch_mgr_wait_for_event(dispatch_deferred_items_t ddi, bool poll) +{ + int r; + dispatch_assert((size_t)ddi->ddi_maxevents < countof(ddi->ddi_eventlist)); + +retry: + r = 
kevent_qos(_dispatch_kq, ddi->ddi_eventlist, ddi->ddi_nevents, + ddi->ddi_eventlist + ddi->ddi_maxevents, 1, NULL, NULL, + poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE); + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + } + ddi->ddi_nevents = 0; + return r > 0; } DISPATCH_NOINLINE DISPATCH_NORETURN static void _dispatch_mgr_invoke(void) { - _dispatch_kevent_qos_s kev; + dispatch_deferred_items_s ddi; bool poll; - int r; + + ddi.ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; + ddi.ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + ddi.ddi_nevents = 0; + ddi.ddi_maxevents = 1; + + _dispatch_deferred_items_set(&ddi); for (;;) { _dispatch_mgr_queue_drain(); poll = _dispatch_mgr_timers(); -#if DISPATCH_USE_SELECT_FALLBACK - if (slowpath(_dispatch_select_workaround)) { - poll = _dispatch_mgr_select(poll); - if (!poll) continue; - } -#endif // DISPATCH_USE_SELECT_FALLBACK poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q); - r = kevent_qos(_dispatch_kq, _dispatch_kevent_enable, - _dispatch_kevent_enable ? 1 : 0, &kev, 1, NULL, NULL, - poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE); - _dispatch_kevent_enable = NULL; - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - break; - case EBADF: - DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; - } - } else if (r) { - _dispatch_kevent_drain(&kev); + if (_dispatch_mgr_wait_for_event(&ddi, poll)) { + _dispatch_kevent_qos_s *ke = ddi.ddi_eventlist + ddi.ddi_maxevents; + _dispatch_kevent_debug("received", ke); + _dispatch_kevent_drain(ke); } } } +#endif // DISPATCH_USE_MGR_THREAD DISPATCH_NORETURN void _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, - dispatch_object_t dou DISPATCH_UNUSED, dispatch_invoke_flags_t flags DISPATCH_UNUSED) { +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + DISPATCH_INTERNAL_CRASH(0, "Manager queue invoked with " + "kevent workqueue enabled"); + } +#endif +#if DISPATCH_USE_MGR_THREAD _dispatch_mgr_init(); // never returns, so burn bridges behind us & clear stack 2k ahead _dispatch_clear_stack(2048); _dispatch_mgr_invoke(); +#endif } -#pragma mark - -#pragma mark dispatch_memorystatus +#if DISPATCH_USE_KEVENT_WORKQUEUE + +#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((pthread_priority_t)(~0ul)) + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) +{ + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + ddi->ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; + ddi->ddi_nevents = 0; + ddi->ddi_maxevents = countof(ddi->ddi_eventlist); + ddi->ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + + pthread_priority_t pp = _dispatch_get_priority(); + if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { + // If this thread does not have the event manager flag set, don't set up + // as the dispatch manager and let the caller know to only process + // the delivered events.
+ // + // Also add the NEEDS_UNBIND flag so that + // _dispatch_priority_compute_update knows it has to unbind + pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + _dispatch_thread_setspecific(dispatch_priority_key, + (void *)(uintptr_t)pp); + ddi->ddi_stashed_pp = 0; + return DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER; + } + + if ((pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) || + !(pp & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + // When the pthread kext is delivering kevents to us, and pthread + // root queues are in use, then the pthread priority TSD is set + // to a sched pri with the _PTHREAD_PRIORITY_SCHED_PRI_FLAG bit set. + // + // Given that this isn't a valid QoS we need to fix up the TSD, + // and the best option is to clear the qos/priority bits which tells + // us to not do any QoS related calls on this thread. + // + // However, in that case the manager thread is opted out of QoS, + // as far as pthread is concerned, and can't be turned into + // something else, so we can't stash. + pp &= (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK; + } + // Managers always park without mutating to a regular worker thread, and + // hence never need to unbind from userland, and when draining a manager, + // the NEEDS_UNBIND flag would cause the mutation to happen. + // So we need to strip this flag. + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); + + // ensure kevents registered from this thread are registered at manager QoS + pthread_priority_t old_dp = _dispatch_set_defaultpriority( + (pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, NULL); + _dispatch_queue_set_current(&_dispatch_mgr_q); + if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, + DISPATCH_INVOKE_STEALING, NULL) != owned) { + DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); + } + static int event_thread_init; + if (!event_thread_init) { + event_thread_init = 1; + _dispatch_event_init(); + } + return old_dp; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_kevent_worker_thread_reset(pthread_priority_t old_dp) +{ + dispatch_queue_t dq = &_dispatch_mgr_q; + uint64_t orig_dq_state; + + _dispatch_queue_drain_unlock(dq, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED, + &orig_dq_state); + _dispatch_reset_defaultpriority(old_dp); + _dispatch_queue_set_current(NULL); + return _dq_state_is_dirty(orig_dq_state); +} + +DISPATCH_NOINLINE +void +_dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, int *nevents) +{ + _dispatch_introspection_thread_add(); + + if (!events && !nevents) { + // events for worker thread request have already been delivered earlier + return; + } + + _dispatch_kevent_qos_s *ke = *events; + int n = *nevents; + if (!dispatch_assume(n) || !dispatch_assume(*events)) return; -#if DISPATCH_USE_MEMORYSTATUS_SOURCE -#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYSTATUS -#define DISPATCH_MEMORYSTATUS_SOURCE_MASK ( \ - DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL | \ - DISPATCH_MEMORYSTATUS_PRESSURE_WARN) + dispatch_deferred_items_s ddi; + pthread_priority_t old_dp = _dispatch_kevent_worker_thread_init(&ddi); + + _dispatch_deferred_items_set(&ddi); + for (int i = 0; i < n; i++) { + _dispatch_kevent_debug("received", ke); + _dispatch_kevent_drain(ke++); + } + + if (old_dp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { + _dispatch_mgr_queue_drain(); + bool poll = _dispatch_mgr_timers(); + if
(_dispatch_kevent_worker_thread_reset(old_dp)) { + poll = true; + } + if (poll) _dispatch_mgr_queue_poke(&_dispatch_mgr_q, 0); + } + _dispatch_deferred_items_set(NULL); + + if (ddi.ddi_stashed_pp & _PTHREAD_PRIORITY_PRIORITY_MASK) { + *nevents = 0; + if (ddi.ddi_nevents) { + _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents); + } + ddi.ddi_stashed_pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return _dispatch_root_queue_drain_deferred_item(ddi.ddi_stashed_dq, + ddi.ddi_stashed_dou, ddi.ddi_stashed_pp); +#ifndef WORKQ_KEVENT_EVENT_BUFFER_LEN + } else if (ddi.ddi_nevents > *nevents) { + *nevents = 0; + _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents); +#endif + } else { + *nevents = ddi.ddi_nevents; + dispatch_static_assert(__builtin_types_compatible_p(typeof(**events), + typeof(*ddi.ddi_eventlist))); + memcpy(*events, ddi.ddi_eventlist, + (size_t)ddi.ddi_nevents * sizeof(*ddi.ddi_eventlist)); + } +} +#endif // DISPATCH_USE_KEVENT_WORKQUEUE + +#pragma mark - +#pragma mark dispatch_memorypressure + +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYPRESSURE +#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \ + DISPATCH_MEMORYPRESSURE_NORMAL | \ + DISPATCH_MEMORYPRESSURE_WARN | \ + DISPATCH_MEMORYPRESSURE_CRITICAL | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) +#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \ + DISPATCH_MEMORYPRESSURE_WARN | \ + DISPATCH_MEMORYPRESSURE_CRITICAL | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) #elif DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM -#define DISPATCH_MEMORYSTATUS_SOURCE_MASK DISPATCH_VM_PRESSURE +#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM +#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK DISPATCH_VM_PRESSURE #endif -#if DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE -static dispatch_source_t _dispatch_memorystatus_source; +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +static dispatch_source_t _dispatch_memorypressure_source; static void -_dispatch_memorystatus_handler(void *context DISPATCH_UNUSED) +_dispatch_memorypressure_handler(void *context DISPATCH_UNUSED) { -#if DISPATCH_USE_MEMORYSTATUS_SOURCE - unsigned long memorystatus; - memorystatus = dispatch_source_get_data(_dispatch_memorystatus_source); - if (memorystatus & DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL) { +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE + unsigned long memorypressure; + memorypressure = dispatch_source_get_data(_dispatch_memorypressure_source); + + if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) { + _dispatch_memory_warn = false; _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; - _voucher_activity_heap_pressure_normal(); - return; +#if VOUCHER_USE_MACH_VOUCHER + if (_firehose_task_buffer) { + firehose_buffer_clear_bank_flags(_firehose_task_buffer, + FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); + } +#endif } - _dispatch_continuation_cache_limit = - DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN; - _voucher_activity_heap_pressure_warn(); + if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) { + _dispatch_memory_warn = true; + _dispatch_continuation_cache_limit = + DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN; +#if VOUCHER_USE_MACH_VOUCHER + if (_firehose_task_buffer) { + firehose_buffer_set_bank_flags(_firehose_task_buffer, + 
FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); + } #endif + } + if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) { + malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK); + } +#elif DISPATCH_USE_VM_PRESSURE_SOURCE + // we must have gotten DISPATCH_VM_PRESSURE malloc_zone_pressure_relief(0,0); +#endif } static void -_dispatch_memorystatus_init(void) +_dispatch_memorypressure_init(void) { - _dispatch_memorystatus_source = dispatch_source_create( - DISPATCH_MEMORYSTATUS_SOURCE_TYPE, 0, - DISPATCH_MEMORYSTATUS_SOURCE_MASK, + _dispatch_memorypressure_source = dispatch_source_create( + DISPATCH_MEMORYPRESSURE_SOURCE_TYPE, 0, + DISPATCH_MEMORYPRESSURE_SOURCE_MASK, _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true)); - dispatch_source_set_event_handler_f(_dispatch_memorystatus_source, - _dispatch_memorystatus_handler); - dispatch_resume(_dispatch_memorystatus_source); + dispatch_source_set_event_handler_f(_dispatch_memorypressure_source, + _dispatch_memorypressure_handler); + dispatch_activate(_dispatch_memorypressure_source); } #else -static inline void _dispatch_memorystatus_init(void) {} -#endif // DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +static inline void _dispatch_memorypressure_init(void) {} +#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE #pragma mark - #pragma mark dispatch_mach @@ -2616,9 +3263,6 @@ static inline void _dispatch_memorystatus_init(void) {} #define _DISPATCH_MACHPORT_HASH(x) \ _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) -#ifndef MACH_RCV_LARGE_IDENTITY -#define MACH_RCV_LARGE_IDENTITY 0x00000008 -#endif #ifndef MACH_RCV_VOUCHER #define MACH_RCV_VOUCHER 0x00000800 #endif @@ -2629,28 +3273,27 @@ static inline void _dispatch_memorystatus_init(void) {} MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \ MACH_RCV_VOUCHER -#define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0]) +#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->dk_kevent.ext[0]) -static void _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke); -static void _dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke); -static void _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr); -static void _dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr); +static void _dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr); +static void _dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr); static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, dispatch_kevent_t dk, - mach_msg_header_t *hdr, mach_msg_size_t siz); + _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, + mach_msg_size_t siz); static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr); static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, bool disconnected); -static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm); -static inline void _dispatch_mach_msg_set_options(dispatch_object_t dou, - mach_msg_option_t options); + dispatch_mach_reply_refs_t dmr, unsigned int options); +static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); static void _dispatch_mach_msg_recv(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_msg_header_t *hdr, - 
mach_msg_size_t siz); -static void _dispatch_mach_merge_kevent(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, _dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr, mach_msg_size_t siz); +static void _dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, const _dispatch_kevent_qos_s *ke); static inline mach_msg_option_t _dispatch_mach_checkin_options(void); @@ -2658,38 +3301,71 @@ static const size_t _dispatch_mach_recv_msg_size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE; static const size_t dispatch_mach_trailer_size = sizeof(dispatch_mach_trailer_t); -static mach_msg_size_t _dispatch_mach_recv_msg_buf_size; -static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; static mach_port_t _dispatch_mach_notify_port; -static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = { - .filter = EVFILT_MACHPORT, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .fflags = DISPATCH_MACH_RCV_OPTIONS, -}; static dispatch_source_t _dispatch_mach_notify_source; + +static inline void* +_dispatch_kevent_mach_msg_buf(_dispatch_kevent_qos_s *ke) +{ + return (void*)ke->ext[0]; +} + +static inline mach_msg_size_t +_dispatch_kevent_mach_msg_size(_dispatch_kevent_qos_s *ke) +{ + // buffer size in the successful receive case, but message size (like + // msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size. + return (mach_msg_size_t)ke->ext[1]; +} + +static void +_dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (_dispatch_evfilt_machport_direct_enabled) return; + ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; + ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + ds->ds_is_direct_kevent = false; +#endif +} + static const struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { .ke = { .filter = EVFILT_MACHPORT, - .flags = EV_CLEAR, - .fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, + .fflags = DISPATCH_MACH_RCV_OPTIONS, }, + .init = _dispatch_source_type_mach_recv_direct_init, +}; + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; +static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .fflags = DISPATCH_MACH_RCV_OPTIONS, }; static void _dispatch_mach_recv_msg_buf_init(void) { + if (_dispatch_evfilt_machport_direct_enabled) return; mach_vm_size_t vm_size = mach_vm_round_page( _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); - _dispatch_mach_recv_msg_buf_size = (mach_msg_size_t)vm_size; mach_vm_address_t vm_addr = vm_page_size; kern_return_t kr; while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, VM_FLAGS_ANYWHERE))) { if (kr != KERN_NO_SPACE) { - (void)dispatch_assume_zero(kr); - DISPATCH_CLIENT_CRASH("Could not allocate mach msg receive buffer"); + DISPATCH_CLIENT_CRASH(kr, + "Could not allocate mach msg receive buffer"); } _dispatch_temporary_resource_shortage(); vm_addr = vm_page_size; @@ -2697,13 +3373,78 @@ _dispatch_mach_recv_msg_buf_init(void) _dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr; _dispatch_mach_recv_kevent.ext[1] = vm_size; } +#endif -static inline void* 
-_dispatch_get_mach_recv_msg_buf(void) +DISPATCH_NOINLINE +static void +_dispatch_source_merge_mach_msg_direct(dispatch_source_t ds, + _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr) +{ + dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs); + dispatch_queue_t cq = _dispatch_queue_get_current(); + + // see firehose_client_push_notify_async + _dispatch_queue_set_current(ds->_as_dq); + dc->dc_func(hdr); + _dispatch_queue_set_current(cq); + if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { + free(hdr); + } +} + +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc) +{ + dispatch_source_t ds; + ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct, + recvp, 0, &_dispatch_mgr_q); + os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER], + (dispatch_continuation_t)dc, relaxed); + return ds; +} + +static void +_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) +{ + kern_return_t kr; +#if HAVE_MACH_PORT_CONSTRUCT + mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT }; +#ifdef __LP64__ + const mach_port_context_t guard = 0xfeed09071f1ca7edull; +#else + const mach_port_context_t guard = 0xff1ca7edull; +#endif + kr = mach_port_construct(mach_task_self(), &opts, guard, + &_dispatch_mach_notify_port); +#else + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &_dispatch_mach_notify_port); +#endif + DISPATCH_VERIFY_MIG(kr); + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, + "mach_port_construct() failed: cannot create receive right"); + } + + static const struct dispatch_continuation_s dc = { + .dc_func = (void*)_dispatch_mach_notify_source_invoke, + }; + _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv( + _dispatch_mach_notify_port, &dc); + dispatch_assert(_dispatch_mach_notify_source); + dispatch_activate(_dispatch_mach_notify_source); +} + +static mach_port_t +_dispatch_get_mach_notify_port(void) { - return (void*)_dispatch_mach_recv_kevent.ext[0]; + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init); + return _dispatch_mach_notify_port; } +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK static void _dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) { @@ -2712,34 +3453,22 @@ _dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_recv_portset); DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH( + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, "mach_port_allocate() failed: cannot create port set"); } - dispatch_assert(_dispatch_get_mach_recv_msg_buf()); + _dispatch_kevent_qos_s *ke = &_dispatch_mach_recv_kevent; + dispatch_assert(_dispatch_kevent_mach_msg_buf(ke)); dispatch_assert(dispatch_mach_trailer_size == REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS( DISPATCH_MACH_RCV_TRAILER))); - _dispatch_mach_recv_kevent.ident = _dispatch_mach_recv_portset; - _dispatch_kq_update(&_dispatch_mach_recv_kevent); - - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &_dispatch_mach_notify_port); - DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH( - "mach_port_allocate() failed: cannot create receive right"); + ke->ident = _dispatch_mach_recv_portset; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; } - 
_dispatch_mach_notify_source = dispatch_source_create( - &_dispatch_source_type_mach_recv_direct, - _dispatch_mach_notify_port, 0, &_dispatch_mgr_q); - static const struct dispatch_continuation_s dc = { - .dc_func = (void*)_dispatch_mach_notify_source_invoke, - }; - _dispatch_mach_notify_source->ds_refs->ds_handler[DS_EVENT_HANDLER] = - (dispatch_continuation_t)&dc; - dispatch_assert(_dispatch_mach_notify_source); - dispatch_resume(_dispatch_mach_notify_source); +#endif + _dispatch_kq_immediate_update(&_dispatch_mach_recv_kevent); } static mach_port_t @@ -2757,17 +3486,23 @@ _dispatch_mach_portset_init(void *context DISPATCH_UNUSED) .filter = EVFILT_MACHPORT, .flags = EV_ADD, }; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } +#endif + kern_return_t kr; kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_portset); DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH( + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, "mach_port_allocate() failed: cannot create port set"); } kev.ident = _dispatch_mach_portset; - _dispatch_kq_update(&kev); + _dispatch_kq_immediate_update(&kev); } static mach_port_t @@ -2810,22 +3545,6 @@ _dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps) return mps ? kr : 0; } -static void -_dispatch_kevent_mach_recv_reenable(_dispatch_kevent_qos_s *ke DISPATCH_UNUSED) -{ -#if (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090) - // delete and re-add kevent to workaround - if (ke->ext[1] != _dispatch_mach_recv_kevent.ext[1]) { - _dispatch_kevent_qos_s kev = _dispatch_mach_recv_kevent; - kev.flags = EV_DELETE; - _dispatch_kq_update(&kev); - } -#endif - _dispatch_mgr_kevent_reenable(&_dispatch_mach_recv_kevent); -} - static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) @@ -2848,6 +3567,7 @@ _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, } return kr; } +#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, @@ -2871,23 +3591,11 @@ _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, return kr; } -static inline void -_dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke) -{ - if (ke->ident == _dispatch_mach_recv_portset) { - return _dispatch_kevent_mach_msg_drain(ke); - } else if (ke->ident == _dispatch_mach_portset) { - return _dispatch_kevent_machport_drain(ke); - } else { - return _dispatch_kevent_error(ke); - } -} - +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK DISPATCH_NOINLINE static void _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); mach_port_t name = (mach_port_name_t)ke->data; dispatch_kevent_t dk; @@ -2905,92 +3613,57 @@ _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke) .fflags = DISPATCH_MACH_RECV_MESSAGE, .udata = (uintptr_t)dk, }; - _dispatch_kevent_debug(&kev, __func__); + _dispatch_kevent_debug("synthetic", &kev); _dispatch_kevent_merge(&kev); } +#endif DISPATCH_NOINLINE static void _dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); - mach_msg_header_t *hdr = (mach_msg_header_t*)ke->ext[0]; - mach_msg_size_t siz, msgsiz; + mach_msg_header_t *hdr = 
_dispatch_kevent_mach_msg_buf(ke); + mach_msg_size_t siz; mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - _dispatch_kevent_mach_recv_reenable(ke); - if (!dispatch_assume(hdr)) { - DISPATCH_CRASH("EVFILT_MACHPORT with no message"); + if (!fastpath(hdr)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); } if (fastpath(!kr)) { - return _dispatch_kevent_mach_msg_recv(hdr); + _dispatch_kevent_mach_msg_recv(ke, hdr); + goto out; } else if (kr != MACH_RCV_TOO_LARGE) { goto out; + } else if (!ke->data) { + DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); } - if (!dispatch_assume(ke->ext[1] <= UINT_MAX - - dispatch_mach_trailer_size)) { - DISPATCH_CRASH("EVFILT_MACHPORT with overlarge message"); + if (slowpath(ke->ext[1] > (UINT_MAX - dispatch_mach_trailer_size))) { + DISPATCH_INTERNAL_CRASH(ke->ext[1], + "EVFILT_MACHPORT with overlarge message"); } - siz = (mach_msg_size_t)ke->ext[1] + dispatch_mach_trailer_size; + siz = _dispatch_kevent_mach_msg_size(ke) + dispatch_mach_trailer_size; hdr = malloc(siz); - if (ke->data) { - if (!dispatch_assume(hdr)) { - // Kernel will discard message too large to fit - hdr = _dispatch_get_mach_recv_msg_buf(); - siz = _dispatch_mach_recv_msg_buf_size; - } - mach_port_t name = (mach_port_name_t)ke->data; - const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - if (fastpath(!kr)) { - return _dispatch_kevent_mach_msg_recv(hdr); - } else if (kr == MACH_RCV_TOO_LARGE) { - _dispatch_log("BUG in libdispatch client: " - "_dispatch_kevent_mach_msg_drain: dropped message too " - "large to fit in memory: id = 0x%x, size = %lld", - hdr->msgh_id, ke->ext[1]); - kr = MACH_MSG_SUCCESS; - } - } else { - // We don't know which port in the portset contains the large message, - // so need to receive all messages pending on the portset to ensure the - // large message is drained. 
- bool received = false; - for (;;) { - if (!dispatch_assume(hdr)) { - DISPATCH_CLIENT_CRASH("Message too large to fit in memory"); - } - const mach_msg_option_t options = (DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT); - kr = mach_msg(hdr, options, 0, siz, _dispatch_mach_recv_portset, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); - if ((!kr || kr == MACH_RCV_TOO_LARGE) && !dispatch_assume( - hdr->msgh_size <= UINT_MAX - dispatch_mach_trailer_size)) { - DISPATCH_CRASH("Overlarge message"); - } - if (fastpath(!kr)) { - msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; - if (msgsiz < siz) { - void *shrink = realloc(hdr, msgsiz); - if (shrink) hdr = shrink; - } - _dispatch_kevent_mach_msg_recv(hdr); - hdr = NULL; - received = true; - } else if (kr == MACH_RCV_TOO_LARGE) { - siz = hdr->msgh_size + dispatch_mach_trailer_size; - } else { - if (kr == MACH_RCV_TIMED_OUT && received) { - kr = MACH_MSG_SUCCESS; - } - break; - } - hdr = reallocf(hdr, siz); - } + if (!dispatch_assume(hdr)) { + // Kernel will discard message too large to fit + hdr = NULL; + siz = 0; } - if (hdr != _dispatch_get_mach_recv_msg_buf()) { + mach_port_t name = (mach_port_name_t)ke->data; + const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); + kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (fastpath(!kr)) { + _dispatch_kevent_mach_msg_recv(ke, hdr); + goto out; + } else if (kr == MACH_RCV_TOO_LARGE) { + _dispatch_log("BUG in libdispatch client: " + "_dispatch_kevent_mach_msg_drain: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", + hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke)); + kr = MACH_MSG_SUCCESS; + } + if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { free(hdr); } out: @@ -3000,8 +3673,57 @@ out: } } +DISPATCH_NOINLINE static void -_dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr) +_dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke) +{ + if (unlikely(!(ke->flags & EV_UDATA_SPECIFIC))) { +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (ke->ident == _dispatch_mach_recv_portset) { + _dispatch_kevent_mach_msg_drain(ke); + return _dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent); + } else if (ke->ident == _dispatch_mach_portset) { + return _dispatch_kevent_machport_drain(ke); + } +#endif + return _dispatch_kevent_error(ke); + } + + dispatch_kevent_t dk = (dispatch_kevent_t)ke->udata; + dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources); + bool is_reply = (dk->dk_kevent.flags & EV_ONESHOT); + dispatch_source_t ds = _dispatch_source_from_refs(dr); + + if (_dispatch_kevent_mach_msg_size(ke)) { + _dispatch_kevent_mach_msg_drain(ke); + if (is_reply) { + // _dispatch_kevent_mach_msg_drain() should have deleted this event + dispatch_assert(ke->flags & EV_DELETE); + return; + } + + if (!(ds->dq_atomic_flags & DSF_CANCELED)) { + // re-arm the mach channel + ke->fflags = DISPATCH_MACH_RCV_OPTIONS; + ke->data = 0; + ke->ext[0] = 0; + ke->ext[1] = 0; + return _dispatch_kq_deferred_update(ke); + } + } else if (is_reply) { + DISPATCH_INTERNAL_CRASH(ke->flags, "Unexpected EVFILT_MACHPORT event"); + } + if (unlikely((ke->flags & EV_VANISHED) && + (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE))) { + DISPATCH_CLIENT_CRASH(ke->flags, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + return _dispatch_kevent_merge(ke); +} + +static void +_dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr) { dispatch_source_refs_t dri; dispatch_kevent_t dk; @@ -3012,38 
+3734,42 @@ _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr) dispatch_mach_trailer_size)) { _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received overlarge message"); - return _dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } if (!dispatch_assume(name)) { _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received message with MACH_PORT_NULL port"); - return _dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } _dispatch_debug_machport(name); - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (ke->flags & EV_UDATA_SPECIFIC) { + dk = (void*)ke->udata; + } else { + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + } if (!dispatch_assume(dk)) { _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received message with unknown kevent"); - return _dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } - _dispatch_kevent_debug(&dk->dk_kevent, __func__); TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { dispatch_source_t dsi = _dispatch_source_from_refs(dri); if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { - return _dispatch_source_merge_mach_msg(dsi, dri, dk, hdr, siz); + return _dispatch_source_merge_mach_msg(dsi, dri, dk, ke, hdr, siz); } } _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received message with no listeners"); - return _dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } static void -_dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr) +_dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr) { if (hdr) { mach_msg_destroy(hdr); - if (hdr != _dispatch_get_mach_recv_msg_buf()) { + if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { free(hdr); } } @@ -3051,17 +3777,17 @@ _dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr) static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, - dispatch_kevent_t dk, mach_msg_header_t *hdr, mach_msg_size_t siz) + dispatch_kevent_t dk, _dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr, mach_msg_size_t siz) { - if (ds == _dispatch_mach_notify_source) { - _dispatch_mach_notify_source_invoke(hdr); - return _dispatch_kevent_mach_msg_destroy(hdr); + if (dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE) { + return _dispatch_source_merge_mach_msg_direct(ds, ke, hdr); } dispatch_mach_reply_refs_t dmr = NULL; - if (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) { + if (dk->dk_kevent.flags & EV_ONESHOT) { dmr = (dispatch_mach_reply_refs_t)dr; } - return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, hdr, siz); + return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, ke, hdr, siz); } DISPATCH_NOINLINE @@ -3093,14 +3819,14 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) // Re-register for notification before delivery unreg = _dispatch_kevent_resume(dk, flag, 0); } - DISPATCH_MACH_KEVENT_ARMED(dk) = 0; + DISPATCH_MACH_NOTIFICATION_ARMED(dk) = 0; TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { dispatch_source_t dsi = _dispatch_source_from_refs(dri); if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) { dispatch_mach_t dm = (dispatch_mach_t)dsi; - _dispatch_mach_merge_kevent(dm, &kev); + _dispatch_mach_merge_notification_kevent(dm, &kev); if (unreg && dm->dm_dkev) { - _dispatch_mach_kevent_unregister(dm); + _dispatch_mach_notification_kevent_unregister(dm); } } else { _dispatch_source_merge_kevent(dsi, &kev); 
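[Editor's illustration, not part of the patch: the dispatch_memorypressure hunks above wire these kernel events into libdispatch itself; an application can observe the same pressure transitions through the public DISPATCH_SOURCE_TYPE_MEMORYPRESSURE API. A minimal sketch under that assumption; the observe_memorypressure() helper name is invented for this example.]

#include <dispatch/dispatch.h>
#include <stdio.h>

static dispatch_source_t
observe_memorypressure(void)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
			DISPATCH_MEMORYPRESSURE_NORMAL | DISPATCH_MEMORYPRESSURE_WARN |
			DISPATCH_MEMORYPRESSURE_CRITICAL,
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0));
	dispatch_source_set_event_handler(ds, ^{
		// dispatch_source_get_data() returns the mask of levels that fired
		unsigned long level = dispatch_source_get_data(ds);
		if (level & DISPATCH_MEMORYPRESSURE_CRITICAL) {
			fprintf(stderr, "memory pressure: critical\n");
		} else if (level & DISPATCH_MEMORYPRESSURE_WARN) {
			fprintf(stderr, "memory pressure: warning, trim caches\n");
		}
	});
	// dispatch_activate() is the same initial-activation call this patch
	// adopts internally in place of dispatch_resume().
	dispatch_activate(ds);
	return ds;
}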
@@ -3108,7 +3834,7 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) _dispatch_source_kevent_unregister(dsi); } } - if (!dr_next || DISPATCH_MACH_KEVENT_ARMED(dk)) { + if (!dr_next || DISPATCH_MACH_NOTIFICATION_ARMED(dk)) { // current merge is last in list (dk might have been freed) // or it re-armed the notification return; @@ -3131,24 +3857,22 @@ _dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, _dispatch_debug_machport(port); if ((dk->dk_kevent.data & mask) && !(prev & mask)) { - // initialize _dispatch_mach_notify_port: - (void)_dispatch_get_mach_recv_portset(); _dispatch_debug("machport[0x%08x]: registering for send-possible " "notification", port); previous = MACH_PORT_NULL; krr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, _dispatch_mach_notify_port, + notify_msgid, notify_sync, _dispatch_get_mach_notify_port(), MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); DISPATCH_VERIFY_MIG(krr); switch(krr) { case KERN_INVALID_NAME: case KERN_INVALID_RIGHT: - // Supress errors & clear registration state + // Suppress errors & clear registration state dk->dk_kevent.data &= ~mask; break; default: - // Else, we dont expect any errors from mach. Log any errors + // Else, we don't expect any errors from mach. Log any errors if (dispatch_assume_zero(krr)) { // log the error & clear registration state dk->dk_kevent.data &= ~mask; @@ -3198,10 +3922,18 @@ _dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, static void _dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) { - (void)_dispatch_get_mach_recv_portset(); + static int notify_type = HOST_NOTIFY_CALENDAR_SET; + kern_return_t kr; _dispatch_debug("registering for calendar-change notification"); +retry: + kr = host_request_notification(_dispatch_get_mach_host_port(), + notify_type, _dispatch_get_mach_notify_port()); + // Fall back when support for the newer _SET variant is missing; it fires strictly more often.
+ if (kr == KERN_INVALID_ARGUMENT && + notify_type != HOST_NOTIFY_CALENDAR_CHANGE){ + notify_type = HOST_NOTIFY_CALENDAR_CHANGE; + goto retry; + } DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); } @@ -3221,8 +3953,9 @@ _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size); boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); - if (!success && reply.RetCode == MIG_BAD_ID && hdr->msgh_id == 950) { - // host_notify_reply.defs: host_calendar_changed + if (!success && reply.RetCode == MIG_BAD_ID && + (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID || + hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) { _dispatch_debug("calendar-change notification"); _dispatch_timers_calendar_change(); _dispatch_mach_host_notify_update(NULL); @@ -3232,6 +3965,9 @@ _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) { (void)dispatch_assume_zero(reply.RetCode); } + if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) { + mach_msg_destroy(hdr); + } } kern_return_t @@ -3282,20 +4018,41 @@ _dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, #pragma mark - #pragma mark dispatch_mach_t -#define DISPATCH_MACH_NEVER_CONNECTED (UINT32_MAX/2) +#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1 #define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 +#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4 +#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8 #define DISPATCH_MACH_OPTIONS_MASK 0xffff +#define DM_SEND_STATUS_SUCCESS 0x1 +#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2 + +DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, + DM_SEND_INVOKE_NONE = 0x0, + DM_SEND_INVOKE_FLUSH = 0x1, + DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, + DM_SEND_INVOKE_CANCEL = 0x4, + DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8, + DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10, +); +#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \ + ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND) + +static inline pthread_priority_t _dispatch_mach_priority_propagate( + mach_msg_option_t options); static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); +static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou); static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, mach_port_t remote_port); +static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port); static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( dispatch_object_t dou, dispatch_mach_reply_refs_t dmr); static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou); static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( dispatch_mach_msg_t dmsg); -static void _dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou, +static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, pthread_priority_t pp); static dispatch_mach_t @@ -3307,13 +4064,10 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dm = _dispatch_alloc(DISPATCH_VTABLE(mach), sizeof(struct dispatch_mach_s)); - _dispatch_queue_init((dispatch_queue_t)dm); - dm->dq_label = label; + _dispatch_queue_init(dm->_as_dq, DQF_NONE, 1, true); + dm->dq_label = label; dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds - dm->do_ref_cnt++; // 
since channel is created suspended - dm->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; - dm->do_targetq = &_dispatch_mgr_q; dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s)); dr->dr_source_wref = _dispatch_ptr2wref(dm); @@ -3328,8 +4082,12 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; TAILQ_INIT(&dm->dm_refs->dm_replies); - // First item on the channel sets the user-specified target queue - dispatch_set_target_queue(dm, q); + if (slowpath(!q)) { + q = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); + } else { + _dispatch_retain(q); + } + dm->do_targetq = q; _dispatch_object_debug(dm, "%s", __func__); return dm; } @@ -3360,7 +4118,7 @@ _dispatch_mach_dispose(dispatch_mach_t dm) } free(dr); free(dm->dm_refs); - _dispatch_queue_destroy(dm); + _dispatch_queue_destroy(dm->_as_dq); } void @@ -3369,98 +4127,273 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, { dispatch_mach_send_refs_t dr = dm->dm_refs; dispatch_kevent_t dk; + uint32_t disconnect_cnt; + dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; + dm->ds_is_direct_kevent = (bool)_dispatch_evfilt_machport_direct_enabled; if (MACH_PORT_VALID(receive)) { dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; + dk->dk_kevent = type->ke; dk->dk_kevent.ident = receive; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_VANISHED; dk->dk_kevent.udata = (uintptr_t)dk; TAILQ_INIT(&dk->dk_sources); dm->ds_dkev = dk; - dm->ds_pending_data_mask = dk->dk_kevent.fflags; + dm->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; + dm->ds_needs_rearm = dm->ds_is_direct_kevent; + if (!dm->ds_is_direct_kevent) { + dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; + dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + } _dispatch_retain(dm); // the reference the manager queue holds } dr->dm_send = send; if (MACH_PORT_VALID(send)) { if (checkin) { dispatch_retain(checkin); - mach_msg_option_t options = _dispatch_mach_checkin_options(); - _dispatch_mach_msg_set_options(checkin, options); + checkin->dmsg_options = _dispatch_mach_checkin_options(); dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); } - dr->dm_checkin = checkin; + dr->dm_checkin = checkin; + } + // monitor message reply ports + dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 == + DISPATCH_MACH_NEVER_INSTALLED); + disconnect_cnt = os_atomic_dec2o(dr, dm_disconnect_cnt, release); + if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); + } + _dispatch_object_debug(dm, "%s", __func__); + return dispatch_activate(dm); +} + +// assumes low bit of mach port names is always set +#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u + +static inline void +_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + dmr->dmr_reply &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED; +} + +static inline bool +_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + mach_port_t reply_port = dmr->dmr_reply; + return reply_port ? 
!(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false; +} + +static inline mach_port_t +_dispatch_mach_reply_get_reply_port(dispatch_mach_reply_refs_t dmr) +{ + mach_port_t reply_port = dmr->dmr_reply; + return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0; +} + +static inline bool +_dispatch_mach_reply_tryremove(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr) +{ + bool removed; + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + } + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); + return removed; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, unsigned int options) +{ + dispatch_mach_msg_t dmsgr = NULL; + bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); + if (options & DKEV_UNREGISTER_REPLY_REMOVE) { + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); + } + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); } - // monitor message reply ports - dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - if (slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_disconnect_cnt, - DISPATCH_MACH_NEVER_CONNECTED, 0, release))) { - DISPATCH_CLIENT_CRASH("Channel already connected"); + if (disconnected) { + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; } - _dispatch_object_debug(dm, "%s", __func__); - return dispatch_resume(dm); + _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p", + _dispatch_mach_reply_get_reply_port(dmr), + disconnected ? 
" (disconnected)" : "", dmr->dmr_ctxt); + if (dmsgr) { + return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); + } + dispatch_assert(!(options & DKEV_UNREGISTER_WAKEUP)); } DISPATCH_NOINLINE static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, bool disconnected) + dispatch_mach_reply_refs_t dmr, unsigned int options) { dispatch_mach_msg_t dmsgr = NULL; + bool replies_empty = false; + bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); + if (options & DKEV_UNREGISTER_REPLY_REMOVE) { + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); + } + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + replies_empty = TAILQ_EMPTY(&dm->dm_refs->dm_replies); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); + } if (disconnected) { dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; } + uint32_t flags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; dispatch_kevent_t dk = dmr->dmr_dkev; - TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); - _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE, 0); - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - if (dmr->dmr_voucher) _voucher_release(dmr->dmr_voucher); + _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p", + (mach_port_t)dk->dk_kevent.ident, + disconnected ? " (disconnected)" : "", dmr->dmr_ctxt); + if (!dm->ds_is_direct_kevent) { + dmr->dmr_dkev = NULL; + TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); + _dispatch_kevent_unregister(dk, flags, 0); + } else { + long r = _dispatch_kevent_unregister(dk, flags, options); + if (r == EINPROGRESS) { + _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]", + (mach_port_t)dk->dk_kevent.ident, dk); + dispatch_assert(options == DKEV_UNREGISTER_DISCONNECTED); + // dmr must be put back so that the event delivery finds it, the + // replies lock is held by the caller. 
+ TAILQ_INSERT_HEAD(&dm->dm_refs->dm_replies, dmr, dmr_list); + if (dmsgr) { + dmr->dmr_voucher = dmsgr->dmsg_voucher; + dmsgr->dmsg_voucher = NULL; + dispatch_release(dmsgr); + } + return; // deferred unregistration + } + dispatch_assume_zero(r); + dmr->dmr_dkev = NULL; + _TAILQ_TRASH_ENTRY(dmr, dr_list); + } free(dmr); - if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); + if (dmsgr) { + return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); + } + if ((options & DKEV_UNREGISTER_WAKEUP) && replies_empty && + (dm->dm_refs->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_register(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, + dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts) +{ + dmr->dr_source_wref = _dispatch_ptr2wref(dm); + dmr->dmr_dkev = NULL; + dmr->dmr_reply = reply_port; + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_mach_reply_mark_reply_port_owned(dmr); + } else { + if (dmsg->dmsg_voucher) { + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); + } + dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; + // make reply context visible to leaks rdar://11777199 + dmr->dmr_ctxt = dmsg->do_ctxt; + } + + _dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p", + reply_port, dmsg->do_ctxt); + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); + } + TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); } DISPATCH_NOINLINE static void -_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply, +_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, dispatch_mach_msg_t dmsg) { dispatch_kevent_t dk; dispatch_mach_reply_refs_t dmr; + dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; + pthread_priority_t mp, pp; dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; - dk->dk_kevent.ident = reply; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dk->dk_kevent = type->ke; + dk->dk_kevent.ident = reply_port; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_ONESHOT; dk->dk_kevent.udata = (uintptr_t)dk; TAILQ_INIT(&dk->dk_sources); + if (!dm->ds_is_direct_kevent) { + dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + } dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s)); dmr->dr_source_wref = _dispatch_ptr2wref(dm); dmr->dmr_dkev = dk; + dmr->dmr_reply = reply_port; if (dmsg->dmsg_voucher) { - dmr->dmr_voucher =_voucher_retain(dmsg->dmsg_voucher); + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); } - dmr->dmr_priority = dmsg->dmsg_priority; + dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; // make reply context visible to leaks rdar://11777199 dmr->dmr_ctxt = dmsg->do_ctxt; - _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", reply, - dmsg->do_ctxt); + pp = dm->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp && dm->ds_is_direct_kevent) { + mp = dmsg->dmsg_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp < mp) pp = mp; + pp |= 
dm->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + } else { + pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } + + _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", + reply_port, dmsg->do_ctxt); uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, &flags); + bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, pp, &flags); TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr, dr_list); + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); + } TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) { - _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + return _dispatch_mach_reply_kevent_unregister(dm, dmr, + DKEV_UNREGISTER_DISCONNECTED|DKEV_UNREGISTER_REPLY_REMOVE); } } DISPATCH_NOINLINE static void -_dispatch_mach_kevent_unregister(dispatch_mach_t dm) +_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm) { + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); dispatch_kevent_t dk = dm->dm_dkev; dm->dm_dkev = NULL; TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, @@ -3473,8 +4406,9 @@ _dispatch_mach_kevent_unregister(dispatch_mach_t dm) DISPATCH_NOINLINE static void -_dispatch_mach_kevent_register(dispatch_mach_t dm, mach_port_t send) +_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send) { + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); dispatch_kevent_t dk; dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); @@ -3488,49 +4422,103 @@ _dispatch_mach_kevent_register(dispatch_mach_t dm, mach_port_t send) dm->ds_pending_data_mask |= dk->dk_kevent.fflags; uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dk, &flags); + bool do_resume = _dispatch_kevent_register(&dk, + _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, &flags); TAILQ_INSERT_TAIL(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, dr_list); dm->dm_dkev = dk; if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) { - _dispatch_mach_kevent_unregister(dm); + _dispatch_mach_notification_kevent_unregister(dm); } } -static inline void -_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou, - pthread_priority_t pp) +static mach_port_t +_dispatch_get_thread_reply_port(void) { - return _dispatch_queue_push(dm._dq, dou, pp); + mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port(); + if (mrp) { + reply_port = mrp; + _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", + reply_port); + } else { + reply_port = mach_reply_port(); + _dispatch_set_thread_mig_reply_port(reply_port); + _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", + reply_port); + } + _dispatch_debug_machport(reply_port); + return reply_port; } -static inline void -_dispatch_mach_msg_set_options(dispatch_object_t dou, mach_msg_option_t options) +static void +_dispatch_clear_thread_reply_port(mach_port_t reply_port) +{ + mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); + if (reply_port != mrp) { + if (mrp) { + _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " + "port (found 0x%08x)", reply_port, mrp); + } + return; + } + _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", + reply_port); 
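/*
 * Editorial aside (not part of the patch): the two helpers above implement a
 * per-thread borrow/return cache: each thread keeps at most one Mach reply
 * port in thread-specific data, synchronous sends reuse it instead of
 * allocating a fresh receive right per call, and a port returned into an
 * already-occupied slot is deallocated. A portable sketch of the same caching
 * shape, with a heap block standing in for the port right (names are
 * illustrative, not the libdispatch TSD API):
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t cache_key;
static pthread_once_t cache_once = PTHREAD_ONCE_INIT;

static void cache_init(void) { (void)pthread_key_create(&cache_key, free); }

static void *
borrow_thread_resource(void)
{
	pthread_once(&cache_once, cache_init);
	void *r = pthread_getspecific(cache_key);
	if (r) {
		pthread_setspecific(cache_key, NULL); /* borrow: empty the slot */
		return r;
	}
	return malloc(64); /* slow path: create a fresh resource */
}

static void
return_thread_resource(void *r)
{
	pthread_once(&cache_once, cache_init);
	if (pthread_getspecific(cache_key)) {
		free(r); /* slot already occupied: drop the extra resource */
	} else {
		pthread_setspecific(cache_key, r); /* cache for the next caller */
	}
}

int
main(void)
{
	void *r = borrow_thread_resource();  /* cache empty: allocates */
	return_thread_resource(r);           /* cached for this thread */
	void *r2 = borrow_thread_resource(); /* reuses the cached block */
	return_thread_resource(r2);
	return 0;
}
/* (end of aside; the patch resumes below) */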
+} + +static void +_dispatch_set_thread_reply_port(mach_port_t reply_port) { - dou._do->do_suspend_cnt = (unsigned int)options; + _dispatch_debug_machport(reply_port); + mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); + if (mrp) { + kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + _dispatch_debug("machport[0x%08x]: deallocated sync reply port " + "(found 0x%08x)", reply_port, mrp); + } else { + _dispatch_set_thread_mig_reply_port(reply_port); + _dispatch_debug("machport[0x%08x]: restored thread sync reply port", + reply_port); + } } -static inline mach_msg_option_t -_dispatch_mach_msg_get_options(dispatch_object_t dou) +static inline mach_port_t +_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) { - mach_msg_option_t options = (mach_msg_option_t)dou._do->do_suspend_cnt; - return options; + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t remote = hdr->msgh_remote_port; + return remote; +} + +static inline mach_port_t +_dispatch_mach_msg_get_reply_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t local = hdr->msgh_local_port; + if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) != + MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL; + return local; } static inline void -_dispatch_mach_msg_set_reason(dispatch_object_t dou, mach_error_t err, +_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err, unsigned long reason) { dispatch_assert_zero(reason & ~(unsigned long)code_emask); - dou._do->do_suspend_cnt = (unsigned int)((err || !reason) ? err : + dmsg->dmsg_error = ((err || !reason) ? err : err_local|err_sub(0x3e0)|(mach_error_t)reason); } static inline unsigned long -_dispatch_mach_msg_get_reason(dispatch_object_t dou, mach_error_t *err_ptr) +_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) { - mach_error_t err = (mach_error_t)dou._do->do_suspend_cnt; - dou._do->do_suspend_cnt = 0; + mach_error_t err = dmsg->dmsg_error; + + dmsg->dmsg_error = 0; if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { *err_ptr = 0; return err_get_code(err); @@ -3541,13 +4529,16 @@ _dispatch_mach_msg_get_reason(dispatch_object_t dou, mach_error_t *err_ptr) static void _dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, - mach_msg_header_t *hdr, mach_msg_size_t siz) + _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, mach_msg_size_t siz) { _dispatch_debug_machport(hdr->msgh_remote_port); _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); - if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { - return _dispatch_kevent_mach_msg_destroy(hdr); + bool canceled = (dm->dq_atomic_flags & DSF_CANCELED); + if (!dmr && canceled) { + // message received after cancellation, _dispatch_mach_kevent_merge is + // responsible for mach channel source state (e.g. 
deferred deletion) + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } dispatch_mach_msg_t dmsg; voucher_t voucher; @@ -3559,31 +4550,166 @@ _dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, dmr->dmr_voucher = NULL; // transfer reference priority = dmr->dmr_priority; ctxt = dmr->dmr_ctxt; - _dispatch_mach_reply_kevent_unregister(dm, dmr, false); + unsigned int options = DKEV_DISPOSE_IMMEDIATE_DELETE; + options |= DKEV_UNREGISTER_REPLY_REMOVE; + options |= DKEV_UNREGISTER_WAKEUP; + if (canceled) options |= DKEV_UNREGISTER_DISCONNECTED; + _dispatch_mach_reply_kevent_unregister(dm, dmr, options); + ke->flags |= EV_DELETE; // remember that unregister deleted the event + if (canceled) return; } else { voucher = voucher_create_with_mach_msg(hdr); priority = _voucher_get_priority(voucher); } dispatch_mach_msg_destructor_t destructor; - destructor = (hdr == _dispatch_get_mach_recv_msg_buf()) ? + destructor = (hdr == _dispatch_kevent_mach_msg_buf(ke)) ? DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : DISPATCH_MACH_MSG_DESTRUCTOR_FREE; dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (hdr == _dispatch_kevent_mach_msg_buf(ke)) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr, (uint64_t)dmsg->dmsg_buf); + } dmsg->dmsg_voucher = voucher; dmsg->dmsg_priority = priority; dmsg->do_ctxt = ctxt; _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg); _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); + return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); } -static inline mach_port_t -_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) +DISPATCH_ALWAYS_INLINE +static inline dispatch_mach_msg_t +_dispatch_mach_msg_reply_recv(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port) +{ + if (slowpath(!MACH_PORT_VALID(reply_port))) { + DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port"); + } + void *ctxt = dmr->dmr_ctxt; + mach_msg_header_t *hdr, *hdr2 = NULL; + void *hdr_copyout_addr; + mach_msg_size_t siz, msgsiz = 0; + mach_msg_return_t kr; + mach_msg_option_t options; + siz = mach_vm_round_page(_dispatch_mach_recv_msg_size + + dispatch_mach_trailer_size); + hdr = alloca(siz); + for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size); + p < (mach_vm_address_t)hdr + siz; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER); +retry: + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port, + (options & MACH_RCV_TIMEOUT) ? 
"poll" : "wait"); + kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + hdr_copyout_addr = hdr; + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) " + "returned: %s - 0x%x", reply_port, siz, options, + mach_error_string(kr), kr); + switch (kr) { + case MACH_RCV_TOO_LARGE: + if (!fastpath(hdr->msgh_size <= UINT_MAX - + dispatch_mach_trailer_size)) { + DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message"); + } + if (options & MACH_RCV_LARGE) { + msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; + hdr2 = malloc(msgsiz); + if (dispatch_assume(hdr2)) { + hdr = hdr2; + siz = msgsiz; + } + options |= MACH_RCV_TIMEOUT; + options &= ~MACH_RCV_LARGE; + goto retry; + } + _dispatch_log("BUG in libdispatch client: " + "dispatch_mach_send_and_wait_for_reply: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id, + hdr->msgh_size); + break; + case MACH_RCV_INVALID_NAME: // rdar://problem/21963848 + case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327 + case MACH_RCV_PORT_DIED: + // channel was disconnected/canceled and reply port destroyed + _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: " + "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr); + goto out; + case MACH_MSG_SUCCESS: + if (hdr->msgh_remote_port) { + _dispatch_debug_machport(hdr->msgh_remote_port); + } + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, " + "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, + hdr->msgh_size, hdr->msgh_remote_port); + siz = hdr->msgh_size + dispatch_mach_trailer_size; + if (hdr2 && siz < msgsiz) { + void *shrink = realloc(hdr2, msgsiz); + if (shrink) hdr = hdr2 = shrink; + } + break; + default: + dispatch_assume_zero(kr); + break; + } + _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port); + hdr->msgh_local_port = MACH_PORT_NULL; + if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { + if (!kr) mach_msg_destroy(hdr); + goto out; + } + dispatch_mach_msg_t dmsg; + dispatch_mach_msg_destructor_t destructor = (!hdr2) ? 
+ DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : + DISPATCH_MACH_MSG_DESTRUCTOR_FREE; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (!hdr2 || hdr != hdr_copyout_addr) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr_copyout_addr, (uint64_t)_dispatch_mach_msg_get_msg(dmsg)); + } + dmsg->do_ctxt = ctxt; + return dmsg; +out: + free(hdr2); + return NULL; +} + +static inline void +_dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port) { - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); - mach_port_t remote = hdr->msgh_remote_port; - return remote; + bool removed = _dispatch_mach_reply_tryremove(dm, dmr); + if (!MACH_PORT_VALID(local_port) || !removed) { + // port moved/destroyed during receive, or reply waiter was never + // registered or already removed (disconnected) + return; + } + mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(dmr); + _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", + reply_port, dmr->dmr_ctxt); + if (_dispatch_mach_reply_is_reply_port_owned(dmr)) { + _dispatch_set_thread_reply_port(reply_port); + if (local_port != reply_port) { + DISPATCH_CLIENT_CRASH(local_port, + "Reply received on unexpected port"); + } + return; + } + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + hdr->msgh_local_port = local_port; + dmsg->dmsg_voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + dmsg->dmsg_priority = dmr->dmr_priority; + dmsg->do_ctxt = dmr->dmr_ctxt; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED); + return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); } static inline void @@ -3597,7 +4723,9 @@ _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, if (local_port) hdr->msgh_local_port = local_port; if (remote_port) hdr->msgh_remote_port = remote_port; _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); - return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); + _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ? + local_port : remote_port, local_port ? "receive" : "send"); + return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); } static inline dispatch_mach_msg_t @@ -3605,25 +4733,56 @@ _dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, dispatch_mach_reply_refs_t dmr) { dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; - if (dmsg && !dmsg->dmsg_reply) return NULL; + mach_port_t reply_port = dmsg ? 
dmsg->dmsg_reply : + _dispatch_mach_reply_get_reply_port(dmr); + voucher_t v; + + if (!reply_port) { + if (!dmsg) { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + if (v) _voucher_release(v); + } + return NULL; + } + + if (dmsg) { + v = dmsg->dmsg_voucher; + if (v) _voucher_retain(v); + } else { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + } + + if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) && + (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) || + (dmr && !dmr->dmr_dkev && + _dispatch_mach_reply_is_reply_port_owned(dmr))) { + if (v) _voucher_release(v); + // deallocate owned reply port to break _dispatch_mach_msg_reply_recv + // out of waiting in mach_msg(MACH_RCV_MSG) + kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + return NULL; + } + mach_msg_header_t *hdr; dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + dmsgr->dmsg_voucher = v; + hdr->msgh_local_port = reply_port; if (dmsg) { - hdr->msgh_local_port = dmsg->dmsg_reply; - if (dmsg->dmsg_voucher) { - dmsgr->dmsg_voucher = _voucher_retain(dmsg->dmsg_voucher); - } dmsgr->dmsg_priority = dmsg->dmsg_priority; dmsgr->do_ctxt = dmsg->do_ctxt; } else { - hdr->msgh_local_port = (mach_port_t)dmr->dmr_dkev->dk_kevent.ident; - dmsgr->dmsg_voucher = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference dmsgr->dmsg_priority = dmr->dmr_priority; dmsgr->do_ctxt = dmr->dmr_ctxt; } _dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED); + _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p", + hdr->msgh_local_port, dmsgr->do_ctxt); return dmsgr; } @@ -3632,52 +4791,69 @@ static void _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) { dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + mach_msg_option_t msg_opts = dmsg->dmsg_options; + _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, " + "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x", + msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, + msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply); + unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? 
+ 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_NOT_SENT); - _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); - if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); + _dispatch_mach_msg_set_reason(dmsg, 0, reason); + _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); + if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); } DISPATCH_NOINLINE -static dispatch_object_t -_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) +static uint32_t +_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_mach_reply_refs_t dmr, pthread_priority_t pp, + dispatch_mach_send_invoke_flags_t send_flags) { dispatch_mach_send_refs_t dr = dm->dm_refs; dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; voucher_t voucher = dmsg->dmsg_voucher; mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; + uint32_t send_status = 0; bool clear_voucher = false, kvoucher_move_send = false; - dr->dm_needs_mgr = 0; - if (slowpath(dr->dm_checkin) && dmsg != dr->dm_checkin) { - // send initial checkin message - if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != - &_dispatch_mgr_q)) { - // send kevent must be uninstalled on the manager queue - dr->dm_needs_mgr = 1; - goto out; - } - dr->dm_checkin = _dispatch_mach_msg_send(dm, dr->dm_checkin)._dmsg; - if (slowpath(dr->dm_checkin)) { - goto out; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE); + mach_port_t reply_port = dmsg->dmsg_reply; + if (!is_reply) { + dr->dm_needs_mgr = 0; + if (unlikely(dr->dm_checkin && dmsg != dr->dm_checkin)) { + // send initial checkin message + if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != + &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + dr->dm_needs_mgr = 1; + goto out; + } + if (unlikely(!_dispatch_mach_msg_send(dm, + dr->dm_checkin, NULL, pp, DM_SEND_INVOKE_NONE))) { + goto out; + } + dr->dm_checkin = NULL; } } - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); mach_msg_return_t kr = 0; - mach_port_t reply = dmsg->dmsg_reply; - mach_msg_option_t opts = 0, msg_opts = _dispatch_mach_msg_get_options(dmsg); - if (!slowpath(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options; + if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED; opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK); - if (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) != - MACH_MSG_TYPE_MOVE_SEND_ONCE) { + if (!is_reply) { if (dmsg != dr->dm_checkin) { msg->msgh_remote_port = dr->dm_send; } if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { if (slowpath(!dm->dm_dkev)) { - _dispatch_mach_kevent_register(dm, msg->msgh_remote_port); + _dispatch_mach_notification_kevent_register(dm, + msg->msgh_remote_port); } if (fastpath(dm->dm_dkev)) { - if (DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) { + if (DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) { goto out; } opts |= MACH_SEND_NOTIFY; @@ -3696,20 +4872,34 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) } else { clear_voucher = _voucher_mach_msg_set(msg, voucher); } + if (pp && _dispatch_evfilt_machport_direct_enabled) { + opts |= MACH_SEND_OVERRIDE; + msg_priority = (mach_msg_priority_t)pp; + } } - 
_voucher_activity_trace_msg(voucher, msg, send); _dispatch_debug_machport(msg->msgh_remote_port); - if (reply) _dispatch_debug_machport(reply); + if (reply_port) _dispatch_debug_machport(reply_port); + if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_clear_thread_reply_port(reply_port); + } + _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg, + msg_opts); + } kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, - MACH_PORT_NULL); + msg_priority); _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: " "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, - opts, msg_opts, msg->msgh_voucher_port, reply, + opts, msg_opts, msg->msgh_voucher_port, reply_port, mach_error_string(kr), kr); + if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DKEV_UNREGISTER_REPLY_REMOVE); + } if (clear_voucher) { if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { - DISPATCH_CRASH("Voucher port corruption"); + DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption"); } mach_voucher_t kv; kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); @@ -3720,7 +4910,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) if (opts & MACH_SEND_NOTIFY) { _dispatch_debug("machport[0x%08x]: send-possible notification " "armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident); - DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) = 1; + DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) = 1; } else { // send kevent must be installed on the manager queue dr->dm_needs_mgr = 1; @@ -3739,142 +4929,500 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { _voucher_dealloc_mach_voucher(ipc_kvoucher); } - if (fastpath(!kr) && reply && - !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply)) { - if (_dispatch_queue_get_current() != &_dispatch_mgr_q) { + if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port && + !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply_port)) { + if (!dm->ds_is_direct_kevent && + _dispatch_queue_get_current() != &_dispatch_mgr_q) { // reply receive kevent must be installed on the manager queue dr->dm_needs_mgr = 1; - _dispatch_mach_msg_set_options(dmsg, msg_opts | - DISPATCH_MACH_REGISTER_FOR_REPLY); + dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY; goto out; } - _dispatch_mach_reply_kevent_register(dm, reply, dmsg); + _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg); } - if (slowpath(dmsg == dr->dm_checkin) && dm->dm_dkev) { - _dispatch_mach_kevent_unregister(dm); + if (unlikely(!is_reply && dmsg == dr->dm_checkin && dm->dm_dkev)) { + _dispatch_mach_notification_kevent_unregister(dm); } if (slowpath(kr)) { - // Send failed, so reply was never connected + // Send failed, so reply was never registered dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); } _dispatch_mach_msg_set_reason(dmsg, kr, 0); - _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); - if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); - dmsg = NULL; + if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) && + (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) { + // Return sent message synchronously + send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT; + } else { + _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); + } + if (dmsgr) 
_dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); + send_status |= DM_SEND_STATUS_SUCCESS; +out: + return send_status; +} + +#pragma mark - +#pragma mark dispatch_mach_send_refs_t + +static void _dispatch_mach_cancel(dispatch_mach_t dm); +static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, + pthread_priority_t pp); + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dm_state_get_override(uint64_t dm_state) +{ + dm_state &= DISPATCH_MACH_STATE_OVERRIDE_MASK; + return (pthread_priority_t)(dm_state >> 32); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dm_state_override_from_priority(pthread_priority_t pp) +{ + uint64_t pp_state = pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return pp_state << 32; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dm_state_needs_override(uint64_t dm_state, uint64_t pp_state) +{ + return (pp_state > (dm_state & DISPATCH_MACH_STATE_OVERRIDE_MASK)); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dm_state_merge_override(uint64_t dm_state, uint64_t pp_state) +{ + if (_dm_state_needs_override(dm_state, pp_state)) { + dm_state &= ~DISPATCH_MACH_STATE_OVERRIDE_MASK; + dm_state |= pp_state; + dm_state |= DISPATCH_MACH_STATE_DIRTY; + dm_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + } + return dm_state; +} + +#define _dispatch_mach_send_push_update_tail(dr, tail) \ + os_mpsc_push_update_tail(dr, dm, tail, do_next) +#define _dispatch_mach_send_push_update_head(dr, head) \ + os_mpsc_push_update_head(dr, dm, head) +#define _dispatch_mach_send_get_head(dr) \ + os_mpsc_get_head(dr, dm) +#define _dispatch_mach_send_unpop_head(dr, dc, dc_next) \ + os_mpsc_undo_pop_head(dr, dm, dc, dc_next, do_next) +#define _dispatch_mach_send_pop_head(dr, head) \ + os_mpsc_pop_head(dr, dm, head, do_next) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dr, + dispatch_object_t dou) +{ + if (_dispatch_mach_send_push_update_tail(dr, dou._do)) { + _dispatch_mach_send_push_update_head(dr, dou._do); + return true; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_mach_reply_refs_t dmr; + dispatch_mach_msg_t dmsg; + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + pthread_priority_t pp = _dm_state_get_override(dr->dm_state); + uint64_t old_state, new_state; + uint32_t send_status; + bool needs_mgr, disconnecting, returning_send_result = false; + +again: + needs_mgr = false; disconnecting = false; + while (dr->dm_tail) { + dc = _dispatch_mach_send_get_head(dr); + do { + dispatch_mach_send_invoke_flags_t sf = send_flags; + // Only request immediate send result for the first message + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + next_dc = _dispatch_mach_send_pop_head(dr, dc); + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + goto partial_drain; + } + _dispatch_continuation_pop(dc, dm->_as_dq, flags); + continue; + } + if (_dispatch_object_is_slow_item(dc)) { + dmsg = ((dispatch_continuation_t)dc)->dc_data; + dmr = ((dispatch_continuation_t)dc)->dc_other; + } else if (_dispatch_object_has_vtable(dc)) { + dmsg = (dispatch_mach_msg_t)dc; + dmr = NULL; + } else { + if ((dm->dm_dkev || !dm->ds_is_direct_kevent) && + (_dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send kevent must be 
uninstalled on the manager queue + needs_mgr = true; + goto partial_drain; + } + if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) { + disconnecting = true; + goto partial_drain; + } + continue; + } + _dispatch_voucher_ktrace_dmsg_pop(dmsg); + if (unlikely(dr->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + _dispatch_mach_msg_not_sent(dm, dmsg); + continue; + } + send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, pp, sf); + if (unlikely(!send_status)) { + goto partial_drain; + } + if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) { + returning_send_result = true; + } + } while ((dc = next_dc)); + } + + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + if (old_state & DISPATCH_MACH_STATE_DIRTY) { + new_state = old_state; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } else { + // unlock + new_state = 0; + } + }); + goto out; + +partial_drain: + // if this is not a complete drain, we must undo some things + _dispatch_mach_send_unpop_head(dr, dc, next_dc); + + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = old_state; + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + }); + } else { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = old_state; + if (old_state & (DISPATCH_MACH_STATE_DIRTY | + DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) { + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } else { + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + } + }); + } + out: - return (dispatch_object_t)dmsg; + if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + + if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { + os_atomic_thread_fence(acquire); + pp = _dm_state_get_override(new_state); + goto again; + } + + if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + pp = _dm_state_get_override(new_state); + _dispatch_mach_send_barrier_drain_push(dm, pp); + } else { + if (needs_mgr) { + pp = _dm_state_get_override(new_state); + } else { + pp = 0; + } + if (!disconnecting) dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); + } + return returning_send_result; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_invoke(dispatch_mach_t dm, + dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t old_state, new_state; + pthread_priority_t pp_floor; + + uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK; + uint64_t canlock_state = 0; + + if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER; + } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + + if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) { + pp_floor = 0; + } else { + // _dispatch_queue_class_invoke will have applied the queue override + // (if any) before we get here. 
Else use the default base priority + // as an estimation of the priority we already asked for. + pp_floor = dm->_as_dq->dq_override; + if (!pp_floor) { + pp_floor = _dispatch_get_defaultpriority(); + pp_floor &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + } + } + +retry: + os_atomic_rmw_loop2o(dm->dm_refs, dm_state, old_state, new_state, acquire, { + new_state = old_state; + if (unlikely((old_state & canlock_mask) != canlock_state)) { + if (!(send_flags & DM_SEND_INVOKE_FLUSH)) { + os_atomic_rmw_loop_give_up(break); + } + new_state |= DISPATCH_MACH_STATE_DIRTY; + } else { + if (likely(pp_floor)) { + pthread_priority_t pp = _dm_state_get_override(old_state); + if (unlikely(pp > pp_floor)) { + os_atomic_rmw_loop_give_up({ + _dispatch_wqthread_override_start(tid_self, pp); + // Ensure that the root queue sees + // that this thread was overridden. + _dispatch_set_defaultpriority_override(); + pp_floor = pp; + goto retry; + }); + } + } + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + + if (unlikely((old_state & canlock_mask) != canlock_state)) { + return; + } + if (send_flags & DM_SEND_INVOKE_CANCEL) { + _dispatch_mach_cancel(dm); + } + _dispatch_mach_send_drain(dm, flags, send_flags); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) +{ + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dispatch_thread_frame_s dtf; + + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY); + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER); + // hide the mach channel (see _dispatch_mach_barrier_invoke comment) + _dispatch_thread_frame_stash(&dtf); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{ + _dispatch_mach_send_invoke(dm, flags, + DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER); + }); + _dispatch_thread_frame_unstash(&dtf); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_mach_send_push_wakeup(dispatch_mach_t dm, dispatch_object_t dou, - bool wakeup) +DISPATCH_NOINLINE +static void +_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, + pthread_priority_t pp) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN); + dc->dc_func = NULL; + dc->dc_ctxt = NULL; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + return _dispatch_queue_push(dm->_as_dq, dc, pp); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, + pthread_priority_t pp) { dispatch_mach_send_refs_t dr = dm->dm_refs; - struct dispatch_object_s *prev, *dc = dou._do; - dc->do_next = NULL; + uint64_t pp_state, old_state, new_state, state_flags = 0; + dispatch_lock_owner owner; + bool wakeup; + + // when pushing a send barrier that destroys + // the last reference to this channel, and the send queue is already + // draining on another thread, the send barrier may run as soon as + // _dispatch_mach_send_push_inline() returns. 
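/*
 * Editorial aside (not part of the patch): _dispatch_mach_send_push_inline(),
 * referenced in the comment above, is an instance of the os_mpsc_* pattern:
 * producers atomically swap the queue tail, and only the producer that found
 * the queue empty returns true, so exactly one pusher becomes responsible for
 * waking the drainer. A reduced C11 sketch of that push (names illustrative):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *_Atomic next;
};

struct mpsc {
	struct node *_Atomic tail;
	struct node *_Atomic head;
};

/* Returns true iff the queue was empty, i.e. this caller must wake the
 * consumer, mirroring _dispatch_mach_send_push_inline()'s return value. */
static bool
mpsc_push(struct mpsc *q, struct node *n)
{
	atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
	struct node *prev =
	    atomic_exchange_explicit(&q->tail, n, memory_order_release);
	if (prev) {
		/* Link behind the previous tail; a concurrent consumer may
		 * briefly observe a NULL next and must wait for this store. */
		atomic_store_explicit(&prev->next, n, memory_order_relaxed);
		return false;
	}
	atomic_store_explicit(&q->head, n, memory_order_relaxed);
	return true;
}
/* (end of aside; the patch resumes below) */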
+ _dispatch_retain(dm); + pp_state = _dm_state_override_from_priority(pp); + + wakeup = _dispatch_mach_send_push_inline(dr, dc); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) { + state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + } - prev = dispatch_atomic_xchg2o(dr, dm_tail, dc, release); - if (fastpath(prev)) { - prev->do_next = dc; + if (state_flags) { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = _dm_state_merge_override(old_state, pp_state); + new_state |= state_flags; + }); } else { - dr->dm_head = dc; + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, relaxed, { + new_state = _dm_state_merge_override(old_state, pp_state); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(break); + } + }); } - if (wakeup || !prev) { - _dispatch_wakeup(dm); + + pp = _dm_state_get_override(new_state); + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dm_state_needs_override(old_state, pp_state)) { + _dispatch_wqthread_override_start_check_owner(owner, pp, + &dr->dm_state_lock.dul_lock); + } + return _dispatch_release_tailcall(dm); } -} -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou) -{ - return _dispatch_mach_send_push_wakeup(dm, dou, false); + dispatch_wakeup_flags_t wflags = 0; + if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { + _dispatch_mach_send_barrier_drain_push(dm, pp); + } else if (wakeup || dr->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED)) { + wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME; + } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME; + } + if (wflags) { + return dx_wakeup(dm, pp, wflags); + } + return _dispatch_release_tailcall(dm); } DISPATCH_NOINLINE -static void -_dispatch_mach_send_drain(dispatch_mach_t dm) +static bool +_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, + dispatch_object_t dou, pthread_priority_t pp, + dispatch_mach_send_invoke_flags_t send_flags) { dispatch_mach_send_refs_t dr = dm->dm_refs; - struct dispatch_object_s *dc = NULL, *next_dc = NULL; - while (dr->dm_tail) { - _dispatch_wait_until(dc = fastpath(dr->dm_head)); - do { - next_dc = fastpath(dc->do_next); - dr->dm_head = next_dc; - if (!next_dc && !dispatch_atomic_cmpxchg2o(dr, dm_tail, dc, NULL, - relaxed)) { - _dispatch_wait_until(next_dc = fastpath(dc->do_next)); - dr->dm_head = next_dc; - } - if (!DISPATCH_OBJ_IS_VTABLE(dc)) { - if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { - // send barrier - // leave send queue locked until barrier has completed - return _dispatch_mach_push(dm, dc, - ((dispatch_continuation_t)dc)->dc_priority); - } -#if DISPATCH_MACH_SEND_SYNC - if (slowpath((long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT)){ - _dispatch_thread_semaphore_signal( - (_dispatch_thread_semaphore_t)dc->do_ctxt); - continue; - } -#endif // DISPATCH_MACH_SEND_SYNC - if (slowpath(!_dispatch_mach_reconnect_invoke(dm, dc))) { - goto out; - } - continue; + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t pp_state, old_state, new_state, canlock_mask, state_flags = 0; + dispatch_lock_owner owner; + + pp_state = _dm_state_override_from_priority(pp); + bool wakeup = _dispatch_mach_send_push_inline(dr, dou); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + } + + if (unlikely(dr->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + 
os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = _dm_state_merge_override(old_state, pp_state); + new_state |= state_flags; + }); + dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); + return false; + } + + canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK | + DISPATCH_MACH_STATE_PENDING_BARRIER; + if (state_flags) { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, seq_cst, { + new_state = _dm_state_merge_override(old_state, pp_state); + new_state |= state_flags; + if (likely((old_state & canlock_mask) == 0)) { + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; } - _dispatch_voucher_ktrace_dmsg_pop((dispatch_mach_msg_t)dc); - if (slowpath(dr->dm_disconnect_cnt) || - slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { - _dispatch_mach_msg_not_sent(dm, dc); - continue; + }); + } else { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, acquire, { + new_state = _dm_state_merge_override(old_state, pp_state); + if (new_state == old_state) { + os_atomic_rmw_loop_give_up(return false); } - if (slowpath(dc = _dispatch_mach_msg_send(dm, dc)._do)) { - goto out; + if (likely((old_state & canlock_mask) == 0)) { + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; } - } while ((dc = next_dc)); + }); } -out: - // if this is not a complete drain, we must undo some things - if (slowpath(dc)) { - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dr, dm_tail, NULL, dc, relaxed)) { - // wait for enqueue slow path to finish - _dispatch_wait_until(next_dc = fastpath(dr->dm_head)); - dc->do_next = next_dc; + + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dm_state_needs_override(old_state, pp_state)) { + _dispatch_wqthread_override_start_check_owner(owner, pp, + &dr->dm_state_lock.dul_lock); } - dr->dm_head = dc; + return false; } - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - _dispatch_wakeup(dm); -} -static inline void -_dispatch_mach_send(dispatch_mach_t dm) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (!fastpath(dr->dm_tail) || !fastpath(dispatch_atomic_cmpxchg2o(dr, - dm_sending, 0, 1, acquire))) { - return; + if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + dx_wakeup(dm, pp, DISPATCH_WAKEUP_OVERRIDING); + return false; } - _dispatch_object_debug(dm, "%s", __func__); - _dispatch_mach_send_drain(dm); + + // Ensure our message is still at the head of the queue and has not already + // been dequeued by another thread that raced us to the send queue lock. + // A plain load of the head and comparison against our object pointer is + // sufficient. 
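/*
 * Editorial aside (not part of the patch): the os_atomic_rmw_loop2o() loops
 * in this function are compare-and-swap loops over a single 64-bit dm_state
 * word that packs an owner tid, a DIRTY bit, and the highest pushed-in
 * override QoS. A reduced C11 sketch of the "merge override, then take the
 * lock only if unlocked" step; the bit layout and names are illustrative,
 * not the real dm_state layout:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define STATE_OWNER_MASK    0x00000000ffffffffull /* tid of the drainer */
#define STATE_DIRTY         0x0000000100000000ull /* new work was pushed */
#define STATE_OVERRIDE_MASK 0xffff000000000000ull /* max pushed-in QoS */

/* tid must be nonzero; override_bits is pre-shifted into OVERRIDE_MASK. */
static bool
state_trylock(_Atomic uint64_t *state, uint32_t tid, uint64_t override_bits)
{
	uint64_t old = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new;
	do {
		new = old;
		if (override_bits > (new & STATE_OVERRIDE_MASK)) {
			/* Record the override so the current owner redrains. */
			new = (new & ~STATE_OVERRIDE_MASK) | override_bits |
			    STATE_DIRTY;
		}
		if ((new & STATE_OWNER_MASK) == 0) {
			/* Unlocked: take ownership, consume the DIRTY bit. */
			new = (new & ~STATE_DIRTY) | tid;
		}
	} while (!atomic_compare_exchange_weak_explicit(state, &old, new,
	    memory_order_acquire, memory_order_relaxed));
	return (new & STATE_OWNER_MASK) == tid;
}
/* (end of aside; the patch resumes below) */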
+ if (unlikely(!(wakeup && dou._do == dr->dm_head))) { + // Don't request immediate send result for messages we don't own + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + } + return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags); } static void -_dispatch_mach_merge_kevent(dispatch_mach_t dm, +_dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, const _dispatch_kevent_qos_s *ke) { if (!(ke->fflags & dm->ds_pending_data_mask)) { return; } - _dispatch_mach_send(dm); + _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN, + DM_SEND_INVOKE_FLUSH); } +#pragma mark - +#pragma mark dispatch_mach_t + static inline mach_msg_option_t _dispatch_mach_checkin_options(void) { @@ -3906,55 +5454,186 @@ _dispatch_mach_priority_propagate(mach_msg_option_t options) } DISPATCH_NOINLINE -void -dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - mach_msg_option_t options) +static bool +_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + dispatch_continuation_t dc_wait, mach_msg_option_t options) { dispatch_mach_send_refs_t dr = dm->dm_refs; if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { - DISPATCH_CLIENT_CRASH("Message already enqueued"); + DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued"); } dispatch_retain(dmsg); - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); pthread_priority_t priority = _dispatch_mach_priority_propagate(options); options |= _dispatch_mach_send_options(); - _dispatch_mach_msg_set_options(dmsg, options & ~DISPATCH_MACH_OPTIONS_MASK); + dmsg->dmsg_options = options; mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - dmsg->dmsg_reply = (MACH_MSGH_BITS_LOCAL(msg->msgh_bits) == - MACH_MSG_TYPE_MAKE_SEND_ONCE && - MACH_PORT_VALID(msg->msgh_local_port) ? 
msg->msgh_local_port : - MACH_PORT_NULL); + dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg); bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE); dmsg->dmsg_priority = priority; dmsg->dmsg_voucher = _voucher_copy(); _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); - if ((!is_reply && slowpath(dr->dm_tail)) || - slowpath(dr->dm_disconnect_cnt) || - slowpath(dm->ds_atomic_flags & DSF_CANCELED) || - slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, - acquire))) { + + uint32_t send_status; + bool returning_send_result = false; + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) { + send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND; + } + if (is_reply && !dmsg->dmsg_reply && !dr->dm_disconnect_cnt && + !(dm->dq_atomic_flags & DSF_CANCELED)) { + // replies are sent to a send-once right and don't need the send queue + dispatch_assert(!dc_wait); + send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags); + dispatch_assert(send_status); + returning_send_result = !!(send_status & + DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT); + } else { _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_mach_send_push(dm, dmsg); + priority &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dispatch_object_t dou = { ._dmsg = dmsg }; + if (dc_wait) dou._dc = dc_wait; + returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou, + priority, send_flags); + } + if (returning_send_result) { + _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg); + if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = NULL; + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dispatch_release(dmsg); } - if (slowpath(dmsg = _dispatch_mach_msg_send(dm, dmsg)._dmsg)) { - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_mach_send_push_wakeup(dm, dmsg, true); + return returning_send_result; +} + +DISPATCH_NOINLINE +void +dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options) +{ + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + dispatch_assert(!returned_send_result); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options, dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); } - if (!is_reply && slowpath(dr->dm_tail)) { - return _dispatch_mach_send_drain(dm); + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); } - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - _dispatch_wakeup(dm); + *send_result = reason; + *send_error = err; } -static void +static inline +dispatch_mach_msg_t +_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, 
+ bool *returned_send_result) +{ + mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); + if (!reply_port) { + // use per-thread mach reply port + reply_port = _dispatch_get_thread_reply_port(); + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); + dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == + MACH_MSG_TYPE_MAKE_SEND_ONCE); + hdr->msgh_local_port = reply_port; + options |= DISPATCH_MACH_OWNED_REPLY_PORT; + } + + dispatch_mach_reply_refs_t dmr; +#if DISPATCH_DEBUG + dmr = _dispatch_calloc(1, sizeof(*dmr)); +#else + struct dispatch_mach_reply_refs_s dmr_buf = { }; + dmr = &dmr_buf; +#endif + struct dispatch_continuation_s dc_wait = { + .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, + .dc_data = dmsg, + .dc_other = dmr, + .dc_priority = DISPATCH_NO_PRIORITY, + .dc_voucher = DISPATCH_NO_VOUCHER, + }; + dmr->dmr_ctxt = dmsg->do_ctxt; + *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); + if (options & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_clear_thread_reply_port(reply_port); + } + dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port); +#if DISPATCH_DEBUG + free(dmr); +#endif + return dmsg; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options) +{ + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_WAIT_FOR_REPLY; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + dispatch_assert(!returned_send_result); + return reply; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_WAIT_FOR_REPLY; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; + return reply; +} + +DISPATCH_NOINLINE +static bool _dispatch_mach_disconnect(dispatch_mach_t dm) { dispatch_mach_send_refs_t dr = dm->dm_refs; + bool disconnected; if (dm->dm_dkev) { - _dispatch_mach_kevent_unregister(dm); + _dispatch_mach_notification_kevent_unregister(dm); } if (MACH_PORT_VALID(dr->dm_send)) { _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dr->dm_send); @@ -3964,50 +5643,53 @@ _dispatch_mach_disconnect(dispatch_mach_t dm) _dispatch_mach_msg_not_sent(dm, dr->dm_checkin); dr->dm_checkin = NULL; } - if (!TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { - dispatch_mach_reply_refs_t dmr, tmp; - TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp){ - _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + dispatch_mach_reply_refs_t dmr, tmp; + TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, 
dmr_list, tmp) { + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + if (dmr->dmr_dkev) { + _dispatch_mach_reply_kevent_unregister(dm, dmr, + DKEV_UNREGISTER_DISCONNECTED); + } else { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DKEV_UNREGISTER_DISCONNECTED); } } + disconnected = TAILQ_EMPTY(&dm->dm_refs->dm_replies); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); + return disconnected; } -DISPATCH_NOINLINE -static bool +static void _dispatch_mach_cancel(dispatch_mach_t dm) { - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (!fastpath(dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, acquire))) { - return false; - } _dispatch_object_debug(dm, "%s", __func__); - _dispatch_mach_disconnect(dm); + if (!_dispatch_mach_disconnect(dm)) return; if (dm->ds_dkev) { mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident; - _dispatch_source_kevent_unregister((dispatch_source_t)dm); - _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + _dispatch_source_kevent_unregister(dm->_as_ds); + if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) { + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + } + } else { + _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED, + DSF_ARMED | DSF_DEFERRED_DELETE); } - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - return true; } DISPATCH_NOINLINE static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) { - if (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { - if (slowpath(_dispatch_queue_get_current() != &_dispatch_mgr_q)) { - // send/reply kevents must be uninstalled on the manager queue - return false; - } - } - _dispatch_mach_disconnect(dm); + if (!_dispatch_mach_disconnect(dm)) return false; dispatch_mach_send_refs_t dr = dm->dm_refs; dr->dm_checkin = dou._dc->dc_data; dr->dm_send = (mach_port_t)dou._dc->dc_other; _dispatch_continuation_free(dou._dc); - (void)dispatch_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); + (void)os_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); _dispatch_object_debug(dm, "%s", __func__); + _dispatch_release(dm); // return true; } @@ -4017,47 +5699,34 @@ dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, dispatch_mach_msg_t checkin) { dispatch_mach_send_refs_t dr = dm->dm_refs; - (void)dispatch_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); + (void)os_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); if (MACH_PORT_VALID(send) && checkin) { dispatch_retain(checkin); - mach_msg_option_t options = _dispatch_mach_checkin_options(); - _dispatch_mach_msg_set_options(checkin, options); + checkin->dmsg_options = _dispatch_mach_checkin_options(); dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); } else { checkin = NULL; dr->dm_checkin_port = MACH_PORT_NULL; } dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); + dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT; + // actually called manually in _dispatch_mach_send_drain dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; dc->dc_ctxt = dc; dc->dc_data = checkin; dc->dc_other = (void*)(uintptr_t)send; - return _dispatch_mach_send_push(dm, dc); -} - -#if DISPATCH_MACH_SEND_SYNC -DISPATCH_NOINLINE -static void -_dispatch_mach_send_sync_slow(dispatch_mach_t dm) -{ - _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - struct dispatch_object_s dc = { - .do_vtable = (void *)(DISPATCH_OBJ_SYNC_SLOW_BIT), - .do_ctxt = (void*)sema, - }; - 
_dispatch_mach_send_push(dm, &dc); - _dispatch_thread_semaphore_wait(sema); - _dispatch_put_thread_semaphore(sema); + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_retain(dm); // + return _dispatch_mach_send_push(dm, dc, 0); } -#endif // DISPATCH_MACH_SEND_SYNC DISPATCH_NOINLINE mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t dm) { dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) { return MACH_PORT_DEAD; } return dr->dm_checkin_port; @@ -4076,121 +5745,149 @@ _dispatch_mach_connect_invoke(dispatch_mach_t dm) DISPATCH_NOINLINE void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, - dispatch_object_t dou DISPATCH_UNUSED, - dispatch_invoke_flags_t flags DISPATCH_UNUSED) + dispatch_invoke_flags_t flags) { - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); - dispatch_mach_refs_t dr = dm->ds_refs; + dispatch_thread_frame_s dtf; + dispatch_mach_refs_t dr; + dispatch_mach_t dm; mach_error_t err; unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); + _dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| + DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; + // hide mach channel + dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf); + dr = dm->ds_refs; dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); _dispatch_voucher_ktrace_dmsg_pop(dmsg); _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg); - _dispatch_adopt_priority_and_replace_voucher(dmsg->dmsg_priority, - dmsg->dmsg_voucher, DISPATCH_PRIORITY_ENFORCE); + (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority, + dmsg->dmsg_voucher, adopt_flags); dmsg->dmsg_voucher = NULL; - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, - dr->dm_handler_func); - _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, + dr->dm_handler_func); + }); + _dispatch_thread_frame_unstash(&dtf); _dispatch_introspection_queue_item_complete(dmsg); dispatch_release(dmsg); } DISPATCH_NOINLINE void -_dispatch_mach_barrier_invoke(void *ctxt) +_dispatch_mach_barrier_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) { - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); - dispatch_mach_refs_t dr = dm->ds_refs; - struct dispatch_continuation_s *dc = ctxt; - void *context = dc->dc_data; - dispatch_function_t barrier = dc->dc_other; - bool send_barrier = ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT); - - _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout(context, barrier); - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, dr->dm_handler_func); - _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); - if (send_barrier) { - (void)dispatch_atomic_dec2o(dm->dm_refs, dm_sending, release); + dispatch_thread_frame_s dtf; + dispatch_mach_t dm = dc->dc_other; + dispatch_mach_refs_t dr; + uintptr_t dc_flags = (uintptr_t)dc->dc_data; + 
unsigned long type = dc_type(dc); + + // hide mach channel from clients + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + // on the send queue, the mach channel isn't the current queue + // its target queue is the current one already + _dispatch_thread_frame_stash(&dtf); + } + dr = dm->ds_refs; + DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT); + _dispatch_continuation_pop_forwarded(dc, dm->dq_override_voucher, dc_flags,{ + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, + dr->dm_handler_func); + }); + }); + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + _dispatch_thread_frame_unstash(&dtf); } } DISPATCH_NOINLINE void dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t barrier) + dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = _dispatch_mach_barrier_invoke; - dc->dc_ctxt = dc; - dc->dc_data = context; - dc->dc_other = barrier; - _dispatch_continuation_voucher_set(dc, 0); - _dispatch_continuation_priority_set(dc, 0, 0); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + pthread_priority_t pp; - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dr->dm_tail) || slowpath(!dispatch_atomic_cmpxchg2o(dr, - dm_sending, 0, 1, acquire))) { - return _dispatch_mach_send_push(dm, dc); - } - // leave send queue locked until barrier has completed - return _dispatch_mach_push(dm, dc, dc->dc_priority); + _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, pp); } DISPATCH_NOINLINE void -dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t barrier) +dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); - dc->dc_func = _dispatch_mach_barrier_invoke; - dc->dc_ctxt = dc; - dc->dc_data = context; - dc->dc_other = barrier; - _dispatch_continuation_voucher_set(dc, 0); - _dispatch_continuation_priority_set(dc, 0, 0); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + pthread_priority_t pp; - return _dispatch_mach_push(dm, dc, dc->dc_priority); + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, pp); } DISPATCH_NOINLINE void -dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t func) { - dispatch_mach_send_barrier_f(dm, _dispatch_Block_copy(barrier), - _dispatch_call_block_and_release); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + 
_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); + return _dispatch_continuation_async(dm->_as_dq, dc); } DISPATCH_NOINLINE void dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { - dispatch_mach_receive_barrier_f(dm, _dispatch_Block_copy(barrier), - _dispatch_call_block_and_release); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); + return _dispatch_continuation_async(dm->_as_dq, dc); } DISPATCH_NOINLINE static void -_dispatch_mach_cancel_invoke(dispatch_mach_t dm) +_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) { dispatch_mach_refs_t dr = dm->ds_refs; - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); + + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); + }); dm->dm_cancel_handler_called = 1; _dispatch_release(dm); // the retain is done at creation time } @@ -4199,15 +5896,55 @@ DISPATCH_NOINLINE void dispatch_mach_cancel(dispatch_mach_t dm) { - dispatch_source_cancel((dispatch_source_t)dm); + dispatch_source_cancel(dm->_as_ds); +} + +static void +_dispatch_mach_install(dispatch_mach_t dm, pthread_priority_t pp) +{ + uint32_t disconnect_cnt; + + if (dm->ds_dkev) { + _dispatch_source_kevent_register(dm->_as_ds, pp); + } + if (dm->ds_is_direct_kevent) { + pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | + _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG | + _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + // _dispatch_mach_reply_kevent_register assumes this has been done + // which is unlike regular sources or queues, the DEFAULTQUEUE flag + // is used so that the priority of that channel doesn't act as a floor + // QoS for incoming messages (26761457) + dm->dq_priority = (dispatch_priority_t)pp; + } + dm->ds_is_installed = true; + if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_refs, dm_disconnect_cnt, + DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) { + DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed"); + } +} + +void +_dispatch_mach_finalize_activation(dispatch_mach_t dm) +{ + if (dm->ds_is_direct_kevent && !dm->ds_is_installed) { + dispatch_source_t ds = dm->_as_ds; + pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); + if (pp) _dispatch_mach_install(dm, pp); + } + + // call "super" + _dispatch_queue_finalize_activation(dm->_as_dq); } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t -_dispatch_mach_invoke2(dispatch_object_t dou, - _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) +_dispatch_mach_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, + uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) { dispatch_mach_t dm = dou._dm; + dispatch_queue_t retq = NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); // This function performs all mach channel actions. Each action is // responsible for verifying that it takes place on the appropriate queue. 
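[Editor's note] The context lines above state the invoke contract that the rewritten _dispatch_mach_invoke2 in the next hunk follows: every action first verifies it is running on the queue it requires, and when it is not, the invoke returns that queue so the runtime can re-drive the invoke there. A minimal sketch of that pattern in plain C, using hypothetical my_channel_* names rather than the patch's internal API:

#include <dispatch/dispatch.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct my_channel_s {
	dispatch_queue_t kevent_queue; // where (un)registration must happen
	dispatch_queue_t target_queue; // where handler callouts must run
	bool installed;
	bool has_pending;
} *my_channel_t;

// Returns NULL when every action ran on its appropriate queue, or the
// queue on which this invoke must be re-driven.
static dispatch_queue_t
my_channel_invoke2(my_channel_t ch, dispatch_queue_t current_queue)
{
	if (!ch->installed) {
		if (current_queue != ch->kevent_queue) {
			return ch->kevent_queue; // wrong queue: re-drive there
		}
		ch->installed = true; // stand-in for kevent registration
	}
	if (ch->has_pending) {
		if (current_queue != ch->target_queue) {
			return ch->target_queue; // deliveries run on the target queue
		}
		ch->has_pending = false; // stand-in for draining to the handler
	}
	return NULL; // nothing left to do on any queue
}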
@@ -4215,122 +5952,158 @@ _dispatch_mach_invoke2(dispatch_object_t dou, // correct queue will be returned and the invoke will be re-driven on that // queue. - // The order of tests here in invoke and in probe should be consistent. + // The order of tests here in invoke and in wakeup should be consistent. - dispatch_queue_t dq = _dispatch_queue_get_current(); dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_queue_t dkq = &_dispatch_mgr_q; + + if (dm->ds_is_direct_kevent) { + dkq = dm->do_targetq; + } if (slowpath(!dm->ds_is_installed)) { - // The channel needs to be installed on the manager queue. - if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; - } - if (dm->ds_dkev) { - _dispatch_source_kevent_register((dispatch_source_t)dm); - } - dm->ds_is_installed = true; - _dispatch_mach_send(dm); - // Apply initial target queue change - _dispatch_queue_drain(dou); - if (dm->dq_items_tail) { - return dm->do_targetq; - } - } else if (dm->dq_items_tail) { - // The channel has pending messages to deliver to the target queue. - if (dq != dm->do_targetq) { - return dm->do_targetq; - } - dispatch_queue_t tq = dm->do_targetq; - if (slowpath(_dispatch_queue_drain(dou))) { - DISPATCH_CLIENT_CRASH("Sync onto mach channel"); - } - if (slowpath(tq != dm->do_targetq)) { - // An item on the channel changed the target queue - return dm->do_targetq; - } - } else if (dr->dm_sending) { - // Sending and uninstallation below require the send lock, the channel - // will be woken up when the lock is dropped - return NULL; - } else if (dr->dm_tail) { - if (slowpath(dr->dm_needs_mgr) || (slowpath(dr->dm_disconnect_cnt) && - (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)))) { - // Send/reply kevents need to be installed or uninstalled - if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; - } + // The channel needs to be installed on the kevent queue. + if (dq != dkq) { + return dkq; + } + _dispatch_mach_install(dm, _dispatch_get_defaultpriority()); + } + + if (_dispatch_queue_class_probe(dm)) { + if (dq == dm->do_targetq) { + retq = _dispatch_queue_serial_drain(dm->_as_dq, flags, owned, NULL); + } else { + retq = dm->do_targetq; } - if (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || - (dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt) { + } + + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + + if (dr->dm_tail) { + bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && + (dm->dm_dkev || !dm->ds_is_direct_kevent)); + if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || + (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { // The channel has pending messages to send. - _dispatch_mach_send(dm); + if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) { + return retq ? retq : &_dispatch_mgr_q; + } + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (dq != &_dispatch_mgr_q) { + send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER; + } + _dispatch_mach_send_invoke(dm, flags, send_flags); } - } else if (dm->ds_atomic_flags & DSF_CANCELED){ + } else if (dqf & DSF_CANCELED) { // The channel has been cancelled and needs to be uninstalled from the // manager queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. 
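[Editor's note] The cancellation ordering spelled out in the comment above — uninstall from the kevent/manager queue first, then run the cancellation handler on the target queue — is what a client observes as DISPATCH_MACH_CANCELED arriving last in its handler. A hedged client-side sketch of the private channel API this code implements (the dispatch_mach_* calls come from private/mach_private.h in this patch; the receive-port setup and names like make_channel are illustrative assumptions):

#include <dispatch/dispatch.h>
#include <mach/mach.h>
#include "private/mach_private.h" // private interface modified by this patch

static dispatch_mach_t
make_channel(mach_port_t receive_port)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.channel-q", NULL);
	dispatch_mach_t dm = dispatch_mach_create("com.example.channel", q,
			^(dispatch_mach_reason_t reason, dispatch_mach_msg_t msg,
					mach_error_t error) {
		(void)msg; (void)error;
		switch (reason) {
		case DISPATCH_MACH_CONNECTED:
			break; // delivered first, before any other event
		case DISPATCH_MACH_MESSAGE_RECEIVED:
			break; // inspect msg here
		case DISPATCH_MACH_CANCELED:
			break; // delivered last, on the channel's target queue
		default:
			break;
		}
	});
	dispatch_mach_connect(dm, receive_port, MACH_PORT_NULL, NULL);
	return dm;
}

// Teardown later: dispatch_mach_cancel(dm); dispatch_release(dm);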
- if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || - !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + return retq; + } + if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; + return retq ? retq : &_dispatch_mgr_q; } - if (!_dispatch_mach_cancel(dm)) { - return NULL; + _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL); + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + if (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { + // waiting for the delivery of a deferred delete event + // or deletion didn't happen because send_invoke couldn't + // acquire the send lock + return retq; } } if (!dm->dm_cancel_handler_called) { if (dq != dm->do_targetq) { - return dm->do_targetq; + return retq ? retq : dm->do_targetq; } - _dispatch_mach_cancel_invoke(dm); + _dispatch_mach_cancel_invoke(dm, flags); } } - return NULL; + + return retq; } DISPATCH_NOINLINE void -_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou, - dispatch_invoke_flags_t flags) +_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(dm, dou._dc, flags, _dispatch_mach_invoke2); + _dispatch_queue_class_invoke(dm, flags, _dispatch_mach_invoke2); } -unsigned long -_dispatch_mach_probe(dispatch_mach_t dm) +void +_dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { // This function determines whether the mach channel needs to be invoked. // The order of tests here in probe and in invoke should be consistent. dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - if (slowpath(!dm->ds_is_installed)) { - // The channel needs to be installed on the manager queue. - return true; - } else if (_dispatch_queue_class_probe(dm)) { - // The source has pending messages to deliver to the target queue. - return true; - } else if (dr->dm_sending) { + if (dm->ds_is_direct_kevent) { + dkq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + + if (!dm->ds_is_installed) { + // The channel needs to be installed on the kevent queue. + tq = dkq; + goto done; + } + + if (_dispatch_queue_class_probe(dm)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + goto done; + } + + if (_dispatch_lock_is_locked(dr->dm_state_lock.dul_lock)) { // Sending and uninstallation below require the send lock, the channel // will be woken up when the lock is dropped - return false; - } else if (dr->dm_tail && - (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || - (dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt)) { - // The channel has pending messages to send. 
- return true; - } else if (dm->ds_atomic_flags & DSF_CANCELED) { - if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || - !TAILQ_EMPTY(&dm->dm_refs->dm_replies) || - !dm->dm_cancel_handler_called) { - // The channel needs to be uninstalled from the manager queue, or + _dispatch_queue_reinstate_override_priority(dm, (dispatch_priority_t)pp); + goto done; + } + + if (dr->dm_tail) { + bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && + (dm->dm_dkev || !dm->ds_is_direct_kevent)); + if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || + (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { + if (unlikely(requires_mgr)) { + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } else { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } else { + // can happen when we can't send because the port is full + // but we should not lose the override + _dispatch_queue_reinstate_override_priority(dm, + (dispatch_priority_t)pp); + } + } else if (dqf & DSF_CANCELED) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + } else if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { + // The channel needs to be uninstalled from the manager queue + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } else if (!dm->dm_cancel_handler_called) { // the cancellation handler needs to be delivered to the target // queue. - return true; + tq = DISPATCH_QUEUE_WAKEUP_TARGET; } } - // Nothing to do. - return false; + +done: + if (tq) { + return _dispatch_queue_class_wakeup(dm->_as_dq, pp, flags, tq); + } else if (pp) { + return _dispatch_queue_class_override_drainer(dm->_as_dq, pp, flags); + } else if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dm); + } } #pragma mark - @@ -4342,7 +6115,7 @@ dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, { if (slowpath(size < sizeof(mach_msg_header_t)) || slowpath(destructor && !msg)) { - DISPATCH_CLIENT_CRASH("Empty message"); + DISPATCH_CLIENT_CRASH(size, "Empty message"); } dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), sizeof(struct dispatch_mach_msg_s) + @@ -4410,7 +6183,7 @@ _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, " "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1); offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " - "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->dmsg_buf); + "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf); mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); if (hdr->msgh_id) { offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", @@ -4465,11 +6238,19 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, bool received = false; size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; - // XXX FIXME -- allocate these elsewhere bufRequest = alloca(rcv_size); + bufRequest->RetCode = 0; + for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size); + p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + bufReply = alloca(rcv_size); bufReply->Head.msgh_size = 0; - bufRequest->RetCode = 0; + for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size); + p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } #if DISPATCH_DEBUG options |= MACH_RCV_LARGE; // rdar://problem/8422992 @@ -4477,7 +6258,7 @@ 
dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options = options; // XXX FIXME -- change this to not starve out the target queue for (;;) { - if (DISPATCH_OBJECT_SUSPENDED(ds) || (--cnt == 0)) { + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { options &= ~MACH_RCV_MSG; tmp_options &= ~MACH_RCV_MSG; @@ -4669,10 +6450,15 @@ _evflagstr2(uint16_t *flagsp) _evflag2(EV_RECEIPT); _evflag2(EV_DISPATCH); _evflag2(EV_UDATA_SPECIFIC); +#ifdef EV_POLL _evflag2(EV_POLL); +#endif +#ifdef EV_OOBAND _evflag2(EV_OOBAND); +#endif _evflag2(EV_ERROR); _evflag2(EV_EOF); + _evflag2(EV_VANISHED); *flagsp = 0; return "EV_UNKNOWN "; } @@ -4697,24 +6483,25 @@ _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) dispatch_queue_t target = ds->do_targetq; return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, " "mask = 0x%lx, pending_data = 0x%lx, registered = %d, " - "armed = %d, deleted = %d%s%s, canceled = %d, needs_mgr = %d, ", + "armed = %d, deleted = %d%s, canceled = %d, ", target && target->dq_label ? target->dq_label : "", target, ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data, - ds->ds_is_installed, (bool)(ds->ds_atomic_flags & DSF_ARMED), - (bool)(ds->ds_atomic_flags & DSF_DELETED), ds->ds_pending_delete ? - " (pending)" : "", (ds->ds_atomic_flags & DSF_ONESHOT) ? - " (oneshot)" : "", (bool)(ds->ds_atomic_flags & DSF_CANCELED), - ds->ds_needs_mgr); + ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED), + (bool)(ds->dq_atomic_flags & DSF_DELETED), + (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "", + (bool)(ds->dq_atomic_flags & DSF_CANCELED)); } static size_t _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_source_refs_t dr = ds->ds_refs; - return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx," - " last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", - ds_timer(dr).target, ds_timer(dr).deadline, ds_timer(dr).last_fire, - ds_timer(dr).interval, ds_timer(dr).flags); + return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx" + ", last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", + (unsigned long long)ds_timer(dr).target, + (unsigned long long)ds_timer(dr).deadline, + (unsigned long long)ds_timer(dr).last_fire, + (unsigned long long)ds_timer(dr).interval, ds_timer(dr).flags); } size_t @@ -4728,30 +6515,39 @@ _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) if (ds->ds_is_timer) { offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } + const char *filter; + if (!ds->ds_dkev) { + filter = "????"; + } else if (ds->ds_is_custom_source) { + filter = _evfiltstr((int16_t)(uintptr_t)ds->ds_dkev); + } else { + filter = _evfiltstr(ds->ds_dkev->dk_kevent.filter); + } offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " "filter = %s }", ds->ds_dkev, ds->ds_is_direct_kevent ? " (direct)" - : "", ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : - "????"); + : "", filter); return offset; } +#if HAVE_MACH static size_t _dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz) { dispatch_queue_t target = dm->do_targetq; return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " - "sending = %d, disconnected = %d, canceled = %d ", + "send state = %016llx, disconnected = %d, canceled = %d ", target && target->dq_label ? 
target->dq_label : "", target, dm->ds_dkev ?(mach_port_t)dm->ds_dkev->dk_kevent.ident:0, dm->dm_refs->dm_send, dm->dm_dkev ?(mach_port_t)dm->dm_dkev->dk_kevent.ident:0, - dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) ? + dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) ? " (armed)" : "", dm->dm_refs->dm_checkin_port, dm->dm_refs->dm_checkin ? " (pending)" : "", - dm->dm_refs->dm_sending, dm->dm_refs->dm_disconnect_cnt, - (bool)(dm->ds_atomic_flags & DSF_CANCELED)); + dm->dm_refs->dm_state, dm->dm_refs->dm_disconnect_cnt, + (bool)(dm->dq_atomic_flags & DSF_CANCELED)); } + size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) { @@ -4764,19 +6560,44 @@ _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); return offset; } +#endif // HAVE_MACH #if DISPATCH_DEBUG DISPATCH_NOINLINE static void -_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev, const char* str) +dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, + int i, int n, const char *function, unsigned int line) { char flagstr[256]; - _dispatch_debug("kevent[%p] = { ident = 0x%llx, filter = %s, " + char i_n[31]; + + if (n > 1) { + snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n); + } else { + i_n[0] = '\0'; + } +#if DISPATCH_USE_KEVENT_QOS + _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " - "ext[0] = 0x%llx, ext[1] = 0x%llx }: %s", kev, kev->ident, + "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, " + "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, - kev->ext[0], kev->ext[1], str); + kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3], + function, line); +#else + _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " + "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " + "ext[0] = 0x%llx, ext[1] = 0x%llx }: %s #%u", verb, kev, i_n, + kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, + sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, +#ifndef IGNORE_KEVENT64_EXT + kev->ext[0], kev->ext[1], +#else + 0ull, 0ull, +#endif + function, line); +#endif } static void @@ -4818,9 +6639,6 @@ _dispatch_kevent_debugger2(void *context) fprintf(debug_stream, "<head><title>PID %u</title></head>\n", getpid()); fprintf(debug_stream, "<body>\n<ul>\n");
- //fprintf(debug_stream, "<tr><td>DK</td><td>DK</td><td>DK</td><td>DK</td>" - // "<td>DK</td><td>DK</td><td>DK</td></tr>\n"); - for (i = 0; i < DSL_HASH_SIZE; i++) { if (TAILQ_EMPTY(&_dispatch_sources[i])) { continue; } @@ -4835,16 +6653,16 @@ _dispatch_kevent_debugger2(void *context) fprintf(debug_stream, "\t\t<ul>\n"); TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { ds = _dispatch_source_from_refs(dr); - fprintf(debug_stream, "\t\t\t<li>DS %p refcnt 0x%x suspend " - "0x%x data 0x%lx mask 0x%lx flags 0x%x</li>\n", - ds, ds->do_ref_cnt + 1, ds->do_suspend_cnt, + fprintf(debug_stream, "\t\t\t<li>DS %p refcnt 0x%x state " + "0x%llx data 0x%lx mask 0x%lx flags 0x%x</li>\n", + ds, ds->do_ref_cnt + 1, ds->dq_state, ds->ds_pending_data, ds->ds_pending_data_mask, - ds->ds_atomic_flags); - if (ds->do_suspend_cnt == DISPATCH_OBJECT_SUSPEND_LOCK) { + ds->dq_atomic_flags); + if (_dq_state_is_enqueued(ds->dq_state)) { dispatch_queue_t dq = ds->do_targetq; - fprintf(debug_stream, "\t\t<br>DQ: %p refcnt 0x%x suspend " - "0x%x label: %s\n", dq, dq->do_ref_cnt + 1, - dq->do_suspend_cnt, dq->dq_label ? dq->dq_label:""); + fprintf(debug_stream, "\t\t<br>DQ: %p refcnt 0x%x state " + "0x%llx label: %s\n", dq, dq->do_ref_cnt + 1, + dq->dq_state, dq->dq_label ?: ""); } } fprintf(debug_stream, "\t\t</ul>\n");
@@ -4884,9 +6702,11 @@ _dispatch_kevent_debugger(void *context DISPATCH_UNUSED) int val, r, fd, sock_opt = 1; socklen_t slen = sizeof(sa_u); +#ifndef __linux__ if (issetugid()) { return; } +#endif valstr = getenv("LIBDISPATCH_DEBUGGER"); if (!valstr) { return; diff --git a/src/source_internal.h b/src/source_internal.h index 6e8f40f..41b6d11 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -38,6 +38,7 @@ #define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 4) #define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 4) +#if HAVE_MACH // NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t // bit values must not overlap as they share the same kevent fflags ! @@ -68,28 +69,32 @@ enum { DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20, DISPATCH_MACH_RECV_NO_SENDERS = 0x40, }; +#endif // HAVE_MACH enum { + /* DISPATCH_TIMER_STRICT 0x1 */ + /* DISPATCH_TIMER_BACKGROUND = 0x2, */ DISPATCH_TIMER_WALL_CLOCK = 0x4, DISPATCH_TIMER_INTERVAL = 0x8, DISPATCH_TIMER_WITH_AGGREGATE = 0x10, + /* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */ + DISPATCH_TIMER_AFTER = 0x40, }; -// low bits are timer QoS class #define DISPATCH_TIMER_QOS_NORMAL 0u #define DISPATCH_TIMER_QOS_CRITICAL 1u #define DISPATCH_TIMER_QOS_BACKGROUND 2u #define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1) -#define DISPATCH_TIMER_QOS(tidx) ((uintptr_t)(tidx) & 0x3ul) +#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul) #define DISPATCH_TIMER_KIND_WALL 0u #define DISPATCH_TIMER_KIND_MACH 1u #define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1) -#define DISPATCH_TIMER_KIND(tidx) (((uintptr_t)(tidx) >> 2) & 0x1ul) +#define DISPATCH_TIMER_KIND(tidx) ((uintptr_t)(tidx) & 0x1ul) -#define DISPATCH_TIMER_INDEX(kind, qos) (((kind) << 2) | (qos)) +#define DISPATCH_TIMER_INDEX(kind, qos) ((qos) << 1 | (kind)) #define DISPATCH_TIMER_INDEX_DISARM \ - DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_COUNT, 0) + DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT) #define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1) #define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \ DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? 
\ @@ -106,6 +111,11 @@ struct dispatch_kevent_s { typedef struct dispatch_kevent_s *dispatch_kevent_t; +typedef typeof(((dispatch_kevent_t)NULL)->dk_kevent.udata) _dispatch_kevent_qos_udata_t; + +#define DISPATCH_KEV_CUSTOM_ADD ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_ADD) +#define DISPATCH_KEV_CUSTOM_OR ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_OR) + struct dispatch_source_type_s { _dispatch_kevent_qos_s ke; uint64_t mask; @@ -134,7 +144,7 @@ enum { typedef struct dispatch_source_refs_s { TAILQ_ENTRY(dispatch_source_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t - dispatch_continuation_t ds_handler[3]; + dispatch_continuation_t volatile ds_handler[3]; } *dispatch_source_refs_t; typedef struct dispatch_timer_source_refs_s { @@ -165,42 +175,43 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr) return DISPATCH_TIMER_IDENT(ds_timer(dr).flags); } -// ds_atomic_flags bits -#define DSF_CANCELED 1u // cancellation has been requested -#define DSF_ARMED 2u // source is armed -#define DSF_DELETED 4u // source received EV_DELETE event -#define DSF_ONESHOT 8u // source received EV_ONESHOT event - -#define DISPATCH_SOURCE_HEADER(refs) \ - dispatch_kevent_t ds_dkev; \ - dispatch_##refs##_refs_t ds_refs; \ - unsigned int ds_atomic_flags; \ +#define _DISPATCH_SOURCE_HEADER(refs) \ + DISPATCH_QUEUE_HEADER(refs); \ + /* LP64: fills 32bit hole in QUEUE_HEADER */ \ unsigned int \ ds_is_level:1, \ ds_is_adder:1, \ ds_is_installed:1, \ ds_is_direct_kevent:1, \ + ds_is_custom_source:1, \ ds_needs_rearm:1, \ - ds_pending_delete:1, \ - ds_needs_mgr:1, \ ds_is_timer:1, \ ds_vmpressure_override:1, \ - ds_memorystatus_override:1, \ + ds_memorypressure_override:1, \ dm_handler_is_block:1, \ dm_connect_handler_called:1, \ dm_cancel_handler_called:1; \ + dispatch_kevent_t ds_dkev; \ + dispatch_##refs##_refs_t ds_refs; \ unsigned long ds_pending_data_mask; -DISPATCH_CLASS_DECL(source); +#define DISPATCH_SOURCE_HEADER(refs) \ + struct dispatch_source_s _as_ds[0]; \ + _DISPATCH_SOURCE_HEADER(refs) + +DISPATCH_CLASS_DECL_BARE(source); +_OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object); + +#if DISPATCH_PURE_C struct dispatch_source_s { - DISPATCH_STRUCT_HEADER(source); - DISPATCH_QUEUE_HEADER; - DISPATCH_SOURCE_HEADER(source); + _DISPATCH_SOURCE_HEADER(source); unsigned long ds_ident_hack; unsigned long ds_data; unsigned long ds_pending_data; -}; +} DISPATCH_QUEUE_ALIGN; +#endif +#if HAVE_MACH // Mach channel state which may contain references to the channel object // layout must match dispatch_source_refs_s struct dispatch_mach_refs_s { @@ -216,19 +227,39 @@ struct dispatch_mach_reply_refs_s { uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t dispatch_kevent_t dmr_dkev; void *dmr_ctxt; - pthread_priority_t dmr_priority; + mach_port_t dmr_reply; + dispatch_priority_t dmr_priority; voucher_t dmr_voucher; TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; }; typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; +#define _DISPATCH_MACH_STATE_UNUSED_MASK_2 0xff00000000000000ull +#define DISPATCH_MACH_STATE_OVERRIDE_MASK 0x00ffff0000000000ull +#define _DISPATCH_MACH_STATE_UNUSED_MASK_1 0x000000f000000000ull +#define DISPATCH_MACH_STATE_DIRTY 0x0000000800000000ull +#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE 0x0000000400000000ull +#define _DISPATCH_MACH_STATE_UNUSED_MASK_0 0x0000000200000000ull +#define DISPATCH_MACH_STATE_PENDING_BARRIER 0x0000000100000000ull +#define DISPATCH_MACH_STATE_UNLOCK_MASK 0x00000000ffffffffull + struct 
dispatch_mach_send_refs_s { TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t dispatch_mach_msg_t dm_checkin; TAILQ_HEAD(, dispatch_mach_reply_refs_s) dm_replies; + dispatch_unfair_lock_s dm_replies_lock; +#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000) +#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0) +#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1) uint32_t volatile dm_disconnect_cnt; - uint32_t volatile dm_sending; + union { + uint64_t volatile dm_state; + DISPATCH_STRUCT_LITTLE_ENDIAN_2( + dispatch_unfair_lock_s dm_state_lock, + uint32_t dm_state_bits + ); + }; unsigned int dm_needs_mgr:1; struct dispatch_object_s *volatile dm_tail; struct dispatch_object_s *volatile dm_head; @@ -237,17 +268,21 @@ struct dispatch_mach_send_refs_s { typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; DISPATCH_CLASS_DECL(mach); +#if DISPATCH_PURE_C struct dispatch_mach_s { - DISPATCH_STRUCT_HEADER(mach); - DISPATCH_QUEUE_HEADER; DISPATCH_SOURCE_HEADER(mach); dispatch_kevent_t dm_dkev; dispatch_mach_send_refs_t dm_refs; -}; +} DISPATCH_QUEUE_ALIGN; +#endif DISPATCH_CLASS_DECL(mach_msg); struct dispatch_mach_msg_s { - DISPATCH_STRUCT_HEADER(mach_msg); + DISPATCH_OBJECT_HEADER(mach_msg); + union { + mach_msg_option_t dmsg_options; + mach_error_t dmsg_error; + }; mach_port_t dmsg_reply; pthread_priority_t dmsg_priority; voucher_t dmsg_voucher; @@ -258,6 +293,9 @@ struct dispatch_mach_msg_s { char dmsg_buf[0]; }; }; +#endif // HAVE_MACH + +extern const struct dispatch_source_type_s _dispatch_source_type_after; #if TARGET_OS_EMBEDDED #define DSL_HASH_SIZE 64u // must be a power of two @@ -265,31 +303,49 @@ struct dispatch_mach_msg_s { #define DSL_HASH_SIZE 256u // must be a power of two #endif +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc); void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds); -void _dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou, - dispatch_invoke_flags_t flags); -unsigned long _dispatch_source_probe(dispatch_source_t ds); +void _dispatch_source_finalize_activation(dispatch_source_t ds); +void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags); +void _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); -void _dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds, - void *ctxt, dispatch_function_t handler); +void _dispatch_source_set_event_handler_continuation(dispatch_source_t ds, + dispatch_continuation_t dc); +DISPATCH_EXPORT // for firehose server +void _dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, + unsigned long val); +#if HAVE_MACH void _dispatch_mach_dispose(dispatch_mach_t dm); -void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou, - dispatch_invoke_flags_t flags); -unsigned long _dispatch_mach_probe(dispatch_mach_t dm); +void _dispatch_mach_finalize_activation(dispatch_mach_t dm); +void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags); +void _dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); size_t 
_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); -void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_object_t dou, +void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_invoke_flags_t flags); -size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz); +size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, + size_t bufsiz); -void _dispatch_mach_barrier_invoke(void *ctxt); - -unsigned long _dispatch_mgr_wakeup(dispatch_queue_t dq); -void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_object_t dou, +void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags); +void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, dispatch_invoke_flags_t flags); +#endif // HAVE_MACH + +void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_flags_t flags); +#if DISPATCH_USE_KEVENT_WORKQUEUE +void _dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, + int *nevents); +#endif #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/swift/Block.swift b/src/swift/Block.swift new file mode 100644 index 0000000..c1266ce --- /dev/null +++ b/src/swift/Block.swift @@ -0,0 +1,114 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public struct DispatchWorkItemFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let barrier = DispatchWorkItemFlags(rawValue: 0x1) + + @available(OSX 10.10, iOS 8.0, *) + public static let detached = DispatchWorkItemFlags(rawValue: 0x2) + + @available(OSX 10.10, iOS 8.0, *) + public static let assignCurrentContext = DispatchWorkItemFlags(rawValue: 0x4) + + @available(OSX 10.10, iOS 8.0, *) + public static let noQoS = DispatchWorkItemFlags(rawValue: 0x8) + + @available(OSX 10.10, iOS 8.0, *) + public static let inheritQoS = DispatchWorkItemFlags(rawValue: 0x10) + + @available(OSX 10.10, iOS 8.0, *) + public static let enforceQoS = DispatchWorkItemFlags(rawValue: 0x20) +} + +@available(OSX 10.10, iOS 8.0, *) +public class DispatchWorkItem { + internal var _block: _DispatchBlock + internal var _group: DispatchGroup? + + public init(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @convention(block) () -> ()) { + _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(flags.rawValue), + qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) + } + + // Used by DispatchQueue.synchronously<T> to provide a @noescape path through + // dispatch_block_t, as we know the lifetime of the block in question. 
+ internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: @noescape () -> ()) { + _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(flags.rawValue), noescapeBlock) + } + + public func perform() { + if let g = _group { + g.enter() + defer { g.leave() } + } + _block() + } + + public func wait() { + _ = dispatch_block_wait(_block, DispatchTime.distantFuture.rawValue) + } + + public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { + return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .Success : .TimedOut + } + + public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { + return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? .Success : .TimedOut + } + + public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute: @convention(block) () -> Void) { + if qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: execute) + dispatch_block_notify(_block, queue.__wrapped, item._block) + } else { + dispatch_block_notify(_block, queue.__wrapped, execute) + } + } + + public func notify(queue: DispatchQueue, execute: DispatchWorkItem) { + dispatch_block_notify(_block, queue.__wrapped, execute._block) + } + + public func cancel() { + dispatch_block_cancel(_block) + } + + public var isCancelled: Bool { + return dispatch_block_testcancel(_block) != 0 + } +} + +@available(OSX 10.10, iOS 8.0, *) +public extension DispatchWorkItem { + @available(*, deprecated, renamed: "DispatchWorkItem.wait(self:wallTimeout:)") + public func wait(timeout: DispatchWallTime) -> Int { + switch wait(wallTimeout: timeout) { + case .Success: return 0 + case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT + } + } +} + +/// The dispatch_block_t typealias is different from usual closures in that it +/// uses @convention(block). This is to avoid unnecessary bridging between +/// C blocks and Swift closures, which interferes with dispatch APIs that depend +/// on the referential identity of a block. Particularly, dispatch_block_create. +internal typealias _DispatchBlock = @convention(block) () -> Void +internal typealias dispatch_block_t = @convention(block) () -> Void + +@_silgen_name("_swift_dispatch_block_create_noescape") +internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: @noescape () -> ()) -> _DispatchBlock diff --git a/src/swift/Data.swift b/src/swift/Data.swift new file mode 100644 index 0000000..0d21e27 --- /dev/null +++ b/src/swift/Data.swift @@ -0,0 +1,277 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public struct DispatchData : RandomAccessCollection { + public typealias Iterator = DispatchDataIterator + public typealias Index = Int + public typealias Indices = DefaultRandomAccessIndices<DispatchData> + + public static let empty: DispatchData = DispatchData(data: _swift_dispatch_data_empty()) + +#if false /* FIXME: dragging in _TMBO (Objective-C) */ + public enum Deallocator { + /// Use `free` + case free + + /// Use `munmap` + case unmap + + /// A custom deallocator + case custom(DispatchQueue?, @convention(block) () -> Void) + + private var _deallocator: (DispatchQueue?, @convention(block) () -> Void) { + switch self { + case .free: return (nil, _dispatch_data_destructor_free()) + case .unmap: return (nil, _dispatch_data_destructor_munmap()) + case .custom(let q, let b): return (q, b) + } + } + } +#endif + internal var __wrapped: dispatch_data_t + + /// Initialize a `Data` with copied memory content. + /// + /// - parameter bytes: A pointer to the memory. It will be copied. + /// - parameter count: The number of bytes to copy. + public init(bytes buffer: UnsafeBufferPointer<UInt8>) { + __wrapped = dispatch_data_create( + buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default()) + } +#if false /* FIXME: dragging in _TMBO (Objective-C) */ + /// Initialize a `Data` without copying the bytes. + /// + /// - parameter bytes: A pointer to the bytes. + /// - parameter count: The size of the bytes. + /// - parameter deallocator: Specifies the mechanism to free the indicated buffer. + public init(bytesNoCopy bytes: UnsafeBufferPointer<UInt8>, deallocator: Deallocator = .free) { + let (q, b) = deallocator._deallocator + + __wrapped = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b) + } +#endif + internal init(data: dispatch_data_t) { + __wrapped = data + } + + public var count: Int { + return CDispatch.dispatch_data_get_size(__wrapped) + } + + public func withUnsafeBytes<Result, ContentType>( + body: @noescape (UnsafePointer<ContentType>) throws -> Result) rethrows -> Result + { + var ptr: UnsafePointer<Void>? = nil + var size = 0; + let data = CDispatch.dispatch_data_create_map(__wrapped, &ptr, &size) + defer { _fixLifetime(data) } + return try body(UnsafePointer<ContentType>(ptr!)) + } + + public func enumerateBytes( + block: @noescape (buffer: UnsafeBufferPointer<UInt8>, byteIndex: Int, stop: inout Bool) -> Void) + { + _swift_dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer<Void>, size: Int) in + let bp = UnsafeBufferPointer(start: UnsafePointer<UInt8>(ptr), count: size) + var stop = false + block(buffer: bp, byteIndex: offset, stop: &stop) + return !stop + } + } + + /// Append bytes to the data. + /// + /// - parameter bytes: A pointer to the bytes to copy in to the data. + /// - parameter count: The number of bytes to copy. + public mutating func append(_ bytes: UnsafePointer<UInt8>, count: Int) { + let data = dispatch_data_create(bytes, count, nil, _dispatch_data_destructor_default()) + self.append(DispatchData(data: data)) + } + + /// Append data to the data. + /// + /// - parameter data: The data to append to this data. 
+ public mutating func append(_ other: DispatchData) { + let data = CDispatch.dispatch_data_create_concat(__wrapped, other.__wrapped) + __wrapped = data + } + + /// Append a buffer of bytes to the data. + /// + /// - parameter buffer: The buffer of bytes to append. The size is calculated from `SourceType` and `buffer.count`. + public mutating func append<SourceType>(_ buffer : UnsafeBufferPointer<SourceType>) { + self.append(UnsafePointer(buffer.baseAddress!), count: buffer.count * sizeof(SourceType.self)) + } + + private func _copyBytesHelper(to pointer: UnsafeMutablePointer<UInt8>, from range: CountableRange<Index>) { + var copiedCount = 0 + _ = CDispatch.dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer<Void>, size: Int) in + let limit = Swift.min((range.endIndex - range.startIndex) - copiedCount, size) + memcpy(pointer + copiedCount, ptr, limit) + copiedCount += limit + return copiedCount < (range.endIndex - range.startIndex) + } + } + + /// Copy the contents of the data to a pointer. + /// + /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. + /// - parameter count: The number of bytes to copy. + /// - warning: This method does not verify that the contents at pointer have enough space to hold `count` bytes. + public func copyBytes(to pointer: UnsafeMutablePointer<UInt8>, count: Int) { + _copyBytesHelper(to: pointer, from: 0..<count) + } + + /// Copy a subset of the contents of the data to a pointer. + /// + /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. + /// - parameter range: The range in the `Data` to copy. + /// - warning: This method does not verify that the contents at pointer have enough space to hold the required number of bytes. + public func copyBytes(to pointer: UnsafeMutablePointer<UInt8>, from range: CountableRange<Index>) { + _copyBytesHelper(to: pointer, from: range) + } + + /// Copy the contents of the data into a buffer. + /// + /// This function copies the bytes in `range` from the data into the buffer. If the count of the `range` is greater than `sizeof(DestinationType) * buffer.count` then the first N bytes will be copied into the buffer. + /// - precondition: The range must be within the bounds of the data. Otherwise `fatalError` is called. + /// - parameter buffer: A buffer to copy the data into. + /// - parameter range: A range in the data to copy into the buffer. If the range is empty, this function will return 0 without copying anything. If the range is nil, as much data as will fit into `buffer` is copied. + /// - returns: Number of bytes copied into the destination buffer. + public func copyBytes<DestinationType>(to buffer: UnsafeMutableBufferPointer<DestinationType>, from range: CountableRange<Index>? = nil) -> Int { + let cnt = count + guard cnt > 0 else { return 0 } + + let copyRange : CountableRange<Index> + if let r = range { + guard !r.isEmpty else { return 0 } + precondition(r.startIndex >= 0) + precondition(r.startIndex < cnt, "The range is outside the bounds of the data") + + precondition(r.endIndex >= 0) + precondition(r.endIndex <= cnt, "The range is outside the bounds of the data") + + copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * sizeof(DestinationType.self), r.count)) + } else { + copyRange = 0..<Swift.min(buffer.count * sizeof(DestinationType.self), cnt) + } + + guard !copyRange.isEmpty else { return 0 } + + let pointer : UnsafeMutablePointer<UInt8> = UnsafeMutablePointer<UInt8>(buffer.baseAddress!) + _copyBytesHelper(to: pointer, from: copyRange) + return copyRange.count + } + + /// Sets or returns the byte at the specified index. + public subscript(index: Index) -> UInt8 { + var offset = 0 + let subdata = CDispatch.dispatch_data_copy_region(__wrapped, index, &offset) + + var ptr: UnsafePointer<Void>? = nil + var size = 0 + let map = CDispatch.dispatch_data_create_map(subdata, &ptr, &size) + defer { _fixLifetime(map) } + + let pptr = UnsafePointer<UInt8>(ptr!) + return pptr[index - offset] + } + + public subscript(bounds: Range<Int>) -> RandomAccessSlice<DispatchData> { + return RandomAccessSlice(base: self, bounds: bounds) + } + + /// Return a new copy of the data in a specified range. 
+ /// + /// - parameter range: The range to copy. + public func subdata(in range: CountableRange<Index>) -> DispatchData { + let subrange = CDispatch.dispatch_data_create_subrange( + __wrapped, range.startIndex, range.endIndex - range.startIndex) + return DispatchData(data: subrange) + } + + public func region(location: Int) -> (data: DispatchData, offset: Int) { + var offset: Int = 0 + let data = CDispatch.dispatch_data_copy_region(__wrapped, location, &offset) + return (DispatchData(data: data), offset) + } + + public var startIndex: Index { + return 0 + } + + public var endIndex: Index { + return count + } + + public func index(before i: Index) -> Index { + return i - 1 + } + + public func index(after i: Index) -> Index { + return i + 1 + } + + /// An iterator over the contents of the data. + /// + /// The iterator will increment byte-by-byte. + public func makeIterator() -> DispatchData.Iterator { + return DispatchDataIterator(_data: self) + } +} + +public struct DispatchDataIterator : IteratorProtocol, Sequence { + + /// Create an iterator over the given DispatchData + public init(_data: DispatchData) { + var ptr: UnsafePointer<Void>? + self._count = 0 + self._data = CDispatch.dispatch_data_create_map(_data.__wrapped, &ptr, &self._count) + self._ptr = UnsafePointer(ptr!) + self._position = _data.startIndex + } + + /// Advance to the next element and return it, or `nil` if no next + /// element exists. + /// + /// - Precondition: No preceding call to `self.next()` has returned `nil`. + public mutating func next() -> DispatchData._Element? { + if _position == _count { return nil } + let element = _ptr[_position]; + _position = _position + 1 + return element + } + + internal let _data: dispatch_data_t + internal var _ptr: UnsafePointer<UInt8> + internal var _count: Int + internal var _position: DispatchData.Index +} + +typealias _swift_data_applier = @convention(block) @noescape (dispatch_data_t, Int, UnsafePointer<Void>, Int) -> Bool + +@_silgen_name("_swift_dispatch_data_apply") +internal func _swift_dispatch_data_apply(_ data: dispatch_data_t, _ block: _swift_data_applier) + +@_silgen_name("_swift_dispatch_data_empty") +internal func _swift_dispatch_data_empty() -> dispatch_data_t + +@_silgen_name("_swift_dispatch_data_destructor_free") +internal func _dispatch_data_destructor_free() -> _DispatchBlock + +@_silgen_name("_swift_dispatch_data_destructor_munmap") +internal func _dispatch_data_destructor_munmap() -> _DispatchBlock + +@_silgen_name("_swift_dispatch_data_destructor_default") +internal func _dispatch_data_destructor_default() -> _DispatchBlock diff --git a/src/swift/Dispatch.apinotes b/src/swift/Dispatch.apinotes new file mode 100644 index 0000000..6e80451 --- /dev/null +++ b/src/swift/Dispatch.apinotes @@ -0,0 +1,328 @@ +--- +Name: Dispatch +Typedefs: +- Name: dispatch_object_t + Availability: nonswift +- Name: dispatch_block_t + Availability: nonswift +- Name: dispatch_queue_t + Availability: nonswift +- Name: dispatch_semaphore_t + Availability: nonswift +- Name: dispatch_io_t + Availability: nonswift +- Name: dispatch_data_t + Availability: nonswift +- Name: dispatch_group_t + Availability: nonswift +- Name: dispatch_qos_class_t + Availability: nonswift +- Name: dispatch_data_applier_t + Availability: nonswift +- Name: dispatch_fd_t + Availability: nonswift +- Name: dispatch_io_handler_t + Availability: nonswift +- Name: dispatch_source_t + Availability: nonswift +- Name: dispatch_function_t + Availability: nonswift +- Name: dispatch_io_close_flags_t + Availability: nonswift +- Name: 
dispatch_io_interval_flags_t + Availability: nonswift +- Name: dispatch_io_type_t + Availability: nonswift +- Name: dispatch_source_timer_flags_t + Availability: nonswift +- Name: dispatch_autorelease_frequency_t + SwiftPrivate: true +- Name: dispatch_queue_attr_t + Availability: nonswift +- Name: dispatch_queue_priority_t + Availability: nonswift +- Name: dispatch_block_flags_t + SwiftPrivate: true +- Name: dispatch_source_type_t + SwiftPrivate: true +- Name: dispatch_source_mach_send_flags_t + Availability: nonswift +- Name: dispatch_source_memorypressure_flags_t + Availability: nonswift +- Name: dispatch_source_proc_flags_t + Availability: nonswift +- Name: dispatch_source_vnode_flags_t + Availability: nonswift +Classes: +- Name: OS_dispatch_object + SwiftName: DispatchObject +- Name: OS_dispatch_queue + SwiftName: DispatchQueue +- Name: OS_dispatch_io + SwiftName: DispatchIO +- Name: OS_dispatch_semaphore + SwiftName: DispatchSemaphore +- Name: OS_dispatch_group + SwiftName: DispatchGroup +- Name: OS_dispatch_source + SwiftName: DispatchSource +- Name: OS_dispatch_queue_attr + SwiftPrivate: true +- Name: OS_dispatch_data + SwiftName: __DispatchData +Protocols: +- Name: OS_dispatch_source + SwiftName: DispatchSourceType +- Name: OS_dispatch_source_mach_send + SwiftName: DispatchSourceMachSend +- Name: OS_dispatch_source_mach_recv + SwiftName: DispatchSourceMachReceive +- Name: OS_dispatch_source_memorypressure + SwiftName: DispatchSourceMemoryPressure +- Name: OS_dispatch_source_proc + SwiftName: DispatchSourceProcess +- Name: OS_dispatch_source_read + SwiftName: DispatchSourceRead +- Name: OS_dispatch_source_signal + SwiftName: DispatchSourceSignal +- Name: OS_dispatch_source_timer + SwiftName: DispatchSourceTimer +- Name: OS_dispatch_source_data_or + SwiftName: DispatchSourceUserDataOr +- Name: OS_dispatch_source_data_add + SwiftName: DispatchSourceUserDataAdd +- Name: OS_dispatch_source_vnode + SwiftName: DispatchSourceFileSystemObject +- Name: OS_dispatch_source_write + SwiftName: DispatchSourceWrite +Functions: +- Name: dispatch_release + Availability: nonswift +- Name: dispatch_retain + Availability: nonswift +# dispatch_queue_t +- Name: dispatch_queue_create + SwiftName: 'DispatchQueue.init(__label:attr:)' + SwiftPrivate: true +- Name: dispatch_get_global_queue + SwiftPrivate: true +- Name: dispatch_queue_create_with_target + SwiftName: 'DispatchQueue.init(__label:attr:queue:)' + SwiftPrivate: true +- Name: dispatch_assert_queue + SwiftPrivate: true +- Name: dispatch_assert_queue_barrier + SwiftPrivate: true +- Name: dispatch_assert_queue_not + SwiftPrivate: true +- Name: dispatch_async + SwiftPrivate: true +- Name: dispatch_async_f + Availability: nonswift +- Name: dispatch_barrier_async + SwiftPrivate: true +- Name: dispatch_barrier_async_f + Availability: nonswift +- Name: dispatch_apply + SwiftPrivate: true +- Name: dispatch_apply_f + Availability: nonswift +- Name: dispatch_sync + SwiftName: 'DispatchQueue.sync(self:execute:)' +- Name: dispatch_sync_f + Availability: nonswift +- Name: dispatch_barrier_sync + SwiftPrivate: true +- Name: dispatch_barrier_sync_f + Availability: nonswift +- Name: dispatch_queue_get_label + SwiftPrivate: true +- Name: dispatch_queue_get_qos_class + SwiftPrivate: true +- Name: dispatch_after + SwiftPrivate: true +- Name: dispatch_after_f + Availability: nonswift +- Name: dispatch_queue_get_specific + SwiftPrivate: true +- Name: dispatch_queue_set_specific + SwiftPrivate: true +- Name: dispatch_get_specific + SwiftPrivate: true +- Name: 
dispatch_get_main_queue + Availability: nonswift +- Name: dispatch_queue_attr_make_initially_inactive + SwiftPrivate: true +- Name: dispatch_queue_attr_make_with_autorelease_frequency + SwiftPrivate: true +- Name: dispatch_queue_attr_make_with_qos_class + SwiftPrivate: true +# dispatch_object_t +- Name: dispatch_set_target_queue + SwiftName: 'DispatchObject.setTarget(self:queue:)' +- Name: dispatch_activate + SwiftName: 'DispatchObject.activate(self:)' +- Name: dispatch_suspend + SwiftName: 'DispatchObject.suspend(self:)' +- Name: dispatch_resume + SwiftName: 'DispatchObject.resume(self:)' +- Name: dispatch_set_finalizer_f + Availability: nonswift +- Name: dispatch_get_context + Availability: nonswift +- Name: dispatch_set_context + Availability: nonswift +- Name: _dispatch_object_validate + Availability: nonswift +# dispatch_block +- Name: dispatch_block_create + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem()' +- Name: dispatch_block_create_with_qos_class + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem()' +- Name: dispatch_block_perform + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.perform()' +- Name: dispatch_block_wait + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.wait(timeout:)' +- Name: dispatch_block_notify + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.notify(queue:execute:)' +- Name: dispatch_block_cancel + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.cancel()' +- Name: dispatch_block_testcancel + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.isCancelled' +# dispatch_data +- Name: dispatch_data_create + SwiftPrivate: true +- Name: dispatch_data_get_size + SwiftPrivate: true +- Name: dispatch_data_apply + SwiftPrivate: true +- Name: dispatch_data_create_concat + SwiftPrivate: true +- Name: dispatch_data_create_subrange + SwiftPrivate: true +- Name: dispatch_data_copy_region + SwiftPrivate: true +- Name: dispatch_data_create_map + SwiftPrivate: true +# dispatch_group_t +- Name: dispatch_group_create + SwiftName: 'DispatchGroup.init()' + Availability: available +- Name: dispatch_group_async + SwiftPrivate: true +- Name: dispatch_group_async_f + Availability: nonswift +- Name: dispatch_group_wait + SwiftPrivate: true +- Name: dispatch_group_notify + SwiftPrivate: true +- Name: dispatch_group_notify_f + Availability: nonswift +- Name: dispatch_group_enter + SwiftName: 'DispatchGroup.enter(self:)' +- Name: dispatch_group_leave + SwiftName: 'DispatchGroup.leave(self:)' +# dispatch_io +- Name: dispatch_io_create + SwiftPrivate: true + SwiftName: 'DispatchIO.init(__type:fd:queue:handler:)' +- Name: dispatch_io_create_with_path + SwiftPrivate: true + SwiftName: 'DispatchIO.init(__type:path:oflag:mode:queue:handler:)' +- Name: dispatch_io_create_with_io + SwiftPrivate: true + SwiftName: 'DispatchIO.init(__type:io:queue:handler:)' +- Name: dispatch_io_read + SwiftPrivate: true +- Name: dispatch_io_write + SwiftPrivate: true +- Name: dispatch_io_close + SwiftPrivate: true +- Name: dispatch_io_barrier + SwiftName: 'DispatchIO.barrier(self:execute:)' +- Name: dispatch_io_get_descriptor + SwiftName: 'getter:DispatchIO.fileDescriptor(self:)' +- Name: dispatch_io_set_high_water + SwiftName: 'DispatchIO.setLimit(self:highWater:)' +- Name: dispatch_io_set_low_water + SwiftName: 'DispatchIO.setLimit(self:lowWater:)' +- Name: dispatch_io_set_interval + SwiftPrivate: true +- Name: dispatch_read + SwiftPrivate: true +- Name: dispatch_write + SwiftPrivate: true +# 
dispatch_semaphore +- Name: dispatch_semaphore_create + SwiftName: 'DispatchSemaphore.init(value:)' +- Name: dispatch_semaphore_wait + SwiftPrivate: true +- Name: dispatch_semaphore_signal + SwiftPrivate: true +# dispatch_source +- Name: dispatch_source_create + SwiftPrivate: true +- Name: dispatch_source_get_handle + SwiftPrivate: true +- Name: dispatch_source_get_mask + SwiftPrivate: true +- Name: dispatch_source_get_data + SwiftPrivate: true +- Name: dispatch_source_merge_data + SwiftPrivate: true +- Name: dispatch_source_set_event_handler + SwiftPrivate: true +- Name: dispatch_source_set_event_handler_f + Availability: nonswift +- Name: dispatch_source_set_cancel_handler + SwiftPrivate: true +- Name: dispatch_source_set_cancel_handler_f + Availability: nonswift +- Name: dispatch_source_set_registration_handler + SwiftPrivate: true +- Name: dispatch_source_set_registration_handler_f + Availability: nonswift +- Name: dispatch_source_cancel + SwiftPrivate: true +- Name: dispatch_source_testcancel + SwiftPrivate: true +- Name: dispatch_source_set_timer + SwiftPrivate: true +# dispatch_time +- Name: dispatch_time + SwiftPrivate: true +- Name: dispatch_walltime + SwiftPrivate: true +- Name: dispatch_main + SwiftName: 'dispatchMain()' +Globals: +- Name: _dispatch_data_destructor_free + Availability: nonswift +- Name: _dispatch_data_destructor_munmap + Availability: nonswift +Enumerators: +- Name: DISPATCH_BLOCK_BARRIER + Availability: nonswift +- Name: DISPATCH_BLOCK_DETACHED + Availability: nonswift +- Name: DISPATCH_BLOCK_ASSIGN_CURRENT + Availability: nonswift +- Name: DISPATCH_BLOCK_NO_QOS_CLASS + Availability: nonswift +- Name: DISPATCH_BLOCK_INHERIT_QOS_CLASS + Availability: nonswift +- Name: DISPATCH_BLOCK_ENFORCE_QOS_CLASS + Availability: nonswift +- Name: DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + Availability: nonswift +- Name: DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + Availability: nonswift +- Name: DISPATCH_AUTORELEASE_FREQUENCY_NEVER + Availability: nonswift diff --git a/src/swift/Dispatch.swift b/src/swift/Dispatch.swift new file mode 100644 index 0000000..2b9cb21 --- /dev/null +++ b/src/swift/Dispatch.swift @@ -0,0 +1,211 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +@_exported import Dispatch + +import CDispatch + +/// dispatch_assert + +@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +public enum DispatchPredicate { + case onQueue(DispatchQueue) + case onQueueAsBarrier(DispatchQueue) + case notOnQueue(DispatchQueue) +} + +@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +public func _dispatchPreconditionTest(_ condition: DispatchPredicate) -> Bool { + switch condition { + case .onQueue(let q): + dispatch_assert_queue(q.__wrapped) + case .onQueueAsBarrier(let q): + dispatch_assert_queue_barrier(q.__wrapped) + case .notOnQueue(let q): + dispatch_assert_queue_not(q.__wrapped) + } + return true +} + +@_transparent +@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +public func dispatchPrecondition(condition: @autoclosure () -> DispatchPredicate) { + // precondition is able to determine release-vs-debug asserts where the overlay + // cannot, so formulating this into a call that we can call with precondition() + precondition(_dispatchPreconditionTest(condition()), "dispatchPrecondition failure") +} + +/// qos_class_t + +public struct DispatchQoS : Equatable { + public let qosClass: QoSClass + public let relativePriority: Int + + @available(OSX 10.10, iOS 8.0, *) + public static let background = DispatchQoS(qosClass: .background, relativePriority: 0) + + @available(OSX 10.10, iOS 8.0, *) + public static let utility = DispatchQoS(qosClass: .utility, relativePriority: 0) + + @available(OSX 10.10, iOS 8.0, *) + public static let `default` = DispatchQoS(qosClass: .default, relativePriority: 0) + + @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "DispatchQoS.default") + @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "DispatchQoS.default") + @available(*, deprecated, renamed: "DispatchQoS.default") + public static let defaultQoS = DispatchQoS.default + + @available(OSX 10.10, iOS 8.0, *) + public static let userInitiated = DispatchQoS(qosClass: .userInitiated, relativePriority: 0) + + @available(OSX 10.10, iOS 8.0, *) + public static let userInteractive = DispatchQoS(qosClass: .userInteractive, relativePriority: 0) + + public static let unspecified = DispatchQoS(qosClass: .unspecified, relativePriority: 0) + + public enum QoSClass { + @available(OSX 10.10, iOS 8.0, *) + case background + + @available(OSX 10.10, iOS 8.0, *) + case utility + + @available(OSX 10.10, iOS 8.0, *) + case `default` + + @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "QoSClass.default") + @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "QoSClass.default") + @available(*, deprecated, renamed: "QoSClass.default") + static let defaultQoS = QoSClass.default + + @available(OSX 10.10, iOS 8.0, *) + case userInitiated + + @available(OSX 10.10, iOS 8.0, *) + case userInteractive + + case unspecified + + @available(OSX 10.10, iOS 8.0, *) + internal init?(qosClass: _OSQoSClass) { + switch qosClass { + case .QOS_CLASS_BACKGROUND: self = .background + case .QOS_CLASS_UTILITY: self = .utility + case .QOS_CLASS_DEFAULT: self = .default + case .QOS_CLASS_USER_INITIATED: self = .userInitiated + case .QOS_CLASS_USER_INTERACTIVE: self = .userInteractive + case .QOS_CLASS_UNSPECIFIED: self = 
.unspecified + default: return nil + } + } + + @available(OSX 10.10, iOS 8.0, *) + internal var rawValue: _OSQoSClass { + switch self { + case .background: return .QOS_CLASS_BACKGROUND + case .utility: return .QOS_CLASS_UTILITY + case .default: return .QOS_CLASS_DEFAULT + case .userInitiated: return .QOS_CLASS_USER_INITIATED + case .userInteractive: return .QOS_CLASS_USER_INTERACTIVE + case .unspecified: return .QOS_CLASS_UNSPECIFIED + } + } + } + + public init(qosClass: QoSClass, relativePriority: Int) { + self.qosClass = qosClass + self.relativePriority = relativePriority + } +} + +public func ==(a: DispatchQoS, b: DispatchQoS) -> Bool { + return a.qosClass == b.qosClass && a.relativePriority == b.relativePriority +} + +/// + +public enum DispatchTimeoutResult { + static let KERN_OPERATION_TIMED_OUT:Int = 49 + case Success + case TimedOut +} + +/// dispatch_group + +public extension DispatchGroup { + public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @convention(block) () -> ()) { + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: work) + dispatch_group_notify(self.__wrapped, queue.__wrapped, item._block) + } else { + dispatch_group_notify(self.__wrapped, queue.__wrapped, work) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func notify(queue: DispatchQueue, work: DispatchWorkItem) { + dispatch_group_notify(self.__wrapped, queue.__wrapped, work._block) + } + + public func wait() { + _ = dispatch_group_wait(self.__wrapped, DispatchTime.distantFuture.rawValue) + } + + public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + } + + public func wait(wallTimeout timeout: DispatchWallTime) -> DispatchTimeoutResult { + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + } +} + +public extension DispatchGroup { + @available(*, deprecated, renamed: "DispatchGroup.wait(self:wallTimeout:)") + public func wait(walltime timeout: DispatchWallTime) -> Int { + switch wait(wallTimeout: timeout) { + case .Success: return 0 + case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT + } + } +} + +/// dispatch_semaphore + +public extension DispatchSemaphore { + @discardableResult + public func signal() -> Int { + return dispatch_semaphore_signal(self.__wrapped) + } + + public func wait() { + _ = dispatch_semaphore_wait(self.__wrapped, DispatchTime.distantFuture.rawValue) + } + + public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { + return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + } + + public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { + return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? 
.Success : .TimedOut + } +} + +public extension DispatchSemaphore { + @available(*, deprecated, renamed: "DispatchSemaphore.wait(self:wallTimeout:)") + public func wait(walltime timeout: DispatchWalltime) -> Int { + switch wait(wallTimeout: timeout) { + case .Success: return 0 + case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT + } + } +} diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc new file mode 100644 index 0000000..1e5ec74 --- /dev/null +++ b/src/swift/DispatchStubs.cc @@ -0,0 +1,207 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include +#include + +#define DISPATCH_RUNTIME_STDLIB_INTERFACE __attribute__((__visibility__("default"))) + +#if USE_OBJC +@protocol OS_dispatch_source; +@protocol OS_dispatch_source_mach_send; +@protocol OS_dispatch_source_mach_recv; +@protocol OS_dispatch_source_memorypressure; +@protocol OS_dispatch_source_proc; +@protocol OS_dispatch_source_read; +@protocol OS_dispatch_source_signal; +@protocol OS_dispatch_source_timer; +@protocol OS_dispatch_source_data_add; +@protocol OS_dispatch_source_data_or; +@protocol OS_dispatch_source_vnode; +@protocol OS_dispatch_source_write; + +// #include +__attribute__((constructor)) +static void _dispatch_overlay_constructor() { + Class source = objc_lookUpClass("OS_dispatch_source"); + if (source) { + class_addProtocol(source, @protocol(OS_dispatch_source)); + class_addProtocol(source, @protocol(OS_dispatch_source_mach_send)); + class_addProtocol(source, @protocol(OS_dispatch_source_mach_recv)); + class_addProtocol(source, @protocol(OS_dispatch_source_memorypressure)); + class_addProtocol(source, @protocol(OS_dispatch_source_proc)); + class_addProtocol(source, @protocol(OS_dispatch_source_read)); + class_addProtocol(source, @protocol(OS_dispatch_source_signal)); + class_addProtocol(source, @protocol(OS_dispatch_source_timer)); + class_addProtocol(source, @protocol(OS_dispatch_source_data_add)); + class_addProtocol(source, @protocol(OS_dispatch_source_data_or)); + class_addProtocol(source, @protocol(OS_dispatch_source_vnode)); + class_addProtocol(source, @protocol(OS_dispatch_source_write)); + } +} + +#endif /* USE_OBJC */ + +#if 0 /* FIXME -- adding directory to include path may need build-script plumbing to do properly... */ +#include "swift/Runtime/Config.h" +#else +#define SWIFT_CC(x) /* FIXME!! 
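 *
 * For reference, a minimal usage sketch of the DispatchGroup and
 * DispatchSemaphore wrappers declared in Dispatch.swift above, using the
 * DispatchTimeoutResult spelling from this overlay (queue label and the
 * performWork()/handleCompletion() helpers are illustrative):
 *
 *   let queue = DispatchQueue(label: "com.example.worker")
 *   let group = DispatchGroup()
 *   queue.async(group: group) { performWork() }
 *   if group.wait(timeout: .distantFuture) == .Success { handleCompletion() }
 *
 *   let semaphore = DispatchSemaphore(value: 0)
 *   queue.async { semaphore.signal() }
 *   _ = semaphore.wait(timeout: .distantFuture)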
*/ +#endif + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_queue_attr_t +_swift_dispatch_queue_concurrent(void) { + return DISPATCH_QUEUE_CONCURRENT; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_apply_current(size_t iterations, __attribute__((__noescape__)) void (^block)(size_t)) { + dispatch_apply(iterations, (dispatch_queue_t _Nonnull)0, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_queue_t +_swift_dispatch_get_main_queue(void) { + return dispatch_get_main_queue(); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_data_t +_swift_dispatch_data_empty(void) { + return dispatch_data_empty; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_data_destructor_default(void) { + return DISPATCH_DATA_DESTRUCTOR_DEFAULT; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_data_destructor_free(void) { + return _dispatch_data_destructor_free; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_data_destructor_munmap(void) { + return _dispatch_data_destructor_munmap; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos, int relative_priority, dispatch_block_t block) { + return dispatch_block_create_with_qos_class(flags, qos, relative_priority, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_block_create_noescape(dispatch_block_flags_t flags, dispatch_block_t block) { + return dispatch_block_create(flags, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_block_cancel(dispatch_block_t block) { + dispatch_block_cancel(block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" long +_swift_dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout) { + return dispatch_block_wait(block, timeout); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block) { + dispatch_block_notify(block, queue, notification_block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" long +_swift_dispatch_block_testcancel(dispatch_block_t block) { + return dispatch_block_testcancel(block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" bool +_swift_dispatch_data_apply(dispatch_data_t data, bool (^applier)(dispatch_data_t, size_t, const void *, size_t)) { + return dispatch_data_apply(data, applier); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block) { + dispatch_async(queue, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block) { + dispatch_group_async(group, queue, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_sync(dispatch_queue_t queue, dispatch_block_t block) { + dispatch_sync(queue, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_release(dispatch_object_t obj) { + dispatch_release(obj); +} + +// 
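// The stubs above follow a single pattern: each _swift_dispatch_* entry
// point re-exports a libdispatch macro or symbol that the Swift importer
// cannot see directly (DISPATCH_QUEUE_CONCURRENT, dispatch_data_empty,
// the block and data destructor constants, and so on). The Swift side
// reaches them through matching @_silgen_name declarations; Queue.swift
// below declares, for example:
//
//   @_silgen_name("_swift_dispatch_queue_concurrent")
//   internal func _swift_dispatch_queue_concurrent() -> dispatch_queue_attr_t
//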
DISPATCH_RUNTIME_STDLIB_INTERFACE +// extern "C" dispatch_queue_t +// _swift_apply_current_root_queue() { +// return DISPATCH_APPLY_CURRENT_ROOT_QUEUE; +// } + +#define SOURCE(t) \ + SWIFT_CC(swift) \ + DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_source_type_t \ + _swift_dispatch_source_type_##t(void) { \ + return DISPATCH_SOURCE_TYPE_##t; \ + } + +SOURCE(DATA_ADD) +SOURCE(DATA_OR) +#if HAVE_MACH +SOURCE(MACH_SEND) +SOURCE(MACH_RECV) +SOURCE(MEMORYPRESSURE) +#endif +#ifndef __linux__ +SOURCE(PROC) +#endif +SOURCE(READ) +SOURCE(SIGNAL) +SOURCE(TIMER) +#ifndef __linux__ +SOURCE(VNODE) +#endif +SOURCE(WRITE) + +// See comment in CFFuntime.c explaining why objc_retainAutoreleasedReturnValue is needed. +extern "C" void swift_release(void *); +extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { + if (obj) { + swift_release(obj); + return obj; + } + else return NULL; +} diff --git a/src/swift/IO.swift b/src/swift/IO.swift new file mode 100644 index 0000000..6e6b669 --- /dev/null +++ b/src/swift/IO.swift @@ -0,0 +1,129 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public extension DispatchIO { + + public enum StreamType : UInt { + case stream = 0 + case random = 1 + } + + public struct CloseFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let stop = CloseFlags(rawValue: 1) + } + + public struct IntervalFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + public init(nilLiteral: ()) { self.rawValue = 0 } + + public static let strictInterval = IntervalFlags(rawValue: 1) + } + + public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData, error: Int32) -> Void) { + dispatch_read(fromFileDescriptor, maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in + handler(data: DispatchData(data: data), error: error) + } + } + + public class func write(fromFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData?, error: Int32) -> Void) { + dispatch_write(fromFileDescriptor, data.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in + handler(data: data.flatMap { DispatchData(data: $0) }, error: error) + } + } + + public convenience init( + type: StreamType, + fileDescriptor: Int32, + queue: DispatchQueue, + cleanupHandler: (error: Int32) -> Void) + { + self.init(__type: type.rawValue, fd: fileDescriptor, queue: queue, handler: cleanupHandler) + } + + public convenience init( + type: StreamType, + path: UnsafePointer, + oflag: Int32, + mode: mode_t, + queue: DispatchQueue, + cleanupHandler: (error: Int32) -> Void) + { + self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler) + } + + public convenience init( + type: StreamType, + io: DispatchIO, + queue: DispatchQueue, + cleanupHandler: (error: Int32) -> Void) + 
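	// A minimal usage sketch of the class-level read wrapper above
	// (fd is a file descriptor from open(2); consume() is illustrative):
	//
	//   DispatchIO.read(fromFileDescriptor: fd, maxLength: 1024,
	//                   runningHandlerOn: .main) { data, error in
	//       if error == 0 { consume(data) }
	//   }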
{ + self.init(__type: type.rawValue, io: io, queue: queue, handler: cleanupHandler) + } + + public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { + dispatch_io_read(self.__wrapped, offset, length, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in + ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error) + } + } + + public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { + dispatch_io_write(self.__wrapped, offset, data.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in + ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error) + } + } + + public func setInterval(interval: DispatchTimeInterval, flags: IntervalFlags = []) { + dispatch_io_set_interval(self.__wrapped, interval.rawValue, flags.rawValue) + } + + public func close(flags: CloseFlags = []) { + dispatch_io_close(self.__wrapped, flags.rawValue) + } +} + +extension DispatchIO { + @available(*, deprecated, renamed: "DispatchIO.read(fromFileDescriptor:maxLength:runningHandlerOn:handler:)") + public class func read(fd: Int32, length: Int, queue: DispatchQueue, handler: (DispatchData, Int32) -> Void) { + DispatchIO.read(fromFileDescriptor: fd, maxLength: length, runningHandlerOn: queue, handler: handler) + } + + @available(*, deprecated, renamed: "DispatchIO.write(fromFileDescriptor:data:runningHandlerOn:handler:)") + public class func write(fd: Int32, data: DispatchData, queue: DispatchQueue, handler: (DispatchData?, Int32) -> Void) { + DispatchIO.write(fromFileDescriptor: fd, data: data, runningHandlerOn: queue, handler: handler) + } + + @available(*, deprecated, renamed: "DispatchIO.barrier(self:execute:)") + public func withBarrier(barrier work: () -> ()) { + barrier(execute: work) + } + + @available(*, deprecated, renamed: "DispatchIO.setLimit(self:highWater:)") + public func setHighWater(highWater: Int) { + setLimit(highWater: highWater) + } + + @available(*, deprecated, renamed: "DispatchIO.setLimit(self:lowWater:)") + public func setLowWater(lowWater: Int) { + setLimit(lowWater: lowWater) + } + + @available(*, deprecated, renamed: "DispatchIO.setInterval(self:interval:flags:)") + public func setInterval(interval: UInt64, flags: IntervalFlags) { + setInterval(interval: .nanoseconds(Int(interval)), flags: flags) + } +} diff --git a/src/swift/Private.swift b/src/swift/Private.swift new file mode 100644 index 0000000..e38f728 --- /dev/null +++ b/src/swift/Private.swift @@ -0,0 +1,474 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +// Redeclarations of all SwiftPrivate functions with appropriate markup. + +import CDispatch + +@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +public func dispatch_queue_create(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?) 
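// Each legacy C-style function below is redeclared as unavailable so that
// the compiler points callers at the replacement API. For example, code
// that previously created a queue with dispatch_queue_create now writes
// (label illustrative):
//
//   let q = DispatchQueue(label: "com.example.worker", attributes: .concurrent)
//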
-> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +public func dispatch_queue_create_with_target(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?, _ queue: DispatchQueue?) -> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.init(type:fileDescriptor:queue:cleanupHandler:)") +public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.init(type:path:oflag:mode:queue:cleanupHandler:)") +public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.init(type:io:queue:cleanupHandler:)") +public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.read(fileDescriptor:length:queue:handler:)") +public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ handler: (dispatch_data_t, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.read(self:offset:length:queue:ioHandler:)") +func dispatch_io_read(_ channel: DispatchIO, _ offset: off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.write(self:offset:data:queue:ioHandler:)") +func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.write(fileDescriptor:data:queue:handler:)") +func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: (dispatch_data_t?, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.init(bytes:)") +public func dispatch_data_create(_ buffer: UnsafePointer, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchData.count(self:)") +public func dispatch_data_get_size(_ data: dispatch_data_t) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.withUnsafeBytes(self:body:)") +public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer?>?, _ size_ptr: UnsafeMutablePointer?) 
-> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.append(self:_:)") +public func dispatch_data_create_concat(_ data1: dispatch_data_t, _ data2: dispatch_data_t) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.subdata(self:in:)") +public func dispatch_data_create_subrange(_ data: dispatch_data_t, _ offset: Int, _ length: Int) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.enumerateBytes(self:block:)") +public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: (dispatch_data_t, Int, UnsafePointer, Int) -> Bool) -> Bool +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.region(self:location:)") +public func dispatch_data_copy_region(_ data: dispatch_data_t, _ location: Int, _ offset_ptr: UnsafeMutablePointer) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") +public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchGroup.notify(self:qos:flags:queue:execute:)") +public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchGroup.wait(self:timeout:)") +public func dispatch_group_wait(_ group: DispatchGroup, _ timeout: dispatch_time_t) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.close(self:flags:)") +public func dispatch_io_close(_ channel: DispatchIO, _ flags: UInt) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.setInterval(self:interval:flags:)") +public func dispatch_io_set_interval(_ channel: DispatchIO, _ interval: UInt64, _ flags: UInt) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.apply(attributes:iterations:execute:)") +public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: @noescape (Int) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:execute:)") +public func dispatch_async(_ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.global(attributes:)") +public func dispatch_get_global_queue(_ identifier: Int, _ flags: UInt) -> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.main") +public func dispatch_get_main_queue() -> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueueAttributes.initiallyInactive") +public func dispatch_queue_attr_make_initially_inactive(_ attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueueAttributes.autoreleaseWorkItem") +public func dispatch_queue_attr_make_with_autorelease_frequency(_ attr: dispatch_queue_attr_t?, _ frequency: dispatch_autorelease_frequency_t) -> dispatch_queue_attr_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueueAttributes.qosUserInitiated") +public func dispatch_queue_attr_make_with_qos_class(_ attr: dispatch_queue_attr_t?, _ qos_class: dispatch_qos_class_t, _ relative_priority: Int32) -> dispatch_queue_attr_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchQueue.label(self:)") +public func dispatch_queue_get_label(_ queue: DispatchQueue?) 
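// The dispatch_data_* shims above map onto DispatchData value semantics;
// a minimal sketch, assuming the DispatchData(bytes:) initializer declared
// in Data.swift of this patch:
//
//   let bytes: [UInt8] = [0xde, 0xad, 0xbe, 0xef]
//   let data = bytes.withUnsafeBufferPointer { DispatchData(bytes: $0) }
//   let size = data.count              // replaces dispatch_data_get_size
//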
-> UnsafePointer +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchQueue.qos(self:)") +public func dispatch_queue_get_qos_class(_ queue: DispatchQueue, _ relative_priority_ptr: UnsafeMutablePointer?) -> dispatch_qos_class_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.after(self:when:execute:)") +public func dispatch_after(_ when: dispatch_time_t, _ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") +public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.synchronously(self:flags:execute:)") +public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: @noescape () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.setSpecific(self:key:value:)") +public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafePointer, _ context: UnsafeMutablePointer?, _ destructor: (@convention(c) (UnsafeMutablePointer?) -> Void)?) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.getSpecific(self:key:)") +public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafePointer) -> UnsafeMutablePointer? +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.getSpecific(key:)") +public func dispatch_get_specific(_ key: UnsafePointer) -> UnsafeMutablePointer? +{ + fatalError() +} + +@available(*, unavailable, renamed:"dispatchPrecondition(_:)") +public func dispatch_assert_queue(_ queue: DispatchQueue) +{ + fatalError() +} + +@available(*, unavailable, renamed:"dispatchPrecondition(_:)") +public func dispatch_assert_queue_barrier(_ queue: DispatchQueue) +{ + fatalError() +} + +@available(*, unavailable, renamed:"dispatchPrecondition(_:)") +public func dispatch_assert_queue_not(_ queue: DispatchQueue) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSemaphore.wait(self:timeout:)") +public func dispatch_semaphore_wait(_ dsema: DispatchSemaphore, _ timeout: dispatch_time_t) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSemaphore.signal(self:)") +public func dispatch_semaphore_signal(_ dsema: DispatchSemaphore) -> Int +{ + fatalError() +} + +@available(*, unavailable, message:"Use DispatchSource class methods") +public func dispatch_source_create(_ type: dispatch_source_type_t, _ handle: UInt, _ mask: UInt, _ queue: DispatchQueue?) -> DispatchSource +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.setEventHandler(self:handler:)") +public func dispatch_source_set_event_handler(_ source: DispatchSource, _ handler: (() -> Void)?) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.setCancelHandler(self:handler:)") +public func dispatch_source_set_cancel_handler(_ source: DispatchSource, _ handler: (() -> Void)?) 
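// The dispatch_assert_queue* family above is replaced by the single
// dispatchPrecondition(condition:) entry point from Dispatch.swift:
//
//   dispatchPrecondition(condition: .onQueue(queue))      // dispatch_assert_queue
//   dispatchPrecondition(condition: .notOnQueue(queue))   // dispatch_assert_queue_not
//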
+{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.cancel(self:)") +public func dispatch_source_cancel(_ source: DispatchSource) +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.isCancelled(self:)") +public func dispatch_source_testcancel(_ source: DispatchSource) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.handle(self:)") +public func dispatch_source_get_handle(_ source: DispatchSource) -> UInt +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.mask(self:)") +public func dispatch_source_get_mask(_ source: DispatchSource) -> UInt +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.data(self:)") +public func dispatch_source_get_data(_ source: DispatchSource) -> UInt +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchUserDataAdd.mergeData(self:value:)") +public func dispatch_source_merge_data(_ source: DispatchSource, _ value: UInt) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchTimerSource.setTimer(self:start:interval:leeway:)") +public func dispatch_source_set_timer(_ source: DispatchSource, _ start: dispatch_time_t, _ interval: UInt64, _ leeway: UInt64) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.setRegistrationHandler(self:handler:)") +public func dispatch_source_set_registration_handler(_ source: DispatchSource, _ handler: (() -> Void)?) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchTime.now()") +public func dispatch_time(_ when: dispatch_time_t, _ delta: Int64) -> dispatch_time_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchWalltime.init(time:)") +public func dispatch_walltime(_ when: UnsafePointer?, _ delta: Int64) -> dispatch_time_t +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUserInitiated") +public var DISPATCH_QUEUE_PRIORITY_HIGH: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosDefault") +public var DISPATCH_QUEUE_PRIORITY_DEFAULT: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUtility") +public var DISPATCH_QUEUE_PRIORITY_LOW: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosBackground") +public var DISPATCH_QUEUE_PRIORITY_BACKGROUND: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.StreamType.stream") +public var DISPATCH_IO_STREAM: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.StreamType.random") +public var DISPATCH_IO_RANDOM: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.CloseFlags.stop") +public var DISPATCH_IO_STOP: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.IntervalFlags.strictInterval") +public var DISPATCH_IO_STRICT_INTERVAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.MachSendEvent.dead") +public var DISPATCH_MACH_SEND_DEAD: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.MemoryPressureEvent.normal") +public var DISPATCH_MEMORYPRESSURE_NORMAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.MemoryPressureEvent.warning") +public var DISPATCH_MEMORYPRESSURE_WARN: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.MemoryPressureEvent.critical") +public var 
DISPATCH_MEMORYPRESSURE_CRITICAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.exit") +public var DISPATCH_PROC_EXIT: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.fork") +public var DISPATCH_PROC_FORK: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.exec") +public var DISPATCH_PROC_EXEC: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.signal") +public var DISPATCH_PROC_SIGNAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.TimerFlags.strict") +public var DISPATCH_TIMER_STRICT: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.delete") +public var DISPATCH_VNODE_DELETE: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.write") +public var DISPATCH_VNODE_WRITE: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.extend") +public var DISPATCH_VNODE_EXTEND: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.attrib") +public var DISPATCH_VNODE_ATTRIB: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.link") +public var DISPATCH_VNODE_LINK: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.rename") +public var DISPATCH_VNODE_RENAME: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.revoke") +public var DISPATCH_VNODE_REVOKE: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.funlock") +public var DISPATCH_VNODE_FUNLOCK: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchTime.now()") +public var DISPATCH_TIME_NOW: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchTime.distantFuture") +public var DISPATCH_TIME_FOREVER: Int { + fatalError() +} diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift new file mode 100644 index 0000000..5a45fdc --- /dev/null +++ b/src/swift/Queue.swift @@ -0,0 +1,421 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +// dispatch/queue.h + +import CDispatch + +public struct DispatchQueueAttributes : OptionSet { + public let rawValue: UInt64 + public init(rawValue: UInt64) { self.rawValue = rawValue } + + public static let serial = DispatchQueueAttributes(rawValue: 0<<0) + public static let concurrent = DispatchQueueAttributes(rawValue: 1<<1) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let initiallyInactive = DispatchQueueAttributes(rawValue: 1<<2) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let autoreleaseInherit = DispatchQueueAttributes(rawValue: 1<<3) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let autoreleaseWorkItem = DispatchQueueAttributes(rawValue: 1<<4) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let autoreleaseNever = DispatchQueueAttributes(rawValue: 1<<5) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInteractive = DispatchQueueAttributes(rawValue: 1<<6) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInitiated = DispatchQueueAttributes(rawValue: 1<<7) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosDefault = DispatchQueueAttributes(rawValue: 1<<8) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUtility = DispatchQueueAttributes(rawValue: 1<<9) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosBackground = DispatchQueueAttributes(rawValue: 1<<10) + + @available(*, deprecated, message: ".noQoS has no effect, it should not be used") + public static let noQoS = DispatchQueueAttributes(rawValue: 1<<11) + + private var attr: dispatch_queue_attr_t? { + var attr: dispatch_queue_attr_t? 
+ + if self.contains(.concurrent) { + attr = _swift_dispatch_queue_concurrent() + } + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + if self.contains(.initiallyInactive) { + attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr) + } + if self.contains(.autoreleaseWorkItem) { + // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1)) + } else if self.contains(.autoreleaseInherit) { + // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0)) + } else if self.contains(.autoreleaseNever) { + // DISPATCH_AUTORELEASE_FREQUENCY_NEVER + attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2)) + } + } + if #available(OSX 10.10, iOS 8.0, *) { + if self.contains(.qosUserInteractive) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue, 0) + } else if self.contains(.qosUserInitiated) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue, 0) + } else if self.contains(.qosDefault) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_DEFAULT.rawValue, 0) + } else if self.contains(.qosUtility) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_UTILITY.rawValue, 0) + } else if self.contains(.qosBackground) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_BACKGROUND.rawValue, 0) + } + } + return attr + } +} + + +public final class DispatchSpecificKey { + public init() {} +} + +internal class _DispatchSpecificValue { + internal let value: T + internal init(value: T) { self.value = value } +} + +public extension DispatchQueue { + + public struct GlobalAttributes : OptionSet { + public let rawValue: UInt64 + public init(rawValue: UInt64) { self.rawValue = rawValue } + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInteractive = GlobalAttributes(rawValue: 1<<0) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInitiated = GlobalAttributes(rawValue: 1<<1) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosDefault = GlobalAttributes(rawValue: 1<<2) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUtility = GlobalAttributes(rawValue: 1<<3) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosBackground = GlobalAttributes(rawValue: 1<<4) + + // Avoid using our own deprecated constants here by declaring + // non-deprecated constants and then basing the public ones on those. 
+ internal static let _priorityHigh = GlobalAttributes(rawValue: 1<<5) + internal static let _priorityDefault = GlobalAttributes(rawValue: 1<<6) + internal static let _priorityLow = GlobalAttributes(rawValue: 1<<7) + internal static let _priorityBackground = GlobalAttributes(rawValue: 1<<8) + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityHigh = _priorityHigh + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityDefault = _priorityDefault + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityLow = _priorityLow + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityBackground = _priorityBackground + + internal var _translatedValue: Int { + if #available(OSX 10.10, iOS 8.0, *) { + if self.contains(.qosUserInteractive) { return Int(_OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue) } + else if self.contains(.qosUserInitiated) { return Int(_OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue) } + else if self.contains(.qosDefault) { return Int(_OSQoSClass.QOS_CLASS_DEFAULT.rawValue) } + else if self.contains(.qosUtility) { return Int(_OSQoSClass.QOS_CLASS_UTILITY.rawValue) } + else { return Int(_OSQoSClass.QOS_CLASS_BACKGROUND.rawValue) } + } + if self.contains(._priorityHigh) { return 2 } // DISPATCH_QUEUE_PRIORITY_HIGH + else if self.contains(._priorityDefault) { return 0 } // DISPATCH_QUEUE_PRIORITY_DEFAULT + else if self.contains(._priorityLow) { return -2 } // // DISPATCH_QUEUE_PRIORITY_LOW + else if self.contains(._priorityBackground) { return Int(Int16.min) } // // DISPATCH_QUEUE_PRIORITY_BACKGROUND + return 0 + } + } + + public class func concurrentPerform(iterations: Int, execute work: @noescape (Int) -> Void) { + _swift_dispatch_apply_current(iterations, work) + } + + public class var main: DispatchQueue { + return DispatchQueue(queue: _swift_dispatch_get_main_queue()) + } + + public class func global(attributes: GlobalAttributes = []) -> DispatchQueue { + // SubOptimal? Should we be caching these global DispatchQueue objects? + return DispatchQueue(queue:dispatch_get_global_queue(attributes._translatedValue, 0)) + } + + public class func getSpecific(key: DispatchSpecificKey) -> T? { + let k = Unmanaged.passUnretained(key).toOpaque() + if let p = CDispatch.dispatch_get_specific(k) { + let v = Unmanaged<_DispatchSpecificValue> + .fromOpaque(p) + .takeUnretainedValue() + return v.value + } + return nil + } + + public convenience init( + label: String, + attributes: DispatchQueueAttributes = .serial, + target: DispatchQueue? = nil) + { + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + self.init(__label: label, attr: attributes.attr, queue: target) + } else { + self.init(__label: label, attr: attributes.attr) + if let tq = target { self.setTarget(queue: tq) } + } + } + + public var label: String { + return String(validatingUTF8: dispatch_queue_get_label(self.__wrapped))! 
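	// A minimal creation sketch for the attributes and factories above
	// (labels illustrative):
	//
	//   let serial = DispatchQueue(label: "com.example.serial")
	//   let concurrent = DispatchQueue(label: "com.example.concurrent",
	//                                  attributes: .concurrent)
	//   let background = DispatchQueue.global(attributes: .qosBackground)
	//   DispatchQueue.concurrentPerform(iterations: 8) { print($0) }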
+ } + + @available(OSX 10.10, iOS 8.0, *) + public func sync(execute workItem: DispatchWorkItem) { + dispatch_sync(self.__wrapped, workItem._block) + } + + @available(OSX 10.10, iOS 8.0, *) + public func async(execute workItem: DispatchWorkItem) { + // _swift_dispatch_{group,}_async preserves the @convention(block) + // for work item blocks. + if let g = workItem._group { + dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) + } else { + dispatch_async(self.__wrapped, workItem._block) + } + } + + public func async(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + if group == nil && qos == .unspecified && flags.isEmpty { + // Fast-path route for the most common API usage + dispatch_async(self.__wrapped, work) + return + } + + if #available(OSX 10.10, iOS 8.0, *), (qos != .unspecified || !flags.isEmpty) { + let workItem = DispatchWorkItem(qos: qos, flags: flags, block: work) + if let g = group { + dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) + } else { + dispatch_async(self.__wrapped, workItem._block) + } + } else { + if let g = group { + dispatch_group_async(g.__wrapped, self.__wrapped, work) + } else { + dispatch_async(self.__wrapped, work) + } + } + } + + private func _syncBarrier(block: @noescape () -> ()) { + dispatch_barrier_sync(self.__wrapped, block) + } + + private func _syncHelper( + fn: (@noescape () -> ()) -> (), + execute work: @noescape () throws -> T, + rescue: ((Swift.Error) throws -> (T))) rethrows -> T + { + var result: T? + var error: Swift.Error? + fn { + do { + result = try work() + } catch let e { + error = e + } + } + if let e = error { + return try rescue(e) + } else { + return result! + } + } + + @available(OSX 10.10, iOS 8.0, *) + private func _syncHelper( + fn: (DispatchWorkItem) -> (), + flags: DispatchWorkItemFlags, + execute work: @noescape () throws -> T, + rescue: ((Swift.Error) throws -> (T))) rethrows -> T + { + var result: T? + var error: Swift.Error? + let workItem = DispatchWorkItem(flags: flags, noescapeBlock: { + do { + result = try work() + } catch let e { + error = e + } + }) + fn(workItem) + if let e = error { + return try rescue(e) + } else { + return result! 
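	// Putting the async overloads above together (performWork() and
	// allWorkFinished() are illustrative):
	//
	//   let group = DispatchGroup()
	//   queue.async(group: group, qos: .utility) { performWork() }
	//   group.notify(queue: .main) { allWorkFinished() }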
+ } + } + + public func sync(execute work: @noescape () throws -> T) rethrows -> T { + return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) + } + + public func sync(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { + if flags == .barrier { + return try self._syncHelper(fn: _syncBarrier, execute: work, rescue: { throw $0 }) + } else if #available(OSX 10.10, iOS 8.0, *), !flags.isEmpty { + return try self._syncHelper(fn: sync, flags: flags, execute: work, rescue: { throw $0 }) + } else { + return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) + } + } + + public func after(when: DispatchTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: work) + dispatch_after(when.rawValue, self.__wrapped, item._block) + } else { + dispatch_after(when.rawValue, self.__wrapped, work) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func after(when: DispatchTime, execute: DispatchWorkItem) { + dispatch_after(when.rawValue, self.__wrapped, execute._block) + } + + public func after(walltime when: DispatchWallTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: work) + dispatch_after(when.rawValue, self.__wrapped, item._block) + } else { + dispatch_after(when.rawValue, self.__wrapped, work) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func after(walltime when: DispatchWallTime, execute: DispatchWorkItem) { + dispatch_after(when.rawValue, self.__wrapped, execute._block) + } + + @available(OSX 10.10, iOS 8.0, *) + public var qos: DispatchQoS { + var relPri: Int32 = 0 + let cls = DispatchQoS.QoSClass(qosClass: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)! + return DispatchQoS(qosClass: cls, relativePriority: Int(relPri)) + } + + public func getSpecific(key: DispatchSpecificKey) -> T? 
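	// The rethrowing sync overloads above return a value directly, and
	// after(when:) composes with the DispatchTime arithmetic defined in
	// Time.swift of this patch; a sketch (cachedValue and fire() are
	// illustrative):
	//
	//   let value: Int = queue.sync { cachedValue }
	//   queue.after(when: .now() + 1.0) { fire() }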
{ + let k = Unmanaged.passUnretained(key).toOpaque() + if let p = dispatch_queue_get_specific(self.__wrapped, k) { + let v = Unmanaged<_DispatchSpecificValue> + .fromOpaque(p) + .takeUnretainedValue() + return v.value + } + return nil + } + + public func setSpecific(key: DispatchSpecificKey, value: T) { + let v = _DispatchSpecificValue(value: value) + let k = Unmanaged.passUnretained(key).toOpaque() + let p = Unmanaged.passRetained(v).toOpaque() + dispatch_queue_set_specific(self.__wrapped, k, p, _destructDispatchSpecificValue) + } +} + +extension DispatchQueue { + @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") + public func synchronously(execute work: @noescape () -> ()) { + sync(execute: work) + } + + @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.sync(self:execute:)") + @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.sync(self:execute:)") + @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") + public func synchronously(execute workItem: DispatchWorkItem) { + sync(execute: workItem) + } + + @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.async(self:execute:)") + @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.async(self:execute:)") + @available(*, deprecated, renamed: "DispatchQueue.async(self:execute:)") + public func asynchronously(execute workItem: DispatchWorkItem) { + async(execute: workItem) + } + + @available(*, deprecated, renamed: "DispatchQueue.async(self:group:qos:flags:execute:)") + public func asynchronously(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + async(group: group, qos: qos, flags: flags, execute: work) + } + + @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") + public func synchronously(execute work: @noescape () throws -> T) rethrows -> T { + return try sync(execute: work) + } + + @available(*, deprecated, renamed: "DispatchQueue.sync(self:flags:execute:)") + public func synchronously(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { + return try sync(flags: flags, execute: work) + } + + @available(*, deprecated, renamed: "DispatchQueue.concurrentPerform(iterations:execute:)") + public func apply(applier iterations: Int, execute block: @noescape (Int) -> Void) { + DispatchQueue.concurrentPerform(iterations: iterations, execute: block) + } + + @available(*, deprecated, renamed: "DispatchQueue.setTarget(self:queue:)") + public func setTargetQueue(queue: DispatchQueue) { + self.setTarget(queue: queue) + } +} + +private func _destructDispatchSpecificValue(ptr: UnsafeMutablePointer?) 
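// Queue-specific storage round trip for the API above (the key is typed
// to the stored value; "worker" is illustrative):
//
//   let key = DispatchSpecificKey<String>()
//   queue.setSpecific(key: key, value: "worker")
//   let tag = queue.getSpecific(key: key)            // Optional("worker")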
{ + if let p = ptr { + Unmanaged.fromOpaque(p).release() + } +} + +@_silgen_name("_swift_dispatch_queue_concurrent") +internal func _swift_dispatch_queue_concurrent() -> dispatch_queue_attr_t + +@_silgen_name("_swift_dispatch_get_main_queue") +internal func _swift_dispatch_get_main_queue() -> dispatch_queue_t + +@_silgen_name("_swift_dispatch_apply_current_root_queue") +internal func _swift_dispatch_apply_current_root_queue() -> dispatch_queue_t + +@_silgen_name("_swift_dispatch_apply_current") +internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) @noescape (Int) -> Void) diff --git a/src/swift/Source.swift b/src/swift/Source.swift new file mode 100644 index 0000000..2830f01 --- /dev/null +++ b/src/swift/Source.swift @@ -0,0 +1,425 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public extension DispatchSourceType { + + public func setEventHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { + if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: h) + CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, item._block) + } else { + CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, handler) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func setEventHandler(handler: DispatchWorkItem) { + CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, handler._block) + } + + public func setCancelHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { + if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: h) + CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, item._block) + } else { + CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, handler) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func setCancelHandler(handler: DispatchWorkItem) { + CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, handler._block) + } + + public func setRegistrationHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { + if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: h) + CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, item._block) + } else { + CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, handler) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func setRegistrationHandler(handler: DispatchWorkItem) { + CDispatch.dispatch_source_set_registration_handler((self as! 
DispatchSource).__wrapped, handler._block) + } + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public func activate() { + (self as! DispatchSource).activate() + } + + public func cancel() { + CDispatch.dispatch_source_cancel((self as! DispatchSource).__wrapped) + } + + public func resume() { + (self as! DispatchSource).resume() + } + + public func suspend() { + (self as! DispatchSource).suspend() + } + + public var handle: UInt { + return CDispatch.dispatch_source_get_handle((self as! DispatchSource).__wrapped) + } + + public var mask: UInt { + return CDispatch.dispatch_source_get_mask((self as! DispatchSource).__wrapped) + } + + public var data: UInt { + return CDispatch.dispatch_source_get_data((self as! DispatchSource).__wrapped) + } + + public var isCancelled: Bool { + return CDispatch.dispatch_source_testcancel((self as! DispatchSource).__wrapped) != 0 + } +} + +public extension DispatchSource { +#if HAVE_MACH + public struct MachSendEvent : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let dead = MachSendEvent(rawValue: 0x1) + } +#endif + +#if HAVE_MACH + public struct MemoryPressureEvent : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let normal = MemoryPressureEvent(rawValue: 0x1) + public static let warning = MemoryPressureEvent(rawValue: 0x2) + public static let critical = MemoryPressureEvent(rawValue: 0x4) + public static let all: MemoryPressureEvent = [.normal, .warning, .critical] + } +#endif + +#if !os(Linux) + public struct ProcessEvent : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let exit = ProcessEvent(rawValue: 0x80000000) + public static let fork = ProcessEvent(rawValue: 0x40000000) + public static let exec = ProcessEvent(rawValue: 0x20000000) + public static let signal = ProcessEvent(rawValue: 0x08000000) + public static let all: ProcessEvent = [.exit, .fork, .exec, .signal] + } +#endif + + public struct TimerFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let strict = TimerFlags(rawValue: 1) + } + + public struct FileSystemEvent : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let delete = FileSystemEvent(rawValue: 0x1) + public static let write = FileSystemEvent(rawValue: 0x2) + public static let extend = FileSystemEvent(rawValue: 0x4) + public static let attrib = FileSystemEvent(rawValue: 0x8) + public static let link = FileSystemEvent(rawValue: 0x10) + public static let rename = FileSystemEvent(rawValue: 0x20) + public static let revoke = FileSystemEvent(rawValue: 0x40) + public static let funlock = FileSystemEvent(rawValue: 0x100) + + public static let all: FileSystemEvent = [ + .delete, .write, .extend, .attrib, .link, .rename, .revoke] + } + +#if HAVE_MACH + public class func machSend(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend { + let source = dispatch_source_create(_swift_dispatch_source_type_mach_send(), UInt(port), eventMask.rawValue, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceMachSend + } +#endif + +#if HAVE_MACH + public class func machReceive(port: mach_port_t, queue: DispatchQueue? 
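	// A minimal sketch of the factory/handler pattern shared by the source
	// constructors below (fd from open(2); consume() is illustrative):
	//
	//   let source = DispatchSource.read(fileDescriptor: fd, queue: .main)
	//   source.setEventHandler { consume(source.data) }
	//   source.resume()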
= nil) -> DispatchSourceMachReceive { + let source = dispatch_source_create(_swift_dispatch_source_type_mach_recv(), UInt(port), 0, queue?.__wrapped) + return DispatchSource(source) as DispatchSourceMachReceive + } +#endif + +#if HAVE_MACH + public class func memoryPressure(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure { + let source = dispatch_source_create(_swift_dispatch_source_type_memorypressure(), 0, eventMask.rawValue, queue.__wrapped) + return DispatchSourceMemoryPressure(source) + } +#endif + +#if !os(Linux) + public class func process(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess { + let source = dispatch_source_create(_swift_dispatch_source_type_proc(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceProcess + } +#endif + + public class func read(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { + let source = dispatch_source_create(_swift_dispatch_source_type_read(), UInt(fileDescriptor), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceRead + } + + public class func signal(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal { + let source = dispatch_source_create(_swift_dispatch_source_type_signal(), UInt(signal), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceSignal + } + + public class func timer(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer { + let source = dispatch_source_create(_swift_dispatch_source_type_timer(), 0, flags.rawValue, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceTimer + } + + public class func userDataAdd(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd { + let source = dispatch_source_create(_swift_dispatch_source_type_data_add(), 0, 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceUserDataAdd + } + + public class func userDataOr(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr { + let source = dispatch_source_create(_swift_dispatch_source_type_data_or(), 0, 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceUserDataOr + } + +#if !os(Linux) + public class func fileSystemObject(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { + let source = dispatch_source_create(_swift_dispatch_source_type_vnode(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceFileSystemObject + } +#endif + + public class func write(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite { + let source = dispatch_source_create(_swift_dispatch_source_type_write(), UInt(fileDescriptor), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceWrite + } +} + +#if HAVE_MACH +public extension DispatchSourceMachSend { + public var handle: mach_port_t { + return mach_port_t(dispatch_source_get_handle(self as! DispatchSource)) + } + + public var data: DispatchSource.MachSendEvent { + let data = dispatch_source_get_data(self as! DispatchSource) + return DispatchSource.MachSendEvent(rawValue: data) + } + + public var mask: DispatchSource.MachSendEvent { + let mask = dispatch_source_get_mask(self as! 
+
+#if HAVE_MACH
+public extension DispatchSourceMachSend {
+    public var handle: mach_port_t {
+        return mach_port_t(dispatch_source_get_handle((self as! DispatchSource).__wrapped))
+    }
+
+    public var data: DispatchSource.MachSendEvent {
+        let data = dispatch_source_get_data((self as! DispatchSource).__wrapped)
+        return DispatchSource.MachSendEvent(rawValue: data)
+    }
+
+    public var mask: DispatchSource.MachSendEvent {
+        let mask = dispatch_source_get_mask((self as! DispatchSource).__wrapped)
+        return DispatchSource.MachSendEvent(rawValue: mask)
+    }
+}
+#endif
+
+#if HAVE_MACH
+public extension DispatchSourceMachReceive {
+    public var handle: mach_port_t {
+        return mach_port_t(dispatch_source_get_handle((self as! DispatchSource).__wrapped))
+    }
+}
+#endif
+
+#if HAVE_MACH
+public extension DispatchSourceMemoryPressure {
+    public var data: DispatchSource.MemoryPressureEvent {
+        let data = dispatch_source_get_data((self as! DispatchSource).__wrapped)
+        return DispatchSource.MemoryPressureEvent(rawValue: data)
+    }
+
+    public var mask: DispatchSource.MemoryPressureEvent {
+        let mask = dispatch_source_get_mask((self as! DispatchSource).__wrapped)
+        return DispatchSource.MemoryPressureEvent(rawValue: mask)
+    }
+}
+#endif
+
+#if !os(Linux)
+public extension DispatchSourceProcess {
+    public var handle: pid_t {
+        return pid_t(dispatch_source_get_handle((self as! DispatchSource).__wrapped))
+    }
+
+    public var data: DispatchSource.ProcessEvent {
+        let data = dispatch_source_get_data((self as! DispatchSource).__wrapped)
+        return DispatchSource.ProcessEvent(rawValue: data)
+    }
+
+    public var mask: DispatchSource.ProcessEvent {
+        let mask = dispatch_source_get_mask((self as! DispatchSource).__wrapped)
+        return DispatchSource.ProcessEvent(rawValue: mask)
+    }
+}
+#endif
+
+public extension DispatchSourceTimer {
+    public func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, ~0, UInt64(leeway.rawValue))
+    }
+
+    public func scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, ~0, UInt64(leeway.rawValue))
+    }
+
+    public func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.rawValue, UInt64(leeway.rawValue))
+    }
+
+    public func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
+    }
+
+    public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.rawValue, UInt64(leeway.rawValue))
+    }
+
+    public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue))
+    }
+}
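[Usage sketch, not part of the patch: pairing the timer factory with the scheduling methods above. `.now() + .seconds(1)` relies on the DispatchTime operators that Time.swift defines later in this patch.]

    import Dispatch

    let timer = DispatchSource.timer(flags: [.strict])
    // Fire every 250ms starting one second from now, allowing 10ms of leeway.
    timer.scheduleRepeating(deadline: .now() + .seconds(1),
                            interval: .milliseconds(250),
                            leeway: .milliseconds(10))
    timer.setEventHandler { print("tick") }
    timer.resume()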
+
+public extension DispatchSourceTimer {
+    @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:deadline:leeway:)")
+    public func setTimer(start: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        scheduleOneshot(deadline: start, leeway: leeway)
+    }
+
+    @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:wallDeadline:leeway:)")
+    public func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        scheduleOneshot(wallDeadline: start, leeway: leeway)
+    }
+
+    @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)")
+    public func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        scheduleRepeating(deadline: start, interval: interval, leeway: leeway)
+    }
+
+    @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)")
+    public func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        scheduleRepeating(deadline: start, interval: interval, leeway: leeway)
+    }
+
+    @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)")
+    public func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway)
+    }
+
+    @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)")
+    public func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+        scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway)
+    }
+}
+
+#if !os(Linux)
+public extension DispatchSourceFileSystemObject {
+    public var handle: Int32 {
+        return Int32(dispatch_source_get_handle((self as! DispatchSource).__wrapped))
+    }
+
+    public var data: DispatchSource.FileSystemEvent {
+        let data = dispatch_source_get_data((self as! DispatchSource).__wrapped)
+        return DispatchSource.FileSystemEvent(rawValue: data)
+    }
+
+    public var mask: DispatchSource.FileSystemEvent {
+        let data = dispatch_source_get_mask((self as! DispatchSource).__wrapped)
+        return DispatchSource.FileSystemEvent(rawValue: data)
+    }
+}
+#endif
+
+public extension DispatchSourceUserDataAdd {
+    /// @function mergeData
+    ///
+    /// @abstract
+    /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or
+    /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its
+    /// target queue.
+    ///
+    /// @param value
+    /// The value to coalesce with the pending data using a logical OR or an ADD
+    /// as specified by the dispatch source type. A value of zero has no effect
+    /// and will not result in the submission of the event handler block.
+    public func mergeData(value: UInt) {
+        dispatch_source_merge_data((self as! DispatchSource).__wrapped, value)
+    }
+}
+
+public extension DispatchSourceUserDataOr {
+#if false /*FIXME: clashes with UserDataAdd?? */
+    /// @function mergeData
+    ///
+    /// @abstract
+    /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or
+    /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its
+    /// target queue.
+    ///
+    /// @param value
+    /// The value to coalesce with the pending data using a logical OR or an ADD
+    /// as specified by the dispatch source type. A value of zero has no effect
+    /// and will not result in the submission of the event handler block.
+    public func mergeData(value: UInt) {
+        dispatch_source_merge_data((self as! DispatchSource).__wrapped, value)
+    }
+#endif
+}
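[Usage sketch, not part of the patch: driving a DISPATCH_SOURCE_TYPE_DATA_ADD source with mergeData(value:), per the doc comment above. Merges that arrive while the handler is pending are coalesced (added) into a single `data` value.]

    import Dispatch

    let adder = DispatchSource.userDataAdd()
    adder.setEventHandler {
        // `data` is the sum of all values merged since the
        // previous handler invocation.
        print("accumulated: \(adder.data)")
    }
    adder.resume()
    adder.mergeData(value: 2)  // a value of zero would be a no-op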
+
+@_silgen_name("_swift_dispatch_source_type_DATA_ADD")
+internal func _swift_dispatch_source_type_data_add() -> dispatch_source_type_t
+
+@_silgen_name("_swift_dispatch_source_type_DATA_OR")
+internal func _swift_dispatch_source_type_data_or() -> dispatch_source_type_t
+
+#if HAVE_MACH
+@_silgen_name("_swift_dispatch_source_type_MACH_SEND")
+internal func _swift_dispatch_source_type_mach_send() -> dispatch_source_type_t
+
+@_silgen_name("_swift_dispatch_source_type_MACH_RECV")
+internal func _swift_dispatch_source_type_mach_recv() -> dispatch_source_type_t
+
+@_silgen_name("_swift_dispatch_source_type_MEMORYPRESSURE")
+internal func _swift_dispatch_source_type_memorypressure() -> dispatch_source_type_t
+#endif
+
+#if !os(Linux)
+@_silgen_name("_swift_dispatch_source_type_PROC")
+internal func _swift_dispatch_source_type_proc() -> dispatch_source_type_t
+#endif
+
+@_silgen_name("_swift_dispatch_source_type_READ")
+internal func _swift_dispatch_source_type_read() -> dispatch_source_type_t
+
+@_silgen_name("_swift_dispatch_source_type_SIGNAL")
+internal func _swift_dispatch_source_type_signal() -> dispatch_source_type_t
+
+@_silgen_name("_swift_dispatch_source_type_TIMER")
+internal func _swift_dispatch_source_type_timer() -> dispatch_source_type_t
+
+#if !os(Linux)
+@_silgen_name("_swift_dispatch_source_type_VNODE")
+internal func _swift_dispatch_source_type_vnode() -> dispatch_source_type_t
+#endif
+
+@_silgen_name("_swift_dispatch_source_type_WRITE")
+internal func _swift_dispatch_source_type_write() -> dispatch_source_type_t
diff --git a/src/swift/Time.swift b/src/swift/Time.swift
new file mode 100644
index 0000000..76a6979
--- /dev/null
+++ b/src/swift/Time.swift
@@ -0,0 +1,110 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+// dispatch/time.h
+// DISPATCH_TIME_NOW: ok
+// DISPATCH_TIME_FOREVER: ok
+
+import CDispatch
+
+public struct DispatchTime {
+    public let rawValue: dispatch_time_t
+
+    public static func now() -> DispatchTime {
+        let t = CDispatch.dispatch_time(0, 0)
+        return DispatchTime(rawValue: t)
+    }
+
+    public static let distantFuture = DispatchTime(rawValue: ~0)
+
+    private init(rawValue: dispatch_time_t) {
+        self.rawValue = rawValue
+    }
+}
+
+public struct DispatchWallTime {
+    public let rawValue: dispatch_time_t
+
+    public static func now() -> DispatchWallTime {
+        return DispatchWallTime(rawValue: CDispatch.dispatch_walltime(nil, 0))
+    }
+
+    public static let distantFuture = DispatchWallTime(rawValue: ~0)
+
+    private init(rawValue: dispatch_time_t) {
+        self.rawValue = rawValue
+    }
+
+    public init(time: timespec) {
+        var t = time
+        self.rawValue = CDispatch.dispatch_walltime(&t, 0)
+    }
+}
+
+@available(*, deprecated, renamed: "DispatchWallTime")
+public typealias DispatchWalltime = DispatchWallTime
+
+public enum DispatchTimeInterval {
+    case seconds(Int)
+    case milliseconds(Int)
+    case microseconds(Int)
+    case nanoseconds(Int)
+
+    internal var rawValue: UInt64 {
+        switch self {
+        case .seconds(let s): return UInt64(s) * NSEC_PER_SEC
+        case .milliseconds(let ms): return UInt64(ms) * NSEC_PER_MSEC
+        case .microseconds(let us): return UInt64(us) * NSEC_PER_USEC
+        case .nanoseconds(let ns): return UInt64(ns)
+        }
+    }
+}
+
+public func +(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime {
+    let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue))
+    return DispatchTime(rawValue: t)
+}
+
+public func -(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime {
+    let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue))
+    return DispatchTime(rawValue: t)
+}
+
+public func +(time: DispatchTime, seconds: Double) -> DispatchTime {
+    let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC)))
+    return DispatchTime(rawValue: t)
+}
+
+public func -(time: DispatchTime, seconds: Double) -> DispatchTime {
+    let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC)))
+    return DispatchTime(rawValue: t)
+}
+
+public func +(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime {
+    let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue))
+    return DispatchWallTime(rawValue: t)
+}
+
+public func -(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime {
+    let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue))
+    return DispatchWallTime(rawValue: t)
+}
+
+public func +(time: DispatchWallTime, seconds: Double) -> DispatchWallTime {
+    let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC)))
+    return DispatchWallTime(rawValue: t)
+}
+
+public func -(time: DispatchWallTime, seconds: Double) -> DispatchWallTime {
+    let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC)))
+    return DispatchWallTime(rawValue: t)
+}
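[Usage sketch, not part of the patch: composing deadlines with the operators above. DispatchTimeInterval cases convert to whole nanoseconds; the Double overloads interpret the operand as seconds.]

    import Dispatch

    let deadline = DispatchTime.now() + .milliseconds(500)  // monotonic host clock
    let wallDeadline = DispatchWallTime.now() + 2.5         // wall clock, 2.5 seconds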
diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift
new file mode 100644
index 0000000..d38bb93
--- /dev/null
+++ b/src/swift/Wrapper.swift
@@ -0,0 +1,319 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+import CDispatch
+
+// This file contains declarations that are provided by the
+// importer via Dispatch.apinotes when the platform has Objective-C support
+
+public class DispatchObject {
+
+    internal func wrapped() -> dispatch_object_t {
+        fatalError("should be overridden in subclass")
+    }
+
+    public func setTarget(queue:DispatchQueue) {
+        dispatch_set_target_queue(wrapped(), queue.__wrapped)
+    }
+
+    public func activate() {
+        dispatch_activate(wrapped())
+    }
+
+    public func suspend() {
+        dispatch_suspend(wrapped())
+    }
+
+    public func resume() {
+        dispatch_resume(wrapped())
+    }
+}
+
+
+public class DispatchGroup : DispatchObject {
+    internal let __wrapped:dispatch_group_t;
+
+    final internal override func wrapped() -> dispatch_object_t {
+        return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+    }
+
+    public override init() {
+        __wrapped = dispatch_group_create()
+    }
+
+    deinit {
+        _swift_dispatch_release(wrapped())
+    }
+
+    public func enter() {
+        dispatch_group_enter(__wrapped)
+    }
+
+    public func leave() {
+        dispatch_group_leave(__wrapped)
+    }
+}
+
+public class DispatchSemaphore : DispatchObject {
+    internal let __wrapped: dispatch_semaphore_t;
+
+    final internal override func wrapped() -> dispatch_object_t {
+        return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+    }
+
+    public init(value: Int) {
+        __wrapped = dispatch_semaphore_create(value)
+    }
+
+    deinit {
+        _swift_dispatch_release(wrapped())
+    }
+}
+
+public class DispatchIO : DispatchObject {
+    internal let __wrapped:dispatch_io_t
+
+    final internal override func wrapped() -> dispatch_object_t {
+        return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+    }
+
+    internal init(__type: UInt, fd: Int32, queue: DispatchQueue,
+                  handler: (error: Int32) -> Void) {
+        __wrapped = dispatch_io_create(__type, fd, queue.__wrapped, handler)
+    }
+
+    internal init(__type: UInt, path: UnsafePointer<Int8>, oflag: Int32,
+                  mode: mode_t, queue: DispatchQueue, handler: (error: Int32) -> Void) {
+        __wrapped = dispatch_io_create_with_path(__type, path, oflag, mode, queue.__wrapped, handler)
+    }
+
+    internal init(__type: UInt, io: DispatchIO,
+                  queue: DispatchQueue, handler: (error: Int32) -> Void) {
+        __wrapped = dispatch_io_create_with_io(__type, io.__wrapped, queue.__wrapped, handler)
+    }
+
+    internal init(queue:dispatch_queue_t) {
+        __wrapped = queue
+    }
+
+    deinit {
+        _swift_dispatch_release(wrapped())
+    }
+
+    public func barrier(execute: () -> ()) {
+        dispatch_io_barrier(self.__wrapped, execute)
+    }
+
+    public var fileDescriptor: Int32 {
+        return dispatch_io_get_descriptor(__wrapped)
+    }
+
+    public func setLimit(highWater: Int) {
+        dispatch_io_set_high_water(__wrapped, highWater)
+    }
+
+    public func setLimit(lowWater: Int) {
+        dispatch_io_set_low_water(__wrapped, lowWater)
+    }
+}
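[Usage sketch, not part of the patch: manual DispatchGroup accounting with the enter()/leave() pair defined above. Every enter() must eventually be balanced by exactly one leave(), or anything waiting on the group blocks forever.]

    import Dispatch

    let group = DispatchGroup()
    group.enter()      // account for one unit of in-flight work
    // ... hand `group` to the asynchronous work ...
    group.leave()      // the work calls this exactly once when it finishes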
+
+public class DispatchQueue : DispatchObject {
+    internal let __wrapped:dispatch_queue_t;
+
+    final internal override func wrapped() -> dispatch_object_t {
+        return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+    }
+
+    internal init(__label: String, attr: dispatch_queue_attr_t?) {
+        __wrapped = dispatch_queue_create(__label, attr)
+    }
+
+    internal init(__label: String, attr: dispatch_queue_attr_t?, queue: DispatchQueue?) {
+        __wrapped = dispatch_queue_create_with_target(__label, attr, queue?.__wrapped)
+    }
+
+    internal init(queue:dispatch_queue_t) {
+        __wrapped = queue
+    }
+
+    deinit {
+        _swift_dispatch_release(wrapped())
+    }
+
+    public func sync(execute workItem: @noescape ()->()) {
+        dispatch_sync(self.__wrapped, workItem)
+    }
+}
+
+public class DispatchSource : DispatchObject,
+    DispatchSourceType, DispatchSourceRead,
+    DispatchSourceSignal, DispatchSourceTimer,
+    DispatchSourceUserDataAdd, DispatchSourceUserDataOr,
+    DispatchSourceWrite {
+    internal let __wrapped:dispatch_source_t
+
+    final internal override func wrapped() -> dispatch_object_t {
+        return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+    }
+
+    internal init(source:dispatch_source_t) {
+        __wrapped = source
+    }
+
+    deinit {
+        _swift_dispatch_release(wrapped())
+    }
+}
+
+#if HAVE_MACH
+extension DispatchSource : DispatchSourceMachSend,
+    DispatchSourceMachReceive, DispatchSourceMemoryPressure {
+}
+#endif
+
+#if !os(Linux)
+extension DispatchSource : DispatchSourceProcess,
+    DispatchSourceFileSystemObject {
+}
+#endif
+
+public typealias DispatchSourceHandler = @convention(block) () -> Void
+
+public protocol DispatchSourceType {
+    func setEventHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?)
+
+    func setEventHandler(handler: DispatchWorkItem)
+
+    func setCancelHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?)
+
+    func setCancelHandler(handler: DispatchWorkItem)
+
+    func setRegistrationHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?)
+
+    func setRegistrationHandler(handler: DispatchWorkItem)
+
+    func cancel()
+
+    func resume()
+
+    func suspend()
+
+    var handle: UInt { get }
+
+    var mask: UInt { get }
+
+    var data: UInt { get }
+
+    var isCancelled: Bool { get }
+}
+
+public protocol DispatchSourceUserDataAdd : DispatchSourceType {
+    func mergeData(value: UInt)
+}
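[Usage sketch, not part of the patch: the common ownership pattern for descriptor-backed sources using the cancel()/setCancelHandler requirements above. Assumes POSIX close() from Glibc/Darwin and the handler-setter default arguments supplied by the concrete class; `fd` stands for any open descriptor.]

    import Dispatch
    #if os(Linux)
    import Glibc
    #else
    import Darwin
    #endif

    func watch(fd: Int32) -> DispatchSourceRead {
        let source = DispatchSource.read(fileDescriptor: fd)
        source.setCancelHandler {
            // Runs only after the final event handler invocation returns,
            // so this is the safe point to release the descriptor.
            close(fd)
        }
        source.resume()
        return source
    }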
+
+public protocol DispatchSourceUserDataOr {
+#if false /*FIXME: clashes with UserDataAdd?? */
+    func mergeData(value: UInt)
+#endif
+}
+
+#if HAVE_MACH
+public protocol DispatchSourceMachSend : DispatchSourceType {
+    var handle: mach_port_t { get }
+
+    var data: DispatchSource.MachSendEvent { get }
+
+    var mask: DispatchSource.MachSendEvent { get }
+}
+#endif
+
+#if HAVE_MACH
+public protocol DispatchSourceMachReceive : DispatchSourceType {
+    var handle: mach_port_t { get }
+}
+#endif
+
+#if HAVE_MACH
+public protocol DispatchSourceMemoryPressure : DispatchSourceType {
+    var data: DispatchSource.MemoryPressureEvent { get }
+
+    var mask: DispatchSource.MemoryPressureEvent { get }
+}
+#endif
+
+#if !os(Linux)
+public protocol DispatchSourceProcess : DispatchSourceType {
+    var handle: pid_t { get }
+
+    var data: DispatchSource.ProcessEvent { get }
+
+    var mask: DispatchSource.ProcessEvent { get }
+}
+#endif
+
+public protocol DispatchSourceRead : DispatchSourceType {
+}
+
+public protocol DispatchSourceSignal : DispatchSourceType {
+}
+
+public protocol DispatchSourceTimer : DispatchSourceType {
+    func setTimer(start: DispatchTime, leeway: DispatchTimeInterval)
+
+    func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval)
+
+    func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
+
+    func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval)
+
+    func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
+
+    func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval)
+}
+
+#if !os(Linux)
+public protocol DispatchSourceFileSystemObject : DispatchSourceType {
+    var handle: Int32 { get }
+
+    var data: DispatchSource.FileSystemEvent { get }
+
+    var mask: DispatchSource.FileSystemEvent { get }
+}
+#endif
+
+public protocol DispatchSourceWrite : DispatchSourceType {
+}
+
+
+internal enum _OSQoSClass : UInt32 {
+    case QOS_CLASS_USER_INTERACTIVE = 0x21
+    case QOS_CLASS_USER_INITIATED = 0x19
+    case QOS_CLASS_DEFAULT = 0x15
+    case QOS_CLASS_UTILITY = 0x11
+    case QOS_CLASS_BACKGROUND = 0x09
+    case QOS_CLASS_UNSPECIFIED = 0x00
+
+    internal init?(qosClass:dispatch_qos_class_t) {
+        switch qosClass {
+        case 0x21: self = .QOS_CLASS_USER_INTERACTIVE
+        case 0x19: self = .QOS_CLASS_USER_INITIATED
+        case 0x15: self = .QOS_CLASS_DEFAULT
+        case 0x11: self = .QOS_CLASS_UTILITY
+        case 0x09: self = .QOS_CLASS_BACKGROUND
+        case 0x00: self = .QOS_CLASS_UNSPECIFIED
+        default: return nil
+        }
+    }
+}
+
+@_silgen_name("_swift_dispatch_release")
+internal func _swift_dispatch_release(_ obj: dispatch_object_t) -> Void
diff --git a/src/time.c b/src/time.c
index 35b0e52..6d00831 100644
--- a/src/time.c
+++ b/src/time.c
@@ -145,3 +145,16 @@ _dispatch_timeout(dispatch_time_t when)
 	now = _dispatch_absolute_time();
 	return now >= when ?
0 : _dispatch_time_mach2nano(when - now); } + +uint64_t +_dispatch_time_nanoseconds_since_epoch(dispatch_time_t when) +{ + if (when == DISPATCH_TIME_FOREVER) { + return DISPATCH_TIME_FOREVER; + } + if ((int64_t)when < 0) { + // time in nanoseconds since the POSIX epoch already + return (uint64_t)-(int64_t)when; + } + return _dispatch_get_nanoseconds() + _dispatch_timeout(when); +} diff --git a/src/trace.h b/src/trace.h index ebab27c..d73ff3f 100644 --- a/src/trace.h +++ b/src/trace.h @@ -27,7 +27,7 @@ #ifndef __DISPATCH_TRACE__ #define __DISPATCH_TRACE__ -#if !__OBJC2__ && !defined(__cplusplus) +#if DISPATCH_PURE_C #if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION typedef struct dispatch_trace_timer_params_s { @@ -92,12 +92,13 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) char *_kind; \ dispatch_function_t _func; \ void *_ctxt; \ - if (DISPATCH_OBJ_IS_VTABLE(_do)) { \ + if (_dispatch_object_has_vtable(_do)) { \ _kind = (char*)dx_kind(_do); \ if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \ _DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \ dispatch_source_t _ds = (dispatch_source_t)_do; \ - _dc = _ds->ds_refs->ds_handler[DS_EVENT_HANDLER]; \ + _dc = os_atomic_load(&_ds->ds_refs->ds_handler[ \ + DS_EVENT_HANDLER], relaxed); \ _func = _dc ? _dc->dc_func : NULL; \ _ctxt = _dc ? _dc->dc_ctxt : NULL; \ } else { \ @@ -107,10 +108,10 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } else { \ _dc = (void*)_do; \ _ctxt = _dc->dc_ctxt; \ - if ((long)_dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ + if (_dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ _kind = "semaphore"; \ _func = (dispatch_function_t)dispatch_semaphore_signal; \ - } else if (_dc->dc_func == _dispatch_call_block_and_release) { \ + } else if (_dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { \ _kind = "block"; \ _func = _dispatch_Block_invoke(_dc->dc_ctxt); \ } else { \ @@ -145,27 +146,15 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail, pthread_priority_t pp) +_dispatch_trace_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push(dq, _tail, pp); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_trace_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, bool wakeup) -{ - if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { - struct dispatch_object_s *dou = _tail._do; - _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); - } - _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push_wakeup(dq, _tail, pp, wakeup); + _dispatch_queue_push_inline(dq, _tail, pp, flags); } DISPATCH_ALWAYS_INLINE @@ -179,16 +168,8 @@ _dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail) _dispatch_introspection_queue_push(dq, _tail); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou, pthread_priority_t pp) -{ - _dispatch_queue_push(dq, dou, pp); -} - #define _dispatch_queue_push_list _dispatch_trace_queue_push_list -#define _dispatch_queue_push _dispatch_trace_queue_push -#define 
_dispatch_queue_push_wakeup _dispatch_trace_queue_push_wakeup +#define _dispatch_queue_push_inline _dispatch_trace_queue_push_inline DISPATCH_ALWAYS_INLINE static inline void @@ -200,7 +181,6 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou) _dispatch_introspection_queue_pop(dq, dou); } #else -#define _dispatch_queue_push_notrace _dispatch_queue_push #define _dispatch_trace_continuation_push(dq, dou) \ do { (void)(dq); (void)(dou); } while(0) #define _dispatch_trace_continuation_pop(dq, dou) \ @@ -209,17 +189,11 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou) #if DISPATCH_USE_DTRACE static inline dispatch_function_t -_dispatch_trace_timer_function(dispatch_source_t ds, dispatch_source_refs_t dr) +_dispatch_trace_timer_function(dispatch_source_refs_t dr) { - dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER]; - dispatch_function_t func = dc ? dc->dc_func : NULL; - if (func == _dispatch_after_timer_callback && - !(ds->ds_atomic_flags & DSF_CANCELED)) { - dc = ds->do_ctxt; - func = dc->dc_func != _dispatch_call_block_and_release ? dc->dc_func : - dc->dc_ctxt ? _dispatch_Block_invoke(dc->dc_ctxt) : NULL; - } - return func; + dispatch_continuation_t dc; + dc = os_atomic_load(&dr->ds_handler[DS_EVENT_HANDLER], relaxed); + return dc ? dc->dc_func : NULL; } DISPATCH_ALWAYS_INLINE @@ -262,8 +236,8 @@ _dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident, struct dispatch_timer_source_s *values) { struct dispatch_trace_timer_params_s params; - DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds, - ds->ds_refs), _dispatch_trace_timer_params(ident, values, 0, + DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs), + _dispatch_trace_timer_params(ident, values, 0, ¶ms)); } @@ -275,7 +249,7 @@ _dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline) if (deadline && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); struct dispatch_trace_timer_params_s params; - DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(ds, dr), + DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr), _dispatch_trace_timer_params(ds->ds_ident_hack, &ds_timer(dr), deadline, ¶ms)); } @@ -289,7 +263,7 @@ _dispatch_trace_timer_wake(dispatch_source_refs_t dr) if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { if (dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); - DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(ds, dr)); + DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(dr)); } } } @@ -302,7 +276,7 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) { if (!(data - missed) && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); - DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(ds, dr)); + DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(dr)); } } } @@ -321,6 +295,6 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, #endif // DISPATCH_USE_DTRACE -#endif // !__OBJC2__ && !defined(__cplusplus) +#endif // DISPATCH_PURE_C #endif // __DISPATCH_TRACE__ diff --git a/src/transform.c b/src/transform.c index e6fa401..2c885ca 100644 --- a/src/transform.c +++ b/src/transform.c @@ -20,7 +20,17 @@ #include "internal.h" +#ifdef __APPLE__ #include +#elif __linux__ +#include +#define OSLittleEndian __LITTLE_ENDIAN +#define OSBigEndian __BIG_ENDIAN +#define OSSwapLittleToHostInt16 le16toh +#define OSSwapBigToHostInt16 be16toh +#define OSSwapHostToLittleInt16 
htole16 +#define OSSwapHostToBigInt16 htobe16 +#endif #if defined(__LITTLE_ENDIAN__) #define DISPATCH_DATA_FORMAT_TYPE_UTF16_HOST DISPATCH_DATA_FORMAT_TYPE_UTF16LE @@ -28,6 +38,8 @@ #elif defined(__BIG_ENDIAN__) #define DISPATCH_DATA_FORMAT_TYPE_UTF16_HOST DISPATCH_DATA_FORMAT_TYPE_UTF16BE #define DISPATCH_DATA_FORMAT_TYPE_UTF16_REV DISPATCH_DATA_FORMAT_TYPE_UTF16LE +#else +#error Unsupported Endianness #endif enum { @@ -103,16 +115,6 @@ typedef struct dispatch_transform_buffer_s { size_t size; } dispatch_transform_buffer_s; -static size_t -_dispatch_transform_sizet_mul(size_t a, size_t b) -{ - size_t rv = SIZE_MAX; - if (a == 0 || rv/a >= b) { - rv = a * b; - } - return rv; -} - #define BUFFER_MALLOC_MAX (100*1024*1024) static bool @@ -286,11 +288,13 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) DISPATCH_UNUSED dispatch_data_t region, size_t offset, const void *_buffer, size_t size) { const uint8_t *src = _buffer; - size_t i; + size_t i, dest_size; if (offset == 0) { - size_t dest_size = 2 + _dispatch_transform_sizet_mul(size, - sizeof(uint16_t)); + if (os_mul_and_add_overflow(size, sizeof(uint16_t), + sizeof(uint16_t), &dest_size)) { + return (bool)false; + } if (!_dispatch_transform_buffer_new(&buffer, dest_size, 0)) { return (bool)false; } @@ -312,6 +316,7 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) for (i = 0; i < size;) { uint32_t wch = 0; uint8_t byte_size = _dispatch_transform_utf8_length(*src); + size_t next; if (byte_size == 0) { return (bool)false; @@ -336,7 +341,9 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) i += byte_size; } - size_t next = _dispatch_transform_sizet_mul(size - i, sizeof(uint16_t)); + if (os_mul_overflow(size - i, sizeof(uint16_t), &next)) { + return (bool)false; + } if (wch >= 0xd800 && wch < 0xdfff) { // Illegal range (surrogate pair) return (bool)false; @@ -390,8 +397,8 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) const uint16_t *src = _buffer; if (offset == 0) { + size_t dest_size = howmany(size, 3) * 2; // Assume first buffer will be mostly single-byte UTF-8 sequences - size_t dest_size = _dispatch_transform_sizet_mul(size, 2) / 3; if (!_dispatch_transform_buffer_new(&buffer, dest_size, 0)) { return (bool)false; } @@ -418,6 +425,7 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) for (i = 0; i < max; i++) { uint32_t wch = 0; uint16_t ch; + size_t next; if ((i == (max - 1)) && (max > (size / 2))) { // Last byte of an odd sized range @@ -472,7 +480,9 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) wch = ch; } - size_t next = _dispatch_transform_sizet_mul(max - i, 2); + if (os_mul_overflow(max - i, 2, &next)) { + return (bool)false; + } if (wch < 0x80) { if (!_dispatch_transform_buffer_new(&buffer, 1, next)) { return (bool)false; @@ -554,8 +564,7 @@ _dispatch_transform_from_base32_with_table(dispatch_data_t data, bool success = dispatch_data_apply(data, ^( DISPATCH_UNUSED dispatch_data_t region, DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { - size_t i, dest_size = (size * 5) / 8; - + size_t i, dest_size = howmany(size, 8) * 5; uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); uint8_t *ptr = dest; if (dest == NULL) { @@ -632,18 +641,17 @@ _dispatch_transform_from_base32_with_table(dispatch_data_t data, static dispatch_data_t _dispatch_transform_to_base32_with_table(dispatch_data_t data, const unsigned char* table) { - size_t total = dispatch_data_get_size(data); + 
size_t total = dispatch_data_get_size(data), dest_size; __block size_t count = 0; - if (total > SIZE_T_MAX-4 || ((total+4)/5 > SIZE_T_MAX/8)) { - /* We can't hold larger than size_t in a dispatch_data_t - * and we want to avoid an integer overflow in the next - * calculation. - */ + dest_size = howmany(total, 5); + // + // os_mul_overflow(dest_size, 8, &dest_size) + if (dest_size > SIZE_T_MAX / 8) { return NULL; } + dest_size *= 8; - size_t dest_size = (total + 4) / 5 * 8; uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; @@ -799,7 +807,7 @@ _dispatch_transform_from_base64(dispatch_data_t data) bool success = dispatch_data_apply(data, ^( DISPATCH_UNUSED dispatch_data_t region, DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { - size_t i, dest_size = (size * 3) / 4; + size_t i, dest_size = howmany(size, 4) * 3; uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); uint8_t *ptr = dest; @@ -868,18 +876,17 @@ _dispatch_transform_to_base64(dispatch_data_t data) { // RFC 4648 states that we should not linebreak // http://tools.ietf.org/html/rfc4648 - size_t total = dispatch_data_get_size(data); + size_t total = dispatch_data_get_size(data), dest_size; __block size_t count = 0; - if (total > SIZE_T_MAX-2 || ((total+2)/3> SIZE_T_MAX/4)) { - /* We can't hold larger than size_t in a dispatch_data_t - * and we want to avoid an integer overflow in the next - * calculation. - */ + dest_size = howmany(total, 3); + // + // os_mul_overflow(dest_size, 4, &dest_size) + if (dest_size > SIZE_T_MAX / 4) { return NULL; } + dest_size *= 4; - size_t dest_size = (total + 2) / 3 * 4; uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; @@ -968,16 +975,16 @@ dispatch_data_create_with_transform(dispatch_data_t data, if (input->type == _DISPATCH_DATA_FORMAT_UTF_ANY) { input = _dispatch_transform_detect_utf(data); if (input == NULL) { - return NULL; + return DISPATCH_BAD_INPUT; } } if ((input->type & ~output->input_mask) != 0) { - return NULL; + return DISPATCH_BAD_INPUT; } if ((output->type & ~input->output_mask) != 0) { - return NULL; + return DISPATCH_BAD_INPUT; } if (dispatch_data_get_size(data) == 0) { @@ -993,7 +1000,7 @@ dispatch_data_create_with_transform(dispatch_data_t data, } if (!temp1) { - return NULL; + return DISPATCH_BAD_INPUT; } dispatch_data_t temp2; diff --git a/src/voucher.c b/src/voucher.c index 7489e77..94a2934 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Apple Inc. All rights reserved. + * Copyright (c) 2013-2016 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,91 +32,38 @@ #define PERSONA_ID_NONE ((uid_t)-1) #endif -#if VOUCHER_USE_MACH_VOUCHER - -#include -#include - -// -#ifndef VM_MEMORY_GENEALOGY -#define VM_MEMORY_GENEALOGY 78 -#endif +#if !DISPATCH_VARIANT_DYLD_STUB -#ifndef VOUCHER_ATM_COLLECT_THRESHOLD -#define VOUCHER_ATM_COLLECT_THRESHOLD 1 +#if VOUCHER_USE_MACH_VOUCHER +#if !HAVE_PTHREAD_WORKQUEUE_QOS +#error Unsupported configuration, workqueue QoS support is required #endif -#define VATM_COLLECT_THRESHOLD_VALUE(t) (((t) - 1) * 2) -static uint64_t volatile _voucher_atm_generation; +#include +#include -typedef struct _voucher_atm_s *_voucher_atm_t; +#define MACH_ACTIVITY_ID_RANGE_SIZE 16 +#define MACH_ACTIVITY_ID_MASK ((1ULL << FIREHOSE_ACTIVITY_ID_FLAGS_SHIFT) - 1) +#define FIREHOSE_ACTIVITY_ID_MAKE(aid, flags) \ + FIREHOSE_ACTIVITY_ID_MERGE_FLAGS((aid) & MACH_ACTIVITY_ID_MASK, flags) -static void _voucher_activity_atfork_child(void); -static _voucher_activity_t _voucher_activity_copy_from_mach_voucher( - mach_voucher_t kv, voucher_activity_id_t va_id); -static inline _voucher_activity_t _voucher_activity_retain( - _voucher_activity_t act); -static inline void _voucher_activity_release(_voucher_activity_t act); -static void _voucher_activity_remove(_voucher_activity_t act); -static inline _voucher_atm_t _voucher_atm_retain(_voucher_atm_t vatm); -static inline void _voucher_atm_release(_voucher_atm_t vatm); +static volatile uint64_t _voucher_aid_next; #pragma mark - #pragma mark voucher_t -#if USE_OBJC -OS_OBJECT_OBJC_CLASS_DECL(voucher); -#define VOUCHER_CLASS OS_OBJECT_OBJC_CLASS(voucher) -#else -const _os_object_class_s _voucher_class = { - ._os_obj_xref_dispose = (void(*)(_os_object_t))_voucher_xref_dispose, - ._os_obj_dispose = (void(*)(_os_object_t))_voucher_dispose, -}; -#define VOUCHER_CLASS &_voucher_class +OS_OBJECT_CLASS_DECL(voucher, object); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(voucher, + (void (*)(_os_object_t))_voucher_xref_dispose, + (void (*)(_os_object_t))_voucher_dispose); #endif // USE_OBJC - -static const voucher_activity_trace_id_t _voucher_activity_trace_id_release = - (voucher_activity_trace_id_t)voucher_activity_tracepoint_type_release << - _voucher_activity_trace_id_type_shift; -static const unsigned int _voucher_max_activities = 16; - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_recipes_init(mach_voucher_attr_recipe_data_t *recipes, - mach_voucher_attr_content_size_t bits_size) -{ - static const mach_voucher_attr_recipe_data_t base_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ALL, - .command = MACH_VOUCHER_ATTR_COPY, - }; - _voucher_recipes_base(recipes) = base_recipe; - static const mach_voucher_attr_recipe_data_t atm_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_COPY, - }; - _voucher_recipes_atm(recipes) = atm_recipe; - static const mach_voucher_attr_recipe_data_t bits_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, - .command = MACH_VOUCHER_ATTR_USER_DATA_STORE, - }; - _voucher_recipes_bits(recipes) = bits_recipe; - if (!bits_size) return; - _voucher_recipes_bits(recipes).content_size = bits_size; - *_voucher_recipes_magic(recipes) = _voucher_magic_v1; -} +#define VOUCHER_CLASS OS_OBJECT_VTABLE(voucher) static inline voucher_t -_voucher_alloc(unsigned int activities, pthread_priority_t priority, - mach_voucher_attr_recipe_size_t extra) +_voucher_alloc(mach_voucher_attr_recipe_size_t extra) { - if (activities > _voucher_max_activities) { - activities = _voucher_max_activities; - } voucher_t voucher; - size_t 
voucher_size, recipes_size; - mach_voucher_attr_content_size_t bits_size; - recipes_size = (priority||activities||extra) ? _voucher_recipes_size() : 0; - bits_size = recipes_size ? _voucher_bits_size(activities) : 0; - voucher_size = sizeof(voucher_s) + recipes_size + bits_size + extra; + size_t voucher_size = sizeof(voucher_s) + extra; voucher = (voucher_t)_os_object_alloc_realized(VOUCHER_CLASS, voucher_size); #if VOUCHER_ENABLE_RECIPE_OBJECTS voucher->v_recipe_extra_size = extra; @@ -124,11 +71,6 @@ _voucher_alloc(unsigned int activities, pthread_priority_t priority, #else dispatch_assert(!extra); #endif - voucher->v_has_priority = priority ? 1 : 0; - voucher->v_activities = activities; - if (!recipes_size) return voucher; - _voucher_recipes_init(voucher->v_recipes, bits_size); - *_voucher_priority(voucher) = (_voucher_priority_t)priority; _dispatch_voucher_debug("alloc", voucher); return voucher; } @@ -139,7 +81,7 @@ voucher_create(voucher_recipe_t recipe) { // TODO: capture current activities or current kvoucher ? mach_voucher_attr_recipe_size_t extra = recipe ? recipe->vr_size : 0; - voucher_t voucher = _voucher_alloc(0, 0, extra); + voucher_t voucher = _voucher_alloc(extra); if (extra) { memcpy(_voucher_extra_recipes(voucher), recipe->vr_data, extra); } @@ -147,9 +89,46 @@ voucher_create(voucher_recipe_t recipe) } #endif +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_clone(const voucher_t ov, voucher_fields_t ignore_fields) +{ + mach_voucher_attr_recipe_size_t extra = 0; + voucher_t v; + + if (ov && !(ignore_fields & VOUCHER_FIELD_EXTRA)) { + extra = _voucher_extra_size(ov); + } + v = _voucher_alloc(extra); + if (ov) { + voucher_fields_t fields = ~ignore_fields; + if ((fields & VOUCHER_FIELD_KVOUCHER) && ov->v_kvoucher) { + voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov; + v->v_kvbase = _voucher_retain(kvb); + v->v_kvoucher = kvb->v_kvoucher; + v->v_kv_has_importance = kvb->v_kv_has_importance; + } + if (fields & VOUCHER_FIELD_PRIORITY) { + v->v_priority = ov->v_priority; + } + if (fields & VOUCHER_FIELD_ACTIVITY) { + v->v_activity = ov->v_activity; + v->v_activity_creator = ov->v_activity_creator; + v->v_parent_activity = ov->v_parent_activity; + } + if ((fields & VOUCHER_FIELD_EXTRA) && extra) { + memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov),extra); + } + } + return v; +} + voucher_t voucher_adopt(voucher_t voucher) { + if (voucher == VOUCHER_CURRENT) { + return _voucher_copy(); + } return _voucher_adopt(voucher); } @@ -180,15 +159,18 @@ voucher_release(voucher_t voucher) void _voucher_thread_cleanup(void *voucher) { - _voucher_swap(voucher, NULL); + // when a thread exits and has a voucher left, the kernel + // will get rid of the voucher kernel object that is set on the thread, + // we only need to release the voucher_t object. 
+ _voucher_release(voucher); } DISPATCH_CACHELINE_ALIGN static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE]; #define _vouchers_head(kv) (&_vouchers[VL_HASH((kv))]) -static os_lock_handoff_s _vouchers_lock = OS_LOCK_HANDOFF_INIT; -#define _vouchers_lock_lock() os_lock_lock(&_vouchers_lock) -#define _vouchers_lock_unlock() os_lock_unlock(&_vouchers_lock) +static dispatch_unfair_lock_s _vouchers_lock; +#define _vouchers_lock_lock() _dispatch_unfair_lock_lock(&_vouchers_lock) +#define _vouchers_lock_unlock() _dispatch_unfair_lock_unlock(&_vouchers_lock) static voucher_t _voucher_find_and_retain(mach_voucher_t kv) @@ -198,15 +180,15 @@ _voucher_find_and_retain(mach_voucher_t kv) _vouchers_lock_lock(); TAILQ_FOREACH(v, _vouchers_head(kv), v_list) { if (v->v_ipc_kvoucher == kv) { - int xref_cnt = dispatch_atomic_inc2o(v, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_inc2o(v, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1); if (slowpath(xref_cnt < 0)) { - _dispatch_voucher_debug("overrelease", v); - DISPATCH_CRASH("Voucher overrelease"); + _dispatch_voucher_debug("over-release", v); + _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } if (xref_cnt == 0) { // resurrection: raced with _voucher_remove - (void)dispatch_atomic_inc2o(v, os_obj_ref_cnt, relaxed); + (void)os_atomic_inc2o(v, os_obj_ref_cnt, relaxed); } break; } @@ -223,7 +205,7 @@ _voucher_insert(voucher_t v) _vouchers_lock_lock(); if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) { _dispatch_voucher_debug("corruption", v); - DISPATCH_CRASH("Voucher corruption"); + DISPATCH_CLIENT_CRASH(v->v_list.tqe_prev, "Voucher corruption"); } TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list); _vouchers_lock_unlock(); @@ -237,10 +219,10 @@ _voucher_remove(voucher_t v) _vouchers_lock_lock(); if (slowpath(!kv)) { _dispatch_voucher_debug("corruption", v); - DISPATCH_CRASH("Voucher corruption"); + DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } // check for resurrection race with _voucher_find_and_retain - if (dispatch_atomic_load2o(v, os_obj_xref_cnt, seq_cst) < 0 && + if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0 && _TAILQ_IS_ENQUEUED(v, v_list)) { TAILQ_REMOVE(_vouchers_head(kv), v, v_list); _TAILQ_MARK_NOT_ENQUEUED(v, v_list); @@ -280,45 +262,29 @@ _voucher_create_mach_voucher(const mach_voucher_attr_recipe_data_t *recipes, return kr; } -#if __has_include() && !defined(VOUCHER_USE_ATTR_BANK) -#include -#define VOUCHER_USE_ATTR_BANK 1 -mach_voucher_t _voucher_default_task_mach_voucher; -#endif - -#if !defined(VOUCHER_USE_PERSONA) -#if VOUCHER_USE_ATTR_BANK && defined(BANK_PERSONA_TOKEN) && \ - !TARGET_IPHONE_SIMULATOR -#define VOUCHER_USE_PERSONA 1 -#else -#define VOUCHER_USE_PERSONA 0 -#endif -#endif - void _voucher_task_mach_voucher_init(void* ctxt DISPATCH_UNUSED) { -#if VOUCHER_USE_ATTR_BANK kern_return_t kr; - mach_voucher_t kv; + mach_voucher_t kv = MACH_VOUCHER_NULL; +#if !VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER static const mach_voucher_attr_recipe_data_t task_create_recipe = { .key = MACH_VOUCHER_ATTR_KEY_BANK, .command = MACH_VOUCHER_ATTR_BANK_CREATE, }; kr = _voucher_create_mach_voucher(&task_create_recipe, sizeof(task_create_recipe), &kv); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not create task mach voucher"); + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, "Could not create task mach voucher"); } _voucher_default_task_mach_voucher = kv; - _voucher_task_mach_voucher = kv; #endif + _voucher_task_mach_voucher = kv; } void voucher_replace_default_voucher(void) { -#if 
VOUCHER_USE_ATTR_BANK (void)_voucher_get_task_mach_voucher(); // initalize task mach voucher mach_voucher_t kv, tkv = MACH_VOUCHER_NULL; voucher_t v = _voucher_get(); @@ -337,28 +303,101 @@ voucher_replace_default_voucher(void) } } if (!tkv) tkv = _voucher_default_task_mach_voucher; - kv = dispatch_atomic_xchg(&_voucher_task_mach_voucher, tkv, relaxed); + kv = os_atomic_xchg(&_voucher_task_mach_voucher, tkv, relaxed); if (kv && kv != _voucher_default_task_mach_voucher) { _voucher_dealloc_mach_voucher(kv); } _dispatch_voucher_debug("kvoucher[0x%08x] replace default voucher", v, tkv); -#endif } -static inline _voucher_atm_t -_voucher_get_atm(voucher_t voucher) -{ - _voucher_atm_t vatm; - vatm = voucher && voucher->v_atm ? voucher->v_atm : _voucher_task_atm; - return vatm; -} +#define _voucher_mach_recipe_size(payload_size) \ + (sizeof(mach_voucher_attr_recipe_data_t) + (payload_size)) + +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY +#define _voucher_mach_recipe_alloca(v) ((mach_voucher_attr_recipe_t)alloca(\ + _voucher_mach_recipe_size(0) + \ + _voucher_mach_recipe_size(sizeof(ipc_pthread_priority_value_t)) + \ + _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \ + _voucher_extra_size(v))) +#else +#define _voucher_mach_recipe_alloca(v) ((mach_voucher_attr_recipe_t)alloca(\ + _voucher_mach_recipe_size(0) + \ + _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \ + _voucher_extra_size(v))) +#endif -static inline mach_voucher_t -_voucher_get_atm_mach_voucher(voucher_t voucher) +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_attr_recipe_size_t +_voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, + mach_voucher_t kvb, pthread_priority_t pp) { - _voucher_atm_t vatm = _voucher_get_atm(voucher); - mach_voucher_t kv = vatm ? 
vatm->vatm_kvoucher : MACH_VOUCHER_NULL; - return kv; + mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(v); + mach_voucher_attr_recipe_size_t size = 0; + + // normalize to just the QoS class and 0 relative priority + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (pp) pp |= _PTHREAD_PRIORITY_PRIORITY_MASK; + + *mvar_buf++ = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = kvb, + }; + size += _voucher_mach_recipe_size(0); + +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY + if (pp) { + ipc_pthread_priority_value_t value = (ipc_pthread_priority_value_t)pp; + *mvar_buf++ = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, + .command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE, + .content_size = sizeof(value), + }; + mvar_buf = _dispatch_memappend(mvar_buf, &value); + size += _voucher_mach_recipe_size(sizeof(value)); + } +#endif // VOUCHER_USE_MACH_VOUCHER_PRIORITY + + if ((v && v->v_activity) || pp) { + _voucher_mach_udata_s *udata_buf; + unsigned udata_size = 0; + + if (v && v->v_activity) { + udata_size = offsetof(_voucher_mach_udata_s, _vmu_after_activity); + } else { + udata_size = offsetof(_voucher_mach_udata_s, _vmu_after_priority); + } + *mvar_buf = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, + .command = MACH_VOUCHER_ATTR_USER_DATA_STORE, + .content_size = udata_size, + }; + udata_buf = (_voucher_mach_udata_s *)(mvar_buf->content); + + if (v && v->v_activity) { + *udata_buf = (_voucher_mach_udata_s){ + .vmu_magic = VOUCHER_MAGIC_V3, + .vmu_priority = (_voucher_priority_t)pp, + .vmu_activity = v->v_activity, + .vmu_activity_pid = v->v_activity_creator, + .vmu_parent_activity = v->v_parent_activity, + }; + } else { + *udata_buf = (_voucher_mach_udata_s){ + .vmu_magic = VOUCHER_MAGIC_V3, + .vmu_priority = (_voucher_priority_t)pp, + }; + } + + mvar_buf = (mach_voucher_attr_recipe_t)(mvar_buf->content + udata_size); + size += _voucher_mach_recipe_size(udata_size); + } + + if (extra) { + memcpy(mvar_buf, _voucher_extra_recipes(v), extra); + size += extra; + } + return size; } mach_voucher_t @@ -368,23 +407,22 @@ _voucher_get_mach_voucher(voucher_t voucher) if (voucher->v_ipc_kvoucher) return voucher->v_ipc_kvoucher; mach_voucher_t kvb = voucher->v_kvoucher; if (!kvb) kvb = _voucher_get_task_mach_voucher(); - if (!voucher->v_has_priority && !voucher->v_activities && + if (!voucher->v_activity && !voucher->v_priority && !_voucher_extra_size(voucher)) { return kvb; } - kern_return_t kr; + + mach_voucher_attr_recipe_t mvar = _voucher_mach_recipe_alloca(voucher); + mach_voucher_attr_recipe_size_t size; mach_voucher_t kv, kvo; - _voucher_base_recipe(voucher).previous_voucher = kvb; - _voucher_atm_recipe(voucher).previous_voucher = - _voucher_get_atm_mach_voucher(voucher); - size_t recipes_size = _voucher_recipes_size() + - _voucher_extra_size(voucher) + - _voucher_bits_recipe(voucher).content_size; - kr = _voucher_create_mach_voucher(voucher->v_recipes, recipes_size, &kv); + kern_return_t kr; + + size = _voucher_mach_recipe_init(mvar, voucher, kvb, voucher->v_priority); + kr = _voucher_create_mach_voucher(mvar, size, &kv); if (dispatch_assume_zero(kr) || !kv){ return MACH_VOUCHER_NULL; } - if (!dispatch_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, + if (!os_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, kv, &kvo, relaxed)) { _voucher_dealloc_mach_voucher(kv); kv = kvo; @@ -409,25 +447,12 @@ 
_voucher_create_mach_voucher_with_priority(voucher_t voucher, kern_return_t kr; mach_voucher_t kv, kvb = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; if (!kvb) kvb = _voucher_get_task_mach_voucher(); - mach_voucher_attr_recipe_data_t *recipes; - size_t recipes_size = _voucher_recipes_size(); - if (voucher && (voucher->v_has_priority || voucher->v_activities || - _voucher_extra_size(voucher))) { - recipes_size += _voucher_bits_recipe(voucher).content_size + - _voucher_extra_size(voucher); - recipes = alloca(recipes_size); - memcpy(recipes, voucher->v_recipes, recipes_size); - _voucher_recipes_atm(recipes).previous_voucher = - _voucher_get_atm_mach_voucher(voucher); - } else { - mach_voucher_attr_content_size_t bits_size = _voucher_bits_size(0); - recipes_size += bits_size; - recipes = alloca(recipes_size); - _voucher_recipes_init(recipes, bits_size); - } - _voucher_recipes_base(recipes).previous_voucher = kvb; - *_voucher_recipes_priority(recipes) = (_voucher_priority_t)priority; - kr = _voucher_create_mach_voucher(recipes, recipes_size, &kv); + + mach_voucher_attr_recipe_t mvar = _voucher_mach_recipe_alloca(voucher); + mach_voucher_attr_recipe_size_t size; + + size = _voucher_mach_recipe_init(mvar, voucher, kvb, priority); + kr = _voucher_create_mach_voucher(mvar, size, &kv); if (dispatch_assume_zero(kr) || !kv){ return MACH_VOUCHER_NULL; } @@ -437,78 +462,118 @@ _voucher_create_mach_voucher_with_priority(voucher_t voucher, } static voucher_t -_voucher_create_with_mach_voucher(mach_voucher_t kv) +_voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) { if (!kv) return NULL; kern_return_t kr; mach_voucher_attr_recipe_t vr; size_t vr_size; mach_voucher_attr_recipe_size_t kvr_size = 0; + mach_voucher_attr_content_size_t udata_sz = 0; + _voucher_mach_udata_s *udata = NULL; +#if !VOUCHER_USE_BANK_AUTOREDEEM + mach_voucher_t rkv; + const mach_voucher_attr_recipe_data_t redeem_recipe[] = { + [0] = { + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = kv, + }, + [1] = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_REDEEM, + }, + }; + kr = _voucher_create_mach_voucher(redeem_recipe, sizeof(redeem_recipe), + &rkv); + if (!dispatch_assume_zero(kr)) { + _voucher_dealloc_mach_voucher(kv); + _dispatch_kvoucher_debug("redeemed from 0x%08x", rkv, kv); + kv = rkv; + } else { + _dispatch_voucher_debug_machport(kv); + } +#endif voucher_t v = _voucher_find_and_retain(kv); if (v) { _dispatch_voucher_debug("kvoucher[0x%08x] found", v, kv); _voucher_dealloc_mach_voucher(kv); return v; } - vr_size = sizeof(*vr) + _voucher_bits_size(_voucher_max_activities); + vr_size = sizeof(*vr) + sizeof(_voucher_mach_udata_s); vr = alloca(vr_size); if (kv) { kvr_size = (mach_voucher_attr_recipe_size_t)vr_size; kr = mach_voucher_extract_attr_recipe(kv, MACH_VOUCHER_ATTR_KEY_USER_DATA, (void*)vr, &kvr_size); DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) kvr_size = 0; - } - mach_voucher_attr_content_size_t content_size = vr->content_size; - uint8_t *content = vr->content; - bool valid = false, has_priority = false; - unsigned int activities = 0; - if (kvr_size >= sizeof(*vr) + sizeof(_voucher_magic_t)) { - valid = (*(_voucher_magic_t*)content == _voucher_magic_v1); - content += sizeof(_voucher_magic_t); - content_size -= sizeof(_voucher_magic_t); - } - if (valid) { - has_priority = (content_size >= sizeof(_voucher_priority_t)); - activities = has_priority ? 
(content_size - sizeof(_voucher_priority_t)) - / sizeof(voucher_activity_id_t) : 0; - } - pthread_priority_t priority = 0; - if (has_priority) { - priority = (pthread_priority_t)*(_voucher_priority_t*)content; - content += sizeof(_voucher_priority_t); - content_size -= sizeof(_voucher_priority_t); - } - voucher_activity_id_t va_id = 0, va_base_id = 0; - _voucher_activity_t act = NULL; - _voucher_atm_t vatm = NULL; - if (activities) { - va_id = *(voucher_activity_id_t*)content; - act = _voucher_activity_copy_from_mach_voucher(kv, va_id); - if (!act && _voucher_activity_default) { - activities++; - // default to _voucher_activity_default base activity - va_base_id = _voucher_activity_default->va_id; - } else if (act && act->va_id != va_id) { - activities++; - va_base_id = act->va_id; + if (!dispatch_assume_zero(kr) && kvr_size >= sizeof(*vr)) { + udata_sz = vr->content_size; + udata = (_voucher_mach_udata_s*)vr->content; + dispatch_assume(udata_sz >= sizeof(_voucher_magic_t)); } - if (act) { - vatm = _voucher_atm_retain(act->va_atm); + } + vr = NULL; + + v = _voucher_alloc(0); + v->v_ipc_kvoucher = v->v_kvoucher = kv; + v->v_kv_has_importance = !!(msgh_bits & MACH_MSGH_BITS_RAISEIMP); + + if (udata_sz >= offsetof(_voucher_mach_udata_s,_vmu_after_priority)){ + if (udata->vmu_magic == VOUCHER_MAGIC_V3) { + v->v_priority = udata->vmu_priority; } } - v = _voucher_alloc(activities, priority, 0); - v->v_atm = vatm; - v->v_activity = act; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (activities && va_base_id) { - *activity_ids++ = va_base_id; - activities--; + bool remove_kv_userdata = false; + if (udata_sz >= offsetof(_voucher_mach_udata_s, _vmu_after_activity)) { +#if !RDAR_25050791 + remove_kv_userdata = true; +#endif + if (udata->vmu_magic == VOUCHER_MAGIC_V3 && udata->vmu_activity) { + v->v_activity = udata->vmu_activity; + v->v_activity_creator = udata->vmu_activity_pid; + v->v_parent_activity = udata->vmu_parent_activity; + } } - if (activities) { - memcpy(activity_ids, content, content_size); + + if (remove_kv_userdata) { + mach_voucher_t nkv = MACH_VOUCHER_NULL; + const mach_voucher_attr_recipe_data_t remove_userdata_recipe[] = { + [0] = { + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = kv, + }, + [1] = { + .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, + .command = MACH_VOUCHER_ATTR_REMOVE, + }, +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY + [2] = { + .key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, + .command = MACH_VOUCHER_ATTR_REMOVE, + }, +#endif + }; + mach_voucher_attr_recipe_size_t size = sizeof(remove_userdata_recipe); + + kr = _voucher_create_mach_voucher(remove_userdata_recipe, size, &nkv); + if (!dispatch_assume_zero(kr)) { + _dispatch_voucher_debug("kvoucher[0x%08x] udata removal " + "(created 0x%08x)", v, kv, nkv); + v->v_ipc_kvoucher = MACH_VOUCHER_NULL; + v->v_kvoucher = nkv; + v->v_kvbase = _voucher_find_and_retain(nkv); + if (v->v_kvbase) { + _voucher_dealloc_mach_voucher(nkv); // borrow base reference + } + _voucher_dealloc_mach_voucher(kv); + kv = nkv; + } else { + _dispatch_voucher_debug_machport(kv); + } } - v->v_ipc_kvoucher = v->v_kvoucher = kv; + _voucher_insert(v); _dispatch_voucher_debug("kvoucher[0x%08x] create", v, kv); return v; @@ -523,24 +588,18 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov, return ov ? 
_voucher_retain(ov) : NULL; } voucher_t v = _voucher_find_and_retain(kv); + voucher_fields_t ignore_fields = VOUCHER_FIELD_PRIORITY; + if (v) { _dispatch_voucher_debug("kvoucher[0x%08x] find", v, kv); _voucher_dealloc_mach_voucher(kv); return v; } - unsigned int activities = ov ? ov->v_activities : 0; - mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0; - v = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra); - } - if (activities) { - if (ov->v_activity) { - v->v_activity = _voucher_activity_retain(ov->v_activity); - v->v_atm = _voucher_atm_retain(ov->v_atm); - } - memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov), - activities * sizeof(voucher_activity_id_t)); + + if (kv) ignore_fields |= VOUCHER_FIELD_KVOUCHER; + v = _voucher_clone(ov, ignore_fields); + if (priority) { + v->v_priority = (_voucher_priority_t)priority; } if (kv) { v->v_ipc_kvoucher = v->v_kvoucher = kv; @@ -548,10 +607,6 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov, _dispatch_voucher_debug("kvoucher[0x%08x] create with priority from " "voucher[%p]", v, kv, ov); _dispatch_voucher_debug_machport(kv); - } else if (ov && ov->v_kvoucher) { - voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov; - v->v_kvbase = _voucher_retain(kvb); - v->v_kvoucher = kvb->v_kvoucher; } return v; } @@ -562,8 +617,7 @@ _voucher_create_without_importance(voucher_t ov) // Nothing to do unless the old voucher has a kernel voucher. If it // doesn't, it can't have any importance, now or in the future. if (!ov) return NULL; - // TODO: 17487167: track presence of importance attribute - if (!ov->v_kvoucher) return _voucher_retain(ov); + if (!ov->v_kvoucher || !ov->v_kv_has_importance) return _voucher_retain(ov); kern_return_t kr; mach_voucher_t kv, okv; // Copy kernel voucher, removing importance. @@ -597,22 +651,8 @@ _voucher_create_without_importance(voucher_t ov) return v; } voucher_t kvbase = v; - // Copy userspace contents - unsigned int activities = ov->v_activities; - pthread_priority_t priority = _voucher_get_priority(ov); - mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(ov); - v = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra); - } - if (activities) { - if (ov->v_activity) { - v->v_activity = _voucher_activity_retain(ov->v_activity); - v->v_atm = _voucher_atm_retain(ov->v_atm); - } - memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov), - activities * sizeof(voucher_activity_id_t)); - } + voucher_fields_t ignore_fields = VOUCHER_FIELD_KVOUCHER; + v = _voucher_clone(ov, ignore_fields); v->v_kvoucher = kv; if (ov->v_ipc_kvoucher) { v->v_ipc_kvoucher = kv; @@ -637,7 +677,6 @@ _voucher_create_accounting_voucher(voucher_t ov) kern_return_t kr = KERN_SUCCESS; mach_voucher_t okv, kv = MACH_VOUCHER_NULL; okv = ov->v_ipc_kvoucher ? 
ov->v_ipc_kvoucher : ov->v_kvoucher; -#if VOUCHER_USE_ATTR_BANK const mach_voucher_attr_recipe_data_t accounting_copy_recipe = { .key = MACH_VOUCHER_ATTR_KEY_BANK, .command = MACH_VOUCHER_ATTR_COPY, @@ -645,7 +684,6 @@ _voucher_create_accounting_voucher(voucher_t ov) }; kr = _voucher_create_mach_voucher(&accounting_copy_recipe, sizeof(accounting_copy_recipe), &kv); -#endif if (dispatch_assume_zero(kr) || !kv){ return NULL; } @@ -656,7 +694,7 @@ _voucher_create_accounting_voucher(voucher_t ov) _voucher_dealloc_mach_voucher(kv); return v; } - v = _voucher_alloc(0, 0, 0); + v = _voucher_alloc(0); v->v_ipc_kvoucher = v->v_kvoucher = kv; if (kv == okv) { v->v_kvbase = _voucher_retain(ov); @@ -671,20 +709,15 @@ _voucher_create_accounting_voucher(voucher_t ov) voucher_t voucher_create_with_mach_msg(mach_msg_header_t *msg) { - voucher_t v = _voucher_create_with_mach_voucher(_voucher_mach_msg_get(msg)); - _voucher_activity_trace_msg(v, msg, receive); - return v; + mach_msg_bits_t msgh_bits; + mach_voucher_t kv = _voucher_mach_msg_get(msg, &msgh_bits); + return _voucher_create_with_mach_voucher(kv, msgh_bits); } -#ifndef MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL -#define MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL 2 -#endif - void voucher_decrement_importance_count4CF(voucher_t v) { - if (!v || !v->v_kvoucher) return; - // TODO: 17487167: track presence of importance attribute + if (!v || !v->v_kvoucher || !v->v_kv_has_importance) return; kern_return_t kr; mach_voucher_t kv = v->v_ipc_kvoucher ? v->v_ipc_kvoucher : v->v_kvoucher; uint32_t dec = 1; @@ -701,14 +734,13 @@ voucher_decrement_importance_count4CF(voucher_t v) MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); DISPATCH_VERIFY_MIG(kr); + if (kr == KERN_INVALID_TASK) return; // non-denap receiver rdar://25643185 #if DISPATCH_DEBUG _dispatch_voucher_debug("kvoucher[0x%08x] decrement importance count to %u:" " %s - 0x%x", v, kv, count, mach_error_string(kr), kr); #endif - if (kr != KERN_INVALID_ARGUMENT && - dispatch_assume_zero(kr) == KERN_FAILURE) { - // TODO: 17487167: skip KERN_INVALID_ARGUMENT check - DISPATCH_CLIENT_CRASH("Voucher importance count underflow"); + if (slowpath(dispatch_assume_zero(kr) == KERN_FAILURE)) { + DISPATCH_CLIENT_CRASH(kr, "Voucher importance count underflow"); } } @@ -734,7 +766,7 @@ _voucher_dispose(voucher_t voucher) _dispatch_voucher_debug("dispose", voucher); if (slowpath(_TAILQ_IS_ENQUEUED(voucher, v_list))) { _dispatch_voucher_debug("corruption", voucher); - DISPATCH_CRASH("Voucher corruption"); + DISPATCH_CLIENT_CRASH(voucher->v_list.tqe_prev, "Voucher corruption"); } voucher->v_list.tqe_next = DISPATCH_OBJECT_LISTLESS; if (voucher->v_ipc_kvoucher) { @@ -753,16 +785,10 @@ _voucher_dispose(voucher_t voucher) _voucher_release(voucher->v_kvbase); voucher->v_kvbase = NULL; } - if (voucher->v_activity) { - _voucher_activity_release(voucher->v_activity); - voucher->v_activity = NULL; - } - if (voucher->v_atm) { - _voucher_atm_release(voucher->v_atm); - voucher->v_atm = NULL; - } - voucher->v_has_priority = 0; - voucher->v_activities = 0; + voucher->v_activity = 0; + voucher->v_activity_creator = 0; + voucher->v_parent_activity = 0; + voucher->v_priority = 0; #if VOUCHER_ENABLE_RECIPE_OBJECTS voucher->v_recipe_extra_size = 0; voucher->v_recipe_extra_offset = 0; @@ -770,15 +796,54 @@ _voucher_dispose(voucher_t voucher) return _os_object_dealloc((_os_object_t)voucher); } +static void +_voucher_activity_debug_channel_barrier_nop(void *ctxt DISPATCH_UNUSED) +{ +} + +void 
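// Sketch of the importance-drop command issued by
// voucher_decrement_importance_count4CF() above: a uint32_t decrement count
// goes in, and the remaining external importance count comes back out.
// Assumes only the public <mach/mach_voucher.h> interfaces.
#include <mach/mach.h>
#include <mach/mach_voucher.h>

static kern_return_t
drop_one_importance_ref(mach_voucher_t kv, uint32_t *remaining)
{
	uint32_t dec = 1;
	mach_voucher_attr_content_size_t out_size =
			(mach_voucher_attr_content_size_t)sizeof(*remaining);
	return mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
			MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL,
			(mach_voucher_attr_content_t)&dec,
			(mach_voucher_attr_content_size_t)sizeof(dec),
			(mach_voucher_attr_content_t)remaining, &out_size);
}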
+_voucher_activity_debug_channel_init(void) +{ + dispatch_mach_handler_function_t handler = NULL; + + if (_voucher_libtrace_hooks && _voucher_libtrace_hooks->vah_version >= 2) { + handler = _voucher_libtrace_hooks->vah_debug_channel_handler; + } + + if (!handler) return; + + dispatch_mach_t dm; + mach_port_t dbgp; + kern_return_t kr; + + kr = task_get_debug_control_port(mach_task_self(), &dbgp); + DISPATCH_VERIFY_MIG(kr); + if (kr) { + DISPATCH_CLIENT_CRASH(kr, "Couldn't get debug control port"); + } + if (dbgp) { + dm = dispatch_mach_create_f("com.apple.debug-channel", + DISPATCH_TARGET_QUEUE_DEFAULT, NULL, handler); + dispatch_mach_connect(dm, dbgp, MACH_PORT_NULL, NULL); + // will force the DISPATCH_MACH_CONNECTED event + dispatch_mach_send_barrier_f(dm, NULL, + _voucher_activity_debug_channel_barrier_nop); + _voucher_activity_debug_channel = dm; + } +} + void _voucher_atfork_child(void) { - _voucher_activity_atfork_child(); _dispatch_thread_setspecific(dispatch_voucher_key, NULL); _voucher_task_mach_voucher_pred = 0; _voucher_task_mach_voucher = MACH_VOUCHER_NULL; - - // TODO: voucher/activity inheritance on fork ? +#if !VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER + _voucher_default_task_mach_voucher = MACH_PORT_NULL; +#endif + _voucher_aid_next = 0; + _firehose_task_buffer_pred = 0; + _firehose_task_buffer = NULL; // firehose buffer is VM_INHERIT_NONE } #if VOUCHER_EXPORT_PERSONA_SPI @@ -873,10 +938,7 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf boolean_t voucher_mach_msg_set(mach_msg_header_t *msg) { - voucher_t v = _voucher_get(); - bool clear_voucher = _voucher_mach_msg_set(msg, v); - if (clear_voucher) _voucher_activity_trace_msg(v, msg, send); - return clear_voucher; + return _voucher_mach_msg_set(msg, _voucher_get()); } void @@ -888,10 +950,10 @@ voucher_mach_msg_clear(mach_msg_header_t *msg) voucher_mach_msg_state_t voucher_mach_msg_adopt(mach_msg_header_t *msg) { - mach_voucher_t kv = _voucher_mach_msg_get(msg); + mach_msg_bits_t msgh_bits; + mach_voucher_t kv = _voucher_mach_msg_get(msg, &msgh_bits); if (!kv) return VOUCHER_MACH_MSG_STATE_UNCHANGED; - voucher_t v = _voucher_create_with_mach_voucher(kv); - _voucher_activity_trace_msg(v, msg, receive); + voucher_t v = _voucher_create_with_mach_voucher(kv, msgh_bits); return (voucher_mach_msg_state_t)_voucher_adopt(v); } @@ -924,1470 +986,383 @@ _voucher_libkernel_init(void) #define _voucher_libkernel_init() #endif +void +voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) +{ + if (!os_atomic_cmpxchg(&_voucher_libtrace_hooks, NULL, + hooks, relaxed)) { + DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks, + "voucher_activity_initialize_4libtrace called twice"); + } +} + void _voucher_init(void) { _voucher_libkernel_init(); - char *e; unsigned int i; for (i = 0; i < VL_HASH_SIZE; i++) { TAILQ_INIT(&_vouchers[i]); } - voucher_activity_mode_t mode; - mode = DISPATCH_DEBUG ? 
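// Sketch of the atfork pattern used by _voucher_atfork_child() above: state
// guarded by a dispatch_once_t is reset in the child so the lazy initializer
// reruns on first use. Resetting a once-predicate is only safe here because
// the child is single-threaded at this point and the parent's buffer mapping
// was created VM_INHERIT_NONE; client code must never reset a
// dispatch_once_t. create_buffer() is a hypothetical stand-in for
// firehose_buffer_create().
#include <dispatch/dispatch.h>
#include <pthread.h>

static dispatch_once_t buffer_pred;
static void *task_buffer;
extern void *create_buffer(void); // hypothetical

static void
buffer_init(void *ctx)
{
	(void)ctx;
	task_buffer = create_buffer();
}

static void
atfork_child(void)
{
	buffer_pred = 0;    // force re-initialization in the child
	task_buffer = NULL; // the parent's mapping is not inherited
}

// registered once at startup:
//	pthread_atfork(NULL, NULL, atfork_child);
// lazy use site:
//	dispatch_once_f(&buffer_pred, NULL, buffer_init);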
voucher_activity_mode_debug - : voucher_activity_mode_release; - e = getenv("OS_ACTIVITY_MODE"); - if (e) { - if (strcmp(e, "release") == 0) { - mode = voucher_activity_mode_release; - } else if (strcmp(e, "debug") == 0) { - mode = voucher_activity_mode_debug; - } else if (strcmp(e, "stream") == 0) { - mode = voucher_activity_mode_stream; - } else if (strcmp(e, "disable") == 0) { - mode = voucher_activity_mode_disable; - } - } - _voucher_activity_mode = mode; - if (_voucher_activity_disabled()) return; - - // default task activity - bool default_task_activity = DISPATCH_DEBUG; - e = getenv("LIBDISPATCH_DEFAULT_TASK_ACTIVITY"); - if (e) default_task_activity = atoi(e); - if (default_task_activity) { - (void)voucher_activity_start(_voucher_activity_trace_id_release, 0); - } -} - -#pragma mark - -#pragma mark _voucher_activity_lock_s - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_lock_init(_voucher_activity_lock_s *lock) { - static const os_lock_handoff_s _os_lock_handoff_init = OS_LOCK_HANDOFF_INIT; - *lock = _os_lock_handoff_init; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_lock_lock(_voucher_activity_lock_s *lock) { - return os_lock_lock(lock); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_lock_unlock(_voucher_activity_lock_s *lock) { - return os_lock_unlock(lock); } #pragma mark - -#pragma mark _voucher_activity_heap - -#if __has_extension(c_static_assert) -_Static_assert(sizeof(struct _voucher_activity_tracepoint_s) == 64, - "Tracepoint too large"); -_Static_assert(sizeof(struct _voucher_activity_buffer_header_s) <= - sizeof(struct _voucher_activity_tracepoint_s), - "Buffer header too large"); -#if __LP64__ -_Static_assert(offsetof(struct _voucher_activity_s, va_buffers_lock) % 64 == 0, - "Bad activity padding"); -_Static_assert(sizeof(struct _voucher_atm_s) <= 128, - "ATM too large"); -#else -_Static_assert(sizeof(struct _voucher_atm_s) <= 64, - "ATM too large"); -#endif -_Static_assert(sizeof(_voucher_activity_buffer_t) == - sizeof(struct {char x[_voucher_activity_buffer_size];}), - "Buffer too large"); -_Static_assert(sizeof(struct _voucher_activity_metadata_s) <= - sizeof(struct _voucher_activity_metadata_opaque_s), - "Metadata too large"); -_Static_assert(sizeof(_voucher_activity_bitmap_t) % 64 == 0, - "Bad metadata bitmap size"); -#endif +#pragma mark voucher_activity_t -#define va_buffers_lock(va) (&(va)->va_buffers_lock) -#define vatm_activities(vatm) (&(vatm)->vatm_activities) -#define vam_atms_lock() (&_voucher_activity_heap->vam_atms_lock) -#define vam_activities_lock() (&_voucher_activity_heap->vam_activities_lock) -#define vam_atms(hash) (&_voucher_activity_heap->vam_atms[hash]) -#define vam_activities(hash) (&_voucher_activity_heap->vam_activities[hash]) -#define vam_buffer_bitmap() (_voucher_activity_heap->vam_buffer_bitmap) -#define vam_pressure_locked_bitmap() \ - (_voucher_activity_heap->vam_pressure_locked_bitmap) -#define vam_buffer(i) ((void*)((char*)_voucher_activity_heap + \ - (i) * _voucher_activity_buffer_size)) - -static _voucher_activity_t _voucher_activity_create_with_atm( - _voucher_atm_t vatm, voucher_activity_id_t va_id, - voucher_activity_trace_id_t trace_id, uint64_t location, - _voucher_activity_buffer_header_t buffer); -static _voucher_atm_t _voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id); -static void _voucher_activity_firehose_wait(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer); - -DISPATCH_ALWAYS_INLINE -static inline uint32_t 
-_voucher_default_activity_buffer_limit() +DISPATCH_NOINLINE +static uint64_t +_voucher_activity_id_allocate_slow(uint64_t aid) { -#if 0 // FIXME: tune buffer chain sizes - switch (_voucher_activity_mode) { - case voucher_activity_mode_debug: - case voucher_activity_mode_stream: - // High-profile modes: Default activity can use 1/32nd of the heap - // (twice as much as non-default activities) - return MAX(_voucher_activity_buffers_per_heap / 32, 3) - 1; + kern_return_t kr; + uint64_t next; + + kr = mach_generate_activity_id(mach_task_self(), 1, &next); + if (unlikely(kr)) { + DISPATCH_CLIENT_CRASH(kr, "Could not generate an activity ID"); + } + next *= MACH_ACTIVITY_ID_RANGE_SIZE; + next &= MACH_ACTIVITY_ID_MASK; + if (unlikely(next == 0)) { + next++; } -#endif - // Low-profile modes: Default activity can use a total of 4 buffers. - return 3; -} -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_voucher_activity_buffer_limit() -{ -#if 0 // FIXME: tune buffer chain sizes - switch (_voucher_activity_mode) { - case voucher_activity_mode_debug: - case voucher_activity_mode_stream: - // High-profile modes: 64 activities, each of which can use 1/64th - // of the entire heap. - return MAX(_voucher_activity_buffers_per_heap / 64, 2) - 1; + if (unlikely(aid == 0)) { + if (os_atomic_cmpxchg(&_voucher_aid_next, 0, next + 1, relaxed)) { + return next; + } } -#endif - // Low-profile modes: Each activity can use a total of 2 buffers. - return 1; + return os_atomic_xchg(&_voucher_aid_next, next, relaxed); } -// The two functions above return the number of *additional* buffers activities -// may allocate, hence the gymnastics with - 1. - DISPATCH_ALWAYS_INLINE -static inline uint32_t -_voucher_heap_buffer_limit() -{ - switch (_voucher_activity_mode) { - case voucher_activity_mode_debug: - case voucher_activity_mode_stream: - // High-profile modes: Use it all. - return _voucher_activity_buffers_per_heap; - } -#if TARGET_OS_EMBEDDED - // Low-profile modes: 3 activities, each of which can use 2 buffers; - // plus the default activity, which can use 3; plus 3 buffers of overhead. - return 12; -#else - // Low-profile modes: 13 activities, each of which can use 4 buffers; - // plus the default activity, which can use 8; plus 3 buffers of overhead. 
- return 64; -#endif +static firehose_activity_id_t +_voucher_activity_id_allocate(firehose_activity_flags_t flags) +{ + uint64_t aid, next; + os_atomic_rmw_loop(&_voucher_aid_next, aid, next, relaxed, { + next = aid + 1; + if (aid == 0 || next % MACH_ACTIVITY_ID_RANGE_SIZE == 0) { + os_atomic_rmw_loop_give_up({ + aid = _voucher_activity_id_allocate_slow(aid); + break; + }); + } + }); + return FIREHOSE_ACTIVITY_ID_MAKE(aid, flags); } -#define NO_BITS_WERE_UNSET (UINT_MAX) +#define _voucher_activity_tracepoint_reserve(stamp, stream, pub, priv, privbuf) \ + firehose_buffer_tracepoint_reserve(_firehose_task_buffer, stamp, \ + stream, pub, priv, privbuf) -DISPATCH_ALWAYS_INLINE -static inline size_t -_voucher_activity_bitmap_set_first_unset_bit_upto( - _voucher_activity_bitmap_t volatile bitmap, - unsigned int max_index) -{ - dispatch_assert(max_index != 0); - unsigned int index = NO_BITS_WERE_UNSET, max_map, max_bit, i; - max_map = max_index / _voucher_activity_bits_per_bitmap_base_t; - max_map = MIN(max_map, _voucher_activity_bitmaps_per_heap - 1); - max_bit = max_index % _voucher_activity_bits_per_bitmap_base_t; - for (i = 0; i < max_map; i++) { - index = dispatch_atomic_set_first_bit(&bitmap[i], UINT_MAX); - if (fastpath(index < NO_BITS_WERE_UNSET)) { - return index + i * _voucher_activity_bits_per_bitmap_base_t; - } - } - index = dispatch_atomic_set_first_bit(&bitmap[i], max_bit); - if (fastpath(index < NO_BITS_WERE_UNSET)) { - return index + i * _voucher_activity_bits_per_bitmap_base_t; - } - return index; -} +#define _voucher_activity_tracepoint_flush(ft, ftid) \ + firehose_buffer_tracepoint_flush(_firehose_task_buffer, ft, ftid) -DISPATCH_ALWAYS_INLINE DISPATCH_UNUSED -static inline size_t -_voucher_activity_bitmap_set_first_unset_bit( - _voucher_activity_bitmap_t volatile bitmap) +DISPATCH_NOINLINE +static void +_firehose_task_buffer_init(void *ctx OS_UNUSED) { - return _voucher_activity_bitmap_set_first_unset_bit_upto(bitmap, UINT_MAX); -} + mach_port_t logd_port; -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_bitmap_clear_bit( - _voucher_activity_bitmap_t volatile bitmap, size_t index) -{ - size_t i = index / _voucher_activity_bits_per_bitmap_base_t; - _voucher_activity_bitmap_base_t mask = ((typeof(mask))1) << - (index % _voucher_activity_bits_per_bitmap_base_t); - if (slowpath((bitmap[i] & mask) == 0)) { - DISPATCH_CRASH("Corruption: failed to clear bit exclusively"); + /* Query the uniquepid of the current process */ + struct proc_uniqidentifierinfo p_uniqinfo = { }; + int info_size = 0; + + info_size = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 1, + &p_uniqinfo, PROC_PIDUNIQIDENTIFIERINFO_SIZE); + if (slowpath(info_size != PROC_PIDUNIQIDENTIFIERINFO_SIZE)) { + DISPATCH_INTERNAL_CRASH(info_size, "Unable to get the unique pid"); } - (void)dispatch_atomic_and(&bitmap[i], ~mask, release); -} + _voucher_unique_pid = p_uniqinfo.p_uniqueid; -_voucher_activity_metadata_t _voucher_activity_heap; -static dispatch_once_t _voucher_activity_heap_pred; -static void -_voucher_activity_heap_init(void *ctxt DISPATCH_UNUSED) -{ - if (_voucher_activity_disabled()) return; - kern_return_t kr; - mach_vm_size_t vm_size = _voucher_activity_buffer_size * - _voucher_activity_buffers_per_heap; - mach_vm_address_t vm_addr = vm_page_size; - while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, - 0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_GENEALOGY), - MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_NONE))) { - if (kr != KERN_NO_SPACE) { - 
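// The allocator above hands out sequential activity IDs from a process-wide
// counter and asks the kernel (mach_generate_activity_id) for a fresh range
// only when the current one is exhausted. A portable sketch of the same
// scheme using C11 atomics; RANGE_SIZE and refill_range() are stand-ins for
// MACH_ACTIVITY_ID_RANGE_SIZE and the syscall. Concurrent refills may waste
// a range, but IDs are never reused because every range comes from the
// kernel exactly once.
#include <stdatomic.h>
#include <stdint.h>

#define RANGE_SIZE (1u << 16)
extern uint64_t refill_range(void); // fresh, nonzero, range-aligned base

static _Atomic uint64_t next_aid;

static uint64_t
allocate_aid(void)
{
	uint64_t aid = atomic_load_explicit(&next_aid, memory_order_relaxed);
	for (;;) {
		if (aid == 0 || (aid + 1) % RANGE_SIZE == 0) {
			// slow path: publish a new range, keep its first ID
			uint64_t base = refill_range();
			atomic_store_explicit(&next_aid, base + 1,
					memory_order_relaxed);
			return base;
		}
		if (atomic_compare_exchange_weak_explicit(&next_aid, &aid,
				aid + 1, memory_order_relaxed, memory_order_relaxed)) {
			return aid;
		}
		// CAS failure reloaded aid; retry
	}
}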
(void)dispatch_assume_zero(kr); - _voucher_activity_mode = voucher_activity_mode_disable; - return; + if (!fastpath(_voucher_libtrace_hooks)) { + if (0) { // + DISPATCH_CLIENT_CRASH(0, + "Activity subsystem isn't initialized yet"); } - _dispatch_temporary_resource_shortage(); - vm_addr = vm_page_size; - } - _voucher_activity_metadata_t heap; - task_trace_memory_info_data_t trace_memory_info = { - .user_memory_address = vm_addr, - .buffer_size = vm_size, - }; - kr = task_set_info(mach_task_self(), TASK_TRACE_MEMORY_INFO, - (task_info_t)&trace_memory_info, TASK_TRACE_MEMORY_INFO_COUNT); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - if (kr != KERN_NOT_SUPPORTED) (void)dispatch_assume_zero(kr); - kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); - (void)dispatch_assume_zero(kr); - _voucher_activity_mode = voucher_activity_mode_disable; return; } - heap = (void*)vm_addr; - heap->vasm_baseaddr = (void*)vm_addr; - heap->vam_buffer_bitmap[0] = 0x7; // first three buffers are reserved - uint32_t i; - for (i = 0; i < _voucher_activity_hash_size; i++) { - TAILQ_INIT(&heap->vam_activities[i]); - TAILQ_INIT(&heap->vam_atms[i]); + logd_port = _voucher_libtrace_hooks->vah_get_logd_port(); + if (logd_port) { + unsigned long flags = 0; +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE + if (_dispatch_memory_warn) { + flags |= FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY; + } +#endif + // firehose_buffer_create always consumes the send-right + _firehose_task_buffer = firehose_buffer_create(logd_port, + _voucher_unique_pid, flags); } - _voucher_activity_lock_init(&heap->vam_atms_lock); - _voucher_activity_lock_init(&heap->vam_activities_lock); - _voucher_activity_heap = heap; - - _voucher_atm_t vatm = _voucher_atm_create(0, 0); - dispatch_assert(vatm->vatm_kvoucher); - _voucher_atm_retain(vatm); - - _voucher_activity_buffer_header_t buffer = vam_buffer(2); // reserved index - // consumes vatm reference: - _voucher_activity_t va = _voucher_activity_create_with_atm(vatm, 0, 0, 0, - buffer); - dispatch_assert(va); - va->va_buffer_limit = _voucher_default_activity_buffer_limit(); - _voucher_activity_default = va; - _voucher_task_atm = vatm; } -static void -_voucher_activity_atfork_child(void) +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_activity_disabled(void) { - _voucher_activity_heap_pred = 0; - _voucher_activity_heap = NULL; // activity heap is VM_INHERIT_NONE - _voucher_activity_default = NULL; + dispatch_once_f(&_firehose_task_buffer_pred, + NULL, _firehose_task_buffer_init); + + firehose_buffer_t fb = _firehose_task_buffer; + if (fastpath(fb)) { + return slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD); + } + return true; } void* voucher_activity_get_metadata_buffer(size_t *length) { - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); if (_voucher_activity_disabled()) { *length = 0; return NULL; } - *length = sizeof(_voucher_activity_heap->vam_client_metadata); - return _voucher_activity_heap->vam_client_metadata; -} -static _voucher_activity_buffer_hook_t _voucher_activity_buffer_hook; + firehose_buffer_header_t fbh = &_firehose_task_buffer->fb_header; -void -voucher_activity_buffer_hook_install_4libtrace( - _voucher_activity_buffer_hook_t hook) -{ - if (dispatch_atomic_cmpxchg(&_voucher_activity_buffer_hook, NULL, - (void*)hook, release)) return; - DISPATCH_CLIENT_CRASH("_voucher_activity_buffer_hook_install_4libtrace " \ - "called more than once"); + *length = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE; + return (void *)((uintptr_t)(fbh + 1) - *length); } -#if DISPATCH_DEBUG 
&& DISPATCH_VOUCHER_ACTIVITY_DEBUG -#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer) \ - _dispatch_debug("activity buffer %s (%p)", #reason, buffer) -#else -#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer) -#endif +voucher_t +voucher_activity_create(firehose_tracepoint_id_t trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) +{ + return voucher_activity_create_with_location(&trace_id, base, flags, location); +} -#define VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(reason, buffer) \ - if (buffer) { VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer); \ - if (slowpath(_voucher_activity_buffer_hook)) { \ - _voucher_activity_buffer_hook( \ - _voucher_activity_buffer_hook_reason_##reason, (buffer)); \ - } } +voucher_t +voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) +{ + firehose_activity_id_t va_id = 0, current_id = 0, parent_id = 0; + firehose_tracepoint_id_u ftid = { .ftid_value = *trace_id }; + uint16_t pubsize = sizeof(va_id) + sizeof(location); + uint64_t creator_id = 0; + voucher_t ov = _voucher_get(); + voucher_t v; -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_buffer_header_t -_voucher_activity_heap_buffer_alloc(void) -{ - _voucher_activity_buffer_header_t buffer = NULL; - size_t index; - index = _voucher_activity_bitmap_set_first_unset_bit_upto( - vam_buffer_bitmap(), _voucher_heap_buffer_limit() - 1); - if (index < NO_BITS_WERE_UNSET) { - buffer = vam_buffer(index); - } -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("activity heap alloc %zd (%p)", index, buffer); -#endif - return buffer; -} + if (base == VOUCHER_CURRENT) { + base = ov; + } + if (_voucher_activity_disabled()) { + *trace_id = 0; + return base ? 
_voucher_retain(base) : VOUCHER_NULL; + } -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_heap_buffer_free(_voucher_activity_buffer_header_t buffer) -{ - buffer->vabh_flags = _voucher_activity_trace_flag_buffer_empty; - size_t index = (size_t)((char*)buffer - (char*)_voucher_activity_heap) / - _voucher_activity_buffer_size; -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("activity heap free %zd (%p)", index, buffer); -#endif - _voucher_activity_bitmap_clear_bit(vam_buffer_bitmap(), index); -} + FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid); + if (ov && (current_id = ov->v_activity)) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_current_aid); + pubsize += sizeof(firehose_activity_id_t); + if ((creator_id = ov->v_activity_creator)) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_unique_pid); + pubsize += sizeof(uint64_t); + } + } + if (base != VOUCHER_NULL) { + parent_id = base->v_activity; + } -#define _voucher_activity_heap_can_madvise() \ - (PAGE_SIZE == _voucher_activity_buffer_size) // + if (parent_id) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, activity, has_other_aid); + pubsize += sizeof(firehose_activity_id_t); + flags |= FIREHOSE_ACTIVITY_ID_FLAGS(parent_id); + } -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_heap_madvise(size_t bitmap_num, unsigned int start, - unsigned int len) -{ - size_t base = bitmap_num * _voucher_activity_bits_per_bitmap_base_t; -#if DISPATCH_DEBUG -#if DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("activity heap madvise %zd (%p) -> %zd (%p)", base + start, - vam_buffer(base + start), base + start + len, - vam_buffer(base + start + len)); -#endif - dispatch_assert(!(len * _voucher_activity_buffer_size % vm_page_size)); - const uint64_t pattern = 0xFACEFACEFACEFACE; - _voucher_activity_buffer_header_t buffer = vam_buffer(base + start); - for (unsigned int i = 0; i < len; i++, buffer++) { - memset_pattern8((char*)buffer + sizeof(buffer->vabh_flags), &pattern, - _voucher_activity_buffer_size - sizeof(buffer->vabh_flags)); + if (firehose_precise_timestamps_enabled()) { + flags |= firehose_activity_flags_precise_timestamp; } -#endif - (void)dispatch_assume_zero(madvise(vam_buffer(base + start), - len * _voucher_activity_buffer_size, MADV_FREE)); -} + voucher_fields_t ignore_fields = VOUCHER_FIELD_ACTIVITY; + v = _voucher_clone(base, ignore_fields); + v->v_activity = va_id = _voucher_activity_id_allocate(flags); + v->v_activity_creator = _voucher_unique_pid; + v->v_parent_activity = parent_id; -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_heap_madvise_contiguous(size_t bitmap_num, - _voucher_activity_bitmap_base_t bits) -{ - // TODO: x86 has fast ctz; arm has fast clz; haswell has fast ctz - dispatch_assert(_voucher_activity_heap_can_madvise()); - if (bits == 0) { - return; - } else if (~bits == 0) { - _voucher_activity_heap_madvise(bitmap_num, 0, - _voucher_activity_bits_per_bitmap_base_t); - } else while (bits != 0) { - unsigned int start = (typeof(start))__builtin_ctzl(bits), len; - typeof(bits) inverse = ~bits >> start; - if (inverse) { - len = (typeof(len))__builtin_ctzl(inverse); - } else { - len = _voucher_activity_bits_per_bitmap_base_t - start; + static const firehose_stream_t streams[2] = { + firehose_stream_metadata, + firehose_stream_persist, + }; + firehose_tracepoint_t ft; + uint64_t stamp = firehose_tracepoint_time(flags); + + for (size_t i = 0; i < countof(streams); i++) { + ft = _voucher_activity_tracepoint_reserve(stamp, streams[i], pubsize, + 0, NULL); + if 
(!fastpath(ft)) continue;
+
+		uint8_t *pubptr = ft->ft_data;
+		if (current_id) {
+			pubptr = _dispatch_memappend(pubptr, &current_id);
+		}
+		if (creator_id) {
+			pubptr = _dispatch_memappend(pubptr, &creator_id);
 		}
-		typeof(bits) mask = ((((typeof(bits))1) << len) - 1) << start;
-		bits &= ~mask;
-		_voucher_activity_heap_madvise(bitmap_num, start, len);
+		if (parent_id) {
+			pubptr = _dispatch_memappend(pubptr, &parent_id);
+		}
+		pubptr = _dispatch_memappend(pubptr, &va_id);
+		pubptr = _dispatch_memappend(pubptr, &location);
+		_voucher_activity_tracepoint_flush(ft, ftid);
 	}
+	*trace_id = ftid.ftid_value;
+	return v;
 }

 void
-_voucher_activity_heap_pressure_warn(void)
+_voucher_activity_swap(firehose_activity_id_t old_id,
+		firehose_activity_id_t new_id)
 {
-	if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) {
-		return;
-	}
-	volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap;
-	bitmap = vam_buffer_bitmap();
-	pressure_locked_bitmap = vam_pressure_locked_bitmap();
-
-	// number of bitmaps needed to map the current buffer limit =
-	// ceil(buffer limit / bits per bitmap)
-	size_t nbuffers = _voucher_heap_buffer_limit();
-	size_t nbitmaps_quot = nbuffers / _voucher_activity_bits_per_bitmap_base_t;
-	size_t nbitmaps_rem = nbuffers % _voucher_activity_bits_per_bitmap_base_t;
-	size_t nbitmaps = nbitmaps_quot + ((nbitmaps_rem == 0) ? 0 : 1);
-
-	for (size_t i = 0; i < nbitmaps; i++) {
-		_voucher_activity_bitmap_base_t got_bits;
-		got_bits = dispatch_atomic_or_orig(&bitmap[i], ~((typeof(bitmap[i]))0),
-				relaxed);
-		got_bits = ~got_bits; // Now 1 means 'acquired this one, madvise it'
-		_voucher_activity_heap_madvise_contiguous(i, got_bits);
-		pressure_locked_bitmap[i] |= got_bits;
-	}
-}
-
-void
-_voucher_activity_heap_pressure_normal(void)
-{
-	if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) {
-		return;
-	}
-	volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap;
-	bitmap = vam_buffer_bitmap();
-	pressure_locked_bitmap = vam_pressure_locked_bitmap();
-	for (size_t i = 0; i < _voucher_activity_bitmaps_per_heap; i++) {
-		_voucher_activity_bitmap_base_t free_bits = pressure_locked_bitmap[i];
-		pressure_locked_bitmap[i] = 0;
-		if (free_bits != 0) {
-			(void)dispatch_atomic_and(&bitmap[i], ~free_bits, release);
-		}
-	}
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_voucher_activity_buffer_init(_voucher_activity_t act,
-		_voucher_activity_buffer_header_t buffer, bool initial)
-{
-	_voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer;
-	_voucher_activity_tracepoint_init_with_id(vat, act->va_trace_id,
-			act->va_location, !initial);
-	buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header |
-			_voucher_activity_trace_flag_activity |
-			(initial ?
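// The emission loop above follows a reserve/encode/flush protocol: reserve
// tracepoint space in a stream, append fixed-width fields, then flush to
// publish the record. A schematic sketch of the encoding step; MEMAPPEND
// mirrors what libdispatch's _dispatch_memappend helper does (memcpy plus
// pointer advance), and the flush step is left abstract.
#include <stdint.h>
#include <string.h>

#define MEMAPPEND(ptr, field) \
		(memcpy((ptr), &(field), sizeof(field)), (ptr) + sizeof(field))

static size_t
encode_activity_record(uint8_t *record, uint64_t current_id,
		uint64_t new_aid, uint64_t location)
{
	uint8_t *p = record;
	if (current_id) p = MEMAPPEND(p, current_id); // optional field
	p = MEMAPPEND(p, new_aid);
	p = MEMAPPEND(p, location);
	return (size_t)(p - record); // length to hand to the flush step
}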
_voucher_activity_trace_flag_start : 0);
-	buffer->vabh_activity_id = act->va_id;
-	buffer->vabh_pos.vabp_atomic_pos = 0;
-	buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx = 1;
-}
-
-static _voucher_activity_buffer_header_t
-_voucher_activity_buffer_alloc_slow(_voucher_activity_t act,
-		_voucher_activity_buffer_header_t current)
-{
-	_voucher_activity_buffer_header_t buffer;
-	_voucher_activity_lock_lock(va_buffers_lock(act)); // TODO: revisit locking
-	buffer = act->va_current_buffer;
-	if (buffer != current) {
-		_voucher_activity_lock_unlock(va_buffers_lock(act));
-		return buffer;
-	}
-	buffer = TAILQ_FIRST(&act->va_buffers);
-	if (buffer != TAILQ_LAST(&act->va_buffers,
-			_voucher_activity_buffer_list_s)) {
-		TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
-		TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
-	}
-	_voucher_activity_lock_unlock(va_buffers_lock(act));
-	if (_voucher_activity_buffer_is_full(buffer)) {
-		_voucher_activity_firehose_wait(act, buffer);
-	}
-	if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
-			&current, release)) {
-		if (_voucher_activity_buffer_mark_full(current)) {
-			_voucher_activity_firehose_push(act, current);
-		}
-		_dispatch_voucher_activity_debug("buffer reuse %p", act, buffer);
-	} else {
-		buffer = current;
-	}
-	return buffer;
-}
-
-static _voucher_activity_buffer_header_t
-_voucher_activity_buffer_alloc(_voucher_activity_t act,
-		_voucher_activity_buffer_header_t current)
-{
-	_voucher_activity_buffer_header_t buffer = NULL;
-	if (act->va_buffer_count < act->va_buffer_limit) {
-		buffer = _voucher_activity_heap_buffer_alloc();
-		if (buffer && dispatch_atomic_inc2o(act, va_buffer_count, relaxed) >
-				act->va_buffer_limit) {
-			dispatch_atomic_dec2o(act, va_buffer_count, relaxed);
-			_voucher_activity_heap_buffer_free(buffer);
-			buffer = NULL;
-		}
-	}
-	if (!buffer) return _voucher_activity_buffer_alloc_slow(act, current);
-	_voucher_activity_buffer_init(act, buffer, false);
-	if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
-			&current, release)) {
-		_voucher_activity_lock_lock(va_buffers_lock(act));
-		TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
-		_voucher_activity_lock_unlock(va_buffers_lock(act));
-		if (_voucher_activity_buffer_mark_full(current)) {
-			_voucher_activity_firehose_push(act, current);
-		}
-		_dispatch_voucher_activity_debug("buffer alloc %p", act, buffer);
-	} else {
-		dispatch_atomic_dec2o(act, va_buffer_count, relaxed);
-		_voucher_activity_heap_buffer_free(buffer);
-		buffer = current;
-	}
-	return buffer;
-}
-
-#pragma mark -
-#pragma mark _voucher_activity_t
-
-#define _voucher_activity_ordered_insert(_act, head, field) do { \
-		typeof(_act) _vai; \
-		TAILQ_FOREACH(_vai, (head), field) { \
-			if (_act->va_id < _vai->va_id) break; \
-		} \
-		if (_vai) { \
-			TAILQ_INSERT_BEFORE(_vai, _act, field); \
-		} else { \
-			TAILQ_INSERT_TAIL((head), _act, field); \
-		} } while (0);
-
-static void _voucher_activity_dispose(_voucher_activity_t act);
-static _voucher_atm_t _voucher_atm_copy(atm_aid_t atm_id);
-static inline void _voucher_atm_release(_voucher_atm_t vatm);
-static atm_aid_t _voucher_mach_voucher_get_atm_id(mach_voucher_t kv);
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_voucher_activity_try_retain(_voucher_activity_t act)
-{
-	// not using _os_object_refcnt* because we don't need barriers:
-	// activities are immutable and are in a hash table with a lock
-	int use_cnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed);
-	_dispatch_voucher_activity_debug("retain -> %d", act, use_cnt +
1); - if (slowpath(use_cnt < 0)) { - _dispatch_voucher_activity_debug("overrelease", act); - DISPATCH_CRASH("Activity overrelease"); - } - return use_cnt > 0; -} + if (_voucher_activity_disabled()) return; -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_t -_voucher_activity_retain(_voucher_activity_t act) -{ - if (slowpath(!_voucher_activity_try_retain(act))) { - _dispatch_voucher_activity_debug("resurrection", act); - DISPATCH_CRASH("Activity resurrection"); - } - return act; -} + firehose_tracepoint_id_u ftid = { .ftid = { + ._namespace = firehose_tracepoint_namespace_activity, + ._type = _firehose_tracepoint_type_activity_swap, + } }; + uint16_t pubsize = 0; -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_release(_voucher_activity_t act) -{ - // not using _os_object_refcnt* because we don't need barriers: - // activities are immutable and are in a hash table with a lock - int use_cnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed); - _dispatch_voucher_activity_debug("release -> %d", act, use_cnt + 1); - if (fastpath(use_cnt >= 0)) { - return; + if (old_id) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_current_aid); + pubsize += sizeof(firehose_activity_id_t); } - if (slowpath(use_cnt < -1)) { - _dispatch_voucher_activity_debug("overrelease", act); - DISPATCH_CRASH("Activity overrelease"); + if (new_id) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, activity, has_other_aid); + pubsize += sizeof(firehose_activity_id_t); } - _voucher_activity_remove(act); - _voucher_activity_dispose(act); -} -static _voucher_activity_t -_voucher_activity_find_and_retain(voucher_activity_id_t va_id, uint32_t hash) -{ - // not using _os_object_refcnt* because we don't need barriers: - // activities are immutable and are in a hash table with a lock - // - // assumes vam_activities_lock held - _voucher_activity_t act; - TAILQ_FOREACH(act, vam_activities(hash), va_list) { - if (act->va_id == va_id) { - if (fastpath(_voucher_activity_try_retain(act))) { - return act; - } + firehose_stream_t stream = firehose_stream_metadata; + firehose_tracepoint_t ft; + firehose_activity_flags_t flags = FIREHOSE_ACTIVITY_ID_FLAGS(old_id) | + FIREHOSE_ACTIVITY_ID_FLAGS(new_id); + uint64_t stamp = firehose_tracepoint_time(flags); - // disallow resurrection - dispatch_atomic_dec2o(act, va_refcnt, relaxed); - _dispatch_voucher_activity_debug("undo resurrection", act); - } - } - return NULL; -} + _dispatch_voucher_ktrace_activity_adopt(new_id); -static _voucher_activity_t -_voucher_activity_copy_from_id(voucher_activity_id_t va_id) -{ - uint32_t hash = VACTID_HASH(va_id); - _voucher_activity_lock_lock(vam_activities_lock()); - _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash); - if (act) { - _dispatch_voucher_activity_debug("copy from id 0x%llx", act, va_id); - } - _voucher_activity_lock_unlock(vam_activities_lock()); - return act; + ft = _voucher_activity_tracepoint_reserve(stamp, stream, pubsize, 0, NULL); + if (!fastpath(ft)) return; + uint8_t *pubptr = ft->ft_data; + if (old_id) pubptr = _dispatch_memappend(pubptr, &old_id); + if (new_id) pubptr = _dispatch_memappend(pubptr, &new_id); + _voucher_activity_tracepoint_flush(ft, ftid); } -static _voucher_activity_t -_voucher_activity_try_insert(_voucher_activity_t act_new) +firehose_activity_id_t +voucher_get_activity_id_and_creator(voucher_t v, uint64_t *creator_pid, + firehose_activity_id_t *parent_id) { - voucher_activity_id_t va_id = act_new->va_id; - uint32_t hash = VACTID_HASH(va_id); - _voucher_activity_lock_lock(vam_activities_lock()); 
- _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash); - if (act) { - _dispatch_voucher_activity_debug("try insert: failed (%p)", act,act_new); - } else { - if (slowpath(_TAILQ_IS_ENQUEUED(act_new, va_list))) { - _dispatch_voucher_activity_debug("corruption", act_new); - DISPATCH_CRASH("Activity corruption"); - } - TAILQ_INSERT_TAIL(vam_activities(hash), act_new, va_list); - _dispatch_voucher_activity_debug("try insert: succeeded", act_new); + if (v == VOUCHER_CURRENT) { + v = _voucher_get(); } - _voucher_activity_lock_unlock(vam_activities_lock()); - return act; -} - -static void -_voucher_activity_remove(_voucher_activity_t act) -{ - voucher_activity_id_t va_id = act->va_id; - uint32_t hash = VACTID_HASH(va_id); - - _voucher_activity_lock_lock(vam_activities_lock()); - if (slowpath(!va_id || !_TAILQ_IS_ENQUEUED(act, va_list))) { - _dispatch_voucher_activity_debug("corruption", act); - DISPATCH_CRASH("Activity corruption"); + if (v == VOUCHER_NULL) { + if (creator_pid) *creator_pid = 0; + if (parent_id) *parent_id = FIREHOSE_ACTIVITY_ID_NULL; + return FIREHOSE_ACTIVITY_ID_NULL; } - TAILQ_REMOVE(vam_activities(hash), act, va_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_list); - act->va_list.tqe_next = (void*)~0ull; - _dispatch_voucher_activity_debug("remove", act); - _voucher_activity_lock_unlock(vam_activities_lock()); + if (creator_pid) *creator_pid = v->v_activity_creator; + if (parent_id) *parent_id = v->v_parent_activity; + return v->v_activity; } -static _voucher_activity_t -_voucher_activity_create_with_atm(_voucher_atm_t vatm, - voucher_activity_id_t va_id, voucher_activity_trace_id_t trace_id, - uint64_t location, _voucher_activity_buffer_header_t buffer) +firehose_activity_id_t +voucher_get_activity_id(voucher_t v, firehose_activity_id_t *parent_id) { - if (!buffer) buffer = _voucher_activity_heap_buffer_alloc(); - if (!buffer) { - _dispatch_voucher_atm_debug("no buffer", vatm); - _voucher_atm_release(vatm); // consume vatm reference - return NULL; - } - _voucher_activity_t act = _dispatch_calloc(1ul, - sizeof(struct _voucher_activity_s)); - act->va_id = va_id; - act->va_trace_id = trace_id ? 
trace_id : _voucher_activity_trace_id_release; - act->va_location = location; - act->va_buffer_limit = _voucher_activity_buffer_limit(); - TAILQ_INIT(&act->va_buffers); - act->va_current_buffer = buffer; - act->va_atm = vatm; // transfer vatm reference - _voucher_activity_lock_init(va_buffers_lock(act)); - if (dispatch_assume_zero(pthread_mutex_init(&act->va_mutex, NULL)) || - dispatch_assume_zero(pthread_cond_init(&act->va_cond, NULL))) { - DISPATCH_CLIENT_CRASH("Could not initialize activity"); - } - _TAILQ_MARK_NOT_ENQUEUED(act, va_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list); - - _voucher_activity_buffer_init(act, buffer, true); - TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); - _voucher_activity_t actx = _voucher_activity_try_insert(act); - if (actx) { - _voucher_activity_dispose(act); - act = actx; - } - _dispatch_voucher_activity_debug("create", act); - return act; + return voucher_get_activity_id_and_creator(v, NULL, parent_id); } -static void -_voucher_activity_dispose(_voucher_activity_t act) -{ - _dispatch_voucher_activity_debug("dispose", act); - _voucher_atm_release(act->va_atm); - if (slowpath(_TAILQ_IS_ENQUEUED(act, va_list))) { - _dispatch_voucher_activity_debug("corruption", act); - DISPATCH_CRASH("Activity corruption"); - } - act->va_list.tqe_next = DISPATCH_OBJECT_LISTLESS; - dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_list)); - dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)); - _voucher_activity_buffer_header_t buffer, tmp; - TAILQ_FOREACH_SAFE(buffer, &act->va_buffers, vabh_list, tmp) { - if (buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx > 1) { - dispatch_assert(_voucher_activity_buffer_mark_full(buffer)); - _voucher_activity_firehose_push(act, buffer); - } - TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list); - _dispatch_voucher_activity_debug("buffer free %p", act, buffer); - _voucher_activity_heap_buffer_free(buffer); - } - (void)dispatch_assume_zero(pthread_mutex_destroy(&act->va_mutex)); - (void)dispatch_assume_zero(pthread_cond_destroy(&act->va_cond)); - free(act); -} - -DISPATCH_NOINLINE void -_voucher_activity_firehose_push(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer) -{ - if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock"); - } - _dispatch_voucher_activity_debug("firehose push %p", act, buffer); - // TODO: call firehose_push - VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(full, buffer); - _voucher_activity_buffer_init(act, buffer, false); - if (dispatch_assume_zero(pthread_cond_broadcast(&act->va_cond))) { - DISPATCH_CLIENT_CRASH("Activity corruption: cond_broadcast"); - } - if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock"); - } -} - -DISPATCH_NOINLINE -static void -_voucher_activity_firehose_wait(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer) -{ - if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock"); - } - while (_voucher_activity_buffer_is_full(buffer)) { - _dispatch_voucher_activity_debug("firehose wait %p", act, buffer); - if (dispatch_assume_zero(pthread_cond_wait(&act->va_cond, - &act->va_mutex))){ - DISPATCH_CLIENT_CRASH("Activity corruption: cond_wait"); - } - } - if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock"); - } -} - -static 
_voucher_activity_t -_voucher_activity_copy_from_mach_voucher(mach_voucher_t kv, - voucher_activity_id_t va_id) +voucher_activity_flush(firehose_stream_t stream) { - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); - if (_voucher_activity_disabled()) return NULL; - _voucher_activity_t act = NULL; - if (dispatch_assume(va_id)) { - if ((act = _voucher_activity_copy_from_id(va_id))) return act; - } - atm_aid_t atm_id = _voucher_mach_voucher_get_atm_id(kv); - if (!dispatch_assume(atm_id)) return NULL; - _voucher_activity_buffer_header_t buffer; - buffer = _voucher_activity_heap_buffer_alloc(); - if (!buffer) return NULL; - _dispatch_kvoucher_debug("atm copy/create from <%lld>", kv, atm_id); - _voucher_atm_t vatm = _voucher_atm_copy(atm_id); - if (!vatm) vatm = _voucher_atm_create(kv, atm_id); - if (!vatm) { - _voucher_activity_heap_buffer_free(buffer); - return NULL; - } - // consumes vatm reference: - act = _voucher_activity_create_with_atm(vatm, va_id, 0, 0, buffer); - _dispatch_voucher_activity_debug("copy from kvoucher[0x%08x]", act, kv); - return act; -} - -#pragma mark - -#pragma mark _voucher_atm_t - -static void _voucher_atm_remove(_voucher_atm_t vatm); -static void _voucher_atm_dispose(_voucher_atm_t vatm, bool unregister); - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_atm_try_retain(_voucher_atm_t vatm) -{ - // not using _os_object_refcnt* because we don't need barriers: - // vouchers atm are immutable and are in a hash table with a lock - // - // assumes vam_atms_lock held - int refcnt = dispatch_atomic_inc2o(vatm, vatm_refcnt, relaxed); - _dispatch_voucher_atm_debug("retain -> %d", vatm, refcnt + 1); - if (slowpath(refcnt < 0)) { - _dispatch_voucher_atm_debug("overrelease", vatm); - DISPATCH_CRASH("ATM overrelease"); - } - return refcnt > 0; + if (_voucher_activity_disabled()) return; + firehose_buffer_stream_flush(_firehose_task_buffer, stream); } DISPATCH_ALWAYS_INLINE -static inline _voucher_atm_t -_voucher_atm_retain(_voucher_atm_t vatm) -{ - if (slowpath(!_voucher_atm_try_retain(vatm))) { - _dispatch_voucher_atm_debug("resurrection", vatm); - DISPATCH_CRASH("ATM resurrection"); - } - return vatm; -} +static inline firehose_tracepoint_id_t +_voucher_activity_trace(firehose_stream_t stream, + firehose_tracepoint_id_u ftid, uint64_t stamp, + const void *pubdata, size_t publen, + const void *privdata, size_t privlen) +{ + const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); + const size_t _firehose_chunk_payload_size = + sizeof(((struct firehose_buffer_chunk_s *)0)->fbc_data); + + if (_voucher_activity_disabled()) return 0; + + firehose_tracepoint_t ft; + firehose_activity_id_t va_id = 0; + firehose_buffer_chunk_t fbc; + uint8_t *privptr, *pubptr; + size_t pubsize = publen; + voucher_t ov = _voucher_get(); + uint64_t creator_pid; -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_atm_release(_voucher_atm_t vatm) -{ - // not using _os_object_refcnt* because we don't need barriers: - // vouchers atm are immutable are into a hash table with a lock - int refcnt = dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed); - _dispatch_voucher_atm_debug("release -> %d", vatm, refcnt + 1); - if (fastpath(refcnt >= 0)) { - return; + if ((va_id = _voucher_get_activity_id(ov, &creator_pid))) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_current_aid); + pubsize += sizeof(va_id); } - if (slowpath(refcnt < -1)) { - _dispatch_voucher_atm_debug("overrelease", vatm); - DISPATCH_CRASH("ATM overrelease"); - } - _voucher_atm_remove(vatm); 
- _voucher_atm_dispose(vatm, true); -} - -static _voucher_atm_t -_voucher_atm_find_and_retain(atm_aid_t atm_id, uint32_t hash) -{ - // not using _os_object_refcnt* because we don't need barriers: - // vouchers atm are immutable are into a hash table with a lock - // - // assumes vam_atms_lock held - _voucher_atm_t vatm; - TAILQ_FOREACH(vatm, vam_atms(hash), vatm_list){ - if (vatm->vatm_id == atm_id) { - if (fastpath(_voucher_atm_try_retain(vatm))) { - return vatm; - } - - // disallow resurrection - dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed); - _dispatch_voucher_atm_debug("undo resurrection", vatm); + if (FIREHOSE_TRACE_ID_HAS_FLAG(ftid, base, has_unique_pid)) { + if (creator_pid) { + pubsize += sizeof(creator_pid); + } else { + FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid); } - } - return NULL; -} - -static _voucher_atm_t -_voucher_atm_copy(atm_aid_t atm_id) -{ - uint32_t hash = VATMID_HASH(atm_id); - _voucher_activity_lock_lock(vam_atms_lock()); - _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash); - if (vatm) { - _dispatch_voucher_atm_debug("copy", vatm); - } - _voucher_activity_lock_unlock(vam_atms_lock()); - return vatm; -} - -static _voucher_atm_t -_voucher_atm_try_insert(_voucher_atm_t vatm_new) -{ - atm_aid_t atm_id = vatm_new->vatm_id; - uint32_t hash = VATMID_HASH(atm_id); - _voucher_activity_lock_lock(vam_atms_lock()); - _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash); - if (vatm) { - _dispatch_voucher_atm_debug("try insert: failed (%p)", vatm, vatm_new); } else { - if (slowpath(_TAILQ_IS_ENQUEUED(vatm_new, vatm_list))) { - _dispatch_voucher_atm_debug("corruption", vatm_new); - DISPATCH_CRASH("ATM corruption"); - } - TAILQ_INSERT_TAIL(vam_atms(hash), vatm_new, vatm_list); - _dispatch_voucher_atm_debug("try insert: succeeded", vatm_new); + creator_pid = 0; } - _voucher_activity_lock_unlock(vam_atms_lock()); - return vatm; -} -static void -_voucher_atm_remove(_voucher_atm_t vatm) -{ - atm_aid_t atm_id = vatm->vatm_id; - uint32_t hash = VATMID_HASH(atm_id); - - _voucher_activity_lock_lock(vam_atms_lock()); - if (slowpath(!atm_id || !_TAILQ_IS_ENQUEUED(vatm, vatm_list))) { - _dispatch_voucher_atm_debug("corruption", vatm); - DISPATCH_CRASH("ATM corruption"); + if (privlen) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, log, has_private_data); + pubsize += sizeof(struct firehose_buffer_range_s); } - TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list); - _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list); - vatm->vatm_list.tqe_next = (void*)~0ull; - _dispatch_voucher_atm_debug("remove", vatm); - _voucher_activity_lock_unlock(vam_atms_lock()); -} -DISPATCH_NOINLINE -static void -_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd) -{ - mach_voucher_t kv = _voucher_get_atm_mach_voucher(_voucher_get()); - if (!kv) return; - - mach_atm_subaid_t subaid = 0; - voucher_t v = _voucher_get(); - if (v) { - unsigned int activities = v->v_activities; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (activities) { - subaid = activity_ids[0]; - } + if (slowpath(ft_size + pubsize + privlen > _firehose_chunk_payload_size)) { + DISPATCH_CLIENT_CRASH(ft_size + pubsize + privlen, "Log is too large"); } - kern_return_t kr; - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&subaid; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(mach_atm_subaid_t); - mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaid; - mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t); - kr = 
mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - kvc_cmd, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); -} - -static atm_aid_t -_voucher_mach_voucher_get_atm_id(mach_voucher_t kv) -{ - kern_return_t kr; - atm_aid_t atm_id = 0; - mach_voucher_attr_content_t kvc = (mach_voucher_attr_content_t)&atm_id; - mach_voucher_attr_content_size_t kvc_size = sizeof(atm_id); - kr = mach_voucher_extract_attr_content(kv, MACH_VOUCHER_ATTR_KEY_ATM, kvc, - &kvc_size); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - return atm_id; -} - -static mach_voucher_t -_voucher_atm_mach_voucher_create(atm_aid_t *atm_id_ptr) -{ - kern_return_t kr; - mach_voucher_t kv; - static const mach_voucher_attr_recipe_data_t atm_create_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_ATM_CREATE, - }; - kr = _voucher_create_mach_voucher(&atm_create_recipe, - sizeof(atm_create_recipe), &kv); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not create ATM mach voucher"); - } - atm_aid_t atm_id = _voucher_mach_voucher_get_atm_id(kv); - if (!dispatch_assume(atm_id)) { - DISPATCH_CLIENT_CRASH("Could not extract ATM ID"); - } - _dispatch_kvoucher_debug("atm create <%lld>", kv, atm_id); - *atm_id_ptr = atm_id; - return kv; -} - -static mach_voucher_t -_voucher_atm_mach_voucher_copy(mach_voucher_t akv) -{ - kern_return_t kr; - mach_voucher_t kv; - const mach_voucher_attr_recipe_data_t atm_copy_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_COPY, - .previous_voucher = akv, - }; - kr = _voucher_create_mach_voucher(&atm_copy_recipe, - sizeof(atm_copy_recipe), &kv); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not copy ATM mach voucher"); - } - _dispatch_kvoucher_debug("copy atm voucher from [0x%08x]", kv, akv); - return kv; -} - -static void -_voucher_atm_register(_voucher_atm_t vatm) -{ - mach_voucher_t kv = vatm->vatm_kvoucher; - if (!kv) return; - kern_return_t kr; - atm_guard_t gen = - dispatch_atomic_inc(&_voucher_atm_generation, relaxed); - _dispatch_voucher_atm_debug("atm register %lld", vatm, gen); - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen); - mach_voucher_attr_content_t kvc_out = NULL; - mach_voucher_attr_content_size_t kvc_out_size = 0; - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - ATM_ACTION_REGISTER, kvc_in, kvc_in_size, kvc_out, - &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - DISPATCH_CLIENT_CRASH("Could not register ATM ID"); - } - vatm->vatm_generation = gen; - _dispatch_voucher_atm_debug("atm registered %lld", vatm, - vatm->vatm_generation); -} - -static void -_voucher_atm_unregister(_voucher_atm_t vatm) -{ - _dispatch_voucher_atm_debug("atm unregister %lld", vatm, - vatm->vatm_generation); - mach_voucher_t kv = vatm->vatm_kvoucher; - dispatch_assert(kv); - kern_return_t kr; - atm_guard_t gen = vatm->vatm_generation; - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen); - mach_voucher_attr_content_t kvc_out = NULL; - mach_voucher_attr_content_size_t kvc_out_size = 0; - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - ATM_ACTION_UNREGISTER, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - if (kr && kr != KERN_INVALID_VALUE) { - (void)dispatch_assume_zero(kr); + ft = 
_voucher_activity_tracepoint_reserve(stamp, stream, (uint16_t)pubsize, + (uint16_t)privlen, &privptr); + if (!fastpath(ft)) return 0; + pubptr = ft->ft_data; + if (va_id) { + pubptr = _dispatch_memappend(pubptr, &va_id); } - _dispatch_voucher_atm_debug("atm unregistered %lld", vatm, - vatm->vatm_generation); -} - -static _voucher_atm_t -_voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id) -{ - _voucher_atm_t vatm = _dispatch_calloc(1ul, sizeof(struct _voucher_atm_s)); - kv = kv ? _voucher_atm_mach_voucher_copy(kv) : - _voucher_atm_mach_voucher_create(&atm_id); - vatm->vatm_kvoucher = kv; - vatm->vatm_id = atm_id; - _voucher_atm_t vatmx = _voucher_atm_try_insert(vatm); - if (vatmx) { - _voucher_atm_dispose(vatm, false); - vatm = vatmx; - } else { - _voucher_atm_register(vatm); - } - _dispatch_voucher_atm_debug("create with kvoucher[0x%08x]", vatm, kv); - return vatm; -} - -static void -_voucher_atm_dispose(_voucher_atm_t vatm, bool unregister) -{ - _dispatch_voucher_atm_debug("dispose", vatm); - if (slowpath(_TAILQ_IS_ENQUEUED(vatm, vatm_list))) { - _dispatch_voucher_atm_debug("corruption", vatm); - DISPATCH_CRASH("ATM corruption"); + if (creator_pid) { + pubptr = _dispatch_memappend(pubptr, &creator_pid); } - vatm->vatm_list.tqe_next = DISPATCH_OBJECT_LISTLESS; - if (vatm->vatm_kvoucher) { - if (unregister) _voucher_atm_unregister(vatm); - _voucher_dealloc_mach_voucher(vatm->vatm_kvoucher); - vatm->vatm_kvoucher = MACH_VOUCHER_NULL; - } - free(vatm); -} - -DISPATCH_NOINLINE -static voucher_activity_id_t -_voucher_atm_subid_make(_voucher_atm_t vatm, voucher_activity_flag_t flags) -{ - mach_voucher_t kv = vatm->vatm_kvoucher; - _dispatch_voucher_atm_debug("create subid from atm", vatm); - kern_return_t kr; - mach_atm_subaid_t naid; - mach_voucher_attr_content_t kvc_in = NULL; - mach_voucher_attr_content_size_t kvc_in_size = 0; - mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&naid; - mach_voucher_attr_content_size_t kvc_out_size = sizeof(naid); - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - ATM_ACTION_GETSUBAID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not get next ATM ID"); - } - _dispatch_voucher_atm_debug("created subid from atm %lld", vatm, naid); - return VATMID2ACTID(naid, flags); -} - -#pragma mark - -#pragma mark voucher_activity_id_t - -static const size_t _voucher_activity_maxsize = - _voucher_activity_buffer_size - _voucher_activity_buffer_header_size - - _voucher_activity_strings_header_size; - -voucher_activity_id_t -voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, - uint64_t location, voucher_activity_flag_t flags) -{ - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - voucher_activity_id_t va_id = 0; - _voucher_atm_t vatm = NULL; - _voucher_activity_t act = NULL; - _voucher_activity_tracepoint_t vat = NULL; - unsigned int activities = 1, oactivities = 0; - voucher_t ov = _voucher_get(); - vatm = _voucher_get_atm(ov); - if (!(flags & voucher_activity_flag_force) && ov && ov->v_activities) { - oactivities = ov->v_activities; - activities += oactivities; - if (activities > _voucher_max_activities) { - va_id = _voucher_atm_subid_make(vatm, flags); - goto out; - } - } - va_id = _voucher_atm_subid_make(vatm, flags); - if (activities == 1) { - // consumes vatm reference: - act = 
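// Sketch of the private-data layout handled above: the public portion of a
// tracepoint carries only an { offset, length } descriptor, while the
// payload bytes are copied into a private region of the same chunk. The
// struct and function names are illustrative stand-ins for the firehose
// types (firehose_buffer_range_s et al.).
#include <stdint.h>
#include <string.h>

struct range_desc {
	uint16_t offset; // of the private bytes, from the chunk start
	uint16_t length;
};

// pub and priv both point into one chunk whose base is chunk_start.
static uint8_t *
append_private_payload(uint8_t *pub, uint8_t *priv,
		const uint8_t *chunk_start, const void *data, size_t len)
{
	struct range_desc r = {
		.offset = (uint16_t)(priv - chunk_start),
		.length = (uint16_t)len,
	};
	memcpy(priv, data, len);    // private payload bytes
	memcpy(pub, &r, sizeof(r)); // public descriptor only
	return pub + sizeof(r);
}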
_voucher_activity_create_with_atm(_voucher_atm_retain(vatm), - va_id, trace_id, location, NULL); - vat = (_voucher_activity_tracepoint_t)act; - } else if (ov && ov->v_activity) { - act = _voucher_activity_retain(ov->v_activity); - } - pthread_priority_t priority = _voucher_get_priority(ov); - mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0; - voucher_t v = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra); - } - if (ov && ov->v_kvoucher) { - voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov; - v->v_kvbase = _voucher_retain(kvb); - v->v_kvoucher = kvb->v_kvoucher; - } - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (oactivities) { - memcpy(activity_ids, _voucher_activity_ids(ov), - oactivities * sizeof(voucher_activity_id_t)); - } - activity_ids[activities-1] = va_id; - v->v_atm = _voucher_atm_retain(vatm); - v->v_activity = act; - _voucher_swap(ov, v); - if (vat) return va_id; // new activity buffer contains trace info -out: - _voucher_activity_trace_activity_event(trace_id, va_id, start); - return va_id; -} - -voucher_activity_id_t -voucher_activity_start(voucher_activity_trace_id_t trace_id, - voucher_activity_flag_t flags) -{ - return voucher_activity_start_with_location(trace_id, 0, flags); -} - -void -voucher_activity_end(voucher_activity_id_t va_id) -{ - if (!va_id) return; - _voucher_activity_trace_activity_event(_voucher_activity_trace_id_release, - va_id, end); - voucher_t v = _voucher_get(); - if (!v) return; - unsigned int activities = v->v_activities, act_idx = activities; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - while (act_idx) { - if (activity_ids[act_idx-1] == va_id) break; - act_idx--; - } - if (!act_idx) return; // activity_id not found - pthread_priority_t priority = _voucher_get_priority(v); - mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(v); - voucher_t nv = NULL; - if (act_idx > 1 || activities == 1) --activities; - if (priority || activities || extra || v->v_kvoucher) { - nv = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(nv), _voucher_extra_recipes(v),extra); - } - } - if (v->v_kvoucher) { - voucher_t kvb = v->v_kvbase ? 
v->v_kvbase : v; - nv->v_kvbase = _voucher_retain(kvb); - nv->v_kvoucher = kvb->v_kvoucher; - } - bool atm_collect = !activities; - if (activities) { - voucher_activity_id_t *new_activity_ids = _voucher_activity_ids(nv); - if (act_idx == 1 && _voucher_activity_default) { - atm_collect = true; - // default to _voucher_activity_default base activity - new_activity_ids[0] = _voucher_activity_default->va_id; - memcpy(&new_activity_ids[1], &activity_ids[1], - (activities - 1) * sizeof(voucher_activity_id_t)); - } else { - if (v->v_activity) { - nv->v_activity = _voucher_activity_retain(v->v_activity); - nv->v_atm = _voucher_atm_retain(v->v_atm); - } - memcpy(new_activity_ids, activity_ids, - --act_idx * sizeof(voucher_activity_id_t)); - if (act_idx < activities) { - memcpy(&new_activity_ids[act_idx], &activity_ids[act_idx+1], - (activities - act_idx) * sizeof(voucher_activity_id_t)); - } - } + if (privlen) { + fbc = firehose_buffer_chunk_for_address(ft); + struct firehose_buffer_range_s range = { + .fbr_offset = (uint16_t)(privptr - fbc->fbc_start), + .fbr_length = (uint16_t)privlen, + }; + pubptr = _dispatch_memappend(pubptr, &range); + _dispatch_mempcpy(privptr, privdata, privlen); } - _voucher_swap(v, nv); + _dispatch_mempcpy(pubptr, pubdata, publen); + _voucher_activity_tracepoint_flush(ft, ftid); + return ftid.ftid_value; } -unsigned int -voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count) +firehose_tracepoint_id_t +voucher_activity_trace(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen) { - voucher_t v = _voucher_get(); - if (!v || !count) return 0; - unsigned int activities = v->v_activities; - if (*count < activities) activities = *count; - *count = v->v_activities; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (activities && entries) { - memcpy(entries, activity_ids, activities * - sizeof(voucher_activity_id_t)); - } - return activities; + firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; + return _voucher_activity_trace(stream, ftid, timestamp, pubdata, publen, + NULL, 0); } -uint8_t -voucher_activity_get_namespace(void) +firehose_tracepoint_id_t +voucher_activity_trace_with_private_strings(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen, + const void *privdata, size_t privlen) { - voucher_t v = _voucher_get(); - if (!v || !v->v_activity) return 0; - voucher_activity_trace_id_t trace_id = v->v_activity->va_trace_id; - uint8_t cns = (uint8_t)(trace_id >> - _voucher_activity_trace_id_code_namespace_shift); - return cns; -} - -DISPATCH_NOINLINE -_voucher_activity_tracepoint_t -_voucher_activity_buffer_tracepoint_acquire_slow(_voucher_activity_t *vap, - _voucher_activity_buffer_header_t *vabp, unsigned int slots, - size_t strsize, uint16_t *stroffsetp) -{ - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat = NULL; - voucher_t v = _voucher_get(); - if (v && v->v_activity) { - act = v->v_activity; - } else { - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); - if (_voucher_activity_disabled()) return NULL; - act = _voucher_activity_default; - } - vab = act->va_current_buffer; - if (act == *vap && vab != *vabp) { - goto retry; // another slowpath raced us - } - do { - vab = _voucher_activity_buffer_alloc(act, vab); - if (!vab) break; -retry: - vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, 
strsize, - stroffsetp); - } while (!vat); - *vap = act; - *vabp = vab; - return vat; -} - -static inline void -_voucher_activity_trace_fault(voucher_activity_trace_id_t trace_id) -{ - if (!slowpath(_voucher_activity_trace_id_is_subtype(trace_id, error))) { - return; - } - mach_voucher_attr_command_t atm_cmd = ATM_ACTION_COLLECT; - if (_voucher_activity_trace_id_is_subtype(trace_id, fault)) { - atm_cmd = ATM_ACTION_LOGFAIL; - } - return _voucher_atm_fault(atm_cmd); -} - -uint64_t -voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, - void *buffer, size_t length) -{ - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - const unsigned int slots = length <= sizeof(vat->vat_data) ? 1 : 2; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, 0, NULL); - if (!vat) { - vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, - slots, 0, NULL); - } - if (!vat) return 0; - uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location, true); - void *tbuf = vat->vat_data; - size_t tlen = sizeof(vat->vat_data); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - if (length > tlen) { - vat->vat_flags |= _voucher_activity_trace_flag_wide_first; - buffer += tlen; - length -= tlen; - (++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint | - _voucher_activity_trace_flag_wide_second; - vat->vat_type = 0; vat->vat_namespace = 0; - tbuf = (void*)vat + offsetof(typeof(*vat), vat_code); - tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - } - _voucher_activity_trace_fault(trace_id); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } - return timestamp; -} - -uint64_t -voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, - uint64_t location, void *buffer, size_t length, const char *strings[], - size_t string_lengths[], size_t strings_size) -{ - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - uint16_t offset; - const unsigned int slots = length <= sizeof(vat->vat_data) ? 
1 : 2; - strings_size = MIN(strings_size, _voucher_activity_maxsize - - slots * sizeof(struct _voucher_activity_tracepoint_s)); - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, strings_size, - &offset); - if (!vat) { - vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, - slots, strings_size, &offset); - } - if (!vat) return 0; - uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location, false); - vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_strings; - vat->vat_stroff.vats_offset = offset; - void *tbuf = vat->vat_stroff.vats_data; - size_t tlen = sizeof(vat->vat_stroff.vats_data); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - if (length > tlen) { - vat->vat_flags |= _voucher_activity_trace_flag_wide_first; - buffer += tlen; - length -= tlen; - (++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint | - _voucher_activity_trace_flag_wide_second; - vat->vat_type = 0; vat->vat_namespace = 0; - tbuf = (void*)vat + offsetof(typeof(*vat), vat_code); - tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - } - const uint16_t offsetend = offset - (uint16_t)strings_size; - char *b = (char*)vab + _voucher_activity_buffer_size; - int i = 0; - while (offset > offsetend && strings[i]) { - size_t maxsize = MIN(string_lengths[i] + 1, offset - offsetend); - size_t len = strlcpy(b - offset, strings[i++], maxsize); - offset -= MIN(len + 1, maxsize); - } - _voucher_activity_trace_fault(trace_id); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } - return timestamp; -} - -uint64_t -voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, - uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4) -{ - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) { - vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, 1, - 0, NULL); - } - if (!vat) return 0; - uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location, true); - vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; - vat->vat_data[0] = arg1; - vat->vat_data[1] = arg2; - vat->vat_data[2] = arg3; - vat->vat_data[3] = arg4; - _voucher_activity_trace_fault(trace_id); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } - return timestamp; + firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; + return _voucher_activity_trace(stream, ftid, timestamp, + pubdata, publen, privdata, privlen); } #pragma mark - @@ -2399,40 +1374,27 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) size_t offset = 0; #define bufprintf(...) 
\ offset += dsnprintf(&buf[offset], bufsiz - offset, ##__VA_ARGS__) - bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x, ", v, + bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x", v, v->os_obj_xref_cnt + 1, v->os_obj_ref_cnt + 1); if (v->v_kvbase) { - bufprintf("base voucher %p, ", v->v_kvbase); + bufprintf(", base voucher %p", v->v_kvbase); } if (v->v_kvoucher) { - bufprintf("kvoucher%s 0x%x, ", v->v_kvoucher == v->v_ipc_kvoucher ? + bufprintf(", kvoucher%s 0x%x", v->v_kvoucher == v->v_ipc_kvoucher ? " & ipc kvoucher" : "", v->v_kvoucher); } if (v->v_ipc_kvoucher && v->v_ipc_kvoucher != v->v_kvoucher) { - bufprintf("ipc kvoucher 0x%x, ", v->v_ipc_kvoucher); - } - if (v->v_has_priority) { - bufprintf("QOS 0x%x, ", *_voucher_priority(v)); + bufprintf(", ipc kvoucher 0x%x", v->v_ipc_kvoucher); } - if (v->v_activities) { - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - bufprintf("activity IDs = { "); - unsigned int i; - for (i = 0; i < v->v_activities; i++) { - bufprintf("0x%llx, ", *activity_ids++); - } - bufprintf("}, "); + if (v->v_priority) { + bufprintf(", QOS 0x%x", v->v_priority); } if (v->v_activity) { - _voucher_activity_t va = v->v_activity; - _voucher_atm_t vatm = va->va_atm; - bufprintf("activity[%p] = { ID 0x%llx, ref %d, atm[%p] = { " - "AID 0x%llx, ref %d, kvoucher 0x%x } }, ", va, va->va_id, - va->va_refcnt + 1, va->va_atm, vatm->vatm_id, - vatm->vatm_refcnt + 1, vatm->vatm_kvoucher); - } - bufprintf("}"); + bufprintf(", activity 0x%llx (pid: 0x%16llx, parent 0x%llx)", + v->v_activity, v->v_activity_creator, v->v_parent_activity); + } + bufprintf(" }"); return offset; } @@ -2584,100 +1546,90 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf #endif void -_voucher_atfork_child(void) +_voucher_activity_debug_channel_init(void) { } void -_voucher_init(void) -{ -} - -void* -voucher_activity_get_metadata_buffer(size_t *length) +_voucher_atfork_child(void) { - *length = 0; - return NULL; } void -voucher_activity_buffer_hook_install_4libtrace( - _voucher_activity_buffer_hook_t hook) +_voucher_init(void) { - (void)hook; } -void -_voucher_activity_heap_pressure_normal(void) +void* +voucher_activity_get_metadata_buffer(size_t *length) { + *length = 0; + return NULL; } -void -_voucher_activity_heap_pressure_warn(void) +voucher_t +voucher_activity_create(firehose_tracepoint_id_t trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) { + (void)trace_id; (void)base; (void)flags; (void)location; + return NULL; } -voucher_activity_id_t -voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, - uint64_t location, voucher_activity_flag_t flags) +voucher_t +voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) { - (void)trace_id; (void)location; (void)flags; - return 0; + (void)trace_id; (void)base; (void)flags; (void)location; + return NULL; } -voucher_activity_id_t -voucher_activity_start(voucher_activity_trace_id_t trace_id, - voucher_activity_flag_t flags) +firehose_activity_id_t +voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id) { - (void)trace_id; (void)flags; + (void)voucher; (void)parent_id; return 0; } -void -voucher_activity_end(voucher_activity_id_t activity_id) +firehose_activity_id_t +voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, + firehose_activity_id_t *parent_id) { - (void)activity_id; -} - -unsigned int 
-voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count) -{ - (void)entries; (void)count; + if (creator_pid) *creator_pid = 0; + (void)voucher; (void)parent_id; return 0; } -uint8_t -voucher_activity_get_namespace(void) +firehose_tracepoint_id_t +voucher_activity_trace(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen) { + (void)stream; (void)trace_id; (void)timestamp; + (void)pubdata; (void)publen; return 0; } -uint64_t -voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, - void *buffer, size_t length) +firehose_tracepoint_id_t +voucher_activity_trace_with_private_strings(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen, + const void *privdata, size_t privlen) { - (void)trace_id; (void)location; (void)buffer; (void)length; + (void)stream; (void)trace_id; (void)timestamp; + (void)pubdata; (void)publen; (void)privdata; (void)privlen; return 0; } -uint64_t -voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, - uint64_t location, void *buffer, size_t length, const char *strings[], - size_t string_lengths[], size_t strings_size) +void +voucher_activity_flush(firehose_stream_t stream) { - (void)trace_id; (void)location; (void)buffer; (void)length; (void)strings; - (void)string_lengths; (void)strings_size; - return 0; + (void)stream; } -uint64_t -voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, - uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4) +void +voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) { - (void)trace_id; (void)location; - (void)arg1; (void)arg2; (void)arg3; (void)arg4; - return 0; + (void)hooks; } size_t @@ -2688,3 +1640,17 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) } #endif // VOUCHER_USE_MACH_VOUCHER + +#else // DISPATCH_VARIANT_DYLD_STUB + +firehose_activity_id_t +voucher_get_activity_id_4dyld(void) +{ +#if VOUCHER_USE_MACH_VOUCHER + return _voucher_get_activity_id(_voucher_get(), NULL); +#else + return 0; +#endif +} + +#endif // DISPATCH_VARIANT_DYLD_STUB diff --git a/src/voucher_internal.h b/src/voucher_internal.h index cc5ae22..3aa1a65 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -40,12 +40,7 @@ * @group Voucher Creation SPI * SPI intended for clients that need to create vouchers. */ - -#if OS_OBJECT_USE_OBJC -OS_OBJECT_DECL(voucher_recipe); -#else -typedef struct voucher_recipe_s *voucher_recipe_t; -#endif +OS_OBJECT_DECL_CLASS(voucher_recipe); /*! 
* @function voucher_create @@ -92,29 +87,11 @@ voucher_get_mach_voucher(voucher_t voucher); #pragma mark - #pragma mark voucher_t -#if TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100 -#undef VOUCHER_USE_MACH_VOUCHER -#define VOUCHER_USE_MACH_VOUCHER 0 -#endif -#ifndef VOUCHER_USE_MACH_VOUCHER -#if __has_include() -#define VOUCHER_USE_MACH_VOUCHER 1 -#endif -#endif - -#if VOUCHER_USE_MACH_VOUCHER -#undef DISPATCH_USE_IMPORTANCE_ASSERTION -#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 -#else -#undef MACH_RCV_VOUCHER -#define MACH_RCV_VOUCHER 0 -#endif // VOUCHER_USE_MACH_VOUCHER - void _voucher_init(void); void _voucher_atfork_child(void); -void _voucher_activity_heap_pressure_warn(void); -void _voucher_activity_heap_pressure_normal(void); +void _voucher_activity_debug_channel_init(void); +void _voucher_activity_swap(firehose_activity_id_t old_id, + firehose_activity_id_t new_id); void _voucher_xref_dispose(voucher_t voucher); void _voucher_dispose(voucher_t voucher); size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz); @@ -128,21 +105,13 @@ voucher_t _voucher_create_with_priority_and_mach_voucher(voucher_t voucher, pthread_priority_t priority, mach_voucher_t kv); void _voucher_dealloc_mach_voucher(mach_voucher_t kv); -#if OS_OBJECT_USE_OBJC -_OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher, object) #if VOUCHER_ENABLE_RECIPE_OBJECTS _OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher_recipe, object) #endif -#endif voucher_t voucher_retain(voucher_t voucher); void voucher_release(voucher_t voucher); -#define _TAILQ_IS_ENQUEUED(elm, field) \ - ((elm)->field.tqe_prev != NULL) -#define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ - do { (elm)->field.tqe_prev = NULL; } while (0) - #define VOUCHER_NO_MACH_VOUCHER MACH_PORT_DEAD #if VOUCHER_USE_MACH_VOUCHER @@ -152,29 +121,69 @@ void voucher_release(voucher_t voucher); #define DISPATCH_VOUCHER_ACTIVITY_DEBUG 1 #endif +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY +#include +#endif + +typedef uint32_t _voucher_magic_t; +typedef uint32_t _voucher_priority_t; + +#define VOUCHER_MAGIC_V3 ((_voucher_magic_t)0x0390cefa) // FACE9003 + +typedef struct _voucher_mach_udata_s { + _voucher_magic_t vmu_magic; + _voucher_priority_t vmu_priority; + uint8_t _vmu_after_priority[0]; + firehose_activity_id_t vmu_activity; + uint64_t vmu_activity_pid; + firehose_activity_id_t vmu_parent_activity; + uint8_t _vmu_after_activity[0]; +} _voucher_mach_udata_s; + +OS_ENUM(voucher_fields, uint16_t, + VOUCHER_FIELD_NONE = 0, + VOUCHER_FIELD_KVOUCHER = 1u << 0, + VOUCHER_FIELD_PRIORITY = 1u << 1, + VOUCHER_FIELD_ACTIVITY = 1u << 2, + +#if VOUCHER_ENABLE_RECIPE_OBJECTS + VOUCHER_FIELD_EXTRA = 1u << 15, +#else + VOUCHER_FIELD_EXTRA = 0, +#endif +); + typedef struct voucher_s { _OS_OBJECT_HEADER( - void *os_obj_isa, + struct voucher_vtable_s *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); TAILQ_ENTRY(voucher_s) v_list; mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference - struct _voucher_atm_s *v_atm; - struct _voucher_activity_s *v_activity; + firehose_activity_id_t v_activity; + uint64_t v_activity_creator; + firehose_activity_id_t v_parent_activity; + _voucher_priority_t v_priority; + unsigned int v_kv_has_importance:1; #if VOUCHER_ENABLE_RECIPE_OBJECTS size_t v_recipe_extra_offset; mach_voucher_attr_recipe_size_t v_recipe_extra_size; #endif - unsigned int v_has_priority:1; - unsigned int v_activities; - mach_voucher_attr_recipe_data_t v_recipes[]; } voucher_s; +#if 
VOUCHER_ENABLE_RECIPE_OBJECTS +#define _voucher_extra_size(v) ((v)->v_recipe_extra_size) +#define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset) +#else +#define _voucher_extra_size(v) 0 +#define _voucher_extra_recipes(v) NULL +#endif + #if VOUCHER_ENABLE_RECIPE_OBJECTS typedef struct voucher_recipe_s { _OS_OBJECT_HEADER( - const _os_object_class_s *os_obj_isa, + const _os_object_vtable_s *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); size_t vr_allocation_size; @@ -183,14 +192,6 @@ typedef struct voucher_recipe_s { } voucher_recipe_s; #endif -#define _voucher_recipes_base(r) (r[0]) -#define _voucher_recipes_atm(r) (r[1]) -#define _voucher_recipes_bits(r) (r[2]) -#define _voucher_base_recipe(v) (_voucher_recipes_base((v)->v_recipes)) -#define _voucher_atm_recipe(v) (_voucher_recipes_atm((v)->v_recipes)) -#define _voucher_bits_recipe(v) (_voucher_recipes_bits((v)->v_recipes)) -#define _voucher_recipes_size() (3 * sizeof(mach_voucher_attr_recipe_data_t)) - #if TARGET_OS_EMBEDDED #define VL_HASH_SIZE 64u // must be a power of two #else @@ -198,44 +199,24 @@ typedef struct voucher_recipe_s { #endif #define VL_HASH(kv) (MACH_PORT_INDEX(kv) & (VL_HASH_SIZE - 1)) -typedef uint32_t _voucher_magic_t; -const _voucher_magic_t _voucher_magic_v1 = 0x0190cefa; // little-endian FACE9001 -#define _voucher_recipes_magic(r) ((_voucher_magic_t*) \ - (_voucher_recipes_bits(r).content)) -#define _voucher_magic(v) _voucher_recipes_magic((v)->v_recipes) -typedef uint32_t _voucher_priority_t; -#define _voucher_recipes_priority(r) ((_voucher_priority_t*) \ - (_voucher_recipes_bits(r).content + sizeof(_voucher_magic_t))) -#define _voucher_priority(v) _voucher_recipes_priority((v)->v_recipes) -#define _voucher_activity_ids(v) ((voucher_activity_id_t*) \ - (_voucher_bits_recipe(v).content + sizeof(_voucher_magic_t) + \ - sizeof(_voucher_priority_t))) -#define _voucher_bits_size(activities) \ - (sizeof(_voucher_magic_t) + sizeof(_voucher_priority_t) + \ - (activities) * sizeof(voucher_activity_id_t)) - -#if VOUCHER_ENABLE_RECIPE_OBJECTS -#define _voucher_extra_size(v) ((v)->v_recipe_extra_size) -#define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset) -#else -#define _voucher_extra_size(v) 0 -#define _voucher_extra_recipes(v) NULL -#endif - #if DISPATCH_DEBUG && DISPATCH_VOUCHER_DEBUG #define _dispatch_voucher_debug(msg, v, ...) \ _dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__) #define _dispatch_kvoucher_debug(msg, kv, ...) \ _dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__) +#if DISPATCH_MACHPORT_DEBUG #define _dispatch_voucher_debug_machport(name) \ dispatch_debug_machport((name), __func__) #else +#define _dispatch_voucher_debug_machport(name) ((void)(name)) +#endif +#else #define _dispatch_voucher_debug(msg, v, ...) #define _dispatch_kvoucher_debug(msg, kv, ...) 
#define _dispatch_voucher_debug_machport(name) ((void)(name)) #endif -#if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) +#if DISPATCH_PURE_C DISPATCH_ALWAYS_INLINE static inline voucher_t @@ -244,11 +225,10 @@ _voucher_retain(voucher_t voucher) #if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock - int xref_cnt = dispatch_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1); - if (slowpath(xref_cnt <= 0)) { - _dispatch_voucher_debug("resurrection", voucher); - DISPATCH_CRASH("Voucher resurrection"); + if (unlikely(xref_cnt <= 0)) { + _OS_OBJECT_CLIENT_CRASH("Voucher resurrection"); } #else os_retain(voucher); @@ -265,14 +245,13 @@ _voucher_release(voucher_t voucher) #if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock - int xref_cnt = dispatch_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); - if (fastpath(xref_cnt >= 0)) { + if (likely(xref_cnt >= 0)) { return; } - if (slowpath(xref_cnt < -1)) { - _dispatch_voucher_debug("overrelease", voucher); - DISPATCH_CRASH("Voucher overrelease"); + if (unlikely(xref_cnt < -1)) { + _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } return _os_object_xref_dispose((_os_object_t)voucher); #else @@ -281,6 +260,25 @@ _voucher_release(voucher_t voucher) #endif // DISPATCH_DEBUG } +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_release_no_dispose(voucher_t voucher) +{ +#if !DISPATCH_VOUCHER_OBJC_DEBUG + // not using _os_object_refcnt* because we don't need barriers: + // vouchers are immutable and are in a hash table with a lock + int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); + if (likely(xref_cnt >= 0)) { + return; + } + _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); +#else + _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt); + return os_release(voucher); +#endif // DISPATCH_DEBUG +} + DISPATCH_ALWAYS_INLINE static inline voucher_t _voucher_get(void) @@ -311,7 +309,7 @@ static inline void _voucher_mach_voucher_set(mach_voucher_t kv) { if (kv == VOUCHER_NO_MACH_VOUCHER) return; - _dispatch_set_priority_and_mach_voucher(0, kv); + _dispatch_set_priority_and_mach_voucher_slow(0, kv); } DISPATCH_ALWAYS_INLINE @@ -323,17 +321,12 @@ _voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher) _dispatch_thread_setspecific(dispatch_voucher_key, voucher); mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL; + firehose_activity_id_t aid = voucher ? voucher->v_activity : 0; + firehose_activity_id_t oaid = ov ? ov->v_activity : 0; + if (aid != oaid) _voucher_activity_swap(aid, oaid); return (kv != okv) ? 
kv : VOUCHER_NO_MACH_VOUCHER; } -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_swap(voucher_t ov, voucher_t voucher) -{ - _voucher_mach_voucher_set(_voucher_swap_and_get_mach_voucher(ov, voucher)); - if (ov) _voucher_release(ov); -} - DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t _voucher_adopt(voucher_t voucher) @@ -347,8 +340,8 @@ DISPATCH_ALWAYS_INLINE static inline void _voucher_replace(voucher_t voucher) { - voucher_t ov = _voucher_get(); - _voucher_swap(ov, voucher); + voucher_t ov = _voucher_adopt(voucher); + if (ov) _voucher_release(ov); } DISPATCH_ALWAYS_INLINE @@ -360,16 +353,27 @@ _voucher_clear(void) DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_voucher_get_priority(voucher_t voucher) +_voucher_get_priority(voucher_t v) +{ + return v ? (pthread_priority_t)v->v_priority : 0; +} + +DISPATCH_ALWAYS_INLINE +static inline firehose_activity_id_t +_voucher_get_activity_id(voucher_t v, uint64_t *creator_pid) { - return voucher && voucher->v_has_priority ? - (pthread_priority_t)*_voucher_priority(voucher) : 0; + if (creator_pid) *creator_pid = v ? v->v_activity_creator : 0; + return v ? v->v_activity : 0; } void _voucher_task_mach_voucher_init(void* ctxt); extern dispatch_once_t _voucher_task_mach_voucher_pred; extern mach_voucher_t _voucher_task_mach_voucher; - +#if VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +#define _voucher_default_task_mach_voucher MACH_VOUCHER_NULL +#else +extern mach_voucher_t _voucher_default_task_mach_voucher; +#endif DISPATCH_ALWAYS_INLINE static inline mach_voucher_t _voucher_get_task_mach_voucher(void) @@ -411,12 +415,17 @@ _voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher) DISPATCH_ALWAYS_INLINE static inline mach_voucher_t -_voucher_mach_msg_get(mach_msg_header_t *msg) +_voucher_mach_msg_get(mach_msg_header_t *msg, mach_msg_bits_t *msgh_bits) { - if (!MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return MACH_VOUCHER_NULL; + if (!MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) { + *msgh_bits = 0; + return MACH_VOUCHER_NULL; + } mach_voucher_t kv = msg->msgh_voucher_port; msg->msgh_voucher_port = MACH_VOUCHER_NULL; - msg->msgh_bits &= (mach_msg_bits_t)~MACH_MSGH_BITS_VOUCHER_MASK; + mach_msg_bits_t mask = MACH_MSGH_BITS_VOUCHER_MASK|MACH_MSGH_BITS_RAISEIMP; + *msgh_bits = msg->msgh_bits & mask; + msg->msgh_bits &= ~mask; return kv; } @@ -449,440 +458,111 @@ _voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send) #pragma mark - #pragma mark dispatch_continuation_t + voucher_t -#if DISPATCH_USE_KDEBUG_TRACE +#if DISPATCH_USE_VOUCHER_KDEBUG_TRACE +#define DISPATCH_VOUCHER_CODE(code) DISPATCH_CODE(VOUCHER, code) +#else +#define DISPATCH_VOUCHER_CODE(code) 0 +#endif // DISPATCH_USE_VOUCHER_KDEBUG_TRACE + +#define DISPATCH_TRACE_VOUCHER_DC_PUSH DISPATCH_VOUCHER_CODE(0x1) +#define DISPATCH_TRACE_VOUCHER_DC_POP DISPATCH_VOUCHER_CODE(0x2) +#define DISPATCH_TRACE_VOUCHER_DMSG_PUSH DISPATCH_VOUCHER_CODE(0x3) +#define DISPATCH_TRACE_VOUCHER_DMSG_POP DISPATCH_VOUCHER_CODE(0x4) +#define DISPATCH_TRACE_VOUCHER_ACTIVITY_ADOPT DISPATCH_VOUCHER_CODE(0x5) + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_voucher_ktrace(int code, natural_t voucher, void *container) +_dispatch_voucher_ktrace(uint32_t code, voucher_t v, const void *container) { - if (!voucher) return; - __kdebug_trace(APPSDBG_CODE(DBG_MACH_CHUD, (0xfac >> 2)) | DBG_FUNC_NONE, - code, (int)voucher, (int)(uintptr_t)container, -#ifdef __LP64__ - (int)((uintptr_t)container >> 32) -#else - 0 -#endif - ); + if (v == DISPATCH_NO_VOUCHER) return; + natural_t voucher = 
v ? v->v_kvoucher : MACH_VOUCHER_NULL; + _dispatch_ktrace2(code, voucher, (uintptr_t)container); } +#define _dispatch_voucher_ktrace(code, v, container) \ + _dispatch_voucher_ktrace(DISPATCH_TRACE_VOUCHER_##code, v, container) #define _dispatch_voucher_ktrace_dc_push(dc) \ - _dispatch_voucher_ktrace(0x1, (dc)->dc_voucher ? \ - (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc)) -#define _dispatch_voucher_ktrace_dc_pop(dc) \ - _dispatch_voucher_ktrace(0x2, (dc)->dc_voucher ? \ - (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc)) + _dispatch_voucher_ktrace(DC_PUSH, (dc)->dc_voucher, (dc)) +#define _dispatch_voucher_ktrace_dc_pop(dc, v) \ + _dispatch_voucher_ktrace(DC_POP, v, (dc)) #define _dispatch_voucher_ktrace_dmsg_push(dmsg) \ - _dispatch_voucher_ktrace(0x3, (dmsg)->dmsg_voucher ? \ - (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg)) + _dispatch_voucher_ktrace(DMSG_PUSH, (dmsg)->dmsg_voucher, (dmsg)) #define _dispatch_voucher_ktrace_dmsg_pop(dmsg) \ - _dispatch_voucher_ktrace(0x4, (dmsg)->dmsg_voucher ? \ - (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg)) -#else -#define _dispatch_voucher_ktrace_dc_push(dc) -#define _dispatch_voucher_ktrace_dc_pop(dc) -#define _dispatch_voucher_ktrace_dmsg_push(dmsg) -#define _dispatch_voucher_ktrace_dmsg_pop(dmsg) -#endif // DISPATCH_USE_KDEBUG_TRACE + _dispatch_voucher_ktrace(DMSG_POP, (dmsg)->dmsg_voucher, (dmsg)) +#define _dispatch_voucher_ktrace_activity_adopt(aid) \ + _dispatch_ktrace1(DISPATCH_TRACE_VOUCHER_ACTIVITY_ADOPT, aid); DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_voucher_set(dispatch_continuation_t dc, - dispatch_block_flags_t flags) + dispatch_queue_class_t dqu, dispatch_block_flags_t flags) { - unsigned long bits = (unsigned long)dc->do_vtable; voucher_t v = NULL; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - bits |= DISPATCH_OBJ_HAS_VOUCHER_BIT; + // _dispatch_continuation_voucher_set is never called for blocks with + // private data or with the DISPATCH_BLOCK_HAS_VOUCHER flag set. + // only _dispatch_continuation_init_slow handles this bit. + dispatch_assert(!(flags & DISPATCH_BLOCK_HAS_VOUCHER)); + + if (dqu._oq->oq_override_voucher != DISPATCH_NO_VOUCHER) { + // if the queue has an override voucher, we should not capture anything + // + // if the continuation is enqueued before the queue is activated, then + // this optimization fails and we do capture whatever is current + // + // _dispatch_continuation_voucher_adopt() would do the right thing + // but using DISPATCH_NO_VOUCHER here is more efficient. 
+ v = DISPATCH_NO_VOUCHER; } else if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) { v = _voucher_copy(); } - dc->do_vtable = (void*)bits; dc->dc_voucher = v; _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); _dispatch_voucher_ktrace_dc_push(dc); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc) -{ - unsigned long bits = (unsigned long)dc->do_vtable; - voucher_t v = DISPATCH_NO_VOUCHER; - if (!(bits & DISPATCH_OBJ_HAS_VOUCHER_BIT)) { - _dispatch_voucher_ktrace_dc_pop(dc); - _dispatch_voucher_debug("continuation[%p] adopt", dc->dc_voucher, dc); - v = dc->dc_voucher; - dc->dc_voucher = NULL; - } - _dispatch_adopt_priority_and_replace_voucher(dc->dc_priority, v, 0); -} - -#pragma mark - -#pragma mark _voucher_activity_heap - -typedef uint32_t _voucher_atm_subid_t; -static const size_t _voucher_activity_hash_bits = 6; -static const size_t _voucher_activity_hash_size = - 1 << _voucher_activity_hash_bits; -#define VACTID_HASH(x) \ - (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits)) -#define VATMID_HASH(x) \ - (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits)) -#define VATMID2ACTID(x, flags) \ - (((voucher_activity_id_t)(x) & 0xffffffffffffff) | \ - (((voucher_activity_id_t)(flags) & 0xfe) << 55)) - -typedef struct _voucher_activity_metadata_s { - _voucher_activity_buffer_t vam_client_metadata; - struct _voucher_activity_metadata_opaque_s *vasm_baseaddr; - _voucher_activity_bitmap_t volatile vam_buffer_bitmap; - _voucher_activity_bitmap_t volatile vam_pressure_locked_bitmap; - _voucher_activity_lock_s vam_atms_lock; - _voucher_activity_lock_s vam_activities_lock; - TAILQ_HEAD(, _voucher_atm_s) vam_atms[_voucher_activity_hash_size]; - TAILQ_HEAD(, _voucher_activity_s) - vam_activities[_voucher_activity_hash_size]; -} *_voucher_activity_metadata_t; - -#pragma mark - -#pragma mark _voucher_atm_t - -typedef struct _voucher_atm_s { - int32_t volatile vatm_refcnt; - mach_voucher_t vatm_kvoucher; - atm_aid_t vatm_id; - atm_guard_t vatm_generation; - TAILQ_ENTRY(_voucher_atm_s) vatm_list; -#if __LP64__ - uintptr_t vatm_pad[3]; - // cacheline -#endif -} *_voucher_atm_t; - -extern _voucher_atm_t _voucher_task_atm; - -#pragma mark - -#pragma mark _voucher_activity_t - -typedef struct _voucher_activity_s { - voucher_activity_id_t va_id; - voucher_activity_trace_id_t va_trace_id; - uint64_t va_location; - int32_t volatile va_refcnt; - uint32_t volatile va_buffer_count; - uint32_t va_buffer_limit; - _voucher_activity_buffer_header_t volatile va_current_buffer; - _voucher_atm_t va_atm; -#if __LP64__ - uint64_t va_unused; -#endif - // cacheline - _voucher_activity_lock_s va_buffers_lock; - TAILQ_HEAD(_voucher_activity_buffer_list_s, - _voucher_activity_buffer_header_s) va_buffers; - TAILQ_ENTRY(_voucher_activity_s) va_list; - TAILQ_ENTRY(_voucher_activity_s) va_atm_list; - TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list; - pthread_mutex_t va_mutex; - pthread_cond_t va_cond; -} *_voucher_activity_t; - -_voucher_activity_tracepoint_t _voucher_activity_buffer_tracepoint_acquire_slow( - _voucher_activity_t *vap, _voucher_activity_buffer_header_t *vabp, - unsigned int slots, size_t strsize, uint16_t *stroffsetp); -void _voucher_activity_firehose_push(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer); -extern _voucher_activity_t _voucher_activity_default; -extern voucher_activity_mode_t _voucher_activity_mode; - -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG -#define 
_dispatch_voucher_activity_debug(msg, act, ...) \ - _dispatch_debug("activity[%p] <0x%llx>: atm[%p] <%lld>: " msg, (act), \ - (act) ? (act)->va_id : 0, (act) ? (act)->va_atm : NULL, \ - (act) && (act)->va_atm ? (act)->va_atm->vatm_id : 0, ##__VA_ARGS__) -#define _dispatch_voucher_atm_debug(msg, atm, ...) \ - _dispatch_debug("atm[%p] <%lld> kvoucher[0x%08x]: " msg, (atm), \ - (atm) ? (atm)->vatm_id : 0, (atm) ? (atm)->vatm_kvoucher : 0, \ - ##__VA_ARGS__) -#else -#define _dispatch_voucher_activity_debug(msg, act, ...) -#define _dispatch_voucher_atm_debug(msg, atm, ...) -#endif - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_timestamp(bool approx) -{ -#if TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 - (void)approx; - return mach_absolute_time(); -#else - return approx ? mach_approximate_time() : mach_absolute_time(); -#endif -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_thread_id(void) -{ - uint64_t thread_id; - pthread_threadid_np(NULL, &thread_id); // TODO: 15923074: use TSD thread_id - return thread_id; -} - -#define _voucher_activity_buffer_pos2length(pos) \ - ({ _voucher_activity_buffer_position_u _pos = (pos); \ - _pos.vabp_pos.vabp_next_tracepoint_idx * \ - sizeof(struct _voucher_activity_tracepoint_s) + \ - _pos.vabp_pos.vabp_string_offset; }) +static inline dispatch_queue_t _dispatch_queue_get_current(void); DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_tracepoint_t -_voucher_activity_buffer_tracepoint_acquire( - _voucher_activity_buffer_header_t vab, unsigned int slots, - size_t strsize, uint16_t *stroffsetp) -{ - if (!vab) return NULL; - _voucher_activity_buffer_position_u pos_orig, pos; - pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - do { - pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; - pos.vabp_pos.vabp_next_tracepoint_idx += slots; - pos.vabp_pos.vabp_string_offset += strsize; - size_t len = _voucher_activity_buffer_pos2length(pos); - if (len > _voucher_activity_buffer_size || pos.vabp_pos.vabp_flags) { - return NULL; - } - if (len == _voucher_activity_buffer_size) { - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full; - } - pos.vabp_pos.vabp_refcnt++; - } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, - pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, - &pos_orig.vabp_atomic_pos, relaxed)); - if (stroffsetp) *stroffsetp = pos.vabp_pos.vabp_string_offset; - return (_voucher_activity_tracepoint_t)vab + - pos_orig.vabp_pos.vabp_next_tracepoint_idx; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_buffer_tracepoint_release( - _voucher_activity_buffer_header_t vab) -{ - _voucher_activity_buffer_position_u pos_orig, pos; - pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - do { - pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; - pos.vabp_pos.vabp_refcnt--; - if (!pos.vabp_pos.vabp_refcnt && - (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full)) { - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing; - } - } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, - pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, - &pos_orig.vabp_atomic_pos, relaxed)); - return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_buffer_mark_full(_voucher_activity_buffer_header_t vab) -{ - _voucher_activity_buffer_position_u pos_orig, pos; - pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - do { - pos.vabp_atomic_pos = 
pos_orig.vabp_atomic_pos; - if (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full) { - return false; - } - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full; - if (!pos.vabp_pos.vabp_refcnt) { - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing; - } - } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, - pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, - &pos_orig.vabp_atomic_pos, relaxed)); - return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_buffer_is_full(_voucher_activity_buffer_header_t vab) -{ - _voucher_activity_buffer_position_u pos; - pos.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - return (pos.vabp_pos.vabp_flags); -} - -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_buffer_header_t -_voucher_activity_buffer_get_from_activity(_voucher_activity_t va) -{ - return va ? va->va_current_buffer : NULL; -} - -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_t -_voucher_activity_get(void) -{ - _voucher_activity_t va; - voucher_t v = _voucher_get(); - va = v && v->v_activity ? v->v_activity : _voucher_activity_default; - return va; -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_tracepoint_init(_voucher_activity_tracepoint_t vat, - uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location, - bool approx) -{ - if (!location) location = (uint64_t)__builtin_return_address(0); - uint64_t timestamp = _voucher_activity_timestamp(approx); - vat->vat_flags = _voucher_activity_trace_flag_tracepoint, - vat->vat_type = type, - vat->vat_namespace = code_namespace, - vat->vat_code = code, - vat->vat_timestamp = timestamp, - vat->vat_thread = _voucher_activity_thread_id(), - vat->vat_location = location; - return timestamp; -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_tracepoint_init_with_id(_voucher_activity_tracepoint_t vat, - voucher_activity_trace_id_t trace_id, uint64_t location, bool approx) +static inline void +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, + voucher_t ov, uintptr_t dc_flags) { - uint8_t type = (uint8_t)(trace_id >> _voucher_activity_trace_id_type_shift); - uint8_t cns = (uint8_t)(trace_id >> - _voucher_activity_trace_id_code_namespace_shift); - uint32_t code = (uint32_t)trace_id; - return _voucher_activity_tracepoint_init(vat, type, cns, code, location, - approx); -} + voucher_t v = dc->dc_voucher; + _dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT); + dispatch_assert(DISPATCH_OBJ_CONSUME_BIT == DISPATCH_VOUCHER_CONSUME); -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_trace_id_is_subtype(voucher_activity_trace_id_t trace_id, - uint8_t type) -{ - voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 0, 0); - return (trace_id & type_id) == type_id; -} -#define _voucher_activity_trace_id_is_subtype(trace_id, name) \ - _voucher_activity_trace_id_is_subtype(trace_id, \ - voucher_activity_tracepoint_type_ ## name) - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_trace_id_enabled(voucher_activity_trace_id_t trace_id) -{ - switch (_voucher_activity_mode) { - case voucher_activity_mode_release: - return _voucher_activity_trace_id_is_subtype(trace_id, release); - case voucher_activity_mode_stream: - case voucher_activity_mode_debug: - return _voucher_activity_trace_id_is_subtype(trace_id, debug) || - _voucher_activity_trace_id_is_subtype(trace_id, release); + if (consume) { + dc->dc_voucher = 
VOUCHER_INVALID; } - return false; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_trace_type_enabled(uint8_t type) -{ - voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 0, 0); - return _voucher_activity_trace_id_enabled(type_id); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_disabled(void) -{ - return slowpath(_voucher_activity_mode == voucher_activity_mode_disable); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_trace_args_inline(uint8_t type, uint8_t code_namespace, - uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4) -{ - if (!_voucher_activity_trace_type_enabled(type)) return; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) return; - _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true); - vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; - vat->vat_data[0] = arg1; - vat->vat_data[1] = arg2; - vat->vat_data[2] = arg3; - vat->vat_data[3] = arg4; - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); + if (likely(v != DISPATCH_NO_VOUCHER)) { + _dispatch_voucher_ktrace_dc_pop(dc, v); + _dispatch_voucher_debug("continuation[%p] adopt", v, dc); + + if (likely(!(dc_flags & DISPATCH_OBJ_ENFORCE_VOUCHER))) { + if (unlikely(ov != DISPATCH_NO_VOUCHER && v != ov)) { + if (consume) _voucher_release(v); + consume = 0; + v = ov; + } + } + } else { + consume = 0; + v = ov; } + (void)_dispatch_adopt_priority_and_set_voucher(dc->dc_priority, v, + consume | DISPATCH_VOUCHER_REPLACE); } -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_trace_activity_event(voucher_activity_trace_id_t trace_id, - voucher_activity_id_t va_id, _voucher_activity_tracepoint_flag_t flags) -{ - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) return; - _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0, false); - vat->vat_flags |= _voucher_activity_trace_flag_activity | flags; - vat->vat_data[0] = va_id; - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } -} -#define _voucher_activity_trace_activity_event(trace_id, va_id, type) \ - _voucher_activity_trace_activity_event(trace_id, va_id, \ - _voucher_activity_trace_flag_ ## type) +#pragma mark - +#pragma mark _voucher activity subsystem -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_trace_msg(voucher_t v, mach_msg_header_t *msg, uint32_t code) -{ - if (!v || !v->v_activity) return; // Don't use default activity for IPC - const uint8_t type = voucher_activity_tracepoint_type_debug; - const uint8_t code_namespace = _voucher_activity_tracepoint_namespace_ipc; - if (!_voucher_activity_trace_type_enabled(type)) return; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - vab = _voucher_activity_buffer_get_from_activity(v->v_activity); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) return; // TODO: slowpath ? 
- _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true); - vat->vat_flags |= _voucher_activity_trace_flag_libdispatch; -#if __has_extension(c_static_assert) - _Static_assert(sizeof(mach_msg_header_t) <= sizeof(vat->vat_data), - "mach_msg_header_t too large"); -#endif - memcpy(vat->vat_data, msg, sizeof(mach_msg_header_t)); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(v->v_activity, vab); - } -} -#define _voucher_activity_trace_msg(v, msg, type) \ - _voucher_activity_trace_msg(v, msg, \ - _voucher_activity_tracepoint_namespace_ipc_ ## type) +extern dispatch_once_t _firehose_task_buffer_pred; +extern union firehose_buffer_u *_firehose_task_buffer; +extern uint64_t _voucher_unique_pid; +extern dispatch_mach_t _voucher_activity_debug_channel; +extern voucher_activity_hooks_t _voucher_libtrace_hooks; -#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) +#endif // DISPATCH_PURE_C #else // VOUCHER_USE_MACH_VOUCHER @@ -983,9 +663,9 @@ _voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher) DISPATCH_ALWAYS_INLINE static inline mach_voucher_t -_voucher_mach_msg_get(mach_msg_header_t *msg) +_voucher_mach_msg_get(mach_msg_header_t *msg, mach_msg_bits_t *msgh_bits) { - (void)msg; + (void)msg;(void)msgh_bits; return 0; } @@ -997,31 +677,25 @@ _voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send) return MACH_VOUCHER_NULL; } +#define _dispatch_voucher_ktrace_dc_push(dc) +#define _dispatch_voucher_ktrace_dc_pop(dc, v) #define _dispatch_voucher_ktrace_dmsg_push(dmsg) #define _dispatch_voucher_ktrace_dmsg_pop(dmsg) DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_voucher_set(dispatch_continuation_t dc, - dispatch_block_flags_t flags) + dispatch_queue_class_t dqu, dispatch_block_flags_t flags) { - (void)dc; (void)flags; + (void)dc; (void)dqu; (void)flags; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc) -{ - (void)dc; -} - -#define _voucher_activity_trace_msg(v, msg, type) - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_disabled(void) +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, voucher_t ov, + uintptr_t dc_flags) { - return true; + (void)dc; (void)ov; (void)dc_flags; } #endif // VOUCHER_USE_MACH_VOUCHER diff --git a/xcodeconfig/libdispatch-dyld-stub.xcconfig b/xcodeconfig/libdispatch-dyld-stub.xcconfig new file mode 100644 index 0000000..aabda62 --- /dev/null +++ b/xcodeconfig/libdispatch-dyld-stub.xcconfig @@ -0,0 +1,28 @@ +// +// Copyright (c) 2016 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_LDFLAGS = +BUILD_VARIANTS = normal +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 DISPATCH_VARIANT_DYLD_STUB=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 +PRODUCT_NAME = libdispatch_dyld_stub +INSTALL_PATH = /usr/local/lib/dyld_stub +EXCLUDED_SOURCE_FILE_NAMES = * +INCLUDED_SOURCE_FILE_NAMES = voucher.c // it's minimal with DISPATCH_VARIANT_DYLD_STUB +VERSIONING_SYSTEM = diff --git a/xcodeconfig/libdispatch-introspection.xcconfig b/xcodeconfig/libdispatch-introspection.xcconfig index a2f98f9..c7826d5 100644 --- a/xcodeconfig/libdispatch-introspection.xcconfig +++ b/xcodeconfig/libdispatch-introspection.xcconfig @@ -21,6 +21,6 @@ BUILD_VARIANTS = normal INSTALL_PATH = /usr/lib/system/introspection -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1 +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_INTROSPECTION=1 CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection OTHER_LDFLAGS = $(OTHER_LDFLAGS) -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/libdispatch.interposable diff --git a/xcodeconfig/libdispatch-mp-static.xcconfig b/xcodeconfig/libdispatch-mp-static.xcconfig new file mode 100644 index 0000000..1f0eddc --- /dev/null +++ b/xcodeconfig/libdispatch-mp-static.xcconfig @@ -0,0 +1,30 @@ +// +// Copyright (c) 2012-2013 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_LDFLAGS = +BUILD_VARIANTS = normal debug +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 +PRODUCT_NAME = libdispatch +INSTALL_PATH = /usr/local/lib/system + +// skip simulator +SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos +SKIP_INSTALL[sdk=*simulator*] = YES +EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = * diff --git a/xcodeconfig/libdispatch-static.xcconfig b/xcodeconfig/libdispatch-up-static.xcconfig similarity index 89% rename from xcodeconfig/libdispatch-static.xcconfig rename to xcodeconfig/libdispatch-up-static.xcconfig index 632e01c..0ece635 100644 --- a/xcodeconfig/libdispatch-static.xcconfig +++ b/xcodeconfig/libdispatch-up-static.xcconfig @@ -22,4 +22,4 @@ OTHER_LDFLAGS = BUILD_VARIANTS = normal SKIP_INSTALL = YES EXCLUDED_SOURCE_FILE_NAMES = * -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0 DISPATCH_USE_DTRACE=0 +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) USE_OBJC=0 DISPATCH_USE_DTRACE=0 diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index c29b163..65dfd04 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -19,5 +19,8 @@ # __dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap -__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure +__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus __dispatch_queue_attrs __dispatch_queue_attr_concurrent +_dispatch_assert_queue$V2 _dispatch_assert_queue +_dispatch_assert_queue_not$V2 _dispatch_assert_queue_not +_dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 8bb4550..9642ca4 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -28,8 +28,14 @@ _OBJC_CLASS_$_OS_dispatch_group __OS_dispatch_group_vtable _OBJC_CLASS_$_OS_dispatch_queue __OS_dispatch_queue_vtable +_OBJC_CLASS_$_OS_dispatch_queue_serial +__OS_dispatch_queue_serial_vtable +_OBJC_CLASS_$_OS_dispatch_queue_concurrent +__OS_dispatch_queue_concurrent_vtable _OBJC_CLASS_$_OS_dispatch_queue_root __OS_dispatch_queue_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_main +__OS_dispatch_queue_main_vtable _OBJC_CLASS_$_OS_dispatch_queue_runloop __OS_dispatch_queue_runloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_mgr @@ -62,7 +68,10 @@ _OBJC_METACLASS_$_OS_dispatch_object _OBJC_METACLASS_$_OS_dispatch_semaphore _OBJC_METACLASS_$_OS_dispatch_group _OBJC_METACLASS_$_OS_dispatch_queue +_OBJC_METACLASS_$_OS_dispatch_queue_serial +_OBJC_METACLASS_$_OS_dispatch_queue_concurrent _OBJC_METACLASS_$_OS_dispatch_queue_root +_OBJC_METACLASS_$_OS_dispatch_queue_main _OBJC_METACLASS_$_OS_dispatch_queue_runloop _OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_METACLASS_$_OS_dispatch_queue_specific_queue @@ -75,6 +84,6 @@ _OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk _OBJC_METACLASS_$_OS_object _OBJC_METACLASS_$_OS_voucher -_OBJC_METACLASS_$_OS_voucher_recipe +#_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data _OBJC_METACLASS_$_OS_dispatch_data_empty diff --git a/xcodeconfig/libdispatch.unexport b/xcodeconfig/libdispatch.unexport deleted file mode 100644 index dba78b9..0000000 --- a/xcodeconfig/libdispatch.unexport +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright (c) 2012-2013 Apple Inc. All rights reserved. 
-# -# @APPLE_APACHE_LICENSE_HEADER_START@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# @APPLE_APACHE_LICENSE_HEADER_END@ -# - -__dispatch_semaphore_vtable -__dispatch_group_vtable -__dispatch_queue_vtable -__dispatch_queue_root_vtable -__dispatch_queue_runloop_vtable -__dispatch_queue_mgr_vtable -__dispatch_queue_specific_queue_vtable -__dispatch_queue_attr_vtable -__dispatch_source_vtable -__dispatch_mach_vtable -__dispatch_mach_msg_vtable -__dispatch_io_vtable -__dispatch_operation_vtable -__dispatch_disk_vtable diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 7fc525d..d5b08d6 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -30,8 +30,8 @@ PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os -HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/os -LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system +HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/src +LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system $(SDKROOT)/usr/local/lib INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO USE_HEADERMAP = NO @@ -43,7 +43,6 @@ CLANG_CXX_LANGUAGE_STANDARD = gnu++11 GCC_ENABLE_CPP_EXCEPTIONS = NO GCC_STRICT_ALIASING = YES GCC_SYMBOLS_PRIVATE_EXTERN = YES -GCC_ENABLE_OBJC_GC[sdk=macosx*] = supported GCC_ENABLE_PASCAL_STRINGS = NO GCC_WARN_SHADOW = YES GCC_WARN_64_TO_32_BIT_CONVERSION = YES @@ -61,22 +60,19 @@ CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES CLANG_WARN_DOCUMENTATION_COMMENTS = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s -GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 -WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-unknown-warning-option +GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS) +GCC_NO_COMMON_BLOCKS = YES +WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-packed -Wno-unknown-warning-option OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 +OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_DEBUG=1 GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) SIM_SUFFIX[sdk=*simulator*] = _sim DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform 
-lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_objc.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport -OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto -OBJC_LDFLAGS[arch=i386][sdk=macosx*] = -OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m +OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) OTHER_MIGFLAGS = -novouchers diff --git a/xcodeconfig/libdispatch_objc.aliases b/xcodeconfig/libdispatch_objc.aliases deleted file mode 100644 index ad104a1..0000000 --- a/xcodeconfig/libdispatch_objc.aliases +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright (c) 2012-2013 Apple Inc. All rights reserved. -# -# @APPLE_APACHE_LICENSE_HEADER_START@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# @APPLE_APACHE_LICENSE_HEADER_END@ -# - -_OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable -_OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable -_OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable -_OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable -_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable -_OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable -_OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable -_OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable -_OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable -_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable -_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable -_OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable -_OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable -_OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable diff --git a/xcodeconfig/libfirehose.xcconfig b/xcodeconfig/libfirehose.xcconfig new file mode 100644 index 0000000..07a8b9a --- /dev/null +++ b/xcodeconfig/libfirehose.xcconfig @@ -0,0 +1,36 @@ +// +// Copyright (c) 2015 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_MIGFLAGS = -novouchers +OTHER_LDFLAGS = +SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator +PRODUCT_NAME = $(TARGET_NAME) +INSTALL_PATH = /usr/local/lib/ +PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os +PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os +STRIP_INSTALLED_PRODUCT = NO +COPY_PHASE_STRIP = NO +SEPARATE_STRIP = NO +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0 + +VALID_ARCHS[sdk=macosx*] = $(NATIVE_ARCH_ACTUAL) + +COPY_HEADERS_RUN_UNIFDEF = YES +COPY_HEADERS_UNIFDEF_FLAGS = -UKERNEL diff --git a/xcodeconfig/libfirehose_kernel.xcconfig b/xcodeconfig/libfirehose_kernel.xcconfig new file mode 100644 index 0000000..f6b2a99 --- /dev/null +++ b/xcodeconfig/libfirehose_kernel.xcconfig @@ -0,0 +1,35 @@ +// +// Copyright (c) 2015 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +#include "libfirehose.xcconfig" + +OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed +// LLVM_LTO = YES +PRODUCT_NAME = $(TARGET_NAME) +INSTALL_PATH = /usr/local/lib/kernel/ +PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/kernel/os +SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos + +HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(SDKROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders $(SDKROOT)/System/Library/Frameworks/Kernel.framework/Headers $(SDKROOT)/usr/local/include/os $(SDKROOT)/usr/local/include/firehose + +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) KERNEL=1 DISPATCH_USE_DTRACE=0 + +COPY_HEADERS_RUN_UNIFDEF = YES +COPY_HEADERS_UNIFDEF_FLAGS = -DKERNEL=1 -DOS_FIREHOSE_SPI=1 -DOS_VOUCHER_ACTIVITY_SPI_TYPES=1 -UOS_VOUCHER_ACTIVITY_SPI diff --git a/xcodescripts/install-headers.sh b/xcodescripts/install-headers.sh index 1610b81..1fb149b 100755 --- a/xcodescripts/install-headers.sh +++ b/xcodescripts/install-headers.sh @@ -29,3 +29,4 @@ cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" diff --git a/xcodescripts/mig-headers.sh b/xcodescripts/mig-headers.sh index f81eb85..003e9f2 100755 --- a/xcodescripts/mig-headers.sh +++ b/xcodescripts/mig-headers.sh @@ -26,4 +26,10 @@ for a in ${ARCHS}; do xcrun mig ${OTHER_MIGFLAGS} -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \ -sheader "${SCRIPT_OUTPUT_FILE_1}" -user /dev/null \ -server /dev/null "${SCRIPT_INPUT_FILE_0}" + xcrun mig ${OTHER_MIGFLAGS} -arch $a -header "${SCRIPT_OUTPUT_FILE_2}" \ + -sheader "${SCRIPT_OUTPUT_FILE_3}" -user /dev/null \ + -server /dev/null "${SCRIPT_INPUT_FILE_1}" + xcrun mig ${OTHER_MIGFLAGS} -arch $a -header 
"${SCRIPT_OUTPUT_FILE_4}" \ + -sheader "${SCRIPT_OUTPUT_FILE_5}" -user /dev/null \ + -server /dev/null "${SCRIPT_INPUT_FILE_2}" done diff --git a/xcodeconfig/libdispatch_macosx.aliases b/xcodescripts/run-on-install.sh similarity index 79% rename from xcodeconfig/libdispatch_macosx.aliases rename to xcodescripts/run-on-install.sh index 66b24a3..730b511 100644 --- a/xcodeconfig/libdispatch_macosx.aliases +++ b/xcodescripts/run-on-install.sh @@ -1,5 +1,6 @@ +#!/bin/bash -e # -# Copyright (c) 2013 Apple Inc. All rights reserved. +# Copyright (c) 2016 Apple Inc. All rights reserved. # # @APPLE_APACHE_LICENSE_HEADER_START@ # @@ -17,3 +18,9 @@ # # @APPLE_APACHE_LICENSE_HEADER_END@ # + +if [[ "x${ACTION}" == "xinstall" && "x${SKIP_INSTALL}" == "xNO" ]]; then + $@ +else + exit 0 +fi -- 2.45.2