From fa22f35b3ccab0081bb7090c32773dcd7463a045 Mon Sep 17 00:00:00 2001 From: Apple Date: Wed, 29 Mar 2017 20:09:34 +0000 Subject: [PATCH] libdispatch-703.50.37.tar.gz --- .gitmodules | 3 + INSTALL | 120 --- INSTALL.md | 186 +++++ Makefile.am | 31 +- PATCHES | 40 + config/config.h | 8 + configure.ac | 84 +- dispatch/Makefile.am | 2 +- dispatch/{ => darwin}/module.modulemap | 0 dispatch/dispatch.h | 2 + .../{module.map => generic/module.modulemap} | 2 - libdispatch.xcodeproj/project.pbxproj | 21 +- m4/blocks.m4 | 104 ++- man/dispatch_object.3 | 19 +- man/dispatch_semaphore_create.3 | 14 + man/dispatch_source_create.3 | 6 +- os/firehose_buffer_private.h | 76 +- os/firehose_server_private.h | 79 +- os/voucher_activity_private.h | 68 +- private/{ => darwin}/module.modulemap | 0 private/generic/module.modulemap | 11 + private/private.h | 7 - private/source_private.h | 8 + src/BlocksRuntime/Block.h | 54 ++ src/BlocksRuntime/Block_private.h | 264 +++++++ src/BlocksRuntime/data.c | 24 + src/BlocksRuntime/runtime.c | 747 ++++++++++++++++++ src/Makefile.am | 34 +- src/firehose/firehose.defs | 3 +- src/firehose/firehose_buffer.c | 195 ++--- src/firehose/firehose_buffer_internal.h | 7 +- src/firehose/firehose_inline_internal.h | 139 +--- src/firehose/firehose_server.c | 276 ++++--- src/firehose/firehose_server_internal.h | 5 +- src/init.c | 100 ++- src/inline_internal.h | 11 +- src/internal.h | 29 +- src/io.c | 8 +- src/object.m | 18 + src/object_internal.h | 3 + src/queue.c | 47 +- src/queue_internal.h | 1 + src/semaphore.c | 328 +------- src/semaphore_internal.h | 12 +- src/shims/atomic.h | 98 ++- src/shims/linux_stubs.h | 8 +- src/shims/lock.c | 307 ++++++- src/shims/lock.h | 176 +++-- src/shims/time.h | 92 ++- src/source.c | 470 +++-------- src/source_internal.h | 11 +- src/swift/Block.swift | 35 +- src/swift/Data.swift | 119 +-- src/swift/Dispatch.swift | 50 +- src/swift/DispatchStubs.cc | 12 +- src/swift/IO.swift | 60 +- src/swift/Private.swift | 66 +- src/swift/Queue.swift | 354 ++++----- src/swift/Source.swift | 76 +- src/swift/Time.swift | 64 +- src/swift/Wrapper.swift | 90 ++- src/time.c | 24 +- src/trace.h | 24 +- src/voucher.c | 108 ++- src/voucher_internal.h | 4 + xcodescripts/install-manpages.sh | 2 +- 66 files changed, 3237 insertions(+), 2209 deletions(-) delete mode 100644 INSTALL create mode 100644 INSTALL.md rename dispatch/{ => darwin}/module.modulemap (100%) rename dispatch/{module.map => generic/module.modulemap} (86%) rename private/{ => darwin}/module.modulemap (100%) create mode 100644 private/generic/module.modulemap create mode 100644 src/BlocksRuntime/Block.h create mode 100644 src/BlocksRuntime/Block_private.h create mode 100644 src/BlocksRuntime/data.c create mode 100644 src/BlocksRuntime/runtime.c diff --git a/.gitmodules b/.gitmodules index e6068b4..009b5fb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "libpwq"] path = libpwq url = https://github.com/mheily/libpwq.git +[submodule "libkqueue"] + path = libkqueue + url = https://github.com/mheily/libkqueue.git diff --git a/INSTALL b/INSTALL deleted file mode 100644 index 9113e4a..0000000 --- a/INSTALL +++ /dev/null @@ -1,120 +0,0 @@ -Grand Central Dispatch (GCD) - -GCD is a concurrent programming framework first shipped with Mac OS X Snow -Leopard. This package is an open source bundling of libdispatch, the core -user space library implementing GCD. 
At the time of writing, support for -the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow -Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Support -for Linux is a work in progress (see Linux notes below). Other systems are -currently unsupported. - - Configuring and installing libdispatch - -GCD is built using autoconf, automake, and libtool, and has a number of -compile-time configuration options that should be reviewed before starting. -An uncustomized install requires: - - sh autogen.sh - ./configure - make - make install - -The following configure options may be of general interest: - ---with-apple-libpthread-source - - Specify the path to Apple's libpthread package, so that appropriate headers - can be found and used. - ---with-apple-libplatform-source - - Specify the path to Apple's libplatform package, so that appropriate headers - can be found and used. - ---with-apple-libclosure-source - - Specify the path to Apple's Libclosure package, so that appropriate headers - can be found and used. - ---with-apple-xnu-source - - Specify the path to Apple's XNU package, so that appropriate headers can be - found and used. - ---with-blocks-runtime - - On systems where -fblocks is supported, specify an additional library path - in which libBlocksRuntime can be found. This is not required on OS X, - where the Blocks runtime is included in libSystem, but is required on - FreeBSD. - -The following options are likely to only be useful when building libdispatch on -OS X as a replacement for /usr/lib/system/libdispatch.dylib: - ---with-apple-objc4-source - - Specify the path to Apple's objc4 package, so that appropriate headers can - be found and used. - ---disable-libdispatch-init-constructor - - Do not tag libdispatch's init routine as __constructor, in which case it - must be run manually before libdispatch routines can be called. This is the - default when building on OS X. For /usr/lib/system/libdispatch.dylib - the init routine is called automatically during process start. - ---enable-apple-tsd-optimizations - - Use a non-portable allocation scheme for pthread per-thread data (TSD) keys - when building libdispatch for /usr/lib/system on OS X. This should not - be used on other OS's, or on OS X when building a stand-alone library. - - Typical configuration commands - -The following command lines create the configuration required to build -libdispatch for /usr/lib/system on OS X El Capitan: - - clangpath=$(dirname `xcrun --find clang`) - sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc" - LIBTOOLIZE=glibtoolize sh autogen.sh - cflags='-arch x86_64 -arch i386 -g -Os' - ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \ - --prefix=/usr --libdir=/usr/lib/system --disable-static \ - --enable-apple-tsd-optimizations \ - --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \ - --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \ - --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \ - --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \ - --with-apple-objc4-source=/path/to/10.11.0/objc4-680 - make check - -Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with -clang and blocks support: - - sh autogen.sh - ./configure CC=clang --with-blocks-runtime=/usr/local/lib - make check - -Instructions for building on Linux. Initial focus is on ubuntu 15.04. -Prepare your system - 1. 
Install compiler, autotools
-	sudo apt-get install clang
-	sudo apt-get install autoconf libtool pkg-config
-  2. Install dtrace (to generate provider.h)
-	sudo apt-get install systemtap-sdt-dev
-  3. Install libdispatch pre-reqs
-	sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev
-
-Initialize git submodules:
-  We are using git submodules to incorporate a specific revision of the
-  upstream pthread_workqueue library into the build.
-	git submodule init
-	git submodule update
-
-Build:
-	sh autogen.sh
-	./configure
-	make
-
-Note: the build currently fails building tests, but libdispatch.so should
-	build successfully.
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 0000000..fd999e7
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,186 @@
+## Grand Central Dispatch (GCD)
+
+GCD is a concurrent programming framework first shipped with Mac OS X Snow
+Leopard. This package is an open source bundling of libdispatch, the core
+user space library implementing GCD. At the time of writing, support for
+the BSD kqueue API, and specifically the extensions introduced in Mac OS X Snow
+Leopard and FreeBSD 9-CURRENT, is required to use libdispatch. Linux is
+supported, but requires specific packages to be installed (see the Linux
+section at the end of this file). Other systems are currently unsupported.
+
+### Configuring and installing libdispatch (general comments)
+
+GCD is built using autoconf, automake, and libtool, and has a number of
+compile-time configuration options that should be reviewed before starting.
+An uncustomized install of the C API to libdispatch requires:
+
+	sh autogen.sh
+	./configure
+	make
+	make install
+
+libdispatch can optionally be built to include a Swift API. This requires a
+Swift toolchain to compile the Swift code in libdispatch, and can be done
+in one of two ways.
+
+If you are building your own Swift toolchain from source, build
+libdispatch by giving additional arguments to swift/utils/build-script:
+
+	./swift/utils/build-script --libdispatch -- --install-libdispatch
+
+To build libdispatch using a pre-built Swift toolchain and install libdispatch
+into that toolchain (to allow that toolchain to compile Swift code containing
+"import Dispatch") requires:
+
+	sh autogen.sh
+	./configure --with-swift-toolchain=<path-to-swift-toolchain> --prefix=<path-to-swift-toolchain>
+	make
+	make install
+
+Note that once libdispatch is installed into a Swift toolchain, that
+toolchain cannot be used to compile libdispatch again (you must 'make uninstall'
+libdispatch from the toolchain before using it to rebuild libdispatch).
+
+You can also use the build-toolchain script to create a toolchain
+that includes libdispatch on Linux:
+
+1. Add libdispatch and install-libdispatch lines to ./swift/utils/build-presets.ini under the `[preset: buildbot_linux]` section, as follows:
+
+    ```
+    [preset: buildbot_linux]
+    mixin-preset=mixin_linux_installation
+    build-subdir=buildbot_linux
+    lldb
+    release
+    test
+    validation-test
+    long-test
+    libdispatch
+    foundation
+    lit-args=-v
+    dash-dash
+
+    install-libdispatch
+    install-foundation
+    reconfigure
+    ```
+
+2. Run:
+
+    ```
+    ./swift/utils/build-toolchain local.swift
+    ```
+
+Note that adding libdispatch to build-presets.ini is needed on Linux only, as
+Swift on macOS platforms uses the system-installed libdispatch, so it is not
+required there.
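A minimal smoke test for an installed C API build can confirm that the library links and runs; the file name and build line below are illustrative (with this release's embedded Blocks runtime on Linux, plain `-ldispatch` should suffice, while FreeBSD may still need `-lBlocksRuntime`, see `--with-blocks-runtime`):

```
/* smoke.c - sanity check that an installed libdispatch links and runs.
 * Illustrative build line: clang -fblocks smoke.c -ldispatch -o smoke
 */
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.smoke", NULL);
	dispatch_semaphore_t done = dispatch_semaphore_create(0);

	dispatch_async(q, ^{
		printf("hello from a dispatch queue\n");
		dispatch_semaphore_signal(done); // wake the waiting main thread
	});

	/* dispatch_semaphore_wait decrements the count; since it drops below
	 * zero here, the call blocks until the block above signals. */
	dispatch_semaphore_wait(done, DISPATCH_TIME_FOREVER);
	dispatch_release(done);
	dispatch_release(q);
	return 0;
}
```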
+ +### Building and installing on OS X + +The following configure options may be of general interest: + +`--with-apple-libpthread-source` + +Specify the path to Apple's libpthread package, so that appropriate headers + can be found and used. + +`--with-apple-libplatform-source` + +Specify the path to Apple's libplatform package, so that appropriate headers + can be found and used. + +`--with-apple-libclosure-source` + +Specify the path to Apple's Libclosure package, so that appropriate headers + can be found and used. + +`--with-apple-xnu-source` + +Specify the path to Apple's XNU package, so that appropriate headers can be + found and used. + +`--with-blocks-runtime` + +On systems where -fblocks is supported, specify an additional library path in which libBlocksRuntime can be found. This is not required on OS X, where the Blocks runtime is included in libSystem, but is required on FreeBSD. + +The following options are likely to only be useful when building libdispatch on +OS X as a replacement for /usr/lib/system/libdispatch.dylib: + +`--with-apple-objc4-source` + +Specify the path to Apple's objc4 package, so that appropriate headers can + be found and used. + +`--disable-libdispatch-init-constructor` + +Do not tag libdispatch's init routine as __constructor, in which case it must be run manually before libdispatch routines can be called. This is the default when building on OS X. For /usr/lib/system/libdispatch.dylib the init routine is called automatically during process start. + +`--enable-apple-tsd-optimizations` + +Use a non-portable allocation scheme for pthread per-thread data (TSD) keys when building libdispatch for /usr/lib/system on OS X. This should not be used on other OS's, or on OS X when building a stand-alone library. + +#### Typical configuration commands + +The following command lines create the configuration required to build +libdispatch for /usr/lib/system on OS X El Capitan: + + clangpath=$(dirname `xcrun --find clang`) + sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc" + LIBTOOLIZE=glibtoolize sh autogen.sh + cflags='-arch x86_64 -arch i386 -g -Os' + ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \ + --prefix=/usr --libdir=/usr/lib/system --disable-static \ + --enable-apple-tsd-optimizations \ + --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \ + --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \ + --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \ + --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \ + --with-apple-objc4-source=/path/to/10.11.0/objc4-680 + make check + +### Building and installing for FreeBSD + +Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with +clang and blocks support: + + sh autogen.sh + ./configure CC=clang --with-blocks-runtime=/usr/local/lib + make check + +### Building and installing for Linux + +Note that libdispatch development and testing is done only +on Ubuntu; currently supported versions are 14.04, 15.10 and 16.04. + +1. The first thing to do is install required packages: + + 1a. Install build tools and clang compiler. + `sudo apt-get install autoconf libtool pkg-config clang` + + 1b. Install dtrace (to generate provider.h) + `sudo apt-get install systemtap-sdt-dev` + + 1c. Install additional libdispatch dependencies + `sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev` + + Note: compiling libdispatch requires clang 3.8 or better and +the gold linker. 
If the default clang on your Ubuntu version is +too old, see http://apt.llvm.org/ to install a newer version. +On older Ubuntu releases, you may need to install binutils-gold +to get the gold linker. + +2. Initialize git submodules. + We are using git submodules to incorporate specific revisions of the + upstream pthread_workqueue and libkqueue projects into the build. + + ``` + git submodule init + git submodule update + ``` + +3. Build (as in the general instructions above) + + ``` + sh autogen.sh + ./configure + make + make install + ``` diff --git a/Makefile.am b/Makefile.am index cc01c7c..cdc642f 100644 --- a/Makefile.am +++ b/Makefile.am @@ -5,24 +5,23 @@ ACLOCAL_AMFLAGS = -I m4 if BUILD_OWN_PTHREAD_WORKQUEUES -SUBDIRS= \ - dispatch \ - libpwq \ - man \ - os \ - private \ - src \ - tests -else -SUBDIRS= \ - dispatch \ - man \ - os \ - private \ - src \ - tests + MAYBE_PTHREAD_WORKQUEUES = libpwq +endif + +if BUILD_OWN_KQUEUES + MAYBE_KQUEUES = libkqueue endif +SUBDIRS= \ + dispatch \ + $(MAYBE_PTHREAD_WORKQUEUES) \ + $(MAYBE_KQUEUES) \ + man \ + os \ + private \ + src \ + tests + EXTRA_DIST= \ README.md \ LICENSE \ diff --git a/PATCHES b/PATCHES index 28f7c52..0783ec9 100644 --- a/PATCHES +++ b/PATCHES @@ -253,3 +253,43 @@ github commits starting with 29bdc2f from [2dbf83c] APPLIED rdar://27303844 [78b9e82] APPLIED rdar://27303844 [2c0e5ee] APPLIED rdar://27303844 +[5ee237f] APPLIED rdar://27600964 +[77299ec] APPLIED rdar://27600964 +[57c5c28] APPLIED rdar://27600964 +[f8423ec] APPLIED rdar://27600964 +[325f73d] APPLIED rdar://27600964 +[b84e87e] APPLIED rdar://27600964 +[ae71a91] APPLIED rdar://27600964 +[8669dea] APPLIED rdar://27600964 +[a8d0327] APPLIED rdar://27600964 +[2e4e6af] APPLIED rdar://27600964 +[2457fb2] APPLIED rdar://27600964 +[4d58038] APPLIED rdar://27600964 +[98d0a05] APPLIED rdar://27600964 +[8976101] APPLIED rdar://27600964 +[0d9ea5f] APPLIED rdar://28486911 +[e7e9a32] APPLIED rdar://28486911 +[44174d9] APPLIED rdar://28486911 +[6402cb7] APPLIED rdar://28486911 +[e2d5eb5] APPLIED rdar://28486911 +[758bb7f] APPLIED rdar://28486911 +[4c588e9] APPLIED rdar://28486911 +[1300d06] APPLIED rdar://28486911 +[ae1f7e8] APPLIED rdar://28486911 +[40a9bfb] APPLIED rdar://28486911 +[6366081] APPLIED rdar://28486911 +[81d1d0c] APPLIED rdar://28486911 +[5526122] APPLIED rdar://28486911 +[1a7ff3f] APPLIED rdar://28486911 +[e905735] APPLIED rdar://28486911 +[7fe8323] APPLIED rdar://28486911 +[6249878] APPLIED rdar://28486911 +[20792fe] APPLIED rdar://28486911 +[3639fbe] APPLIED rdar://28486911 +[bda3baf] APPLIED rdar://28486911 +[8803d07] APPLIED rdar://28486911 +[d04a0df] APPLIED rdar://28486911 +[69d2a6a] APPLIED rdar://28486911 +[367bd95] APPLIED rdar://28486911 +[152985f] APPLIED rdar://28486911 +[ba7802e] APPLIED rdar://28486911 diff --git a/config/config.h b/config/config.h index ca3a1db..e39a9a9 100644 --- a/config/config.h +++ b/config/config.h @@ -57,6 +57,14 @@ you don't. */ #define HAVE_DECL_VQ_QUOTA 1 +/* Define to 1 if you have the declaration of `VQ_NEARLOWDISK', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_NEARLOWDISK 1 + +/* Define to 1 if you have the declaration of `VQ_DESIRED_DISK', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_DESIRED_DISK 1 + /* Define to 1 if you have the header file. 
*/ #define HAVE_DLFCN_H 1 diff --git a/configure.ac b/configure.ac index e5c7c5e..6f66e52 100644 --- a/configure.ac +++ b/configure.ac @@ -3,7 +3,7 @@ # AC_PREREQ(2.69) -AC_INIT([libdispatch], [1.3], [libdispatch@macosforge.org], [libdispatch], [http://libdispatch.macosforge.org]) +AC_INIT([libdispatch], [1.3], [https://bugs.swift.org], [libdispatch], [https://github.com/apple/swift-corelibs-libdispatch]) AC_REVISION([$$]) AC_CONFIG_AUX_DIR(config) AC_CONFIG_HEADER([config/config_ac.h]) @@ -11,6 +11,46 @@ AC_CONFIG_MACRO_DIR([m4]) ac_clean_files=a.out.dSYM AM_MAINTAINER_MODE +# +# Command line argument to specify build variant (default to release). +# Impacts default value of CFLAGS et al. so must come before AC_PROG_CC +# +AC_ARG_WITH([build-variant], + [AS_HELP_STRING([--with-build-variant=release|debug|releaseassert|releasedebuginfo], [Specify build variant [default=release]])], + [dispatch_build_variant=${withval}], + [dispatch_build_variant=release] +) +AS_CASE([$dispatch_build_variant], + [debug], [ + default_compiler_flags="-g -O0" + dispatch_enable_asserts=true + dispatch_enable_optimization=false + ], + [release], [ + default_compiler_flags="-O2" + dispatch_enable_asserts=false + dispatch_enable_optimization=true + ], + [releaseassert], [ + default_compiler_flags="-O2" + dispatch_enable_asserts=true + dispatch_enable_optimization=true + ], + [releasedebuginfo], [ + default_compiler_flags="-g -O2" + dispatch_enable_asserts=false + dispatch_enable_optimization=true + ], + [AC_MSG_ERROR("invalid build-variant $dispatch_build_variant")] +) +AM_CONDITIONAL(DISPATCH_ENABLE_ASSERTS, $dispatch_enable_asserts) +AM_CONDITIONAL(DISPATCH_ENABLE_OPTIMIZATION, $dispatch_enable_optimization) + +: ${CFLAGS=$default_compiler_flags} +: ${CXXFLAGS=$default_compiler_flags} +: ${OBJCFLAGS=$default_compiler_flags} +: ${OBJCXXFLAGS=$default_compiler_flags} + AC_PROG_CC([clang gcc cc]) AC_PROG_CXX([clang++ g++ c++]) AC_PROG_OBJC([clang gcc cc]) @@ -107,6 +147,12 @@ AC_ARG_WITH([swift-toolchain], case $target_os in linux*) os_string="linux" + case $target_cpu in + armv7l*) + target_cpu="armv7" + ;; + *) + esac ;; *) os_string=$target_os @@ -183,12 +229,18 @@ esac AC_SEARCH_LIBS(clock_gettime, rt) AC_SEARCH_LIBS(pthread_create, pthread) -# -# Prefer native kqueue(2); otherwise use libkqueue if present. -# -AC_CHECK_HEADER(sys/event.h, [], - [PKG_CHECK_MODULES(KQUEUE, libkqueue)] +AS_IF([test -f $srcdir/libkqueue/configure.ac], + [AC_DEFINE(BUILD_OWN_KQUEUES, 1, [Define if building libkqueue from source]) + ac_configure_args="--disable-libkqueue-install $ac_configure_args" + AC_CONFIG_SUBDIRS([libkqueue]) + build_own_kqueues=true], + [build_own_kqueues=false + AC_CHECK_HEADER(sys/event.h, [], + [PKG_CHECK_MODULES(KQUEUE, libkqueue)] + ) + ] ) +AM_CONDITIONAL(BUILD_OWN_KQUEUES, $build_own_kqueues) AC_CHECK_FUNCS([strlcpy getprogname], [], [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[ @@ -305,13 +357,13 @@ AC_CHECK_FUNCS([mach_port_construct]) # # Find functions and declarations we care about. 
#
-AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [],
+AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME], [], [],
     [[#include <time.h>]])
 AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT],
     [], [], [[#include <sys/event.h>]])
 AC_CHECK_DECLS([FD_COPY], [], [], [[#include <sys/select.h>]])
 AC_CHECK_DECLS([SIGEMT], [], [], [[#include <signal.h>]])
-AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA], [], [], [[#include <sys/mount.h>]])
+AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA, VQ_NEARLOWDISK, VQ_DESIRED_DISK], [], [], [[#include <sys/mount.h>]])
 AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include <errno.h>]])
 AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf])
@@ -384,6 +436,20 @@ AS_IF([test "x$have_mach" = "xtrue"], [
 ])
 AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" == "xyes"])

+#
+# symlink platform-specific module.modulemap files
+#
+AS_CASE([$target_os],
+  [darwin*], [ dispatch_module_map_os=darwin ],
+  [ dispatch_module_map_os=generic ]
+)
+AC_CONFIG_COMMANDS([modulemaps], [
+    ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/dispatch/module.modulemap
+    ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/private/module.modulemap
+  ],
+  [dispatch_module_map_os="$dispatch_module_map_os"]
+)
+
 #
 # Temporary: some versions of clang do not mark __builtin_trap() as
 # __attribute__((__noreturn__)). Detect and add if required.
@@ -401,6 +467,6 @@ AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Mak
 #
 # Generate testsuite links
 #
-AC_CONFIG_LINKS([tests/dispatch:$top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh])
+AC_CONFIG_LINKS([tests/dispatch:$ac_top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh])

 AC_OUTPUT
diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am
index 53ea598..89fd3da 100644
--- a/dispatch/Makefile.am
+++ b/dispatch/Makefile.am
@@ -24,5 +24,5 @@ dispatch_HEADERS= \
 	time.h

 if HAVE_SWIFT
-dispatch_HEADERS+=module.map
+dispatch_HEADERS+=module.modulemap
 endif
diff --git a/dispatch/module.modulemap b/dispatch/darwin/module.modulemap
similarity index 100%
rename from dispatch/module.modulemap
rename to dispatch/darwin/module.modulemap
diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h
index a26b951..e8d69f8 100644
--- a/dispatch/dispatch.h
+++ b/dispatch/dispatch.h
@@ -49,9 +49,11 @@
 #if defined(__linux__) && defined(__has_feature)
 #if __has_feature(modules)
+#if !defined(__arm__)
 #include <unistd.h> // for off_t (to match Glibc.modulemap)
 #endif
 #endif
+#endif

 #define DISPATCH_API_VERSION 20160712
diff --git a/dispatch/module.map b/dispatch/generic/module.modulemap
similarity index 86%
rename from dispatch/module.map
rename to dispatch/generic/module.modulemap
index 6f3c8aa..5c248e5 100644
--- a/dispatch/module.map
+++ b/dispatch/generic/module.modulemap
@@ -2,7 +2,6 @@ module Dispatch {
 	requires blocks
 	export *
 	link "dispatch"
-	link "BlocksRuntime"
 }

 module DispatchIntrospection [system] [extern_c] {
@@ -16,5 +15,4 @@ module CDispatch [system] [extern_c] {
 	export *
 	requires blocks
 	link "dispatch"
-	link "BlocksRuntime"
 }
diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj
index fb0ba91..df9d796 100644
--- a/libdispatch.xcodeproj/project.pbxproj
+++ b/libdispatch.xcodeproj/project.pbxproj
@@ -463,13 +463,6 @@
 			remoteGlobalIDString = 92F3FECA1BEC69E500025962;
 			remoteInfo = darwintests;
 		};
-		C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */ = {
-			isa = PBXContainerItemProxy;
-			containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
-			proxyType = 2;
-			remoteGlobalIDString = E4D01CB9108E6C7200FAA873;
-			remoteInfo = dispatch_deadname;
-		};
 		C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = {
 			isa = PBXContainerItemProxy;
 			containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
@@ -617,6 +610,7 @@
 		6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; };
 		6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = "<group>"; };
 		6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = "<group>"; };
+		6EC5ABF71D4446CA004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = "<group>"; };
 		6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = "<group>"; };
 		6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = "<group>"; };
 		6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = "<group>"; };
@@ -662,8 +656,8 @@
 		C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; };
 		C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = "<group>"; };
 		C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = "<group>"; };
-		C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = "<group>"; };
-		C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = "<group>"; };
+		C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = "<group>"; };
+		C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = "<group>"; };
 		C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = "<group>"; tabWidth = 8; };
 		C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = "<group>"; };
 		C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m;
sourceTree = ""; }; @@ -840,7 +834,6 @@ 4552540519B1384900B88766 /* jsgc_bench */, 4552540719B1384900B88766 /* async_bench */, 4552540919B1384900B88766 /* apply_bench */, - C00B0E111C5AEBBE000330B3 /* dispatch_deadname */, ); name = Products; sourceTree = ""; @@ -894,6 +887,7 @@ 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, + 6EC5ABF71D4446CA004F8674 /* dispatch_deadname.c */, 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */, 6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */, 6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */, @@ -1643,13 +1637,6 @@ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; - C00B0E111C5AEBBE000330B3 /* dispatch_deadname */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_deadname; - remoteRef = C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; diff --git a/m4/blocks.m4 b/m4/blocks.m4 index 49ee2a3..38a8610 100644 --- a/m4/blocks.m4 +++ b/m4/blocks.m4 @@ -10,6 +10,21 @@ AC_ARG_WITH([blocks-runtime], LIBS="$LIBS -L$blocks_runtime"] ) +# +# Configure argument to enable/disable using an embedded blocks runtime +# +AC_ARG_ENABLE([embedded_blocks_runtime], + [AS_HELP_STRING([--enable-embedded-blocks-runtime], + [Embed blocks runtime in libdispatch [default=yes on Linux, default=no on all other platforms]])],, + [case $target_os in + linux*) + enable_embedded_blocks_runtime=yes + ;; + *) + enable_embedded_blocks_runtime=no + esac] +) + # # Detect compiler support for Blocks; perhaps someday -fblocks won't be # required, in which case we'll need to change this. @@ -29,30 +44,32 @@ AC_CACHE_CHECK([for C Blocks support], [dispatch_cv_cblocks], [ AS_IF([test "x$dispatch_cv_cblocks" != "xno"], [ CBLOCKS_FLAGS="$dispatch_cv_cblocks" - # - # It may be necessary to directly link the Blocks runtime on some - # systems, so give it a try if we can't link a C program that uses - # Blocks. We will want to remove this at somepoint, as really -fblocks - # should force that linkage already. - # - saveCFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -fblocks -O0" - AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) - AC_TRY_LINK([], [ - ^{ int j; j=0; }(); - ], [ - AC_MSG_RESULT([no]); - ], [ - saveLIBS="$LIBS" - LIBS="$LIBS -lBlocksRuntime" - AC_TRY_LINK([], [ - ^{ int k; k=0; }(); - ], [ - AC_MSG_RESULT([-lBlocksRuntime]) - ], [ - AC_MSG_ERROR([can't find Blocks runtime]) - ]) - ]) + AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [ + # + # It may be necessary to directly link the Blocks runtime on some + # systems, so give it a try if we can't link a C program that uses + # Blocks. We will want to remove this at somepoint, as really -fblocks + # should force that linkage already. 
+ # + saveCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -fblocks -O0" + AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) + AC_TRY_LINK([], [ + ^{ int j; j=0; }(); + ], [ + AC_MSG_RESULT([no]); + ], [ + saveLIBS="$LIBS" + LIBS="$LIBS -lBlocksRuntime" + AC_TRY_LINK([], [ + ^{ int k; k=0; }(); + ], [ + AC_MSG_RESULT([-lBlocksRuntime]) + ], [ + AC_MSG_ERROR([can't find Blocks runtime]) + ]) + ]) + ]) CFLAGS="$saveCFLAGS" have_cblocks=true ], [ @@ -61,6 +78,7 @@ AS_IF([test "x$dispatch_cv_cblocks" != "xno"], [ ]) AM_CONDITIONAL(HAVE_CBLOCKS, $have_cblocks) AC_SUBST([CBLOCKS_FLAGS]) +AM_CONDITIONAL([BUILD_OWN_BLOCKS_RUNTIME], [test "x$enable_embedded_blocks_runtime" = "xyes"]) # # Because a different C++ compiler may be specified than C compiler, we have @@ -82,24 +100,26 @@ AC_CACHE_CHECK([for C++ Blocks support], [dispatch_cv_cxxblocks], [ AS_IF([test "x$dispatch_cv_cxxblocks" != "xno"], [ CXXBLOCKS_FLAGS="$dispatch_cv_cxxblocks" - saveCXXFLAGS="$CXXFLAGS" - CXXFLAGS="$CXXFLAGS -fblocks -O0" - AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) - AC_TRY_LINK([], [ - ^{ int j; j=0; }(); - ], [ - AC_MSG_RESULT([no]); - ], [ - saveLIBS="$LIBS" - LIBS="$LIBS -lBlocksRuntime" - AC_TRY_LINK([], [ - ^{ int k; k=0; }(); - ], [ - AC_MSG_RESULT([-lBlocksRuntime]) - ], [ - AC_MSG_ERROR([can't find Blocks runtime]) - ]) - ]) + AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [ + saveCXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS -fblocks -O0" + AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) + AC_TRY_LINK([], [ + ^{ int j; j=0; }(); + ], [ + AC_MSG_RESULT([no]); + ], [ + saveLIBS="$LIBS" + LIBS="$LIBS -lBlocksRuntime" + AC_TRY_LINK([], [ + ^{ int k; k=0; }(); + ], [ + AC_MSG_RESULT([-lBlocksRuntime]) + ], [ + AC_MSG_ERROR([can't find Blocks runtime]) + ]) + ]) + ]) CXXFLAGS="$saveCXXFLAGS" have_cxxblocks=true ], [ diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index 95ba1c3..cddcf32 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -23,6 +23,10 @@ .Fo dispatch_resume .Fa "dispatch_object_t object" .Fc +.Ft void +.Fo dispatch_activate +.Fa "dispatch_object_t object" +.Fc .Ft "void *" .Fo dispatch_get_context .Fa "dispatch_object_t object" @@ -40,7 +44,7 @@ .Sh DESCRIPTION Dispatch objects share functions for coordinating memory management, suspension, cancellation and context pointers. -.Sh MEMORY MANGEMENT +.Sh MEMORY MANAGEMENT Objects returned by creation functions in the dispatch framework may be uniformly retained and released with the functions .Fn dispatch_retain @@ -123,6 +127,17 @@ dispatch_async(queue, ^{ dispatch_release(object); }); .Ed +.Sh ACTIVATION +Dispatch objects such as queues and sources may be created in an inactive +state. Objects in this state must be activated before any blocks +associated with them will be invoked. Calling +.Fn dispatch_activate +on an active object has no effect. +.Pp +Changing attributes such as the target queue or a source handler is no longer permitted +once the object has been activated (see +.Xr dispatch_set_target_queue 3 , +.Xr dispatch_source_set_event_handler 3 ). .Sh SUSPENSION The invocation of blocks on dispatch queues or dispatch sources may be suspended or resumed with the functions @@ -148,7 +163,7 @@ and .Fn dispatch_resume such that the dispatch object is fully resumed when the last reference is released. The result of releasing all references to a dispatch object while in -a suspended state is undefined. 
+an inactive or suspended state is undefined. .Sh CONTEXT POINTERS Dispatch objects support supplemental context pointers. The value of the context pointer may be retrieved and updated with diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index 81c2915..da26365 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -23,6 +23,13 @@ .Fc .Sh DESCRIPTION Dispatch semaphores are used to synchronize threads. +.Pp +The +.Fn dispatch_semaphore_wait +function decrements the semaphore. If the resulting value is less than zero, +it waits for a signal from a thread that increments the semaphore by calling +.Fn dispatch_semaphore_signal +before returning. The .Fa timeout parameter is creatable with the @@ -30,6 +37,13 @@ parameter is creatable with the or .Xr dispatch_walltime 3 functions. +.Pp +The +.Fn dispatch_semaphore_signal +function increments the counting semaphore. If the previous value was less than zero, +it wakes one of the threads that are waiting in +.Fn dispatch_semaphore_wait +before returning. .Sh COMPLETION SYNCHRONIZATION If the .Fa count diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 4da708c..e9b0fb7 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -295,7 +295,7 @@ The port's corresponding receive right has been destroyed .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. Note that because this source type will request notifications on the provided port, it should not be mixed with the use of @@ -372,7 +372,7 @@ A signal was delivered to the process. .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. .Pp @@ -548,7 +548,7 @@ or .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. 
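For the mask-based source types documented above, the handler reads the accumulated event bitmask with `dispatch_source_get_data()` and tests individual bits. A minimal sketch using a VNODE source (the descriptor setup is elided; `O_EVTONLY` is Darwin-specific):

```
// Illustrative use of a mask-based dispatch source: the value returned by
// dispatch_source_get_data() is a bitmask of the events that fired.
#include <dispatch/dispatch.h>
#include <stdint.h>
#include <stdio.h>

static dispatch_source_t
watch_file(int fd) // e.g. from open(path, O_EVTONLY) on Darwin
{
	dispatch_source_t src = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_VNODE, (uintptr_t)fd,
			DISPATCH_VNODE_WRITE | DISPATCH_VNODE_DELETE,
			dispatch_get_main_queue());
	dispatch_source_set_event_handler(src, ^{
		unsigned long events = dispatch_source_get_data(src);
		if (events & DISPATCH_VNODE_WRITE) printf("file written\n");
		if (events & DISPATCH_VNODE_DELETE) printf("file deleted\n");
	});
	dispatch_resume(src); // sources are created suspended
	return src;
}
```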
.Pp diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index 2c6466f..b73b39b 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -38,39 +38,9 @@ * Layout of structs is subject to change without notice */ -#define FIREHOSE_BUFFER_CHUNK_SIZE 4096ul #define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE 2048ul #define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT 16 -typedef union { - uint64_t fbc_atomic_pos; -#define FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC (1ULL << 0) -#define FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC (1ULL << 16) -#define FIREHOSE_BUFFER_POS_REFCNT_INC (1ULL << 32) -#define FIREHOSE_BUFFER_POS_FULL_BIT (1ULL << 56) -#define FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(pos, stream) \ - ((((pos).fbc_atomic_pos >> 48) & 0x1ff) == (uint16_t)stream) - struct { - uint16_t fbc_next_entry_offs; - uint16_t fbc_private_offs; - uint8_t fbc_refcnt; - uint8_t fbc_qos_bits; - uint8_t fbc_stream; - uint8_t fbc_flag_full : 1; - uint8_t fbc_flag_io : 1; - uint8_t _fbc_flag_unused : 6; - }; -} firehose_buffer_pos_u; - -typedef struct firehose_buffer_chunk_s { - uint8_t fbc_start[0]; - firehose_buffer_pos_u volatile fbc_pos; - uint64_t fbc_timestamp; - uint8_t fbc_data[FIREHOSE_BUFFER_CHUNK_SIZE - - sizeof(firehose_buffer_pos_u) - - sizeof(uint64_t)]; -} __attribute__((aligned(8))) *firehose_buffer_chunk_t; - typedef struct firehose_buffer_range_s { uint16_t fbr_offset; // offset from the start of the buffer uint16_t fbr_length; @@ -78,6 +48,8 @@ typedef struct firehose_buffer_range_s { #ifdef KERNEL +typedef struct firehose_chunk_s *firehose_chunk_t; + // implemented by the kernel extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io); extern void __firehose_critical_region_enter(void); @@ -89,19 +61,10 @@ firehose_tracepoint_t __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, uint16_t pubsize, uint16_t privsize, uint8_t **privptr); -firehose_tracepoint_t -__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, - uint64_t stamp, firehose_stream_t stream, - uint16_t pubsize, uint16_t privsize, uint8_t **privptr); - void __firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); -void -__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); - firehose_buffer_t __firehose_buffer_create(size_t *size); @@ -118,13 +81,12 @@ const uint32_t _firehose_spi_version; OS_ALWAYS_INLINE static inline const uint8_t * -_firehose_tracepoint_reader_init(firehose_buffer_chunk_t fbc, - const uint8_t **endptr) +_firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr) { - const uint8_t *start = fbc->fbc_data; - const uint8_t *end = fbc->fbc_start + fbc->fbc_pos.fbc_next_entry_offs; + const uint8_t *start = fc->fc_data; + const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; - if (end > fbc->fbc_start + FIREHOSE_BUFFER_CHUNK_SIZE) { + if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) { end = start; } *endptr = end; @@ -136,27 +98,29 @@ static inline firehose_tracepoint_t _firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - firehose_tracepoint_t ft; + struct ft_unaligned_s { + struct firehose_tracepoint_s ft; + } __attribute__((packed, aligned(1))) *uft; do { - ft = (firehose_tracepoint_t)*ptr; - if (ft->ft_data >= end) { + uft = (struct ft_unaligned_s *)*ptr; + if (uft->ft.ft_data >= end) { // 
reached the end return NULL; } - if (!ft->ft_length) { + if (!uft->ft.ft_length) { // tracepoint write didn't even start return NULL; } - if (ft->ft_length > end - ft->ft_data) { + if (uft->ft.ft_length > end - uft->ft.ft_data) { // invalid length return NULL; } - *ptr += roundup(ft_size + ft->ft_length, 8); + *ptr += roundup(ft_size + uft->ft.ft_length, 8); // test whether write of the tracepoint was finished - } while (os_unlikely(ft->ft_id.ftid_value == 0)); + } while (os_unlikely(uft->ft.ft_id.ftid_value == 0)); - return ft; + return (firehose_tracepoint_t)uft; } #define firehose_tracepoint_foreach(ft, fbc) \ @@ -165,13 +129,13 @@ _firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) OS_ALWAYS_INLINE static inline bool -firehose_buffer_range_validate(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t ft, firehose_buffer_range_t range) +firehose_buffer_range_validate(firehose_chunk_t fc, firehose_tracepoint_t ft, + firehose_buffer_range_t range) { - if (range->fbr_offset + range->fbr_length > FIREHOSE_BUFFER_CHUNK_SIZE) { + if (range->fbr_offset + range->fbr_length > FIREHOSE_CHUNK_SIZE) { return false; } - if (fbc->fbc_start + range->fbr_offset < ft->ft_data + ft->ft_length) { + if (fc->fc_start + range->fbr_offset < ft->ft_data + ft->ft_length) { return false; } return true; diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h index 4bff8ab..441bb52 100644 --- a/os/firehose_server_private.h +++ b/os/firehose_server_private.h @@ -139,6 +139,32 @@ OS_NOTHROW OS_NONNULL1 uint64_t firehose_client_get_unique_pid(firehose_client_t client, pid_t *pid); +/*! + * @function firehose_client_get_pid_version + * + * @abstract + * Returns the pid version for that client. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +int +firehose_client_get_pid_version(firehose_client_t client); + +/*! + * @function firehose_client_get_euid + * + * @abstract + * Returns the EUID for that client as discovered at connect time. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +uid_t +firehose_client_get_euid(firehose_client_t client); + /*! * @function firehose_client_get_metadata_buffer * @@ -235,7 +261,7 @@ OS_NOTHROW OS_NONNULL1 OS_NONNULL4 void firehose_client_metadata_stream_peek(firehose_client_t client, firehose_event_t context, OS_NOESCAPE bool (^peek_should_start)(void), - OS_NOESCAPE bool (^peek)(firehose_buffer_chunk_t fbc)); + OS_NOESCAPE bool (^peek)(firehose_chunk_t fbc)); #pragma mark - Firehose Server @@ -246,7 +272,7 @@ firehose_client_metadata_stream_peek(firehose_client_t client, * Type of the handler block for firehose_server_init() */ typedef void (^firehose_handler_t)(firehose_client_t client, - firehose_event_t event, firehose_buffer_chunk_t page); + firehose_event_t event, firehose_chunk_t page); /*! * @function firehose_server_init @@ -276,6 +302,20 @@ OS_NOTHROW void firehose_server_assert_spi_version(uint32_t spi_version); +/*! + * @function firehose_server_has_ever_flushed_pages + * + * @abstract + * Checks whether the firehose server has ever flushed any pages this boot. + * + * @discussion + * Must be called after firehose_server_init() and before calling + * firehose_server_resume(). + */ +OS_NOTHROW +bool +firehose_server_has_ever_flushed_pages(void); + /*! * @function firehose_server_resume * @@ -289,11 +329,42 @@ OS_NOTHROW void firehose_server_resume(void); +/*! 
+ * @function firehose_server_cancel + * + * @abstract + * Cancels the server, disconnects all clients, and prevents new connections. + */ +OS_NOTHROW +void +firehose_server_cancel(void); + +/*! + * @typedef firehose_server_queue_t + * + * @abstract + * Values to pass to firehose_server_get_queue() + */ +OS_ENUM(firehose_server_queue, unsigned long, + FIREHOSE_SERVER_QUEUE_UNKNOWN, + FIREHOSE_SERVER_QUEUE_IO, + FIREHOSE_SERVER_QUEUE_MEMORY, +); + +/*! + * @function firehose_server_copy_queue + * + * @abstract + * Returns internal queues to the firehose server subsystem. + */ +OS_NOTHROW OS_OBJECT_RETURNS_RETAINED +dispatch_queue_t +firehose_server_copy_queue(firehose_server_queue_t which); + #pragma mark - Firehose Snapshot /*! * @typedef firehose_snapshot_event - * */ OS_ENUM(firehose_snapshot_event, unsigned long, FIREHOSE_SNAPSHOT_EVENT_IO_START = 1, @@ -310,7 +381,7 @@ OS_ENUM(firehose_snapshot_event, unsigned long, * Type of the handler block for firehose_snapshot */ typedef void (^firehose_snapshot_handler_t)(firehose_client_t client, - firehose_snapshot_event_t event, firehose_buffer_chunk_t page); + firehose_snapshot_event_t event, firehose_chunk_t page); /*! * @function firehose_snapshot diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h index 8f233b3..456cb0c 100644 --- a/os/voucher_activity_private.h +++ b/os/voucher_activity_private.h @@ -29,10 +29,11 @@ #ifndef __linux__ #include #endif +#include #include #include "voucher_private.h" -#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20160329 +#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20161003 #if OS_VOUCHER_WEAK_IMPORT #define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT @@ -40,12 +41,6 @@ #define OS_VOUCHER_EXPORT OS_EXPORT #endif -#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_PUSH \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") -#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_POP \ - _Pragma("clang diagnostic pop") - __BEGIN_DECLS /*! @@ -117,7 +112,7 @@ voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, firehose_activity_id_t *parent_id); /*! - * @function voucher_activity_create + * @function voucher_activity_create_with_data * * @abstract * Creates a voucher object with a new activity identifier. @@ -151,19 +146,22 @@ voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, * @param flags * See voucher_activity_flag_t documentation for effect. * - * @param location - * Location identifier for the automatic tracepoint generated as part of - * creating the new activity. + * @param pubdata + * Pointer to packed buffer of tracepoint data. + * + * @param publen + * Length of data at 'pubdata'. * * @result * A new voucher with an activity identifier. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +__OSX_AVAILABLE(10.12.4) __IOS_AVAILABLE(10.3) +__TVOS_AVAILABLE(10.2) __WATCHOS_AVAILABLE(3.2) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t -voucher_activity_create(firehose_tracepoint_id_t trace_id, - voucher_t base, firehose_activity_flags_t flags, uint64_t location); +voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, + const void *pubdata, size_t publen); __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) @@ -228,7 +226,7 @@ voucher_activity_trace(firehose_stream_t stream, const void *pubdata, size_t publen); /*! 
- * @function voucher_activity_trace_with_private_strings + * @function voucher_activity_trace_v * * @abstract * Add a tracepoint to the specified stream, with private data. @@ -242,20 +240,32 @@ voucher_activity_trace(firehose_stream_t stream, * @param timestamp * The mach_approximate_time()/mach_absolute_time() value for this tracepoint. * - * @param pubdata - * Pointer to packed buffer of tracepoint data. + * @param iov + * Array of `struct iovec` pointing to the data to layout. + * The total size of this iovec must span exactly `publen + privlen` bytes. + * The `publen` boundary must coincide with the end of an iovec (each iovec + * must either be pure public or pure private data). * * @param publen - * Length of data at 'pubdata'. - * - * @param privdata - * Pointer to packed buffer of private tracepoint data. + * Total length of data to read from the iovec for the public data. * * @param privlen - * Length of data at 'privdata'. + * Length of data to read from the iovec after the public data for the private + * data. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +__OSX_AVAILABLE(10.12.4) __IOS_AVAILABLE(10.3) +__TVOS_AVAILABLE(10.2) __WATCHOS_AVAILABLE(3.2) +OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 +firehose_tracepoint_id_t +voucher_activity_trace_v(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const struct iovec *iov, size_t publen, size_t privlen); + + +__OSX_DEPRECATED(10.12, 10.12.4, "Use voucher_activity_trace_v") +__IOS_DEPRECATED(10.0, 10.3, "Use voucher_activity_trace_v") +__TVOS_DEPRECATED(10.0, 10.2, "Use voucher_activity_trace_v") +__WATCHOS_DEPRECATED(3.0, 3.2, "Use voucher_activity_trace_v") OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6 firehose_tracepoint_id_t voucher_activity_trace_with_private_strings(firehose_stream_t stream, @@ -263,15 +273,13 @@ voucher_activity_trace_with_private_strings(firehose_stream_t stream, const void *pubdata, size_t publen, const void *privdata, size_t privlen); -typedef struct voucher_activity_hooks_s { -#define VOUCHER_ACTIVITY_HOOKS_VERSION 3 +typedef const struct voucher_activity_hooks_s { +#define VOUCHER_ACTIVITY_HOOKS_VERSION 4 long vah_version; - // version 1 mach_port_t (*vah_get_logd_port)(void); - // version 2 dispatch_mach_handler_function_t vah_debug_channel_handler; - // version 3 kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *); + void (*vah_metadata_init)(void *metadata_buffer, size_t size); } *voucher_activity_hooks_t; /*! 
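The iovec contract for `voucher_activity_trace_v` above is easiest to see laid out concretely. A minimal sketch, with invented buffer names and contents; only the layout rule comes from the declaration above:

```
// The public bytes must end exactly at an iovec boundary, followed by
// iovecs that are purely private data.
#include <sys/uio.h>

static const char pub_hdr[] = "public-header";
static const char pub_fmt[] = "public-format";
static const char priv_str[] = "private-string";

static const struct iovec trace_iov[] = {
	{ .iov_base = (void *)pub_hdr,  .iov_len = sizeof(pub_hdr)  }, // public
	{ .iov_base = (void *)pub_fmt,  .iov_len = sizeof(pub_fmt)  }, // public
	{ .iov_base = (void *)priv_str, .iov_len = sizeof(priv_str) }, // private
};
// publen  = sizeof(pub_hdr) + sizeof(pub_fmt)  -- ends at an iovec boundary
// privlen = sizeof(priv_str)
// Total spanned by trace_iov == publen + privlen, as required, so a call
// would look like:
// voucher_activity_trace_v(stream, trace_id, timestamp,
//         trace_iov, publen, privlen);
```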
diff --git a/private/module.modulemap b/private/darwin/module.modulemap similarity index 100% rename from private/module.modulemap rename to private/darwin/module.modulemap diff --git a/private/generic/module.modulemap b/private/generic/module.modulemap new file mode 100644 index 0000000..62975a5 --- /dev/null +++ b/private/generic/module.modulemap @@ -0,0 +1,11 @@ +module DispatchPrivate [system] [extern_c] { + umbrella header "private.h" + exclude header "mach_private.h" + module * { export * } + export * +} + +module DispatchIntrospectionPrivate [system] [extern_c] { + header "introspection_private.h" + export * +} diff --git a/private/private.h b/private/private.h index 3c37bed..7136e6d 100644 --- a/private/private.h +++ b/private/private.h @@ -199,17 +199,10 @@ DISPATCH_EXPORT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void); -#if TARGET_OS_MAC -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -_dispatch_main_queue_callback_4CF(mach_msg_header_t *_Null_unspecified msg); -#else __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_main_queue_callback_4CF(void *_Null_unspecified msg); -#endif __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT diff --git a/private/source_private.h b/private/source_private.h index bb13702..1df0c1b 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -214,6 +214,12 @@ enum { * * @constant DISPATCH_VFS_QUOTA * We hit a user quota (quotactl) for this filesystem. + * + * @constant DISPATCH_VFS_NEARLOWDISK + * Filesystem is nearly full (below NEARLOWDISK level). + * + * @constant DISPATCH_VFS_DESIREDDISK + * Filesystem has exceeded the DESIREDDISK level */ enum { DISPATCH_VFS_NOTRESP = 0x0001, @@ -227,6 +233,8 @@ enum { DISPATCH_VFS_UPDATE = 0x0100, DISPATCH_VFS_VERYLOWDISK = 0x0200, DISPATCH_VFS_QUOTA = 0x1000, + DISPATCH_VFS_NEARLOWDISK = 0x2000, + DISPATCH_VFS_DESIREDDISK = 0x4000, }; /*! diff --git a/src/BlocksRuntime/Block.h b/src/BlocksRuntime/Block.h new file mode 100644 index 0000000..15c7242 --- /dev/null +++ b/src/BlocksRuntime/Block.h @@ -0,0 +1,54 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + + +#ifndef _Block_H_ +#define _Block_H_ + +#if !defined(BLOCK_EXPORT) +# if defined(__cplusplus) +# define BLOCK_EXPORT extern "C" __attribute__((visibility("default"))) +# else +# define BLOCK_EXPORT extern __attribute__((visibility("default"))) +# endif +#endif + +#if __cplusplus +extern "C" { +#endif + +// Create a heap based copy of a Block or simply add a reference to an existing one. +// This must be paired with Block_release to recover memory, even when running +// under Objective-C Garbage Collection. +BLOCK_EXPORT void *_Block_copy(const void *aBlock); + +// Lose the reference, and if heap based and last reference, recover the memory +BLOCK_EXPORT void _Block_release(const void *aBlock); + +// Used by the compiler. Do not call this function yourself. +BLOCK_EXPORT void _Block_object_assign(void *, const void *, const int); + +// Used by the compiler. Do not call this function yourself. 
+BLOCK_EXPORT void _Block_object_dispose(const void *, const int); + +// Used by the compiler. Do not use these variables yourself. +BLOCK_EXPORT void * _NSConcreteGlobalBlock[32]; +BLOCK_EXPORT void * _NSConcreteStackBlock[32]; + +#if __cplusplus +} +#endif + +// Type correct macros + +#define Block_copy(...) ((__typeof(__VA_ARGS__))_Block_copy((const void *)(__VA_ARGS__))) +#define Block_release(...) _Block_release((const void *)(__VA_ARGS__)) + + +#endif diff --git a/src/BlocksRuntime/Block_private.h b/src/BlocksRuntime/Block_private.h new file mode 100644 index 0000000..deeb19a --- /dev/null +++ b/src/BlocksRuntime/Block_private.h @@ -0,0 +1,264 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + + +#ifndef _BLOCK_PRIVATE_H_ +#define _BLOCK_PRIVATE_H_ + +#include +#include +#include + +#include "Block.h" + +#if __cplusplus +extern "C" { +#endif + + +// Values for Block_layout->flags to describe block objects +enum { + BLOCK_DEALLOCATING = (0x0001), // runtime + BLOCK_REFCOUNT_MASK = (0xfffe), // runtime + BLOCK_NEEDS_FREE = (1 << 24), // runtime + BLOCK_HAS_COPY_DISPOSE = (1 << 25), // compiler + BLOCK_HAS_CTOR = (1 << 26), // compiler: helpers have C++ code + BLOCK_IS_GC = (1 << 27), // runtime + BLOCK_IS_GLOBAL = (1 << 28), // compiler + BLOCK_USE_STRET = (1 << 29), // compiler: undefined if !BLOCK_HAS_SIGNATURE + BLOCK_HAS_SIGNATURE = (1 << 30), // compiler + BLOCK_HAS_EXTENDED_LAYOUT=(1 << 31) // compiler +}; + +#define BLOCK_DESCRIPTOR_1 1 +struct Block_descriptor_1 { + uintptr_t reserved; + uintptr_t size; +}; + +#define BLOCK_DESCRIPTOR_2 1 +struct Block_descriptor_2 { + // requires BLOCK_HAS_COPY_DISPOSE + void (*copy)(void *dst, const void *src); + void (*dispose)(const void *); +}; + +#define BLOCK_DESCRIPTOR_3 1 +struct Block_descriptor_3 { + // requires BLOCK_HAS_SIGNATURE + const char *signature; + const char *layout; // contents depend on BLOCK_HAS_EXTENDED_LAYOUT +}; + +struct Block_layout { + void *isa; + volatile int32_t flags; // contains ref count + int32_t reserved; + void (*invoke)(void *, ...); + struct Block_descriptor_1 *descriptor; + // imported variables +}; + + +// Values for Block_byref->flags to describe __block variables +enum { + // Byref refcount must use the same bits as Block_layout's refcount. 
+ // BLOCK_DEALLOCATING = (0x0001), // runtime + // BLOCK_REFCOUNT_MASK = (0xfffe), // runtime + + BLOCK_BYREF_LAYOUT_MASK = (0xf << 28), // compiler + BLOCK_BYREF_LAYOUT_EXTENDED = ( 1 << 28), // compiler + BLOCK_BYREF_LAYOUT_NON_OBJECT = ( 2 << 28), // compiler + BLOCK_BYREF_LAYOUT_STRONG = ( 3 << 28), // compiler + BLOCK_BYREF_LAYOUT_WEAK = ( 4 << 28), // compiler + BLOCK_BYREF_LAYOUT_UNRETAINED = ( 5 << 28), // compiler + + BLOCK_BYREF_IS_GC = ( 1 << 27), // runtime + + BLOCK_BYREF_HAS_COPY_DISPOSE = ( 1 << 25), // compiler + BLOCK_BYREF_NEEDS_FREE = ( 1 << 24), // runtime +}; + +struct Block_byref { + void *isa; + struct Block_byref *forwarding; + volatile int32_t flags; // contains ref count + uint32_t size; +}; + +struct Block_byref_2 { + // requires BLOCK_BYREF_HAS_COPY_DISPOSE + void (*byref_keep)(struct Block_byref *dst, struct Block_byref *src); + void (*byref_destroy)(struct Block_byref *); +}; + +struct Block_byref_3 { + // requires BLOCK_BYREF_LAYOUT_EXTENDED + const char *layout; +}; + + +// Extended layout encoding. + +// Values for Block_descriptor_3->layout with BLOCK_HAS_EXTENDED_LAYOUT +// and for Block_byref_3->layout with BLOCK_BYREF_LAYOUT_EXTENDED + +// If the layout field is less than 0x1000, then it is a compact encoding +// of the form 0xXYZ: X strong pointers, then Y byref pointers, +// then Z weak pointers. + +// If the layout field is 0x1000 or greater, it points to a +// string of layout bytes. Each byte is of the form 0xPN. +// Operator P is from the list below. Value N is a parameter for the operator. +// Byte 0x00 terminates the layout; remaining block data is non-pointer bytes. + +enum { + BLOCK_LAYOUT_ESCAPE = 0, // N=0 halt, rest is non-pointer. N!=0 reserved. + BLOCK_LAYOUT_NON_OBJECT_BYTES = 1, // N bytes non-objects + BLOCK_LAYOUT_NON_OBJECT_WORDS = 2, // N words non-objects + BLOCK_LAYOUT_STRONG = 3, // N words strong pointers + BLOCK_LAYOUT_BYREF = 4, // N words byref pointers + BLOCK_LAYOUT_WEAK = 5, // N words weak pointers + BLOCK_LAYOUT_UNRETAINED = 6, // N words unretained pointers + BLOCK_LAYOUT_UNKNOWN_WORDS_7 = 7, // N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_8 = 8, // N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_9 = 9, // N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_A = 0xA, // N words, reserved + BLOCK_LAYOUT_UNUSED_B = 0xB, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_C = 0xC, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_D = 0xD, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_E = 0xE, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_F = 0xF, // unspecified, reserved +}; + + +// Runtime support functions used by compiler when generating copy/dispose helpers + +// Values for _Block_object_assign() and _Block_object_dispose() parameters +enum { + // see function implementation for a more complete description of these fields and combinations + BLOCK_FIELD_IS_OBJECT = 3, // id, NSObject, __attribute__((NSObject)), block, ... + BLOCK_FIELD_IS_BLOCK = 7, // a block variable + BLOCK_FIELD_IS_BYREF = 8, // the on stack structure holding the __block variable + BLOCK_FIELD_IS_WEAK = 16, // declared __weak, only used in byref copy helpers + BLOCK_BYREF_CALLER = 128, // called from __block (byref) copy/dispose support routines. 
+}; + +enum { + BLOCK_ALL_COPY_DISPOSE_FLAGS = + BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_BYREF | + BLOCK_FIELD_IS_WEAK | BLOCK_BYREF_CALLER +}; + +// Runtime entry point called by compiler when assigning objects inside copy helper routines +BLOCK_EXPORT void _Block_object_assign(void *destAddr, const void *object, const int flags); + // BLOCK_FIELD_IS_BYREF is only used from within block copy helpers + + +// runtime entry point called by the compiler when disposing of objects inside dispose helper routine +BLOCK_EXPORT void _Block_object_dispose(const void *object, const int flags); + + +// Other support functions + +// runtime entry to get total size of a closure +BLOCK_EXPORT size_t Block_size(void *aBlock); + +// indicates whether block was compiled with compiler that sets the ABI related metadata bits +BLOCK_EXPORT bool _Block_has_signature(void *aBlock); + +// returns TRUE if return value of block is on the stack, FALSE otherwise +BLOCK_EXPORT bool _Block_use_stret(void *aBlock); + +// Returns a string describing the block's parameter and return types. +// The encoding scheme is the same as Objective-C @encode. +// Returns NULL for blocks compiled with some compilers. +BLOCK_EXPORT const char * _Block_signature(void *aBlock); + +// Returns a string describing the block's GC layout. +// This uses the GC skip/scan encoding. +// May return NULL. +BLOCK_EXPORT const char * _Block_layout(void *aBlock); + +// Returns a string describing the block's layout. +// This uses the "extended layout" form described above. +// May return NULL. +BLOCK_EXPORT const char * _Block_extended_layout(void *aBlock); + +// Callable only from the ARR weak subsystem while in exclusion zone +BLOCK_EXPORT bool _Block_tryRetain(const void *aBlock); + +// Callable only from the ARR weak subsystem while in exclusion zone +BLOCK_EXPORT bool _Block_isDeallocating(const void *aBlock); + + +// the raw data space for runtime classes for blocks +// class+meta used for stack, malloc, and collectable based blocks +BLOCK_EXPORT void * _NSConcreteMallocBlock[32]; +BLOCK_EXPORT void * _NSConcreteAutoBlock[32]; +BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32]; +BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32]; +// declared in Block.h +// BLOCK_EXPORT void * _NSConcreteGlobalBlock[32]; +// BLOCK_EXPORT void * _NSConcreteStackBlock[32]; + + +// the intercept routines that must be used under GC +BLOCK_EXPORT void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign_strong)(void *, void **), + void (*gc_assign_weak)(const void *, void *), + void (*gc_memmove)(void *, void *, unsigned long)); + +// earlier version, now simply transitional +BLOCK_EXPORT void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign_strong)(void *, void **), + void (*gc_assign_weak)(const void *, void *)); + +BLOCK_EXPORT void _Block_use_RR( void (*retain)(const void *), + void (*release)(const void *)); + +struct Block_callbacks_RR { + size_t size; // size == sizeof(struct Block_callbacks_RR) + void (*retain)(const void *); + void (*release)(const void *); + void (*destructInstance)(const void *); +}; +typedef struct Block_callbacks_RR Block_callbacks_RR; + +BLOCK_EXPORT void _Block_use_RR2(const Block_callbacks_RR *callbacks); + +// make a collectable GC heap based Block. Not useful under non-GC. 
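+// (Usage sketch, assuming a GC build: NSAutoBlock calls this instead of
+// _Block_copy() so the result starts without a logical refcount; it boils
+// down to _Block_copy_internal(aBlock, false) in runtime.c.)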
+BLOCK_EXPORT void *_Block_copy_collectable(const void *aBlock); + +// thread-unsafe diagnostic +BLOCK_EXPORT const char *_Block_dump(const void *block); + + +// Obsolete + +// first layout +struct Block_basic { + void *isa; + int Block_flags; // int32_t + int Block_size; // XXX should be packed into Block_flags + void (*Block_invoke)(void *); + void (*Block_copy)(void *dst, void *src); // iff BLOCK_HAS_COPY_DISPOSE + void (*Block_dispose)(void *); // iff BLOCK_HAS_COPY_DISPOSE + //long params[0]; // where const imports, __block storage references, etc. get laid down +} __attribute__((deprecated)); + + +#if __cplusplus +} +#endif + + +#endif diff --git a/src/BlocksRuntime/data.c b/src/BlocksRuntime/data.c new file mode 100644 index 0000000..0837176 --- /dev/null +++ b/src/BlocksRuntime/data.c @@ -0,0 +1,24 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + +/******************** +NSBlock support + +We allocate space and export a symbol to be used as the Class for the on-stack and malloc'ed copies until ObjC arrives on the scene. These data areas are set up by Foundation to link in as real classes post facto. + +We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block. +**********************/ +#define BLOCK_EXPORT extern __attribute__((visibility("default"))) + +BLOCK_EXPORT void * _NSConcreteStackBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteMallocBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteAutoBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteGlobalBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32] = { 0 }; diff --git a/src/BlocksRuntime/runtime.c b/src/BlocksRuntime/runtime.c new file mode 100644 index 0000000..1e10636 --- /dev/null +++ b/src/BlocksRuntime/runtime.c @@ -0,0 +1,747 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + +#include "Block_private.h" +#include +#include +#include +#include +#define __USE_GNU +#include +#if __has_include() +#include +#else +#include +#endif +#ifndef os_assumes +#define os_assumes(_x) _x +#endif +#ifndef os_assert +#define os_assert(_x) assert(_x) +#endif + +#if TARGET_OS_WIN32 +#define _CRT_SECURE_NO_WARNINGS 1 +#include +static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) +{ + // fixme barrier is overkill -- see objc-os.h + long original = InterlockedCompareExchange(dst, newl, oldl); + return (original == oldl); +} + +static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) +{ + // fixme barrier is overkill -- see objc-os.h + int original = InterlockedCompareExchange(dst, newi, oldi); + return (original == oldi); +} +#else +#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New) +#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New) +#endif + +/*********************** +Globals +************************/ + +static void *_Block_copy_class = _NSConcreteMallocBlock; +static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock; +static int _Block_copy_flag = BLOCK_NEEDS_FREE; +static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4; // logical 2 + +static bool isGC = false; + +/******************************************************************************* +Internal Utilities +********************************************************************************/ + + +static int32_t latching_incr_int(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return BLOCK_REFCOUNT_MASK; + } + if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) { + return old_value+2; + } + } +} + +static bool latching_incr_int_not_deallocating(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if (old_value & BLOCK_DEALLOCATING) { + // if deallocating we can't do this + return false; + } + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + // if latched, we're leaking this block, and we succeed + return true; + } + if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) { + // otherwise, we must store a new retained value without the deallocating bit set + return true; + } + } +} + + +// return should_deallocate? +static bool latching_decr_int_should_deallocate(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return false; // latched high + } + if ((old_value & BLOCK_REFCOUNT_MASK) == 0) { + return false; // underflow, latch low + } + int32_t new_value = old_value - 2; + bool result = false; + if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) { + new_value = old_value - 1; + result = true; + } + if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) { + return result; + } + } +} + +// hit zero? 
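+// Once the count saturates at BLOCK_REFCOUNT_MASK it is "latched": the
+// increment and decrement paths above both treat that value as sticky, so an
+// over-retained block (logical refcount 0x7fff, i.e. 32767) is deliberately
+// leaked rather than allowed to wrap around; every later retain and release
+// of it becomes a no-op.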
+static bool latching_decr_int_now_zero(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return false; // latched high + } + if ((old_value & BLOCK_REFCOUNT_MASK) == 0) { + return false; // underflow, latch low + } + int32_t new_value = old_value - 2; + if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) { + return (new_value & BLOCK_REFCOUNT_MASK) == 0; + } + } +} + + +/*********************** +GC support stub routines +************************/ +#if !TARGET_OS_WIN32 +#pragma mark GC Support Routines +#endif + + + +static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) { + return malloc(size); +} + +static void _Block_assign_default(void *value, void **destptr) { + *destptr = value; +} + +static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) { +} + +static void _Block_do_nothing(const void *aBlock) { } + +static void _Block_retain_object_default(const void *ptr) { +} + +static void _Block_release_object_default(const void *ptr) { +} + +static void _Block_assign_weak_default(const void *ptr, void *dest) { +#if !TARGET_OS_WIN32 + *(long *)dest = (long)ptr; +#else + *(void **)dest = (void *)ptr; +#endif +} + +static void _Block_memmove_default(void *dst, void *src, unsigned long size) { + memmove(dst, src, (size_t)size); +} + +static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) { + void **destp = (void **)dest; + void **srcp = (void **)src; + while (size) { + _Block_assign_default(*srcp, destp); + destp++; + srcp++; + size -= sizeof(void *); + } +} + +static void _Block_destructInstance_default(const void *aBlock) {} + +/************************************************************************** +GC support callout functions - initially set to stub routines +***************************************************************************/ + +static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default; +static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free; +static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default; +static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default; +static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default; +static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default; +static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default; +static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default; +static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default; + + +/************************************************************************** +GC support SPI functions - called from ObjC runtime and CoreFoundation +***************************************************************************/ + +// Public SPI +// Called from objc-auto to turn on GC. 
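+// Everything in this file reaches allocation, assignment and refcounting
+// through the callout table above, so turning on GC or retain/release support
+// is just a matter of swapping those function pointers, as below.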
+// version 3, 4 arg, but changed 1st arg +void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign)(void *, void **), + void (*gc_assign_weak)(const void *, void *), + void (*gc_memmove)(void *, void *, unsigned long)) { + + isGC = true; + _Block_allocator = alloc; + _Block_deallocator = _Block_do_nothing; + _Block_assign = gc_assign; + _Block_copy_flag = BLOCK_IS_GC; + _Block_copy_class = _NSConcreteAutoBlock; + // blocks with ctors & dtors need to have the dtor run from a class with a finalizer + _Block_copy_finalizing_class = _NSConcreteFinalizingBlock; + _Block_setHasRefcount = setHasRefcount; + _Byref_flag_initial_value = BLOCK_BYREF_IS_GC; // no refcount + _Block_retain_object = _Block_do_nothing; + _Block_release_object = _Block_do_nothing; + _Block_assign_weak = gc_assign_weak; + _Block_memmove = gc_memmove; +} + +// transitional +void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign)(void *, void **), + void (*gc_assign_weak)(const void *, void *)) { + // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then + _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken); +} + + +// Called from objc-auto to alternatively turn on retain/release. +// Prior to this the only "object" support we can provide is for those +// super special objects that live in libSystem, namely dispatch queues. +// Blocks and Block_byrefs have their own special entry points. +BLOCK_EXPORT +void _Block_use_RR( void (*retain)(const void *), + void (*release)(const void *)) { + _Block_retain_object = retain; + _Block_release_object = release; + _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance"); +} + +// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions +// without defining a new entry point. +BLOCK_EXPORT +void _Block_use_RR2(const Block_callbacks_RR *callbacks) { + _Block_retain_object = callbacks->retain; + _Block_release_object = callbacks->release; + _Block_destructInstance = callbacks->destructInstance; +} + +/**************************************************************************** +Accessors for block descriptor fields +*****************************************************************************/ +#if 0 +static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock) +{ + return aBlock->descriptor; +} +#endif + +static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock) +{ + if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL; + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + return (struct Block_descriptor_2 *)desc; +} + +static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock) +{ + if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL; + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) { + desc += sizeof(struct Block_descriptor_2); + } + return (struct Block_descriptor_3 *)desc; +} + +static __inline bool _Block_has_layout(struct Block_layout *aBlock) { + if (! 
(aBlock->flags & BLOCK_HAS_SIGNATURE)) return false; + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) { + desc += sizeof(struct Block_descriptor_2); + } + return ((struct Block_descriptor_3 *)desc)->layout != NULL; +} + +static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock) +{ + struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); + if (!desc) return; + + (*desc->copy)(result, aBlock); // do fixup +} + +static void _Block_call_dispose_helper(struct Block_layout *aBlock) +{ + struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); + if (!desc) return; + + (*desc->dispose)(aBlock); +} + +/******************************************************************************* +Internal Support routines for copying +********************************************************************************/ + +#if !TARGET_OS_WIN32 +#pragma mark Copy/Release support +#endif + +// Copy, or bump refcount, of a block. If really copying, call the copy helper if present. +static void *_Block_copy_internal(const void *arg, const bool wantsOne) { + struct Block_layout *aBlock; + + if (!arg) return NULL; + + + // The following would be better done as a switch statement + aBlock = (struct Block_layout *)arg; + if (aBlock->flags & BLOCK_NEEDS_FREE) { + // latches on high + latching_incr_int(&aBlock->flags); + return aBlock; + } + else if (aBlock->flags & BLOCK_IS_GC) { + // GC refcounting is expensive so do most refcounting here. + if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) { + // Tell collector to hang on this - it will bump the GC refcount version + _Block_setHasRefcount(aBlock, true); + } + return aBlock; + } + else if (aBlock->flags & BLOCK_IS_GLOBAL) { + return aBlock; + } + + // Its a stack block. Make a copy. + if (!isGC) { + struct Block_layout *result = malloc(aBlock->descriptor->size); + if (!result) return NULL; + memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first + // reset refcount + result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed + result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1 + result->isa = _NSConcreteMallocBlock; + _Block_call_copy_helper(result, aBlock); + return result; + } + else { + // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne + // This allows the copy helper routines to make non-refcounted block copies under GC + int32_t flags = aBlock->flags; + bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0; + struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock)); + if (!result) return NULL; + memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first + // reset refcount + // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE. + flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed + if (wantsOne) + flags |= BLOCK_IS_GC | 2; + else + flags |= BLOCK_IS_GC; + result->flags = flags; + _Block_call_copy_helper(result, aBlock); + if (hasCTOR) { + result->isa = _NSConcreteFinalizingBlock; + } + else { + result->isa = _NSConcreteAutoBlock; + } + return result; + } +} + + + + + +// Runtime entry points for maintaining the sharing knowledge of byref data blocks. 
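+
+// Illustrative sketch of the forwarding dance for a hypothetical
+// `__block int x` (the struct name is ours, the lowering is the compiler's):
+//
+//     struct Block_byref_x {
+//         void *isa;
+//         struct Block_byref_x *forwarding;
+//         volatile int32_t flags;
+//         uint32_t size;
+//         int x;
+//     };
+//
+// On the stack, forwarding points at the byref itself. The first copy to the
+// heap (below) repoints both the heap copy's and the stack copy's forwarding
+// at the heap copy, which is why every access to x goes through
+// byref->forwarding first.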
+ +// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data +// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr. +// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it. +// Otherwise we need to copy it and update the stack forwarding pointer +static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) { + struct Block_byref **destp = (struct Block_byref **)dest; + struct Block_byref *src = (struct Block_byref *)arg; + + if (src->forwarding->flags & BLOCK_BYREF_IS_GC) { + ; // don't need to do any more work + } + else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) { + // src points to stack + bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)); + // if its weak ask for an object (only matters under GC) + struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak); + copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack + copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier) + src->forwarding = copy; // patch stack to point to heap copy + copy->size = src->size; + if (isWeak) { + copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning + } + if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { + // Trust copy helper to copy everything of interest + // If more than one field shows up in a byref block this is wrong XXX + struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1); + struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1); + copy2->byref_keep = src2->byref_keep; + copy2->byref_destroy = src2->byref_destroy; + + if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) { + struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1); + struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1); + copy3->layout = src3->layout; + } + + (*src2->byref_keep)(copy, src); + } + else { + // just bits. Blast 'em using _Block_memmove in case they're __strong + // This copy includes Block_byref_3, if any. + _Block_memmove(copy+1, src+1, + src->size - sizeof(struct Block_byref)); + } + } + // already copied to heap + else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) { + latching_incr_int(&src->forwarding->flags); + } + // assign byref data block pointer into new Block + _Block_assign(src->forwarding, (void **)destp); +} + +// Old compiler SPI +static void _Block_byref_release(const void *arg) { + struct Block_byref *byref = (struct Block_byref *)arg; + int32_t refcount; + + // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?) + byref = byref->forwarding; + + // To support C++ destructors under GC we arrange for there to be a finalizer for this + // by using an isa that directs the code to a finalizer that calls the byref_destroy method. 
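+    //
+    // Release sequence, in order: chase the forwarding pointer (done above),
+    // return early for stack, GC and global byrefs, then do a latched
+    // decrement; on the final release run byref_destroy (when the byref
+    // carries copy/dispose helpers) before handing the memory to
+    // _Block_deallocator.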
+ if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) { + return; // stack or GC or global + } + refcount = byref->flags & BLOCK_REFCOUNT_MASK; + os_assert(refcount); + if (latching_decr_int_should_deallocate(&byref->flags)) { + if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { + struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1); + (*byref2->byref_destroy)(byref); + } + _Block_deallocator((struct Block_layout *)byref); + } +} + + +/************************************************************ + * + * API supporting SPI + * _Block_copy, _Block_release, and (old) _Block_destroy + * + ***********************************************************/ + +#if !TARGET_OS_WIN32 +#pragma mark SPI/API +#endif + +BLOCK_EXPORT +void *_Block_copy(const void *arg) { + return _Block_copy_internal(arg, true); +} + + +// API entry point to release a copied Block +BLOCK_EXPORT +void _Block_release(const void *arg) { + struct Block_layout *aBlock = (struct Block_layout *)arg; + if (!aBlock + || (aBlock->flags & BLOCK_IS_GLOBAL) + || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0) + ) return; + if (aBlock->flags & BLOCK_IS_GC) { + if (latching_decr_int_now_zero(&aBlock->flags)) { + // Tell GC we no longer have our own refcounts. GC will decr its refcount + // and unless someone has done a CFRetain or marked it uncollectable it will + // now be subject to GC reclamation. + _Block_setHasRefcount(aBlock, false); + } + } + else if (aBlock->flags & BLOCK_NEEDS_FREE) { + if (latching_decr_int_should_deallocate(&aBlock->flags)) { + _Block_call_dispose_helper(aBlock); + _Block_destructInstance(aBlock); + _Block_deallocator(aBlock); + } + } +} + +BLOCK_EXPORT +bool _Block_tryRetain(const void *arg) { + struct Block_layout *aBlock = (struct Block_layout *)arg; + return latching_incr_int_not_deallocating(&aBlock->flags); +} + +BLOCK_EXPORT +bool _Block_isDeallocating(const void *arg) { + struct Block_layout *aBlock = (struct Block_layout *)arg; + return (aBlock->flags & BLOCK_DEALLOCATING) != 0; +} + +// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers +static void _Block_destroy(const void *arg) { + struct Block_layout *aBlock; + if (!arg) return; + aBlock = (struct Block_layout *)arg; + if (aBlock->flags & BLOCK_IS_GC) { + // assert(aBlock->Block_flags & BLOCK_HAS_CTOR); + return; // ignore, we are being called because of a DTOR + } + _Block_release(aBlock); +} + + + +/************************************************************ + * + * SPI used by other layers + * + ***********************************************************/ + +// SPI, also internal. Called from NSAutoBlock only under GC +BLOCK_EXPORT +void *_Block_copy_collectable(const void *aBlock) { + return _Block_copy_internal(aBlock, false); +} + + +// SPI +BLOCK_EXPORT +size_t Block_size(void *aBlock) { + return ((struct Block_layout *)aBlock)->descriptor->size; +} + +BLOCK_EXPORT +bool _Block_use_stret(void *aBlock) { + struct Block_layout *layout = (struct Block_layout *)aBlock; + + int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET; + return (layout->flags & requiredFlags) == requiredFlags; +} + +// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit. +BLOCK_EXPORT +bool _Block_has_signature(void *aBlock) { + return _Block_signature(aBlock) ? 
true : false; +} + +BLOCK_EXPORT +const char * _Block_signature(void *aBlock) +{ + struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock); + if (!desc3) return NULL; + + return desc3->signature; +} + +BLOCK_EXPORT +const char * _Block_layout(void *aBlock) +{ + // Don't return extended layout to callers expecting GC layout + struct Block_layout *layout = (struct Block_layout *)aBlock; + if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL; + + struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock); + if (!desc3) return NULL; + + return desc3->layout; +} + +BLOCK_EXPORT +const char * _Block_extended_layout(void *aBlock) +{ + // Don't return GC layout to callers expecting extended layout + struct Block_layout *layout = (struct Block_layout *)aBlock; + if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL; + + struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock); + if (!desc3) return NULL; + + // Return empty string (all non-object bytes) instead of NULL + // so callers can distinguish "empty layout" from "no layout". + if (!desc3->layout) return ""; + else return desc3->layout; +} + +#if !TARGET_OS_WIN32 +#pragma mark Compiler SPI entry points +#endif + + +/******************************************************* + +Entry points used by the compiler - the real API! + + +A Block can reference four different kinds of things that require help when the Block is copied to the heap. +1) C++ stack based objects +2) References to Objective-C objects +3) Other Blocks +4) __block variables + +In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest. + +The flags parameter of _Block_object_assign and _Block_object_dispose is set to + * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object, + * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and + * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable. +If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16) + +So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24. + +When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied. 
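+
+As an illustrative sketch (hypothetical name; the real helper is
+compiler-generated), the byref copy helper for a __block id variable
+conceptually does:
+
+    static void byref_obj_copy(void *dst, void *src) {
+        // 128+3 == 0x83, per the table below
+        _Block_object_assign(dst, src, BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT);
+    }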
+ +So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities: + __block id 128+3 (0x83) + __block (^Block) 128+7 (0x87) + __weak __block id 128+3+16 (0x93) + __weak __block (^Block) 128+7+16 (0x97) + + +********************************************************/ + +// +// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point +// to do the assignment. +// +BLOCK_EXPORT +void _Block_object_assign(void *destAddr, const void *object, const int flags) { + switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { + case BLOCK_FIELD_IS_OBJECT: + /******* + id object = ...; + [^{ object; } copy]; + ********/ + + _Block_retain_object(object); + _Block_assign((void *)object, destAddr); + break; + + case BLOCK_FIELD_IS_BLOCK: + /******* + void (^object)(void) = ...; + [^{ object; } copy]; + ********/ + + _Block_assign(_Block_copy_internal(object, false), destAddr); + break; + + case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: + case BLOCK_FIELD_IS_BYREF: + /******* + // copy the onstack __block container to the heap + __block ... x; + __weak __block ... x; + [^{ x; } copy]; + ********/ + + _Block_byref_assign_copy(destAddr, object, flags); + break; + + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: + /******* + // copy the actual field held in the __block container + __block id object; + __block void (^object)(void); + [^{ object; } copy]; + ********/ + + // under manual retain release __block object/block variables are dangling + _Block_assign((void *)object, destAddr); + break; + + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: + /******* + // copy the actual field held in the __block container + __weak __block id object; + __weak __block void (^object)(void); + [^{ object; } copy]; + ********/ + + _Block_assign_weak(object, destAddr); + break; + + default: + break; + } +} + +// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point +// to help dispose of the contents +// Used initially only for __attribute__((NSObject)) marked pointers. 
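+// Dispose mirrors assign case by case: it releases what BLOCK_FIELD_IS_OBJECT
+// retained, destroys the copy BLOCK_FIELD_IS_BLOCK made, and drops the byref
+// container for BLOCK_FIELD_IS_BYREF; the BLOCK_BYREF_CALLER cases are
+// deliberate no-ops, matching the dangling assigns above.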
+BLOCK_EXPORT +void _Block_object_dispose(const void *object, const int flags) { + switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { + case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: + case BLOCK_FIELD_IS_BYREF: + // get rid of the __block data structure held in a Block + _Block_byref_release(object); + break; + case BLOCK_FIELD_IS_BLOCK: + _Block_destroy(object); + break; + case BLOCK_FIELD_IS_OBJECT: + _Block_release_object(object); + break; + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: + break; + default: + break; + } +} diff --git a/src/Makefile.am b/src/Makefile.am index c417aec..9848c6b 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -60,12 +60,20 @@ EXTRA_libdispatch_la_DEPENDENCIES= AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ - $(MARCH_FLAGS) $(KQUEUE_CFLAGS) $(BSD_OVERLAY_CFLAGS) -AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) + $(MARCH_FLAGS) $(BSD_OVERLAY_CFLAGS) +if DISPATCH_ENABLE_ASSERTS +DISPATCH_CFLAGS+=-DDISPATCH_DEBUG=1 +endif +AM_CFLAGS= $(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) -AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) +AM_CXXFLAGS=$(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) +if BUILD_OWN_KQUEUES + KQUEUE_LIBS+=$(top_builddir)/libkqueue/libkqueue.la + KQUEUE_CFLAGS+=-I$(top_srcdir)/libkqueue/include +endif + if BUILD_OWN_PTHREAD_WORKQUEUES PTHREAD_WORKQUEUE_LIBS=$(top_builddir)/libpwq/libpthread_workqueue.la PTHREAD_WORKQUEUE_CFLAGS=-I$(top_srcdir)/libpwq/include @@ -75,8 +83,15 @@ if HAVE_PTHREAD_WORKQUEUES endif endif +if BUILD_OWN_BLOCKS_RUNTIME +libdispatch_la_SOURCES+= BlocksRuntime/data.c BlocksRuntime/runtime.c +CBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime +CXXBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime +BLOCKS_RUNTIME_LIBS=-ldl +endif + libdispatch_la_LDFLAGS=-avoid-version -libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) +libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS) if HAVE_DARWIN_LD libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \ @@ -149,9 +164,13 @@ SWIFT_GEN_FILES= \ $(SWIFT_OBJ_FILES:%=%.~partial.swiftdoc) \ $(SWIFT_OBJ_FILES:%=%.~partial.swiftdeps) -SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.map -I$(abs_top_srcdir) -Xcc -fblocks +SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.modulemap -I$(abs_top_srcdir) -Xcc -fblocks +if DISPATCH_ENABLE_OPTIMIZATION +SWIFTC_FLAGS+=-O +endif -$(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift +$(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift $(SWIFTC) + @rm -f $@ $(SWIFTC) -frontend -c $(SWIFT_ABS_SRC_FILES) -primary-file $< \ $(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \ -o $@ -emit-module-path $@.~partial.swiftmodule \ @@ -159,7 +178,8 @@ $(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift -emit-reference-dependencies-path $@.swiftdeps \ -module-cache-path $(top_builddir) -$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES) 
+$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES) $(SWIFTC) + @rm -f $@ $(SWIFTC) -frontend -emit-module $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \ $(SWIFTC_FLAGS) -module-cache-path $(top_builddir) -module-link-name dispatch \ -o $@ -emit-module-doc-path $(@:%.swiftmodule=%.swiftdoc) diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs index 986533c..7ed7958 100644 --- a/src/firehose/firehose.defs +++ b/src/firehose/firehose.defs @@ -35,7 +35,8 @@ register( comm_recvp : mach_port_move_receive_t; comm_sendp : mach_port_make_send_t; extra_info_port : mach_port_move_send_t; - extra_info_size : mach_vm_size_t + extra_info_size : mach_vm_size_t; + ServerAuditToken atoken : audit_token_t ); routine diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index 1305bde..a9b5af2 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -71,9 +71,10 @@ static void _dispatch_gate_wait(dispatch_gate_t l, uint32_t flags); #include #include #include +#include // os/internal/atomic.h #include // #include // -#include // os/internal/atomic.h +#include // #include "os/firehose_buffer_private.h" #include "firehose_buffer_internal.h" #include "firehose_inline_internal.h" @@ -93,14 +94,11 @@ _Static_assert(offsetof(firehose_stream_state_u, fss_gate) == offsetof(firehose_stream_state_u, fss_allocator), "fss_gate and fss_allocator alias"); _Static_assert(sizeof(struct firehose_buffer_header_s) == - FIREHOSE_BUFFER_CHUNK_SIZE, + FIREHOSE_CHUNK_SIZE, "firehose buffer header must be 4k"); _Static_assert(offsetof(struct firehose_buffer_header_s, fbh_unused) <= - FIREHOSE_BUFFER_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, + FIREHOSE_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, "we must have enough space for the libtrace header"); -_Static_assert(sizeof(struct firehose_buffer_chunk_s) == - FIREHOSE_BUFFER_CHUNK_SIZE, - "firehose buffer chunks must be 4k"); _Static_assert(powerof2(FIREHOSE_BUFFER_CHUNK_COUNT), "CHUNK_COUNT Must be a power of two"); _Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, @@ -109,14 +107,8 @@ _Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, _Static_assert(powerof2(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT), "madvise chunk count must be a power of two"); #endif -_Static_assert(howmany(sizeof(struct firehose_tracepoint_s), - sizeof(struct firehose_buffer_chunk_s)) < 255, - "refcount assumes that you cannot have more than 255 tracepoints"); -// FIXME: we should have an event-count instead here _Static_assert(sizeof(struct firehose_buffer_stream_s) == 128, "firehose buffer stream must be small (single cacheline if possible)"); -_Static_assert(offsetof(struct firehose_buffer_chunk_s, fbc_data) % 8 == 0, - "Page header is 8 byte aligned"); _Static_assert(sizeof(struct firehose_tracepoint_s) == 24, "tracepoint header should be exactly 24 bytes"); #endif @@ -177,21 +169,19 @@ firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT; sendp = firehose_mach_port_allocate(opts, fb); - if (oldsendp && _voucher_libtrace_hooks->vah_version >= 3) { - if (_voucher_libtrace_hooks->vah_get_reconnect_info) { - kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); - if (likely(kr == KERN_SUCCESS) && addr && size) { - extra_info_size = size; - kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, - flags, &extra_info_port, MACH_PORT_NULL); - if (unlikely(kr)) { - // the client probably has some form of memory 
corruption - // and/or a port leak - DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); - } - kr = mach_vm_deallocate(mach_task_self(), addr, size); - (void)dispatch_assume_zero(kr); + if (oldsendp && _voucher_libtrace_hooks->vah_get_reconnect_info) { + kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); + if (likely(kr == KERN_SUCCESS) && addr && size) { + extra_info_size = size; + kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, + flags, &extra_info_port, MACH_PORT_NULL); + if (unlikely(kr)) { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); } + kr = mach_vm_deallocate(mach_task_self(), addr, size); + (void)dispatch_assume_zero(kr); } } @@ -261,7 +251,7 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) } } - uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_BUFFER_CHUNK_SIZE); + uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_CHUNK_SIZE); if (ratio > 1) { total = roundup(total, ratio); } @@ -299,7 +289,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, vm_addr = vm_page_size; const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT * - FIREHOSE_BUFFER_CHUNK_SIZE; + FIREHOSE_CHUNK_SIZE; if (slowpath(madvise_bytes % PAGE_SIZE)) { DISPATCH_INTERNAL_CRASH(madvise_bytes, "Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE"); @@ -320,7 +310,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, vm_offset_t vm_addr = 0; vm_size_t size; - size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; __firehose_allocate(&vm_addr, size); (void)logd_port; (void)unique_pid; @@ -487,12 +477,7 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, return; } - bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | - ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); - state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header, - fbh_bank.fbb_state.fbs_atomic_state, bank_updates, relaxed); - if (state_out) *state_out = state; - + __firehose_critical_region_enter(); os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, { ntail = otail; @@ -500,6 +485,15 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, ntail.frp_io_flushed += io_delta; ntail.frp_mem_flushed += mem_delta; }); + + bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | + ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); + state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header, + fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); + __firehose_critical_region_leave(); + + if (state_out) *state_out = state; + if (async_notif) { if (io_delta) { os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed); @@ -611,18 +605,18 @@ firehose_buffer_update_limits(firehose_buffer_t fb) OS_ALWAYS_INLINE static inline firehose_tracepoint_t -firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, +firehose_buffer_chunk_init(firehose_chunk_t fc, firehose_tracepoint_query_t ask, uint8_t **privptr) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - uint16_t pub_offs = offsetof(struct firehose_buffer_chunk_s, fbc_data); - uint16_t priv_offs = FIREHOSE_BUFFER_CHUNK_SIZE; + uint16_t pub_offs = offsetof(struct firehose_chunk_s, fc_data); + uint16_t priv_offs = FIREHOSE_CHUNK_SIZE; pub_offs += roundup(ft_size + ask->pubsize, 8); priv_offs 
-= ask->privsize; - if (fbc->fbc_pos.fbc_atomic_pos) { + if (fc->fc_pos.fcp_atomic_pos) { // Needed for process death handling (recycle-reuse): // No atomic fences required, we merely want to make sure the observers // will see memory effects in program (asm) order. @@ -632,32 +626,32 @@ firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, // and it is dirty, when crawling the chunk, we don't see remnants of // other tracepoints // - // We only do that when the fbc_pos is non zero, because zero means + // We only do that when the fc_pos is non zero, because zero means // we just faulted the chunk, and the kernel already bzero-ed it. - bzero(fbc->fbc_data, sizeof(fbc->fbc_data)); + bzero(fc->fc_data, sizeof(fc->fc_data)); } dispatch_compiler_barrier(); // boot starts mach absolute time at 0, and // wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP // breaks firehose_buffer_stream_flush() assumptions if (ask->stamp > FIREHOSE_STAMP_SLOP) { - fbc->fbc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; + fc->fc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; } else { - fbc->fbc_timestamp = 0; + fc->fc_timestamp = 0; } - fbc->fbc_pos = (firehose_buffer_pos_u){ - .fbc_next_entry_offs = pub_offs, - .fbc_private_offs = priv_offs, - .fbc_refcnt = 1, - .fbc_qos_bits = firehose_buffer_qos_bits_propagate(), - .fbc_stream = ask->stream, - .fbc_flag_io = ask->for_io, + fc->fc_pos = (firehose_chunk_pos_u){ + .fcp_next_entry_offs = pub_offs, + .fcp_private_offs = priv_offs, + .fcp_refcnt = 1, + .fcp_qos = firehose_buffer_qos_bits_propagate(), + .fcp_stream = ask->stream, + .fcp_flag_io = ask->for_io, }; if (privptr) { - *privptr = fbc->fbc_start + priv_offs; + *privptr = fc->fc_start + priv_offs; } - return (firehose_tracepoint_t)fbc->fbc_data; + return (firehose_tracepoint_t)fc->fc_data; } OS_NOINLINE @@ -671,14 +665,18 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, uint64_t stamp_and_len; if (fastpath(ref)) { - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - ft = firehose_buffer_chunk_init(fbc, ask, privptr); + firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); + ft = firehose_buffer_chunk_init(fc, ask, privptr); // Needed for process death handling (tracepoint-begin): // write the length before making the chunk visible - stamp_and_len = ask->stamp - fbc->fbc_timestamp; + stamp_and_len = ask->stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)ask->pubsize << 48; os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); - +#ifdef KERNEL + ft->ft_thread = thread_tid(current_thread()); +#else + ft->ft_thread = _pthread_threadid_self_np_direct(); +#endif if (ask->stream == firehose_stream_metadata) { os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, 1ULL << ref, relaxed); @@ -750,7 +748,7 @@ static inline uint16_t firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) { const size_t madv_size = - FIREHOSE_BUFFER_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; + FIREHOSE_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; const size_t madv_mask = (1ULL << FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT) - 1; @@ -779,12 +777,12 @@ OS_NOINLINE void firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) { - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); uint16_t volatile *fbh_ring; uint16_t volatile *fbh_ring_head; uint16_t head, gen, dummy, idx; - firehose_buffer_pos_u fbc_pos = fbc->fbc_pos; - bool for_io = fbc_pos.fbc_flag_io; + 
firehose_chunk_pos_u fc_pos = fc->fc_pos; + bool for_io = fc_pos.fcp_flag_io; if (for_io) { fbh_ring = fb->fb_header.fbh_io_ring; @@ -871,13 +869,22 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) })); } - pthread_priority_t pp = fbc_pos.fbc_qos_bits; + pthread_priority_t pp = fc_pos.fcp_qos; pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL), for_io); #endif } +#ifndef KERNEL +void +firehose_buffer_force_connect(firehose_buffer_t fb) +{ + mach_port_t sendp = fb->fb_header.fbh_sendp; + if (sendp == MACH_PORT_NULL) firehose_client_reconnect(fb, MACH_PORT_NULL); +} +#endif + OS_ALWAYS_INLINE static inline uint16_t firehose_buffer_ring_try_recycle(firehose_buffer_t fb) @@ -885,7 +892,7 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) firehose_ring_tail_u pos, old; uint16_t volatile *fbh_ring; uint16_t gen, ref, entry, tail; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; bool for_io; os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, @@ -923,14 +930,14 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) // and it is dirty, it is a chunk being written to that needs a flush gen = (entry & FIREHOSE_RING_POS_GEN_MASK) + FIREHOSE_RING_POS_GEN_INC; ref = entry & FIREHOSE_RING_POS_IDX_MASK; - fbc = firehose_buffer_ref_to_chunk(fb, ref); + fc = firehose_buffer_ref_to_chunk(fb, ref); - if (!for_io && fbc->fbc_pos.fbc_stream == firehose_stream_metadata) { + if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) { os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, ~(1ULL << ref), relaxed); } - os_atomic_store2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_FULL_BIT, relaxed); + os_atomic_store2o(fc, fc_pos.fcp_atomic_pos, + FIREHOSE_CHUNK_POS_FULL_BIT, relaxed); dispatch_compiler_barrier(); os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); return ref; @@ -943,6 +950,7 @@ firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) { const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io); + const uint64_t bank_inc = FIREHOSE_BANK_INC(ask->for_io); firehose_buffer_bank_t const fbb = &fb->fb_header.fbh_bank; firehose_bank_state_u state; uint16_t fbs_max_ref; @@ -951,7 +959,7 @@ firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, if (!fastpath(ask->is_bank_ok)) { state.fbs_atomic_state = os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); - while (state.fbs_atomic_state & bank_unavail_mask) { + while ((state.fbs_atomic_state - bank_inc) & bank_unavail_mask) { firehose_client_send_push(fb, ask->for_io, &state); if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { // logd was unloaded, give up @@ -1020,7 +1028,7 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io); #ifndef KERNEL state.fbs_atomic_state = os_atomic_add_orig2o(fbb, - fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); + fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), acquire); if (fastpath(!(state.fbs_atomic_state & unavail_mask))) { ask->is_bank_ok = true; if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { @@ -1034,7 +1042,7 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, #else firehose_bank_state_u value; ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state, - state.fbs_atomic_state, value.fbs_atomic_state, relaxed, { + state.fbs_atomic_state, 
value.fbs_atomic_state, acquire, { value = state; if (slowpath((value.fbs_atomic_state & unavail_mask) != 0)) { os_atomic_rmw_loop_give_up(break); @@ -1067,32 +1075,6 @@ __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, privsize, privptr); } -firehose_tracepoint_t -__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, - uint64_t stamp, firehose_stream_t stream, - uint16_t pubsize, uint16_t privsize, uint8_t **privptr) -{ - - firehose_tracepoint_t ft; - long result; - - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, - pubsize, privsize, privptr); - if (fastpath(result > 0)) { - ft = (firehose_tracepoint_t)(fbc->fbc_start + result); - stamp -= fbc->fbc_timestamp; - stamp |= (uint64_t)pubsize << 48; - // Needed for process death handling (tracepoint-begin) - // see firehose_buffer_stream_chunk_install - os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); - dispatch_compiler_barrier(); - return ft; - } - else { - return NULL; - } -} - firehose_buffer_t __firehose_buffer_create(size_t *size) { @@ -1101,7 +1083,7 @@ __firehose_buffer_create(size_t *size) } if (size) { - *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; } return kernel_firehose_buffer; } @@ -1113,27 +1095,6 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); } -void -__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) -{ - firehose_buffer_pos_u pos; - - // Needed for process death handling (tracepoint-flush): - // We want to make sure the observers - // will see memory effects in program (asm) order. - // 1. write all the data to the tracepoint - // 2. 
write the tracepoint ID, so that seeing it means the tracepoint - // is valid - ft->ft_thread = thread_tid(current_thread()); - - // release barrier makes the log writes visible - os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); - pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); - return; -} - void __firehose_merge_updates(firehose_push_reply_t update) { diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h index db8e026..7679c8c 100644 --- a/src/firehose/firehose_buffer_internal.h +++ b/src/firehose/firehose_buffer_internal.h @@ -173,11 +173,11 @@ typedef struct firehose_buffer_header_s { dispatch_unfair_lock_s fbh_logd_lock; #endif uint64_t fbh_unused[0]; -} OS_ALIGNED(FIREHOSE_BUFFER_CHUNK_SIZE) *firehose_buffer_header_t; +} OS_ALIGNED(FIREHOSE_CHUNK_SIZE) *firehose_buffer_header_t; union firehose_buffer_u { struct firehose_buffer_header_s fb_header; - struct firehose_buffer_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; + struct firehose_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; }; // used to let the compiler pack these values in 1 or 2 registers @@ -206,6 +206,9 @@ firehose_buffer_update_limits(firehose_buffer_t fb); void firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref); +void +firehose_buffer_force_connect(firehose_buffer_t fb); + #endif #endif // __FIREHOSE_BUFFER_INTERNAL__ diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index 9576882..5f89e0d 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -142,36 +142,28 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, #pragma mark firehose buffer OS_ALWAYS_INLINE -static inline firehose_buffer_chunk_t +static inline firehose_chunk_t firehose_buffer_chunk_for_address(void *addr) { - uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1); - return (firehose_buffer_chunk_t)chunk_addr; + uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1); + return (firehose_chunk_t)chunk_addr; } OS_ALWAYS_INLINE static inline uint16_t -firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc) +firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc) { return (uint16_t)(fbc - fb->fb_chunks); } OS_ALWAYS_INLINE -static inline firehose_buffer_chunk_t +static inline firehose_chunk_t firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref) { return fb->fb_chunks + ref; } #ifndef FIREHOSE_SERVER - -OS_ALWAYS_INLINE -static inline bool -firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size) -{ - return pos.fbc_next_entry_offs + size <= pos.fbc_private_offs; -} - #if DISPATCH_PURE_C OS_ALWAYS_INLINE @@ -188,84 +180,13 @@ firehose_buffer_qos_bits_propagate(void) #endif } -OS_ALWAYS_INLINE -static inline long -firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp, - firehose_stream_t stream, uint16_t pubsize, - uint16_t privsize, uint8_t **privptr) -{ - const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - firehose_buffer_pos_u orig, pos; - uint8_t qos_bits = firehose_buffer_qos_bits_propagate(); - bool reservation_failed, stamp_delta_fits; - - stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0; - - // no acquire barrier because the returned space is written to only - os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos, - orig.fbc_atomic_pos, pos.fbc_atomic_pos, 
relaxed, { - if (unlikely(orig.fbc_atomic_pos == 0)) { - // we acquired a really really old reference, and we probably - // just faulted in a new page - // FIXME: if/when we hit this we should try to madvise it back FREE - os_atomic_rmw_loop_give_up(return 0); - } - if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) { - // nothing to do if the chunk is full, or the stream doesn't match, - // in which case the thread probably: - // - loaded the chunk ref - // - been suspended a long while - // - read the chunk to find a very old thing - os_atomic_rmw_loop_give_up(return 0); - } - pos = orig; - pos.fbc_qos_bits |= qos_bits; - if (unlikely(!firehose_buffer_pos_fits(orig, - ft_size + pubsize + privsize) || !stamp_delta_fits)) { - pos.fbc_flag_full = true; - reservation_failed = true; - } else { - // using these *_INC macros is so that the compiler generates better - // assembly: using the struct individual fields forces the compiler - // to handle carry propagations, and we know it won't happen - pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) * - FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC; - pos.fbc_atomic_pos -= privsize * - FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC; - pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC; - const uint16_t minimum_payload_size = 16; - if (!firehose_buffer_pos_fits(pos, - roundup(ft_size + minimum_payload_size , 8))) { - // if we can't even have minimum_payload_size bytes of payload - // for the next tracepoint, just flush right away - pos.fbc_flag_full = true; - } - reservation_failed = false; - } - }); - - if (reservation_failed) { - if (pos.fbc_refcnt) { - // nothing to do, there is a thread writing that will pick up - // the "FULL" flag on flush and push as a consequence - return 0; - } - // caller must enqueue chunk - return -1; - } - if (privptr) { - *privptr = fbc->fbc_start + pos.fbc_private_offs; - } - return orig.fbc_next_entry_offs; -} - OS_ALWAYS_INLINE static inline void firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) { firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; firehose_stream_state_u old_state, new_state; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; uint64_t stamp = UINT64_MAX; // will cause the reservation to fail uint16_t ref; long result; @@ -275,11 +196,15 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) ref = old_state.fss_current; if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) { // there is no installed page, nothing to flush, go away +#ifndef KERNEL + firehose_buffer_force_connect(fb); +#endif return; } - fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL); + fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); + result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, + firehose_buffer_qos_bits_propagate(), 1, 0, NULL); if (likely(result < 0)) { firehose_buffer_ring_enqueue(fb, old_state.fss_current); } @@ -339,8 +264,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, { firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; firehose_stream_state_u old_state, new_state; - firehose_tracepoint_t ft; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; #if KERNEL bool failable = false; #endif @@ -356,18 +280,19 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, ref = old_state.fss_current; if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) { - fbc = 
firehose_buffer_ref_to_chunk(fb, ref); - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, + fc = firehose_buffer_ref_to_chunk(fb, ref); + result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, + firehose_buffer_qos_bits_propagate(), pubsize, privsize, privptr); if (likely(result > 0)) { - ft = (firehose_tracepoint_t)(fbc->fbc_start + result); - stamp -= fbc->fbc_timestamp; - stamp |= (uint64_t)pubsize << 48; - // Needed for process death handling (tracepoint-begin) - // see firehose_buffer_stream_chunk_install - os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); - dispatch_compiler_barrier(); - return ft; + uint64_t thread; +#ifdef KERNEL + thread = thread_tid(current_thread()); +#else + thread = _pthread_threadid_self_np_direct(); +#endif + return firehose_chunk_tracepoint_begin(fc, + stamp, pubsize, thread, result); } if (likely(result < 0)) { firehose_buffer_ring_enqueue(fb, old_state.fss_current); @@ -444,8 +369,7 @@ static inline void firehose_buffer_tracepoint_flush(firehose_buffer_t fb, firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) { - firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft); - firehose_buffer_pos_u pos; + firehose_chunk_t fc = firehose_buffer_chunk_for_address(ft); // Needed for process death handling (tracepoint-flush): // We want to make sure the observers @@ -453,17 +377,8 @@ firehose_buffer_tracepoint_flush(firehose_buffer_t fb, // 1. write all the data to the tracepoint // 2. write the tracepoint ID, so that seeing it means the tracepoint // is valid -#ifdef KERNEL - ft->ft_thread = thread_tid(current_thread()); -#else - ft->ft_thread = _pthread_threadid_self_np_direct(); -#endif - // release barrier makes the log writes visible - os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); - pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); - if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) { - firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc)); + if (firehose_chunk_tracepoint_end(fc, ft, ftid)) { + firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fc)); } } diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index a6be2fa..e27293e 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -41,6 +41,8 @@ static struct firehose_server_s { firehose_handler_t fs_handler; firehose_snapshot_t fs_snapshot; + bool fs_io_snapshot_started; + bool fs_mem_snapshot_started; int fs_kernel_fd; firehose_client_t fs_kernel_client; @@ -74,7 +76,7 @@ firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, push_reply.fpr_io_flushed_pos, relaxed); - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) { dispatch_assume_zero(errno); } @@ -157,7 +159,7 @@ static void firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) { firehose_buffer_t fb = fc->fc_buffer; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fbc; firehose_event_t evt; uint16_t volatile *fbh_ring; uint16_t flushed, ref, count = 0; @@ -172,7 +174,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) fbh_ring = fb->fb_header.fbh_io_ring; sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos; flushed = (uint16_t)fc->fc_io_flushed_pos; - if (fc->fc_needs_io_snapshot) { + if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) { 
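/*
 * [Editor's sketch] The open-coded reservation loop deleted from
 * firehose_inline_internal.h above now lives behind
 * firehose_chunk_tracepoint_try_reserve(). Its core is an atomic
 * read-modify-write loop over a packed position word. A minimal,
 * self-contained sketch of that pattern in plain C11 atomics follows;
 * the 32-bit layout and all names here are illustrative, not the real
 * firehose chunk format.
 */
#include <stdatomic.h>
#include <stdint.h>

struct toy_chunk_pos {
	// low 16 bits: next entry offset; high 16 bits: private-area offset
	_Atomic uint32_t pos;
};

// Carve `size` bytes out of the chunk; returns the offset reserved, or
// -1 when the chunk is full. Relaxed ordering suffices because callers
// only ever write into space they just reserved.
static long
toy_chunk_try_reserve(struct toy_chunk_pos *cp, uint16_t size)
{
	uint32_t ov = atomic_load_explicit(&cp->pos, memory_order_relaxed);
	uint32_t nv;
	do {
		uint16_t next = (uint16_t)ov;
		uint16_t priv = (uint16_t)(ov >> 16);
		if (next + size > priv) {
			return -1; // full: would collide with the private area
		}
		nv = (uint32_t)(next + size) | ((uint32_t)priv << 16);
	} while (!atomic_compare_exchange_weak_explicit(&cp->pos, &ov, nv,
			memory_order_relaxed, memory_order_relaxed));
	return (long)(uint16_t)ov; // offset where the reservation starts
}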
snapshot = server_config.fs_snapshot; } } else { @@ -182,7 +184,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) fbh_ring = fb->fb_header.fbh_mem_ring; sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos; flushed = (uint16_t)fc->fc_mem_flushed_pos; - if (fc->fc_needs_mem_snapshot) { + if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) { snapshot = server_config.fs_snapshot; } } @@ -209,7 +211,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK; ref = os_atomic_load(&fbh_ring[ref], relaxed); ref &= FIREHOSE_RING_POS_IDX_MASK; - } while (fc->fc_is_kernel && !ref); + } while (!fc->fc_pid && !ref); count++; if (!ref) { _dispatch_debug("Ignoring invalid page reference in ring: %d", ref); @@ -217,10 +219,17 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) } fbc = firehose_buffer_ref_to_chunk(fb, ref); + if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + // serialize with firehose_client_metadata_stream_peek + os_unfair_lock_lock(&fc->fc_lock); + } server_config.fs_handler(fc, evt, fbc); if (slowpath(snapshot)) { snapshot->handler(fc, evt, fbc); } + if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + os_unfair_lock_unlock(&fc->fc_lock); + } // clients not using notifications (single threaded) always drain fully // because they use all their limit, always } while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot); @@ -238,7 +247,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) client_flushed = os_atomic_load2o(&fb->fb_header, fbh_ring_tail.frp_mem_flushed, relaxed); } - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { // will fire firehose_client_notify() because port is MACH_PORT_DEAD port = fc->fc_sendp; } else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) { @@ -253,7 +262,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) if (port) { firehose_client_notify(fc, port); } - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { if (!(flags & FIREHOSE_DRAIN_POLL)) { // see firehose_client_kernel_source_handle_event dispatch_resume(fc->fc_kernel_source); @@ -283,7 +292,7 @@ corrupt: // from now on all IO/mem drains depending on `for_io` will be no-op // (needs__snapshot: false, memory_corrupted: true). we can safely // silence the corresponding source of drain wake-ups. - if (!fc->fc_is_kernel) { + if (fc->fc_pid) { dispatch_source_cancel(for_io ? 
fc->fc_io_source : fc->fc_mem_source); } } @@ -327,6 +336,8 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL); TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry); + dispatch_release(fc->fc_mach_channel); + fc->fc_mach_channel = NULL; fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; _os_object_release(&fc->fc_as_os_object); @@ -383,26 +394,26 @@ firehose_client_handle_death(void *ctxt) // Then look at all the allocated pages not seen in the ring while (bitmap) { uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + if (fbc->fc_start + fbc_length <= fbc->fc_data) { // this page has its "recycle-requeue" done, but hasn't gone // through "recycle-reuse", or it has no data, ditch it continue; } - if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { // this thing has data, but the first tracepoint is unreadable // so also just ditch it continue; } - if (!fbc->fbc_pos.fbc_flag_io) { + if (!fbc->fc_pos.fcp_flag_io) { mem_bitmap |= 1ULL << ref; continue; } server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc); - if (fc->fc_needs_io_snapshot && snapshot) { + if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) { snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc); } } @@ -416,11 +427,11 @@ firehose_client_handle_death(void *ctxt) while (mem_bitmap_copy) { uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); mem_bitmap_copy &= ~(1ULL << ref); server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc); - if (fc->fc_needs_mem_snapshot && snapshot) { + if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) { snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc); } } @@ -436,16 +447,10 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, { mach_msg_header_t *msg_hdr; firehose_client_t fc = ctx; - mach_port_t oldsendp, oldrecvp; - - if (dmsg) { - msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); - oldsendp = msg_hdr->msgh_remote_port; - oldrecvp = msg_hdr->msgh_local_port; - } switch (reason) { case DISPATCH_MACH_MESSAGE_RECEIVED: + msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) { _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", firehose_client_get_unique_pid(fc, NULL)); @@ -455,26 +460,8 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, } break; - case DISPATCH_MACH_DISCONNECTED: - if (oldsendp) { - if (slowpath(oldsendp != fc->fc_sendp)) { - DISPATCH_INTERNAL_CRASH(oldsendp, - "disconnect event about unknown send-right"); - } - firehose_mach_port_send_release(fc->fc_sendp); - fc->fc_sendp = MACH_PORT_NULL; - } - if (oldrecvp) { - if (slowpath(oldrecvp != fc->fc_recvp)) { - DISPATCH_INTERNAL_CRASH(oldrecvp, - "disconnect event about unknown receive-right"); - } - firehose_mach_port_recv_dispose(fc->fc_recvp, fc); - fc->fc_recvp = MACH_PORT_NULL; - } - if 
(fc->fc_recvp == MACH_PORT_NULL && fc->fc_sendp == MACH_PORT_NULL) { - firehose_client_cancel(fc); - } + case DISPATCH_MACH_CANCELED: + firehose_client_cancel(fc); break; } } @@ -502,7 +489,7 @@ firehose_client_resume(firehose_client_t fc, dispatch_assert_queue(server_config.fs_io_drain_queue); TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry); server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci); - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { dispatch_activate(fc->fc_kernel_source); } else { dispatch_mach_connect(fc->fc_mach_channel, @@ -515,16 +502,19 @@ firehose_client_resume(firehose_client_t fc, static void firehose_client_cancel(firehose_client_t fc) { - dispatch_mach_t dm; dispatch_block_t block; _dispatch_debug("client died (unique_pid: 0x%llx", firehose_client_get_unique_pid(fc, NULL)); - dm = fc->fc_mach_channel; - fc->fc_mach_channel = NULL; - dispatch_release(dm); - + if (MACH_PORT_VALID(fc->fc_sendp)) { + firehose_mach_port_send_release(fc->fc_sendp); + fc->fc_sendp = MACH_PORT_NULL; + } + if (MACH_PORT_VALID(fc->fc_recvp)) { + firehose_mach_port_recv_dispose(fc->fc_recvp, fc); + fc->fc_recvp = MACH_PORT_NULL; + } fc->fc_use_notifs = false; dispatch_source_cancel(fc->fc_io_source); dispatch_source_cancel(fc->fc_mem_source); @@ -552,8 +542,21 @@ _firehose_client_create(firehose_buffer_t fb) return fc; } +#pragma pack(4) +typedef struct firehose_token_s { + uid_t auid; + uid_t euid; + gid_t egid; + uid_t ruid; + gid_t rgid; + pid_t pid; + au_asid_t asid; + dev_t execcnt; +} *firehose_token_t; +#pragma pack() + static firehose_client_t -firehose_client_create(firehose_buffer_t fb, +firehose_client_create(firehose_buffer_t fb, firehose_token_t token, mach_port_t comm_recvp, mach_port_t comm_sendp) { uint64_t unique_pid = fb->fb_header.fbh_uniquepid; @@ -561,6 +564,9 @@ firehose_client_create(firehose_buffer_t fb, dispatch_mach_t dm; dispatch_source_t ds; + fc->fc_pid = token->pid ? token->pid : ~0; + fc->fc_euid = token->euid; + fc->fc_pidversion = token->execcnt; ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, server_config.fs_mem_drain_queue); _os_object_retain_internal_inline(&fc->fc_as_os_object); @@ -617,12 +623,11 @@ firehose_kernel_client_create(void) DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer"); } if (fb_map.fbmi_size != - FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE) { + FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) { DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size"); } fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr); - fc->fc_is_kernel = true; ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, fs->fs_ipc_queue); dispatch_set_context(ds, fc); @@ -663,12 +668,21 @@ uint64_t firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out) { firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header; - if (fc->fc_is_kernel) { - if (pid_out) *pid_out = 0; - return 0; - } - if (pid_out) *pid_out = fbh->fbh_pid ?: ~(pid_t)0; - return fbh->fbh_uniquepid ?: ~0ull; + if (pid_out) *pid_out = fc->fc_pid; + if (!fc->fc_pid) return 0; + return fbh->fbh_uniquepid ? 
fbh->fbh_uniquepid : ~0ull;
+}
+
+uid_t
+firehose_client_get_euid(firehose_client_t fc)
+{
+	return fc->fc_euid;
+}
+
+int
+firehose_client_get_pid_version(firehose_client_t fc)
+{
+	return fc->fc_pidversion;
 }
 
 void *
@@ -760,6 +774,17 @@ firehose_server_assert_spi_version(uint32_t spi_version)
 	}
 }
 
+bool
+firehose_server_has_ever_flushed_pages(void)
+{
+	// Use the IO pages flushed count from the kernel client as an
+	// approximation for whether the firehose has ever flushed pages during
+	// this boot. logd uses this to detect the first time it starts after a
+	// fresh boot.
+	firehose_client_t fhc = server_config.fs_kernel_client;
+	return !fhc || fhc->fc_io_flushed_pos > 0;
+}
+
 void
 firehose_server_resume(void)
 {
@@ -777,52 +802,83 @@ firehose_server_resume(void)
 			MACH_PORT_NULL, NULL);
 }
 
+OS_NOINLINE
+static void
+_firehose_server_cancel(void *ctxt OS_UNUSED)
+{
+	firehose_client_t fc;
+	TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) {
+		dispatch_mach_cancel(fc->fc_mach_channel);
+	}
+}
+
+void
+firehose_server_cancel(void)
+{
+	dispatch_mach_cancel(server_config.fs_mach_channel);
+	dispatch_async_f(server_config.fs_io_drain_queue, NULL,
+			_firehose_server_cancel);
+}
+
+dispatch_queue_t
+firehose_server_copy_queue(firehose_server_queue_t which)
+{
+	dispatch_queue_t dq;
+	switch (which) {
+	case FIREHOSE_SERVER_QUEUE_IO:
+		dq = server_config.fs_io_drain_queue;
+		break;
+	case FIREHOSE_SERVER_QUEUE_MEMORY:
+		dq = server_config.fs_mem_drain_queue;
+		break;
+	default:
+		DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type");
+	}
+	dispatch_retain(dq);
+	return dq;
+}
+
 #pragma mark -
 #pragma mark firehose snapshot and peeking
 
 void
 firehose_client_metadata_stream_peek(firehose_client_t fc,
-		firehose_event_t context, bool (^peek_should_start)(void),
-		bool (^peek)(firehose_buffer_chunk_t fbc))
+		OS_UNUSED firehose_event_t context, bool (^peek_should_start)(void),
+		bool (^peek)(firehose_chunk_t fbc))
 {
-	if (context != FIREHOSE_EVENT_MEM_BUFFER_RECEIVED) {
-		return dispatch_sync(server_config.fs_mem_drain_queue, ^{
-			firehose_client_metadata_stream_peek(fc,
-					FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, peek_should_start, peek);
-		});
-	}
+	os_unfair_lock_lock(&fc->fc_lock);
 
-	if (peek_should_start && !peek_should_start()) {
-		return;
-	}
+	if (peek_should_start && peek_should_start()) {
+		firehose_buffer_t fb = fc->fc_buffer;
+		firehose_buffer_header_t fbh = &fb->fb_header;
+		uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap;
 
-	firehose_buffer_t fb = fc->fc_buffer;
-	firehose_buffer_header_t fbh = &fb->fb_header;
-	uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap;
+		while (bitmap) {
+			uint16_t ref = firehose_bitmap_first_set(bitmap);
+			firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+			uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;
 
-	while (bitmap) {
-		uint16_t ref = firehose_bitmap_first_set(bitmap);
-		firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
-		uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs;
-
-		bitmap &= ~(1ULL << ref);
-		if (fbc->fbc_start + fbc_length <= fbc->fbc_data) {
-			// this page has its "recycle-requeue" done, but hasn't gone
-			// through "recycle-reuse", or it has no data, ditch it
-			continue;
-		}
-		if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) {
-			// this thing has data, but the first tracepoint is unreadable
-			// so also just ditch it
-			continue;
-		}
-		if (fbc->fbc_pos.fbc_stream != firehose_stream_metadata) {
-			continue;
-		}
-		if (!peek(fbc)) {
-			break
+			bitmap &= ~(1ULL << ref);
+			if
(fbc->fc_start + fbc_length <= fbc->fc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (fbc->fc_pos.fcp_stream != firehose_stream_metadata) { + continue; + } + if (!peek(fbc)) { + break; + } } } + + os_unfair_lock_unlock(&fc->fc_lock); } OS_NOINLINE OS_COLD @@ -872,21 +928,21 @@ firehose_client_snapshot_finish(firehose_client_t fc, // Then look at all the allocated pages not seen in the ring while (bitmap) { uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + if (fbc->fc_start + fbc_length <= fbc->fc_data) { // this page has its "recycle-requeue" done, but hasn't gone // through "recycle-reuse", or it has no data, ditch it continue; } - if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { // this thing has data, but the first tracepoint is unreadable // so also just ditch it continue; } - if (fbc->fbc_pos.fbc_flag_io != for_io) { + if (fbc->fc_pos.fcp_flag_io != for_io) { continue; } snapshot->handler(fc, evt, fbc); @@ -903,11 +959,12 @@ firehose_snapshot_start(void *ctxt) // 0. we need to be on the IO queue so that client connection and/or death // cannot happen concurrently dispatch_assert_queue(server_config.fs_io_drain_queue); + server_config.fs_snapshot = snapshot; // 1. mark all the clients participating in the current snapshot // and enter the group for each bit set TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) { - if (fci->fc_is_kernel) { + if (!fci->fc_pid) { #if TARGET_OS_SIMULATOR continue; #endif @@ -926,16 +983,18 @@ firehose_snapshot_start(void *ctxt) } dispatch_async(server_config.fs_mem_drain_queue, ^{ - // 2. make fs_snapshot visible, this is what triggers the snapshot - // logic from _drain() or handle_death(). until fs_snapshot is - // published, the bits set above are mostly ignored - server_config.fs_snapshot = snapshot; - + // 2. start the fs_mem_snapshot, this is what triggers the snapshot + // logic from _drain() or handle_death() + server_config.fs_mem_snapshot_started = true; snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL); dispatch_async(server_config.fs_io_drain_queue, ^{ firehose_client_t fcj; + // 3. 
start the fs_io_snapshot, this is what triggers the snapshot + // logic from _drain() or handle_death() + // 29868879: must always happen after the memory snapshot started + server_config.fs_io_snapshot_started = true; snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL); // match group_enter from firehose_snapshot() after MEM+IO_START @@ -947,7 +1006,7 @@ firehose_snapshot_start(void *ctxt) // were removed from the list have already left the group // (see firehose_client_finalize()) TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) { - if (fcj->fc_is_kernel) { + if (!fcj->fc_pid) { #if !TARGET_OS_SIMULATOR firehose_client_kernel_source_handle_event(fcj); #endif @@ -967,6 +1026,8 @@ firehose_snapshot_finish(void *ctxt) fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL); server_config.fs_snapshot = NULL; + server_config.fs_mem_snapshot_started = false; + server_config.fs_io_snapshot_started = false; dispatch_release(fs->fs_group); Block_release(fs->handler); @@ -1010,7 +1071,8 @@ kern_return_t firehose_server_register(mach_port_t server_port OS_UNUSED, mach_port_t mem_port, mach_vm_size_t mem_size, mach_port_t comm_recvp, mach_port_t comm_sendp, - mach_port_t extra_info_port, mach_vm_size_t extra_info_size) + mach_port_t extra_info_port, mach_vm_size_t extra_info_size, + audit_token_t atoken) { mach_vm_address_t base_addr = 0; firehose_client_t fc = NULL; @@ -1060,7 +1122,7 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, } fc = firehose_client_create((firehose_buffer_t)base_addr, - comm_recvp, comm_sendp); + (firehose_token_t)&atoken, comm_recvp, comm_sendp); dispatch_async(server_config.fs_io_drain_queue, ^{ firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index 7991721..d805167 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -53,11 +53,14 @@ struct firehose_client_s { dispatch_source_t fc_mem_source; mach_port_t fc_recvp; mach_port_t fc_sendp; + os_unfair_lock fc_lock; + pid_t fc_pid; + int fc_pidversion; + uid_t fc_euid; bool fc_use_notifs; bool fc_memory_corrupted; bool fc_needs_io_snapshot; bool fc_needs_mem_snapshot; - bool fc_is_kernel; }; void diff --git a/src/init.c b/src/init.c index 45cbff3..5b8d809 100644 --- a/src/init.c +++ b/src/init.c @@ -47,12 +47,28 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void) { + _os_object_atfork_prepare(); } DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void) { + _os_object_atfork_parent(); +} + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_atfork_child(void) +{ + _os_object_atfork_child(); + _voucher_atfork_child(); + if (_dispatch_is_multithreaded_inline()) { + _dispatch_child_of_unsafe_fork = true; + } + _dispatch_queue_atfork_child(); + // clear the _PROHIBIT and _MULTITHREADED bits if set + _dispatch_unsafe_fork = 0; } #pragma mark - @@ -1057,6 +1073,24 @@ os_release(void *obj) } } +void +_os_object_atfork_prepare(void) +{ + return; +} + +void +_os_object_atfork_parent(void) +{ + return; +} + +void +_os_object_atfork_child(void) +{ + return; +} + #pragma mark - #pragma mark dispatch_autorelease_pool no_objc @@ -1102,8 +1136,7 @@ static void dispatch_source_type_timer_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask, - dispatch_queue_t q) + unsigned long mask) { if (fastpath(!ds->ds_refs)) { ds->ds_refs = _dispatch_calloc(1ul, @@ 
-1111,11 +1144,6 @@ dispatch_source_type_timer_init(dispatch_source_t ds, } ds->ds_needs_rearm = true; ds->ds_is_timer = true; - if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0) - || q == dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){ - mask |= DISPATCH_TIMER_BACKGROUND; // - } ds_timer(ds->ds_refs).flags = mask; } @@ -1130,10 +1158,9 @@ const struct dispatch_source_type_s _dispatch_source_type_timer = { static void dispatch_source_type_after_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) + dispatch_source_type_t type, uintptr_t handle, unsigned long mask) { - dispatch_source_type_timer_init(ds, type, handle, mask, q); + dispatch_source_type_timer_init(ds, type, handle, mask); ds->ds_needs_rearm = false; ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER; } @@ -1147,12 +1174,11 @@ const struct dispatch_source_type_s _dispatch_source_type_after = { static void dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) + dispatch_source_type_t type, uintptr_t handle, unsigned long mask) { ds->ds_refs = _dispatch_calloc(1ul, sizeof(struct dispatch_timer_source_aggregate_refs_s)); - dispatch_source_type_timer_init(ds, type, handle, mask, q); + dispatch_source_type_timer_init(ds, type, handle, mask); ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE; ds->dq_specific_q = (void*)handle; _dispatch_retain(ds->dq_specific_q); @@ -1169,10 +1195,9 @@ const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={ static void dispatch_source_type_interval_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) + dispatch_source_type_t type, uintptr_t handle, unsigned long mask) { - dispatch_source_type_timer_init(ds, type, handle, mask, q); + dispatch_source_type_timer_init(ds, type, handle, mask); ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL; unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs); ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident; @@ -1193,11 +1218,10 @@ static void dispatch_source_type_readwrite_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + unsigned long mask DISPATCH_UNUSED) { ds->ds_is_level = true; -#ifdef HAVE_DECL_NOTE_LOWAT +#if HAVE_DECL_NOTE_LOWAT // bypass kernel check for device kqueue support rdar://19004921 ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT; #endif @@ -1241,8 +1265,7 @@ static void dispatch_source_type_memorypressure_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + unsigned long mask DISPATCH_UNUSED) { static dispatch_once_t pred; dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init); @@ -1279,8 +1302,7 @@ static void dispatch_source_type_vm_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + unsigned long mask DISPATCH_UNUSED) { // Map legacy vm pressure to memorypressure warning rdar://problem/15907505 mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; @@ -1288,7 +1310,7 @@ 
dispatch_source_type_vm_init(dispatch_source_t ds, ds->ds_pending_data_mask = mask; ds->ds_vmpressure_override = 1; #if TARGET_IPHONE_SIMULATOR - dispatch_source_type_memorypressure_init(ds, type, handle, mask, q); + dispatch_source_type_memorypressure_init(ds, type, handle, mask); #endif } @@ -1301,17 +1323,7 @@ const struct dispatch_source_type_s _dispatch_source_type_vm = { .init = dispatch_source_type_vm_init, }; -#elif DISPATCH_USE_VM_PRESSURE - -const struct dispatch_source_type_s _dispatch_source_type_vm = { - .ke = { - .filter = EVFILT_VM, - .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_VM_PRESSURE, -}; - -#endif // DISPATCH_USE_VM_PRESSURE +#endif // DISPATCH_USE_MEMORYSTATUS const struct dispatch_source_type_s _dispatch_source_type_signal = { .ke = { @@ -1325,8 +1337,7 @@ static void dispatch_source_type_proc_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + unsigned long mask DISPATCH_UNUSED) { ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831 } @@ -1378,6 +1389,12 @@ const struct dispatch_source_type_s _dispatch_source_type_vfs = { #endif #if HAVE_DECL_VQ_QUOTA |VQ_QUOTA +#endif +#if HAVE_DECL_VQ_NEARLOWDISK + |VQ_NEARLOWDISK +#endif +#if HAVE_DECL_VQ_DESIRED_DISK + |VQ_DESIRED_DISK #endif , }; @@ -1409,8 +1426,7 @@ static void dispatch_source_type_data_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + unsigned long mask DISPATCH_UNUSED) { ds->ds_is_installed = true; ds->ds_is_custom_source = true; @@ -1441,8 +1457,7 @@ const struct dispatch_source_type_s _dispatch_source_type_data_or = { static void dispatch_source_type_mach_send_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, unsigned long mask, - dispatch_queue_t q DISPATCH_UNUSED) + uintptr_t handle DISPATCH_UNUSED, unsigned long mask) { if (!mask) { // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD @@ -1464,8 +1479,7 @@ static void dispatch_source_type_mach_recv_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + unsigned long mask DISPATCH_UNUSED) { ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE; #if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK diff --git a/src/inline_internal.h b/src/inline_internal.h index daa0e9d..79f496f 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -2270,10 +2270,9 @@ _dispatch_priority_propagate(void) // including maintenance DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_is_background_thread(void) +_dispatch_is_background_priority(pthread_priority_t pp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t pp = _dispatch_get_priority(); pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; return pp && (pp <= _dispatch_background_priority); #else @@ -2281,6 +2280,14 @@ _dispatch_is_background_thread(void) #endif } +// including maintenance +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_is_background_thread(void) +{ + return _dispatch_is_background_priority(_dispatch_get_priority()); +} + #pragma mark - #pragma mark dispatch_block_t diff --git a/src/internal.h b/src/internal.h index a9aee11..1f63cce 100644 --- a/src/internal.h +++ b/src/internal.h @@ -78,6 +78,9 @@ #if 
!defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC #define OS_VOUCHER_ACTIVITY_SPI 1 #endif +#if !defined(OS_VOUCHER_ACTIVITY_GENERATE_SWAPS) +#define OS_VOUCHER_ACTIVITY_GENERATE_SWAPS 0 +#endif #if !defined(OS_FIREHOSE_SPI) && TARGET_OS_MAC #define OS_FIREHOSE_SPI 1 #endif @@ -352,8 +355,12 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #if DISPATCH_DEBUG // sys/queue.h debugging +#if defined(__linux__) +#define QUEUE_MACRO_DEBUG 1 +#else #undef TRASHIT #define TRASHIT(x) do {(x) = (void *)-1;} while (0) +#endif #endif // DISPATCH_DEBUG #define _TAILQ_TRASH_ENTRY(elm, field) do { \ TRASHIT((elm)->field.tqe_next); \ @@ -649,22 +656,12 @@ typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t; #endif #endif // EVFILT_MEMORYSTATUS -#if defined(EVFILT_VM) && !DISPATCH_USE_MEMORYSTATUS -#ifndef DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE 1 -#endif -#endif // EVFILT_VM - #if TARGET_OS_SIMULATOR #undef DISPATCH_USE_MEMORYPRESSURE_SOURCE #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 0 -#undef DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_USE_VM_PRESSURE_SOURCE 0 #endif // TARGET_OS_SIMULATOR #if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1 -#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 #endif #if DISPATCH_USE_MEMORYPRESSURE_SOURCE extern bool _dispatch_memory_warn; @@ -706,6 +703,14 @@ extern bool _dispatch_memory_warn; #undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982 #endif // VQ_QUOTA +#ifndef VQ_NEARLOWDISK +#undef HAVE_DECL_VQ_NEARLOWDISK +#endif // VQ_NEARLOWDISK + +#ifndef VQ_DESIRED_DISK +#undef HAVE_DECL_VQ_DESIRED_DISK +#endif // VQ_DESIRED_DISK + #if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \ !DISPATCH_HOST_SUPPORTS_OSX(101200) #undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN @@ -836,10 +841,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #ifndef DISPATCH_USE_GUARDED_FD #define DISPATCH_USE_GUARDED_FD 1 #endif -// change_fdguard_np() requires GUARD_DUP -#if DISPATCH_USE_GUARDED_FD && RDAR_11814513 -#define DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD 1 -#endif #endif // HAVE_SYS_GUARDED_H diff --git a/src/io.c b/src/io.c index e4f05ae..0a00e6e 100644 --- a/src/io.c +++ b/src/io.c @@ -24,6 +24,10 @@ #define DISPATCH_IO_DEBUG DISPATCH_DEBUG #endif +#ifndef PAGE_SIZE +#define PAGE_SIZE getpagesize() +#endif + #if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA #define _dispatch_io_data_retain(x) _dispatch_objc_retain(x) #define _dispatch_io_data_release(x) _dispatch_objc_release(x) @@ -2284,10 +2288,10 @@ syscall: return DISPATCH_OP_DELIVER; } error: - if (err == EAGAIN) { + if (err == EAGAIN || err == EWOULDBLOCK) { // For disk based files with blocking I/O we should never get EAGAIN dispatch_assert(!op->fd_entry->disk); - _dispatch_op_debug("performed: EAGAIN", op); + _dispatch_op_debug("performed: EAGAIN/EWOULDBLOCK", op); if (op->direction == DOP_DIR_READ && op->total && op->channel == op->fd_entry->convenience_channel) { // Convenience read with available data completes on EAGAIN diff --git a/src/object.m b/src/object.m index 323c98b..a9153ac 100644 --- a/src/object.m +++ b/src/object.m @@ -126,6 +126,24 @@ os_release(void *obj) return objc_release(obj); } +void +_os_object_atfork_prepare(void) +{ + return _objc_atfork_prepare(); +} + +void +_os_object_atfork_parent(void) +{ + return _objc_atfork_parent(); +} + +void +_os_object_atfork_child(void) +{ + return _objc_atfork_child(); +} + 
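/*
 * [Editor's sketch] On Darwin the dispatch_atfork_* hooks above are
 * invoked by libSystem around fork(2) rather than registered by
 * libdispatch itself. For a standalone build, the conventional way to
 * get equivalent callbacks is pthread_atfork(3); the handler and
 * constructor names below are illustrative:
 */
#include <pthread.h>

static void example_atfork_prepare(void) { /* quiesce library locks */ }
static void example_atfork_parent(void) { /* undo prepare in the parent */ }
static void example_atfork_child(void) { /* reset library state in child */ }

__attribute__((constructor))
static void
example_install_atfork_handlers(void)
{
	// prepare handlers run in reverse registration order before fork;
	// parent/child handlers run in registration order after it
	(void)pthread_atfork(example_atfork_prepare, example_atfork_parent,
			example_atfork_child);
}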
#pragma mark - #pragma mark _os_object diff --git a/src/object_internal.h b/src/object_internal.h index 63f6cf5..40430b6 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -613,6 +613,9 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #define _os_object_refcnt_dispose_barrier(o) \ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt) +void _os_object_atfork_child(void); +void _os_object_atfork_parent(void); +void _os_object_atfork_prepare(void); void _os_object_init(void); unsigned long _os_object_retain_count(_os_object_t obj); bool _os_object_retain_weak(_os_object_t obj); diff --git a/src/queue.c b/src/queue.c index aeef1e8..e87de8d 100644 --- a/src/queue.c +++ b/src/queue.c @@ -784,18 +784,9 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, #endif } #endif // HAVE_PTHREAD_WORKQUEUES -#if USE_MACH_SEM - // override the default FIFO behavior for the pool semaphores - kern_return_t kr = semaphore_create(mach_task_self(), - &pqc->dpq_thread_mediator.dsema_port, SYNC_POLICY_LIFO, 0); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port); -#elif USE_POSIX_SEM - /* XXXRW: POSIX semaphores don't support LIFO? */ - int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0); - (void)dispatch_assume_zero(ret); -#endif + _os_semaphore_t *sema = &pqc->dpq_thread_mediator.dsema_sema; + _os_semaphore_init(sema, _OS_SEM_POLICY_LIFO); + _os_semaphore_create(sema, _OS_SEM_POLICY_LIFO); } #endif // DISPATCH_USE_PTHREAD_POOL @@ -1025,25 +1016,19 @@ libdispatch_tsd_init(void) } #endif -DISPATCH_EXPORT DISPATCH_NOTHROW +DISPATCH_NOTHROW void -dispatch_atfork_child(void) +_dispatch_queue_atfork_child(void) { void *crash = (void *)0x100; size_t i; #if HAVE_MACH _dispatch_mach_host_port_pred = 0; - _dispatch_mach_host_port = MACH_VOUCHER_NULL; + _dispatch_mach_host_port = MACH_PORT_NULL; #endif - _voucher_atfork_child(); - if (!_dispatch_is_multithreaded_inline()) { - // clear the _PROHIBIT bit if set - _dispatch_unsafe_fork = 0; - return; - } - _dispatch_unsafe_fork = 0; - _dispatch_child_of_unsafe_fork = true; + + if (!_dispatch_is_multithreaded_inline()) return; _dispatch_main_q.dq_items_head = crash; _dispatch_main_q.dq_items_tail = crash; @@ -2734,12 +2719,16 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { +#if OS_VOUCHER_ACTIVITY_SPI voucher = VOUCHER_CURRENT; +#endif flags |= DISPATCH_BLOCK_HAS_VOUCHER; } +#if OS_VOUCHER_ACTIVITY_SPI if (voucher == VOUCHER_CURRENT) { voucher = _voucher_get(); } +#endif if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { pri = _dispatch_priority_propagate(); flags |= DISPATCH_BLOCK_HAS_PRIORITY; @@ -3722,7 +3711,7 @@ _dispatch_sync_block_with_private_data(dispatch_queue_t dq, } // balanced in d_block_sync_invoke or d_block_wait if (os_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, relaxed)) { + dbpd_queue, NULL, dq->_as_oq, relaxed)) { _dispatch_retain(dq); } if (flags & DISPATCH_BLOCK_BARRIER) { @@ -5833,12 +5822,7 @@ _dispatch_queue_set_mainq_drain_state(bool arg) void _dispatch_main_queue_callback_4CF( -#if TARGET_OS_MAC - mach_msg_header_t *_Null_unspecified msg -#else - void *ignored -#endif - DISPATCH_UNUSED) + void *ignored DISPATCH_UNUSED) { if (main_q_is_draining) { return; @@ -5853,6 +5837,9 @@ _dispatch_main_queue_callback_4CF( void 
dispatch_main(void) { + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); + #if HAVE_PTHREAD_MAIN_NP if (pthread_main_np()) { #endif diff --git a/src/queue_internal.h b/src/queue_internal.h index 1bff7b0..2435b13 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -610,6 +610,7 @@ void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); +void _dispatch_queue_atfork_child(void); #if DISPATCH_DEBUG void dispatch_debug_queue(dispatch_queue_t dq, const char* str); diff --git a/src/semaphore.c b/src/semaphore.c index 4d232b7..dc97ba9 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -20,53 +20,6 @@ #include "internal.h" -// semaphores are too fundamental to use the dispatch_assume*() macros -#if USE_WIN32_SEM -// rdar://problem/8428132 -static DWORD best_resolution = 1; // 1ms - -DWORD -_push_timer_resolution(DWORD ms) -{ - MMRESULT res; - static dispatch_once_t once; - - if (ms > 16) { - // only update timer resolution if smaller than default 15.6ms - // zero means not updated - return 0; - } - - // aim for the best resolution we can accomplish - dispatch_once(&once, ^{ - TIMECAPS tc; - MMRESULT res; - res = timeGetDevCaps(&tc, sizeof(tc)); - if (res == MMSYSERR_NOERROR) { - best_resolution = min(max(tc.wPeriodMin, best_resolution), - tc.wPeriodMax); - } - }); - - res = timeBeginPeriod(best_resolution); - if (res == TIMERR_NOERROR) { - return best_resolution; - } - // zero means not updated - return 0; -} - -// match ms parameter to result from _push_timer_resolution -void -_pop_timer_resolution(DWORD ms) -{ - if (ms) { - timeEndPeriod(ms); - } -} -#endif /* USE_WIN32_SEM */ - - DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); @@ -82,33 +35,7 @@ _dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau) dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); dsema->dsema_value = value; -#if USE_POSIX_SEM - int ret = sem_init(&dsema->dsema_sem, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#endif -} - -static void -_dispatch_semaphore_class_dispose(dispatch_semaphore_class_t dsemau) -{ - struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; - -#if USE_MACH_SEM - kern_return_t kr; - if (dsema->dsema_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } - dsema->dsema_port = MACH_PORT_DEAD; -#elif USE_POSIX_SEM - int ret = sem_destroy(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - if (dsema->dsema_handle) { - CloseHandle(dsema->dsema_handle); - } -#endif + _os_semaphore_init(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO); } #pragma mark - @@ -133,59 +60,6 @@ dispatch_semaphore_create(long value) return dsema; } -#if USE_MACH_SEM -static void -_dispatch_semaphore_create_port(semaphore_t *s4) -{ - kern_return_t kr; - semaphore_t tmp; - - if (*s4) { - return; - } - _dispatch_fork_becomes_unsafe(); - - // lazily allocate the semaphore port - - // Someday: - // 1) Switch to a doubly-linked FIFO in user-space. - // 2) User-space timers for the timeout. - // 3) Use the per-thread semaphore port. 
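/*
 * [Editor's sketch] Throughout this file, the fast paths run entirely on
 * the atomic dsema_value and only fall back to a (lazily created) kernel
 * semaphore when that value goes negative. A simplified sketch of the
 * two-level protocol, with illustrative names and the kernel calls
 * elided:
 */
#include <stdatomic.h>

typedef struct {
	_Atomic long value; // > 0: free slots; < 0: that many blocked waiters
	// ... a lazily created OS semaphore lives alongside ...
} toy_dsema_t;

static void
toy_dsema_wait(toy_dsema_t *s)
{
	if (atomic_fetch_sub_explicit(&s->value, 1, memory_order_acquire) > 0) {
		return; // fast path: a slot was free, no syscall needed
	}
	// slow path: block on the OS semaphore until a signal posts it
}

static void
toy_dsema_signal(toy_dsema_t *s)
{
	if (atomic_fetch_add_explicit(&s->value, 1, memory_order_release) >= 0) {
		return; // fast path: nobody was blocked
	}
	// slow path: post the OS semaphore to wake exactly one waiter
}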
- - while ((kr = semaphore_create(mach_task_self(), &tmp, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - - if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { - kr = semaphore_destroy(mach_task_self(), tmp); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } -} -#elif USE_WIN32_SEM -static void -_dispatch_semaphore_create_handle(HANDLE *s4) -{ - HANDLE tmp; - - if (*s4) { - return; - } - - // lazily allocate the semaphore port - - while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { - _dispatch_temporary_resource_shortage(); - } - - if (!os_atomic_cmpxchg(s4, 0, tmp)) { - CloseHandle(tmp); - } -} -#endif - void _dispatch_semaphore_dispose(dispatch_object_t dou) { @@ -196,7 +70,7 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) "Semaphore object deallocated while in use"); } - _dispatch_semaphore_class_dispose(dsema); + _os_semaphore_dispose(&dsema->dsema_sema); } size_t @@ -210,7 +84,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", - dsema->dsema_port); + dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); @@ -221,18 +95,8 @@ DISPATCH_NOINLINE long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) { -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); - kern_return_t kr = semaphore_signal(dsema->dsema_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - int ret = sem_post(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); - int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); - dispatch_assume(ret); -#endif + _os_semaphore_create(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO); + _os_semaphore_signal(&dsema->dsema_sema, 1); return 1; } @@ -257,61 +121,12 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, { long orig; -#if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; -#elif USE_POSIX_SEM - struct timespec _timeout; - int ret; -#elif USE_WIN32_SEM - uint64_t nsec; - DWORD msec; - DWORD resolution; - DWORD wait_result; -#endif - -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); -#endif - + _os_semaphore_create(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO); switch (timeout) { default: -#if USE_MACH_SEM - do { - uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); - } while (kr == KERN_ABORTED); - - if (kr != KERN_OPERATION_TIMED_OUT) { - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - break; - } -#elif USE_POSIX_SEM - do { - uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); - } while (ret == -1 && errno == EINTR); - - if (!(ret == -1 && errno == ETIMEDOUT)) { - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - break; - } -#elif USE_WIN32_SEM - nsec = _dispatch_timeout(timeout); - msec = 
(DWORD)(nsec / (uint64_t)1000000); - resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dsema->dsema_handle, msec); - _pop_timer_resolution(resolution); - if (wait_result != WAIT_TIMEOUT) { + if (!_os_semaphore_timedwait(&dsema->dsema_sema, timeout)) { break; } -#endif // Fall through and try to undo what the fast path did to // dsema->dsema_value case DISPATCH_TIME_NOW: @@ -319,30 +134,13 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, while (orig < 0) { if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return -1; -#endif + return _OS_SEM_TIMEOUT(); } } // Another thread called semaphore_signal(). // Fall through and drain the wakeup. case DISPATCH_TIME_FOREVER: -#if USE_MACH_SEM - do { - kr = semaphore_wait(dsema->dsema_port); - } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - do { - ret = sem_wait(&dsema->dsema_sem); - } while (ret != 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - WaitForSingleObject(dsema->dsema_handle, INFINITE); -#endif + _os_semaphore_wait(&dsema->dsema_sema); break; } return 0; @@ -416,25 +214,8 @@ _dispatch_group_wake(dispatch_group_t dg, bool needs_release) rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed); if (rval) { // wake group waiters -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dg->dg_port); - do { - kern_return_t kr = semaphore_signal(dg->dg_port); - DISPATCH_GROUP_VERIFY_KR(kr); - } while (--rval); -#elif USE_POSIX_SEM - do { - int ret = sem_post(&dg->dg_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - } while (--rval); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dg->dg_handle); - int ret; - ret = ReleaseSemaphore(dg->dg_handle, rval, NULL); - dispatch_assume(ret); -#else -#error "No supported semaphore type" -#endif + _os_semaphore_create(&dg->dg_sema, _OS_SEM_POLICY_FIFO); + _os_semaphore_signal(&dg->dg_sema, rval); } if (head) { // async group notify blocks @@ -475,7 +256,7 @@ _dispatch_group_dispose(dispatch_object_t dou) "Group object deallocated while in use"); } - _dispatch_semaphore_class_dispose(dg); + _os_semaphore_dispose(&dg->dg_sema); } size_t @@ -489,7 +270,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); #if USE_MACH_SEM offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", - dg->dg_port); + dg->dg_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, "count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters); @@ -503,19 +284,6 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) long value; int orig_waiters; -#if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; -#elif USE_POSIX_SEM // KVV - struct timespec _timeout; - int ret; -#elif USE_WIN32_SEM // KVV - uint64_t nsec; - DWORD msec; - DWORD resolution; - DWORD wait_result; -#endif - // check before we cause another signal to be sent by incrementing // dg->dg_waiters value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 @@ -533,48 +301,12 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) timeout = DISPATCH_TIME_FOREVER; } -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dg->dg_port); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dg->dg_handle); -#endif - + _os_semaphore_create(&dg->dg_sema, _OS_SEM_POLICY_FIFO); 
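/*
 * [Editor's sketch] The _os_semaphore_t used above is the portable alias
 * that semaphore_internal.h now embeds in place of the old per-platform
 * DISPATCH_OS_SEMA_FIELD macro. A shim header along these lines would
 * select the backing type; this is an assumption about the shape of
 * shims/lock.h, not a quote from it:
 */
#if USE_MACH_SEM
typedef semaphore_t _os_semaphore_t; // Mach semaphore port
#elif USE_POSIX_SEM
typedef sem_t _os_semaphore_t; // POSIX unnamed semaphore
#elif USE_WIN32_SEM
typedef HANDLE _os_semaphore_t; // Win32 semaphore handle
#else
#error "No supported semaphore type"
#endif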
switch (timeout) { default: -#if USE_MACH_SEM - do { - uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dg->dg_port, _timeout)); - } while (kr == KERN_ABORTED); - - if (kr != KERN_OPERATION_TIMED_OUT) { - DISPATCH_GROUP_VERIFY_KR(kr); - break; - } -#elif USE_POSIX_SEM - do { - uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dg->dg_sem, &_timeout)); - } while (ret == -1 && errno == EINTR); - - if (!(ret == -1 && errno == ETIMEDOUT)) { - DISPATCH_SEMAPHORE_VERIFY_RET(ret); + if (!_os_semaphore_timedwait(&dg->dg_sema, timeout)) { break; } -#elif USE_WIN32_SEM - nsec = _dispatch_timeout(timeout); - msec = (DWORD)(nsec / (uint64_t)1000000); - resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dg->dg_handle, msec); - _pop_timer_resolution(resolution); - if (wait_result != WAIT_TIMEOUT) { - break; - } -#endif // Fall through and try to undo the earlier change to // dg->dg_waiters case DISPATCH_TIME_NOW: @@ -582,30 +314,13 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) while (orig_waiters) { if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters, orig_waiters - 1, &orig_waiters, relaxed)) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return -1; -#endif + return _OS_SEM_TIMEOUT(); } } - // Another thread called semaphore_signal(). + // Another thread is running _dispatch_group_wake() // Fall through and drain the wakeup. 
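/*
 * [Editor's sketch] The timeout arm above depends on a small undo
 * protocol: a timed-out waiter tries to reclaim the dg_waiters slot it
 * registered, and if the count already dropped to zero a concurrent
 * _dispatch_group_wake() has counted it, so the waiter must fall through
 * and absorb the pending post. A stand-alone sketch of that protocol
 * (illustrative names):
 */
#include <stdatomic.h>
#include <stdbool.h>

// Returns true if the waiter reclaimed its slot (report a timeout),
// false if a wake is in flight (fall through and drain the post).
static bool
toy_undo_wait_registration(_Atomic int *waiters)
{
	int n = atomic_load_explicit(waiters, memory_order_relaxed);
	while (n > 0) {
		if (atomic_compare_exchange_weak_explicit(waiters, &n, n - 1,
				memory_order_relaxed, memory_order_relaxed)) {
			return true; // slot reclaimed: nobody will post for us
		}
	}
	return false; // a signaler already consumed the slot: drain its post
}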
case DISPATCH_TIME_FOREVER: -#if USE_MACH_SEM - do { - kr = semaphore_wait(dg->dg_port); - } while (kr == KERN_ABORTED); - DISPATCH_GROUP_VERIFY_KR(kr); -#elif USE_POSIX_SEM - do { - ret = sem_wait(&dg->dg_sem); - } while (ret == -1 && errno == EINTR); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - WaitForSingleObject(dg->dg_handle, INFINITE); -#endif + _os_semaphore_wait(&dg->dg_sema); break; } return 0; @@ -618,12 +333,7 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) return 0; } if (timeout == 0) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return (-1); -#endif + return _OS_SEM_TIMEOUT(); } return _dispatch_group_wait_slow(dg, timeout); } diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index dceda6d..f16152d 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -29,20 +29,10 @@ struct dispatch_queue_s; -#if USE_MACH_SEM -#define DISPATCH_OS_SEMA_FIELD(base) semaphore_t base##_port -#elif USE_POSIX_SEM -#define DISPATCH_OS_SEMA_FIELD(base) sem_t base##_sem -#elif USE_WIN32_SEM -#define DISPATCH_OS_SEMA_FIELD(base) HANDLE base##_handle -#else -#error "No supported semaphore type" -#endif - #define DISPATCH_SEMAPHORE_HEADER(cls, ns) \ DISPATCH_OBJECT_HEADER(cls); \ long volatile ns##_value; \ - DISPATCH_OS_SEMA_FIELD(ns) + _os_semaphore_t ns##_sema struct dispatch_semaphore_header_s { DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 5199477..8a1ab18 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -35,64 +35,54 @@ #include -#define memory_order_ordered memory_order_seq_cst +#define memory_order_ordered memory_order_seq_cst +#define memory_order_dependency memory_order_acquire + +#if __has_extension(c_generic_selections) && __has_extension(c_atomic) +#define os_atomic(type) _Atomic(type) +#else +#define os_atomic(type) type volatile +#endif + +#define _os_atomic_type_cases(type, expr) \ + type *: expr, \ + type volatile *: expr, \ + _Atomic(type) *: expr, \ + _Atomic(type) volatile *: expr #define _os_atomic_basetypeof(p) \ typeof(*_Generic((p), \ - char*: (char*)(p), \ - volatile char*: (char*)(p), \ - signed char*: (signed char*)(p), \ - volatile signed char*: (signed char*)(p), \ - unsigned char*: (unsigned char*)(p), \ - volatile unsigned char*: (unsigned char*)(p), \ - short*: (short*)(p), \ - volatile short*: (short*)(p), \ - unsigned short*: (unsigned short*)(p), \ - volatile unsigned short*: (unsigned short*)(p), \ - int*: (int*)(p), \ - volatile int*: (int*)(p), \ - unsigned int*: (unsigned int*)(p), \ - volatile unsigned int*: (unsigned int*)(p), \ - long*: (long*)(p), \ - volatile long*: (long*)(p), \ - unsigned long*: (unsigned long*)(p), \ - volatile unsigned long*: (unsigned long*)(p), \ - long long*: (long long*)(p), \ - volatile long long*: (long long*)(p), \ - unsigned long long*: (unsigned long long*)(p), \ - volatile unsigned long long*: (unsigned long long*)(p), \ - const void**: (const void**)(p), \ - const void*volatile*: (const void**)(p), \ + _os_atomic_type_cases(char, (char *)(p)), \ + _os_atomic_type_cases(signed char, (signed char *)(p)), \ + _os_atomic_type_cases(unsigned char, (unsigned char *)(p)), \ + _os_atomic_type_cases(short, (short *)(p)), \ + _os_atomic_type_cases(unsigned short, (unsigned short *)(p)), \ + _os_atomic_type_cases(int, (int *)(p)), \ + _os_atomic_type_cases(unsigned int, (unsigned int *)(p)), \ + _os_atomic_type_cases(long, (long 
*)(p)), \ + _os_atomic_type_cases(unsigned long, (unsigned long *)(p)), \ + _os_atomic_type_cases(long long, (long long *)(p)), \ + _os_atomic_type_cases(unsigned long long, (unsigned long long *)(p)), \ + _os_atomic_type_cases(void *, (void **)(p)), \ + _os_atomic_type_cases(const void *, (const void **)(p)), \ default: (void**)(p))) #define _os_atomic_c11_atomic(p) \ _Generic((p), \ - char*: (_Atomic(char)*)(p), \ - volatile char*: (volatile _Atomic(char)*)(p), \ - signed char*: (_Atomic(signed char)*)(p), \ - volatile signed char*: (volatile _Atomic(signed char)*)(p), \ - unsigned char*: (_Atomic(unsigned char)*)(p), \ - volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \ - short*: (_Atomic(short)*)(p), \ - volatile short*: (volatile _Atomic(short)*)(p), \ - unsigned short*: (_Atomic(unsigned short)*)(p), \ - volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \ - int*: (_Atomic(int)*)(p), \ - volatile int*: (volatile _Atomic(int)*)(p), \ - unsigned int*: (_Atomic(unsigned int)*)(p), \ - volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \ - long*: (_Atomic(long)*)(p), \ - volatile long*: (volatile _Atomic(long)*)(p), \ - unsigned long*: (_Atomic(unsigned long)*)(p), \ - volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \ - long long*: (_Atomic(long long)*)(p), \ - volatile long long*: (volatile _Atomic(long long)*)(p), \ - unsigned long long*: (_Atomic(unsigned long long)*)(p), \ - volatile unsigned long long*: \ - (volatile _Atomic(unsigned long long)*)(p), \ - const void**: (_Atomic(const void*)*)(p), \ - const void*volatile*: (volatile _Atomic(const void*)*)(p), \ - default: (volatile _Atomic(void*)*)(p)) + _os_atomic_type_cases(char, (_Atomic(char)*)(p)), \ + _os_atomic_type_cases(signed char, (_Atomic(signed char)*)(p)), \ + _os_atomic_type_cases(unsigned char, (_Atomic(unsigned char)*)(p)), \ + _os_atomic_type_cases(short, (_Atomic(short)*)(p)), \ + _os_atomic_type_cases(unsigned short, (_Atomic(unsigned short)*)(p)), \ + _os_atomic_type_cases(int, (_Atomic(int)*)(p)), \ + _os_atomic_type_cases(unsigned int, (_Atomic(unsigned int)*)(p)), \ + _os_atomic_type_cases(long, (_Atomic(long)*)(p)), \ + _os_atomic_type_cases(unsigned long, (_Atomic(unsigned long)*)(p)), \ + _os_atomic_type_cases(long long, (_Atomic(long long)*)(p)), \ + _os_atomic_type_cases(unsigned long long, (_Atomic(unsigned long long)*)(p)), \ + _os_atomic_type_cases(void *, (_Atomic(void*)*)(p)), \ + _os_atomic_type_cases(const void *, (_Atomic(const void*)*)(p)), \ + default: (_Atomic(void*)*)(p)) #define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) // see comment in dispatch_once.c @@ -156,6 +146,12 @@ #define os_atomic_xor_orig(p, v, m) \ _os_atomic_c11_op_orig((p), (v), m, xor, ^) +#define os_atomic_force_dependency_on(p, e) (p) +#define os_atomic_load_with_dependency_on(p, e) \ + os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) +#define os_atomic_load_with_dependency_on2o(p, f, e) \ + os_atomic_load_with_dependency_on(&(p)->f, e) + #define os_atomic_rmw_loop(p, ov, nv, m, ...) 
({ \ bool _result = false; \ typeof(p) _p = (p); \ diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h index 6a70c0b..0c12e82 100644 --- a/src/shims/linux_stubs.h +++ b/src/shims/linux_stubs.h @@ -16,9 +16,6 @@ #ifndef __DISPATCH__STUBS__INTERNAL #define __DISPATCH__STUBS__INTERNAL -// marker for hacks we have made to make progress -#define __LINUX_PORT_HDD__ 1 - /* * Stub out defines for some mach types and related macros */ @@ -50,8 +47,6 @@ typedef uint32_t voucher_activity_trace_id_t; typedef uint32_t voucher_activity_id_t; -typedef uint32_t _voucher_activity_buffer_hook_t;; - typedef uint32_t voucher_activity_flag_t; typedef struct { } mach_msg_header_t; @@ -85,10 +80,13 @@ typedef void (*dispatch_mach_msg_destructor_t)(void*); #define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */ +#ifndef NOTE_SECONDS #define NOTE_SECONDS 0x01 #define NOTE_USECONDS 0x02 #define NOTE_NSECONDS 0x04 #define NOTE_ABSOLUTE 0x08 +#define KEVENT_NSEC_NOT_SUPPORTED +#endif #define NOTE_CRITICAL 0x10 #define NOTE_BACKGROUND 0x20 #define NOTE_LEEWAY 0x40 diff --git a/src/shims/lock.c b/src/shims/lock.c index 2fab691..a64e9c2 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -49,6 +49,272 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, } #endif +#pragma mark - semaphores + +#if USE_MACH_SEM +#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ + if (unlikely((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \ + } else if (unlikely(x)) { \ + DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ + } \ + } while (0) + +void +_os_semaphore_create_slow(_os_semaphore_t *s4, int policy) +{ + kern_return_t kr; + semaphore_t tmp; + + _dispatch_fork_becomes_unsafe(); + + // lazily allocate the semaphore port + + // Someday: + // 1) Switch to a doubly-linked FIFO in user-space. + // 2) User-space timers for the timeout. + // 3) Use the per-thread semaphore port. 
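/*
 * [Editor's sketch] The body below creates a fresh port, publishes it
 * with a relaxed compare-and-swap, and destroys the duplicate when
 * another thread won the race, so exactly one port survives. The same
 * publish-once idiom in portable C11, with a hypothetical resource in
 * place of the Mach port:
 */
#include <stdatomic.h>
#include <stdint.h>

extern uintptr_t toy_make_resource(void); // hypothetical: never returns 0
extern void toy_destroy_resource(uintptr_t r); // hypothetical

static uintptr_t
toy_get_or_publish(_Atomic uintptr_t *slot)
{
	uintptr_t r = atomic_load_explicit(slot, memory_order_relaxed);
	if (r) {
		return r; // already published by some thread
	}
	r = toy_make_resource();
	uintptr_t expected = 0;
	if (!atomic_compare_exchange_strong_explicit(slot, &expected, r,
			memory_order_relaxed, memory_order_relaxed)) {
		toy_destroy_resource(r); // lost the race: keep the winner's value
		r = expected; // CAS wrote the published value back into `expected`
	}
	return r;
}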
+ + while ((kr = semaphore_create(mach_task_self(), &tmp, policy, 0))) { + DISPATCH_VERIFY_MIG(kr); + _dispatch_temporary_resource_shortage(); + } + + if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { + kr = semaphore_destroy(mach_task_self(), tmp); + DISPATCH_VERIFY_MIG(kr); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } +} + +void +_os_semaphore_dispose_slow(_os_semaphore_t *sema) +{ + kern_return_t kr; + semaphore_t sema_port = *sema; + kr = semaphore_destroy(mach_task_self(), sema_port); + DISPATCH_VERIFY_MIG(kr); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + *sema = MACH_PORT_DEAD; +} + +void +_os_semaphore_signal(_os_semaphore_t *sema, long count) +{ + do { + kern_return_t kr = semaphore_signal(*sema); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } while (--count); +} + +void +_os_semaphore_wait(_os_semaphore_t *sema) +{ + kern_return_t kr; + do { + kr = semaphore_wait(*sema); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} + +bool +_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout) +{ + mach_timespec_t _timeout; + kern_return_t kr; + + do { + uint64_t nsec = _dispatch_timeout(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + kr = slowpath(semaphore_timedwait(*sema, _timeout)); + } while (kr == KERN_ABORTED); + + if (kr == KERN_OPERATION_TIMED_OUT) { + return true; + } + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + return false; +} +#elif USE_POSIX_SEM +#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ + if (unlikely((x) == -1)) { \ + DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \ + } \ + } while (0) + +void +_os_semaphore_init(_os_semaphore_t *sema, int policy DISPATCH_UNUSED) +{ + int rc = sem_init(sema, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +} + +void +_os_semaphore_dispose_slow(_os_semaphore_t *sema) +{ + int rc = sem_destroy(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +} + +void +_os_semaphore_signal(_os_semaphore_t *sema, long count) +{ + do { + int ret = sem_post(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + } while (--count); +} + +void +_os_semaphore_wait(_os_semaphore_t *sema) +{ + int ret = sem_wait(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +} + +bool +_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout) +{ + struct timespec _timeout; + int ret; + + do { + uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + ret = slowpath(sem_timedwait(sema, &_timeout)); + } while (ret == -1 && errno == EINTR); + + if (ret == -1 && errno == ETIMEDOUT) { + return true; + } + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + return false; +} +#elif USE_WIN32_SEM +// rdar://problem/8428132 +static DWORD best_resolution = 1; // 1ms + +static DWORD +_push_timer_resolution(DWORD ms) +{ + MMRESULT res; + static dispatch_once_t once; + + if (ms > 16) { + // only update timer resolution if smaller than default 15.6ms + // zero means not updated + return 0; + } + + // aim for the best resolution we can accomplish + dispatch_once(&once, ^{ + TIMECAPS tc; + MMRESULT res; + res = timeGetDevCaps(&tc, sizeof(tc)); + if (res == MMSYSERR_NOERROR) { + best_resolution = min(max(tc.wPeriodMin, best_resolution), + tc.wPeriodMax); + } + }); + + res = timeBeginPeriod(best_resolution); + if (res == TIMERR_NOERROR) { + return best_resolution; + } + // zero means not updated + return 0; +} + +// match ms parameter to result 
from _push_timer_resolution +DISPATCH_ALWAYS_INLINE +static inline void +_pop_timer_resolution(DWORD ms) +{ + if (ms) timeEndPeriod(ms); +} + +void +_os_semaphore_create_slow(_os_semaphore_t *s4, int policy DISPATCH_UNUSED) +{ + HANDLE tmp; + + // lazily allocate the semaphore port + + while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { + _dispatch_temporary_resource_shortage(); + } + + if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { + CloseHandle(tmp); + } +} + +void +_os_semaphore_dispose_slow(_os_semaphore_t *sema) +{ + HANDLE sema_handle = *sema; + CloseHandle(sema_handle); + *sema = 0; +} + +void +_os_semaphore_signal(_os_semaphore_t *sema, long count) +{ + int ret = ReleaseSemaphore(*sema, count, NULL); + dispatch_assume(ret); +} + +void +_os_semaphore_wait(_os_semaphore_t *sema) +{ + WaitForSingleObject(*sema, INFINITE); +} + +bool +_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout) +{ + uint64_t nsec; + DWORD msec; + DWORD resolution; + DWORD wait_result; + + nsec = _dispatch_timeout(timeout); + msec = (DWORD)(nsec / (uint64_t)1000000); + resolution = _push_timer_resolution(msec); + wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + _pop_timer_resolution(resolution); + return wait_result == WAIT_TIMEOUT; +} +#else +#error "port has to implement _os_semaphore_t" +#endif + +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +semaphore_t +_dispatch_thread_semaphore_create(void) +{ + semaphore_t s4; + kern_return_t kr; + while (unlikely(kr = semaphore_create(mach_task_self(), &s4, + SYNC_POLICY_FIFO, 0))) { + DISPATCH_VERIFY_MIG(kr); + _dispatch_temporary_resource_shortage(); + } + return s4; +} + +void +_dispatch_thread_semaphore_dispose(void *ctxt) +{ + semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt; + kern_return_t kr = semaphore_destroy(mach_task_self(), s4); + DISPATCH_VERIFY_MIG(kr); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} +#endif + #pragma mark - ulock wrappers #if HAVE_UL_COMPARE_AND_WAIT @@ -206,36 +472,12 @@ _dispatch_wake_by_address(uint32_t volatile *address) #pragma mark - thread event -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -semaphore_t -_dispatch_thread_semaphore_create(void) -{ - semaphore_t s4; - kern_return_t kr; - while (unlikely(kr = semaphore_create(mach_task_self(), &s4, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - return s4; -} - -void -_dispatch_thread_semaphore_dispose(void *ctxt) -{ - semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt; - kern_return_t kr = semaphore_destroy(mach_task_self(), s4); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -} -#endif - void _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - kern_return_t kr = semaphore_signal(dte->dte_semaphore); + kern_return_t kr = semaphore_signal(dte->dte_sema); DISPATCH_SEMAPHORE_VERIFY_KR(kr); return; } @@ -244,9 +486,8 @@ _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) _dispatch_ulock_wake(&dte->dte_value, 0); #elif HAVE_FUTEX _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG); -#elif USE_POSIX_SEM - int rc = sem_post(&dte->dte_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#else + _os_semaphore_signal(&dte->dte_sema, 1); #endif } @@ -257,7 +498,7 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { kern_return_t kr; do { - kr = semaphore_wait(dte->dte_semaphore); + kr = semaphore_wait(dte->dte_sema); } while 
(unlikely(kr == KERN_ABORTED)); DISPATCH_SEMAPHORE_VERIFY_KR(kr); return; @@ -278,12 +519,8 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) NULL, FUTEX_PRIVATE_FLAG); #endif } -#elif USE_POSIX_SEM - int rc; - do { - rc = sem_wait(&dte->dte_sem); - } while (unlikely(rc != 0)); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _os_semaphore_wait(&dte->dte_sema); #endif } diff --git a/src/shims/lock.h b/src/shims/lock.h index 246c807..50dfaab 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -30,7 +30,7 @@ #pragma mark - platform macros DISPATCH_ENUM(dispatch_lock_options, uint32_t, - DLOCK_LOCK_NONE = 0x00000000, + DLOCK_LOCK_NONE = 0x00000000, DLOCK_LOCK_DATA_CONTENTION = 0x00010000, ); @@ -160,10 +160,6 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif #endif // HAVE_UL_UNFAIR_LOCK -#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX) -#endif - #ifndef HAVE_FUTEX #ifdef __linux__ #define HAVE_FUTEX 1 @@ -172,29 +168,107 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif #endif // HAVE_FUTEX +#pragma mark - semaphores + +#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +#if TARGET_OS_MAC +#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT) +#else +#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK 0 +#endif +#endif + #if USE_MACH_SEM -#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ - if (unlikely((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \ - } else if (unlikely(x)) { \ - DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ - } \ - } while (0) -#define DISPATCH_GROUP_VERIFY_KR(x) do { \ - if (unlikely((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \ - } else if (unlikely(x)) { \ - DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ - } \ - } while (0) + +typedef semaphore_t _os_semaphore_t; +#define _OS_SEM_POLICY_FIFO SYNC_POLICY_FIFO +#define _OS_SEM_POLICY_LIFO SYNC_POLICY_LIFO +#define _OS_SEM_TIMEOUT() KERN_OPERATION_TIMED_OUT + +#define _os_semaphore_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL) +#define _os_semaphore_is_created(sema) (*(sema) != MACH_PORT_NULL) +void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy); + #elif USE_POSIX_SEM -#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ - if (unlikely((x) == -1)) { \ - DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \ - } \ - } while (0) + +typedef sem_t _os_semaphore_t; +#define _OS_SEM_POLICY_FIFO 0 +#define _OS_SEM_POLICY_LIFO 0 +#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1) + +void _os_semaphore_init(_os_semaphore_t *sema, int policy); +#define _os_semaphore_is_created(sema) 1 +#define _os_semaphore_create_slow(sema, policy) ((void)0) + +#elif USE_WIN32_SEM + +typedef HANDLE _os_semaphore_t; +#define _OS_SEM_POLICY_FIFO 0 +#define _OS_SEM_POLICY_LIFO 0 +#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1) + +#define _os_semaphore_init(sema, policy) (void)(*(sema) = 0) +#define _os_semaphore_is_created(sema) (*(sema) != 0) +void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy); + +#else +#error "port has to implement _os_semaphore_t" #endif +void _os_semaphore_dispose_slow(_os_semaphore_t *sema); +void _os_semaphore_signal(_os_semaphore_t *sema, long count); +void _os_semaphore_wait(_os_semaphore_t *sema); +bool _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout); + 
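+// [Editorial sketch, not part of the original patch] How a port is
+// expected to drive the portable semaphore API declared above; the
+// thread-event and dispatch_semaphore code are the real consumers.
+// Illustrative only ('deadline' stands in for a caller-supplied
+// dispatch_time_t):
+//
+//	_os_semaphore_t sema;
+//	_os_semaphore_init(&sema, _OS_SEM_POLICY_FIFO);   // eager on POSIX,
+//	                                                  // lazy marker on Mach
+//	_os_semaphore_create(&sema, _OS_SEM_POLICY_FIFO); // slow path, only
+//	                                                  // where init deferred
+//	_os_semaphore_signal(&sema, 1);                   // post once
+//	_os_semaphore_wait(&sema);                        // consume the post
+//	if (_os_semaphore_timedwait(&sema, deadline)) {
+//		// timed out without a post
+//	}
+//	_os_semaphore_dispose(&sema);                     // tear down if created
+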
+DISPATCH_ALWAYS_INLINE +static inline void +_os_semaphore_create(_os_semaphore_t *sema, int policy) +{ + if (!_os_semaphore_is_created(sema)) { + _os_semaphore_create_slow(sema, policy); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_os_semaphore_dispose(_os_semaphore_t *sema) +{ + if (_os_semaphore_is_created(sema)) { + _os_semaphore_dispose_slow(sema); + } +} + +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +semaphore_t _dispatch_thread_semaphore_create(void); +void _dispatch_thread_semaphore_dispose(void *); + +DISPATCH_ALWAYS_INLINE +static inline semaphore_t +_dispatch_get_thread_semaphore(void) +{ + semaphore_t sema = (semaphore_t)(uintptr_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + if (unlikely(!sema)) { + return _dispatch_thread_semaphore_create(); + } + _dispatch_thread_setspecific(dispatch_sema4_key, NULL); + return sema; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_put_thread_semaphore(semaphore_t sema) +{ + semaphore_t old_sema = (semaphore_t)(uintptr_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema); + if (unlikely(old_sema)) { + return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema); + } +} +#endif + + #pragma mark - compare and wait DISPATCH_NOT_TAIL_CALLED @@ -224,7 +298,7 @@ void _dispatch_wake_by_address(uint32_t volatile *address); typedef struct dispatch_thread_event_s { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK union { - semaphore_t dte_semaphore; + _os_semaphore_t dte_sema; uint32_t dte_value; }; #elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX @@ -232,43 +306,11 @@ typedef struct dispatch_thread_event_s { // UINT32_MAX means waited on, but not signalled yet // 0 is the initial and final state uint32_t dte_value; -#elif USE_POSIX_SEM - sem_t dte_sem; #else -# error define dispatch_thread_event_s for your platform + _os_semaphore_t dte_sema; #endif } dispatch_thread_event_s, *dispatch_thread_event_t; -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -semaphore_t _dispatch_thread_semaphore_create(void); -void _dispatch_thread_semaphore_dispose(void *); - -DISPATCH_ALWAYS_INLINE -static inline semaphore_t -_dispatch_get_thread_semaphore(void) -{ - semaphore_t sema = (semaphore_t)(uintptr_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - if (unlikely(!sema)) { - return _dispatch_thread_semaphore_create(); - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return sema; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_put_thread_semaphore(semaphore_t sema) -{ - semaphore_t old_sema = (semaphore_t)(uintptr_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema); - if (unlikely(old_sema)) { - return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema); - } -} -#endif - DISPATCH_NOT_TAIL_CALLED void _dispatch_thread_event_wait_slow(dispatch_thread_event_t); void _dispatch_thread_event_signal_slow(dispatch_thread_event_t); @@ -279,15 +321,14 @@ _dispatch_thread_event_init(dispatch_thread_event_t dte) { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - dte->dte_semaphore = _dispatch_get_thread_semaphore(); + dte->dte_sema = _dispatch_get_thread_semaphore(); return; } #endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX dte->dte_value = 0; -#elif USE_POSIX_SEM - int rc = sem_init(&dte->dte_sem, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _os_semaphore_init(&dte->dte_sema, _OS_SEM_POLICY_FIFO); #endif } @@ 
-308,7 +349,7 @@ _dispatch_thread_event_signal(dispatch_thread_event_t dte) // waiters do the validation return; } -#elif USE_POSIX_SEM +#else // fallthrough #endif _dispatch_thread_event_signal_slow(dte); @@ -331,7 +372,7 @@ _dispatch_thread_event_wait(dispatch_thread_event_t dte) // for any other value, go to the slowpath which checks it's not corrupt return; } -#elif USE_POSIX_SEM +#else // fallthrough #endif _dispatch_thread_event_wait_slow(dte); @@ -343,16 +384,15 @@ _dispatch_thread_event_destroy(dispatch_thread_event_t dte) { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - _dispatch_put_thread_semaphore(dte->dte_semaphore); + _dispatch_put_thread_semaphore(dte->dte_sema); return; } #endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX // nothing to do dispatch_assert(dte->dte_value == 0); -#elif USE_POSIX_SEM - int rc = sem_destroy(&dte->dte_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _os_semaphore_dispose(&dte->dte_sema); #endif } diff --git a/src/shims/time.h b/src/shims/time.h index 7b29771..13fe4f8 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -40,7 +40,11 @@ sleep(unsigned int seconds) } #endif -uint64_t _dispatch_get_nanoseconds(void); +typedef enum { + DISPATCH_CLOCK_WALL, + DISPATCH_CLOCK_MACH, +#define DISPATCH_CLOCK_COUNT (DISPATCH_CLOCK_MACH + 1) +} dispatch_clock_t; #if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME // x86 currently implements mach time in nanoseconds @@ -106,36 +110,94 @@ _dispatch_time_nano2mach(uint64_t nsec) } #endif +/* XXXRW: Some kind of overflow detection needed? */ +#define _dispatch_timespec_to_nano(ts) \ + ((uint64_t)(ts).tv_sec * NSEC_PER_SEC + (uint64_t)(ts).tv_nsec) +#define _dispatch_timeval_to_nano(tv) \ + ((uint64_t)(tv).tv_sec * NSEC_PER_SEC + \ + (uint64_t)(tv).tv_usec * NSEC_PER_USEC) + +static inline uint64_t +_dispatch_get_nanoseconds(void) +{ + dispatch_static_assert(sizeof(NSEC_PER_SEC) == 8); + dispatch_static_assert(sizeof(USEC_PER_SEC) == 8); + +#if TARGET_OS_MAC && DISPATCH_HOST_SUPPORTS_OSX(101200) + return clock_gettime_nsec_np(CLOCK_REALTIME); +#elif HAVE_DECL_CLOCK_REALTIME + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif TARGET_OS_WIN32 + // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). + FILETIME ft; + ULARGE_INTEGER li; + GetSystemTimeAsFileTime(&ft); + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + return li.QuadPart * 100ull; +#else + struct timeval tv; + dispatch_assert_zero(gettimeofday(&tv, NULL)); + return _dispatch_timeval_to_nano(tv); +#endif +} + static inline uint64_t _dispatch_absolute_time(void) { #if HAVE_MACH_ABSOLUTE_TIME return mach_absolute_time(); +#elif HAVE_DECL_CLOCK_UPTIME && !defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif HAVE_DECL_CLOCK_MONOTONIC + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts)); + return _dispatch_timespec_to_nano(ts); #elif TARGET_OS_WIN32 LARGE_INTEGER now; return QueryPerformanceCounter(&now) ? 
now.QuadPart : 0; #else - struct timespec ts; - int ret; - -#if HAVE_DECL_CLOCK_UPTIME - ret = clock_gettime(CLOCK_UPTIME, &ts); -#elif HAVE_DECL_CLOCK_MONOTONIC - ret = clock_gettime(CLOCK_MONOTONIC, &ts); -#else -#error "clock_gettime: no supported absolute time clock" +#error platform needs to implement _dispatch_absolute_time() #endif - (void)dispatch_assume_zero(ret); - - /* XXXRW: Some kind of overflow detection needed? */ - return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); -#endif // HAVE_MACH_ABSOLUTE_TIME } +DISPATCH_ALWAYS_INLINE static inline uint64_t _dispatch_approximate_time(void) { return _dispatch_absolute_time(); } +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_now(dispatch_clock_t clock) +{ + switch (clock) { + case DISPATCH_CLOCK_MACH: + return _dispatch_absolute_time(); + case DISPATCH_CLOCK_WALL: + return _dispatch_get_nanoseconds(); + } + __builtin_unreachable(); +} + +typedef struct { + uint64_t nows[DISPATCH_CLOCK_COUNT]; +} dispatch_clock_now_cache_s, *dispatch_clock_now_cache_t; + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_now_cached(dispatch_clock_t clock, + dispatch_clock_now_cache_t cache) +{ + if (likely(cache->nows[clock])) { + return cache->nows[clock]; + } + return cache->nows[clock] = _dispatch_time_now(clock); +} + #endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/source.c b/src/source.c index 3d0eee8..4e1d806 100644 --- a/src/source.c +++ b/src/source.c @@ -23,13 +23,14 @@ #include "protocol.h" #include "protocolServer.h" #endif -#include #define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1 #define DKEV_UNREGISTER_DISCONNECTED 0x2 #define DKEV_UNREGISTER_REPLY_REMOVE 0x4 #define DKEV_UNREGISTER_WAKEUP 0x8 +static pthread_priority_t +_dispatch_source_compute_kevent_priority(dispatch_source_t ds); static void _dispatch_source_handler_free(dispatch_source_t ds, long kind); static void _dispatch_source_merge_kevent(dispatch_source_t ds, const _dispatch_kevent_qos_s *ke); @@ -77,7 +78,6 @@ static const char * _evfiltstr(short filt); static void dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, int i, int n, const char *function, unsigned int line); -static void _dispatch_kevent_debugger(void *context); #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) #else @@ -125,10 +125,6 @@ dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, switch (type->ke.filter) { case DISPATCH_EVFILT_TIMER: break; // timers don't need masks -#if DISPATCH_USE_VM_PRESSURE - case EVFILT_VM: - break; // type->init forces the only acceptable mask -#endif case DISPATCH_EVFILT_MACH_NOTIFICATION: break; // type->init handles zero mask as a legacy case default: @@ -144,9 +140,6 @@ dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, } break; case EVFILT_FS: -#if DISPATCH_USE_VM_PRESSURE - case EVFILT_VM: -#endif #if DISPATCH_USE_MEMORYSTATUS case EVFILT_MEMORYSTATUS: #endif @@ -207,7 +200,7 @@ dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, } // Some sources require special processing if (type->init != NULL) { - type->init(ds, type, handle, mask, dq); + type->init(ds, type, handle, mask); } dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) { @@ -717,6 +710,12 @@ _dispatch_source_kevent_register(dispatch_source_t ds, pthread_priority_t pp) dispatch_assert_zero((bool)ds->ds_is_installed); switch (ds->ds_dkev->dk_kevent.filter) { case 
DISPATCH_EVFILT_TIMER: + // aggressively coalesce background/maintenance QoS timers + // + pp = _dispatch_source_compute_kevent_priority(ds); + if (_dispatch_is_background_priority(pp)) { + ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_BACKGROUND; + } _dispatch_timers_update(ds); _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); @@ -1251,30 +1250,6 @@ done: #pragma mark - #pragma mark dispatch_kevent_t -#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD -static void _dispatch_kevent_guard(dispatch_kevent_t dk); -static void _dispatch_kevent_unguard(dispatch_kevent_t dk); -#else -static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; } -static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; } -#endif - -#if !DISPATCH_USE_EV_UDATA_SPECIFIC -static struct dispatch_kevent_s _dispatch_kevent_data_or = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), -}; -static struct dispatch_kevent_s _dispatch_kevent_data_add = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_ADD, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), -}; -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC - #define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) DISPATCH_CACHELINE_ALIGN @@ -1287,17 +1262,6 @@ _dispatch_kevent_init() for (i = 0; i < DSL_HASH_SIZE; i++) { TAILQ_INIT(&_dispatch_sources[i]); } - -#if !DISPATCH_USE_EV_UDATA_SPECIFIC - TAILQ_INSERT_TAIL(&_dispatch_sources[0], - &_dispatch_kevent_data_or, dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[0], - &_dispatch_kevent_data_add, dk_list); - _dispatch_kevent_data_or.dk_kevent.udata = - (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_or; - _dispatch_kevent_data_add.dk_kevent.udata = - (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_add; -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC } static inline uintptr_t @@ -1333,7 +1297,6 @@ static void _dispatch_kevent_insert(dispatch_kevent_t dk) { if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return; - _dispatch_kevent_guard(dk); uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, dk->dk_kevent.filter); TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); @@ -1500,7 +1463,6 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk, unsigned int options) dk->dk_kevent.filter); TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); } - _dispatch_kevent_unguard(dk); free(dk); return r; } @@ -1598,10 +1560,6 @@ _dispatch_kevent_error(_dispatch_kevent_qos_s *ke) static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke) { -#if DISPATCH_DEBUG - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); -#endif if (ke->filter == EVFILT_USER) { _dispatch_kevent_mgr_debug(ke); return; @@ -1643,61 +1601,6 @@ _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke) } } -#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD -static void -_dispatch_kevent_guard(dispatch_kevent_t dk) -{ - guardid_t guard; - const unsigned int guard_flags = GUARD_CLOSE; - int r, fd_flags = 0; - switch (dk->dk_kevent.filter) { - case EVFILT_READ: - case EVFILT_WRITE: - case EVFILT_VNODE: - guard = &dk->dk_kevent; - r = change_fdguard_np((int)dk->dk_kevent.ident, NULL, 0, - &guard, guard_flags, &fd_flags); - if (slowpath(r == -1)) { - int err = errno; - if (err != EPERM) { - (void)dispatch_assume_zero(err); - } - return; - } - dk->dk_kevent.ext[0] = guard_flags; - dk->dk_kevent.ext[1] = 
fd_flags; - break; - } -} - -static void -_dispatch_kevent_unguard(dispatch_kevent_t dk) -{ - guardid_t guard; - unsigned int guard_flags; - int r, fd_flags; - switch (dk->dk_kevent.filter) { - case EVFILT_READ: - case EVFILT_WRITE: - case EVFILT_VNODE: - guard_flags = (unsigned int)dk->dk_kevent.ext[0]; - if (!guard_flags) { - return; - } - guard = &dk->dk_kevent; - fd_flags = (int)dk->dk_kevent.ext[1]; - r = change_fdguard_np((int)dk->dk_kevent.ident, &guard, - guard_flags, NULL, 0, &fd_flags); - if (slowpath(r == -1)) { - (void)dispatch_assume_zero(errno); - return; - } - dk->dk_kevent.ext[0] = 0; - break; - } -} -#endif // DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD - #pragma mark - #pragma mark dispatch_source_timer @@ -1721,58 +1624,32 @@ static dispatch_source_refs_t DISPATCH_NOINLINE static void _dispatch_source_timer_telemetry_slow(dispatch_source_t ds, - uintptr_t ident, struct dispatch_timer_source_s *values) + dispatch_clock_t clock, struct dispatch_timer_source_s *values) { if (_dispatch_trace_timer_configure_enabled()) { - _dispatch_trace_timer_configure(ds, ident, values); + _dispatch_trace_timer_configure(ds, clock, values); } } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_source_timer_telemetry(dispatch_source_t ds, uintptr_t ident, +_dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock, struct dispatch_timer_source_s *values) { if (_dispatch_trace_timer_configure_enabled() || _dispatch_source_timer_telemetry_enabled()) { - _dispatch_source_timer_telemetry_slow(ds, ident, values); + _dispatch_source_timer_telemetry_slow(ds, clock, values); asm(""); // prevent tailcall } } -// approx 1 year (60s * 60m * 24h * 365d) -#define FOREVER_NSEC 31536000000000000ull - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_source_timer_now(uint64_t nows[], unsigned int tidx) -{ - unsigned int tk = DISPATCH_TIMER_KIND(tidx); - if (nows && fastpath(nows[tk] != 0)) { - return nows[tk]; - } - uint64_t now; - switch (tk) { - case DISPATCH_TIMER_KIND_MACH: - now = _dispatch_absolute_time(); - break; - case DISPATCH_TIMER_KIND_WALL: - now = _dispatch_get_nanoseconds(); - break; - } - if (nows) { - nows[tk] = now; - } - return now; -} - static inline unsigned long _dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) { // calculate the number of intervals since last fire unsigned long data, missed; uint64_t now; - now = _dispatch_source_timer_now(NULL, _dispatch_source_timer_idx(dr)); + now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(_dispatch_source_timer_idx(dr))); missed = (unsigned long)((now - ds_timer(dr).last_fire) / ds_timer(dr).interval); // correct for missed intervals already delivered last time @@ -1783,8 +1660,8 @@ _dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) struct dispatch_set_timer_params { dispatch_source_t ds; - uintptr_t ident; struct dispatch_timer_source_s values; + dispatch_clock_t clock; }; static void @@ -1793,21 +1670,31 @@ _dispatch_source_set_timer3(void *context) // Called on the _dispatch_mgr_q struct dispatch_set_timer_params *params = context; dispatch_source_t ds = params->ds; - ds->ds_ident_hack = params->ident; - ds_timer(ds->ds_refs) = params->values; + dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t)ds->ds_refs; + + params->values.flags = ds_timer(dt).flags; + if (params->clock == DISPATCH_CLOCK_WALL) { + params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; +#if HAVE_MACH + _dispatch_mach_host_calendar_change_register(); +#endif + } else { + params->values.flags 
&= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK; + } + ds_timer(dt) = params->values; + ds->ds_ident_hack = _dispatch_source_timer_idx(ds->ds_refs); // Clear any pending data that might have accumulated on // older timer params ds->ds_pending_data = 0; - // Re-arm in case we got disarmed because of pending set_timer suspension - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); + dispatch_resume(ds); - // Must happen after resume to avoid getting disarmed due to suspension - _dispatch_timers_update(ds); - dispatch_release(ds); - if (params->values.flags & DISPATCH_TIMER_WALL_CLOCK) { - _dispatch_mach_host_calendar_change_register(); + if (_dispatch_source_tryarm(ds)) { + // Re-arm in case we got disarmed because of pending set_timer suspension + _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dt); + // Must happen after resume to avoid getting disarmed due to suspension + _dispatch_timers_update(ds); } + dispatch_release(ds); free(params); } @@ -1829,7 +1716,6 @@ _dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, struct dispatch_set_timer_params *params; params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params)); params->ds = ds; - params->values.flags = ds_timer(ds->ds_refs).flags; if (interval == 0) { // we use zero internally to mean disabled @@ -1850,7 +1736,7 @@ _dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, if ((int64_t)start < 0) { // wall clock start = (dispatch_time_t)-((int64_t)start); - params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; + params->clock = DISPATCH_CLOCK_WALL; } else { // absolute clock interval = _dispatch_time_nano2mach(interval); @@ -1862,9 +1748,8 @@ _dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, interval = 1; } leeway = _dispatch_time_nano2mach(leeway); - params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK; + params->clock = DISPATCH_CLOCK_MACH; } - params->ident = DISPATCH_TIMER_IDENT(params->values.flags); params->values.target = start; params->values.deadline = (start < UINT64_MAX - leeway) ? start + leeway : UINT64_MAX; @@ -1887,7 +1772,7 @@ _dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, struct dispatch_set_timer_params *params; params = _dispatch_source_timer_params(ds, start, interval, leeway); - _dispatch_source_timer_telemetry(ds, params->ident, ¶ms->values); + _dispatch_source_timer_telemetry(ds, params->clock, ¶ms->values); // Suspend the source so that it doesn't fire with pending changes // The use of suspend/resume requires the external retain/release dispatch_retain(ds); @@ -1917,8 +1802,11 @@ _dispatch_source_set_runloop_timer_4CF(dispatch_source_t ds, void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) { +#define NSEC_PER_FRAME (NSEC_PER_SEC/60) +// approx 1 year (60s * 60m * 24h * 365d) +#define FOREVER_NSEC 31536000000000000ull + dispatch_source_refs_t dr = ds->ds_refs; - #define NSEC_PER_FRAME (NSEC_PER_SEC/60) const bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION; if (fastpath(interval <= (animation ? 
FOREVER_NSEC/NSEC_PER_FRAME : FOREVER_NSEC/NSEC_PER_MSEC))) { @@ -1935,7 +1823,8 @@ _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) ds_timer(dr).deadline = target + leeway; ds_timer(dr).interval = interval; ds_timer(dr).leeway = leeway; - _dispatch_source_timer_telemetry(ds, ds->ds_ident_hack, &ds_timer(dr)); + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(ds->ds_ident_hack); + _dispatch_source_timer_telemetry(ds, clock, &ds_timer(dr)); } #pragma mark - @@ -1958,7 +1847,7 @@ typedef struct dispatch_timer_s { } #define DISPATCH_TIMER_INIT(kind, qos) \ DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) + DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)) struct dispatch_timer_s _dispatch_timer[] = { DISPATCH_TIMER_INIT(WALL, NORMAL), @@ -1998,7 +1887,7 @@ struct dispatch_timer_s _dispatch_timer[] = { } #define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \ DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) + DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)) struct dispatch_kevent_s _dispatch_kevent_timer[] = { DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL), @@ -2022,7 +1911,7 @@ struct dispatch_kevent_s _dispatch_kevent_timer[] = { } #define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \ DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos), note) + DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos), note) _dispatch_kevent_qos_s _dispatch_kevent_timeout[] = { DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME), @@ -2198,13 +2087,13 @@ _dispatch_timers_update(dispatch_source_t ds) } static inline void -_dispatch_timers_run2(uint64_t nows[], unsigned int tidx) +_dispatch_timers_run2(dispatch_clock_now_cache_t nows, unsigned int tidx) { dispatch_source_refs_t dr; dispatch_source_t ds; uint64_t now, missed; - now = _dispatch_source_timer_now(nows, tidx); + now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) { ds = _dispatch_source_from_refs(dr); // We may find timers on the wrong list due to a pending update from @@ -2255,7 +2144,7 @@ _dispatch_timers_run2(uint64_t nows[], unsigned int tidx) DISPATCH_NOINLINE static void -_dispatch_timers_run(uint64_t nows[]) +_dispatch_timers_run(dispatch_clock_now_cache_t nows) { unsigned int tidx; for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { @@ -2265,27 +2154,33 @@ _dispatch_timers_run(uint64_t nows[]) } } +#define DISPATCH_TIMERS_GET_DELAY_ALL (~0u) + static inline unsigned int -_dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], - uint64_t *delay, uint64_t *leeway, int qos, int kind) +_dispatch_timers_get_delay(dispatch_clock_now_cache_t nows, + struct dispatch_timer_s timer[], + uint64_t *delay, uint64_t *leeway, unsigned int query) { - unsigned int tidx, ridx = DISPATCH_TIMER_COUNT; - uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX; + unsigned int tidx, ridx = DISPATCH_TIMER_COUNT, minidx, maxidx; + uint64_t tmp, delta = INT64_MAX, dldelta = INT64_MAX; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){ - continue; - } - if (kind >= 0 && kind != DISPATCH_TIMER_KIND(tidx)){ - continue; - } + if (query == DISPATCH_TIMERS_GET_DELAY_ALL) { + minidx = 0; + maxidx = DISPATCH_TIMER_COUNT - 1; + } else { + minidx = maxidx = query; + } + + for (tidx = minidx; tidx <= maxidx; 
tidx++) { + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); uint64_t target = timer[tidx].target; - if (target == UINT64_MAX) { + if (target >= INT64_MAX) { continue; } uint64_t deadline = timer[tidx].deadline; - if (qos >= 0) { + if (query != DISPATCH_TIMERS_GET_DELAY_ALL) { // Timer pre-coalescing + unsigned int qos = DISPATCH_TIMER_QOS(tidx); uint64_t window = _dispatch_kevent_coalescing_window[qos]; uint64_t latest = deadline > window ? deadline - window : 0; dispatch_source_refs_t dri; @@ -2296,13 +2191,13 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], target = tmp; } } - uint64_t now = _dispatch_source_timer_now(nows, tidx); + uint64_t now = _dispatch_time_now_cached(clock, nows); if (target <= now) { delta = 0; break; } tmp = target - now; - if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { + if (clock != DISPATCH_CLOCK_WALL) { tmp = _dispatch_time_mach2nano(tmp); } if (tmp < INT64_MAX && tmp < delta) { @@ -2311,7 +2206,7 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], } dispatch_assert(target <= deadline); tmp = deadline - now; - if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { + if (clock != DISPATCH_CLOCK_WALL) { tmp = _dispatch_time_mach2nano(tmp); } if (tmp < INT64_MAX && tmp < dldelta) { @@ -2319,7 +2214,7 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], } } *delay = delta; - *leeway = delta && delta < UINT64_MAX ? dldelta - delta : UINT64_MAX; + *leeway = delta && delta < INT64_MAX ? dldelta - delta : INT64_MAX; return ridx; } @@ -2335,24 +2230,28 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], static void _dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, - uint64_t leeway, uint64_t nows[]) + uint64_t leeway, dispatch_clock_now_cache_t nows) { // call to update nows[] - _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows); +#ifdef KEVENT_NSEC_NOT_SUPPORTED // adjust nsec based delay to msec based and ignore leeway delay /= 1000000L; if ((int64_t)(delay) <= 0) { delay = 1; // if value <= 0 the dispatch will stop } +#else + ke->fflags |= NOTE_NSECONDS; +#endif ke->data = (int64_t)delay; } #else static void _dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, - uint64_t leeway, uint64_t nows[]) + uint64_t leeway, dispatch_clock_now_cache_t nows) { - delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + delay += _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows); if (slowpath(_dispatch_timers_force_max_leeway)) { ke->data = (int64_t)(delay + leeway); ke->ext[1] = 0; @@ -2364,14 +2263,13 @@ _dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, #endif // __linux__ static bool -_dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, - unsigned int tidx) +_dispatch_timers_program2(dispatch_clock_now_cache_t nows, + _dispatch_kevent_qos_s *ke, unsigned int tidx) { bool poll; uint64_t delay, leeway; - _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, - (int)DISPATCH_TIMER_QOS(tidx), (int)DISPATCH_TIMER_KIND(tidx)); + _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, tidx); poll = (delay == 0); if (poll || delay == UINT64_MAX) { _dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx)); @@ -2400,7 +2298,7 @@ _dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, DISPATCH_NOINLINE static bool 
-_dispatch_timers_program(uint64_t nows[]) +_dispatch_timers_program(dispatch_clock_now_cache_t nows) { bool poll = false; unsigned int tidx, timerm = _dispatch_timers_mask; @@ -2433,7 +2331,7 @@ _dispatch_timers_calendar_change(void) _dispatch_timer_expired = true; for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { _dispatch_timers_mask |= - 1 << DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_WALL, qos); + 1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos); } } #endif @@ -2456,10 +2354,10 @@ _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke) static inline bool _dispatch_mgr_timers(void) { - uint64_t nows[DISPATCH_TIMER_KIND_COUNT] = {}; + dispatch_clock_now_cache_s nows = { }; bool expired = slowpath(_dispatch_timer_expired); if (expired) { - _dispatch_timers_run(nows); + _dispatch_timers_run(&nows); } bool reconfigure = slowpath(_dispatch_timers_reconfigure); if (reconfigure || expired) { @@ -2468,7 +2366,7 @@ _dispatch_mgr_timers(void) _dispatch_timers_reconfigure = false; } if (reconfigure || expired) { - expired = _dispatch_timer_expired = _dispatch_timers_program(nows); + expired = _dispatch_timer_expired = _dispatch_timers_program(&nows); expired = expired || _dispatch_mgr_q.dq_items_tail; } _dispatch_timers_mask = 0; @@ -2534,9 +2432,9 @@ static void _dispatch_timer_aggregate_get_delay(void *ctxt) { dispatch_timer_delay_t dtd = ctxt; - struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {}; - _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway, - -1, -1); + dispatch_clock_now_cache_s nows = { }; + _dispatch_timers_get_delay(&nows, dtd->timer, &dtd->delay, &dtd->leeway, + DISPATCH_TIMERS_GET_DELAY_ALL); } uint64_t @@ -3172,12 +3070,9 @@ _dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, int *nevents) DISPATCH_MEMORYPRESSURE_CRITICAL | \ DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) -#elif DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM -#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK DISPATCH_VM_PRESSURE #endif -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE static dispatch_source_t _dispatch_memorypressure_source; static void @@ -3211,9 +3106,6 @@ _dispatch_memorypressure_handler(void *context DISPATCH_UNUSED) if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) { malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK); } -#elif DISPATCH_USE_VM_PRESSURE_SOURCE - // we must have gotten DISPATCH_VM_PRESSURE - malloc_zone_pressure_relief(0,0); #endif } @@ -3230,7 +3122,7 @@ _dispatch_memorypressure_init(void) } #else static inline void _dispatch_memorypressure_init(void) {} -#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE #pragma mark - #pragma mark dispatch_mach @@ -3322,8 +3214,7 @@ static void _dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + unsigned long mask DISPATCH_UNUSED) { ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; #if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK @@ -6414,9 +6305,6 @@ _evfiltstr(short filt) #endif _evfilt2(EVFILT_FS); _evfilt2(EVFILT_USER); -#ifdef EVFILT_VM - _evfilt2(EVFILT_VM); -#endif #ifdef EVFILT_SOCK _evfilt2(EVFILT_SOCK); #endif @@ -6600,174 +6488,6 @@ 
dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, #endif } -static void -_dispatch_kevent_debugger2(void *context) -{ - struct sockaddr sa; - socklen_t sa_len = sizeof(sa); - int c, fd = (int)(long)context; - unsigned int i; - dispatch_kevent_t dk; - dispatch_source_t ds; - dispatch_source_refs_t dr; - FILE *debug_stream; - - c = accept(fd, &sa, &sa_len); - if (c == -1) { - if (errno != EAGAIN) { - (void)dispatch_assume_zero(errno); - } - return; - } -#if 0 - int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO - if (r == -1) { - (void)dispatch_assume_zero(errno); - } -#endif - debug_stream = fdopen(c, "a"); - if (!dispatch_assume(debug_stream)) { - close(c); - return; - } - - fprintf(debug_stream, "HTTP/1.0 200 OK\r\n"); - fprintf(debug_stream, "Content-type: text/html\r\n"); - fprintf(debug_stream, "Pragma: nocache\r\n"); - fprintf(debug_stream, "\r\n"); - fprintf(debug_stream, "\n"); - fprintf(debug_stream, "PID %u\n", getpid()); - fprintf(debug_stream, "\n
    \n"); - - for (i = 0; i < DSL_HASH_SIZE; i++) { - if (TAILQ_EMPTY(&_dispatch_sources[i])) { - continue; - } - TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) { - fprintf(debug_stream, "\t
  • DK %p ident %lu filter %s flags " - "0x%hx fflags 0x%x data 0x%lx udata %p\n", - dk, (unsigned long)dk->dk_kevent.ident, - _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, - dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data, - (void*)dk->dk_kevent.udata); - fprintf(debug_stream, "\t\t
      \n"); - TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { - ds = _dispatch_source_from_refs(dr); - fprintf(debug_stream, "\t\t\t
    • DS %p refcnt 0x%x state " - "0x%llx data 0x%lx mask 0x%lx flags 0x%x
    • \n", - ds, ds->do_ref_cnt + 1, ds->dq_state, - ds->ds_pending_data, ds->ds_pending_data_mask, - ds->dq_atomic_flags); - if (_dq_state_is_enqueued(ds->dq_state)) { - dispatch_queue_t dq = ds->do_targetq; - fprintf(debug_stream, "\t\t
      DQ: %p refcnt 0x%x state " - "0x%llx label: %s\n", dq, dq->do_ref_cnt + 1, - dq->dq_state, dq->dq_label ?: ""); - } - } - fprintf(debug_stream, "\t\t
    \n"); - fprintf(debug_stream, "\t
  • \n"); - } - } - fprintf(debug_stream, "
\n\n\n"); - fflush(debug_stream); - fclose(debug_stream); -} - -static void -_dispatch_kevent_debugger2_cancel(void *context) -{ - int ret, fd = (int)(long)context; - - ret = close(fd); - if (ret != -1) { - (void)dispatch_assume_zero(errno); - } -} - -static void -_dispatch_kevent_debugger(void *context DISPATCH_UNUSED) -{ - union { - struct sockaddr_in sa_in; - struct sockaddr sa; - } sa_u = { - .sa_in = { - .sin_family = AF_INET, - .sin_addr = { htonl(INADDR_LOOPBACK), }, - }, - }; - dispatch_source_t ds; - const char *valstr; - int val, r, fd, sock_opt = 1; - socklen_t slen = sizeof(sa_u); - -#ifndef __linux__ - if (issetugid()) { - return; - } -#endif - valstr = getenv("LIBDISPATCH_DEBUGGER"); - if (!valstr) { - return; - } - val = atoi(valstr); - if (val == 2) { - sa_u.sa_in.sin_addr.s_addr = 0; - } - fd = socket(PF_INET, SOCK_STREAM, 0); - if (fd == -1) { - (void)dispatch_assume_zero(errno); - return; - } - r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt, - (socklen_t) sizeof sock_opt); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } -#if 0 - r = fcntl(fd, F_SETFL, O_NONBLOCK); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } -#endif - r = bind(fd, &sa_u.sa, sizeof(sa_u)); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - r = listen(fd, SOMAXCONN); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - r = getsockname(fd, &sa_u.sa, &slen); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, - &_dispatch_mgr_q); - if (dispatch_assume(ds)) { - _dispatch_log("LIBDISPATCH: debug port: %hu", - (in_port_t)ntohs(sa_u.sa_in.sin_port)); - - /* ownership of fd transfers to ds */ - dispatch_set_context(ds, (void *)(long)fd); - dispatch_source_set_event_handler_f(ds, _dispatch_kevent_debugger2); - dispatch_source_set_cancel_handler_f(ds, - _dispatch_kevent_debugger2_cancel); - dispatch_resume(ds); - - return; - } -out_bad: - close(fd); -} - #if HAVE_MACH #ifndef MACH_PORT_TYPE_SPREQUEST diff --git a/src/source_internal.h b/src/source_internal.h index 41b6d11..a9bf1c5 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -87,18 +87,15 @@ enum { #define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1) #define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul) -#define DISPATCH_TIMER_KIND_WALL 0u -#define DISPATCH_TIMER_KIND_MACH 1u -#define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1) -#define DISPATCH_TIMER_KIND(tidx) ((uintptr_t)(tidx) & 0x1ul) +#define DISPATCH_TIMER_CLOCK(tidx) ((dispatch_clock_t)((uintptr_t)(tidx) & 1)) -#define DISPATCH_TIMER_INDEX(kind, qos) ((qos) << 1 | (kind)) +#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock)) #define DISPATCH_TIMER_INDEX_DISARM \ DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT) #define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1) #define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \ DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \ - DISPATCH_TIMER_KIND_WALL : DISPATCH_TIMER_KIND_MACH, \ + DISPATCH_CLOCK_WALL : DISPATCH_CLOCK_MACH, \ f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \ f & DISPATCH_TIMER_BACKGROUND ? 
DISPATCH_TIMER_QOS_BACKGROUND : \ DISPATCH_TIMER_QOS_NORMAL); }) @@ -120,7 +117,7 @@ struct dispatch_source_type_s { _dispatch_kevent_qos_s ke; uint64_t mask; void (*init)(dispatch_source_t ds, dispatch_source_type_t type, - uintptr_t handle, unsigned long mask, dispatch_queue_t q); + uintptr_t handle, unsigned long mask); }; struct dispatch_timer_source_s { diff --git a/src/swift/Block.swift b/src/swift/Block.swift index c1266ce..d4cae3c 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -37,24 +37,19 @@ public struct DispatchWorkItemFlags : OptionSet, RawRepresentable { @available(OSX 10.10, iOS 8.0, *) public class DispatchWorkItem { internal var _block: _DispatchBlock - internal var _group: DispatchGroup? - public init(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @convention(block) () -> ()) { + public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) { _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(flags.rawValue), qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) } - // Used by DispatchQueue.synchronously to provide a @noescape path through + // Used by DispatchQueue.synchronously to provide a path through // dispatch_block_t, as we know the lifetime of the block in question. - internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: @noescape () -> ()) { + internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) { _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(flags.rawValue), noescapeBlock) } public func perform() { - if let g = _group { - g.enter() - defer { g.leave() } - } _block() } @@ -63,14 +58,19 @@ public class DispatchWorkItem { } public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { - return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .Success : .TimedOut + return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .success : .timedOut } public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { - return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? .Success : .TimedOut + return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? .success : .timedOut } - public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute: @convention(block) () -> Void) { + public func notify( + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + queue: DispatchQueue, + execute: @escaping @convention(block) () -> ()) + { if qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: execute) dispatch_block_notify(_block, queue.__wrapped, item._block) @@ -92,17 +92,6 @@ public class DispatchWorkItem { } } -@available(OSX 10.10, iOS 8.0, *) -public extension DispatchWorkItem { - @available(*, deprecated, renamed: "DispatchWorkItem.wait(self:wallTimeout:)") - public func wait(timeout: DispatchWallTime) -> Int { - switch wait(wallTimeout: timeout) { - case .Success: return 0 - case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT - } - } -} - /// The dispatch_block_t typealias is different from usual closures in that it /// uses @convention(block). 
This is to avoid unnecessary bridging between /// C blocks and Swift closures, which interferes with dispatch APIs that depend @@ -111,4 +100,4 @@ internal typealias _DispatchBlock = @convention(block) () -> Void internal typealias dispatch_block_t = @convention(block) () -> Void @_silgen_name("_swift_dispatch_block_create_noescape") -internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: @noescape () -> ()) -> _DispatchBlock +internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: () -> ()) -> _DispatchBlock diff --git a/src/swift/Data.swift b/src/swift/Data.swift index 0d21e27..982411f 100644 --- a/src/swift/Data.swift +++ b/src/swift/Data.swift @@ -19,7 +19,6 @@ public struct DispatchData : RandomAccessCollection { public static let empty: DispatchData = DispatchData(data: _swift_dispatch_data_empty()) -#if false /* FIXME: dragging in _TMBO (Objective-C) */ public enum Deallocator { /// Use `free` case free @@ -28,9 +27,15 @@ public struct DispatchData : RandomAccessCollection { case unmap /// A custom deallocator - case custom(DispatchQueue?, @convention(block) () -> Void) - - private var _deallocator: (DispatchQueue?, @convention(block) () -> Void) { + // FIXME: Want @convention(block) here to minimize the overhead of + // doing the conversion (once per custom enum instance instead + // of once per call to DispatchData.init using the enum instance). + // However, adding the annotation here results in Data.o containing + // a reference to _TMBO (opaque metadata for Builtin.UnknownObject) + // which is only made available on platforms with Objective-C. + case custom(DispatchQueue?, () -> Void) + + fileprivate var _deallocator: (DispatchQueue?, @convention(block) () -> Void) { switch self { case .free: return (nil, _dispatch_data_destructor_free()) case .unmap: return (nil, _dispatch_data_destructor_munmap()) @@ -38,54 +43,59 @@ public struct DispatchData : RandomAccessCollection { } } } -#endif - internal var __wrapped: dispatch_data_t + + internal var __wrapped: __DispatchData /// Initialize a `Data` with copied memory content. /// /// - parameter bytes: A pointer to the memory. It will be copied. - /// - parameter count: The number of bytes to copy. public init(bytes buffer: UnsafeBufferPointer) { - __wrapped = dispatch_data_create( - buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default()) + let d = dispatch_data_create(buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default()) + self.init(data: d) } -#if false /* FIXME: dragging in _TMBO (Objective-C) */ + /// Initialize a `Data` without copying the bytes. /// - /// - parameter bytes: A pointer to the bytes. - /// - parameter count: The size of the bytes. + /// - parameter bytes: A buffer pointer containing the data. /// - parameter deallocator: Specifies the mechanism to free the indicated buffer. 
public init(bytesNoCopy bytes: UnsafeBufferPointer, deallocator: Deallocator = .free) { let (q, b) = deallocator._deallocator - - __wrapped = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b) + let d = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b) + self.init(data: d) } -#endif + internal init(data: dispatch_data_t) { - __wrapped = data + __wrapped = __DispatchData(data: data, owned: true) + } + + internal init(borrowedData: dispatch_data_t) { + __wrapped = __DispatchData(data: borrowedData, owned: false) } public var count: Int { - return CDispatch.dispatch_data_get_size(__wrapped) + return CDispatch.dispatch_data_get_size(__wrapped.__wrapped) } public func withUnsafeBytes( - body: @noescape (UnsafePointer) throws -> Result) rethrows -> Result + body: (UnsafePointer) throws -> Result) rethrows -> Result { - var ptr: UnsafePointer? = nil - var size = 0; - let data = CDispatch.dispatch_data_create_map(__wrapped, &ptr, &size) + var ptr: UnsafeRawPointer? = nil + var size = 0 + let data = CDispatch.dispatch_data_create_map(__wrapped.__wrapped, &ptr, &size) + let contentPtr = ptr!.bindMemory( + to: ContentType.self, capacity: size / MemoryLayout.stride) defer { _fixLifetime(data) } - return try body(UnsafePointer(ptr!)) + return try body(contentPtr) } public func enumerateBytes( - block: @noescape (buffer: UnsafeBufferPointer, byteIndex: Int, stop: inout Bool) -> Void) + block: @noescape (_ buffer: UnsafeBufferPointer, _ byteIndex: Int, _ stop: inout Bool) -> Void) { - _swift_dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer, size: Int) in - let bp = UnsafeBufferPointer(start: UnsafePointer(ptr), count: size) + _swift_dispatch_data_apply(__wrapped.__wrapped) { (_, offset: Int, ptr: UnsafeRawPointer, size: Int) in + let bytePtr = ptr.bindMemory(to: UInt8.self, capacity: size) + let bp = UnsafeBufferPointer(start: bytePtr, count: size) var stop = false - block(buffer: bp, byteIndex: offset, stop: &stop) + block(bp, offset, &stop) return !stop } } @@ -103,20 +113,23 @@ public struct DispatchData : RandomAccessCollection { /// /// - parameter data: The data to append to this data. public mutating func append(_ other: DispatchData) { - let data = CDispatch.dispatch_data_create_concat(__wrapped, other.__wrapped) - __wrapped = data + let data = CDispatch.dispatch_data_create_concat(__wrapped.__wrapped, other.__wrapped.__wrapped) + __wrapped = __DispatchData(data: data, owned: true) } /// Append a buffer of bytes to the data. /// /// - parameter buffer: The buffer of bytes to append. The size is calculated from `SourceType` and `buffer.count`. 
public mutating func append(_ buffer : UnsafeBufferPointer) { - self.append(UnsafePointer(buffer.baseAddress!), count: buffer.count * sizeof(SourceType.self)) + let count = buffer.count * sizeof(SourceType.self) + buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: count) { + self.append($0, count: count) + } } - private func _copyBytesHelper(to pointer: UnsafeMutablePointer, from range: CountableRange) { + private func _copyBytesHelper(to pointer: UnsafeMutableRawPointer, from range: CountableRange) { var copiedCount = 0 - _ = CDispatch.dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer, size: Int) in + _ = CDispatch.dispatch_data_apply(__wrapped.__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafeRawPointer, size: Int) in let limit = Swift.min((range.endIndex - range.startIndex) - copiedCount, size) memcpy(pointer + copiedCount, ptr, limit) copiedCount += limit @@ -144,7 +157,7 @@ public struct DispatchData : RandomAccessCollection { /// Copy the contents of the data into a buffer. /// - /// This function copies the bytes in `range` from the data into the buffer. If the count of the `range` is greater than `sizeof(DestinationType) * buffer.count` then the first N bytes will be copied into the buffer. + /// This function copies the bytes in `range` from the data into the buffer. If the count of the `range` is greater than `MemoryLayout.stride * buffer.count` then the first N bytes will be copied into the buffer. /// - precondition: The range must be within the bounds of the data. Otherwise `fatalError` is called. /// - parameter buffer: A buffer to copy the data into. /// - parameter range: A range in the data to copy into the buffer. If the range is empty, this function will return 0 without copying anything. If the range is nil, as much data as will fit into `buffer` is copied. @@ -162,30 +175,31 @@ public struct DispatchData : RandomAccessCollection { precondition(r.endIndex >= 0) precondition(r.endIndex <= cnt, "The range is outside the bounds of the data") - copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * sizeof(DestinationType.self), r.count)) + copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * MemoryLayout.stride, r.count)) } else { - copyRange = 0...stride, cnt) } guard !copyRange.isEmpty else { return 0 } - let pointer : UnsafeMutablePointer = UnsafeMutablePointer(buffer.baseAddress!) - _copyBytesHelper(to: pointer, from: copyRange) + let bufferCapacity = buffer.count * sizeof(DestinationType.self) + buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: bufferCapacity) { + _copyBytesHelper(to: $0, from: copyRange) + } return copyRange.count } /// Sets or returns the byte at the specified index. public subscript(index: Index) -> UInt8 { var offset = 0 - let subdata = CDispatch.dispatch_data_copy_region(__wrapped, index, &offset) + let subdata = CDispatch.dispatch_data_copy_region(__wrapped.__wrapped, index, &offset) - var ptr: UnsafePointer? = nil + var ptr: UnsafeRawPointer? = nil var size = 0 let map = CDispatch.dispatch_data_create_map(subdata, &ptr, &size) defer { _fixLifetime(map) } - let pptr = UnsafePointer(ptr!) - return pptr[index - offset] + return ptr!.load(fromByteOffset: index - offset, as: UInt8.self) } public subscript(bounds: Range) -> RandomAccessSlice { @@ -197,13 +211,13 @@ public struct DispatchData : RandomAccessCollection { /// - parameter range: The range to copy. 
@@ -197,13 +211,13 @@ public struct DispatchData : RandomAccessCollection {
 	/// - parameter range: The range to copy.
 	public func subdata(in range: CountableRange<Index>) -> DispatchData {
 		let subrange = CDispatch.dispatch_data_create_subrange(
-			__wrapped, range.startIndex, range.endIndex - range.startIndex)
+			__wrapped.__wrapped, range.startIndex, range.endIndex - range.startIndex)
 		return DispatchData(data: subrange)
 	}
 
 	public func region(location: Int) -> (data: DispatchData, offset: Int) {
 		var offset: Int = 0
-		let data = CDispatch.dispatch_data_copy_region(__wrapped, location, &offset)
+		let data = CDispatch.dispatch_data_copy_region(__wrapped.__wrapped, location, &offset)
 		return (DispatchData(data: data), offset)
 	}
 
@@ -233,33 +247,34 @@ public struct DispatchData : RandomAccessCollection {
 public struct DispatchDataIterator : IteratorProtocol, Sequence {
 
-	/// Create an iterator over the given DisaptchData
+	/// Create an iterator over the given DispatchData
 	public init(_data: DispatchData) {
-		var ptr: UnsafePointer<Void>?
+		var ptr: UnsafeRawPointer?
 		self._count = 0
-		self._data = CDispatch.dispatch_data_create_map(_data.__wrapped, &ptr, &self._count)
-		self._ptr = UnsafePointer(ptr!)
+		self._data = __DispatchData(data: CDispatch.dispatch_data_create_map(_data.__wrapped.__wrapped, &ptr, &self._count), owned: true)
+		self._ptr = ptr
 		self._position = _data.startIndex
+
+		// The only time we expect a 'nil' pointer is when the data is empty.
+		assert(self._ptr != nil || self._count == self._position)
 	}
 
 	/// Advance to the next element and return it, or `nil` if no next
 	/// element exists.
-	///
-	/// - Precondition: No preceding call to `self.next()` has returned `nil`.
 	public mutating func next() -> DispatchData._Element? {
 		if _position == _count { return nil }
-		let element = _ptr[_position];
+		let element = _ptr.load(fromByteOffset: _position, as: UInt8.self)
 		_position = _position + 1
 		return element
 	}
 
-	internal let _data: dispatch_data_t
-	internal var _ptr: UnsafePointer<UInt8>
+	internal let _data: __DispatchData
+	internal var _ptr: UnsafeRawPointer!
internal var _count: Int internal var _position: DispatchData.Index } -typealias _swift_data_applier = @convention(block) @noescape (dispatch_data_t, Int, UnsafePointer, Int) -> Bool +typealias _swift_data_applier = @convention(block) (dispatch_data_t, Int, UnsafeRawPointer, Int) -> Bool @_silgen_name("_swift_dispatch_data_apply") internal func _swift_dispatch_data_apply(_ data: dispatch_data_t, _ block: _swift_data_applier) diff --git a/src/swift/Dispatch.swift b/src/swift/Dispatch.swift index 2b9cb21..ec73acb 100644 --- a/src/swift/Dispatch.swift +++ b/src/swift/Dispatch.swift @@ -59,11 +59,6 @@ public struct DispatchQoS : Equatable { @available(OSX 10.10, iOS 8.0, *) public static let `default` = DispatchQoS(qosClass: .default, relativePriority: 0) - @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "DispatchQoS.default") - @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "DispatchQoS.default") - @available(*, deprecated, renamed: "DispatchQoS.default") - public static let defaultQoS = DispatchQoS.default - @available(OSX 10.10, iOS 8.0, *) public static let userInitiated = DispatchQoS(qosClass: .userInitiated, relativePriority: 0) @@ -82,11 +77,6 @@ public struct DispatchQoS : Equatable { @available(OSX 10.10, iOS 8.0, *) case `default` - @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "QoSClass.default") - @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "QoSClass.default") - @available(*, deprecated, renamed: "QoSClass.default") - static let defaultQoS = QoSClass.default - @available(OSX 10.10, iOS 8.0, *) case userInitiated @@ -95,9 +85,11 @@ public struct DispatchQoS : Equatable { case unspecified + // _OSQoSClass is internal on Linux, so this initialiser has to + // remain as an internal init. @available(OSX 10.10, iOS 8.0, *) - internal init?(qosClass: _OSQoSClass) { - switch qosClass { + internal init?(rawValue: _OSQoSClass) { + switch rawValue { case .QOS_CLASS_BACKGROUND: self = .background case .QOS_CLASS_UTILITY: self = .utility case .QOS_CLASS_DEFAULT: self = .default @@ -135,14 +127,14 @@ public func ==(a: DispatchQoS, b: DispatchQoS) -> Bool { public enum DispatchTimeoutResult { static let KERN_OPERATION_TIMED_OUT:Int = 49 - case Success - case TimedOut + case success + case timedOut } /// dispatch_group public extension DispatchGroup { - public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @convention(block) () -> ()) { + public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @escaping @convention(block) () -> ()) { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) dispatch_group_notify(self.__wrapped, queue.__wrapped, item._block) @@ -161,21 +153,11 @@ public extension DispatchGroup { } public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { - return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut } public func wait(wallTimeout timeout: DispatchWallTime) -> DispatchTimeoutResult { - return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? 
.Success : .TimedOut - } -} - -public extension DispatchGroup { - @available(*, deprecated, renamed: "DispatchGroup.wait(self:wallTimeout:)") - public func wait(walltime timeout: DispatchWallTime) -> Int { - switch wait(wallTimeout: timeout) { - case .Success: return 0 - case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT - } + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut } } @@ -192,20 +174,10 @@ public extension DispatchSemaphore { } public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { - return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut } public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { - return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? .Success : .TimedOut - } -} - -public extension DispatchSemaphore { - @available(*, deprecated, renamed: "DispatchSemaphore.wait(self:wallTimeout:)") - public func wait(walltime timeout: DispatchWalltime) -> Int { - switch wait(wallTimeout: timeout) { - case .Success: return 0 - case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT - } + return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? .success : .timedOut } } diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index 1e5ec74..ae82299 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -51,10 +51,16 @@ static void _dispatch_overlay_constructor() { #endif /* USE_OBJC */ -#if 0 /* FIXME -- adding directory to include path may need build-script plumbing to do properly... */ -#include "swift/Runtime/Config.h" + +// Replicate the SWIFT_CC(swift) calling convention macro from +// swift/include/swift/Runtime/Config.h because it is +// quite awkward to include Config.h and its recursive includes +// in dispatch. This define must be manually kept in synch +#define SWIFT_CC(CC) SWIFT_CC_##CC +#if SWIFT_USE_SWIFTCALL +#define SWIFT_CC_swift __attribute__((swiftcall)) #else -#define SWIFT_CC(x) /* FIXME!! 
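
The `.success`/`.timedOut` renames above touch every timeout call site; a minimal
hedged sketch, with the queue label and timeout chosen for illustration.

    let q = DispatchQueue(label: "com.example.sem")
    let semaphore = DispatchSemaphore(value: 0)
    q.async { _ = semaphore.signal() }
    switch semaphore.wait(timeout: .now() + .milliseconds(500)) {
    case .success: print("signalled in time")
    case .timedOut: print("gave up waiting")
    }
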
*/ +#define SWIFT_CC_swift #endif SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE diff --git a/src/swift/IO.swift b/src/swift/IO.swift index 6e6b669..8ce417a 100644 --- a/src/swift/IO.swift +++ b/src/swift/IO.swift @@ -34,15 +34,15 @@ public extension DispatchIO { public static let strictInterval = IntervalFlags(rawValue: 1) } - public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData, error: Int32) -> Void) { + public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) { dispatch_read(fromFileDescriptor, maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in - handler(data: DispatchData(data: data), error: error) + handler(DispatchData(borrowedData: data), error) } } - public class func write(fromFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData?, error: Int32) -> Void) { - dispatch_write(fromFileDescriptor, data.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in - handler(data: data.flatMap { DispatchData(data: $0) }, error: error) + public class func write(toFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData?, _ error: Int32) -> Void) { + dispatch_write(toFileDescriptor, data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in + handler(data.flatMap { DispatchData(borrowedData: $0) }, error) } } @@ -50,7 +50,7 @@ public extension DispatchIO { type: StreamType, fileDescriptor: Int32, queue: DispatchQueue, - cleanupHandler: (error: Int32) -> Void) + cleanupHandler: @escaping (_ error: Int32) -> Void) { self.init(__type: type.rawValue, fd: fileDescriptor, queue: queue, handler: cleanupHandler) } @@ -61,7 +61,7 @@ public extension DispatchIO { oflag: Int32, mode: mode_t, queue: DispatchQueue, - cleanupHandler: (error: Int32) -> Void) + cleanupHandler: @escaping (_ error: Int32) -> Void) { self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler) } @@ -70,60 +70,28 @@ public extension DispatchIO { type: StreamType, io: DispatchIO, queue: DispatchQueue, - cleanupHandler: (error: Int32) -> Void) + cleanupHandler: @escaping (_ error: Int32) -> Void) { self.init(__type: type.rawValue, io: io, queue: queue, handler: cleanupHandler) } - public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { + public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: @escaping (_ done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) { dispatch_io_read(self.__wrapped, offset, length, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in - ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error) + ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error) } } - public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { - dispatch_io_write(self.__wrapped, offset, data.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in - ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error) + public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: @escaping (_ 
done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) { + dispatch_io_write(self.__wrapped, offset, data.__wrapped.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in + ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error) } } public func setInterval(interval: DispatchTimeInterval, flags: IntervalFlags = []) { - dispatch_io_set_interval(self.__wrapped, interval.rawValue, flags.rawValue) + dispatch_io_set_interval(self.__wrapped, UInt64(interval.rawValue), flags.rawValue) } public func close(flags: CloseFlags = []) { dispatch_io_close(self.__wrapped, flags.rawValue) } } - -extension DispatchIO { - @available(*, deprecated, renamed: "DispatchIO.read(fromFileDescriptor:maxLength:runningHandlerOn:handler:)") - public class func read(fd: Int32, length: Int, queue: DispatchQueue, handler: (DispatchData, Int32) -> Void) { - DispatchIO.read(fromFileDescriptor: fd, maxLength: length, runningHandlerOn: queue, handler: handler) - } - - @available(*, deprecated, renamed: "DispatchIO.write(fromFileDescriptor:data:runningHandlerOn:handler:)") - public class func write(fd: Int32, data: DispatchData, queue: DispatchQueue, handler: (DispatchData?, Int32) -> Void) { - DispatchIO.write(fromFileDescriptor: fd, data: data, runningHandlerOn: queue, handler: handler) - } - - @available(*, deprecated, renamed: "DispatchIO.barrier(self:execute:)") - public func withBarrier(barrier work: () -> ()) { - barrier(execute: work) - } - - @available(*, deprecated, renamed: "DispatchIO.setLimit(self:highWater:)") - public func setHighWater(highWater: Int) { - setLimit(highWater: highWater) - } - - @available(*, deprecated, renamed: "DispatchIO.setLimit(self:lowWater:)") - public func setLowWater(lowWater: Int) { - setLimit(lowWater: lowWater) - } - - @available(*, deprecated, renamed: "DispatchIO.setInterval(self:interval:flags:)") - public func setInterval(interval: UInt64, flags: IntervalFlags) { - setInterval(interval: .nanoseconds(Int(interval)), flags: flags) - } -} diff --git a/src/swift/Private.swift b/src/swift/Private.swift index e38f728..5443b7c 100644 --- a/src/swift/Private.swift +++ b/src/swift/Private.swift @@ -14,62 +14,62 @@ import CDispatch -@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +@available(*, unavailable, renamed:"DispatchQueue.init(label:qos:attributes:autoreleaseFrequency:target:)") public func dispatch_queue_create(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?) -> DispatchQueue { fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +@available(*, unavailable, renamed:"DispatchQueue.init(label:qos:attributes:autoreleaseFrequency:target:)") public func dispatch_queue_create_with_target(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?, _ queue: DispatchQueue?) 
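
A hedged sketch of the relabelled DispatchIO conveniences from the hunks above,
reading from file descriptor 0 (stdin); the queue label is illustrative and error
handling is elided.

    let ioQueue = DispatchQueue(label: "com.example.io")
    let channel = DispatchIO(type: .stream, fileDescriptor: 0, queue: ioQueue) { error in
        print("channel closed, error code \(error)")
    }
    channel.setLimit(lowWater: 1)
    channel.read(offset: 0, length: 1024, queue: ioQueue) { done, data, error in
        if let chunk = data { print("read \(chunk.count) bytes") }
        if done { channel.close() }
    }
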
-> DispatchQueue { fatalError() } @available(*, unavailable, renamed:"DispatchIO.init(type:fileDescriptor:queue:cleanupHandler:)") -public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } @available(*, unavailable, renamed:"DispatchIO.init(type:path:oflag:mode:queue:cleanupHandler:)") -public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } @available(*, unavailable, renamed:"DispatchIO.init(type:io:queue:cleanupHandler:)") -public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } @available(*, unavailable, renamed:"DispatchIO.read(fileDescriptor:length:queue:handler:)") -public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ handler: (dispatch_data_t, Int32) -> Void) +public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ handler: @escaping (dispatch_data_t, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchIO.read(self:offset:length:queue:ioHandler:)") -func dispatch_io_read(_ channel: DispatchIO, _ offset: off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +func dispatch_io_read(_ channel: DispatchIO, _ offset: off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: @escaping (Bool, dispatch_data_t?, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchIO.write(self:offset:data:queue:ioHandler:)") -func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: @escaping (Bool, dispatch_data_t?, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchIO.write(fileDescriptor:data:queue:handler:)") -func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: (dispatch_data_t?, Int32) -> Void) +func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: @escaping (dispatch_data_t?, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchData.init(bytes:)") -public func dispatch_data_create(_ buffer: UnsafePointer, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) -> dispatch_data_t +public func dispatch_data_create(_ buffer: UnsafeRawPointer, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) 
-> dispatch_data_t { fatalError() } @@ -81,7 +81,7 @@ public func dispatch_data_get_size(_ data: dispatch_data_t) -> Int } @available(*, unavailable, renamed:"DispatchData.withUnsafeBytes(self:body:)") -public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer?>?, _ size_ptr: UnsafeMutablePointer?) -> dispatch_data_t +public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer?, _ size_ptr: UnsafeMutablePointer?) -> dispatch_data_t { fatalError() } @@ -99,7 +99,7 @@ public func dispatch_data_create_subrange(_ data: dispatch_data_t, _ offset: Int } @available(*, unavailable, renamed:"DispatchData.enumerateBytes(self:block:)") -public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: (dispatch_data_t, Int, UnsafePointer, Int) -> Bool) -> Bool +public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: @escaping (dispatch_data_t, Int, UnsafeRawPointer, Int) -> Bool) -> Bool { fatalError() } @@ -111,13 +111,13 @@ public func dispatch_data_copy_region(_ data: dispatch_data_t, _ location: Int, } @available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") -public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } @available(*, unavailable, renamed: "DispatchGroup.notify(self:qos:flags:queue:execute:)") -public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } @@ -141,13 +141,13 @@ public func dispatch_io_set_interval(_ channel: DispatchIO, _ interval: UInt64, } @available(*, unavailable, renamed:"DispatchQueue.apply(attributes:iterations:execute:)") -public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: @noescape (Int) -> Void) +public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: (Int) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:execute:)") -public func dispatch_async(_ queue: DispatchQueue, _ block: () -> Void) +public func dispatch_async(_ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } @@ -159,24 +159,24 @@ public func dispatch_get_global_queue(_ identifier: Int, _ flags: UInt) -> Dispa } @available(*, unavailable, renamed: "DispatchQueue.main") -public func dispatch_get_main_queue() -> DispatchQueue +public func dispatch_get_main_queue() -> DispatchQueue { fatalError() } -@available(*, unavailable, renamed:"DispatchQueueAttributes.initiallyInactive") +@available(*, unavailable, renamed:"DispatchQueue.Attributes.initiallyInactive") public func dispatch_queue_attr_make_initially_inactive(_ attr: dispatch_queue_attr_t?) 
-> dispatch_queue_attr_t { fatalError() } -@available(*, unavailable, renamed:"DispatchQueueAttributes.autoreleaseWorkItem") +@available(*, unavailable, renamed:"DispatchQueue.AutoreleaseFrequency.workItem") public func dispatch_queue_attr_make_with_autorelease_frequency(_ attr: dispatch_queue_attr_t?, _ frequency: dispatch_autorelease_frequency_t) -> dispatch_queue_attr_t { fatalError() } -@available(*, unavailable, renamed:"DispatchQueueAttributes.qosUserInitiated") +@available(*, unavailable, renamed:"DispatchQoS") public func dispatch_queue_attr_make_with_qos_class(_ attr: dispatch_queue_attr_t?, _ qos_class: dispatch_qos_class_t, _ relative_priority: Int32) -> dispatch_queue_attr_t { fatalError() @@ -194,38 +194,38 @@ public func dispatch_queue_get_qos_class(_ queue: DispatchQueue, _ relative_prio fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.after(self:when:execute:)") -public func dispatch_after(_ when: dispatch_time_t, _ queue: DispatchQueue, _ block: () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.asyncAfter(self:deadline:qos:flags:execute:)") +public func dispatch_after(_ when: dispatch_time_t, _ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") -public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.async(self:group:qos:flags:execute:)") +public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.synchronously(self:flags:execute:)") -public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: @noescape () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.sync(self:flags:execute:)") +public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: () -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchQueue.setSpecific(self:key:value:)") -public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafePointer, _ context: UnsafeMutablePointer?, _ destructor: (@convention(c) (UnsafeMutablePointer?) -> Void)?) +public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafeRawPointer, _ context: UnsafeMutableRawPointer?, _ destructor: (@convention(c) (UnsafeMutableRawPointer?) -> Void)?) { fatalError() } @available(*, unavailable, renamed:"DispatchQueue.getSpecific(self:key:)") -public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafePointer) -> UnsafeMutablePointer? +public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafeRawPointer) -> UnsafeMutableRawPointer? { fatalError() } @available(*, unavailable, renamed:"DispatchQueue.getSpecific(key:)") -public func dispatch_get_specific(_ key: UnsafePointer) -> UnsafeMutablePointer? +public func dispatch_get_specific(_ key: UnsafeRawPointer) -> UnsafeMutableRawPointer? 
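
The renames above point at the typed queue-specific storage API; a short sketch,
with the key's payload type and the labels chosen for illustration.

    let tagKey = DispatchSpecificKey<String>()
    let worker = DispatchQueue(label: "com.example.worker")
    worker.setSpecific(key: tagKey, value: "worker-1")
    worker.sync {
        // Reads the value attached to the queue this block runs on.
        print(DispatchQueue.getSpecific(key: tagKey) ?? "no tag")
    }
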
{ fatalError() } @@ -338,22 +338,22 @@ public func dispatch_walltime(_ when: UnsafePointer?, _ delta: Int64) fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUserInitiated") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.high") public var DISPATCH_QUEUE_PRIORITY_HIGH: Int { fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosDefault") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.default") public var DISPATCH_QUEUE_PRIORITY_DEFAULT: Int { fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUtility") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.low") public var DISPATCH_QUEUE_PRIORITY_LOW: Int { fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosBackground") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.background") public var DISPATCH_QUEUE_PRIORITY_BACKGROUND: Int { fatalError() } diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 5a45fdc..b7628c9 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -14,82 +14,6 @@ import CDispatch -public struct DispatchQueueAttributes : OptionSet { - public let rawValue: UInt64 - public init(rawValue: UInt64) { self.rawValue = rawValue } - - public static let serial = DispatchQueueAttributes(rawValue: 0<<0) - public static let concurrent = DispatchQueueAttributes(rawValue: 1<<1) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) - public static let initiallyInactive = DispatchQueueAttributes(rawValue: 1<<2) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) - public static let autoreleaseInherit = DispatchQueueAttributes(rawValue: 1<<3) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) - public static let autoreleaseWorkItem = DispatchQueueAttributes(rawValue: 1<<4) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) - public static let autoreleaseNever = DispatchQueueAttributes(rawValue: 1<<5) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInteractive = DispatchQueueAttributes(rawValue: 1<<6) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInitiated = DispatchQueueAttributes(rawValue: 1<<7) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosDefault = DispatchQueueAttributes(rawValue: 1<<8) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUtility = DispatchQueueAttributes(rawValue: 1<<9) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosBackground = DispatchQueueAttributes(rawValue: 1<<10) - - @available(*, deprecated, message: ".noQoS has no effect, it should not be used") - public static let noQoS = DispatchQueueAttributes(rawValue: 1<<11) - - private var attr: dispatch_queue_attr_t? { - var attr: dispatch_queue_attr_t? 
- - if self.contains(.concurrent) { - attr = _swift_dispatch_queue_concurrent() - } - if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { - if self.contains(.initiallyInactive) { - attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr) - } - if self.contains(.autoreleaseWorkItem) { - // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM - attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1)) - } else if self.contains(.autoreleaseInherit) { - // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT - attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0)) - } else if self.contains(.autoreleaseNever) { - // DISPATCH_AUTORELEASE_FREQUENCY_NEVER - attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2)) - } - } - if #available(OSX 10.10, iOS 8.0, *) { - if self.contains(.qosUserInteractive) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue, 0) - } else if self.contains(.qosUserInitiated) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue, 0) - } else if self.contains(.qosDefault) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_DEFAULT.rawValue, 0) - } else if self.contains(.qosUtility) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_UTILITY.rawValue, 0) - } else if self.contains(.qosBackground) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_BACKGROUND.rawValue, 0) - } - } - return attr - } -} - - public final class DispatchSpecificKey { public init() {} } @@ -100,66 +24,86 @@ internal class _DispatchSpecificValue { } public extension DispatchQueue { - - public struct GlobalAttributes : OptionSet { + public struct Attributes : OptionSet { public let rawValue: UInt64 public init(rawValue: UInt64) { self.rawValue = rawValue } - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInteractive = GlobalAttributes(rawValue: 1<<0) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInitiated = GlobalAttributes(rawValue: 1<<1) + public static let concurrent = Attributes(rawValue: 1<<1) - @available(OSX 10.10, iOS 8.0, *) - public static let qosDefault = GlobalAttributes(rawValue: 1<<2) + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let initiallyInactive = Attributes(rawValue: 1<<2) - @available(OSX 10.10, iOS 8.0, *) - public static let qosUtility = GlobalAttributes(rawValue: 1<<3) + fileprivate func _attr() -> dispatch_queue_attr_t? { + var attr: dispatch_queue_attr_t? = nil - @available(OSX 10.10, iOS 8.0, *) - public static let qosBackground = GlobalAttributes(rawValue: 1<<4) - - // Avoid using our own deprecated constants here by declaring - // non-deprecated constants and then basing the public ones on those. 
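
This hunk folds the old per-priority option set into the two entry points added
below: a deprecated `global(priority:)` and a QoS-based `global(qos:)`. A hedged
sketch of both spellings:

    let background = DispatchQueue.global(qos: .background)   // preferred
    let legacy = DispatchQueue.global(priority: .high)        // deprecated spelling
    background.async { print("background work") }
    legacy.async { print("legacy high-priority work") }
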
- internal static let _priorityHigh = GlobalAttributes(rawValue: 1<<5) - internal static let _priorityDefault = GlobalAttributes(rawValue: 1<<6) - internal static let _priorityLow = GlobalAttributes(rawValue: 1<<7) - internal static let _priorityBackground = GlobalAttributes(rawValue: 1<<8) + if self.contains(.concurrent) { + attr = _swift_dispatch_queue_concurrent() + } + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + if self.contains(.initiallyInactive) { + attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr) + } + } + return attr + } + } + public enum GlobalQueuePriority { @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityHigh = _priorityHigh + case high @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityDefault = _priorityDefault + case `default` @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityLow = _priorityLow + case low @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityBackground = _priorityBackground + case background internal var _translatedValue: Int { - if #available(OSX 10.10, iOS 8.0, *) { - if self.contains(.qosUserInteractive) { return Int(_OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue) } - else if self.contains(.qosUserInitiated) { return Int(_OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue) } - else if self.contains(.qosDefault) { return Int(_OSQoSClass.QOS_CLASS_DEFAULT.rawValue) } - else if self.contains(.qosUtility) { return Int(_OSQoSClass.QOS_CLASS_UTILITY.rawValue) } - else { return Int(_OSQoSClass.QOS_CLASS_BACKGROUND.rawValue) } + switch self { + case .high: return 2 // DISPATCH_QUEUE_PRIORITY_HIGH + case .default: return 0 // DISPATCH_QUEUE_PRIORITY_DEFAULT + case .low: return -2 // DISPATCH_QUEUE_PRIORITY_LOW + case .background: return Int(Int16.min) // DISPATCH_QUEUE_PRIORITY_BACKGROUND } - if self.contains(._priorityHigh) { return 2 } // DISPATCH_QUEUE_PRIORITY_HIGH - else if self.contains(._priorityDefault) { return 0 } // DISPATCH_QUEUE_PRIORITY_DEFAULT - else if self.contains(._priorityLow) { return -2 } // // DISPATCH_QUEUE_PRIORITY_LOW - else if self.contains(._priorityBackground) { return Int(Int16.min) } // // DISPATCH_QUEUE_PRIORITY_BACKGROUND - return 0 } } - public class func concurrentPerform(iterations: Int, execute work: @noescape (Int) -> Void) { + public enum AutoreleaseFrequency { + case inherit + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + case workItem + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + case never + + internal func _attr(attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t? 
{ + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + switch self { + case .inherit: + // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0)) + case .workItem: + // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1)) + case .never: + // DISPATCH_AUTORELEASE_FREQUENCY_NEVER + return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2)) + } + } else { + return attr + } + } + } + + public class func concurrentPerform(iterations: Int, execute work: (Int) -> Void) { _swift_dispatch_apply_current(iterations, work) } @@ -167,9 +111,15 @@ public extension DispatchQueue { return DispatchQueue(queue: _swift_dispatch_get_main_queue()) } - public class func global(attributes: GlobalAttributes = []) -> DispatchQueue { - // SubOptimal? Should we be caching these global DispatchQueue objects? - return DispatchQueue(queue:dispatch_get_global_queue(attributes._translatedValue, 0)) + @available(OSX, deprecated: 10.10, message: "") + @available(*, deprecated: 8.0, message: "") + public class func global(priority: GlobalQueuePriority) -> DispatchQueue { + return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(priority._translatedValue, 0)) + } + + @available(OSX 10.10, iOS 8.0, *) + public class func global(qos: DispatchQoS.QoSClass = .default) -> DispatchQueue { + return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(Int(qos.rawValue.rawValue), 0)) } public class func getSpecific(key: DispatchSpecificKey) -> T? { @@ -185,13 +135,23 @@ public extension DispatchQueue { public convenience init( label: String, - attributes: DispatchQueueAttributes = .serial, + qos: DispatchQoS = .unspecified, + attributes: Attributes = [], + autoreleaseFrequency: AutoreleaseFrequency = .inherit, target: DispatchQueue? = nil) { + var attr = attributes._attr() + if autoreleaseFrequency != .inherit { + attr = autoreleaseFrequency._attr(attr: attr) + } + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority)) + } + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { - self.init(__label: label, attr: attributes.attr, queue: target) + self.init(__label: label, attr: attr, queue: target) } else { - self.init(__label: label, attr: attributes.attr) + self.init(__label: label, attr: attr) if let tq = target { self.setTarget(queue: tq) } } } @@ -202,50 +162,56 @@ public extension DispatchQueue { @available(OSX 10.10, iOS 8.0, *) public func sync(execute workItem: DispatchWorkItem) { - dispatch_sync(self.__wrapped, workItem._block) + CDispatch.dispatch_sync(self.__wrapped, workItem._block) } @available(OSX 10.10, iOS 8.0, *) public func async(execute workItem: DispatchWorkItem) { - // _swift_dispatch_{group,}_async preserves the @convention(block) - // for work item blocks. - if let g = workItem._group { - dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) - } else { - dispatch_async(self.__wrapped, workItem._block) - } + CDispatch.dispatch_async(self.__wrapped, workItem._block) } - public func async(group: DispatchGroup? 
= nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { - if group == nil && qos == .unspecified && flags.isEmpty { + @available(OSX 10.10, iOS 8.0, *) + public func async(group: DispatchGroup, execute workItem: DispatchWorkItem) { + CDispatch.dispatch_group_async(group.__wrapped, self.__wrapped, workItem._block) + } + + public func async( + group: DispatchGroup? = nil, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) + { + if group == nil && qos == .unspecified { // Fast-path route for the most common API usage - dispatch_async(self.__wrapped, work) - return + if flags.isEmpty { + CDispatch.dispatch_async(self.__wrapped, work) + return + } else if flags == .barrier { + CDispatch.dispatch_barrier_async(self.__wrapped, work) + return + } } + var block: @convention(block) () -> Void = work if #available(OSX 10.10, iOS 8.0, *), (qos != .unspecified || !flags.isEmpty) { let workItem = DispatchWorkItem(qos: qos, flags: flags, block: work) - if let g = group { - dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) - } else { - dispatch_async(self.__wrapped, workItem._block) - } + block = workItem._block + } + + if let g = group { + CDispatch.dispatch_group_async(g.__wrapped, self.__wrapped, block) } else { - if let g = group { - dispatch_group_async(g.__wrapped, self.__wrapped, work) - } else { - dispatch_async(self.__wrapped, work) - } + CDispatch.dispatch_async(self.__wrapped, block) } } - private func _syncBarrier(block: @noescape () -> ()) { - dispatch_barrier_sync(self.__wrapped, block) + private func _syncBarrier(block: () -> ()) { + CDispatch.dispatch_barrier_sync(self.__wrapped, block) } private func _syncHelper( - fn: (@noescape () -> ()) -> (), - execute work: @noescape () throws -> T, + fn: (() -> ()) -> (), + execute work: () throws -> T, rescue: ((Swift.Error) throws -> (T))) rethrows -> T { var result: T? @@ -266,10 +232,10 @@ public extension DispatchQueue { @available(OSX 10.10, iOS 8.0, *) private func _syncHelper( - fn: (DispatchWorkItem) -> (), + fn: (DispatchWorkItem) -> (), flags: DispatchWorkItemFlags, - execute work: @noescape () throws -> T, - rescue: ((Swift.Error) throws -> (T))) rethrows -> T + execute work: () throws -> T, + rescue: @escaping ((Swift.Error) throws -> (T))) rethrows -> T { var result: T? var error: Swift.Error? 
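
The reworked initializer and `async` overloads above compose QoS, attributes and
autorelease frequency into one attribute chain; a hedged sketch (note the
`.workItem` frequency carries the 10.12-era availability shown above).

    let work = DispatchQueue(label: "com.example.transform",
                             qos: .utility,
                             attributes: .concurrent,
                             autoreleaseFrequency: .workItem)
    let group = DispatchGroup()
    work.async(group: group, flags: .barrier) { print("barrier block") }
    group.notify(queue: work) { print("group drained") }
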
@@ -277,7 +243,7 @@ public extension DispatchQueue { do { result = try work() } catch let e { - error = e + error = e } }) fn(workItem) @@ -288,11 +254,11 @@ public extension DispatchQueue { } } - public func sync(execute work: @noescape () throws -> T) rethrows -> T { + public func sync(execute work: () throws -> T) rethrows -> T { return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) } - public func sync(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { + public func sync(flags: DispatchWorkItemFlags, execute work: () throws -> T) rethrows -> T { if flags == .barrier { return try self._syncHelper(fn: _syncBarrier, execute: work, rescue: { throw $0 }) } else if #available(OSX 10.10, iOS 8.0, *), !flags.isEmpty { @@ -302,38 +268,48 @@ public extension DispatchQueue { } } - public func after(when: DispatchTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + public func asyncAfter( + deadline: DispatchTime, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) + { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) - dispatch_after(when.rawValue, self.__wrapped, item._block) + CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, item._block) } else { - dispatch_after(when.rawValue, self.__wrapped, work) + CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, work) } } - @available(OSX 10.10, iOS 8.0, *) - public func after(when: DispatchTime, execute: DispatchWorkItem) { - dispatch_after(when.rawValue, self.__wrapped, execute._block) - } - - public func after(walltime when: DispatchWallTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + public func asyncAfter( + wallDeadline: DispatchWallTime, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) + { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) - dispatch_after(when.rawValue, self.__wrapped, item._block) + CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, item._block) } else { - dispatch_after(when.rawValue, self.__wrapped, work) + CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, work) } } @available(OSX 10.10, iOS 8.0, *) - public func after(walltime when: DispatchWallTime, execute: DispatchWorkItem) { - dispatch_after(when.rawValue, self.__wrapped, execute._block) + public func asyncAfter(deadline: DispatchTime, execute: DispatchWorkItem) { + CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, execute._block) + } + + @available(OSX 10.10, iOS 8.0, *) + public func asyncAfter(wallDeadline: DispatchWallTime, execute: DispatchWorkItem) { + CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, execute._block) } @available(OSX 10.10, iOS 8.0, *) public var qos: DispatchQoS { var relPri: Int32 = 0 - let cls = DispatchQoS.QoSClass(qosClass: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)! + let cls = DispatchQoS.QoSClass(rawValue: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)! 
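
`after(when:)` becomes `asyncAfter(deadline:)`/`asyncAfter(wallDeadline:)` above;
a minimal sketch against a queue like the `work` queue created earlier.

    work.asyncAfter(deadline: .now() + .seconds(2)) {
        print("runs no earlier than two seconds from now")
    }
    work.asyncAfter(wallDeadline: .now() + .milliseconds(250), qos: .background) {
        print("deadline tracks the wall clock")
    }
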
return DispatchQoS(qosClass: cls, relativePriority: Int(relPri)) } @@ -356,53 +332,7 @@ public extension DispatchQueue { } } -extension DispatchQueue { - @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") - public func synchronously(execute work: @noescape () -> ()) { - sync(execute: work) - } - - @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.sync(self:execute:)") - @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.sync(self:execute:)") - @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") - public func synchronously(execute workItem: DispatchWorkItem) { - sync(execute: workItem) - } - - @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.async(self:execute:)") - @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.async(self:execute:)") - @available(*, deprecated, renamed: "DispatchQueue.async(self:execute:)") - public func asynchronously(execute workItem: DispatchWorkItem) { - async(execute: workItem) - } - - @available(*, deprecated, renamed: "DispatchQueue.async(self:group:qos:flags:execute:)") - public func asynchronously(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { - async(group: group, qos: qos, flags: flags, execute: work) - } - - @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") - public func synchronously(execute work: @noescape () throws -> T) rethrows -> T { - return try sync(execute: work) - } - - @available(*, deprecated, renamed: "DispatchQueue.sync(self:flags:execute:)") - public func synchronously(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { - return try sync(flags: flags, execute: work) - } - - @available(*, deprecated, renamed: "DispatchQueue.concurrentPerform(iterations:execute:)") - public func apply(applier iterations: Int, execute block: @noescape (Int) -> Void) { - DispatchQueue.concurrentPerform(iterations: iterations, execute: block) - } - - @available(*, deprecated, renamed: "DispatchQueue.setTarget(self:queue:)") - public func setTargetQueue(queue: DispatchQueue) { - self.setTarget(queue: queue) - } -} - -private func _destructDispatchSpecificValue(ptr: UnsafeMutablePointer?) { +private func _destructDispatchSpecificValue(ptr: UnsafeMutableRawPointer?) { if let p = ptr { Unmanaged.fromOpaque(p).release() } @@ -418,4 +348,4 @@ internal func _swift_dispatch_get_main_queue() -> dispatch_queue_t internal func _swift_dispatch_apply_current_root_queue() -> dispatch_queue_t @_silgen_name("_swift_dispatch_apply_current") -internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) @noescape (Int) -> Void) +internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) (Int) -> Void) diff --git a/src/swift/Source.swift b/src/swift/Source.swift index 2830f01..9dab8f0 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -12,10 +12,10 @@ import CDispatch -public extension DispatchSourceType { +public extension DispatchSourceProtocol { public func setEventHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) 
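
A hedged sketch of the handler API this extension provides, using the
`make…Source` factory spellings introduced further down in the patch; file
descriptor 0 is illustrative.

    let reader = DispatchSource.makeReadSource(fileDescriptor: 0, queue: work)
    reader.setEventHandler(qos: .userInitiated) { print("stdin is readable") }
    reader.setCancelHandler { print("read source cancelled") }
    reader.resume()
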
{ - if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -29,7 +29,7 @@ public extension DispatchSourceType { } public func setCancelHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -43,7 +43,7 @@ public extension DispatchSourceType { } public func setRegistrationHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -150,66 +150,66 @@ public extension DispatchSource { } #if HAVE_MACH - public class func machSend(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend { + public class func makeMachSendSource(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend { let source = dispatch_source_create(_swift_dispatch_source_type_mach_send(), UInt(port), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceMachSend } #endif #if HAVE_MACH - public class func machReceive(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive { + public class func makeMachReceiveSource(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive { let source = dispatch_source_create(_swift_dispatch_source_type_mach_recv(), UInt(port), 0, queue?.__wrapped) return DispatchSource(source) as DispatchSourceMachReceive } #endif #if HAVE_MACH - public class func memoryPressure(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure { + public class func makeMemoryPressureSource(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure { let source = dispatch_source_create(_swift_dispatch_source_type_memorypressure(), 0, eventMask.rawValue, queue.__wrapped) return DispatchSourceMemoryPressure(source) } #endif #if !os(Linux) - public class func process(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess { + public class func makeProcessSource(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess { let source = dispatch_source_create(_swift_dispatch_source_type_proc(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceProcess } #endif - public class func read(fileDescriptor: Int32, queue: DispatchQueue? 
= nil) -> DispatchSourceRead { + public class func makeReadSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { let source = dispatch_source_create(_swift_dispatch_source_type_read(), UInt(fileDescriptor), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceRead } - public class func signal(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal { + public class func makeSignalSource(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal { let source = dispatch_source_create(_swift_dispatch_source_type_signal(), UInt(signal), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceSignal } - public class func timer(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer { + public class func makeTimerSource(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer { let source = dispatch_source_create(_swift_dispatch_source_type_timer(), 0, flags.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceTimer } - public class func userDataAdd(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd { + public class func makeUserDataAddSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd { let source = dispatch_source_create(_swift_dispatch_source_type_data_add(), 0, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceUserDataAdd } - public class func userDataOr(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr { + public class func makeUserDataOrSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr { let source = dispatch_source_create(_swift_dispatch_source_type_data_or(), 0, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceUserDataOr } #if !os(Linux) - public class func fileSystemObject(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { + public class func makeFileSystemObjectSource(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { let source = dispatch_source_create(_swift_dispatch_source_type_vnode(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceFileSystemObject } #endif - public class func write(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite { + public class func makeWriteSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite { let source = dispatch_source_create(_swift_dispatch_source_type_write(), UInt(fileDescriptor), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceWrite } @@ -283,7 +283,7 @@ public extension DispatchSourceTimer { } public func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.rawValue, UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! 
DispatchSource).__wrapped, deadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue)) } public func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { @@ -291,7 +291,7 @@ public extension DispatchSourceTimer { } public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.rawValue, UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue)) } public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { @@ -299,38 +299,6 @@ public extension DispatchSourceTimer { } } -public extension DispatchSourceTimer { - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:deadline:leeway:)") - public func setTimer(start: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleOneshot(deadline: start, leeway: leeway) - } - - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:wallDeadline:leeway:)") - public func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleOneshot(wallDeadline: start, leeway: leeway) - } - - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)") - public func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(deadline: start, interval: interval, leeway: leeway) - } - - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)") - public func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(deadline: start, interval: interval, leeway: leeway) - } - - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)") - public func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway) - } - - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)") - public func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway) - } -} - #if !os(Linux) public extension DispatchSourceFileSystemObject { public var handle: Int32 { @@ -361,13 +329,12 @@ public extension DispatchSourceUserDataAdd { /// The value to coalesce with the pending data using a logical OR or an ADD /// as specified by the dispatch source type. A value of zero has no effect /// and will not result in the submission of the event handler block. - public func mergeData(value: UInt) { - dispatch_source_merge_data((self as! DispatchSource).__wrapped, value) + public func add(data: UInt) { + dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) } } public extension DispatchSourceUserDataOr { -#if false /*FIXME: clashes with UserDataAdd?? 
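
The timer renames above (`scheduleOneshot`/`scheduleRepeating`) in brief; the
deadline, interval and leeway values are arbitrary.

    let timer = DispatchSource.makeTimerSource(queue: work)
    timer.scheduleRepeating(deadline: .now() + .seconds(1),
                            interval: .milliseconds(100),
                            leeway: .milliseconds(10))
    timer.setEventHandler { print("tick") }
    timer.resume()
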
*/ /// @function mergeData /// /// @abstract @@ -379,10 +346,9 @@ public extension DispatchSourceUserDataOr { /// The value to coalesce with the pending data using a logical OR or an ADD /// as specified by the dispatch source type. A value of zero has no effect /// and will not result in the submission of the event handler block. - public func mergeData(value: UInt) { - dispatch_source_merge_data((self as! DispatchSource).__wrapped, value) + public func or(data: UInt) { + dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) } -#endif } @_silgen_name("_swift_dispatch_source_type_DATA_ADD") diff --git a/src/swift/Time.swift b/src/swift/Time.swift index 76a6979..a9559fd 100644 --- a/src/swift/Time.swift +++ b/src/swift/Time.swift @@ -16,7 +16,7 @@ import CDispatch -public struct DispatchTime { +public struct DispatchTime : Comparable { public let rawValue: dispatch_time_t public static func now() -> DispatchTime { @@ -26,12 +26,36 @@ public struct DispatchTime { public static let distantFuture = DispatchTime(rawValue: ~0) - private init(rawValue: dispatch_time_t) { + fileprivate init(rawValue: dispatch_time_t) { self.rawValue = rawValue } + + /// Creates a `DispatchTime` relative to the system clock that + /// ticks since boot. + /// + /// - Parameters: + /// - uptimeNanoseconds: The number of nanoseconds since boot, excluding + /// time the system spent asleep + /// - Returns: A new `DispatchTime` + public init(uptimeNanoseconds: UInt64) { + self.rawValue = dispatch_time_t(uptimeNanoseconds) + } + + public var uptimeNanoseconds: UInt64 { + return UInt64(self.rawValue) + } +} + +public func <(a: DispatchTime, b: DispatchTime) -> Bool { + if a.rawValue == ~0 || b.rawValue == ~0 { return false } + return a.rawValue < b.rawValue } -public struct DispatchWallTime { +public func ==(a: DispatchTime, b: DispatchTime) -> Bool { + return a.rawValue == b.rawValue +} + +public struct DispatchWallTime : Comparable { public let rawValue: dispatch_time_t public static func now() -> DispatchWallTime { @@ -40,18 +64,24 @@ public struct DispatchWallTime { public static let distantFuture = DispatchWallTime(rawValue: ~0) - private init(rawValue: dispatch_time_t) { + fileprivate init(rawValue: dispatch_time_t) { self.rawValue = rawValue } - public init(time: timespec) { - var t = time + public init(timespec: timespec) { + var t = timespec self.rawValue = CDispatch.dispatch_walltime(&t, 0) } } -@available(*, deprecated, renamed: "DispatchWallTime") -public typealias DispatchWalltime = DispatchWallTime +public func <(a: DispatchWallTime, b: DispatchWallTime) -> Bool { + if a.rawValue == ~0 || b.rawValue == ~0 { return false } + return -Int64(a.rawValue) < -Int64(b.rawValue) +} + +public func ==(a: DispatchWallTime, b: DispatchWallTime) -> Bool { + return a.rawValue == b.rawValue +} public enum DispatchTimeInterval { case seconds(Int) @@ -59,23 +89,23 @@ public enum DispatchTimeInterval { case microseconds(Int) case nanoseconds(Int) - internal var rawValue: UInt64 { + internal var rawValue: Int64 { switch self { - case .seconds(let s): return UInt64(s) * NSEC_PER_SEC - case .milliseconds(let ms): return UInt64(ms) * NSEC_PER_MSEC - case .microseconds(let us): return UInt64(us) * NSEC_PER_USEC - case .nanoseconds(let ns): return UInt64(ns) + case .seconds(let s): return Int64(s) * Int64(NSEC_PER_SEC) + case .milliseconds(let ms): return Int64(ms) * Int64(NSEC_PER_MSEC) + case .microseconds(let us): return Int64(us) * Int64(NSEC_PER_USEC) + case .nanoseconds(let ns): return Int64(ns) } } } public 
diff --git a/src/swift/Time.swift b/src/swift/Time.swift
index 76a6979..a9559fd 100644
--- a/src/swift/Time.swift
+++ b/src/swift/Time.swift
@@ -16,7 +16,7 @@
 
 import CDispatch
 
-public struct DispatchTime {
+public struct DispatchTime : Comparable {
     public let rawValue: dispatch_time_t
 
     public static func now() -> DispatchTime {
@@ -26,12 +26,36 @@ public struct DispatchTime {
 
     public static let distantFuture = DispatchTime(rawValue: ~0)
 
-    private init(rawValue: dispatch_time_t) {
+    fileprivate init(rawValue: dispatch_time_t) {
         self.rawValue = rawValue
     }
+
+    /// Creates a `DispatchTime` relative to the system clock that
+    /// ticks since boot.
+    ///
+    /// - Parameters:
+    ///   - uptimeNanoseconds: The number of nanoseconds since boot, excluding
+    ///     time the system spent asleep
+    /// - Returns: A new `DispatchTime`
+    public init(uptimeNanoseconds: UInt64) {
+        self.rawValue = dispatch_time_t(uptimeNanoseconds)
+    }
+
+    public var uptimeNanoseconds: UInt64 {
+        return UInt64(self.rawValue)
+    }
+}
+
+public func <(a: DispatchTime, b: DispatchTime) -> Bool {
+    if a.rawValue == ~0 || b.rawValue == ~0 { return false }
+    return a.rawValue < b.rawValue
 }
 
-public struct DispatchWallTime {
+public func ==(a: DispatchTime, b: DispatchTime) -> Bool {
+    return a.rawValue == b.rawValue
+}
+
+public struct DispatchWallTime : Comparable {
     public let rawValue: dispatch_time_t
 
     public static func now() -> DispatchWallTime {
@@ -40,18 +64,24 @@ public struct DispatchWallTime {
 
     public static let distantFuture = DispatchWallTime(rawValue: ~0)
 
-    private init(rawValue: dispatch_time_t) {
+    fileprivate init(rawValue: dispatch_time_t) {
         self.rawValue = rawValue
     }
 
-    public init(time: timespec) {
-        var t = time
+    public init(timespec: timespec) {
+        var t = timespec
         self.rawValue = CDispatch.dispatch_walltime(&t, 0)
     }
 }
 
-@available(*, deprecated, renamed: "DispatchWallTime")
-public typealias DispatchWalltime = DispatchWallTime
+public func <(a: DispatchWallTime, b: DispatchWallTime) -> Bool {
+    if a.rawValue == ~0 || b.rawValue == ~0 { return false }
+    return -Int64(a.rawValue) < -Int64(b.rawValue)
+}
+
+public func ==(a: DispatchWallTime, b: DispatchWallTime) -> Bool {
+    return a.rawValue == b.rawValue
+}
 
 public enum DispatchTimeInterval {
     case seconds(Int)
@@ -59,23 +89,23 @@
     case milliseconds(Int)
     case microseconds(Int)
     case nanoseconds(Int)
 
-    internal var rawValue: UInt64 {
+    internal var rawValue: Int64 {
         switch self {
-        case .seconds(let s): return UInt64(s) * NSEC_PER_SEC
-        case .milliseconds(let ms): return UInt64(ms) * NSEC_PER_MSEC
-        case .microseconds(let us): return UInt64(us) * NSEC_PER_USEC
-        case .nanoseconds(let ns): return UInt64(ns)
+        case .seconds(let s): return Int64(s) * Int64(NSEC_PER_SEC)
+        case .milliseconds(let ms): return Int64(ms) * Int64(NSEC_PER_MSEC)
+        case .microseconds(let us): return Int64(us) * Int64(NSEC_PER_USEC)
+        case .nanoseconds(let ns): return Int64(ns)
         }
     }
 }
 
 public func +(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime {
-    let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue))
+    let t = CDispatch.dispatch_time(time.rawValue, interval.rawValue)
     return DispatchTime(rawValue: t)
 }
 
 public func -(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime {
-    let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue))
+    let t = CDispatch.dispatch_time(time.rawValue, -interval.rawValue)
     return DispatchTime(rawValue: t)
 }
 
@@ -90,12 +120,12 @@ public func -(time: DispatchTime, seconds: Double) -> DispatchTime {
 }
 
 public func +(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime {
-    let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue))
+    let t = CDispatch.dispatch_time(time.rawValue, interval.rawValue)
     return DispatchWallTime(rawValue: t)
 }
 
 public func -(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime {
-    let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue))
+    let t = CDispatch.dispatch_time(time.rawValue, -interval.rawValue)
     return DispatchWallTime(rawValue: t)
 }
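With DispatchTime and DispatchWallTime adopting Comparable above, and DispatchTimeInterval.rawValue switching from UInt64 to Int64, deadlines can be compared directly and negative deltas reach dispatch_time() without the previous UInt64 round-trip. A short sketch of the resulting surface (values are arbitrary):

    import Dispatch

    let start = DispatchTime.now()
    let deadline = start + .milliseconds(250)  // dispatch_time() with a +250ms delta
    let earlier = deadline - .milliseconds(50) // Int64 rawValue: no UInt64 cast of a negative value

    if earlier < deadline {
        print("deadline at uptime ns:", deadline.uptimeNanoseconds)
    }
    // Note: the < operator above deliberately returns false whenever either
    // operand is .distantFuture (rawValue ~0).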
diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift
index d38bb93..deb3c6d 100644
--- a/src/swift/Wrapper.swift
+++ b/src/swift/Wrapper.swift
@@ -15,6 +15,10 @@ import CDispatch
 // This file contains declarations that are provided by the
 // importer via Dispatch.apinote when the platform has Objective-C support
 
+public func dispatchMain() -> Never {
+    CDispatch.dispatch_main()
+}
+
 public class DispatchObject {
 
     internal func wrapped() -> dispatch_object_t {
@@ -59,7 +63,7 @@ public class DispatchGroup : DispatchObject {
     }
 
     public func leave() {
-        dispatch_group_enter(__wrapped)
+        dispatch_group_leave(__wrapped)
     }
 }
 
@@ -87,29 +91,25 @@ public class DispatchIO : DispatchObject {
     }
 
     internal init(__type: UInt, fd: Int32, queue: DispatchQueue,
-            handler: (error: Int32) -> Void) {
+            handler: @escaping (_ error: Int32) -> Void) {
         __wrapped = dispatch_io_create(__type, fd, queue.__wrapped, handler)
     }
 
     internal init(__type: UInt, path: UnsafePointer<Int8>, oflag: Int32,
-            mode: mode_t, queue: DispatchQueue, handler: (error: Int32) -> Void) {
+            mode: mode_t, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) {
         __wrapped = dispatch_io_create_with_path(__type, path, oflag, mode, queue.__wrapped, handler)
     }
 
     internal init(__type: UInt, io: DispatchIO,
-            queue: DispatchQueue, handler: (error: Int32) -> Void) {
+            queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) {
         __wrapped = dispatch_io_create_with_io(__type, io.__wrapped, queue.__wrapped, handler)
     }
 
-    internal init(queue:dispatch_queue_t) {
-        __wrapped = queue
-    }
-
     deinit {
         _swift_dispatch_release(wrapped())
     }
 
-    public func barrier(execute: () -> ()) {
+    public func barrier(execute: @escaping () -> ()) {
         dispatch_io_barrier(self.__wrapped, execute)
     }
 
@@ -149,13 +149,13 @@ public class DispatchQueue : DispatchObject {
         _swift_dispatch_release(wrapped())
     }
 
-    public func sync(execute workItem: @noescape ()->()) {
+    public func sync(execute workItem: ()->()) {
         dispatch_sync(self.__wrapped, workItem)
     }
 }
 
 public class DispatchSource : DispatchObject,
-    DispatchSourceType, DispatchSourceRead,
+    DispatchSourceProtocol, DispatchSourceRead,
     DispatchSourceSignal, DispatchSourceTimer,
     DispatchSourceUserDataAdd, DispatchSourceUserDataOr,
     DispatchSourceWrite {
@@ -186,9 +186,29 @@ extension DispatchSource : DispatchSourceProcess,
 }
 #endif
 
+internal class __DispatchData : DispatchObject {
+    internal let __wrapped:dispatch_data_t
+    internal let __owned:Bool
+
+    final internal override func wrapped() -> dispatch_object_t {
+        return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+    }
+
+    internal init(data:dispatch_data_t, owned:Bool) {
+        __wrapped = data
+        __owned = owned
+    }
+
+    deinit {
+        if __owned {
+            _swift_dispatch_release(wrapped())
+        }
+    }
+}
+
 public typealias DispatchSourceHandler = @convention(block) () -> Void
 
-public protocol DispatchSourceType {
+public protocol DispatchSourceProtocol {
     func setEventHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?)
 
     func setEventHandler(handler: DispatchWorkItem)
@@ -216,18 +236,16 @@
     var isCancelled: Bool { get }
 }
 
-public protocol DispatchSourceUserDataAdd : DispatchSourceType {
-    func mergeData(value: UInt)
+public protocol DispatchSourceUserDataAdd : DispatchSourceProtocol {
+    func add(data: UInt)
 }
 
-public protocol DispatchSourceUserDataOr {
-#if false /*FIXME: clashes with UserDataAdd?? */
-    func mergeData(value: UInt)
-#endif
+public protocol DispatchSourceUserDataOr : DispatchSourceProtocol {
+    func or(data: UInt)
 }
 
 #if HAVE_MACH
-public protocol DispatchSourceMachSend : DispatchSourceType {
+public protocol DispatchSourceMachSend : DispatchSourceProtocol {
     public var handle: mach_port_t { get }
 
     public var data: DispatchSource.MachSendEvent { get }
@@ -237,13 +255,13 @@ public protocol DispatchSourceMachSend : DispatchSourceType {
 #endif
 
 #if HAVE_MACH
-public protocol DispatchSourceMachReceive : DispatchSourceType {
+public protocol DispatchSourceMachReceive : DispatchSourceProtocol {
     var handle: mach_port_t { get }
 }
 #endif
 
 #if HAVE_MACH
-public protocol DispatchSourceMemoryPressure : DispatchSourceType {
+public protocol DispatchSourceMemoryPressure : DispatchSourceProtocol {
     public var data: DispatchSource.MemoryPressureEvent { get }
 
     public var mask: DispatchSource.MemoryPressureEvent { get }
@@ -251,7 +269,7 @@
 #endif
 
 #if !os(Linux)
-public protocol DispatchSourceProcess : DispatchSourceType {
+public protocol DispatchSourceProcess : DispatchSourceProtocol {
     var handle: pid_t { get }
 
     var data: DispatchSource.ProcessEvent { get }
@@ -260,28 +278,28 @@
 }
 #endif
 
-public protocol DispatchSourceRead : DispatchSourceType {
+public protocol DispatchSourceRead : DispatchSourceProtocol {
 
 }
 
-public protocol DispatchSourceSignal : DispatchSourceType {
+public protocol DispatchSourceSignal : DispatchSourceProtocol {
 
 }
 
-public protocol DispatchSourceTimer : DispatchSourceType {
-    func setTimer(start: DispatchTime, leeway: DispatchTimeInterval)
+public protocol DispatchSourceTimer : DispatchSourceProtocol {
+    func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval)
 
-    func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval)
+    func scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval)
 
-    func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
+    func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
 
-    func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval)
+    func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval)
 
-    func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
+    func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
 
-    func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval)
+    func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval)
 }
 
 #if !os(Linux)
-public protocol DispatchSourceFileSystemObject : DispatchSourceType {
+public protocol DispatchSourceFileSystemObject : DispatchSourceProtocol {
     var handle: Int32 { get }
 
     var data: DispatchSource.FileSystemEvent { get }
@@ -290,7 +308,7 @@ public protocol DispatchSourceFileSystemObject : DispatchSourceType {
 }
 #endif
 
-public protocol DispatchSourceWrite : DispatchSourceType {
+public protocol DispatchSourceWrite : DispatchSourceProtocol {
 
 }
 
@@ -307,9 +325,9 @@ internal enum _OSQoSClass : UInt32 {
         case 0x21: self = .QOS_CLASS_USER_INTERACTIVE
         case 0x19: self = .QOS_CLASS_USER_INITIATED
         case 0x15: self = .QOS_CLASS_DEFAULT
-        case 0x11: self = QOS_CLASS_UTILITY
-        case 0x09: self = QOS_CLASS_BACKGROUND
-        case 0x00: self = QOS_CLASS_UNSPECIFIED
+        case 0x11: self = .QOS_CLASS_UTILITY
+        case 0x09: self = .QOS_CLASS_BACKGROUND
+        case 0x00: self = .QOS_CLASS_UNSPECIFIED
         default: return nil
         }
     }
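Two behavioral fixes land in Wrapper.swift above: dispatchMain() is exposed directly for platforms without the Objective-C apinotes path, and DispatchGroup.leave() now calls dispatch_group_leave() instead of erroneously re-entering the group, so balanced enter/leave pairs can actually drain. A hedged sketch of the pattern this repairs (queue label and work are placeholders):

    import Dispatch

    let group = DispatchGroup()
    let queue = DispatchQueue(label: "com.example.worker")

    group.enter()
    queue.async {
        // ... background work ...
        group.leave()  // formerly mapped to dispatch_group_enter(), which made this wait hang
    }
    group.wait()

    // dispatchMain() parks the main thread to service dispatched blocks; it never returns:
    // dispatchMain()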
diff --git a/src/time.c b/src/time.c
index 6d00831..6db4880 100644
--- a/src/time.c
+++ b/src/time.c
@@ -20,28 +20,6 @@
 
 #include "internal.h"
 
-uint64_t
-_dispatch_get_nanoseconds(void)
-{
-#if !TARGET_OS_WIN32
-    struct timeval now;
-    int r = gettimeofday(&now, NULL);
-    dispatch_assert_zero(r);
-    dispatch_assert(sizeof(NSEC_PER_SEC) == 8);
-    dispatch_assert(sizeof(NSEC_PER_USEC) == 8);
-    return (uint64_t)now.tv_sec * NSEC_PER_SEC +
-            (uint64_t)now.tv_usec * NSEC_PER_USEC;
-#else /* TARGET_OS_WIN32 */
-    // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
-    FILETIME ft;
-    ULARGE_INTEGER li;
-    GetSystemTimeAsFileTime(&ft);
-    li.LowPart = ft.dwLowDateTime;
-    li.HighPart = ft.dwHighDateTime;
-    return li.QuadPart * 100ull;
-#endif /* TARGET_OS_WIN32 */
-}
-
 #if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \
         || TARGET_OS_WIN32
 DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = {
@@ -115,7 +93,7 @@ dispatch_walltime(const struct timespec *inval, int64_t delta)
 {
     int64_t nsec;
     if (inval) {
-        nsec = inval->tv_sec * 1000000000ll + inval->tv_nsec;
+        nsec = (int64_t)_dispatch_timespec_to_nano(*inval);
     } else {
         nsec = (int64_t)_dispatch_get_nanoseconds();
     }
diff --git a/src/trace.h b/src/trace.h
index d73ff3f..c496893 100644
--- a/src/trace.h
+++ b/src/trace.h
@@ -198,12 +198,12 @@ _dispatch_trace_timer_function(dispatch_source_refs_t dr)
 
 DISPATCH_ALWAYS_INLINE
 static inline dispatch_trace_timer_params_t
-_dispatch_trace_timer_params(uintptr_t ident,
+_dispatch_trace_timer_params(dispatch_clock_t clock,
         struct dispatch_timer_source_s *values, uint64_t deadline,
         dispatch_trace_timer_params_t params)
 {
-    #define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \
-            == DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t))
+    #define _dispatch_trace_time2nano3(t) \
+            (clock == DISPATCH_CLOCK_MACH ? _dispatch_time_mach2nano(t) : (t))
     #define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \
             (v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);})
     #define _dispatch_trace_time2nano(v) ({ uint64_t _t; \
@@ -212,9 +212,7 @@ _dispatch_trace_timer_params(uintptr_t ident,
     if (deadline) {
         params->deadline = (int64_t)deadline;
     } else {
-        uint64_t now = (DISPATCH_TIMER_KIND(ident) ==
-                DISPATCH_TIMER_KIND_MACH ? _dispatch_absolute_time() :
-                _dispatch_get_nanoseconds());
+        uint64_t now = _dispatch_time_now(clock);
         params->deadline = _dispatch_trace_time2nano2(values->target,
                 values->target < now ? 0 : values->target - now);
     }
@@ -232,13 +230,12 @@ _dispatch_trace_timer_configure_enabled(void)
 
 DISPATCH_ALWAYS_INLINE
 static inline void
-_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident,
+_dispatch_trace_timer_configure(dispatch_source_t ds, dispatch_clock_t clock,
         struct dispatch_timer_source_s *values)
 {
     struct dispatch_trace_timer_params_s params;
     DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs),
-            _dispatch_trace_timer_params(ident, values, 0,
-            &params));
+            _dispatch_trace_timer_params(clock, values, 0, &params));
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -248,10 +245,11 @@ _dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline)
     if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) {
         if (deadline && dr) {
             dispatch_source_t ds = _dispatch_source_from_refs(dr);
+            dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(ds->ds_ident_hack);
             struct dispatch_trace_timer_params_s params;
             DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr),
-                    _dispatch_trace_timer_params(ds->ds_ident_hack,
-                    &ds_timer(dr), deadline, &params));
+                    _dispatch_trace_timer_params(clock, &ds_timer(dr),
+                    deadline, &params));
         }
     }
 }
@@ -284,8 +282,8 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data,
 #else
 
 #define _dispatch_trace_timer_configure_enabled() false
-#define _dispatch_trace_timer_configure(ds, ident, values) \
-        do { (void)(ds); (void)(ident); (void)(values); } while(0)
+#define _dispatch_trace_timer_configure(ds, clock, values) \
+        do { (void)(ds); (void)(clock); (void)(values); } while(0)
 #define _dispatch_trace_timer_program(dr, deadline) \
         do { (void)(dr); (void)(deadline); } while(0)
 #define _dispatch_trace_timer_wake(dr) \
diff --git a/src/voucher.c b/src/voucher.c
index 94a2934..9c474c8 100644
--- a/src/voucher.c
+++ b/src/voucher.c
@@ -806,10 +806,9 @@ _voucher_activity_debug_channel_init(void)
 {
     dispatch_mach_handler_function_t handler = NULL;
 
-    if (_voucher_libtrace_hooks && _voucher_libtrace_hooks->vah_version >= 2) {
+    if (_voucher_libtrace_hooks) {
         handler = _voucher_libtrace_hooks->vah_debug_channel_handler;
     }
-
     if (!handler) return;
     dispatch_mach_t dm;
@@ -989,6 +988,9 @@ _voucher_libkernel_init(void)
 void
 voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks)
 {
+    if (hooks->vah_version < 3) {
+        DISPATCH_CLIENT_CRASH(hooks->vah_version, "unsupported vah_version");
+    }
     if (!os_atomic_cmpxchg(&_voucher_libtrace_hooks, NULL,
             hooks, relaxed)) {
         DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks,
@@ -1094,6 +1096,13 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED)
         // firehose_buffer_create always consumes the send-right
         _firehose_task_buffer = firehose_buffer_create(logd_port,
                 _voucher_unique_pid, flags);
+        if (_voucher_libtrace_hooks->vah_version >= 4 &&
+                _voucher_libtrace_hooks->vah_metadata_init) {
+            firehose_buffer_t fb = _firehose_task_buffer;
+            size_t meta_sz = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE;
+            void *meta = (void *)((uintptr_t)(&fb->fb_header + 1) - meta_sz);
+            _voucher_libtrace_hooks->vah_metadata_init(meta, meta_sz);
+        }
     }
 }
@@ -1126,23 +1135,20 @@ voucher_activity_get_metadata_buffer(size_t *length)
 }
 
 voucher_t
-voucher_activity_create(firehose_tracepoint_id_t trace_id,
-        voucher_t base, firehose_activity_flags_t flags, uint64_t location)
-{
-    return voucher_activity_create_with_location(&trace_id, base, flags, location);
-}
-
-voucher_t
-voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
-        voucher_t base, firehose_activity_flags_t flags, uint64_t location)
+voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
+        voucher_t base, firehose_activity_flags_t flags,
+        const void *pubdata, size_t publen)
 {
     firehose_activity_id_t va_id = 0, current_id = 0, parent_id = 0;
     firehose_tracepoint_id_u ftid = { .ftid_value = *trace_id };
-    uint16_t pubsize = sizeof(va_id) + sizeof(location);
     uint64_t creator_id = 0;
+    uint16_t pubsize;
     voucher_t ov = _voucher_get();
     voucher_t v;
 
+    if (os_add_overflow(sizeof(va_id), publen, &pubsize) || pubsize > 128) {
+        DISPATCH_CLIENT_CRASH(pubsize, "Absurd publen");
+    }
     if (base == VOUCHER_CURRENT) {
         base = ov;
     }
@@ -1202,13 +1208,22 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
         pubptr = _dispatch_memappend(pubptr, &parent_id);
     }
     pubptr = _dispatch_memappend(pubptr, &va_id);
-    pubptr = _dispatch_memappend(pubptr, &location);
+    pubptr = _dispatch_mempcpy(pubptr, pubdata, publen);
     _voucher_activity_tracepoint_flush(ft, ftid);
 }
 *trace_id = ftid.ftid_value;
 return v;
 }
 
+voucher_t
+voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
+        voucher_t base, firehose_activity_flags_t flags, uint64_t loc)
+{
+    return voucher_activity_create_with_data(trace_id, base, flags,
+            &loc, sizeof(loc));
+}
+
+#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
 void
 _voucher_activity_swap(firehose_activity_id_t old_id,
         firehose_activity_id_t new_id)
@@ -1245,6 +1260,7 @@ _voucher_activity_swap(firehose_activity_id_t old_id,
     if (new_id) pubptr = _dispatch_memappend(pubptr, &new_id);
     _voucher_activity_tracepoint_flush(ft, ftid);
 }
+#endif
 
 firehose_activity_id_t
 voucher_get_activity_id_and_creator(voucher_t v, uint64_t *creator_pid,
@@ -1276,22 +1292,22 @@ voucher_activity_flush(firehose_stream_t stream)
     firehose_buffer_stream_flush(_firehose_task_buffer, stream);
 }
 
-DISPATCH_ALWAYS_INLINE
-static inline firehose_tracepoint_id_t
-_voucher_activity_trace(firehose_stream_t stream,
-        firehose_tracepoint_id_u ftid, uint64_t stamp,
-        const void *pubdata, size_t publen,
-        const void *privdata, size_t privlen)
+DISPATCH_NOINLINE
+firehose_tracepoint_id_t
+voucher_activity_trace_v(firehose_stream_t stream,
+        firehose_tracepoint_id_t trace_id, uint64_t stamp,
+        const struct iovec *iov, size_t publen, size_t privlen)
 {
+    firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
     const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
     const size_t _firehose_chunk_payload_size =
-            sizeof(((struct firehose_buffer_chunk_s *)0)->fbc_data);
+            sizeof(((struct firehose_chunk_s *)0)->fc_data);
 
     if (_voucher_activity_disabled()) return 0;
 
     firehose_tracepoint_t ft;
     firehose_activity_id_t va_id = 0;
-    firehose_buffer_chunk_t fbc;
+    firehose_chunk_t fc;
     uint8_t *privptr, *pubptr;
     size_t pubsize = publen;
     voucher_t ov = _voucher_get();
@@ -1331,38 +1347,52 @@ _voucher_activity_trace(firehose_stream_t stream,
         pubptr = _dispatch_memappend(pubptr, &creator_pid);
     }
     if (privlen) {
-        fbc = firehose_buffer_chunk_for_address(ft);
+        fc = firehose_buffer_chunk_for_address(ft);
         struct firehose_buffer_range_s range = {
-            .fbr_offset = (uint16_t)(privptr - fbc->fbc_start),
+            .fbr_offset = (uint16_t)(privptr - fc->fc_start),
             .fbr_length = (uint16_t)privlen,
         };
         pubptr = _dispatch_memappend(pubptr, &range);
-        _dispatch_mempcpy(privptr, privdata, privlen);
     }
-    _dispatch_mempcpy(pubptr, pubdata, publen);
+    while (publen > 0) {
+        pubptr = _dispatch_mempcpy(pubptr, iov->iov_base, iov->iov_len);
+        if (unlikely(os_sub_overflow(publen, iov->iov_len, &publen))) {
+            DISPATCH_CLIENT_CRASH(0, "Invalid arguments");
+        }
+        iov++;
+    }
+    while (privlen > 0) {
+        privptr = _dispatch_mempcpy(privptr, iov->iov_base, iov->iov_len);
+        if (unlikely(os_sub_overflow(privlen, iov->iov_len, &privlen))) {
+            DISPATCH_CLIENT_CRASH(0, "Invalid arguments");
+        }
+        iov++;
+    }
     _voucher_activity_tracepoint_flush(ft, ftid);
 
     return ftid.ftid_value;
 }
 
 firehose_tracepoint_id_t
 voucher_activity_trace(firehose_stream_t stream,
-        firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+        firehose_tracepoint_id_t trace_id, uint64_t stamp,
         const void *pubdata, size_t publen)
 {
-    firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
-    return _voucher_activity_trace(stream, ftid, timestamp, pubdata, publen,
-            NULL, 0);
+    struct iovec iov = { (void *)pubdata, publen };
+    return voucher_activity_trace_v(stream, trace_id, stamp, &iov, publen, 0);
 }
 
 firehose_tracepoint_id_t
 voucher_activity_trace_with_private_strings(firehose_stream_t stream,
-        firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+        firehose_tracepoint_id_t trace_id, uint64_t stamp,
         const void *pubdata, size_t publen,
         const void *privdata, size_t privlen)
 {
-    firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
-    return _voucher_activity_trace(stream, ftid, timestamp,
-            pubdata, publen, privdata, privlen);
+    struct iovec iov[2] = {
+        { (void *)pubdata, publen },
+        { (void *)privdata, privlen },
+    };
+    return voucher_activity_trace_v(stream, trace_id, stamp,
+            iov, publen, privlen);
 }
 
 #pragma mark -
@@ -1560,6 +1590,7 @@ _voucher_init(void)
 {
 }
 
+#if OS_VOUCHER_ACTIVITY_SPI
 void*
 voucher_activity_get_metadata_buffer(size_t *length)
 {
@@ -1620,6 +1651,16 @@ voucher_activity_trace_with_private_strings(firehose_stream_t stream,
     return 0;
 }
 
+firehose_tracepoint_id_t
+voucher_activity_trace_v(firehose_stream_t stream,
+        firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+        const struct iovec *iov, size_t publen, size_t privlen)
+{
+    (void)stream; (void)trace_id; (void)timestamp;
+    (void)iov; (void)publen; (void)privlen;
+    return 0;
+}
+
 void
 voucher_activity_flush(firehose_stream_t stream)
 {
@@ -1631,6 +1672,7 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks)
 {
     (void)hooks;
 }
+#endif // OS_VOUCHER_ACTIVITY_SPI
 
 size_t
 _voucher_debug(voucher_t v, char* buf, size_t bufsiz)
diff --git a/src/voucher_internal.h b/src/voucher_internal.h
index 3aa1a65..449f4ba 100644
--- a/src/voucher_internal.h
+++ b/src/voucher_internal.h
@@ -90,8 +90,10 @@ voucher_get_mach_voucher(voucher_t voucher);
 void _voucher_init(void);
 void _voucher_atfork_child(void);
 void _voucher_activity_debug_channel_init(void);
+#if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
 void _voucher_activity_swap(firehose_activity_id_t old_id,
         firehose_activity_id_t new_id);
+#endif
 void _voucher_xref_dispose(voucher_t voucher);
 void _voucher_dispose(voucher_t voucher);
 size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz);
@@ -321,9 +323,11 @@ _voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher)
     _dispatch_thread_setspecific(dispatch_voucher_key, voucher);
     mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL;
     mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL;
+#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
     firehose_activity_id_t aid = voucher ? voucher->v_activity : 0;
     firehose_activity_id_t oaid = ov ? ov->v_activity : 0;
     if (aid != oaid) _voucher_activity_swap(aid, oaid);
+#endif
     return (kv != okv) ? kv : VOUCHER_NO_MACH_VOUCHER;
 }
diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh
index d9e28af..db13163 100755
--- a/xcodescripts/install-manpages.sh
+++ b/xcodescripts/install-manpages.sh
@@ -64,7 +64,7 @@ for m in dispatch_group_enter dispatch_group_leave dispatch_group_wait \
     ln -f dispatch_group_create.3 ${m}.3
 done
 
-for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume \
+for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume dispatch_activate \
     dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do
     ln -f dispatch_object.3 ${m}.3
 done
-- 
2.45.2