[submodule "libpwq"]
path = libpwq
url = https://github.com/mheily/libpwq.git
+[submodule "libkqueue"]
+ path = libkqueue
+ url = https://github.com/mheily/libkqueue.git
+++ /dev/null
-Grand Central Dispatch (GCD)
-
-GCD is a concurrent programming framework first shipped with Mac OS X Snow
-Leopard. This package is an open source bundling of libdispatch, the core
-user space library implementing GCD. At the time of writing, support for
-the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow
-Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Support
-for Linux is a work in progress (see Linux notes below). Other systems are
-currently unsupported.
-
- Configuring and installing libdispatch
-
-GCD is built using autoconf, automake, and libtool, and has a number of
-compile-time configuration options that should be reviewed before starting.
-An uncustomized install requires:
-
- sh autogen.sh
- ./configure
- make
- make install
-
-The following configure options may be of general interest:
-
---with-apple-libpthread-source
-
- Specify the path to Apple's libpthread package, so that appropriate headers
- can be found and used.
-
---with-apple-libplatform-source
-
- Specify the path to Apple's libplatform package, so that appropriate headers
- can be found and used.
-
---with-apple-libclosure-source
-
- Specify the path to Apple's Libclosure package, so that appropriate headers
- can be found and used.
-
---with-apple-xnu-source
-
- Specify the path to Apple's XNU package, so that appropriate headers can be
- found and used.
-
---with-blocks-runtime
-
- On systems where -fblocks is supported, specify an additional library path
- in which libBlocksRuntime can be found. This is not required on OS X,
- where the Blocks runtime is included in libSystem, but is required on
- FreeBSD.
-
-The following options are likely to only be useful when building libdispatch on
-OS X as a replacement for /usr/lib/system/libdispatch.dylib:
-
---with-apple-objc4-source
-
- Specify the path to Apple's objc4 package, so that appropriate headers can
- be found and used.
-
---disable-libdispatch-init-constructor
-
- Do not tag libdispatch's init routine as __constructor, in which case it
- must be run manually before libdispatch routines can be called. This is the
- default when building on OS X. For /usr/lib/system/libdispatch.dylib
- the init routine is called automatically during process start.
-
---enable-apple-tsd-optimizations
-
- Use a non-portable allocation scheme for pthread per-thread data (TSD) keys
- when building libdispatch for /usr/lib/system on OS X. This should not
- be used on other OS's, or on OS X when building a stand-alone library.
-
- Typical configuration commands
-
-The following command lines create the configuration required to build
-libdispatch for /usr/lib/system on OS X El Capitan:
-
- clangpath=$(dirname `xcrun --find clang`)
- sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc"
- LIBTOOLIZE=glibtoolize sh autogen.sh
- cflags='-arch x86_64 -arch i386 -g -Os'
- ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \
- --prefix=/usr --libdir=/usr/lib/system --disable-static \
- --enable-apple-tsd-optimizations \
- --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \
- --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \
- --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \
- --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \
- --with-apple-objc4-source=/path/to/10.11.0/objc4-680
- make check
-
-Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with
-clang and blocks support:
-
- sh autogen.sh
- ./configure CC=clang --with-blocks-runtime=/usr/local/lib
- make check
-
-Instructions for building on Linux. Initial focus is on ubuntu 15.04.
-Prepare your system
- 1. Install compiler, autotools
- sudo apt-get install clang
- sudo apt-get install autoconf libtool pkg-config
- 2. Install dtrace (to generate provider.h)
- sudo apt-get install systemtap-sdt-dev
- 3. Install libdispatch pre-reqs
- sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev
-
-Initialize git submodules:
- We are using git submodules to incorporate a specific revision of the
- upstream pthread_workqueue library into the build.
- git submodule init
- git submodule update
-
-Build:
- sh autogen.sh
- ./configure
- make
-
-Note: the build currently fails building tests, but libdispatch.so should
- build successfully.
--- /dev/null
+## Grand Central Dispatch (GCD)
+
+GCD is a concurrent programming framework first shipped with Mac OS X Snow
+Leopard. This package is an open source bundling of libdispatch, the core
+user space library implementing GCD. At the time of writing, support for
+the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow
+Leopard and FreeBSD 9-CURRENT, is required to use libdispatch. Linux is
+supported, but requires specific packages to be installed (see the Linux
+section at the end of this file). Other systems are currently unsupported.
+
+### Configuring and installing libdispatch (general comments)
+
+GCD is built using autoconf, automake, and libtool, and has a number of
+compile-time configuration options that should be reviewed before starting.
+An uncustomized install of the libdispatch C API requires:
+
+ sh autogen.sh
+ ./configure
+ make
+ make install
+
+libdispatch can optionally be built to include a Swift API. This requires a
+Swift toolchain to compile the Swift code in libdispatch, and can be done in
+either of two ways.
+
+If you are building your own Swift toolchain from source, then you should build
+libdispatch simply by giving additional arguments to swift/utils/build-script:
+
+ ./swift/utils/build-script --libdispatch -- --install-libdispatch
+
+Building libdispatch with a pre-built Swift toolchain, and installing it into
+that toolchain so that the toolchain can compile Swift code containing
+"import Dispatch", requires:
+
+ sh autogen.sh
+ ./configure --with-swift-toolchain=<PATH_TO_SWIFT_TOOLCHAIN> --prefix=<PATH_TO_SWIFT_TOOLCHAIN>
+ make
+ make install
+
+Note that once libdispatch is installed into a Swift toolchain, that
+toolchain cannot be used to compile libdispatch again (you must 'make uninstall'
+libdispatch from the toolchain before using it to rebuild libdispatch).
+
+You can also use the build-toolchain script to create a toolchain
+that includes libdispatch on Linux:
+
+1. Add the libdispatch and install-libdispatch lines to ./swift/utils/build-presets.ini under the `[preset: buildbot_linux]` section, as follows:
+
+ ```
+ [preset: buildbot_linux]
+ mixin-preset=mixin_linux_installation
+ build-subdir=buildbot_linux
+ lldb
+ release
+ test
+ validation-test
+ long-test
+ libdispatch
+ foundation
+ lit-args=-v
+ dash-dash
+
+ install-libdispatch
+ install-foundation
+ reconfigure
+ ```
+
+2. Run:
+
+ ```
+ ./swift/utils/build-toolchain local.swift
+ ```
+
+Note that adding libdispatch to build-presets.ini is needed on Linux only; Swift on macOS platforms uses the system-installed libdispatch, so it is not required there.
+
+### Building and installing on OS X
+
+The following configure options may be of general interest:
+
+`--with-apple-libpthread-source`
+
+Specify the path to Apple's libpthread package, so that appropriate headers
+ can be found and used.
+
+`--with-apple-libplatform-source`
+
+Specify the path to Apple's libplatform package, so that appropriate headers
+ can be found and used.
+
+`--with-apple-libclosure-source`
+
+Specify the path to Apple's Libclosure package, so that appropriate headers
+ can be found and used.
+
+`--with-apple-xnu-source`
+
+Specify the path to Apple's XNU package, so that appropriate headers can be
+ found and used.
+
+`--with-blocks-runtime`
+
+On systems where -fblocks is supported, specify an additional library path in which libBlocksRuntime can be found. This is not required on OS X, where the Blocks runtime is included in libSystem, but is required on FreeBSD.
+
+The following options are likely to only be useful when building libdispatch on
+OS X as a replacement for /usr/lib/system/libdispatch.dylib:
+
+`--with-apple-objc4-source`
+
+Specify the path to Apple's objc4 package, so that appropriate headers can
+ be found and used.
+
+`--disable-libdispatch-init-constructor`
+
+Do not tag libdispatch's init routine as __constructor, in which case it must be run manually before libdispatch routines can be called. This is the default when building on OS X. For /usr/lib/system/libdispatch.dylib the init routine is called automatically during process start.
+
+`--enable-apple-tsd-optimizations`
+
+Use a non-portable allocation scheme for pthread per-thread data (TSD) keys when building libdispatch for /usr/lib/system on OS X. This should not be used on other OS's, or on OS X when building a stand-alone library.
+
+#### Typical configuration commands
+
+The following command lines create the configuration required to build
+libdispatch for /usr/lib/system on OS X El Capitan:
+
+ clangpath=$(dirname `xcrun --find clang`)
+ sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc"
+ LIBTOOLIZE=glibtoolize sh autogen.sh
+ cflags='-arch x86_64 -arch i386 -g -Os'
+ ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \
+ --prefix=/usr --libdir=/usr/lib/system --disable-static \
+ --enable-apple-tsd-optimizations \
+ --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \
+ --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \
+ --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \
+ --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \
+ --with-apple-objc4-source=/path/to/10.11.0/objc4-680
+ make check
+
+### Building and installing for FreeBSD
+
+Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with
+clang and blocks support:
+
+ sh autogen.sh
+ ./configure CC=clang --with-blocks-runtime=/usr/local/lib
+ make check
+
+### Building and installing for Linux
+
+Note that libdispatch development and testing are done only
+on Ubuntu; currently supported versions are 14.04, 15.10, and 16.04.
+
+1. The first thing to do is install required packages:
+
+ 1a. Install build tools and clang compiler.
+ `sudo apt-get install autoconf libtool pkg-config clang`
+
+ 1b. Install dtrace (to generate provider.h)
+ `sudo apt-get install systemtap-sdt-dev`
+
+ 1c. Install additional libdispatch dependencies
+ `sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev`
+
+ Note: compiling libdispatch requires clang 3.8 or newer and
+ the gold linker. If the default clang on your Ubuntu version is
+ too old, see http://apt.llvm.org/ to install a newer version.
+ On older Ubuntu releases, you may need to install binutils-gold
+ to get the gold linker.
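+
+ For example, with a newer clang installed from apt.llvm.org under a
+ versioned name (the exact suffix depends on the package you install),
+ configure can be pointed at it explicitly, e.g.
+ `./configure CC=clang-3.8 CXX=clang++-3.8`.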
+
+2. Initialize git submodules.
+ We are using git submodules to incorporate specific revisions of the
+ upstream pthread_workqueue and libkqueue projects into the build.
+
+ ```
+ git submodule init
+ git submodule update
+ ```
+
+3. Build (as in the general instructions above)
+
+ ```
+ sh autogen.sh
+ ./configure
+ make
+ make install
+ ```
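+
+As a quick sanity check (not part of the official build steps), a small C
+program such as the following hypothetical dispatch_smoke.c should compile
+and run against the installed library, assuming headers and the library were
+installed to standard paths:
+
+    #include <dispatch/dispatch.h>
+    #include <stdio.h>
+
+    int main(void) {
+        /* create a serial queue and run one block on it synchronously */
+        dispatch_queue_t q = dispatch_queue_create("org.example.smoke", NULL);
+        dispatch_sync(q, ^{ printf("hello from libdispatch\n"); });
+        dispatch_release(q);
+        return 0;
+    }
+
+Compile it with `clang -fblocks dispatch_smoke.c -ldispatch` (adding
+`-lBlocksRuntime` if the Blocks runtime is linked separately on your system).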
ACLOCAL_AMFLAGS = -I m4
if BUILD_OWN_PTHREAD_WORKQUEUES
-SUBDIRS= \
- dispatch \
- libpwq \
- man \
- os \
- private \
- src \
- tests
-else
-SUBDIRS= \
- dispatch \
- man \
- os \
- private \
- src \
- tests
+ MAYBE_PTHREAD_WORKQUEUES = libpwq
+endif
+
+if BUILD_OWN_KQUEUES
+ MAYBE_KQUEUES = libkqueue
endif
+SUBDIRS= \
+ dispatch \
+ $(MAYBE_PTHREAD_WORKQUEUES) \
+ $(MAYBE_KQUEUES) \
+ man \
+ os \
+ private \
+ src \
+ tests
+
EXTRA_DIST= \
README.md \
LICENSE \
[2dbf83c] APPLIED rdar://27303844
[78b9e82] APPLIED rdar://27303844
[2c0e5ee] APPLIED rdar://27303844
+[5ee237f] APPLIED rdar://27600964
+[77299ec] APPLIED rdar://27600964
+[57c5c28] APPLIED rdar://27600964
+[f8423ec] APPLIED rdar://27600964
+[325f73d] APPLIED rdar://27600964
+[b84e87e] APPLIED rdar://27600964
+[ae71a91] APPLIED rdar://27600964
+[8669dea] APPLIED rdar://27600964
+[a8d0327] APPLIED rdar://27600964
+[2e4e6af] APPLIED rdar://27600964
+[2457fb2] APPLIED rdar://27600964
+[4d58038] APPLIED rdar://27600964
+[98d0a05] APPLIED rdar://27600964
+[8976101] APPLIED rdar://27600964
+[0d9ea5f] APPLIED rdar://28486911
+[e7e9a32] APPLIED rdar://28486911
+[44174d9] APPLIED rdar://28486911
+[6402cb7] APPLIED rdar://28486911
+[e2d5eb5] APPLIED rdar://28486911
+[758bb7f] APPLIED rdar://28486911
+[4c588e9] APPLIED rdar://28486911
+[1300d06] APPLIED rdar://28486911
+[ae1f7e8] APPLIED rdar://28486911
+[40a9bfb] APPLIED rdar://28486911
+[6366081] APPLIED rdar://28486911
+[81d1d0c] APPLIED rdar://28486911
+[5526122] APPLIED rdar://28486911
+[1a7ff3f] APPLIED rdar://28486911
+[e905735] APPLIED rdar://28486911
+[7fe8323] APPLIED rdar://28486911
+[6249878] APPLIED rdar://28486911
+[20792fe] APPLIED rdar://28486911
+[3639fbe] APPLIED rdar://28486911
+[bda3baf] APPLIED rdar://28486911
+[8803d07] APPLIED rdar://28486911
+[d04a0df] APPLIED rdar://28486911
+[69d2a6a] APPLIED rdar://28486911
+[367bd95] APPLIED rdar://28486911
+[152985f] APPLIED rdar://28486911
+[ba7802e] APPLIED rdar://28486911
you don't. */
#define HAVE_DECL_VQ_QUOTA 1
+/* Define to 1 if you have the declaration of `VQ_NEARLOWDISK', and to 0 if
+ you don't. */
+#define HAVE_DECL_VQ_NEARLOWDISK 1
+
+/* Define to 1 if you have the declaration of `VQ_DESIRED_DISK', and to 0 if
+ you don't. */
+#define HAVE_DECL_VQ_DESIRED_DISK 1
+
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
#
AC_PREREQ(2.69)
-AC_INIT([libdispatch], [1.3], [libdispatch@macosforge.org], [libdispatch], [http://libdispatch.macosforge.org])
+AC_INIT([libdispatch], [1.3], [https://bugs.swift.org], [libdispatch], [https://github.com/apple/swift-corelibs-libdispatch])
AC_REVISION([$$])
AC_CONFIG_AUX_DIR(config)
AC_CONFIG_HEADER([config/config_ac.h])
ac_clean_files=a.out.dSYM
AM_MAINTAINER_MODE
+#
+# Command line argument to specify build variant (default to release).
+# Impacts default value of CFLAGS et al. so must come before AC_PROG_CC
+#
+AC_ARG_WITH([build-variant],
+ [AS_HELP_STRING([--with-build-variant=release|debug|releaseassert|releasedebuginfo], [Specify build variant [default=release]])],
+ [dispatch_build_variant=${withval}],
+ [dispatch_build_variant=release]
+)
+AS_CASE([$dispatch_build_variant],
+ [debug], [
+ default_compiler_flags="-g -O0"
+ dispatch_enable_asserts=true
+ dispatch_enable_optimization=false
+ ],
+ [release], [
+ default_compiler_flags="-O2"
+ dispatch_enable_asserts=false
+ dispatch_enable_optimization=true
+ ],
+ [releaseassert], [
+ default_compiler_flags="-O2"
+ dispatch_enable_asserts=true
+ dispatch_enable_optimization=true
+ ],
+ [releasedebuginfo], [
+ default_compiler_flags="-g -O2"
+ dispatch_enable_asserts=false
+ dispatch_enable_optimization=true
+ ],
+ [AC_MSG_ERROR("invalid build-variant $dispatch_build_variant")]
+)
+AM_CONDITIONAL(DISPATCH_ENABLE_ASSERTS, $dispatch_enable_asserts)
+AM_CONDITIONAL(DISPATCH_ENABLE_OPTIMIZATION, $dispatch_enable_optimization)
+
+: ${CFLAGS=$default_compiler_flags}
+: ${CXXFLAGS=$default_compiler_flags}
+: ${OBJCFLAGS=$default_compiler_flags}
+: ${OBJCXXFLAGS=$default_compiler_flags}
+
AC_PROG_CC([clang gcc cc])
AC_PROG_CXX([clang++ g++ c++])
AC_PROG_OBJC([clang gcc cc])
case $target_os in
linux*)
os_string="linux"
+ case $target_cpu in
+ armv7l*)
+ target_cpu="armv7"
+ ;;
+ *)
+ esac
;;
*)
os_string=$target_os
AC_SEARCH_LIBS(clock_gettime, rt)
AC_SEARCH_LIBS(pthread_create, pthread)
-#
-# Prefer native kqueue(2); otherwise use libkqueue if present.
-#
-AC_CHECK_HEADER(sys/event.h, [],
- [PKG_CHECK_MODULES(KQUEUE, libkqueue)]
+AS_IF([test -f $srcdir/libkqueue/configure.ac],
+ [AC_DEFINE(BUILD_OWN_KQUEUES, 1, [Define if building libkqueue from source])
+ ac_configure_args="--disable-libkqueue-install $ac_configure_args"
+ AC_CONFIG_SUBDIRS([libkqueue])
+ build_own_kqueues=true],
+ [build_own_kqueues=false
+ AC_CHECK_HEADER(sys/event.h, [],
+ [PKG_CHECK_MODULES(KQUEUE, libkqueue)]
+ )
+ ]
)
+AM_CONDITIONAL(BUILD_OWN_KQUEUES, $build_own_kqueues)
AC_CHECK_FUNCS([strlcpy getprogname], [],
[PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[
#
# Find functions and declarations we care about.
#
-AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [],
+AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME], [], [],
[[#include <time.h>]])
AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [],
[[#include <sys/event.h>]])
AC_CHECK_DECLS([FD_COPY], [], [], [[#include <sys/select.h>]])
AC_CHECK_DECLS([SIGEMT], [], [], [[#include <signal.h>]])
-AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA], [], [], [[#include <sys/mount.h>]])
+AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA, VQ_NEARLOWDISK, VQ_DESIRED_DISK], [], [], [[#include <sys/mount.h>]])
AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include <errno.h>]])
AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf])
])
AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" == "xyes"])
+#
+# symlink platform-specific module.modulemap files
+#
+AS_CASE([$target_os],
+ [darwin*], [ dispatch_module_map_os=darwin ],
+ [ dispatch_module_map_os=generic ]
+)
+AC_CONFIG_COMMANDS([modulemaps], [
+ ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/dispatch/module.modulemap
+ ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/private/module.modulemap
+ ],
+ [dispatch_module_map_os="$dispatch_module_map_os"]
+)
+
#
# Temporary: some versions of clang do not mark __builtin_trap() as
# __attribute__((__noreturn__)). Detect and add if required.
#
# Generate testsuite links
#
-AC_CONFIG_LINKS([tests/dispatch:$top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh])
+AC_CONFIG_LINKS([tests/dispatch:$ac_top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh])
AC_OUTPUT
time.h
if HAVE_SWIFT
-dispatch_HEADERS+=module.map
+dispatch_HEADERS+=module.modulemap
endif
--- /dev/null
+module Dispatch [system] [extern_c] {
+ umbrella header "dispatch.h"
+ module * { export * }
+ export *
+}
+
+module DispatchIntrospection [system] [extern_c] {
+ header "introspection.h"
+ export *
+}
#if defined(__linux__) && defined(__has_feature)
#if __has_feature(modules)
+#if !defined(__arm__)
#include <stdio.h> // for off_t (to match Glibc.modulemap)
#endif
#endif
+#endif
#define DISPATCH_API_VERSION 20160712
--- /dev/null
+module Dispatch {
+ requires blocks
+ export *
+ link "dispatch"
+}
+
+module DispatchIntrospection [system] [extern_c] {
+ header "introspection.h"
+ export *
+}
+
+module CDispatch [system] [extern_c] {
+ umbrella header "dispatch.h"
+ module * { export * }
+ export *
+ requires blocks
+ link "dispatch"
+}
+++ /dev/null
-module Dispatch {
- requires blocks
- export *
- link "dispatch"
- link "BlocksRuntime"
-}
-
-module DispatchIntrospection [system] [extern_c] {
- header "introspection.h"
- export *
-}
-
-module CDispatch [system] [extern_c] {
- umbrella header "dispatch.h"
- module * { export * }
- export *
- requires blocks
- link "dispatch"
- link "BlocksRuntime"
-}
+++ /dev/null
-module Dispatch [system] [extern_c] {
- umbrella header "dispatch.h"
- module * { export * }
- export *
-}
-
-module DispatchIntrospection [system] [extern_c] {
- header "introspection.h"
- export *
-}
remoteGlobalIDString = 92F3FECA1BEC69E500025962;
remoteInfo = darwintests;
};
- C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */ = {
- isa = PBXContainerItemProxy;
- containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
- proxyType = 2;
- remoteGlobalIDString = E4D01CB9108E6C7200FAA873;
- remoteInfo = dispatch_deadname;
- };
C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; };
6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = "<group>"; };
6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = "<group>"; };
+ 6EC5ABF71D4446CA004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = "<group>"; };
6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = "<group>"; };
6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = "<group>"; };
6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = "<group>"; };
C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; };
C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = "<group>"; };
C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = "<group>"; };
- C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = "<group>"; };
- C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = "<group>"; };
+ C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = "<group>"; };
+ C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = "<group>"; };
C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = "<group>"; tabWidth = 8; };
C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = "<group>"; };
C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m; sourceTree = "<group>"; };
4552540519B1384900B88766 /* jsgc_bench */,
4552540719B1384900B88766 /* async_bench */,
4552540919B1384900B88766 /* apply_bench */,
- C00B0E111C5AEBBE000330B3 /* dispatch_deadname */,
);
name = Products;
sourceTree = "<group>";
6E326ADE1C23451A002A6505 /* dispatch_concur.c */,
6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */,
6E8E4EC71C1A61680004F5CC /* dispatch_data.m */,
+ 6EC5ABF71D4446CA004F8674 /* dispatch_deadname.c */,
6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */,
6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */,
6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */,
remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */;
sourceTree = BUILT_PRODUCTS_DIR;
};
- C00B0E111C5AEBBE000330B3 /* dispatch_deadname */ = {
- isa = PBXReferenceProxy;
- fileType = "compiled.mach-o.executable";
- path = dispatch_deadname;
- remoteRef = C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */;
- sourceTree = BUILT_PRODUCTS_DIR;
- };
C927F36710FD7F1000C5AB8B /* ddt */ = {
isa = PBXReferenceProxy;
fileType = "compiled.mach-o.executable";
LIBS="$LIBS -L$blocks_runtime"]
)
+#
+# Configure argument to enable/disable using an embedded blocks runtime
+#
+AC_ARG_ENABLE([embedded_blocks_runtime],
+ [AS_HELP_STRING([--enable-embedded-blocks-runtime],
+ [Embed blocks runtime in libdispatch [default=yes on Linux, default=no on all other platforms]])],,
+ [case $target_os in
+ linux*)
+ enable_embedded_blocks_runtime=yes
+ ;;
+ *)
+ enable_embedded_blocks_runtime=no
+ esac]
+)
+
#
# Detect compiler support for Blocks; perhaps someday -fblocks won't be
# required, in which case we'll need to change this.
AS_IF([test "x$dispatch_cv_cblocks" != "xno"], [
CBLOCKS_FLAGS="$dispatch_cv_cblocks"
- #
- # It may be necessary to directly link the Blocks runtime on some
- # systems, so give it a try if we can't link a C program that uses
- # Blocks. We will want to remove this at somepoint, as really -fblocks
- # should force that linkage already.
- #
- saveCFLAGS="$CFLAGS"
- CFLAGS="$CFLAGS -fblocks -O0"
- AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime])
- AC_TRY_LINK([], [
- ^{ int j; j=0; }();
- ], [
- AC_MSG_RESULT([no]);
- ], [
- saveLIBS="$LIBS"
- LIBS="$LIBS -lBlocksRuntime"
- AC_TRY_LINK([], [
- ^{ int k; k=0; }();
- ], [
- AC_MSG_RESULT([-lBlocksRuntime])
- ], [
- AC_MSG_ERROR([can't find Blocks runtime])
- ])
- ])
+ AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [
+ #
+ # It may be necessary to directly link the Blocks runtime on some
+ # systems, so give it a try if we can't link a C program that uses
+ # Blocks. We will want to remove this at somepoint, as really -fblocks
+ # should force that linkage already.
+ #
+ saveCFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS -fblocks -O0"
+ AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime])
+ AC_TRY_LINK([], [
+ ^{ int j; j=0; }();
+ ], [
+ AC_MSG_RESULT([no]);
+ ], [
+ saveLIBS="$LIBS"
+ LIBS="$LIBS -lBlocksRuntime"
+ AC_TRY_LINK([], [
+ ^{ int k; k=0; }();
+ ], [
+ AC_MSG_RESULT([-lBlocksRuntime])
+ ], [
+ AC_MSG_ERROR([can't find Blocks runtime])
+ ])
+ ])
+ ])
CFLAGS="$saveCFLAGS"
have_cblocks=true
], [
])
AM_CONDITIONAL(HAVE_CBLOCKS, $have_cblocks)
AC_SUBST([CBLOCKS_FLAGS])
+AM_CONDITIONAL([BUILD_OWN_BLOCKS_RUNTIME], [test "x$enable_embedded_blocks_runtime" = "xyes"])
#
# Because a different C++ compiler may be specified than C compiler, we have
AS_IF([test "x$dispatch_cv_cxxblocks" != "xno"], [
CXXBLOCKS_FLAGS="$dispatch_cv_cxxblocks"
- saveCXXFLAGS="$CXXFLAGS"
- CXXFLAGS="$CXXFLAGS -fblocks -O0"
- AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime])
- AC_TRY_LINK([], [
- ^{ int j; j=0; }();
- ], [
- AC_MSG_RESULT([no]);
- ], [
- saveLIBS="$LIBS"
- LIBS="$LIBS -lBlocksRuntime"
- AC_TRY_LINK([], [
- ^{ int k; k=0; }();
- ], [
- AC_MSG_RESULT([-lBlocksRuntime])
- ], [
- AC_MSG_ERROR([can't find Blocks runtime])
- ])
- ])
+ AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [
+ saveCXXFLAGS="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS -fblocks -O0"
+ AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime])
+ AC_TRY_LINK([], [
+ ^{ int j; j=0; }();
+ ], [
+ AC_MSG_RESULT([no]);
+ ], [
+ saveLIBS="$LIBS"
+ LIBS="$LIBS -lBlocksRuntime"
+ AC_TRY_LINK([], [
+ ^{ int k; k=0; }();
+ ], [
+ AC_MSG_RESULT([-lBlocksRuntime])
+ ], [
+ AC_MSG_ERROR([can't find Blocks runtime])
+ ])
+ ])
+ ])
CXXFLAGS="$saveCXXFLAGS"
have_cxxblocks=true
], [
.Fo dispatch_resume
.Fa "dispatch_object_t object"
.Fc
+.Ft void
+.Fo dispatch_activate
+.Fa "dispatch_object_t object"
+.Fc
.Ft "void *"
.Fo dispatch_get_context
.Fa "dispatch_object_t object"
.Sh DESCRIPTION
Dispatch objects share functions for coordinating memory management, suspension,
cancellation and context pointers.
-.Sh MEMORY MANGEMENT
+.Sh MEMORY MANAGEMENT
Objects returned by creation functions in the dispatch framework may be
uniformly retained and released with the functions
.Fn dispatch_retain
dispatch_release(object);
});
.Ed
+.Sh ACTIVATION
+Dispatch objects such as queues and sources may be created in an inactive
+state. Objects in this state must be activated before any blocks
+associated with them will be invoked. Calling
+.Fn dispatch_activate
+on an active object has no effect.
+.Pp
+Changing attributes such as the target queue or a source handler is no longer permitted
+once the object has been activated (see
+.Xr dispatch_set_target_queue 3 ,
+.Xr dispatch_source_set_event_handler 3 ).
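+.Pp
+For example, a queue created in the inactive state, such as with an attribute
+returned by
+.Fn dispatch_queue_attr_make_initially_inactive ,
+will not invoke blocks submitted to it until it is activated:
+.Bd -literal -offset indent
+dispatch_queue_t q = dispatch_queue_create("com.example.queue",
+    dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL));
+dispatch_async(q, ^{ /* not invoked until activation */ });
+dispatch_activate(q);
+.Ed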
.Sh SUSPENSION
The invocation of blocks on dispatch queues or dispatch sources may be suspended
or resumed with the functions
.Fn dispatch_resume
such that the dispatch object is fully resumed when the last reference is
released. The result of releasing all references to a dispatch object while in
-a suspended state is undefined.
+an inactive or suspended state is undefined.
.Sh CONTEXT POINTERS
Dispatch objects support supplemental context pointers. The value of the
context pointer may be retrieved and updated with
.Fc
.Sh DESCRIPTION
Dispatch semaphores are used to synchronize threads.
+.Pp
+The
+.Fn dispatch_semaphore_wait
+function decrements the semaphore. If the resulting value is less than zero,
+it waits for a signal from a thread that increments the semaphore by calling
+.Fn dispatch_semaphore_signal
+before returning.
The
.Fa timeout
parameter is creatable with the
or
.Xr dispatch_walltime 3
functions.
+.Pp
+The
+.Fn dispatch_semaphore_signal
+function increments the counting semaphore. If the previous value was less than zero,
+it wakes one of the threads that are waiting in
+.Fn dispatch_semaphore_wait
+before returning.
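+.Pp
+A sketch of using a counting semaphore to limit concurrent access to a
+resource that supports at most two simultaneous users:
+.Bd -literal -offset indent
+dispatch_semaphore_t sema = dispatch_semaphore_create(2);
+
+dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
+/* use the resource ... */
+dispatch_semaphore_signal(sema);
+.Ed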
.Sh COMPLETION SYNCHRONIZATION
If the
.Fa count
.Pp
The data returned by
.Fn dispatch_source_get_data
-indicates which of the events in the
+is a bitmask that indicates which of the events in the
.Fa mask
were observed. Note that because this source type will request notifications on
the provided port, it should not be mixed with the use of
.Pp
The data returned by
.Fn dispatch_source_get_data
-indicates which of the events in the
+is a bitmask that indicates which of the events in the
.Fa mask
were observed.
.Pp
.Pp
The data returned by
.Fn dispatch_source_get_data
-indicates which of the events in the
+is a bitmask that indicates which of the events in the
.Fa mask
were observed.
.Pp
* Layout of structs is subject to change without notice
*/
-#define FIREHOSE_BUFFER_CHUNK_SIZE 4096ul
#define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE 2048ul
#define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT 16
-typedef union {
- uint64_t fbc_atomic_pos;
-#define FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC (1ULL << 0)
-#define FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC (1ULL << 16)
-#define FIREHOSE_BUFFER_POS_REFCNT_INC (1ULL << 32)
-#define FIREHOSE_BUFFER_POS_FULL_BIT (1ULL << 56)
-#define FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(pos, stream) \
- ((((pos).fbc_atomic_pos >> 48) & 0x1ff) == (uint16_t)stream)
- struct {
- uint16_t fbc_next_entry_offs;
- uint16_t fbc_private_offs;
- uint8_t fbc_refcnt;
- uint8_t fbc_qos_bits;
- uint8_t fbc_stream;
- uint8_t fbc_flag_full : 1;
- uint8_t fbc_flag_io : 1;
- uint8_t _fbc_flag_unused : 6;
- };
-} firehose_buffer_pos_u;
-
-typedef struct firehose_buffer_chunk_s {
- uint8_t fbc_start[0];
- firehose_buffer_pos_u volatile fbc_pos;
- uint64_t fbc_timestamp;
- uint8_t fbc_data[FIREHOSE_BUFFER_CHUNK_SIZE
- - sizeof(firehose_buffer_pos_u)
- - sizeof(uint64_t)];
-} __attribute__((aligned(8))) *firehose_buffer_chunk_t;
-
typedef struct firehose_buffer_range_s {
uint16_t fbr_offset; // offset from the start of the buffer
uint16_t fbr_length;
#ifdef KERNEL
+typedef struct firehose_chunk_s *firehose_chunk_t;
+
// implemented by the kernel
extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io);
extern void __firehose_critical_region_enter(void);
__firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream,
uint16_t pubsize, uint16_t privsize, uint8_t **privptr);
-firehose_tracepoint_t
-__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc,
- uint64_t stamp, firehose_stream_t stream,
- uint16_t pubsize, uint16_t privsize, uint8_t **privptr);
-
void
__firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat,
firehose_tracepoint_id_u vatid);
-void
-__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc,
- firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid);
-
firehose_buffer_t
__firehose_buffer_create(size_t *size);
OS_ALWAYS_INLINE
static inline const uint8_t *
-_firehose_tracepoint_reader_init(firehose_buffer_chunk_t fbc,
- const uint8_t **endptr)
+_firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr)
{
- const uint8_t *start = fbc->fbc_data;
- const uint8_t *end = fbc->fbc_start + fbc->fbc_pos.fbc_next_entry_offs;
+ const uint8_t *start = fc->fc_data;
+ const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs;
- if (end > fbc->fbc_start + FIREHOSE_BUFFER_CHUNK_SIZE) {
+ if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) {
end = start;
}
*endptr = end;
_firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end)
{
const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
- firehose_tracepoint_t ft;
+ struct ft_unaligned_s {
+ struct firehose_tracepoint_s ft;
+ } __attribute__((packed, aligned(1))) *uft;
do {
- ft = (firehose_tracepoint_t)*ptr;
- if (ft->ft_data >= end) {
+ uft = (struct ft_unaligned_s *)*ptr;
+ if (uft->ft.ft_data >= end) {
// reached the end
return NULL;
}
- if (!ft->ft_length) {
+ if (!uft->ft.ft_length) {
// tracepoint write didn't even start
return NULL;
}
- if (ft->ft_length > end - ft->ft_data) {
+ if (uft->ft.ft_length > end - uft->ft.ft_data) {
// invalid length
return NULL;
}
- *ptr += roundup(ft_size + ft->ft_length, 8);
+ *ptr += roundup(ft_size + uft->ft.ft_length, 8);
// test whether write of the tracepoint was finished
- } while (os_unlikely(ft->ft_id.ftid_value == 0));
+ } while (os_unlikely(uft->ft.ft_id.ftid_value == 0));
- return ft;
+ return (firehose_tracepoint_t)uft;
}
#define firehose_tracepoint_foreach(ft, fbc) \
OS_ALWAYS_INLINE
static inline bool
-firehose_buffer_range_validate(firehose_buffer_chunk_t fbc,
- firehose_tracepoint_t ft, firehose_buffer_range_t range)
+firehose_buffer_range_validate(firehose_chunk_t fc, firehose_tracepoint_t ft,
+ firehose_buffer_range_t range)
{
- if (range->fbr_offset + range->fbr_length > FIREHOSE_BUFFER_CHUNK_SIZE) {
+ if (range->fbr_offset + range->fbr_length > FIREHOSE_CHUNK_SIZE) {
return false;
}
- if (fbc->fbc_start + range->fbr_offset < ft->ft_data + ft->ft_length) {
+ if (fc->fc_start + range->fbr_offset < ft->ft_data + ft->ft_length) {
return false;
}
return true;
uint64_t
firehose_client_get_unique_pid(firehose_client_t client, pid_t *pid);
+/*!
+ * @function firehose_client_get_pid_version
+ *
+ * @abstract
+ * Returns the pid version for that client.
+ *
+ * @param client
+ * The specified client.
+ */
+OS_NOTHROW OS_NONNULL1
+int
+firehose_client_get_pid_version(firehose_client_t client);
+
+/*!
+ * @function firehose_client_get_euid
+ *
+ * @abstract
+ * Returns the EUID for that client as discovered at connect time.
+ *
+ * @param client
+ * The specified client.
+ */
+OS_NOTHROW OS_NONNULL1
+uid_t
+firehose_client_get_euid(firehose_client_t client);
+
/*!
* @function firehose_client_get_metadata_buffer
*
void
firehose_client_metadata_stream_peek(firehose_client_t client,
firehose_event_t context, OS_NOESCAPE bool (^peek_should_start)(void),
- OS_NOESCAPE bool (^peek)(firehose_buffer_chunk_t fbc));
+ OS_NOESCAPE bool (^peek)(firehose_chunk_t fbc));
#pragma mark - Firehose Server
* Type of the handler block for firehose_server_init()
*/
typedef void (^firehose_handler_t)(firehose_client_t client,
- firehose_event_t event, firehose_buffer_chunk_t page);
+ firehose_event_t event, firehose_chunk_t page);
/*!
* @function firehose_server_init
void
firehose_server_assert_spi_version(uint32_t spi_version);
+/*!
+ * @function firehose_server_has_ever_flushed_pages
+ *
+ * @abstract
+ * Checks whether the firehose server has ever flushed any pages this boot.
+ *
+ * @discussion
+ * Must be called after firehose_server_init() and before calling
+ * firehose_server_resume().
+ */
+OS_NOTHROW
+bool
+firehose_server_has_ever_flushed_pages(void);
+
/*!
* @function firehose_server_resume
*
void
firehose_server_resume(void);
+/*!
+ * @function firehose_server_cancel
+ *
+ * @abstract
+ * Cancels the server, disconnects all clients, and prevents new connections.
+ */
+OS_NOTHROW
+void
+firehose_server_cancel(void);
+
+/*!
+ * @typedef firehose_server_queue_t
+ *
+ * @abstract
+ * Values to pass to firehose_server_get_queue()
+ */
+OS_ENUM(firehose_server_queue, unsigned long,
+ FIREHOSE_SERVER_QUEUE_UNKNOWN,
+ FIREHOSE_SERVER_QUEUE_IO,
+ FIREHOSE_SERVER_QUEUE_MEMORY,
+);
+
+/*!
+ * @function firehose_server_copy_queue
+ *
+ * @abstract
+ * Returns internal queues to the firehose server subsystem.
+ */
+OS_NOTHROW OS_OBJECT_RETURNS_RETAINED
+dispatch_queue_t
+firehose_server_copy_queue(firehose_server_queue_t which);
+
#pragma mark - Firehose Snapshot
/*!
* @typedef firehose_snapshot_event
- *
*/
OS_ENUM(firehose_snapshot_event, unsigned long,
FIREHOSE_SNAPSHOT_EVENT_IO_START = 1,
* Type of the handler block for firehose_snapshot
*/
typedef void (^firehose_snapshot_handler_t)(firehose_client_t client,
- firehose_snapshot_event_t event, firehose_buffer_chunk_t page);
+ firehose_snapshot_event_t event, firehose_chunk_t page);
/*!
* @function firehose_snapshot
#ifndef __linux__
#include <os/base.h>
#endif
+#include <sys/uio.h>
#include <os/object.h>
#include "voucher_private.h"
-#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20160329
+#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20161003
#if OS_VOUCHER_WEAK_IMPORT
#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT
#define OS_VOUCHER_EXPORT OS_EXPORT
#endif
-#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_PUSH \
- _Pragma("clang diagnostic push") \
- _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
-#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_POP \
- _Pragma("clang diagnostic pop")
-
__BEGIN_DECLS
/*!
firehose_activity_id_t *parent_id);
/*!
- * @function voucher_activity_create
+ * @function voucher_activity_create_with_data
*
* @abstract
* Creates a voucher object with a new activity identifier.
* @param flags
* See voucher_activity_flag_t documentation for effect.
*
- * @param location
- * Location identifier for the automatic tracepoint generated as part of
- * creating the new activity.
+ * @param pubdata
+ * Pointer to packed buffer of tracepoint data.
+ *
+ * @param publen
+ * Length of data at 'pubdata'.
*
* @result
* A new voucher with an activity identifier.
*/
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+__OSX_AVAILABLE(10.12.4) __IOS_AVAILABLE(10.3)
+__TVOS_AVAILABLE(10.2) __WATCHOS_AVAILABLE(3.2)
OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
voucher_t
-voucher_activity_create(firehose_tracepoint_id_t trace_id,
- voucher_t base, firehose_activity_flags_t flags, uint64_t location);
+voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
+ voucher_t base, firehose_activity_flags_t flags,
+ const void *pubdata, size_t publen);
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
const void *pubdata, size_t publen);
/*!
- * @function voucher_activity_trace_with_private_strings
+ * @function voucher_activity_trace_v
*
* @abstract
* Add a tracepoint to the specified stream, with private data.
* @param timestamp
* The mach_approximate_time()/mach_absolute_time() value for this tracepoint.
*
- * @param pubdata
- * Pointer to packed buffer of tracepoint data.
+ * @param iov
+ * Array of `struct iovec` pointing to the data to layout.
+ * The total size of this iovec must span exactly `publen + privlen` bytes.
+ * The `publen` boundary must coincide with the end of an iovec (each iovec
+ * must either be pure public or pure private data).
*
* @param publen
- * Length of data at 'pubdata'.
- *
- * @param privdata
- * Pointer to packed buffer of private tracepoint data.
+ * Total length of data to read from the iovec for the public data.
*
* @param privlen
- * Length of data at 'privdata'.
+ * Length of data to read from the iovec after the public data for the private
+ * data.
*/
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+__OSX_AVAILABLE(10.12.4) __IOS_AVAILABLE(10.3)
+__TVOS_AVAILABLE(10.2) __WATCHOS_AVAILABLE(3.2)
+OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4
+firehose_tracepoint_id_t
+voucher_activity_trace_v(firehose_stream_t stream,
+ firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+ const struct iovec *iov, size_t publen, size_t privlen);
+
+
+__OSX_DEPRECATED(10.12, 10.12.4, "Use voucher_activity_trace_v")
+__IOS_DEPRECATED(10.0, 10.3, "Use voucher_activity_trace_v")
+__TVOS_DEPRECATED(10.0, 10.2, "Use voucher_activity_trace_v")
+__WATCHOS_DEPRECATED(3.0, 3.2, "Use voucher_activity_trace_v")
OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6
firehose_tracepoint_id_t
voucher_activity_trace_with_private_strings(firehose_stream_t stream,
const void *pubdata, size_t publen,
const void *privdata, size_t privlen);
-typedef struct voucher_activity_hooks_s {
-#define VOUCHER_ACTIVITY_HOOKS_VERSION 3
+typedef const struct voucher_activity_hooks_s {
+#define VOUCHER_ACTIVITY_HOOKS_VERSION 4
long vah_version;
- // version 1
mach_port_t (*vah_get_logd_port)(void);
- // version 2
dispatch_mach_handler_function_t vah_debug_channel_handler;
- // version 3
kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *);
+ void (*vah_metadata_init)(void *metadata_buffer, size_t size);
} *voucher_activity_hooks_t;
/*!
--- /dev/null
+module DispatchPrivate [system] [extern_c] {
+ umbrella header "private.h"
+ exclude header "mach_private.h"
+ module * { export * }
+ export *
+}
+
+module DispatchIntrospectionPrivate [system] [extern_c] {
+ header "introspection_private.h"
+ export *
+}
--- /dev/null
+module DispatchPrivate [system] [extern_c] {
+ umbrella header "private.h"
+ exclude header "mach_private.h"
+ module * { export * }
+ export *
+}
+
+module DispatchIntrospectionPrivate [system] [extern_c] {
+ header "introspection_private.h"
+ export *
+}
+++ /dev/null
-module DispatchPrivate [system] [extern_c] {
- umbrella header "private.h"
- exclude header "mach_private.h"
- module * { export * }
- export *
-}
-
-module DispatchIntrospectionPrivate [system] [extern_c] {
- header "introspection_private.h"
- export *
-}
dispatch_runloop_handle_t
_dispatch_get_main_queue_handle_4CF(void);
-#if TARGET_OS_MAC
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
-DISPATCH_EXPORT DISPATCH_NOTHROW
-void
-_dispatch_main_queue_callback_4CF(mach_msg_header_t *_Null_unspecified msg);
-#else
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
DISPATCH_EXPORT DISPATCH_NOTHROW
void
_dispatch_main_queue_callback_4CF(void *_Null_unspecified msg);
-#endif
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
*
* @constant DISPATCH_VFS_QUOTA
* We hit a user quota (quotactl) for this filesystem.
+ *
+ * @constant DISPATCH_VFS_NEARLOWDISK
+ * Filesystem is nearly full (below NEARLOWDISK level).
+ *
+ * @constant DISPATCH_VFS_DESIREDDISK
+ * Filesystem has exceeded the DESIREDDISK level.
*/
enum {
DISPATCH_VFS_NOTRESP = 0x0001,
DISPATCH_VFS_UPDATE = 0x0100,
DISPATCH_VFS_VERYLOWDISK = 0x0200,
DISPATCH_VFS_QUOTA = 0x1000,
+ DISPATCH_VFS_NEARLOWDISK = 0x2000,
+ DISPATCH_VFS_DESIREDDISK = 0x4000,
};
/*!
--- /dev/null
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+
+
+#ifndef _Block_H_
+#define _Block_H_
+
+#if !defined(BLOCK_EXPORT)
+# if defined(__cplusplus)
+# define BLOCK_EXPORT extern "C" __attribute__((visibility("default")))
+# else
+# define BLOCK_EXPORT extern __attribute__((visibility("default")))
+# endif
+#endif
+
+#if __cplusplus
+extern "C" {
+#endif
+
+// Create a heap based copy of a Block or simply add a reference to an existing one.
+// This must be paired with Block_release to recover memory, even when running
+// under Objective-C Garbage Collection.
+BLOCK_EXPORT void *_Block_copy(const void *aBlock);
+
+// Lose the reference, and if heap based and last reference, recover the memory
+BLOCK_EXPORT void _Block_release(const void *aBlock);
+
+// Used by the compiler. Do not call this function yourself.
+BLOCK_EXPORT void _Block_object_assign(void *, const void *, const int);
+
+// Used by the compiler. Do not call this function yourself.
+BLOCK_EXPORT void _Block_object_dispose(const void *, const int);
+
+// Used by the compiler. Do not use these variables yourself.
+BLOCK_EXPORT void * _NSConcreteGlobalBlock[32];
+BLOCK_EXPORT void * _NSConcreteStackBlock[32];
+
+#if __cplusplus
+}
+#endif
+
+// Type correct macros
+
+#define Block_copy(...) ((__typeof(__VA_ARGS__))_Block_copy((const void *)(__VA_ARGS__)))
+#define Block_release(...) _Block_release((const void *)(__VA_ARGS__))
+
+
+#endif
--- /dev/null
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+
+
+#ifndef _BLOCK_PRIVATE_H_
+#define _BLOCK_PRIVATE_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "Block.h"
+
+#if __cplusplus
+extern "C" {
+#endif
+
+
+// Values for Block_layout->flags to describe block objects
+enum {
+ BLOCK_DEALLOCATING = (0x0001), // runtime
+ BLOCK_REFCOUNT_MASK = (0xfffe), // runtime
+ BLOCK_NEEDS_FREE = (1 << 24), // runtime
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25), // compiler
+ BLOCK_HAS_CTOR = (1 << 26), // compiler: helpers have C++ code
+ BLOCK_IS_GC = (1 << 27), // runtime
+ BLOCK_IS_GLOBAL = (1 << 28), // compiler
+ BLOCK_USE_STRET = (1 << 29), // compiler: undefined if !BLOCK_HAS_SIGNATURE
+ BLOCK_HAS_SIGNATURE = (1 << 30), // compiler
+ BLOCK_HAS_EXTENDED_LAYOUT=(1 << 31) // compiler
+};
+
+#define BLOCK_DESCRIPTOR_1 1
+struct Block_descriptor_1 {
+ uintptr_t reserved;
+ uintptr_t size;
+};
+
+#define BLOCK_DESCRIPTOR_2 1
+struct Block_descriptor_2 {
+ // requires BLOCK_HAS_COPY_DISPOSE
+ void (*copy)(void *dst, const void *src);
+ void (*dispose)(const void *);
+};
+
+#define BLOCK_DESCRIPTOR_3 1
+struct Block_descriptor_3 {
+ // requires BLOCK_HAS_SIGNATURE
+ const char *signature;
+ const char *layout; // contents depend on BLOCK_HAS_EXTENDED_LAYOUT
+};
+
+struct Block_layout {
+ void *isa;
+ volatile int32_t flags; // contains ref count
+ int32_t reserved;
+ void (*invoke)(void *, ...);
+ struct Block_descriptor_1 *descriptor;
+ // imported variables
+};
+
+
+// Values for Block_byref->flags to describe __block variables
+enum {
+ // Byref refcount must use the same bits as Block_layout's refcount.
+ // BLOCK_DEALLOCATING = (0x0001), // runtime
+ // BLOCK_REFCOUNT_MASK = (0xfffe), // runtime
+
+ BLOCK_BYREF_LAYOUT_MASK = (0xf << 28), // compiler
+ BLOCK_BYREF_LAYOUT_EXTENDED = ( 1 << 28), // compiler
+ BLOCK_BYREF_LAYOUT_NON_OBJECT = ( 2 << 28), // compiler
+ BLOCK_BYREF_LAYOUT_STRONG = ( 3 << 28), // compiler
+ BLOCK_BYREF_LAYOUT_WEAK = ( 4 << 28), // compiler
+ BLOCK_BYREF_LAYOUT_UNRETAINED = ( 5 << 28), // compiler
+
+ BLOCK_BYREF_IS_GC = ( 1 << 27), // runtime
+
+ BLOCK_BYREF_HAS_COPY_DISPOSE = ( 1 << 25), // compiler
+ BLOCK_BYREF_NEEDS_FREE = ( 1 << 24), // runtime
+};
+
+struct Block_byref {
+ void *isa;
+ struct Block_byref *forwarding;
+ volatile int32_t flags; // contains ref count
+ uint32_t size;
+};
+
+struct Block_byref_2 {
+ // requires BLOCK_BYREF_HAS_COPY_DISPOSE
+ void (*byref_keep)(struct Block_byref *dst, struct Block_byref *src);
+ void (*byref_destroy)(struct Block_byref *);
+};
+
+struct Block_byref_3 {
+ // requires BLOCK_BYREF_LAYOUT_EXTENDED
+ const char *layout;
+};
+
+
+// Extended layout encoding.
+
+// Values for Block_descriptor_3->layout with BLOCK_HAS_EXTENDED_LAYOUT
+// and for Block_byref_3->layout with BLOCK_BYREF_LAYOUT_EXTENDED
+
+// If the layout field is less than 0x1000, then it is a compact encoding
+// of the form 0xXYZ: X strong pointers, then Y byref pointers,
+// then Z weak pointers.
+
+// If the layout field is 0x1000 or greater, it points to a
+// string of layout bytes. Each byte is of the form 0xPN.
+// Operator P is from the list below. Value N is a parameter for the operator.
+// Byte 0x00 terminates the layout; remaining block data is non-pointer bytes.
+
+enum {
+ BLOCK_LAYOUT_ESCAPE = 0, // N=0 halt, rest is non-pointer. N!=0 reserved.
+ BLOCK_LAYOUT_NON_OBJECT_BYTES = 1, // N bytes non-objects
+ BLOCK_LAYOUT_NON_OBJECT_WORDS = 2, // N words non-objects
+ BLOCK_LAYOUT_STRONG = 3, // N words strong pointers
+ BLOCK_LAYOUT_BYREF = 4, // N words byref pointers
+ BLOCK_LAYOUT_WEAK = 5, // N words weak pointers
+ BLOCK_LAYOUT_UNRETAINED = 6, // N words unretained pointers
+ BLOCK_LAYOUT_UNKNOWN_WORDS_7 = 7, // N words, reserved
+ BLOCK_LAYOUT_UNKNOWN_WORDS_8 = 8, // N words, reserved
+ BLOCK_LAYOUT_UNKNOWN_WORDS_9 = 9, // N words, reserved
+ BLOCK_LAYOUT_UNKNOWN_WORDS_A = 0xA, // N words, reserved
+ BLOCK_LAYOUT_UNUSED_B = 0xB, // unspecified, reserved
+ BLOCK_LAYOUT_UNUSED_C = 0xC, // unspecified, reserved
+ BLOCK_LAYOUT_UNUSED_D = 0xD, // unspecified, reserved
+ BLOCK_LAYOUT_UNUSED_E = 0xE, // unspecified, reserved
+ BLOCK_LAYOUT_UNUSED_F = 0xF, // unspecified, reserved
+};
+
+
+// Runtime support functions used by compiler when generating copy/dispose helpers
+
+// Values for _Block_object_assign() and _Block_object_dispose() parameters
+enum {
+ // see function implementation for a more complete description of these fields and combinations
+ BLOCK_FIELD_IS_OBJECT = 3, // id, NSObject, __attribute__((NSObject)), block, ...
+ BLOCK_FIELD_IS_BLOCK = 7, // a block variable
+ BLOCK_FIELD_IS_BYREF = 8, // the on stack structure holding the __block variable
+ BLOCK_FIELD_IS_WEAK = 16, // declared __weak, only used in byref copy helpers
+ BLOCK_BYREF_CALLER = 128, // called from __block (byref) copy/dispose support routines.
+};
+
+enum {
+ BLOCK_ALL_COPY_DISPOSE_FLAGS =
+ BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_BYREF |
+ BLOCK_FIELD_IS_WEAK | BLOCK_BYREF_CALLER
+};
+
+// Runtime entry point called by compiler when assigning objects inside copy helper routines
+BLOCK_EXPORT void _Block_object_assign(void *destAddr, const void *object, const int flags);
+ // BLOCK_FIELD_IS_BYREF is only used from within block copy helpers
+
+
+// runtime entry point called by the compiler when disposing of objects inside dispose helper routine
+BLOCK_EXPORT void _Block_object_dispose(const void *object, const int flags);
+
+
+// Other support functions
+
+// runtime entry to get total size of a closure
+BLOCK_EXPORT size_t Block_size(void *aBlock);
+
+// indicates whether block was compiled with compiler that sets the ABI related metadata bits
+BLOCK_EXPORT bool _Block_has_signature(void *aBlock);
+
+// returns TRUE if return value of block is on the stack, FALSE otherwise
+BLOCK_EXPORT bool _Block_use_stret(void *aBlock);
+
+// Returns a string describing the block's parameter and return types.
+// The encoding scheme is the same as Objective-C @encode.
+// Returns NULL for blocks compiled with some compilers.
+BLOCK_EXPORT const char * _Block_signature(void *aBlock);
+
+// Returns a string describing the block's GC layout.
+// This uses the GC skip/scan encoding.
+// May return NULL.
+BLOCK_EXPORT const char * _Block_layout(void *aBlock);
+
+// Returns a string describing the block's layout.
+// This uses the "extended layout" form described above.
+// May return NULL.
+BLOCK_EXPORT const char * _Block_extended_layout(void *aBlock);
+
+// Callable only from the ARR weak subsystem while in exclusion zone
+BLOCK_EXPORT bool _Block_tryRetain(const void *aBlock);
+
+// Callable only from the ARR weak subsystem while in exclusion zone
+BLOCK_EXPORT bool _Block_isDeallocating(const void *aBlock);
+
+
+// the raw data space for runtime classes for blocks
+// class+meta used for stack, malloc, and collectable based blocks
+BLOCK_EXPORT void * _NSConcreteMallocBlock[32];
+BLOCK_EXPORT void * _NSConcreteAutoBlock[32];
+BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32];
+BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32];
+// declared in Block.h
+// BLOCK_EXPORT void * _NSConcreteGlobalBlock[32];
+// BLOCK_EXPORT void * _NSConcreteStackBlock[32];
+
+
+// the intercept routines that must be used under GC
+BLOCK_EXPORT void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign_strong)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *),
+ void (*gc_memmove)(void *, void *, unsigned long));
+
+// earlier version, now simply transitional
+BLOCK_EXPORT void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign_strong)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *));
+
+BLOCK_EXPORT void _Block_use_RR( void (*retain)(const void *),
+ void (*release)(const void *));
+
+struct Block_callbacks_RR {
+ size_t size; // size == sizeof(struct Block_callbacks_RR)
+ void (*retain)(const void *);
+ void (*release)(const void *);
+ void (*destructInstance)(const void *);
+};
+typedef struct Block_callbacks_RR Block_callbacks_RR;
+
+BLOCK_EXPORT void _Block_use_RR2(const Block_callbacks_RR *callbacks);
+
+// make a collectable GC heap based Block. Not useful under non-GC.
+BLOCK_EXPORT void *_Block_copy_collectable(const void *aBlock);
+
+// thread-unsafe diagnostic
+BLOCK_EXPORT const char *_Block_dump(const void *block);
+
+
+// Obsolete
+
+// first layout
+struct Block_basic {
+ void *isa;
+ int Block_flags; // int32_t
+ int Block_size; // XXX should be packed into Block_flags
+ void (*Block_invoke)(void *);
+ void (*Block_copy)(void *dst, void *src); // iff BLOCK_HAS_COPY_DISPOSE
+ void (*Block_dispose)(void *); // iff BLOCK_HAS_COPY_DISPOSE
+ //long params[0]; // where const imports, __block storage references, etc. get laid down
+} __attribute__((deprecated));
+
+
+#if __cplusplus
+}
+#endif
+
+
+#endif
--- /dev/null
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+
+/********************
+NSBlock support
+
+We allocate space and export a symbol to be used as the Class for the on-stack and malloc'ed copies until ObjC arrives on the scene. These data areas are set up by Foundation to link in as real classes post facto.
+
+We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block.
+**********************/
+#define BLOCK_EXPORT extern __attribute__((visibility("default")))
+
+BLOCK_EXPORT void * _NSConcreteStackBlock[32] = { 0 };
+BLOCK_EXPORT void * _NSConcreteMallocBlock[32] = { 0 };
+BLOCK_EXPORT void * _NSConcreteAutoBlock[32] = { 0 };
+BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32] = { 0 };
+BLOCK_EXPORT void * _NSConcreteGlobalBlock[32] = { 0 };
+BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32] = { 0 };
--- /dev/null
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+
+#include "Block_private.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#define __USE_GNU
+#include <dlfcn.h>
+#if __has_include(<os/assumes.h>)
+#include <os/assumes.h>
+#else
+#include <assert.h>
+#endif
+#ifndef os_assumes
+#define os_assumes(_x) _x
+#endif
+#ifndef os_assert
+#define os_assert(_x) assert(_x)
+#endif
+
+#if TARGET_OS_WIN32
+#define _CRT_SECURE_NO_WARNINGS 1
+#include <windows.h>
+static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst)
+{
+ // fixme barrier is overkill -- see objc-os.h
+ long original = InterlockedCompareExchange(dst, newl, oldl);
+ return (original == oldl);
+}
+
+static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst)
+{
+ // fixme barrier is overkill -- see objc-os.h
+ int original = InterlockedCompareExchange(dst, newi, oldi);
+ return (original == oldi);
+}
+#else
+#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
+#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New)
+#endif
+
+/***********************
+Globals
+************************/
+
+static void *_Block_copy_class = _NSConcreteMallocBlock;
+static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
+static int _Block_copy_flag = BLOCK_NEEDS_FREE;
+static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4; // logical refcount 2
+
+static bool isGC = false;
+
+/*******************************************************************************
+Internal Utilities
+********************************************************************************/
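+
+// Note on the encoding assumed by the latching helpers below (per the BLOCK_*
+// constants in Block_private.h): the reference count is stored in
+// BLOCK_REFCOUNT_MASK in units of 2, leaving the low bit free for
+// BLOCK_DEALLOCATING. A count latched at BLOCK_REFCOUNT_MASK means the Block
+// is intentionally leaked and treated as immortal from then on.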
+
+
+static int32_t latching_incr_int(volatile int32_t *where) {
+ while (1) {
+ int32_t old_value = *where;
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ return BLOCK_REFCOUNT_MASK;
+ }
+ if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
+ return old_value+2;
+ }
+ }
+}
+
+static bool latching_incr_int_not_deallocating(volatile int32_t *where) {
+ while (1) {
+ int32_t old_value = *where;
+ if (old_value & BLOCK_DEALLOCATING) {
+ // if deallocating we can't do this
+ return false;
+ }
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ // if latched, we're leaking this block, and we succeed
+ return true;
+ }
+ if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) {
+ // otherwise, we must store a new retained value without the deallocating bit set
+ return true;
+ }
+ }
+}
+
+
+// return should_deallocate?
+static bool latching_decr_int_should_deallocate(volatile int32_t *where) {
+ while (1) {
+ int32_t old_value = *where;
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ return false; // latched high
+ }
+ if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
+ return false; // underflow, latch low
+ }
+ int32_t new_value = old_value - 2;
+ bool result = false;
+ if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) {
+ new_value = old_value - 1;
+ result = true;
+ }
+ if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
+ return result;
+ }
+ }
+}
+
+// hit zero?
+static bool latching_decr_int_now_zero(volatile int32_t *where) {
+ while (1) {
+ int32_t old_value = *where;
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ return false; // latched high
+ }
+ if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
+ return false; // underflow, latch low
+ }
+ int32_t new_value = old_value - 2;
+ if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) {
+ return (new_value & BLOCK_REFCOUNT_MASK) == 0;
+ }
+ }
+}
+
+
+/***********************
+GC support stub routines
+************************/
+#if !TARGET_OS_WIN32
+#pragma mark GC Support Routines
+#endif
+
+
+
+static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
+ return malloc(size);
+}
+
+static void _Block_assign_default(void *value, void **destptr) {
+ *destptr = value;
+}
+
+static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
+}
+
+static void _Block_do_nothing(const void *aBlock) { }
+
+static void _Block_retain_object_default(const void *ptr) {
+}
+
+static void _Block_release_object_default(const void *ptr) {
+}
+
+static void _Block_assign_weak_default(const void *ptr, void *dest) {
+#if !TARGET_OS_WIN32
+ *(long *)dest = (long)ptr;
+#else
+ *(void **)dest = (void *)ptr;
+#endif
+}
+
+static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
+ memmove(dst, src, (size_t)size);
+}
+
+static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
+ void **destp = (void **)dest;
+ void **srcp = (void **)src;
+ while (size) {
+ _Block_assign_default(*srcp, destp);
+ destp++;
+ srcp++;
+ size -= sizeof(void *);
+ }
+}
+
+static void _Block_destructInstance_default(const void *aBlock) {}
+
+/**************************************************************************
+GC support callout functions - initially set to stub routines
+***************************************************************************/
+
+static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
+static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
+static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
+static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
+static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
+static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
+static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
+static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
+static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default;
+
+
+/**************************************************************************
+GC support SPI functions - called from ObjC runtime and CoreFoundation
+***************************************************************************/
+
+// Public SPI
+// Called from objc-auto to turn on GC.
+// version 3, 4 arg, but changed 1st arg
+void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *),
+ void (*gc_memmove)(void *, void *, unsigned long)) {
+
+ isGC = true;
+ _Block_allocator = alloc;
+ _Block_deallocator = _Block_do_nothing;
+ _Block_assign = gc_assign;
+ _Block_copy_flag = BLOCK_IS_GC;
+ _Block_copy_class = _NSConcreteAutoBlock;
+ // blocks with ctors & dtors need to have the dtor run from a class with a finalizer
+ _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
+ _Block_setHasRefcount = setHasRefcount;
+ _Byref_flag_initial_value = BLOCK_BYREF_IS_GC; // no refcount
+ _Block_retain_object = _Block_do_nothing;
+ _Block_release_object = _Block_do_nothing;
+ _Block_assign_weak = gc_assign_weak;
+ _Block_memmove = gc_memmove;
+}
+
+// transitional
+void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *)) {
+ // until objc calls _Block_use_GC it will call this entry point; supply a (broken) internal memmove implementation until then
+ _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
+}
+
+
+// Called from objc-auto to alternatively turn on retain/release.
+// Prior to this the only "object" support we can provide is for those
+// super special objects that live in libSystem, namely dispatch queues.
+// Blocks and Block_byrefs have their own special entry points.
+BLOCK_EXPORT
+void _Block_use_RR( void (*retain)(const void *),
+ void (*release)(const void *)) {
+ _Block_retain_object = retain;
+ _Block_release_object = release;
+ _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance");
+}
+
+// Called from CF to indicate MRR. Newer version uses a versioned structure, so we can add more functions
+// without defining a new entry point.
+BLOCK_EXPORT
+void _Block_use_RR2(const Block_callbacks_RR *callbacks) {
+ _Block_retain_object = callbacks->retain;
+ _Block_release_object = callbacks->release;
+ _Block_destructInstance = callbacks->destructInstance;
+}
+
+/****************************************************************************
+Accessors for block descriptor fields
+*****************************************************************************/
+#if 0
+static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock)
+{
+ return aBlock->descriptor;
+}
+#endif
+
+static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock)
+{
+ if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL;
+ uint8_t *desc = (uint8_t *)aBlock->descriptor;
+ desc += sizeof(struct Block_descriptor_1);
+ return (struct Block_descriptor_2 *)desc;
+}
+
+static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock)
+{
+ if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL;
+ uint8_t *desc = (uint8_t *)aBlock->descriptor;
+ desc += sizeof(struct Block_descriptor_1);
+ if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
+ desc += sizeof(struct Block_descriptor_2);
+ }
+ return (struct Block_descriptor_3 *)desc;
+}
+
+static __inline bool _Block_has_layout(struct Block_layout *aBlock) {
+ if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return false;
+ uint8_t *desc = (uint8_t *)aBlock->descriptor;
+ desc += sizeof(struct Block_descriptor_1);
+ if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) {
+ desc += sizeof(struct Block_descriptor_2);
+ }
+ return ((struct Block_descriptor_3 *)desc)->layout != NULL;
+}
+
+static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock)
+{
+ struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
+ if (!desc) return;
+
+ (*desc->copy)(result, aBlock); // do fixup
+}
+
+static void _Block_call_dispose_helper(struct Block_layout *aBlock)
+{
+ struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock);
+ if (!desc) return;
+
+ (*desc->dispose)(aBlock);
+}
+
+/*******************************************************************************
+Internal Support routines for copying
+********************************************************************************/
+
+#if !TARGET_OS_WIN32
+#pragma mark Copy/Release support
+#endif
+
+// Copy, or bump refcount, of a block. If really copying, call the copy helper if present.
+static void *_Block_copy_internal(const void *arg, const bool wantsOne) {
+ struct Block_layout *aBlock;
+
+ if (!arg) return NULL;
+
+
+ // The following would be better done as a switch statement
+ aBlock = (struct Block_layout *)arg;
+ if (aBlock->flags & BLOCK_NEEDS_FREE) {
+ // latches on high
+ latching_incr_int(&aBlock->flags);
+ return aBlock;
+ }
+ else if (aBlock->flags & BLOCK_IS_GC) {
+ // GC refcounting is expensive so do most refcounting here.
+ if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) {
+ // Tell collector to hang on to this - it will bump the GC refcount version
+ _Block_setHasRefcount(aBlock, true);
+ }
+ return aBlock;
+ }
+ else if (aBlock->flags & BLOCK_IS_GLOBAL) {
+ return aBlock;
+ }
+
+ // It's a stack block. Make a copy.
+ if (!isGC) {
+ struct Block_layout *result = malloc(aBlock->descriptor->size);
+ if (!result) return NULL;
+ memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
+ // reset refcount
+ result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed
+ result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1
+ result->isa = _NSConcreteMallocBlock;
+ _Block_call_copy_helper(result, aBlock);
+ return result;
+ }
+ else {
+ // Under GC we want an allocation with refcount 1, so we pass "true" when wantsOne
+ // This allows the copy helper routines to make non-refcounted block copies under GC
+ int32_t flags = aBlock->flags;
+ bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
+ struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock));
+ if (!result) return NULL;
+ memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
+ // reset refcount
+ // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
+ flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed
+ if (wantsOne)
+ flags |= BLOCK_IS_GC | 2;
+ else
+ flags |= BLOCK_IS_GC;
+ result->flags = flags;
+ _Block_call_copy_helper(result, aBlock);
+ if (hasCTOR) {
+ result->isa = _NSConcreteFinalizingBlock;
+ }
+ else {
+ result->isa = _NSConcreteAutoBlock;
+ }
+ return result;
+ }
+}
+
+
+
+
+
+// Runtime entry points for maintaining the sharing knowledge of byref data blocks.
+
+// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data
+// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
+// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
+// Otherwise we need to copy it and update the stack forwarding pointer
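+//
+// Illustrative sketch (not part of the original comment): roughly what the
+// compiler emits for "__block int x;" so the forwarding dance has a concrete
+// shape. Field names mirror struct Block_byref in Block_private.h; the struct
+// name is hypothetical.
+//
+//   struct Block_byref_x {
+//       void *isa;                        // NULL for ordinary __block variables
+//       struct Block_byref_x *forwarding; // self, or the heap copy once copied
+//       volatile int32_t flags;
+//       uint32_t size;
+//       int x;                            // the __block variable itself
+//   };
+//
+// All reads and writes go through x.forwarding->x, so after the struct is
+// copied to the heap and the stack copy's forwarding pointer is patched, the
+// stack frame and every copied Block share the same storage.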
+static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
+ struct Block_byref **destp = (struct Block_byref **)dest;
+ struct Block_byref *src = (struct Block_byref *)arg;
+
+ if (src->forwarding->flags & BLOCK_BYREF_IS_GC) {
+ ; // don't need to do any more work
+ }
+ else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
+ // src points to stack
+ bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
+ // if it's weak, ask for an object (only matters under GC)
+ struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
+ copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
+ copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
+ src->forwarding = copy; // patch stack to point to heap copy
+ copy->size = src->size;
+ if (isWeak) {
+ copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning
+ }
+ if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
+ // Trust copy helper to copy everything of interest
+ // If more than one field shows up in a byref block this is wrong XXX
+ struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1);
+ struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1);
+ copy2->byref_keep = src2->byref_keep;
+ copy2->byref_destroy = src2->byref_destroy;
+
+ if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) {
+ struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1);
+ struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1);
+ copy3->layout = src3->layout;
+ }
+
+ (*src2->byref_keep)(copy, src);
+ }
+ else {
+ // just bits. Blast 'em using _Block_memmove in case they're __strong
+ // This copy includes Block_byref_3, if any.
+ _Block_memmove(copy+1, src+1,
+ src->size - sizeof(struct Block_byref));
+ }
+ }
+ // already copied to heap
+ else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) {
+ latching_incr_int(&src->forwarding->flags);
+ }
+ // assign byref data block pointer into new Block
+ _Block_assign(src->forwarding, (void **)destp);
+}
+
+// Old compiler SPI
+static void _Block_byref_release(const void *arg) {
+ struct Block_byref *byref = (struct Block_byref *)arg;
+ int32_t refcount;
+
+ // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
+ byref = byref->forwarding;
+
+ // To support C++ destructors under GC we arrange for there to be a finalizer for this
+ // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
+ if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) {
+ return; // stack or GC or global
+ }
+ refcount = byref->flags & BLOCK_REFCOUNT_MASK;
+ os_assert(refcount);
+ if (latching_decr_int_should_deallocate(&byref->flags)) {
+ if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) {
+ struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1);
+ (*byref2->byref_destroy)(byref);
+ }
+ _Block_deallocator((struct Block_layout *)byref);
+ }
+}
+
+
+/************************************************************
+ *
+ * API supporting SPI
+ * _Block_copy, _Block_release, and (old) _Block_destroy
+ *
+ ***********************************************************/
+
+#if !TARGET_OS_WIN32
+#pragma mark SPI/API
+#endif
+
+BLOCK_EXPORT
+void *_Block_copy(const void *arg) {
+ return _Block_copy_internal(arg, true);
+}
+
+
+// API entry point to release a copied Block
+BLOCK_EXPORT
+void _Block_release(const void *arg) {
+ struct Block_layout *aBlock = (struct Block_layout *)arg;
+ if (!aBlock
+ || (aBlock->flags & BLOCK_IS_GLOBAL)
+ || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0)
+ ) return;
+ if (aBlock->flags & BLOCK_IS_GC) {
+ if (latching_decr_int_now_zero(&aBlock->flags)) {
+ // Tell GC we no longer have our own refcounts. GC will decr its refcount
+ // and unless someone has done a CFRetain or marked it uncollectable it will
+ // now be subject to GC reclamation.
+ _Block_setHasRefcount(aBlock, false);
+ }
+ }
+ else if (aBlock->flags & BLOCK_NEEDS_FREE) {
+ if (latching_decr_int_should_deallocate(&aBlock->flags)) {
+ _Block_call_dispose_helper(aBlock);
+ _Block_destructInstance(aBlock);
+ _Block_deallocator(aBlock);
+ }
+ }
+}
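+
+// Illustrative usage sketch (not part of the original file): the public
+// Block_copy()/Block_release() macros from <Block.h> funnel into the two
+// entry points above.
+//
+//   int captured = 42;
+//   void (^onStack)(void) = ^{ (void)captured; };   // stack Block literal
+//   void (^onHeap)(void) = Block_copy(onStack);     // calls _Block_copy()
+//   onHeap();
+//   Block_release(onHeap);                          // calls _Block_release()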
+
+BLOCK_EXPORT
+bool _Block_tryRetain(const void *arg) {
+ struct Block_layout *aBlock = (struct Block_layout *)arg;
+ return latching_incr_int_not_deallocating(&aBlock->flags);
+}
+
+BLOCK_EXPORT
+bool _Block_isDeallocating(const void *arg) {
+ struct Block_layout *aBlock = (struct Block_layout *)arg;
+ return (aBlock->flags & BLOCK_DEALLOCATING) != 0;
+}
+
+// Old compiler SPI entry point to release a copied Block; used by the compiler in dispose helpers
+static void _Block_destroy(const void *arg) {
+ struct Block_layout *aBlock;
+ if (!arg) return;
+ aBlock = (struct Block_layout *)arg;
+ if (aBlock->flags & BLOCK_IS_GC) {
+ // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
+ return; // ignore, we are being called because of a DTOR
+ }
+ _Block_release(aBlock);
+}
+
+
+
+/************************************************************
+ *
+ * SPI used by other layers
+ *
+ ***********************************************************/
+
+// SPI, also internal. Called from NSAutoBlock only under GC
+BLOCK_EXPORT
+void *_Block_copy_collectable(const void *aBlock) {
+ return _Block_copy_internal(aBlock, false);
+}
+
+
+// SPI
+BLOCK_EXPORT
+size_t Block_size(void *aBlock) {
+ return ((struct Block_layout *)aBlock)->descriptor->size;
+}
+
+BLOCK_EXPORT
+bool _Block_use_stret(void *aBlock) {
+ struct Block_layout *layout = (struct Block_layout *)aBlock;
+
+ int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET;
+ return (layout->flags & requiredFlags) == requiredFlags;
+}
+
+// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit.
+BLOCK_EXPORT
+bool _Block_has_signature(void *aBlock) {
+ return _Block_signature(aBlock) ? true : false;
+}
+
+BLOCK_EXPORT
+const char * _Block_signature(void *aBlock)
+{
+ struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
+ if (!desc3) return NULL;
+
+ return desc3->signature;
+}
+
+BLOCK_EXPORT
+const char * _Block_layout(void *aBlock)
+{
+ // Don't return extended layout to callers expecting GC layout
+ struct Block_layout *layout = (struct Block_layout *)aBlock;
+ if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL;
+
+ struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
+ if (!desc3) return NULL;
+
+ return desc3->layout;
+}
+
+BLOCK_EXPORT
+const char * _Block_extended_layout(void *aBlock)
+{
+ // Don't return GC layout to callers expecting extended layout
+ struct Block_layout *layout = (struct Block_layout *)aBlock;
+ if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL;
+
+ struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock);
+ if (!desc3) return NULL;
+
+ // Return empty string (all non-object bytes) instead of NULL
+ // so callers can distinguish "empty layout" from "no layout".
+ if (!desc3->layout) return "";
+ else return desc3->layout;
+}
+
+#if !TARGET_OS_WIN32
+#pragma mark Compiler SPI entry points
+#endif
+
+
+/*******************************************************
+
+Entry points used by the compiler - the real API!
+
+
+A Block can reference four different kinds of things that require help when the Block is copied to the heap.
+1) C++ stack based objects
+2) References to Objective-C objects
+3) Other Blocks
+4) __block variables
+
+In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
+
+The flags parameter of _Block_object_assign and _Block_object_dispose is set to
+ * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
+ * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
+ * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
+If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).
+
+So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
+
+When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
+
+So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and always 128 ORed in, for the following set of possibilities:
+ __block id 128+3 (0x83)
+ __block (^Block) 128+7 (0x87)
+ __weak __block id 128+3+16 (0x93)
+ __weak __block (^Block) 128+7+16 (0x97)
+
+
+********************************************************/
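+
+// Illustrative sketch (not part of the original comment): roughly what the
+// compiler-synthesized helpers look like for a Block that captures an
+// Objective-C object and a __block variable. The struct and helper names are
+// hypothetical; only the flag values and the calls into _Block_object_assign
+// and _Block_object_dispose follow the contract described above.
+//
+//   // ^{ use(obj); x++; }   where: id obj;  __block int x;
+//   static void example_copy(struct example_block *dst, struct example_block *src) {
+//       _Block_object_assign(&dst->obj, src->obj, BLOCK_FIELD_IS_OBJECT);        // 3
+//       _Block_object_assign(&dst->x_byref, src->x_byref, BLOCK_FIELD_IS_BYREF); // 8
+//   }
+//   static void example_dispose(struct example_block *b) {
+//       _Block_object_dispose(b->obj, BLOCK_FIELD_IS_OBJECT);                    // 3
+//       _Block_object_dispose(b->x_byref, BLOCK_FIELD_IS_BYREF);                 // 8
+//   }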
+
+//
+// When Blocks or Block_byrefs hold objects, their copy routine helpers use this entry point
+// to do the assignment.
+//
+BLOCK_EXPORT
+void _Block_object_assign(void *destAddr, const void *object, const int flags) {
+ switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
+ case BLOCK_FIELD_IS_OBJECT:
+ /*******
+ id object = ...;
+ [^{ object; } copy];
+ ********/
+
+ _Block_retain_object(object);
+ _Block_assign((void *)object, destAddr);
+ break;
+
+ case BLOCK_FIELD_IS_BLOCK:
+ /*******
+ void (^object)(void) = ...;
+ [^{ object; } copy];
+ ********/
+
+ _Block_assign(_Block_copy_internal(object, false), destAddr);
+ break;
+
+ case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
+ case BLOCK_FIELD_IS_BYREF:
+ /*******
+ // copy the onstack __block container to the heap
+ __block ... x;
+ __weak __block ... x;
+ [^{ x; } copy];
+ ********/
+
+ _Block_byref_assign_copy(destAddr, object, flags);
+ break;
+
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
+ /*******
+ // copy the actual field held in the __block container
+ __block id object;
+ __block void (^object)(void);
+ [^{ object; } copy];
+ ********/
+
+ // under manual retain release __block object/block variables are dangling
+ _Block_assign((void *)object, destAddr);
+ break;
+
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
+ /*******
+ // copy the actual field held in the __block container
+ __weak __block id object;
+ __weak __block void (^object)(void);
+ [^{ object; } copy];
+ ********/
+
+ _Block_assign_weak(object, destAddr);
+ break;
+
+ default:
+ break;
+ }
+}
+
+// When Blocks or Block_byrefs hold objects, their destroy helper routines call this entry point
+// to help dispose of the contents.
+// Used initially only for __attribute__((NSObject)) marked pointers.
+BLOCK_EXPORT
+void _Block_object_dispose(const void *object, const int flags) {
+ switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) {
+ case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK:
+ case BLOCK_FIELD_IS_BYREF:
+ // get rid of the __block data structure held in a Block
+ _Block_byref_release(object);
+ break;
+ case BLOCK_FIELD_IS_BLOCK:
+ _Block_destroy(object);
+ break;
+ case BLOCK_FIELD_IS_OBJECT:
+ _Block_release_object(object);
+ break;
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT:
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK:
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK:
+ case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK:
+ break;
+ default:
+ break;
+ }
+}
AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private
DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \
- $(MARCH_FLAGS) $(KQUEUE_CFLAGS) $(BSD_OVERLAY_CFLAGS)
-AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
+ $(MARCH_FLAGS) $(BSD_OVERLAY_CFLAGS)
+if DISPATCH_ENABLE_ASSERTS
+DISPATCH_CFLAGS+=-DDISPATCH_DEBUG=1
+endif
+AM_CFLAGS= $(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS)
-AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
+AM_CXXFLAGS=$(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS)
+if BUILD_OWN_KQUEUES
+ KQUEUE_LIBS+=$(top_builddir)/libkqueue/libkqueue.la
+ KQUEUE_CFLAGS+=-I$(top_srcdir)/libkqueue/include
+endif
+
if BUILD_OWN_PTHREAD_WORKQUEUES
PTHREAD_WORKQUEUE_LIBS=$(top_builddir)/libpwq/libpthread_workqueue.la
PTHREAD_WORKQUEUE_CFLAGS=-I$(top_srcdir)/libpwq/include
endif
endif
+if BUILD_OWN_BLOCKS_RUNTIME
+libdispatch_la_SOURCES+= BlocksRuntime/data.c BlocksRuntime/runtime.c
+CBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime
+CXXBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime
+BLOCKS_RUNTIME_LIBS=-ldl
+endif
+
libdispatch_la_LDFLAGS=-avoid-version
-libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS)
+libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS)
if HAVE_DARWIN_LD
libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \
$(SWIFT_OBJ_FILES:%=%.~partial.swiftdoc) \
$(SWIFT_OBJ_FILES:%=%.~partial.swiftdeps)
-SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.map -I$(abs_top_srcdir) -Xcc -fblocks
+SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.modulemap -I$(abs_top_srcdir) -Xcc -fblocks
+if DISPATCH_ENABLE_OPTIMIZATION
+SWIFTC_FLAGS+=-O
+endif
-$(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift
+$(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift $(SWIFTC)
+ @rm -f $@
$(SWIFTC) -frontend -c $(SWIFT_ABS_SRC_FILES) -primary-file $< \
$(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \
-o $@ -emit-module-path $@.~partial.swiftmodule \
-emit-reference-dependencies-path $@.swiftdeps \
-module-cache-path $(top_builddir)
-$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES)
+$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES) $(SWIFTC)
+ @rm -f $@
$(SWIFTC) -frontend -emit-module $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \
$(SWIFTC_FLAGS) -module-cache-path $(top_builddir) -module-link-name dispatch \
-o $@ -emit-module-doc-path $(@:%.swiftmodule=%.swiftdoc)
comm_recvp : mach_port_move_receive_t;
comm_sendp : mach_port_make_send_t;
extra_info_port : mach_port_move_send_t;
- extra_info_size : mach_vm_size_t
+ extra_info_size : mach_vm_size_t;
+ ServerAuditToken atoken : audit_token_t
);
routine
#include <sys/param.h>
#include <sys/types.h>
#include <vm/vm_kern.h>
+#include <internal/atomic.h> // os/internal/atomic.h
#include <firehose_types_private.h> // <firehose/firehose_types_private.h>
#include <tracepoint_private.h> // <firehose/tracepoint_private.h>
-#include <internal/atomic.h> // os/internal/atomic.h
+#include <chunk_private.h> // <firehose/chunk_private.h>
#include "os/firehose_buffer_private.h"
#include "firehose_buffer_internal.h"
#include "firehose_inline_internal.h"
offsetof(firehose_stream_state_u, fss_allocator),
"fss_gate and fss_allocator alias");
_Static_assert(sizeof(struct firehose_buffer_header_s) ==
- FIREHOSE_BUFFER_CHUNK_SIZE,
+ FIREHOSE_CHUNK_SIZE,
"firehose buffer header must be 4k");
_Static_assert(offsetof(struct firehose_buffer_header_s, fbh_unused) <=
- FIREHOSE_BUFFER_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE,
+ FIREHOSE_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE,
"we must have enough space for the libtrace header");
-_Static_assert(sizeof(struct firehose_buffer_chunk_s) ==
- FIREHOSE_BUFFER_CHUNK_SIZE,
- "firehose buffer chunks must be 4k");
_Static_assert(powerof2(FIREHOSE_BUFFER_CHUNK_COUNT),
"CHUNK_COUNT Must be a power of two");
_Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64,
_Static_assert(powerof2(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT),
"madvise chunk count must be a power of two");
#endif
-_Static_assert(howmany(sizeof(struct firehose_tracepoint_s),
- sizeof(struct firehose_buffer_chunk_s)) < 255,
- "refcount assumes that you cannot have more than 255 tracepoints");
-// FIXME: we should have an event-count instead here
_Static_assert(sizeof(struct firehose_buffer_stream_s) == 128,
"firehose buffer stream must be small (single cacheline if possible)");
-_Static_assert(offsetof(struct firehose_buffer_chunk_s, fbc_data) % 8 == 0,
- "Page header is 8 byte aligned");
_Static_assert(sizeof(struct firehose_tracepoint_s) == 24,
"tracepoint header should be exactly 24 bytes");
#endif
uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT;
sendp = firehose_mach_port_allocate(opts, fb);
- if (oldsendp && _voucher_libtrace_hooks->vah_version >= 3) {
- if (_voucher_libtrace_hooks->vah_get_reconnect_info) {
- kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size);
- if (likely(kr == KERN_SUCCESS) && addr && size) {
- extra_info_size = size;
- kr = mach_make_memory_entry_64(mach_task_self(), &size, addr,
- flags, &extra_info_port, MACH_PORT_NULL);
- if (unlikely(kr)) {
- // the client probably has some form of memory corruption
- // and/or a port leak
- DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port");
- }
- kr = mach_vm_deallocate(mach_task_self(), addr, size);
- (void)dispatch_assume_zero(kr);
+ if (oldsendp && _voucher_libtrace_hooks->vah_get_reconnect_info) {
+ kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size);
+ if (likely(kr == KERN_SUCCESS) && addr && size) {
+ extra_info_size = size;
+ kr = mach_make_memory_entry_64(mach_task_self(), &size, addr,
+ flags, &extra_info_port, MACH_PORT_NULL);
+ if (unlikely(kr)) {
+ // the client probably has some form of memory corruption
+ // and/or a port leak
+ DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port");
}
+ kr = mach_vm_deallocate(mach_task_self(), addr, size);
+ (void)dispatch_assume_zero(kr);
}
}
}
}
- uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_BUFFER_CHUNK_SIZE);
+ uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_CHUNK_SIZE);
if (ratio > 1) {
total = roundup(total, ratio);
}
vm_addr = vm_page_size;
const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT *
- FIREHOSE_BUFFER_CHUNK_SIZE;
+ FIREHOSE_CHUNK_SIZE;
if (slowpath(madvise_bytes % PAGE_SIZE)) {
DISPATCH_INTERNAL_CRASH(madvise_bytes,
"Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE");
vm_offset_t vm_addr = 0;
vm_size_t size;
- size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE;
+ size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE;
__firehose_allocate(&vm_addr, size);
(void)logd_port; (void)unique_pid;
return;
}
- bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) |
- ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1));
- state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header,
- fbh_bank.fbb_state.fbs_atomic_state, bank_updates, relaxed);
- if (state_out) *state_out = state;
-
+ __firehose_critical_region_enter();
os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail,
otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, {
ntail = otail;
ntail.frp_io_flushed += io_delta;
ntail.frp_mem_flushed += mem_delta;
});
+
+ bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) |
+ ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1));
+ state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header,
+ fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release);
+ __firehose_critical_region_leave();
+
+ if (state_out) *state_out = state;
+
if (async_notif) {
if (io_delta) {
os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed);
OS_ALWAYS_INLINE
static inline firehose_tracepoint_t
-firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc,
+firehose_buffer_chunk_init(firehose_chunk_t fc,
firehose_tracepoint_query_t ask, uint8_t **privptr)
{
const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
- uint16_t pub_offs = offsetof(struct firehose_buffer_chunk_s, fbc_data);
- uint16_t priv_offs = FIREHOSE_BUFFER_CHUNK_SIZE;
+ uint16_t pub_offs = offsetof(struct firehose_chunk_s, fc_data);
+ uint16_t priv_offs = FIREHOSE_CHUNK_SIZE;
pub_offs += roundup(ft_size + ask->pubsize, 8);
priv_offs -= ask->privsize;
- if (fbc->fbc_pos.fbc_atomic_pos) {
+ if (fc->fc_pos.fcp_atomic_pos) {
// Needed for process death handling (recycle-reuse):
// No atomic fences required, we merely want to make sure the observers
// will see memory effects in program (asm) order.
// and it is dirty, when crawling the chunk, we don't see remnants of
// other tracepoints
//
- // We only do that when the fbc_pos is non zero, because zero means
+ // We only do that when the fc_pos is non zero, because zero means
// we just faulted the chunk, and the kernel already bzero-ed it.
- bzero(fbc->fbc_data, sizeof(fbc->fbc_data));
+ bzero(fc->fc_data, sizeof(fc->fc_data));
}
dispatch_compiler_barrier();
// <rdar://problem/23562733> boot starts mach absolute time at 0, and
// wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP
// breaks firehose_buffer_stream_flush() assumptions
if (ask->stamp > FIREHOSE_STAMP_SLOP) {
- fbc->fbc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP;
+ fc->fc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP;
} else {
- fbc->fbc_timestamp = 0;
+ fc->fc_timestamp = 0;
}
- fbc->fbc_pos = (firehose_buffer_pos_u){
- .fbc_next_entry_offs = pub_offs,
- .fbc_private_offs = priv_offs,
- .fbc_refcnt = 1,
- .fbc_qos_bits = firehose_buffer_qos_bits_propagate(),
- .fbc_stream = ask->stream,
- .fbc_flag_io = ask->for_io,
+ fc->fc_pos = (firehose_chunk_pos_u){
+ .fcp_next_entry_offs = pub_offs,
+ .fcp_private_offs = priv_offs,
+ .fcp_refcnt = 1,
+ .fcp_qos = firehose_buffer_qos_bits_propagate(),
+ .fcp_stream = ask->stream,
+ .fcp_flag_io = ask->for_io,
};
if (privptr) {
- *privptr = fbc->fbc_start + priv_offs;
+ *privptr = fc->fc_start + priv_offs;
}
- return (firehose_tracepoint_t)fbc->fbc_data;
+ return (firehose_tracepoint_t)fc->fc_data;
}
OS_NOINLINE
uint64_t stamp_and_len;
if (fastpath(ref)) {
- firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
- ft = firehose_buffer_chunk_init(fbc, ask, privptr);
+ firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref);
+ ft = firehose_buffer_chunk_init(fc, ask, privptr);
// Needed for process death handling (tracepoint-begin):
// write the length before making the chunk visible
- stamp_and_len = ask->stamp - fbc->fbc_timestamp;
+ stamp_and_len = ask->stamp - fc->fc_timestamp;
stamp_and_len |= (uint64_t)ask->pubsize << 48;
os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed);
-
+#ifdef KERNEL
+ ft->ft_thread = thread_tid(current_thread());
+#else
+ ft->ft_thread = _pthread_threadid_self_np_direct();
+#endif
if (ask->stream == firehose_stream_metadata) {
os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap,
1ULL << ref, relaxed);
firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref)
{
const size_t madv_size =
- FIREHOSE_BUFFER_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT;
+ FIREHOSE_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT;
const size_t madv_mask =
(1ULL << FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT) - 1;
void
firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref)
{
- firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+ firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref);
uint16_t volatile *fbh_ring;
uint16_t volatile *fbh_ring_head;
uint16_t head, gen, dummy, idx;
- firehose_buffer_pos_u fbc_pos = fbc->fbc_pos;
- bool for_io = fbc_pos.fbc_flag_io;
+ firehose_chunk_pos_u fc_pos = fc->fc_pos;
+ bool for_io = fc_pos.fcp_flag_io;
if (for_io) {
fbh_ring = fb->fb_header.fbh_io_ring;
}));
}
- pthread_priority_t pp = fbc_pos.fbc_qos_bits;
+ pthread_priority_t pp = fc_pos.fcp_qos;
pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;
firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL),
for_io);
#endif
}
+#ifndef KERNEL
+void
+firehose_buffer_force_connect(firehose_buffer_t fb)
+{
+ mach_port_t sendp = fb->fb_header.fbh_sendp;
+ if (sendp == MACH_PORT_NULL) firehose_client_reconnect(fb, MACH_PORT_NULL);
+}
+#endif
+
OS_ALWAYS_INLINE
static inline uint16_t
firehose_buffer_ring_try_recycle(firehose_buffer_t fb)
firehose_ring_tail_u pos, old;
uint16_t volatile *fbh_ring;
uint16_t gen, ref, entry, tail;
- firehose_buffer_chunk_t fbc;
+ firehose_chunk_t fc;
bool for_io;
os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail,
// and it is dirty, it is a chunk being written to that needs a flush
gen = (entry & FIREHOSE_RING_POS_GEN_MASK) + FIREHOSE_RING_POS_GEN_INC;
ref = entry & FIREHOSE_RING_POS_IDX_MASK;
- fbc = firehose_buffer_ref_to_chunk(fb, ref);
+ fc = firehose_buffer_ref_to_chunk(fb, ref);
- if (!for_io && fbc->fbc_pos.fbc_stream == firehose_stream_metadata) {
+ if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) {
os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap,
~(1ULL << ref), relaxed);
}
- os_atomic_store2o(fbc, fbc_pos.fbc_atomic_pos,
- FIREHOSE_BUFFER_POS_FULL_BIT, relaxed);
+ os_atomic_store2o(fc, fc_pos.fcp_atomic_pos,
+ FIREHOSE_CHUNK_POS_FULL_BIT, relaxed);
dispatch_compiler_barrier();
os_atomic_store(&fbh_ring[tail], gen | 0, relaxed);
return ref;
firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref)
{
const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io);
+ const uint64_t bank_inc = FIREHOSE_BANK_INC(ask->for_io);
firehose_buffer_bank_t const fbb = &fb->fb_header.fbh_bank;
firehose_bank_state_u state;
uint16_t fbs_max_ref;
if (!fastpath(ask->is_bank_ok)) {
state.fbs_atomic_state =
os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed);
- while (state.fbs_atomic_state & bank_unavail_mask) {
+ while ((state.fbs_atomic_state - bank_inc) & bank_unavail_mask) {
firehose_client_send_push(fb, ask->for_io, &state);
if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) {
// logd was unloaded, give up
uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io);
#ifndef KERNEL
state.fbs_atomic_state = os_atomic_add_orig2o(fbb,
- fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed);
+ fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), acquire);
if (fastpath(!(state.fbs_atomic_state & unavail_mask))) {
ask->is_bank_ok = true;
if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) {
#else
firehose_bank_state_u value;
ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state,
- state.fbs_atomic_state, value.fbs_atomic_state, relaxed, {
+ state.fbs_atomic_state, value.fbs_atomic_state, acquire, {
value = state;
if (slowpath((value.fbs_atomic_state & unavail_mask) != 0)) {
os_atomic_rmw_loop_give_up(break);
privsize, privptr);
}
-firehose_tracepoint_t
-__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc,
- uint64_t stamp, firehose_stream_t stream,
- uint16_t pubsize, uint16_t privsize, uint8_t **privptr)
-{
-
- firehose_tracepoint_t ft;
- long result;
-
- result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream,
- pubsize, privsize, privptr);
- if (fastpath(result > 0)) {
- ft = (firehose_tracepoint_t)(fbc->fbc_start + result);
- stamp -= fbc->fbc_timestamp;
- stamp |= (uint64_t)pubsize << 48;
- // Needed for process death handling (tracepoint-begin)
- // see firehose_buffer_stream_chunk_install
- os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed);
- dispatch_compiler_barrier();
- return ft;
- }
- else {
- return NULL;
- }
-}
-
firehose_buffer_t
__firehose_buffer_create(size_t *size)
{
}
if (size) {
- *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE;
+ *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE;
}
return kernel_firehose_buffer;
}
return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid);
}
-void
-__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc,
- firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
-{
- firehose_buffer_pos_u pos;
-
- // Needed for process death handling (tracepoint-flush):
- // We want to make sure the observers
- // will see memory effects in program (asm) order.
- // 1. write all the data to the tracepoint
- // 2. write the tracepoint ID, so that seeing it means the tracepoint
- // is valid
- ft->ft_thread = thread_tid(current_thread());
-
- // release barrier makes the log writes visible
- os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release);
- pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos,
- FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed);
- return;
-}
-
void
__firehose_merge_updates(firehose_push_reply_t update)
{
dispatch_unfair_lock_s fbh_logd_lock;
#endif
uint64_t fbh_unused[0];
-} OS_ALIGNED(FIREHOSE_BUFFER_CHUNK_SIZE) *firehose_buffer_header_t;
+} OS_ALIGNED(FIREHOSE_CHUNK_SIZE) *firehose_buffer_header_t;
union firehose_buffer_u {
struct firehose_buffer_header_s fb_header;
- struct firehose_buffer_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT];
+ struct firehose_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT];
};
// used to let the compiler pack these values in 1 or 2 registers
void
firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref);
+void
+firehose_buffer_force_connect(firehose_buffer_t fb);
+
#endif
#endif // __FIREHOSE_BUFFER_INTERNAL__
#pragma mark firehose buffer
OS_ALWAYS_INLINE
-static inline firehose_buffer_chunk_t
+static inline firehose_chunk_t
firehose_buffer_chunk_for_address(void *addr)
{
- uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1);
- return (firehose_buffer_chunk_t)chunk_addr;
+ uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1);
+ return (firehose_chunk_t)chunk_addr;
}
OS_ALWAYS_INLINE
static inline uint16_t
-firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc)
+firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc)
{
return (uint16_t)(fbc - fb->fb_chunks);
}
OS_ALWAYS_INLINE
-static inline firehose_buffer_chunk_t
+static inline firehose_chunk_t
firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref)
{
return fb->fb_chunks + ref;
}
#ifndef FIREHOSE_SERVER
-
-OS_ALWAYS_INLINE
-static inline bool
-firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size)
-{
- return pos.fbc_next_entry_offs + size <= pos.fbc_private_offs;
-}
-
#if DISPATCH_PURE_C
OS_ALWAYS_INLINE
#endif
}
-OS_ALWAYS_INLINE
-static inline long
-firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp,
- firehose_stream_t stream, uint16_t pubsize,
- uint16_t privsize, uint8_t **privptr)
-{
- const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
- firehose_buffer_pos_u orig, pos;
- uint8_t qos_bits = firehose_buffer_qos_bits_propagate();
- bool reservation_failed, stamp_delta_fits;
-
- stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0;
-
- // no acquire barrier because the returned space is written to only
- os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos,
- orig.fbc_atomic_pos, pos.fbc_atomic_pos, relaxed, {
- if (unlikely(orig.fbc_atomic_pos == 0)) {
- // we acquired a really really old reference, and we probably
- // just faulted in a new page
- // FIXME: if/when we hit this we should try to madvise it back FREE
- os_atomic_rmw_loop_give_up(return 0);
- }
- if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) {
- // nothing to do if the chunk is full, or the stream doesn't match,
- // in which case the thread probably:
- // - loaded the chunk ref
- // - been suspended a long while
- // - read the chunk to find a very old thing
- os_atomic_rmw_loop_give_up(return 0);
- }
- pos = orig;
- pos.fbc_qos_bits |= qos_bits;
- if (unlikely(!firehose_buffer_pos_fits(orig,
- ft_size + pubsize + privsize) || !stamp_delta_fits)) {
- pos.fbc_flag_full = true;
- reservation_failed = true;
- } else {
- // using these *_INC macros is so that the compiler generates better
- // assembly: using the struct individual fields forces the compiler
- // to handle carry propagations, and we know it won't happen
- pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) *
- FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC;
- pos.fbc_atomic_pos -= privsize *
- FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC;
- pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC;
- const uint16_t minimum_payload_size = 16;
- if (!firehose_buffer_pos_fits(pos,
- roundup(ft_size + minimum_payload_size , 8))) {
- // if we can't even have minimum_payload_size bytes of payload
- // for the next tracepoint, just flush right away
- pos.fbc_flag_full = true;
- }
- reservation_failed = false;
- }
- });
-
- if (reservation_failed) {
- if (pos.fbc_refcnt) {
- // nothing to do, there is a thread writing that will pick up
- // the "FULL" flag on flush and push as a consequence
- return 0;
- }
- // caller must enqueue chunk
- return -1;
- }
- if (privptr) {
- *privptr = fbc->fbc_start + pos.fbc_private_offs;
- }
- return orig.fbc_next_entry_offs;
-}
-
OS_ALWAYS_INLINE
static inline void
firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream)
{
firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
firehose_stream_state_u old_state, new_state;
- firehose_buffer_chunk_t fbc;
+ firehose_chunk_t fc;
uint64_t stamp = UINT64_MAX; // will cause the reservation to fail
uint16_t ref;
long result;
ref = old_state.fss_current;
if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) {
// there is no installed page, nothing to flush, go away
+#ifndef KERNEL
+ firehose_buffer_force_connect(fb);
+#endif
return;
}
- fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
- result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL);
+ fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current);
+ result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
+ firehose_buffer_qos_bits_propagate(), 1, 0, NULL);
if (likely(result < 0)) {
firehose_buffer_ring_enqueue(fb, old_state.fss_current);
}
{
firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream];
firehose_stream_state_u old_state, new_state;
- firehose_tracepoint_t ft;
- firehose_buffer_chunk_t fbc;
+ firehose_chunk_t fc;
#if KERNEL
bool failable = false;
#endif
ref = old_state.fss_current;
if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) {
- fbc = firehose_buffer_ref_to_chunk(fb, ref);
- result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream,
+ fc = firehose_buffer_ref_to_chunk(fb, ref);
+ result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream,
+ firehose_buffer_qos_bits_propagate(),
pubsize, privsize, privptr);
if (likely(result > 0)) {
- ft = (firehose_tracepoint_t)(fbc->fbc_start + result);
- stamp -= fbc->fbc_timestamp;
- stamp |= (uint64_t)pubsize << 48;
- // Needed for process death handling (tracepoint-begin)
- // see firehose_buffer_stream_chunk_install
- os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed);
- dispatch_compiler_barrier();
- return ft;
+ uint64_t thread;
+#ifdef KERNEL
+ thread = thread_tid(current_thread());
+#else
+ thread = _pthread_threadid_self_np_direct();
+#endif
+ return firehose_chunk_tracepoint_begin(fc,
+ stamp, pubsize, thread, result);
}
if (likely(result < 0)) {
firehose_buffer_ring_enqueue(fb, old_state.fss_current);
firehose_buffer_tracepoint_flush(firehose_buffer_t fb,
firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid)
{
- firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft);
- firehose_buffer_pos_u pos;
+ firehose_chunk_t fc = firehose_buffer_chunk_for_address(ft);
// Needed for process death handling (tracepoint-flush):
// We want to make sure the observers
// 1. write all the data to the tracepoint
// 2. write the tracepoint ID, so that seeing it means the tracepoint
// is valid
-#ifdef KERNEL
- ft->ft_thread = thread_tid(current_thread());
-#else
- ft->ft_thread = _pthread_threadid_self_np_direct();
-#endif
- // release barrier makes the log writes visible
- os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release);
- pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos,
- FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed);
- if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) {
- firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc));
+ if (firehose_chunk_tracepoint_end(fc, ft, ftid)) {
+ firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fc));
}
}
firehose_handler_t fs_handler;
firehose_snapshot_t fs_snapshot;
+ bool fs_io_snapshot_started;
+ bool fs_mem_snapshot_started;
int fs_kernel_fd;
firehose_client_t fs_kernel_client;
firehose_atomic_max2o(fc, fc_io_sent_flushed_pos,
push_reply.fpr_io_flushed_pos, relaxed);
- if (fc->fc_is_kernel) {
+ if (!fc->fc_pid) {
if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) {
dispatch_assume_zero(errno);
}
firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags)
{
firehose_buffer_t fb = fc->fc_buffer;
- firehose_buffer_chunk_t fbc;
+ firehose_chunk_t fbc;
firehose_event_t evt;
uint16_t volatile *fbh_ring;
uint16_t flushed, ref, count = 0;
fbh_ring = fb->fb_header.fbh_io_ring;
sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos;
flushed = (uint16_t)fc->fc_io_flushed_pos;
- if (fc->fc_needs_io_snapshot) {
+ if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
snapshot = server_config.fs_snapshot;
}
} else {
fbh_ring = fb->fb_header.fbh_mem_ring;
sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos;
flushed = (uint16_t)fc->fc_mem_flushed_pos;
- if (fc->fc_needs_mem_snapshot) {
+ if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
snapshot = server_config.fs_snapshot;
}
}
ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK;
ref = os_atomic_load(&fbh_ring[ref], relaxed);
ref &= FIREHOSE_RING_POS_IDX_MASK;
- } while (fc->fc_is_kernel && !ref);
+ } while (!fc->fc_pid && !ref);
count++;
if (!ref) {
_dispatch_debug("Ignoring invalid page reference in ring: %d", ref);
}
fbc = firehose_buffer_ref_to_chunk(fb, ref);
+ if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) {
+ // serialize with firehose_client_metadata_stream_peek
+ os_unfair_lock_lock(&fc->fc_lock);
+ }
server_config.fs_handler(fc, evt, fbc);
if (slowpath(snapshot)) {
snapshot->handler(fc, evt, fbc);
}
+ if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) {
+ os_unfair_lock_unlock(&fc->fc_lock);
+ }
// clients not using notifications (single threaded) always drain fully
// because they use all their limit, always
} while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot);
client_flushed = os_atomic_load2o(&fb->fb_header,
fbh_ring_tail.frp_mem_flushed, relaxed);
}
- if (fc->fc_is_kernel) {
+ if (!fc->fc_pid) {
// will fire firehose_client_notify() because port is MACH_PORT_DEAD
port = fc->fc_sendp;
} else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) {
if (port) {
firehose_client_notify(fc, port);
}
- if (fc->fc_is_kernel) {
+ if (!fc->fc_pid) {
if (!(flags & FIREHOSE_DRAIN_POLL)) {
// see firehose_client_kernel_source_handle_event
dispatch_resume(fc->fc_kernel_source);
// from now on all IO/mem drains depending on `for_io` will be no-op
// (needs_<for_io>_snapshot: false, memory_corrupted: true). we can safely
// silence the corresponding source of drain wake-ups.
- if (!fc->fc_is_kernel) {
+ if (fc->fc_pid) {
dispatch_source_cancel(for_io ? fc->fc_io_source : fc->fc_mem_source);
}
}
server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL);
TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry);
+ dispatch_release(fc->fc_mach_channel);
+ fc->fc_mach_channel = NULL;
fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS;
fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS;
_os_object_release(&fc->fc_as_os_object);
// Then look at all the allocated pages not seen in the ring
while (bitmap) {
uint16_t ref = firehose_bitmap_first_set(bitmap);
- firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
- uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs;
+ firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+ uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;
bitmap &= ~(1ULL << ref);
- if (fbc->fbc_start + fbc_length <= fbc->fbc_data) {
+ if (fbc->fc_start + fbc_length <= fbc->fc_data) {
// this page has its "recycle-requeue" done, but hasn't gone
// through "recycle-reuse", or it has no data, ditch it
continue;
}
- if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) {
+ if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
// this thing has data, but the first tracepoint is unreadable
// so also just ditch it
continue;
}
- if (!fbc->fbc_pos.fbc_flag_io) {
+ if (!fbc->fc_pos.fcp_flag_io) {
mem_bitmap |= 1ULL << ref;
continue;
}
server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc);
- if (fc->fc_needs_io_snapshot && snapshot) {
+ if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) {
snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc);
}
}
while (mem_bitmap_copy) {
uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy);
- firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+ firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
mem_bitmap_copy &= ~(1ULL << ref);
server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc);
- if (fc->fc_needs_mem_snapshot && snapshot) {
+ if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) {
snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc);
}
}
{
mach_msg_header_t *msg_hdr;
firehose_client_t fc = ctx;
- mach_port_t oldsendp, oldrecvp;
-
- if (dmsg) {
- msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
- oldsendp = msg_hdr->msgh_remote_port;
- oldrecvp = msg_hdr->msgh_local_port;
- }
switch (reason) {
case DISPATCH_MACH_MESSAGE_RECEIVED:
+ msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL);
if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) {
_dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)",
firehose_client_get_unique_pid(fc, NULL));
}
break;
- case DISPATCH_MACH_DISCONNECTED:
- if (oldsendp) {
- if (slowpath(oldsendp != fc->fc_sendp)) {
- DISPATCH_INTERNAL_CRASH(oldsendp,
- "disconnect event about unknown send-right");
- }
- firehose_mach_port_send_release(fc->fc_sendp);
- fc->fc_sendp = MACH_PORT_NULL;
- }
- if (oldrecvp) {
- if (slowpath(oldrecvp != fc->fc_recvp)) {
- DISPATCH_INTERNAL_CRASH(oldrecvp,
- "disconnect event about unknown receive-right");
- }
- firehose_mach_port_recv_dispose(fc->fc_recvp, fc);
- fc->fc_recvp = MACH_PORT_NULL;
- }
- if (fc->fc_recvp == MACH_PORT_NULL && fc->fc_sendp == MACH_PORT_NULL) {
- firehose_client_cancel(fc);
- }
+ case DISPATCH_MACH_CANCELED:
+ firehose_client_cancel(fc);
break;
}
}
dispatch_assert_queue(server_config.fs_io_drain_queue);
TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry);
server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci);
- if (fc->fc_is_kernel) {
+ if (!fc->fc_pid) {
dispatch_activate(fc->fc_kernel_source);
} else {
dispatch_mach_connect(fc->fc_mach_channel,
static void
firehose_client_cancel(firehose_client_t fc)
{
- dispatch_mach_t dm;
dispatch_block_t block;
_dispatch_debug("client died (unique_pid: 0x%llx",
firehose_client_get_unique_pid(fc, NULL));
- dm = fc->fc_mach_channel;
- fc->fc_mach_channel = NULL;
- dispatch_release(dm);
-
+ if (MACH_PORT_VALID(fc->fc_sendp)) {
+ firehose_mach_port_send_release(fc->fc_sendp);
+ fc->fc_sendp = MACH_PORT_NULL;
+ }
+ if (MACH_PORT_VALID(fc->fc_recvp)) {
+ firehose_mach_port_recv_dispose(fc->fc_recvp, fc);
+ fc->fc_recvp = MACH_PORT_NULL;
+ }
fc->fc_use_notifs = false;
dispatch_source_cancel(fc->fc_io_source);
dispatch_source_cancel(fc->fc_mem_source);
return fc;
}
+#pragma pack(4)
+typedef struct firehose_token_s {
+ uid_t auid;
+ uid_t euid;
+ gid_t egid;
+ uid_t ruid;
+ gid_t rgid;
+ pid_t pid;
+ au_asid_t asid;
+ dev_t execcnt;
+} *firehose_token_t;
+#pragma pack()
+
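// Note: this struct is assumed to mirror the field layout of the kernel's
// audit_token_t (auid, euid, egid, ruid, rgid, pid, asid, pidversion packed as
// eight 32-bit values); that assumption is what lets firehose_server_register()
// below hand the received token over with a plain cast:
//     fc = firehose_client_create(fb, (firehose_token_t)&atoken,
//             comm_recvp, comm_sendp);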
static firehose_client_t
-firehose_client_create(firehose_buffer_t fb,
+firehose_client_create(firehose_buffer_t fb, firehose_token_t token,
mach_port_t comm_recvp, mach_port_t comm_sendp)
{
uint64_t unique_pid = fb->fb_header.fbh_uniquepid;
dispatch_mach_t dm;
dispatch_source_t ds;
+ fc->fc_pid = token->pid ? token->pid : ~0;
+ fc->fc_euid = token->euid;
+ fc->fc_pidversion = token->execcnt;
ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0,
server_config.fs_mem_drain_queue);
_os_object_retain_internal_inline(&fc->fc_as_os_object);
DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer");
}
if (fb_map.fbmi_size !=
- FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE) {
+ FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) {
DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size");
}
fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr);
- fc->fc_is_kernel = true;
ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0,
fs->fs_ipc_queue);
dispatch_set_context(ds, fc);
firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out)
{
firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header;
- if (fc->fc_is_kernel) {
- if (pid_out) *pid_out = 0;
- return 0;
- }
- if (pid_out) *pid_out = fbh->fbh_pid ?: ~(pid_t)0;
- return fbh->fbh_uniquepid ?: ~0ull;
+ if (pid_out) *pid_out = fc->fc_pid;
+ if (!fc->fc_pid) return 0;
+ return fbh->fbh_uniquepid ? fbh->fbh_uniquepid : ~0ull;
+}
+
+uid_t
+firehose_client_get_euid(firehose_client_t fc)
+{
+ return fc->fc_euid;
+}
+
+int
+firehose_client_get_pid_version(firehose_client_t fc)
+{
+ return fc->fc_pidversion;
}
void *
}
}
+bool
+firehose_server_has_ever_flushed_pages(void)
+{
+ // Use the IO pages flushed count from the kernel client as an
+ // approximation for whether the firehose has ever flushed pages during
+ // this boot. logd uses this to detect the first time it starts after a
+ // fresh boot.
+ firehose_client_t fhc = server_config.fs_kernel_client;
+ return !fhc || fhc->fc_io_flushed_pos > 0;
+}
+
void
firehose_server_resume(void)
{
MACH_PORT_NULL, NULL);
}
+OS_NOINLINE
+static void
+_firehose_server_cancel(void *ctxt OS_UNUSED)
+{
+ firehose_client_t fc;
+ TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) {
+ dispatch_mach_cancel(fc->fc_mach_channel);
+ }
+}
+
+void
+firehose_server_cancel(void)
+{
+ dispatch_mach_cancel(server_config.fs_mach_channel);
+ dispatch_async_f(server_config.fs_io_drain_queue, NULL,
+ _firehose_server_cancel);
+}
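// _firehose_server_cancel() is funneled through fs_io_drain_queue because the
// fs_clients list is only manipulated on that queue (note the
// dispatch_assert_queue() checks elsewhere in this file), so the TAILQ_FOREACH
// above needs no extra locking.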
+
+dispatch_queue_t
+firehose_server_copy_queue(firehose_server_queue_t which)
+{
+ dispatch_queue_t dq;
+ switch (which) {
+ case FIREHOSE_SERVER_QUEUE_IO:
+ dq = server_config.fs_io_drain_queue;
+ break;
+ case FIREHOSE_SERVER_QUEUE_MEMORY:
+ dq = server_config.fs_mem_drain_queue;
+ break;
+ default:
+ DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type");
+ }
+ dispatch_retain(dq);
+ return dq;
+}
+
#pragma mark -
#pragma mark firehose snapshot and peeking
void
firehose_client_metadata_stream_peek(firehose_client_t fc,
- firehose_event_t context, bool (^peek_should_start)(void),
- bool (^peek)(firehose_buffer_chunk_t fbc))
+ OS_UNUSED firehose_event_t context, bool (^peek_should_start)(void),
+ bool (^peek)(firehose_chunk_t fbc))
{
- if (context != FIREHOSE_EVENT_MEM_BUFFER_RECEIVED) {
- return dispatch_sync(server_config.fs_mem_drain_queue, ^{
- firehose_client_metadata_stream_peek(fc,
- FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, peek_should_start, peek);
- });
- }
+ os_unfair_lock_lock(&fc->fc_lock);
- if (peek_should_start && !peek_should_start()) {
- return;
- }
+ if (peek_should_start && peek_should_start()) {
+ firehose_buffer_t fb = fc->fc_buffer;
+ firehose_buffer_header_t fbh = &fb->fb_header;
+ uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap;
- firehose_buffer_t fb = fc->fc_buffer;
- firehose_buffer_header_t fbh = &fb->fb_header;
- uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap;
+ while (bitmap) {
+ uint16_t ref = firehose_bitmap_first_set(bitmap);
+ firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+ uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;
- while (bitmap) {
- uint16_t ref = firehose_bitmap_first_set(bitmap);
- firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
- uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs;
-
- bitmap &= ~(1ULL << ref);
- if (fbc->fbc_start + fbc_length <= fbc->fbc_data) {
- // this page has its "recycle-requeue" done, but hasn't gone
- // through "recycle-reuse", or it has no data, ditch it
- continue;
- }
- if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) {
- // this thing has data, but the first tracepoint is unreadable
- // so also just ditch it
- continue;
- }
- if (fbc->fbc_pos.fbc_stream != firehose_stream_metadata) {
- continue;
- }
- if (!peek(fbc)) {
- break;
+ bitmap &= ~(1ULL << ref);
+ if (fbc->fc_start + fbc_length <= fbc->fc_data) {
+ // this page has its "recycle-requeue" done, but hasn't gone
+ // through "recycle-reuse", or it has no data, ditch it
+ continue;
+ }
+ if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
+ // this thing has data, but the first tracepoint is unreadable
+ // so also just ditch it
+ continue;
+ }
+ if (fbc->fc_pos.fcp_stream != firehose_stream_metadata) {
+ continue;
+ }
+ if (!peek(fbc)) {
+ break;
+ }
}
}
+
+ os_unfair_lock_unlock(&fc->fc_lock);
}
OS_NOINLINE OS_COLD
// Then look at all the allocated pages not seen in the ring
while (bitmap) {
uint16_t ref = firehose_bitmap_first_set(bitmap);
- firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
- uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs;
+ firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref);
+ uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs;
bitmap &= ~(1ULL << ref);
- if (fbc->fbc_start + fbc_length <= fbc->fbc_data) {
+ if (fbc->fc_start + fbc_length <= fbc->fc_data) {
// this page has its "recycle-requeue" done, but hasn't gone
// through "recycle-reuse", or it has no data, ditch it
continue;
}
- if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) {
+ if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) {
// this thing has data, but the first tracepoint is unreadable
// so also just ditch it
continue;
}
- if (fbc->fbc_pos.fbc_flag_io != for_io) {
+ if (fbc->fc_pos.fcp_flag_io != for_io) {
continue;
}
snapshot->handler(fc, evt, fbc);
// 0. we need to be on the IO queue so that client connection and/or death
// cannot happen concurrently
dispatch_assert_queue(server_config.fs_io_drain_queue);
+ server_config.fs_snapshot = snapshot;
// 1. mark all the clients participating in the current snapshot
// and enter the group for each bit set
TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) {
- if (fci->fc_is_kernel) {
+ if (!fci->fc_pid) {
#if TARGET_OS_SIMULATOR
continue;
#endif
}
dispatch_async(server_config.fs_mem_drain_queue, ^{
- // 2. make fs_snapshot visible, this is what triggers the snapshot
- // logic from _drain() or handle_death(). until fs_snapshot is
- // published, the bits set above are mostly ignored
- server_config.fs_snapshot = snapshot;
-
+ // 2. start the fs_mem_snapshot, this is what triggers the snapshot
+ // logic from _drain() or handle_death()
+ server_config.fs_mem_snapshot_started = true;
snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL);
dispatch_async(server_config.fs_io_drain_queue, ^{
firehose_client_t fcj;
+ // 3. start the fs_io_snapshot, this is what triggers the snapshot
+ // logic from _drain() or handle_death()
+ // 29868879: must always happen after the memory snapshot started
+ server_config.fs_io_snapshot_started = true;
snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL);
// match group_enter from firehose_snapshot() after MEM+IO_START
// were removed from the list have already left the group
// (see firehose_client_finalize())
TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) {
- if (fcj->fc_is_kernel) {
+ if (!fcj->fc_pid) {
#if !TARGET_OS_SIMULATOR
firehose_client_kernel_source_handle_event(fcj);
#endif
fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL);
server_config.fs_snapshot = NULL;
+ server_config.fs_mem_snapshot_started = false;
+ server_config.fs_io_snapshot_started = false;
dispatch_release(fs->fs_group);
Block_release(fs->handler);
firehose_server_register(mach_port_t server_port OS_UNUSED,
mach_port_t mem_port, mach_vm_size_t mem_size,
mach_port_t comm_recvp, mach_port_t comm_sendp,
- mach_port_t extra_info_port, mach_vm_size_t extra_info_size)
+ mach_port_t extra_info_port, mach_vm_size_t extra_info_size,
+ audit_token_t atoken)
{
mach_vm_address_t base_addr = 0;
firehose_client_t fc = NULL;
}
fc = firehose_client_create((firehose_buffer_t)base_addr,
- comm_recvp, comm_sendp);
+ (firehose_token_t)&atoken, comm_recvp, comm_sendp);
dispatch_async(server_config.fs_io_drain_queue, ^{
firehose_client_resume(fc, &fcci);
if (fcci.fcci_size) {
dispatch_source_t fc_mem_source;
mach_port_t fc_recvp;
mach_port_t fc_sendp;
+ os_unfair_lock fc_lock;
+ pid_t fc_pid;
+ int fc_pidversion;
+ uid_t fc_euid;
bool fc_use_notifs;
bool fc_memory_corrupted;
bool fc_needs_io_snapshot;
bool fc_needs_mem_snapshot;
- bool fc_is_kernel;
};
void
void
dispatch_atfork_prepare(void)
{
+ _os_object_atfork_prepare();
}
DISPATCH_EXPORT DISPATCH_NOTHROW
void
dispatch_atfork_parent(void)
{
+ _os_object_atfork_parent();
+}
+
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+dispatch_atfork_child(void)
+{
+ _os_object_atfork_child();
+ _voucher_atfork_child();
+ if (_dispatch_is_multithreaded_inline()) {
+ _dispatch_child_of_unsafe_fork = true;
+ }
+ _dispatch_queue_atfork_child();
+ // clear the _PROHIBIT and _MULTITHREADED bits if set
+ _dispatch_unsafe_fork = 0;
}
#pragma mark -
}
}
+void
+_os_object_atfork_prepare(void)
+{
+ return;
+}
+
+void
+_os_object_atfork_parent(void)
+{
+ return;
+}
+
+void
+_os_object_atfork_child(void)
+{
+ return;
+}
+
#pragma mark -
#pragma mark dispatch_autorelease_pool no_objc
dispatch_source_type_timer_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask,
- dispatch_queue_t q)
+ unsigned long mask)
{
if (fastpath(!ds->ds_refs)) {
ds->ds_refs = _dispatch_calloc(1ul,
}
ds->ds_needs_rearm = true;
ds->ds_is_timer = true;
- if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0)
- || q == dispatch_get_global_queue(
- DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){
- mask |= DISPATCH_TIMER_BACKGROUND; // <rdar://problem/12200216>
- }
ds_timer(ds->ds_refs).flags = mask;
}
static void
dispatch_source_type_after_init(dispatch_source_t ds,
- dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
- dispatch_queue_t q)
+ dispatch_source_type_t type, uintptr_t handle, unsigned long mask)
{
- dispatch_source_type_timer_init(ds, type, handle, mask, q);
+ dispatch_source_type_timer_init(ds, type, handle, mask);
ds->ds_needs_rearm = false;
ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER;
}
static void
dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds,
- dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
- dispatch_queue_t q)
+ dispatch_source_type_t type, uintptr_t handle, unsigned long mask)
{
ds->ds_refs = _dispatch_calloc(1ul,
sizeof(struct dispatch_timer_source_aggregate_refs_s));
- dispatch_source_type_timer_init(ds, type, handle, mask, q);
+ dispatch_source_type_timer_init(ds, type, handle, mask);
ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE;
ds->dq_specific_q = (void*)handle;
_dispatch_retain(ds->dq_specific_q);
static void
dispatch_source_type_interval_init(dispatch_source_t ds,
- dispatch_source_type_t type, uintptr_t handle, unsigned long mask,
- dispatch_queue_t q)
+ dispatch_source_type_t type, uintptr_t handle, unsigned long mask)
{
- dispatch_source_type_timer_init(ds, type, handle, mask, q);
+ dispatch_source_type_timer_init(ds, type, handle, mask);
ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL;
unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs);
ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident;
dispatch_source_type_readwrite_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ unsigned long mask DISPATCH_UNUSED)
{
ds->ds_is_level = true;
-#ifdef HAVE_DECL_NOTE_LOWAT
+#if HAVE_DECL_NOTE_LOWAT
// bypass kernel check for device kqueue support rdar://19004921
ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT;
#endif
dispatch_source_type_memorypressure_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ unsigned long mask DISPATCH_UNUSED)
{
static dispatch_once_t pred;
dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
dispatch_source_type_vm_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ unsigned long mask DISPATCH_UNUSED)
{
// Map legacy vm pressure to memorypressure warning rdar://problem/15907505
mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
ds->ds_pending_data_mask = mask;
ds->ds_vmpressure_override = 1;
#if TARGET_IPHONE_SIMULATOR
- dispatch_source_type_memorypressure_init(ds, type, handle, mask, q);
+ dispatch_source_type_memorypressure_init(ds, type, handle, mask);
#endif
}
.init = dispatch_source_type_vm_init,
};
-#elif DISPATCH_USE_VM_PRESSURE
-
-const struct dispatch_source_type_s _dispatch_source_type_vm = {
- .ke = {
- .filter = EVFILT_VM,
- .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
- },
- .mask = NOTE_VM_PRESSURE,
-};
-
-#endif // DISPATCH_USE_VM_PRESSURE
+#endif // DISPATCH_USE_MEMORYSTATUS
const struct dispatch_source_type_s _dispatch_source_type_signal = {
.ke = {
dispatch_source_type_proc_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ unsigned long mask DISPATCH_UNUSED)
{
ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831
}
#endif
#if HAVE_DECL_VQ_QUOTA
|VQ_QUOTA
+#endif
+#if HAVE_DECL_VQ_NEARLOWDISK
+ |VQ_NEARLOWDISK
+#endif
+#if HAVE_DECL_VQ_DESIRED_DISK
+ |VQ_DESIRED_DISK
#endif
,
};
dispatch_source_type_data_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ unsigned long mask DISPATCH_UNUSED)
{
ds->ds_is_installed = true;
ds->ds_is_custom_source = true;
static void
dispatch_source_type_mach_send_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED, unsigned long mask,
- dispatch_queue_t q DISPATCH_UNUSED)
+ uintptr_t handle DISPATCH_UNUSED, unsigned long mask)
{
if (!mask) {
// Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD
dispatch_source_type_mach_recv_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ unsigned long mask DISPATCH_UNUSED)
{
ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE;
#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
// including maintenance
DISPATCH_ALWAYS_INLINE
static inline bool
-_dispatch_is_background_thread(void)
+_dispatch_is_background_priority(pthread_priority_t pp)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
- pthread_priority_t pp = _dispatch_get_priority();
pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
return pp && (pp <= _dispatch_background_priority);
#else
#endif
}
+// including maintenance
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_is_background_thread(void)
+{
+ return _dispatch_is_background_priority(_dispatch_get_priority());
+}
+
#pragma mark -
#pragma mark dispatch_block_t
#if !defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC
#define OS_VOUCHER_ACTIVITY_SPI 1
#endif
+#if !defined(OS_VOUCHER_ACTIVITY_GENERATE_SWAPS)
+#define OS_VOUCHER_ACTIVITY_GENERATE_SWAPS 0
+#endif
#if !defined(OS_FIREHOSE_SPI) && TARGET_OS_MAC
#define OS_FIREHOSE_SPI 1
#endif
#if DISPATCH_DEBUG
// sys/queue.h debugging
+#if defined(__linux__)
+#define QUEUE_MACRO_DEBUG 1
+#else
#undef TRASHIT
#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
+#endif
#endif // DISPATCH_DEBUG
#define _TAILQ_TRASH_ENTRY(elm, field) do { \
TRASHIT((elm)->field.tqe_next); \
#endif
#endif // EVFILT_MEMORYSTATUS
-#if defined(EVFILT_VM) && !DISPATCH_USE_MEMORYSTATUS
-#ifndef DISPATCH_USE_VM_PRESSURE
-#define DISPATCH_USE_VM_PRESSURE 1
-#endif
-#endif // EVFILT_VM
-
#if TARGET_OS_SIMULATOR
#undef DISPATCH_USE_MEMORYPRESSURE_SOURCE
#define DISPATCH_USE_MEMORYPRESSURE_SOURCE 0
-#undef DISPATCH_USE_VM_PRESSURE_SOURCE
-#define DISPATCH_USE_VM_PRESSURE_SOURCE 0
#endif // TARGET_OS_SIMULATOR
#if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS
#define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1
-#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE
-#define DISPATCH_USE_VM_PRESSURE_SOURCE 1
#endif
#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
extern bool _dispatch_memory_warn;
#undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982
#endif // VQ_QUOTA
+#ifndef VQ_NEARLOWDISK
+#undef HAVE_DECL_VQ_NEARLOWDISK
+#endif // VQ_NEARLOWDISK
+
+#ifndef VQ_DESIRED_DISK
+#undef HAVE_DECL_VQ_DESIRED_DISK
+#endif // VQ_DESIRED_DISK
+
#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \
!DISPATCH_HOST_SUPPORTS_OSX(101200)
#undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
#ifndef DISPATCH_USE_GUARDED_FD
#define DISPATCH_USE_GUARDED_FD 1
#endif
-// change_fdguard_np() requires GUARD_DUP <rdar://problem/11814513>
-#if DISPATCH_USE_GUARDED_FD && RDAR_11814513
-#define DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD 1
-#endif
#endif // HAVE_SYS_GUARDED_H
#define DISPATCH_IO_DEBUG DISPATCH_DEBUG
#endif
+#ifndef PAGE_SIZE
+#define PAGE_SIZE getpagesize()
+#endif
+
#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA
#define _dispatch_io_data_retain(x) _dispatch_objc_retain(x)
#define _dispatch_io_data_release(x) _dispatch_objc_release(x)
return DISPATCH_OP_DELIVER;
}
error:
- if (err == EAGAIN) {
+ if (err == EAGAIN || err == EWOULDBLOCK) {
// For disk based files with blocking I/O we should never get EAGAIN
dispatch_assert(!op->fd_entry->disk);
- _dispatch_op_debug("performed: EAGAIN", op);
+ _dispatch_op_debug("performed: EAGAIN/EWOULDBLOCK", op);
if (op->direction == DOP_DIR_READ && op->total &&
op->channel == op->fd_entry->convenience_channel) {
// Convenience read with available data completes on EAGAIN
return objc_release(obj);
}
+void
+_os_object_atfork_prepare(void)
+{
+ return _objc_atfork_prepare();
+}
+
+void
+_os_object_atfork_parent(void)
+{
+ return _objc_atfork_parent();
+}
+
+void
+_os_object_atfork_child(void)
+{
+ return _objc_atfork_child();
+}
+
#pragma mark -
#pragma mark _os_object
#define _os_object_refcnt_dispose_barrier(o) \
_os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt)
+void _os_object_atfork_child(void);
+void _os_object_atfork_parent(void);
+void _os_object_atfork_prepare(void);
void _os_object_init(void);
unsigned long _os_object_retain_count(_os_object_t obj);
bool _os_object_retain_weak(_os_object_t obj);
#endif
}
#endif // HAVE_PTHREAD_WORKQUEUES
-#if USE_MACH_SEM
- // override the default FIFO behavior for the pool semaphores
- kern_return_t kr = semaphore_create(mach_task_self(),
- &pqc->dpq_thread_mediator.dsema_port, SYNC_POLICY_LIFO, 0);
- DISPATCH_VERIFY_MIG(kr);
- (void)dispatch_assume_zero(kr);
- (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port);
-#elif USE_POSIX_SEM
- /* XXXRW: POSIX semaphores don't support LIFO? */
- int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0);
- (void)dispatch_assume_zero(ret);
-#endif
+ _os_semaphore_t *sema = &pqc->dpq_thread_mediator.dsema_sema;
+ _os_semaphore_init(sema, _OS_SEM_POLICY_LIFO);
+ _os_semaphore_create(sema, _OS_SEM_POLICY_LIFO);
}
#endif // DISPATCH_USE_PTHREAD_POOL
}
#endif
-DISPATCH_EXPORT DISPATCH_NOTHROW
+DISPATCH_NOTHROW
void
-dispatch_atfork_child(void)
+_dispatch_queue_atfork_child(void)
{
void *crash = (void *)0x100;
size_t i;
#if HAVE_MACH
_dispatch_mach_host_port_pred = 0;
- _dispatch_mach_host_port = MACH_VOUCHER_NULL;
+ _dispatch_mach_host_port = MACH_PORT_NULL;
#endif
- _voucher_atfork_child();
- if (!_dispatch_is_multithreaded_inline()) {
- // clear the _PROHIBIT bit if set
- _dispatch_unsafe_fork = 0;
- return;
- }
- _dispatch_unsafe_fork = 0;
- _dispatch_child_of_unsafe_fork = true;
+
+ if (!_dispatch_is_multithreaded_inline()) return;
_dispatch_main_q.dq_items_head = crash;
_dispatch_main_q.dq_items_tail = crash;
bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT);
if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) {
+#if OS_VOUCHER_ACTIVITY_SPI
voucher = VOUCHER_CURRENT;
+#endif
flags |= DISPATCH_BLOCK_HAS_VOUCHER;
}
+#if OS_VOUCHER_ACTIVITY_SPI
if (voucher == VOUCHER_CURRENT) {
voucher = _voucher_get();
}
+#endif
if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) {
pri = _dispatch_priority_propagate();
flags |= DISPATCH_BLOCK_HAS_PRIORITY;
}
// balanced in d_block_sync_invoke or d_block_wait
if (os_atomic_cmpxchg2o(_dispatch_block_get_data(work),
- dbpd_queue, NULL, dq, relaxed)) {
+ dbpd_queue, NULL, dq->_as_oq, relaxed)) {
_dispatch_retain(dq);
}
if (flags & DISPATCH_BLOCK_BARRIER) {
void
_dispatch_main_queue_callback_4CF(
-#if TARGET_OS_MAC
- mach_msg_header_t *_Null_unspecified msg
-#else
- void *ignored
-#endif
- DISPATCH_UNUSED)
+ void *ignored DISPATCH_UNUSED)
{
if (main_q_is_draining) {
return;
void
dispatch_main(void)
{
+ dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+ _dispatch_root_queues_init_once);
+
#if HAVE_PTHREAD_MAIN_NP
if (pthread_main_np()) {
#endif
dispatch_function_t func);
void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
+void _dispatch_queue_atfork_child(void);
#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
#include "internal.h"
-// semaphores are too fundamental to use the dispatch_assume*() macros
-#if USE_WIN32_SEM
-// rdar://problem/8428132
-static DWORD best_resolution = 1; // 1ms
-
-DWORD
-_push_timer_resolution(DWORD ms)
-{
- MMRESULT res;
- static dispatch_once_t once;
-
- if (ms > 16) {
- // only update timer resolution if smaller than default 15.6ms
- // zero means not updated
- return 0;
- }
-
- // aim for the best resolution we can accomplish
- dispatch_once(&once, ^{
- TIMECAPS tc;
- MMRESULT res;
- res = timeGetDevCaps(&tc, sizeof(tc));
- if (res == MMSYSERR_NOERROR) {
- best_resolution = min(max(tc.wPeriodMin, best_resolution),
- tc.wPeriodMax);
- }
- });
-
- res = timeBeginPeriod(best_resolution);
- if (res == TIMERR_NOERROR) {
- return best_resolution;
- }
- // zero means not updated
- return 0;
-}
-
-// match ms parameter to result from _push_timer_resolution
-void
-_pop_timer_resolution(DWORD ms)
-{
- if (ms) {
- timeEndPeriod(ms);
- }
-}
-#endif /* USE_WIN32_SEM */
-
-
DISPATCH_WEAK // rdar://problem/8503746
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema);
dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
false);
dsema->dsema_value = value;
-#if USE_POSIX_SEM
- int ret = sem_init(&dsema->dsema_sem, 0, 0);
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#endif
-}
-
-static void
-_dispatch_semaphore_class_dispose(dispatch_semaphore_class_t dsemau)
-{
- struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr;
-
-#if USE_MACH_SEM
- kern_return_t kr;
- if (dsema->dsema_port) {
- kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
- DISPATCH_VERIFY_MIG(kr);
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
- }
- dsema->dsema_port = MACH_PORT_DEAD;
-#elif USE_POSIX_SEM
- int ret = sem_destroy(&dsema->dsema_sem);
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
- if (dsema->dsema_handle) {
- CloseHandle(dsema->dsema_handle);
- }
-#endif
+ _os_semaphore_init(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO);
}
#pragma mark -
return dsema;
}
-#if USE_MACH_SEM
-static void
-_dispatch_semaphore_create_port(semaphore_t *s4)
-{
- kern_return_t kr;
- semaphore_t tmp;
-
- if (*s4) {
- return;
- }
- _dispatch_fork_becomes_unsafe();
-
- // lazily allocate the semaphore port
-
- // Someday:
- // 1) Switch to a doubly-linked FIFO in user-space.
- // 2) User-space timers for the timeout.
- // 3) Use the per-thread semaphore port.
-
- while ((kr = semaphore_create(mach_task_self(), &tmp,
- SYNC_POLICY_FIFO, 0))) {
- DISPATCH_VERIFY_MIG(kr);
- _dispatch_temporary_resource_shortage();
- }
-
- if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
- kr = semaphore_destroy(mach_task_self(), tmp);
- DISPATCH_VERIFY_MIG(kr);
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
- }
-}
-#elif USE_WIN32_SEM
-static void
-_dispatch_semaphore_create_handle(HANDLE *s4)
-{
- HANDLE tmp;
-
- if (*s4) {
- return;
- }
-
- // lazily allocate the semaphore port
-
- while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
- _dispatch_temporary_resource_shortage();
- }
-
- if (!os_atomic_cmpxchg(s4, 0, tmp)) {
- CloseHandle(tmp);
- }
-}
-#endif
-
void
_dispatch_semaphore_dispose(dispatch_object_t dou)
{
"Semaphore object deallocated while in use");
}
- _dispatch_semaphore_class_dispose(dsema);
+ _os_semaphore_dispose(&dsema->dsema_sema);
}
size_t
offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
#if USE_MACH_SEM
offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
- dsema->dsema_port);
+ dsema->dsema_sema);
#endif
offset += dsnprintf(&buf[offset], bufsiz - offset,
"value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
-#if USE_MACH_SEM
- _dispatch_semaphore_create_port(&dsema->dsema_port);
- kern_return_t kr = semaphore_signal(dsema->dsema_port);
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-#elif USE_POSIX_SEM
- int ret = sem_post(&dsema->dsema_sem);
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
- _dispatch_semaphore_create_handle(&dsema->dsema_handle);
- int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL);
- dispatch_assume(ret);
-#endif
+ _os_semaphore_create(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO);
+ _os_semaphore_signal(&dsema->dsema_sema, 1);
return 1;
}
{
long orig;
-#if USE_MACH_SEM
- mach_timespec_t _timeout;
- kern_return_t kr;
-#elif USE_POSIX_SEM
- struct timespec _timeout;
- int ret;
-#elif USE_WIN32_SEM
- uint64_t nsec;
- DWORD msec;
- DWORD resolution;
- DWORD wait_result;
-#endif
-
-#if USE_MACH_SEM
- _dispatch_semaphore_create_port(&dsema->dsema_port);
-#elif USE_WIN32_SEM
- _dispatch_semaphore_create_handle(&dsema->dsema_handle);
-#endif
-
+ _os_semaphore_create(&dsema->dsema_sema, _OS_SEM_POLICY_FIFO);
switch (timeout) {
default:
-#if USE_MACH_SEM
- do {
- uint64_t nsec = _dispatch_timeout(timeout);
- _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
- _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
- kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
- } while (kr == KERN_ABORTED);
-
- if (kr != KERN_OPERATION_TIMED_OUT) {
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
- break;
- }
-#elif USE_POSIX_SEM
- do {
- uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
- _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
- _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
- ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
- } while (ret == -1 && errno == EINTR);
-
- if (!(ret == -1 && errno == ETIMEDOUT)) {
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
- break;
- }
-#elif USE_WIN32_SEM
- nsec = _dispatch_timeout(timeout);
- msec = (DWORD)(nsec / (uint64_t)1000000);
- resolution = _push_timer_resolution(msec);
- wait_result = WaitForSingleObject(dsema->dsema_handle, msec);
- _pop_timer_resolution(resolution);
- if (wait_result != WAIT_TIMEOUT) {
+ if (!_os_semaphore_timedwait(&dsema->dsema_sema, timeout)) {
break;
}
-#endif
// Fall through and try to undo what the fast path did to
// dsema->dsema_value
case DISPATCH_TIME_NOW:
while (orig < 0) {
if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1,
&orig, relaxed)) {
-#if USE_MACH_SEM
- return KERN_OPERATION_TIMED_OUT;
-#elif USE_POSIX_SEM || USE_WIN32_SEM
- errno = ETIMEDOUT;
- return -1;
-#endif
+ return _OS_SEM_TIMEOUT();
}
}
// Another thread called semaphore_signal().
// Fall through and drain the wakeup.
case DISPATCH_TIME_FOREVER:
-#if USE_MACH_SEM
- do {
- kr = semaphore_wait(dsema->dsema_port);
- } while (kr == KERN_ABORTED);
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-#elif USE_POSIX_SEM
- do {
- ret = sem_wait(&dsema->dsema_sem);
- } while (ret != 0);
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
- WaitForSingleObject(dsema->dsema_handle, INFINITE);
-#endif
+ _os_semaphore_wait(&dsema->dsema_sema);
break;
}
return 0;
rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed);
if (rval) {
// wake group waiters
-#if USE_MACH_SEM
- _dispatch_semaphore_create_port(&dg->dg_port);
- do {
- kern_return_t kr = semaphore_signal(dg->dg_port);
- DISPATCH_GROUP_VERIFY_KR(kr);
- } while (--rval);
-#elif USE_POSIX_SEM
- do {
- int ret = sem_post(&dg->dg_sem);
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
- } while (--rval);
-#elif USE_WIN32_SEM
- _dispatch_semaphore_create_handle(&dg->dg_handle);
- int ret;
- ret = ReleaseSemaphore(dg->dg_handle, rval, NULL);
- dispatch_assume(ret);
-#else
-#error "No supported semaphore type"
-#endif
+ _os_semaphore_create(&dg->dg_sema, _OS_SEM_POLICY_FIFO);
+ _os_semaphore_signal(&dg->dg_sema, rval);
}
if (head) {
// async group notify blocks
"Group object deallocated while in use");
}
- _dispatch_semaphore_class_dispose(dg);
+ _os_semaphore_dispose(&dg->dg_sema);
}
size_t
offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset);
#if USE_MACH_SEM
offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
- dg->dg_port);
+ dg->dg_sema);
#endif
offset += dsnprintf(&buf[offset], bufsiz - offset,
"count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters);
long value;
int orig_waiters;
-#if USE_MACH_SEM
- mach_timespec_t _timeout;
- kern_return_t kr;
-#elif USE_POSIX_SEM // KVV
- struct timespec _timeout;
- int ret;
-#elif USE_WIN32_SEM // KVV
- uint64_t nsec;
- DWORD msec;
- DWORD resolution;
- DWORD wait_result;
-#endif
-
// check before we cause another signal to be sent by incrementing
// dg->dg_waiters
value = os_atomic_load2o(dg, dg_value, ordered); // 19296565
timeout = DISPATCH_TIME_FOREVER;
}
-#if USE_MACH_SEM
- _dispatch_semaphore_create_port(&dg->dg_port);
-#elif USE_WIN32_SEM
- _dispatch_semaphore_create_handle(&dg->dg_handle);
-#endif
-
+ _os_semaphore_create(&dg->dg_sema, _OS_SEM_POLICY_FIFO);
switch (timeout) {
default:
-#if USE_MACH_SEM
- do {
- uint64_t nsec = _dispatch_timeout(timeout);
- _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
- _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
- kr = slowpath(semaphore_timedwait(dg->dg_port, _timeout));
- } while (kr == KERN_ABORTED);
-
- if (kr != KERN_OPERATION_TIMED_OUT) {
- DISPATCH_GROUP_VERIFY_KR(kr);
- break;
- }
-#elif USE_POSIX_SEM
- do {
- uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
- _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
- _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
- ret = slowpath(sem_timedwait(&dg->dg_sem, &_timeout));
- } while (ret == -1 && errno == EINTR);
-
- if (!(ret == -1 && errno == ETIMEDOUT)) {
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+ if (!_os_semaphore_timedwait(&dg->dg_sema, timeout)) {
break;
}
-#elif USE_WIN32_SEM
- nsec = _dispatch_timeout(timeout);
- msec = (DWORD)(nsec / (uint64_t)1000000);
- resolution = _push_timer_resolution(msec);
- wait_result = WaitForSingleObject(dg->dg_handle, msec);
- _pop_timer_resolution(resolution);
- if (wait_result != WAIT_TIMEOUT) {
- break;
- }
-#endif
// Fall through and try to undo the earlier change to
// dg->dg_waiters
case DISPATCH_TIME_NOW:
while (orig_waiters) {
if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters,
orig_waiters - 1, &orig_waiters, relaxed)) {
-#if USE_MACH_SEM
- return KERN_OPERATION_TIMED_OUT;
-#elif USE_POSIX_SEM || USE_WIN32_SEM
- errno = ETIMEDOUT;
- return -1;
-#endif
+ return _OS_SEM_TIMEOUT();
}
}
- // Another thread called semaphore_signal().
+ // Another thread is running _dispatch_group_wake()
// Fall through and drain the wakeup.
case DISPATCH_TIME_FOREVER:
-#if USE_MACH_SEM
- do {
- kr = semaphore_wait(dg->dg_port);
- } while (kr == KERN_ABORTED);
- DISPATCH_GROUP_VERIFY_KR(kr);
-#elif USE_POSIX_SEM
- do {
- ret = sem_wait(&dg->dg_sem);
- } while (ret == -1 && errno == EINTR);
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
-#elif USE_WIN32_SEM
- WaitForSingleObject(dg->dg_handle, INFINITE);
-#endif
+ _os_semaphore_wait(&dg->dg_sema);
break;
}
return 0;
return 0;
}
if (timeout == 0) {
-#if USE_MACH_SEM
- return KERN_OPERATION_TIMED_OUT;
-#elif USE_POSIX_SEM || USE_WIN32_SEM
- errno = ETIMEDOUT;
- return (-1);
-#endif
+ return _OS_SEM_TIMEOUT();
}
return _dispatch_group_wait_slow(dg, timeout);
}
struct dispatch_queue_s;
-#if USE_MACH_SEM
-#define DISPATCH_OS_SEMA_FIELD(base) semaphore_t base##_port
-#elif USE_POSIX_SEM
-#define DISPATCH_OS_SEMA_FIELD(base) sem_t base##_sem
-#elif USE_WIN32_SEM
-#define DISPATCH_OS_SEMA_FIELD(base) HANDLE base##_handle
-#else
-#error "No supported semaphore type"
-#endif
-
#define DISPATCH_SEMAPHORE_HEADER(cls, ns) \
DISPATCH_OBJECT_HEADER(cls); \
long volatile ns##_value; \
- DISPATCH_OS_SEMA_FIELD(ns)
+ _os_semaphore_t ns##_sema
struct dispatch_semaphore_header_s {
DISPATCH_SEMAPHORE_HEADER(semaphore, dsema);
#include <stdatomic.h>
-#define memory_order_ordered memory_order_seq_cst
+#define memory_order_ordered memory_order_seq_cst
+#define memory_order_dependency memory_order_acquire
+
+#if __has_extension(c_generic_selections) && __has_extension(c_atomic)
+#define os_atomic(type) _Atomic(type)
+#else
+#define os_atomic(type) type volatile
+#endif
+
+#define _os_atomic_type_cases(type, expr) \
+ type *: expr, \
+ type volatile *: expr, \
+ _Atomic(type) *: expr, \
+ _Atomic(type) volatile *: expr
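// Each _os_atomic_type_cases(T, expr) use expands to four _Generic association
// entries:
//     T *: expr, T volatile *: expr, _Atomic(T) *: expr, _Atomic(T) volatile *: expr
// so the selectors below accept plain, volatile-qualified, and
// _Atomic-qualified pointers with a single case list.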
#define _os_atomic_basetypeof(p) \
typeof(*_Generic((p), \
- char*: (char*)(p), \
- volatile char*: (char*)(p), \
- signed char*: (signed char*)(p), \
- volatile signed char*: (signed char*)(p), \
- unsigned char*: (unsigned char*)(p), \
- volatile unsigned char*: (unsigned char*)(p), \
- short*: (short*)(p), \
- volatile short*: (short*)(p), \
- unsigned short*: (unsigned short*)(p), \
- volatile unsigned short*: (unsigned short*)(p), \
- int*: (int*)(p), \
- volatile int*: (int*)(p), \
- unsigned int*: (unsigned int*)(p), \
- volatile unsigned int*: (unsigned int*)(p), \
- long*: (long*)(p), \
- volatile long*: (long*)(p), \
- unsigned long*: (unsigned long*)(p), \
- volatile unsigned long*: (unsigned long*)(p), \
- long long*: (long long*)(p), \
- volatile long long*: (long long*)(p), \
- unsigned long long*: (unsigned long long*)(p), \
- volatile unsigned long long*: (unsigned long long*)(p), \
- const void**: (const void**)(p), \
- const void*volatile*: (const void**)(p), \
+ _os_atomic_type_cases(char, (char *)(p)), \
+ _os_atomic_type_cases(signed char, (signed char *)(p)), \
+ _os_atomic_type_cases(unsigned char, (unsigned char *)(p)), \
+ _os_atomic_type_cases(short, (short *)(p)), \
+ _os_atomic_type_cases(unsigned short, (unsigned short *)(p)), \
+ _os_atomic_type_cases(int, (int *)(p)), \
+ _os_atomic_type_cases(unsigned int, (unsigned int *)(p)), \
+ _os_atomic_type_cases(long, (long *)(p)), \
+ _os_atomic_type_cases(unsigned long, (unsigned long *)(p)), \
+ _os_atomic_type_cases(long long, (long long *)(p)), \
+ _os_atomic_type_cases(unsigned long long, (unsigned long long *)(p)), \
+ _os_atomic_type_cases(void *, (void **)(p)), \
+ _os_atomic_type_cases(const void *, (const void **)(p)), \
default: (void**)(p)))
#define _os_atomic_c11_atomic(p) \
_Generic((p), \
- char*: (_Atomic(char)*)(p), \
- volatile char*: (volatile _Atomic(char)*)(p), \
- signed char*: (_Atomic(signed char)*)(p), \
- volatile signed char*: (volatile _Atomic(signed char)*)(p), \
- unsigned char*: (_Atomic(unsigned char)*)(p), \
- volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \
- short*: (_Atomic(short)*)(p), \
- volatile short*: (volatile _Atomic(short)*)(p), \
- unsigned short*: (_Atomic(unsigned short)*)(p), \
- volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \
- int*: (_Atomic(int)*)(p), \
- volatile int*: (volatile _Atomic(int)*)(p), \
- unsigned int*: (_Atomic(unsigned int)*)(p), \
- volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \
- long*: (_Atomic(long)*)(p), \
- volatile long*: (volatile _Atomic(long)*)(p), \
- unsigned long*: (_Atomic(unsigned long)*)(p), \
- volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \
- long long*: (_Atomic(long long)*)(p), \
- volatile long long*: (volatile _Atomic(long long)*)(p), \
- unsigned long long*: (_Atomic(unsigned long long)*)(p), \
- volatile unsigned long long*: \
- (volatile _Atomic(unsigned long long)*)(p), \
- const void**: (_Atomic(const void*)*)(p), \
- const void*volatile*: (volatile _Atomic(const void*)*)(p), \
- default: (volatile _Atomic(void*)*)(p))
+ _os_atomic_type_cases(char, (_Atomic(char)*)(p)), \
+ _os_atomic_type_cases(signed char, (_Atomic(signed char)*)(p)), \
+ _os_atomic_type_cases(unsigned char, (_Atomic(unsigned char)*)(p)), \
+ _os_atomic_type_cases(short, (_Atomic(short)*)(p)), \
+ _os_atomic_type_cases(unsigned short, (_Atomic(unsigned short)*)(p)), \
+ _os_atomic_type_cases(int, (_Atomic(int)*)(p)), \
+ _os_atomic_type_cases(unsigned int, (_Atomic(unsigned int)*)(p)), \
+ _os_atomic_type_cases(long, (_Atomic(long)*)(p)), \
+ _os_atomic_type_cases(unsigned long, (_Atomic(unsigned long)*)(p)), \
+ _os_atomic_type_cases(long long, (_Atomic(long long)*)(p)), \
+ _os_atomic_type_cases(unsigned long long, (_Atomic(unsigned long long)*)(p)), \
+ _os_atomic_type_cases(void *, (_Atomic(void*)*)(p)), \
+ _os_atomic_type_cases(const void *, (_Atomic(const void*)*)(p)), \
+ default: (_Atomic(void*)*)(p))
#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m)
// see comment in dispatch_once.c
#define os_atomic_xor_orig(p, v, m) \
_os_atomic_c11_op_orig((p), (v), m, xor, ^)
+#define os_atomic_force_dependency_on(p, e) (p)
+#define os_atomic_load_with_dependency_on(p, e) \
+ os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed)
+#define os_atomic_load_with_dependency_on2o(p, f, e) \
+ os_atomic_load_with_dependency_on(&(p)->f, e)
+
#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \
bool _result = false; \
typeof(p) _p = (p); \
#ifndef __DISPATCH__STUBS__INTERNAL
#define __DISPATCH__STUBS__INTERNAL
-// marker for hacks we have made to make progress
-#define __LINUX_PORT_HDD__ 1
-
/*
* Stub out defines for some mach types and related macros
*/
typedef uint32_t voucher_activity_id_t;
-typedef uint32_t _voucher_activity_buffer_hook_t;;
-
typedef uint32_t voucher_activity_flag_t;
typedef struct { } mach_msg_header_t;
#define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */
+#ifndef NOTE_SECONDS
#define NOTE_SECONDS 0x01
#define NOTE_USECONDS 0x02
#define NOTE_NSECONDS 0x04
#define NOTE_ABSOLUTE 0x08
+#define KEVENT_NSEC_NOT_SUPPORTED
+#endif
#define NOTE_CRITICAL 0x10
#define NOTE_BACKGROUND 0x20
#define NOTE_LEEWAY 0x40
}
#endif
+#pragma mark - semaphores
+
+#if USE_MACH_SEM
+#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
+ if (unlikely((x) == KERN_INVALID_NAME)) { \
+ DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
+ } else if (unlikely(x)) { \
+ DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
+ } \
+ } while (0)
+
+void
+_os_semaphore_create_slow(_os_semaphore_t *s4, int policy)
+{
+ kern_return_t kr;
+ semaphore_t tmp;
+
+ _dispatch_fork_becomes_unsafe();
+
+ // lazily allocate the semaphore port
+
+ // Someday:
+ // 1) Switch to a doubly-linked FIFO in user-space.
+ // 2) User-space timers for the timeout.
+ // 3) Use the per-thread semaphore port.
+
+ while ((kr = semaphore_create(mach_task_self(), &tmp, policy, 0))) {
+ DISPATCH_VERIFY_MIG(kr);
+ _dispatch_temporary_resource_shortage();
+ }
+
+ if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
+ kr = semaphore_destroy(mach_task_self(), tmp);
+ DISPATCH_VERIFY_MIG(kr);
+ DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+ }
+}
+
+void
+_os_semaphore_dispose_slow(_os_semaphore_t *sema)
+{
+ kern_return_t kr;
+ semaphore_t sema_port = *sema;
+ kr = semaphore_destroy(mach_task_self(), sema_port);
+ DISPATCH_VERIFY_MIG(kr);
+ DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+ *sema = MACH_PORT_DEAD;
+}
+
+void
+_os_semaphore_signal(_os_semaphore_t *sema, long count)
+{
+ do {
+ kern_return_t kr = semaphore_signal(*sema);
+ DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+ } while (--count);
+}
+
+void
+_os_semaphore_wait(_os_semaphore_t *sema)
+{
+ kern_return_t kr;
+ do {
+ kr = semaphore_wait(*sema);
+ } while (kr == KERN_ABORTED);
+ DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+}
+
+bool
+_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
+{
+ mach_timespec_t _timeout;
+ kern_return_t kr;
+
+ do {
+ uint64_t nsec = _dispatch_timeout(timeout);
+ _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
+ _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
+ kr = slowpath(semaphore_timedwait(*sema, _timeout));
+ } while (kr == KERN_ABORTED);
+
+ if (kr == KERN_OPERATION_TIMED_OUT) {
+ return true;
+ }
+ DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+ return false;
+}
+#elif USE_POSIX_SEM
+#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
+ if (unlikely((x) == -1)) { \
+ DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
+ } \
+ } while (0)
+
+void
+_os_semaphore_init(_os_semaphore_t *sema, int policy DISPATCH_UNUSED)
+{
+ int rc = sem_init(sema, 0, 0);
+ DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+}
+
+void
+_os_semaphore_dispose_slow(_os_semaphore_t *sema)
+{
+ int rc = sem_destroy(sema);
+ DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+}
+
+void
+_os_semaphore_signal(_os_semaphore_t *sema, long count)
+{
+ do {
+ int ret = sem_post(sema);
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+ } while (--count);
+}
+
+void
+_os_semaphore_wait(_os_semaphore_t *sema)
+{
+ int ret = sem_wait(sema);
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+}
+
+bool
+_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
+{
+ struct timespec _timeout;
+ int ret;
+
+ do {
+ uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
+ _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
+ _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
+ ret = slowpath(sem_timedwait(sema, &_timeout));
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == ETIMEDOUT) {
+ return true;
+ }
+ DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+ return false;
+}
+#elif USE_WIN32_SEM
+// rdar://problem/8428132
+static DWORD best_resolution = 1; // 1ms
+
+static DWORD
+_push_timer_resolution(DWORD ms)
+{
+ MMRESULT res;
+ static dispatch_once_t once;
+
+ if (ms > 16) {
+ // only update timer resolution if smaller than default 15.6ms
+ // zero means not updated
+ return 0;
+ }
+
+ // aim for the best resolution we can accomplish
+ dispatch_once(&once, ^{
+ TIMECAPS tc;
+ MMRESULT res;
+ res = timeGetDevCaps(&tc, sizeof(tc));
+ if (res == MMSYSERR_NOERROR) {
+ best_resolution = min(max(tc.wPeriodMin, best_resolution),
+ tc.wPeriodMax);
+ }
+ });
+
+ res = timeBeginPeriod(best_resolution);
+ if (res == TIMERR_NOERROR) {
+ return best_resolution;
+ }
+ // zero means not updated
+ return 0;
+}
+
+// match ms parameter to result from _push_timer_resolution
+DISPATCH_ALWAYS_INLINE
+static inline void
+_pop_timer_resolution(DWORD ms)
+{
+ if (ms) timeEndPeriod(ms);
+}
+
+void
+_os_semaphore_create_slow(_os_semaphore_t *s4, int policy DISPATCH_UNUSED)
+{
+ HANDLE tmp;
+
+ // lazily allocate the semaphore port
+
+ while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
+ _dispatch_temporary_resource_shortage();
+ }
+
+ if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
+ CloseHandle(tmp);
+ }
+}
+
+void
+_os_semaphore_dispose_slow(_os_semaphore_t *sema)
+{
+ HANDLE sema_handle = *sema;
+ CloseHandle(sema_handle);
+ *sema = 0;
+}
+
+void
+_os_semaphore_signal(_os_semaphore_t *sema, long count)
+{
+ int ret = ReleaseSemaphore(*sema, count, NULL);
+ dispatch_assume(ret);
+}
+
+void
+_os_semaphore_wait(_os_semaphore_t *sema)
+{
+ WaitForSingleObject(*sema, INFINITE);
+}
+
+bool
+_os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
+{
+ uint64_t nsec;
+ DWORD msec;
+ DWORD resolution;
+ DWORD wait_result;
+
+ nsec = _dispatch_timeout(timeout);
+ msec = (DWORD)(nsec / (uint64_t)1000000);
+ resolution = _push_timer_resolution(msec);
+ wait_result = WaitForSingleObject(*sema, msec);
+ _pop_timer_resolution(resolution);
+ return wait_result == WAIT_TIMEOUT;
+}
+#else
+#error "port has to implement _os_semaphore_t"
+#endif
+
+#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
+semaphore_t
+_dispatch_thread_semaphore_create(void)
+{
+ semaphore_t s4;
+ kern_return_t kr;
+ while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
+ SYNC_POLICY_FIFO, 0))) {
+ DISPATCH_VERIFY_MIG(kr);
+ _dispatch_temporary_resource_shortage();
+ }
+ return s4;
+}
+
+void
+_dispatch_thread_semaphore_dispose(void *ctxt)
+{
+ semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt;
+ kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
+ DISPATCH_VERIFY_MIG(kr);
+ DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+}
+#endif
+
#pragma mark - ulock wrappers
#if HAVE_UL_COMPARE_AND_WAIT
#pragma mark - thread event
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-semaphore_t
-_dispatch_thread_semaphore_create(void)
-{
- semaphore_t s4;
- kern_return_t kr;
- while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
- SYNC_POLICY_FIFO, 0))) {
- DISPATCH_VERIFY_MIG(kr);
- _dispatch_temporary_resource_shortage();
- }
- return s4;
-}
-
-void
-_dispatch_thread_semaphore_dispose(void *ctxt)
-{
- semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt;
- kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
- DISPATCH_VERIFY_MIG(kr);
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
-}
-#endif
-
void
_dispatch_thread_event_signal_slow(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
- kern_return_t kr = semaphore_signal(dte->dte_semaphore);
+ kern_return_t kr = semaphore_signal(dte->dte_sema);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
return;
}
_dispatch_ulock_wake(&dte->dte_value, 0);
#elif HAVE_FUTEX
_dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG);
-#elif USE_POSIX_SEM
- int rc = sem_post(&dte->dte_sem);
- DISPATCH_SEMAPHORE_VERIFY_RET(ret);
+#else
+ _os_semaphore_signal(&dte->dte_sema, 1);
#endif
}
if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
kern_return_t kr;
do {
- kr = semaphore_wait(dte->dte_semaphore);
+ kr = semaphore_wait(dte->dte_sema);
} while (unlikely(kr == KERN_ABORTED));
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
return;
NULL, FUTEX_PRIVATE_FLAG);
#endif
}
-#elif USE_POSIX_SEM
- int rc;
- do {
- rc = sem_wait(&dte->dte_sem);
- } while (unlikely(rc != 0));
- DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+#else
+ _os_semaphore_wait(&dte->dte_sema);
#endif
}
#pragma mark - platform macros
DISPATCH_ENUM(dispatch_lock_options, uint32_t,
- DLOCK_LOCK_NONE = 0x00000000,
+ DLOCK_LOCK_NONE = 0x00000000,
DLOCK_LOCK_DATA_CONTENTION = 0x00010000,
);
#endif
#endif // HAVE_UL_UNFAIR_LOCK
-#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX)
-#endif
-
#ifndef HAVE_FUTEX
#ifdef __linux__
#define HAVE_FUTEX 1
#endif
#endif // HAVE_FUTEX
+#pragma mark - semaphores
+
+#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
+#if TARGET_OS_MAC
+#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT)
+#else
+#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK 0
+#endif
+#endif
+
#if USE_MACH_SEM
-#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
- if (unlikely((x) == KERN_INVALID_NAME)) { \
- DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
- } else if (unlikely(x)) { \
- DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
- } \
- } while (0)
-#define DISPATCH_GROUP_VERIFY_KR(x) do { \
- if (unlikely((x) == KERN_INVALID_NAME)) { \
- DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \
- } else if (unlikely(x)) { \
- DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
- } \
- } while (0)
+
+typedef semaphore_t _os_semaphore_t;
+#define _OS_SEM_POLICY_FIFO SYNC_POLICY_FIFO
+#define _OS_SEM_POLICY_LIFO SYNC_POLICY_LIFO
+#define _OS_SEM_TIMEOUT() KERN_OPERATION_TIMED_OUT
+
+#define _os_semaphore_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
+#define _os_semaphore_is_created(sema) (*(sema) != MACH_PORT_NULL)
+void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy);
+
#elif USE_POSIX_SEM
-#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
- if (unlikely((x) == -1)) { \
- DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
- } \
- } while (0)
+
+typedef sem_t _os_semaphore_t;
+#define _OS_SEM_POLICY_FIFO 0
+#define _OS_SEM_POLICY_LIFO 0
+#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)
+
+void _os_semaphore_init(_os_semaphore_t *sema, int policy);
+#define _os_semaphore_is_created(sema) 1
+#define _os_semaphore_create_slow(sema, policy) ((void)0)
+
+#elif USE_WIN32_SEM
+
+typedef HANDLE _os_semaphore_t;
+#define _OS_SEM_POLICY_FIFO 0
+#define _OS_SEM_POLICY_LIFO 0
+#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)
+
+#define _os_semaphore_init(sema, policy) (void)(*(sema) = 0)
+#define _os_semaphore_is_created(sema) (*(sema) != 0)
+void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy);
+
+#else
+#error "port has to implement _os_semaphore_t"
#endif
+void _os_semaphore_dispose_slow(_os_semaphore_t *sema);
+void _os_semaphore_signal(_os_semaphore_t *sema, long count);
+void _os_semaphore_wait(_os_semaphore_t *sema);
+bool _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout);
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_os_semaphore_create(_os_semaphore_t *sema, int policy)
+{
+ if (!_os_semaphore_is_created(sema)) {
+ _os_semaphore_create_slow(sema, policy);
+ }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_os_semaphore_dispose(_os_semaphore_t *sema)
+{
+ if (_os_semaphore_is_created(sema)) {
+ _os_semaphore_dispose_slow(sema);
+ }
+}
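// Rough usage sketch, reconstructed from the dispatch_semaphore_t and
// dispatch_group_t call sites above (not a new API):
//     _os_semaphore_init(&sema, _OS_SEM_POLICY_FIFO);   // at object creation
//     _os_semaphore_create(&sema, _OS_SEM_POLICY_FIFO); // lazily, before first use
//     _os_semaphore_signal(&sema, 1);
//     _os_semaphore_wait(&sema); // or _os_semaphore_timedwait(&sema, timeout)
//     _os_semaphore_dispose(&sema);                     // at teardown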
+
+#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
+semaphore_t _dispatch_thread_semaphore_create(void);
+void _dispatch_thread_semaphore_dispose(void *);
+
+DISPATCH_ALWAYS_INLINE
+static inline semaphore_t
+_dispatch_get_thread_semaphore(void)
+{
+ semaphore_t sema = (semaphore_t)(uintptr_t)
+ _dispatch_thread_getspecific(dispatch_sema4_key);
+ if (unlikely(!sema)) {
+ return _dispatch_thread_semaphore_create();
+ }
+ _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
+ return sema;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_put_thread_semaphore(semaphore_t sema)
+{
+ semaphore_t old_sema = (semaphore_t)(uintptr_t)
+ _dispatch_thread_getspecific(dispatch_sema4_key);
+ _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema);
+ if (unlikely(old_sema)) {
+ return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema);
+ }
+}
+#endif
+
+
#pragma mark - compare and wait
DISPATCH_NOT_TAIL_CALLED
typedef struct dispatch_thread_event_s {
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
union {
- semaphore_t dte_semaphore;
+ _os_semaphore_t dte_sema;
uint32_t dte_value;
};
#elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
// UINT32_MAX means waited on, but not signalled yet
// 0 is the initial and final state
uint32_t dte_value;
-#elif USE_POSIX_SEM
- sem_t dte_sem;
#else
-# error define dispatch_thread_event_s for your platform
+ _os_semaphore_t dte_sema;
#endif
} dispatch_thread_event_s, *dispatch_thread_event_t;
-#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
-semaphore_t _dispatch_thread_semaphore_create(void);
-void _dispatch_thread_semaphore_dispose(void *);
-
-DISPATCH_ALWAYS_INLINE
-static inline semaphore_t
-_dispatch_get_thread_semaphore(void)
-{
- semaphore_t sema = (semaphore_t)(uintptr_t)
- _dispatch_thread_getspecific(dispatch_sema4_key);
- if (unlikely(!sema)) {
- return _dispatch_thread_semaphore_create();
- }
- _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
- return sema;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_put_thread_semaphore(semaphore_t sema)
-{
- semaphore_t old_sema = (semaphore_t)(uintptr_t)
- _dispatch_thread_getspecific(dispatch_sema4_key);
- _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema);
- if (unlikely(old_sema)) {
- return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema);
- }
-}
-#endif
-
DISPATCH_NOT_TAIL_CALLED
void _dispatch_thread_event_wait_slow(dispatch_thread_event_t);
void _dispatch_thread_event_signal_slow(dispatch_thread_event_t);
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
- dte->dte_semaphore = _dispatch_get_thread_semaphore();
+ dte->dte_sema = _dispatch_get_thread_semaphore();
return;
}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
dte->dte_value = 0;
-#elif USE_POSIX_SEM
- int rc = sem_init(&dte->dte_sem, 0, 0);
- DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+#else
+ _os_semaphore_init(&dte->dte_sema, _OS_SEM_POLICY_FIFO);
#endif
}
// waiters do the validation
return;
}
-#elif USE_POSIX_SEM
+#else
// fallthrough
#endif
_dispatch_thread_event_signal_slow(dte);
// for any other value, go to the slowpath which checks it's not corrupt
return;
}
-#elif USE_POSIX_SEM
+#else
// fallthrough
#endif
_dispatch_thread_event_wait_slow(dte);
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
- _dispatch_put_thread_semaphore(dte->dte_semaphore);
+ _dispatch_put_thread_semaphore(dte->dte_sema);
return;
}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
// nothing to do
dispatch_assert(dte->dte_value == 0);
-#elif USE_POSIX_SEM
- int rc = sem_destroy(&dte->dte_sem);
- DISPATCH_SEMAPHORE_VERIFY_RET(rc);
+#else
+ _os_semaphore_dispose(&dte->dte_sema);
#endif
}
}
#endif
-uint64_t _dispatch_get_nanoseconds(void);
+typedef enum {
+ DISPATCH_CLOCK_WALL,
+ DISPATCH_CLOCK_MACH,
+#define DISPATCH_CLOCK_COUNT (DISPATCH_CLOCK_MACH + 1)
+} dispatch_clock_t;
#if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME
// x86 currently implements mach time in nanoseconds
}
#endif
+/* XXXRW: Some kind of overflow detection needed? */
+#define _dispatch_timespec_to_nano(ts) \
+ ((uint64_t)(ts).tv_sec * NSEC_PER_SEC + (uint64_t)(ts).tv_nsec)
+#define _dispatch_timeval_to_nano(tv) \
+ ((uint64_t)(tv).tv_sec * NSEC_PER_SEC + \
+ (uint64_t)(tv).tv_usec * NSEC_PER_USEC)
+
+static inline uint64_t
+_dispatch_get_nanoseconds(void)
+{
+ dispatch_static_assert(sizeof(NSEC_PER_SEC) == 8);
+ dispatch_static_assert(sizeof(USEC_PER_SEC) == 8);
+
+#if TARGET_OS_MAC && DISPATCH_HOST_SUPPORTS_OSX(101200)
+ return clock_gettime_nsec_np(CLOCK_REALTIME);
+#elif HAVE_DECL_CLOCK_REALTIME
+ struct timespec ts;
+ dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts));
+ return _dispatch_timespec_to_nano(ts);
+#elif TARGET_OS_WIN32
+ // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
+ FILETIME ft;
+ ULARGE_INTEGER li;
+ GetSystemTimeAsFileTime(&ft);
+ li.LowPart = ft.dwLowDateTime;
+ li.HighPart = ft.dwHighDateTime;
+ return li.QuadPart * 100ull;
+#else
+ struct timeval tv;
+ dispatch_assert_zero(gettimeofday(&tv, NULL));
+ return _dispatch_timeval_to_nano(tv);
+#endif
+}
+
static inline uint64_t
_dispatch_absolute_time(void)
{
#if HAVE_MACH_ABSOLUTE_TIME
return mach_absolute_time();
+#elif HAVE_DECL_CLOCK_UPTIME && !defined(__linux__)
+ struct timespec ts;
+ dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts));
+ return _dispatch_timespec_to_nano(ts);
+#elif HAVE_DECL_CLOCK_MONOTONIC
+ struct timespec ts;
+ dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts));
+ return _dispatch_timespec_to_nano(ts);
#elif TARGET_OS_WIN32
LARGE_INTEGER now;
return QueryPerformanceCounter(&now) ? now.QuadPart : 0;
#else
- struct timespec ts;
- int ret;
-
-#if HAVE_DECL_CLOCK_UPTIME
- ret = clock_gettime(CLOCK_UPTIME, &ts);
-#elif HAVE_DECL_CLOCK_MONOTONIC
- ret = clock_gettime(CLOCK_MONOTONIC, &ts);
-#else
-#error "clock_gettime: no supported absolute time clock"
+#error platform needs to implement _dispatch_absolute_time()
#endif
- (void)dispatch_assume_zero(ret);
-
- /* XXXRW: Some kind of overflow detection needed? */
- return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec);
-#endif // HAVE_MACH_ABSOLUTE_TIME
}
+DISPATCH_ALWAYS_INLINE
static inline uint64_t
_dispatch_approximate_time(void)
{
return _dispatch_absolute_time();
}
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dispatch_time_now(dispatch_clock_t clock)
+{
+ switch (clock) {
+ case DISPATCH_CLOCK_MACH:
+ return _dispatch_absolute_time();
+ case DISPATCH_CLOCK_WALL:
+ return _dispatch_get_nanoseconds();
+ }
+ __builtin_unreachable();
+}
+
+typedef struct {
+ uint64_t nows[DISPATCH_CLOCK_COUNT];
+} dispatch_clock_now_cache_s, *dispatch_clock_now_cache_t;
+
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_dispatch_time_now_cached(dispatch_clock_t clock,
+ dispatch_clock_now_cache_t cache)
+{
+ if (likely(cache->nows[clock])) {
+ return cache->nows[clock];
+ }
+ return cache->nows[clock] = _dispatch_time_now(clock);
+}
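+/*
+ * Illustrative usage (editorial sketch, not part of the patch): callers that
+ * need "now" for several timers in one pass stack-allocate a cache so each
+ * clock is read at most once. A zero entry means "not queried yet", so the
+ * first lookup per clock reads the clock and later lookups return the cached
+ * value.
+ *
+ *	dispatch_clock_now_cache_s nows = { };
+ *	uint64_t wall = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, &nows);
+ *	uint64_t mach = _dispatch_time_now_cached(DISPATCH_CLOCK_MACH, &nows);
+ *	// a second WALL lookup returns nows.nows[DISPATCH_CLOCK_WALL] unchanged
+ *	dispatch_assert(wall == _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, &nows));
+ *	(void)mach;
+ */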
+
#endif // __DISPATCH_SHIMS_TIME__
#include "protocol.h"
#include "protocolServer.h"
#endif
-#include <sys/mount.h>
#define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1
#define DKEV_UNREGISTER_DISCONNECTED 0x2
#define DKEV_UNREGISTER_REPLY_REMOVE 0x4
#define DKEV_UNREGISTER_WAKEUP 0x8
+static pthread_priority_t
+_dispatch_source_compute_kevent_priority(dispatch_source_t ds);
static void _dispatch_source_handler_free(dispatch_source_t ds, long kind);
static void _dispatch_source_merge_kevent(dispatch_source_t ds,
const _dispatch_kevent_qos_s *ke);
static void dispatch_kevent_debug(const char *verb,
const _dispatch_kevent_qos_s *kev, int i, int n,
const char *function, unsigned int line);
-static void _dispatch_kevent_debugger(void *context);
#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q)
#else
switch (type->ke.filter) {
case DISPATCH_EVFILT_TIMER:
break; // timers don't need masks
-#if DISPATCH_USE_VM_PRESSURE
- case EVFILT_VM:
- break; // type->init forces the only acceptable mask
-#endif
case DISPATCH_EVFILT_MACH_NOTIFICATION:
break; // type->init handles zero mask as a legacy case
default:
}
break;
case EVFILT_FS:
-#if DISPATCH_USE_VM_PRESSURE
- case EVFILT_VM:
-#endif
#if DISPATCH_USE_MEMORYSTATUS
case EVFILT_MEMORYSTATUS:
#endif
}
// Some sources require special processing
if (type->init != NULL) {
- type->init(ds, type, handle, mask, dq);
+ type->init(ds, type, handle, mask);
}
dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder));
if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) {
dispatch_assert_zero((bool)ds->ds_is_installed);
switch (ds->ds_dkev->dk_kevent.filter) {
case DISPATCH_EVFILT_TIMER:
+ // aggressively coalesce background/maintenance QoS timers
+ // <rdar://problem/12200216&27342536>
+ pp = _dispatch_source_compute_kevent_priority(ds);
+ if (_dispatch_is_background_priority(pp)) {
+ ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_BACKGROUND;
+ }
_dispatch_timers_update(ds);
_dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
_dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev);
#pragma mark -
#pragma mark dispatch_kevent_t
-#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD
-static void _dispatch_kevent_guard(dispatch_kevent_t dk);
-static void _dispatch_kevent_unguard(dispatch_kevent_t dk);
-#else
-static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; }
-static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; }
-#endif
-
-#if !DISPATCH_USE_EV_UDATA_SPECIFIC
-static struct dispatch_kevent_s _dispatch_kevent_data_or = {
- .dk_kevent = {
- .filter = DISPATCH_EVFILT_CUSTOM_OR,
- .flags = EV_CLEAR,
- },
- .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources),
-};
-static struct dispatch_kevent_s _dispatch_kevent_data_add = {
- .dk_kevent = {
- .filter = DISPATCH_EVFILT_CUSTOM_ADD,
- },
- .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources),
-};
-#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
-
#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1))
DISPATCH_CACHELINE_ALIGN
for (i = 0; i < DSL_HASH_SIZE; i++) {
TAILQ_INIT(&_dispatch_sources[i]);
}
-
-#if !DISPATCH_USE_EV_UDATA_SPECIFIC
- TAILQ_INSERT_TAIL(&_dispatch_sources[0],
- &_dispatch_kevent_data_or, dk_list);
- TAILQ_INSERT_TAIL(&_dispatch_sources[0],
- &_dispatch_kevent_data_add, dk_list);
- _dispatch_kevent_data_or.dk_kevent.udata =
- (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_or;
- _dispatch_kevent_data_add.dk_kevent.udata =
- (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_add;
-#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
}
static inline uintptr_t
_dispatch_kevent_insert(dispatch_kevent_t dk)
{
if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return;
- _dispatch_kevent_guard(dk);
uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
dk->dk_kevent.filter);
TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list);
dk->dk_kevent.filter);
TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list);
}
- _dispatch_kevent_unguard(dk);
free(dk);
return r;
}
static void
_dispatch_kevent_drain(_dispatch_kevent_qos_s *ke)
{
-#if DISPATCH_DEBUG
- static dispatch_once_t pred;
- dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger);
-#endif
if (ke->filter == EVFILT_USER) {
_dispatch_kevent_mgr_debug(ke);
return;
}
}
-#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD
-static void
-_dispatch_kevent_guard(dispatch_kevent_t dk)
-{
- guardid_t guard;
- const unsigned int guard_flags = GUARD_CLOSE;
- int r, fd_flags = 0;
- switch (dk->dk_kevent.filter) {
- case EVFILT_READ:
- case EVFILT_WRITE:
- case EVFILT_VNODE:
- guard = &dk->dk_kevent;
- r = change_fdguard_np((int)dk->dk_kevent.ident, NULL, 0,
- &guard, guard_flags, &fd_flags);
- if (slowpath(r == -1)) {
- int err = errno;
- if (err != EPERM) {
- (void)dispatch_assume_zero(err);
- }
- return;
- }
- dk->dk_kevent.ext[0] = guard_flags;
- dk->dk_kevent.ext[1] = fd_flags;
- break;
- }
-}
-
-static void
-_dispatch_kevent_unguard(dispatch_kevent_t dk)
-{
- guardid_t guard;
- unsigned int guard_flags;
- int r, fd_flags;
- switch (dk->dk_kevent.filter) {
- case EVFILT_READ:
- case EVFILT_WRITE:
- case EVFILT_VNODE:
- guard_flags = (unsigned int)dk->dk_kevent.ext[0];
- if (!guard_flags) {
- return;
- }
- guard = &dk->dk_kevent;
- fd_flags = (int)dk->dk_kevent.ext[1];
- r = change_fdguard_np((int)dk->dk_kevent.ident, &guard,
- guard_flags, NULL, 0, &fd_flags);
- if (slowpath(r == -1)) {
- (void)dispatch_assume_zero(errno);
- return;
- }
- dk->dk_kevent.ext[0] = 0;
- break;
- }
-}
-#endif // DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD
-
#pragma mark -
#pragma mark dispatch_source_timer
DISPATCH_NOINLINE
static void
_dispatch_source_timer_telemetry_slow(dispatch_source_t ds,
- uintptr_t ident, struct dispatch_timer_source_s *values)
+ dispatch_clock_t clock, struct dispatch_timer_source_s *values)
{
if (_dispatch_trace_timer_configure_enabled()) {
- _dispatch_trace_timer_configure(ds, ident, values);
+ _dispatch_trace_timer_configure(ds, clock, values);
}
}
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_source_timer_telemetry(dispatch_source_t ds, uintptr_t ident,
+_dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock,
struct dispatch_timer_source_s *values)
{
if (_dispatch_trace_timer_configure_enabled() ||
_dispatch_source_timer_telemetry_enabled()) {
- _dispatch_source_timer_telemetry_slow(ds, ident, values);
+ _dispatch_source_timer_telemetry_slow(ds, clock, values);
asm(""); // prevent tailcall
}
}
-// approx 1 year (60s * 60m * 24h * 365d)
-#define FOREVER_NSEC 31536000000000000ull
-
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dispatch_source_timer_now(uint64_t nows[], unsigned int tidx)
-{
- unsigned int tk = DISPATCH_TIMER_KIND(tidx);
- if (nows && fastpath(nows[tk] != 0)) {
- return nows[tk];
- }
- uint64_t now;
- switch (tk) {
- case DISPATCH_TIMER_KIND_MACH:
- now = _dispatch_absolute_time();
- break;
- case DISPATCH_TIMER_KIND_WALL:
- now = _dispatch_get_nanoseconds();
- break;
- }
- if (nows) {
- nows[tk] = now;
- }
- return now;
-}
-
static inline unsigned long
_dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev)
{
// calculate the number of intervals since last fire
unsigned long data, missed;
uint64_t now;
- now = _dispatch_source_timer_now(NULL, _dispatch_source_timer_idx(dr));
+ now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(_dispatch_source_timer_idx(dr)));
missed = (unsigned long)((now - ds_timer(dr).last_fire) /
ds_timer(dr).interval);
// correct for missed intervals already delivered last time
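	/*
	 * Worked example (illustrative): with interval == 100ms and
	 * last_fire == T, a wakeup at now == T + 350ms gives missed == 3;
	 * intervals already reported on the previous fire are then deducted,
	 * as noted in the comment above.
	 */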
struct dispatch_set_timer_params {
dispatch_source_t ds;
- uintptr_t ident;
struct dispatch_timer_source_s values;
+ dispatch_clock_t clock;
};
static void
// Called on the _dispatch_mgr_q
struct dispatch_set_timer_params *params = context;
dispatch_source_t ds = params->ds;
- ds->ds_ident_hack = params->ident;
- ds_timer(ds->ds_refs) = params->values;
+ dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t)ds->ds_refs;
+
+ params->values.flags = ds_timer(dt).flags;
+ if (params->clock == DISPATCH_CLOCK_WALL) {
+ params->values.flags |= DISPATCH_TIMER_WALL_CLOCK;
+#if HAVE_MACH
+ _dispatch_mach_host_calendar_change_register();
+#endif
+ } else {
+ params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK;
+ }
+ ds_timer(dt) = params->values;
+ ds->ds_ident_hack = _dispatch_source_timer_idx(ds->ds_refs);
// Clear any pending data that might have accumulated on
// older timer params <rdar://problem/8574886>
ds->ds_pending_data = 0;
- // Re-arm in case we got disarmed because of pending set_timer suspension
- _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED);
- _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev);
+
dispatch_resume(ds);
- // Must happen after resume to avoid getting disarmed due to suspension
- _dispatch_timers_update(ds);
- dispatch_release(ds);
- if (params->values.flags & DISPATCH_TIMER_WALL_CLOCK) {
- _dispatch_mach_host_calendar_change_register();
+ if (_dispatch_source_tryarm(ds)) {
+ // Re-arm in case we got disarmed because of pending set_timer suspension
+ _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dt);
+ // Must happen after resume to avoid getting disarmed due to suspension
+ _dispatch_timers_update(ds);
}
+ dispatch_release(ds);
free(params);
}
struct dispatch_set_timer_params *params;
params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params));
params->ds = ds;
- params->values.flags = ds_timer(ds->ds_refs).flags;
if (interval == 0) {
// we use zero internally to mean disabled
if ((int64_t)start < 0) {
// wall clock
start = (dispatch_time_t)-((int64_t)start);
- params->values.flags |= DISPATCH_TIMER_WALL_CLOCK;
+ params->clock = DISPATCH_CLOCK_WALL;
} else {
// absolute clock
interval = _dispatch_time_nano2mach(interval);
interval = 1;
}
leeway = _dispatch_time_nano2mach(leeway);
- params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK;
+ params->clock = DISPATCH_CLOCK_MACH;
}
- params->ident = DISPATCH_TIMER_IDENT(params->values.flags);
params->values.target = start;
params->values.deadline = (start < UINT64_MAX - leeway) ?
start + leeway : UINT64_MAX;
struct dispatch_set_timer_params *params;
params = _dispatch_source_timer_params(ds, start, interval, leeway);
- _dispatch_source_timer_telemetry(ds, params->ident, &params->values);
+ _dispatch_source_timer_telemetry(ds, params->clock, &params->values);
// Suspend the source so that it doesn't fire with pending changes
// The use of suspend/resume requires the external retain/release
dispatch_retain(ds);
void
_dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval)
{
+#define NSEC_PER_FRAME (NSEC_PER_SEC/60)
+// approx 1 year (60s * 60m * 24h * 365d)
+#define FOREVER_NSEC 31536000000000000ull
+
dispatch_source_refs_t dr = ds->ds_refs;
- #define NSEC_PER_FRAME (NSEC_PER_SEC/60)
const bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION;
if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME :
FOREVER_NSEC/NSEC_PER_MSEC))) {
ds_timer(dr).deadline = target + leeway;
ds_timer(dr).interval = interval;
ds_timer(dr).leeway = leeway;
- _dispatch_source_timer_telemetry(ds, ds->ds_ident_hack, &ds_timer(dr));
+ dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(ds->ds_ident_hack);
+ _dispatch_source_timer_telemetry(ds, clock, &ds_timer(dr));
}
#pragma mark -
}
#define DISPATCH_TIMER_INIT(kind, qos) \
DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \
- DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos))
+ DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos))
struct dispatch_timer_s _dispatch_timer[] = {
DISPATCH_TIMER_INIT(WALL, NORMAL),
}
#define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \
DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \
- DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos))
+ DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos))
struct dispatch_kevent_s _dispatch_kevent_timer[] = {
DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL),
}
#define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \
DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \
- DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos), note)
+ DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos), note)
_dispatch_kevent_qos_s _dispatch_kevent_timeout[] = {
DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME),
}
static inline void
-_dispatch_timers_run2(uint64_t nows[], unsigned int tidx)
+_dispatch_timers_run2(dispatch_clock_now_cache_t nows, unsigned int tidx)
{
dispatch_source_refs_t dr;
dispatch_source_t ds;
uint64_t now, missed;
- now = _dispatch_source_timer_now(nows, tidx);
+ now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) {
ds = _dispatch_source_from_refs(dr);
// We may find timers on the wrong list due to a pending update from
DISPATCH_NOINLINE
static void
-_dispatch_timers_run(uint64_t nows[])
+_dispatch_timers_run(dispatch_clock_now_cache_t nows)
{
unsigned int tidx;
for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
}
}
+#define DISPATCH_TIMERS_GET_DELAY_ALL (~0u)
+
static inline unsigned int
-_dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[],
- uint64_t *delay, uint64_t *leeway, int qos, int kind)
+_dispatch_timers_get_delay(dispatch_clock_now_cache_t nows,
+ struct dispatch_timer_s timer[],
+ uint64_t *delay, uint64_t *leeway, unsigned int query)
{
- unsigned int tidx, ridx = DISPATCH_TIMER_COUNT;
- uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX;
+ unsigned int tidx, ridx = DISPATCH_TIMER_COUNT, minidx, maxidx;
+ uint64_t tmp, delta = INT64_MAX, dldelta = INT64_MAX;
- for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) {
- if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){
- continue;
- }
- if (kind >= 0 && kind != DISPATCH_TIMER_KIND(tidx)){
- continue;
- }
+ if (query == DISPATCH_TIMERS_GET_DELAY_ALL) {
+ minidx = 0;
+ maxidx = DISPATCH_TIMER_COUNT - 1;
+ } else {
+ minidx = maxidx = query;
+ }
+
+ for (tidx = minidx; tidx <= maxidx; tidx++) {
+ dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx);
uint64_t target = timer[tidx].target;
- if (target == UINT64_MAX) {
+ if (target >= INT64_MAX) {
continue;
}
uint64_t deadline = timer[tidx].deadline;
- if (qos >= 0) {
+ if (query != DISPATCH_TIMERS_GET_DELAY_ALL) {
// Timer pre-coalescing <rdar://problem/13222034>
+ unsigned int qos = DISPATCH_TIMER_QOS(tidx);
uint64_t window = _dispatch_kevent_coalescing_window[qos];
uint64_t latest = deadline > window ? deadline - window : 0;
dispatch_source_refs_t dri;
target = tmp;
}
}
- uint64_t now = _dispatch_source_timer_now(nows, tidx);
+ uint64_t now = _dispatch_time_now_cached(clock, nows);
if (target <= now) {
delta = 0;
break;
}
tmp = target - now;
- if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) {
+ if (clock != DISPATCH_CLOCK_WALL) {
tmp = _dispatch_time_mach2nano(tmp);
}
if (tmp < INT64_MAX && tmp < delta) {
}
dispatch_assert(target <= deadline);
tmp = deadline - now;
- if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) {
+ if (clock != DISPATCH_CLOCK_WALL) {
tmp = _dispatch_time_mach2nano(tmp);
}
if (tmp < INT64_MAX && tmp < dldelta) {
}
}
*delay = delta;
- *leeway = delta && delta < UINT64_MAX ? dldelta - delta : UINT64_MAX;
+ *leeway = delta && delta < INT64_MAX ? dldelta - delta : INT64_MAX;
return ridx;
}
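/*
 * Worked example (illustrative): with a single armed wall-clock timer whose
 * target is 5ms away and whose deadline is 7ms away, the loop above produces
 * delta == 5ms and dldelta == 7ms, so the caller sees *delay == 5ms and
 * *leeway == 7ms - 5ms == 2ms. A target that is already in the past
 * short-circuits to delta == 0, i.e. "poll immediately".
 */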
static void
_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay,
- uint64_t leeway, uint64_t nows[])
+ uint64_t leeway, dispatch_clock_now_cache_t nows)
{
// call to update nows[]
- _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL);
+ _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
+#ifdef KEVENT_NSEC_NOT_SUPPORTED
// adjust nsec based delay to msec based and ignore leeway
delay /= 1000000L;
if ((int64_t)(delay) <= 0) {
delay = 1; // if value <= 0 the dispatch will stop
}
+#else
+ ke->fflags |= NOTE_NSECONDS;
+#endif
ke->data = (int64_t)delay;
}
#else
static void
_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay,
- uint64_t leeway, uint64_t nows[])
+ uint64_t leeway, dispatch_clock_now_cache_t nows)
{
- delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL);
+ delay += _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
if (slowpath(_dispatch_timers_force_max_leeway)) {
ke->data = (int64_t)(delay + leeway);
ke->ext[1] = 0;
#endif // __linux__
static bool
-_dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke,
- unsigned int tidx)
+_dispatch_timers_program2(dispatch_clock_now_cache_t nows,
+ _dispatch_kevent_qos_s *ke, unsigned int tidx)
{
bool poll;
uint64_t delay, leeway;
- _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway,
- (int)DISPATCH_TIMER_QOS(tidx), (int)DISPATCH_TIMER_KIND(tidx));
+ _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, tidx);
poll = (delay == 0);
if (poll || delay == UINT64_MAX) {
_dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx));
DISPATCH_NOINLINE
static bool
-_dispatch_timers_program(uint64_t nows[])
+_dispatch_timers_program(dispatch_clock_now_cache_t nows)
{
bool poll = false;
unsigned int tidx, timerm = _dispatch_timers_mask;
_dispatch_timer_expired = true;
for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
_dispatch_timers_mask |=
- 1 << DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_WALL, qos);
+ 1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos);
}
}
#endif
static inline bool
_dispatch_mgr_timers(void)
{
- uint64_t nows[DISPATCH_TIMER_KIND_COUNT] = {};
+ dispatch_clock_now_cache_s nows = { };
bool expired = slowpath(_dispatch_timer_expired);
if (expired) {
- _dispatch_timers_run(nows);
+ _dispatch_timers_run(&nows);
}
bool reconfigure = slowpath(_dispatch_timers_reconfigure);
if (reconfigure || expired) {
_dispatch_timers_reconfigure = false;
}
if (reconfigure || expired) {
- expired = _dispatch_timer_expired = _dispatch_timers_program(nows);
+ expired = _dispatch_timer_expired = _dispatch_timers_program(&nows);
expired = expired || _dispatch_mgr_q.dq_items_tail;
}
_dispatch_timers_mask = 0;
_dispatch_timer_aggregate_get_delay(void *ctxt)
{
dispatch_timer_delay_t dtd = ctxt;
- struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {};
- _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway,
- -1, -1);
+ dispatch_clock_now_cache_s nows = { };
+ _dispatch_timers_get_delay(&nows, dtd->timer, &dtd->delay, &dtd->leeway,
+ DISPATCH_TIMERS_GET_DELAY_ALL);
}
uint64_t
DISPATCH_MEMORYPRESSURE_CRITICAL | \
DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL)
-#elif DISPATCH_USE_VM_PRESSURE_SOURCE
-#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM
-#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK DISPATCH_VM_PRESSURE
#endif
-#if DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE
+#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
static dispatch_source_t _dispatch_memorypressure_source;
static void
if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) {
malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK);
}
-#elif DISPATCH_USE_VM_PRESSURE_SOURCE
- // we must have gotten DISPATCH_VM_PRESSURE
- malloc_zone_pressure_relief(0,0);
#endif
}
}
#else
static inline void _dispatch_memorypressure_init(void) {}
-#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE
+#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE
#pragma mark -
#pragma mark dispatch_mach
_dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ unsigned long mask DISPATCH_UNUSED)
{
ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT;
#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK
#endif
_evfilt2(EVFILT_FS);
_evfilt2(EVFILT_USER);
-#ifdef EVFILT_VM
- _evfilt2(EVFILT_VM);
-#endif
#ifdef EVFILT_SOCK
_evfilt2(EVFILT_SOCK);
#endif
#endif
}
-static void
-_dispatch_kevent_debugger2(void *context)
-{
- struct sockaddr sa;
- socklen_t sa_len = sizeof(sa);
- int c, fd = (int)(long)context;
- unsigned int i;
- dispatch_kevent_t dk;
- dispatch_source_t ds;
- dispatch_source_refs_t dr;
- FILE *debug_stream;
-
- c = accept(fd, &sa, &sa_len);
- if (c == -1) {
- if (errno != EAGAIN) {
- (void)dispatch_assume_zero(errno);
- }
- return;
- }
-#if 0
- int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO
- if (r == -1) {
- (void)dispatch_assume_zero(errno);
- }
-#endif
- debug_stream = fdopen(c, "a");
- if (!dispatch_assume(debug_stream)) {
- close(c);
- return;
- }
-
- fprintf(debug_stream, "HTTP/1.0 200 OK\r\n");
- fprintf(debug_stream, "Content-type: text/html\r\n");
- fprintf(debug_stream, "Pragma: nocache\r\n");
- fprintf(debug_stream, "\r\n");
- fprintf(debug_stream, "<html>\n");
- fprintf(debug_stream, "<head><title>PID %u</title></head>\n", getpid());
- fprintf(debug_stream, "<body>\n<ul>\n");
-
- for (i = 0; i < DSL_HASH_SIZE; i++) {
- if (TAILQ_EMPTY(&_dispatch_sources[i])) {
- continue;
- }
- TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) {
- fprintf(debug_stream, "\t<br><li>DK %p ident %lu filter %s flags "
- "0x%hx fflags 0x%x data 0x%lx udata %p\n",
- dk, (unsigned long)dk->dk_kevent.ident,
- _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags,
- dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data,
- (void*)dk->dk_kevent.udata);
- fprintf(debug_stream, "\t\t<ul>\n");
- TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) {
- ds = _dispatch_source_from_refs(dr);
- fprintf(debug_stream, "\t\t\t<li>DS %p refcnt 0x%x state "
- "0x%llx data 0x%lx mask 0x%lx flags 0x%x</li>\n",
- ds, ds->do_ref_cnt + 1, ds->dq_state,
- ds->ds_pending_data, ds->ds_pending_data_mask,
- ds->dq_atomic_flags);
- if (_dq_state_is_enqueued(ds->dq_state)) {
- dispatch_queue_t dq = ds->do_targetq;
- fprintf(debug_stream, "\t\t<br>DQ: %p refcnt 0x%x state "
- "0x%llx label: %s\n", dq, dq->do_ref_cnt + 1,
- dq->dq_state, dq->dq_label ?: "");
- }
- }
- fprintf(debug_stream, "\t\t</ul>\n");
- fprintf(debug_stream, "\t</li>\n");
- }
- }
- fprintf(debug_stream, "</ul>\n</body>\n</html>\n");
- fflush(debug_stream);
- fclose(debug_stream);
-}
-
-static void
-_dispatch_kevent_debugger2_cancel(void *context)
-{
- int ret, fd = (int)(long)context;
-
- ret = close(fd);
- if (ret != -1) {
- (void)dispatch_assume_zero(errno);
- }
-}
-
-static void
-_dispatch_kevent_debugger(void *context DISPATCH_UNUSED)
-{
- union {
- struct sockaddr_in sa_in;
- struct sockaddr sa;
- } sa_u = {
- .sa_in = {
- .sin_family = AF_INET,
- .sin_addr = { htonl(INADDR_LOOPBACK), },
- },
- };
- dispatch_source_t ds;
- const char *valstr;
- int val, r, fd, sock_opt = 1;
- socklen_t slen = sizeof(sa_u);
-
-#ifndef __linux__
- if (issetugid()) {
- return;
- }
-#endif
- valstr = getenv("LIBDISPATCH_DEBUGGER");
- if (!valstr) {
- return;
- }
- val = atoi(valstr);
- if (val == 2) {
- sa_u.sa_in.sin_addr.s_addr = 0;
- }
- fd = socket(PF_INET, SOCK_STREAM, 0);
- if (fd == -1) {
- (void)dispatch_assume_zero(errno);
- return;
- }
- r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt,
- (socklen_t) sizeof sock_opt);
- if (r == -1) {
- (void)dispatch_assume_zero(errno);
- goto out_bad;
- }
-#if 0
- r = fcntl(fd, F_SETFL, O_NONBLOCK);
- if (r == -1) {
- (void)dispatch_assume_zero(errno);
- goto out_bad;
- }
-#endif
- r = bind(fd, &sa_u.sa, sizeof(sa_u));
- if (r == -1) {
- (void)dispatch_assume_zero(errno);
- goto out_bad;
- }
- r = listen(fd, SOMAXCONN);
- if (r == -1) {
- (void)dispatch_assume_zero(errno);
- goto out_bad;
- }
- r = getsockname(fd, &sa_u.sa, &slen);
- if (r == -1) {
- (void)dispatch_assume_zero(errno);
- goto out_bad;
- }
-
- ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0,
- &_dispatch_mgr_q);
- if (dispatch_assume(ds)) {
- _dispatch_log("LIBDISPATCH: debug port: %hu",
- (in_port_t)ntohs(sa_u.sa_in.sin_port));
-
- /* ownership of fd transfers to ds */
- dispatch_set_context(ds, (void *)(long)fd);
- dispatch_source_set_event_handler_f(ds, _dispatch_kevent_debugger2);
- dispatch_source_set_cancel_handler_f(ds,
- _dispatch_kevent_debugger2_cancel);
- dispatch_resume(ds);
-
- return;
- }
-out_bad:
- close(fd);
-}
-
#if HAVE_MACH
#ifndef MACH_PORT_TYPE_SPREQUEST
#define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1)
#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul)
-#define DISPATCH_TIMER_KIND_WALL 0u
-#define DISPATCH_TIMER_KIND_MACH 1u
-#define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1)
-#define DISPATCH_TIMER_KIND(tidx) ((uintptr_t)(tidx) & 0x1ul)
+#define DISPATCH_TIMER_CLOCK(tidx) ((dispatch_clock_t)((uintptr_t)(tidx) & 1))
-#define DISPATCH_TIMER_INDEX(kind, qos) ((qos) << 1 | (kind))
+#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock))
#define DISPATCH_TIMER_INDEX_DISARM \
DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT)
#define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1)
#define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \
DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \
- DISPATCH_TIMER_KIND_WALL : DISPATCH_TIMER_KIND_MACH, \
+ DISPATCH_CLOCK_WALL : DISPATCH_CLOCK_MACH, \
f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \
f & DISPATCH_TIMER_BACKGROUND ? DISPATCH_TIMER_QOS_BACKGROUND : \
DISPATCH_TIMER_QOS_NORMAL); })
_dispatch_kevent_qos_s ke;
uint64_t mask;
void (*init)(dispatch_source_t ds, dispatch_source_type_t type,
- uintptr_t handle, unsigned long mask, dispatch_queue_t q);
+ uintptr_t handle, unsigned long mask);
};
struct dispatch_timer_source_s {
@available(OSX 10.10, iOS 8.0, *)
public class DispatchWorkItem {
internal var _block: _DispatchBlock
- internal var _group: DispatchGroup?
- public init(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @convention(block) () -> ()) {
+ public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) {
_block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(flags.rawValue),
qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block)
}
- // Used by DispatchQueue.synchronously<T> to provide a @noescape path through
+ // Used by DispatchQueue.synchronously<T> to provide a path through
// dispatch_block_t, as we know the lifetime of the block in question.
- internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: @noescape () -> ()) {
+ internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) {
_block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(flags.rawValue), noescapeBlock)
}
public func perform() {
- if let g = _group {
- g.enter()
- defer { g.leave() }
- }
_block()
}
}
public func wait(timeout: DispatchTime) -> DispatchTimeoutResult {
- return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .Success : .TimedOut
+ return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .success : .timedOut
}
public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult {
- return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? .Success : .TimedOut
+ return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? .success : .timedOut
}
- public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute: @convention(block) () -> Void) {
+ public func notify(
+ qos: DispatchQoS = .unspecified,
+ flags: DispatchWorkItemFlags = [],
+ queue: DispatchQueue,
+ execute: @escaping @convention(block) () -> ())
+ {
if qos != .unspecified || !flags.isEmpty {
let item = DispatchWorkItem(qos: qos, flags: flags, block: execute)
dispatch_block_notify(_block, queue.__wrapped, item._block)
}
}
-@available(OSX 10.10, iOS 8.0, *)
-public extension DispatchWorkItem {
- @available(*, deprecated, renamed: "DispatchWorkItem.wait(self:wallTimeout:)")
- public func wait(timeout: DispatchWallTime) -> Int {
- switch wait(wallTimeout: timeout) {
- case .Success: return 0
- case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT
- }
- }
-}
-
/// The dispatch_block_t typealias is different from usual closures in that it
/// uses @convention(block). This is to avoid unnecessary bridging between
/// C blocks and Swift closures, which interferes with dispatch APIs that depend
internal typealias dispatch_block_t = @convention(block) () -> Void
@_silgen_name("_swift_dispatch_block_create_noescape")
-internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: @noescape () -> ()) -> _DispatchBlock
+internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: () -> ()) -> _DispatchBlock
public static let empty: DispatchData = DispatchData(data: _swift_dispatch_data_empty())
-#if false /* FIXME: dragging in _TMBO (Objective-C) */
public enum Deallocator {
/// Use `free`
case free
case unmap
/// A custom deallocator
- case custom(DispatchQueue?, @convention(block) () -> Void)
-
- private var _deallocator: (DispatchQueue?, @convention(block) () -> Void) {
+ // FIXME: Want @convention(block) here to minimize the overhead of
+ // doing the conversion (once per custom enum instance instead
+ // of once per call to DispatchData.init using the enum instance).
+ // However, adding the annotation here results in Data.o containing
+ // a reference to _TMBO (opaque metadata for Builtin.UnknownObject)
+ // which is only made available on platforms with Objective-C.
+ case custom(DispatchQueue?, () -> Void)
+
+ fileprivate var _deallocator: (DispatchQueue?, @convention(block) () -> Void) {
switch self {
case .free: return (nil, _dispatch_data_destructor_free())
case .unmap: return (nil, _dispatch_data_destructor_munmap())
}
}
}
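	// Illustrative usage of the custom deallocator (editorial sketch, not
	// part of this change; `cleanupQueue` and `count` are assumed to exist):
	//
	//     let raw = malloc(count)!
	//     let bytes = UnsafeBufferPointer(
	//         start: raw.assumingMemoryBound(to: UInt8.self), count: count)
	//     // The closure runs once, on cleanupQueue, when the last reference
	//     // to the data goes away.
	//     let data = DispatchData(bytesNoCopy: bytes,
	//                             deallocator: .custom(cleanupQueue) { free(raw) })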
-#endif
- internal var __wrapped: dispatch_data_t
+
+ internal var __wrapped: __DispatchData
/// Initialize a `Data` with copied memory content.
///
/// - parameter bytes: A pointer to the memory. It will be copied.
- /// - parameter count: The number of bytes to copy.
public init(bytes buffer: UnsafeBufferPointer<UInt8>) {
- __wrapped = dispatch_data_create(
- buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default())
+ let d = dispatch_data_create(buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default())
+ self.init(data: d)
}
-#if false /* FIXME: dragging in _TMBO (Objective-C) */
+
/// Initialize a `Data` without copying the bytes.
///
- /// - parameter bytes: A pointer to the bytes.
- /// - parameter count: The size of the bytes.
+ /// - parameter bytes: A buffer pointer containing the data.
/// - parameter deallocator: Specifies the mechanism to free the indicated buffer.
public init(bytesNoCopy bytes: UnsafeBufferPointer<UInt8>, deallocator: Deallocator = .free) {
let (q, b) = deallocator._deallocator
-
- __wrapped = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b)
+ let d = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b)
+ self.init(data: d)
}
-#endif
+
internal init(data: dispatch_data_t) {
- __wrapped = data
+ __wrapped = __DispatchData(data: data, owned: true)
+ }
+
+ internal init(borrowedData: dispatch_data_t) {
+ __wrapped = __DispatchData(data: borrowedData, owned: false)
}
public var count: Int {
- return CDispatch.dispatch_data_get_size(__wrapped)
+ return CDispatch.dispatch_data_get_size(__wrapped.__wrapped)
}
public func withUnsafeBytes<Result, ContentType>(
- body: @noescape (UnsafePointer<ContentType>) throws -> Result) rethrows -> Result
+ body: (UnsafePointer<ContentType>) throws -> Result) rethrows -> Result
{
- var ptr: UnsafePointer<Void>? = nil
- var size = 0;
- let data = CDispatch.dispatch_data_create_map(__wrapped, &ptr, &size)
+ var ptr: UnsafeRawPointer? = nil
+ var size = 0
+ let data = CDispatch.dispatch_data_create_map(__wrapped.__wrapped, &ptr, &size)
+ let contentPtr = ptr!.bindMemory(
+ to: ContentType.self, capacity: size / MemoryLayout<ContentType>.stride)
defer { _fixLifetime(data) }
- return try body(UnsafePointer<ContentType>(ptr!))
+ return try body(contentPtr)
}
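	// Illustrative usage (editorial sketch, not from the patch): read the
	// first little-endian UInt32 out of a DispatchData; the pointer handed
	// to the closure is only valid for the duration of the call.
	//
	//     func firstWord(of data: DispatchData) -> UInt32? {
	//         guard data.count >= MemoryLayout<UInt32>.size else { return nil }
	//         return data.withUnsafeBytes { (p: UnsafePointer<UInt32>) in
	//             UInt32(littleEndian: p.pointee)
	//         }
	//     }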
public func enumerateBytes(
- block: @noescape (buffer: UnsafeBufferPointer<UInt8>, byteIndex: Int, stop: inout Bool) -> Void)
+ block: @noescape (_ buffer: UnsafeBufferPointer<UInt8>, _ byteIndex: Int, _ stop: inout Bool) -> Void)
{
- _swift_dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer<Void>, size: Int) in
- let bp = UnsafeBufferPointer(start: UnsafePointer<UInt8>(ptr), count: size)
+ _swift_dispatch_data_apply(__wrapped.__wrapped) { (_, offset: Int, ptr: UnsafeRawPointer, size: Int) in
+ let bytePtr = ptr.bindMemory(to: UInt8.self, capacity: size)
+ let bp = UnsafeBufferPointer(start: bytePtr, count: size)
var stop = false
- block(buffer: bp, byteIndex: offset, stop: &stop)
+ block(bp, offset, &stop)
return !stop
}
}
///
/// - parameter data: The data to append to this data.
public mutating func append(_ other: DispatchData) {
- let data = CDispatch.dispatch_data_create_concat(__wrapped, other.__wrapped)
- __wrapped = data
+ let data = CDispatch.dispatch_data_create_concat(__wrapped.__wrapped, other.__wrapped.__wrapped)
+ __wrapped = __DispatchData(data: data, owned: true)
}
/// Append a buffer of bytes to the data.
///
/// - parameter buffer: The buffer of bytes to append. The size is calculated from `SourceType` and `buffer.count`.
public mutating func append<SourceType>(_ buffer : UnsafeBufferPointer<SourceType>) {
- self.append(UnsafePointer(buffer.baseAddress!), count: buffer.count * sizeof(SourceType.self))
+ let count = buffer.count * sizeof(SourceType.self)
+ buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: count) {
+ self.append($0, count: count)
+ }
}
- private func _copyBytesHelper(to pointer: UnsafeMutablePointer<UInt8>, from range: CountableRange<Index>) {
+ private func _copyBytesHelper(to pointer: UnsafeMutableRawPointer, from range: CountableRange<Index>) {
var copiedCount = 0
- _ = CDispatch.dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer<Void>, size: Int) in
+ _ = CDispatch.dispatch_data_apply(__wrapped.__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafeRawPointer, size: Int) in
let limit = Swift.min((range.endIndex - range.startIndex) - copiedCount, size)
memcpy(pointer + copiedCount, ptr, limit)
copiedCount += limit
/// Copy the contents of the data into a buffer.
///
- /// This function copies the bytes in `range` from the data into the buffer. If the count of the `range` is greater than `sizeof(DestinationType) * buffer.count` then the first N bytes will be copied into the buffer.
+ /// This function copies the bytes in `range` from the data into the buffer. If the count of the `range` is greater than `MemoryLayout<DestinationType>.stride * buffer.count` then the first N bytes will be copied into the buffer.
/// - precondition: The range must be within the bounds of the data. Otherwise `fatalError` is called.
/// - parameter buffer: A buffer to copy the data into.
/// - parameter range: A range in the data to copy into the buffer. If the range is empty, this function will return 0 without copying anything. If the range is nil, as much data as will fit into `buffer` is copied.
precondition(r.endIndex >= 0)
precondition(r.endIndex <= cnt, "The range is outside the bounds of the data")
- copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * sizeof(DestinationType.self), r.count))
+ copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * MemoryLayout<DestinationType>.stride, r.count))
} else {
- copyRange = 0..<Swift.min(buffer.count * sizeof(DestinationType.self), cnt)
+ copyRange = 0..<Swift.min(buffer.count * MemoryLayout<DestinationType>.stride, cnt)
}
guard !copyRange.isEmpty else { return 0 }
- let pointer : UnsafeMutablePointer<UInt8> = UnsafeMutablePointer<UInt8>(buffer.baseAddress!)
- _copyBytesHelper(to: pointer, from: copyRange)
+ let bufferCapacity = buffer.count * sizeof(DestinationType.self)
+ buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: bufferCapacity) {
+ _copyBytesHelper(to: $0, from: copyRange)
+ }
return copyRange.count
}
/// Sets or returns the byte at the specified index.
public subscript(index: Index) -> UInt8 {
var offset = 0
- let subdata = CDispatch.dispatch_data_copy_region(__wrapped, index, &offset)
+ let subdata = CDispatch.dispatch_data_copy_region(__wrapped.__wrapped, index, &offset)
- var ptr: UnsafePointer<Void>? = nil
+ var ptr: UnsafeRawPointer? = nil
var size = 0
let map = CDispatch.dispatch_data_create_map(subdata, &ptr, &size)
defer { _fixLifetime(map) }
- let pptr = UnsafePointer<UInt8>(ptr!)
- return pptr[index - offset]
+ return ptr!.load(fromByteOffset: index - offset, as: UInt8.self)
}
public subscript(bounds: Range<Int>) -> RandomAccessSlice<DispatchData> {
/// - parameter range: The range to copy.
public func subdata(in range: CountableRange<Index>) -> DispatchData {
let subrange = CDispatch.dispatch_data_create_subrange(
- __wrapped, range.startIndex, range.endIndex - range.startIndex)
+ __wrapped.__wrapped, range.startIndex, range.endIndex - range.startIndex)
return DispatchData(data: subrange)
}
public func region(location: Int) -> (data: DispatchData, offset: Int) {
var offset: Int = 0
- let data = CDispatch.dispatch_data_copy_region(__wrapped, location, &offset)
+ let data = CDispatch.dispatch_data_copy_region(__wrapped.__wrapped, location, &offset)
return (DispatchData(data: data), offset)
}
public struct DispatchDataIterator : IteratorProtocol, Sequence {
- /// Create an iterator over the given DisaptchData
+ /// Create an iterator over the given DispatchData
public init(_data: DispatchData) {
- var ptr: UnsafePointer<Void>?
+ var ptr: UnsafeRawPointer?
self._count = 0
- self._data = CDispatch.dispatch_data_create_map(_data.__wrapped, &ptr, &self._count)
- self._ptr = UnsafePointer(ptr!)
+ self._data = __DispatchData(data: CDispatch.dispatch_data_create_map(_data.__wrapped.__wrapped, &ptr, &self._count), owned: true)
+ self._ptr = ptr
self._position = _data.startIndex
+
+ // The only time we expect a 'nil' pointer is when the data is empty.
+ assert(self._ptr != nil || self._count == self._position)
}
/// Advance to the next element and return it, or `nil` if no next
/// element exists.
- ///
- /// - Precondition: No preceding call to `self.next()` has returned `nil`.
public mutating func next() -> DispatchData._Element? {
if _position == _count { return nil }
- let element = _ptr[_position];
+ let element = _ptr.load(fromByteOffset: _position, as: UInt8.self)
_position = _position + 1
return element
}
- internal let _data: dispatch_data_t
- internal var _ptr: UnsafePointer<UInt8>
+ internal let _data: __DispatchData
+ internal var _ptr: UnsafeRawPointer!
internal var _count: Int
internal var _position: DispatchData.Index
}
-typealias _swift_data_applier = @convention(block) @noescape (dispatch_data_t, Int, UnsafePointer<Void>, Int) -> Bool
+typealias _swift_data_applier = @convention(block) (dispatch_data_t, Int, UnsafeRawPointer, Int) -> Bool
@_silgen_name("_swift_dispatch_data_apply")
internal func _swift_dispatch_data_apply(_ data: dispatch_data_t, _ block: _swift_data_applier)
@available(OSX 10.10, iOS 8.0, *)
public static let `default` = DispatchQoS(qosClass: .default, relativePriority: 0)
- @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "DispatchQoS.default")
- @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "DispatchQoS.default")
- @available(*, deprecated, renamed: "DispatchQoS.default")
- public static let defaultQoS = DispatchQoS.default
-
@available(OSX 10.10, iOS 8.0, *)
public static let userInitiated = DispatchQoS(qosClass: .userInitiated, relativePriority: 0)
@available(OSX 10.10, iOS 8.0, *)
case `default`
- @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "QoSClass.default")
- @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "QoSClass.default")
- @available(*, deprecated, renamed: "QoSClass.default")
- static let defaultQoS = QoSClass.default
-
@available(OSX 10.10, iOS 8.0, *)
case userInitiated
case unspecified
+ // _OSQoSClass is internal on Linux, so this initialiser has to
+ // remain as an internal init.
@available(OSX 10.10, iOS 8.0, *)
- internal init?(qosClass: _OSQoSClass) {
- switch qosClass {
+ internal init?(rawValue: _OSQoSClass) {
+ switch rawValue {
case .QOS_CLASS_BACKGROUND: self = .background
case .QOS_CLASS_UTILITY: self = .utility
case .QOS_CLASS_DEFAULT: self = .default
public enum DispatchTimeoutResult {
static let KERN_OPERATION_TIMED_OUT:Int = 49
- case Success
- case TimedOut
+ case success
+ case timedOut
}
/// dispatch_group
public extension DispatchGroup {
- public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @convention(block) () -> ()) {
+ public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @escaping @convention(block) () -> ()) {
if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty {
let item = DispatchWorkItem(qos: qos, flags: flags, block: work)
dispatch_group_notify(self.__wrapped, queue.__wrapped, item._block)
}
public func wait(timeout: DispatchTime) -> DispatchTimeoutResult {
- return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut
+ return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut
}
public func wait(wallTimeout timeout: DispatchWallTime) -> DispatchTimeoutResult {
- return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut
- }
-}
-
-public extension DispatchGroup {
- @available(*, deprecated, renamed: "DispatchGroup.wait(self:wallTimeout:)")
- public func wait(walltime timeout: DispatchWallTime) -> Int {
- switch wait(wallTimeout: timeout) {
- case .Success: return 0
- case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT
- }
+ return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut
}
}
}
public func wait(timeout: DispatchTime) -> DispatchTimeoutResult {
- return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut
+ return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut
}
public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult {
- return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? .Success : .TimedOut
- }
-}
-
-public extension DispatchSemaphore {
- @available(*, deprecated, renamed: "DispatchSemaphore.wait(self:wallTimeout:)")
- public func wait(walltime timeout: DispatchWalltime) -> Int {
- switch wait(wallTimeout: timeout) {
- case .Success: return 0
- case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT
- }
+ return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? .success : .timedOut
}
}
#endif /* USE_OBJC */
-#if 0 /* FIXME -- adding directory to include path may need build-script plumbing to do properly... */
-#include "swift/Runtime/Config.h"
+
+// Replicate the SWIFT_CC(swift) calling convention macro from
+// swift/include/swift/Runtime/Config.h because it is
+// quite awkward to include Config.h and its recursive includes
+// in dispatch. This define must be manually kept in sync with that definition.
+#define SWIFT_CC(CC) SWIFT_CC_##CC
+#if SWIFT_USE_SWIFTCALL
+#define SWIFT_CC_swift __attribute__((swiftcall))
#else
-#define SWIFT_CC(x) /* FIXME!! */
+#define SWIFT_CC_swift
#endif
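// Illustration (editorial, not from the patch): with SWIFT_USE_SWIFTCALL
// defined, SWIFT_CC(swift) on the declaration below expands via
// SWIFT_CC_swift to __attribute__((swiftcall)); otherwise it expands to
// nothing and the default C calling convention is used.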
SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE
public static let strictInterval = IntervalFlags(rawValue: 1)
}
- public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData, error: Int32) -> Void) {
+ public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) {
dispatch_read(fromFileDescriptor, maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in
- handler(data: DispatchData(data: data), error: error)
+ handler(DispatchData(borrowedData: data), error)
}
}
- public class func write(fromFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData?, error: Int32) -> Void) {
- dispatch_write(fromFileDescriptor, data.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in
- handler(data: data.flatMap { DispatchData(data: $0) }, error: error)
+ public class func write(toFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData?, _ error: Int32) -> Void) {
+ dispatch_write(toFileDescriptor, data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in
+ handler(data.flatMap { DispatchData(borrowedData: $0) }, error)
}
}
type: StreamType,
fileDescriptor: Int32,
queue: DispatchQueue,
- cleanupHandler: (error: Int32) -> Void)
+ cleanupHandler: @escaping (_ error: Int32) -> Void)
{
self.init(__type: type.rawValue, fd: fileDescriptor, queue: queue, handler: cleanupHandler)
}
oflag: Int32,
mode: mode_t,
queue: DispatchQueue,
- cleanupHandler: (error: Int32) -> Void)
+ cleanupHandler: @escaping (_ error: Int32) -> Void)
{
self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler)
}
type: StreamType,
io: DispatchIO,
queue: DispatchQueue,
- cleanupHandler: (error: Int32) -> Void)
+ cleanupHandler: @escaping (_ error: Int32) -> Void)
{
self.init(__type: type.rawValue, io: io, queue: queue, handler: cleanupHandler)
}
- public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) {
+ public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: @escaping (_ done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) {
dispatch_io_read(self.__wrapped, offset, length, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in
- ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error)
+ ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error)
}
}
- public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) {
- dispatch_io_write(self.__wrapped, offset, data.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in
- ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error)
+ public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: @escaping (_ done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) {
+ dispatch_io_write(self.__wrapped, offset, data.__wrapped.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in
+ ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error)
}
}
public func setInterval(interval: DispatchTimeInterval, flags: IntervalFlags = []) {
- dispatch_io_set_interval(self.__wrapped, interval.rawValue, flags.rawValue)
+ dispatch_io_set_interval(self.__wrapped, UInt64(interval.rawValue), flags.rawValue)
}
public func close(flags: CloseFlags = []) {
dispatch_io_close(self.__wrapped, flags.rawValue)
}
}
-
-extension DispatchIO {
- @available(*, deprecated, renamed: "DispatchIO.read(fromFileDescriptor:maxLength:runningHandlerOn:handler:)")
- public class func read(fd: Int32, length: Int, queue: DispatchQueue, handler: (DispatchData, Int32) -> Void) {
- DispatchIO.read(fromFileDescriptor: fd, maxLength: length, runningHandlerOn: queue, handler: handler)
- }
-
- @available(*, deprecated, renamed: "DispatchIO.write(fromFileDescriptor:data:runningHandlerOn:handler:)")
- public class func write(fd: Int32, data: DispatchData, queue: DispatchQueue, handler: (DispatchData?, Int32) -> Void) {
- DispatchIO.write(fromFileDescriptor: fd, data: data, runningHandlerOn: queue, handler: handler)
- }
-
- @available(*, deprecated, renamed: "DispatchIO.barrier(self:execute:)")
- public func withBarrier(barrier work: () -> ()) {
- barrier(execute: work)
- }
-
- @available(*, deprecated, renamed: "DispatchIO.setLimit(self:highWater:)")
- public func setHighWater(highWater: Int) {
- setLimit(highWater: highWater)
- }
-
- @available(*, deprecated, renamed: "DispatchIO.setLimit(self:lowWater:)")
- public func setLowWater(lowWater: Int) {
- setLimit(lowWater: lowWater)
- }
-
- @available(*, deprecated, renamed: "DispatchIO.setInterval(self:interval:flags:)")
- public func setInterval(interval: UInt64, flags: IntervalFlags) {
- setInterval(interval: .nanoseconds(Int(interval)), flags: flags)
- }
-}
import CDispatch
-@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)")
+@available(*, unavailable, renamed:"DispatchQueue.init(label:qos:attributes:autoreleaseFrequency:target:)")
public func dispatch_queue_create(_ label: UnsafePointer<Int8>?, _ attr: dispatch_queue_attr_t?) -> DispatchQueue
{
fatalError()
}
-@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)")
+@available(*, unavailable, renamed:"DispatchQueue.init(label:qos:attributes:autoreleaseFrequency:target:)")
public func dispatch_queue_create_with_target(_ label: UnsafePointer<Int8>?, _ attr: dispatch_queue_attr_t?, _ queue: DispatchQueue?) -> DispatchQueue
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchIO.init(type:fileDescriptor:queue:cleanupHandler:)")
-public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO
+public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchIO.init(type:path:oflag:mode:queue:cleanupHandler:)")
-public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer<Int8>, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO
+public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer<Int8>, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchIO.init(type:io:queue:cleanupHandler:)")
-public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO
+public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchIO.read(fileDescriptor:length:queue:handler:)")
-public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ handler: (dispatch_data_t, Int32) -> Void)
+public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ handler: @escaping (dispatch_data_t, Int32) -> Void)
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchIO.read(self:offset:length:queue:ioHandler:)")
-func dispatch_io_read(_ channel: DispatchIO, _ offset: off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void)
+func dispatch_io_read(_ channel: DispatchIO, _ offset: off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: @escaping (Bool, dispatch_data_t?, Int32) -> Void)
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchIO.write(self:offset:data:queue:ioHandler:)")
-func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void)
+func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: @escaping (Bool, dispatch_data_t?, Int32) -> Void)
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchIO.write(fileDescriptor:data:queue:handler:)")
-func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: (dispatch_data_t?, Int32) -> Void)
+func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: @escaping (dispatch_data_t?, Int32) -> Void)
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchData.init(bytes:)")
-public func dispatch_data_create(_ buffer: UnsafePointer<Void>, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) -> dispatch_data_t
+public func dispatch_data_create(_ buffer: UnsafeRawPointer, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) -> dispatch_data_t
{
fatalError()
}
}
@available(*, unavailable, renamed:"DispatchData.withUnsafeBytes(self:body:)")
-public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer<UnsafePointer<Void>?>?, _ size_ptr: UnsafeMutablePointer<Int>?) -> dispatch_data_t
+public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer<UnsafeRawPointer?>?, _ size_ptr: UnsafeMutablePointer<Int>?) -> dispatch_data_t
{
fatalError()
}
}
@available(*, unavailable, renamed:"DispatchData.enumerateBytes(self:block:)")
-public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: (dispatch_data_t, Int, UnsafePointer<Void>, Int) -> Bool) -> Bool
+public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: @escaping (dispatch_data_t, Int, UnsafeRawPointer, Int) -> Bool) -> Bool
{
fatalError()
}
}
@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)")
-public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void)
+public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void)
{
fatalError()
}
@available(*, unavailable, renamed: "DispatchGroup.notify(self:qos:flags:queue:execute:)")
-public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void)
+public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void)
{
fatalError()
}
}
@available(*, unavailable, renamed:"DispatchQueue.apply(attributes:iterations:execute:)")
-public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: @noescape (Int) -> Void)
+public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: (Int) -> Void)
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:execute:)")
-public func dispatch_async(_ queue: DispatchQueue, _ block: () -> Void)
+public func dispatch_async(_ queue: DispatchQueue, _ block: @escaping () -> Void)
{
fatalError()
}
}
@available(*, unavailable, renamed: "DispatchQueue.main")
-public func dispatch_get_main_queue() -> DispatchQueue
+public func dispatch_get_main_queue() -> DispatchQueue
{
fatalError()
}
-@available(*, unavailable, renamed:"DispatchQueueAttributes.initiallyInactive")
+@available(*, unavailable, renamed:"DispatchQueue.Attributes.initiallyInactive")
public func dispatch_queue_attr_make_initially_inactive(_ attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t
{
fatalError()
}
-@available(*, unavailable, renamed:"DispatchQueueAttributes.autoreleaseWorkItem")
+@available(*, unavailable, renamed:"DispatchQueue.AutoreleaseFrequency.workItem")
public func dispatch_queue_attr_make_with_autorelease_frequency(_ attr: dispatch_queue_attr_t?, _ frequency: dispatch_autorelease_frequency_t) -> dispatch_queue_attr_t
{
fatalError()
}
-@available(*, unavailable, renamed:"DispatchQueueAttributes.qosUserInitiated")
+@available(*, unavailable, renamed:"DispatchQoS")
public func dispatch_queue_attr_make_with_qos_class(_ attr: dispatch_queue_attr_t?, _ qos_class: dispatch_qos_class_t, _ relative_priority: Int32) -> dispatch_queue_attr_t
{
fatalError()
}
-@available(*, unavailable, renamed:"DispatchQueue.after(self:when:execute:)")
-public func dispatch_after(_ when: dispatch_time_t, _ queue: DispatchQueue, _ block: () -> Void)
+@available(*, unavailable, renamed:"DispatchQueue.asyncAfter(self:deadline:qos:flags:execute:)")
+public func dispatch_after(_ when: dispatch_time_t, _ queue: DispatchQueue, _ block: @escaping () -> Void)
{
fatalError()
}
-@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)")
-public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: () -> Void)
+@available(*, unavailable, renamed:"DispatchQueue.async(self:group:qos:flags:execute:)")
+public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: @escaping () -> Void)
{
fatalError()
}
-@available(*, unavailable, renamed:"DispatchQueue.synchronously(self:flags:execute:)")
-public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: @noescape () -> Void)
+@available(*, unavailable, renamed:"DispatchQueue.sync(self:flags:execute:)")
+public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: () -> Void)
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchQueue.setSpecific(self:key:value:)")
-public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafePointer<Void>, _ context: UnsafeMutablePointer<Void>?, _ destructor: (@convention(c) (UnsafeMutablePointer<Void>?) -> Void)?)
+public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafeRawPointer, _ context: UnsafeMutableRawPointer?, _ destructor: (@convention(c) (UnsafeMutableRawPointer?) -> Void)?)
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchQueue.getSpecific(self:key:)")
-public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafePointer<Void>) -> UnsafeMutablePointer<Void>?
+public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafeRawPointer) -> UnsafeMutableRawPointer?
{
fatalError()
}
@available(*, unavailable, renamed:"DispatchQueue.getSpecific(key:)")
-public func dispatch_get_specific(_ key: UnsafePointer<Void>) -> UnsafeMutablePointer<Void>?
+public func dispatch_get_specific(_ key: UnsafeRawPointer) -> UnsafeMutableRawPointer?
{
fatalError()
}
fatalError()
}
-@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUserInitiated")
+@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.high")
public var DISPATCH_QUEUE_PRIORITY_HIGH: Int {
fatalError()
}
-@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosDefault")
+@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.default")
public var DISPATCH_QUEUE_PRIORITY_DEFAULT: Int {
fatalError()
}
-@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUtility")
+@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.low")
public var DISPATCH_QUEUE_PRIORITY_LOW: Int {
fatalError()
}
-@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosBackground")
+@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.background")
public var DISPATCH_QUEUE_PRIORITY_BACKGROUND: Int {
fatalError()
}
import CDispatch
-public struct DispatchQueueAttributes : OptionSet {
- public let rawValue: UInt64
- public init(rawValue: UInt64) { self.rawValue = rawValue }
-
- public static let serial = DispatchQueueAttributes(rawValue: 0<<0)
- public static let concurrent = DispatchQueueAttributes(rawValue: 1<<1)
-
- @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *)
- public static let initiallyInactive = DispatchQueueAttributes(rawValue: 1<<2)
-
- @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *)
- public static let autoreleaseInherit = DispatchQueueAttributes(rawValue: 1<<3)
-
- @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *)
- public static let autoreleaseWorkItem = DispatchQueueAttributes(rawValue: 1<<4)
-
- @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *)
- public static let autoreleaseNever = DispatchQueueAttributes(rawValue: 1<<5)
-
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosUserInteractive = DispatchQueueAttributes(rawValue: 1<<6)
-
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosUserInitiated = DispatchQueueAttributes(rawValue: 1<<7)
-
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosDefault = DispatchQueueAttributes(rawValue: 1<<8)
-
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosUtility = DispatchQueueAttributes(rawValue: 1<<9)
-
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosBackground = DispatchQueueAttributes(rawValue: 1<<10)
-
- @available(*, deprecated, message: ".noQoS has no effect, it should not be used")
- public static let noQoS = DispatchQueueAttributes(rawValue: 1<<11)
-
- private var attr: dispatch_queue_attr_t? {
- var attr: dispatch_queue_attr_t?
-
- if self.contains(.concurrent) {
- attr = _swift_dispatch_queue_concurrent()
- }
- if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) {
- if self.contains(.initiallyInactive) {
- attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr)
- }
- if self.contains(.autoreleaseWorkItem) {
- // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM
- attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1))
- } else if self.contains(.autoreleaseInherit) {
- // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT
- attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0))
- } else if self.contains(.autoreleaseNever) {
- // DISPATCH_AUTORELEASE_FREQUENCY_NEVER
- attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2))
- }
- }
- if #available(OSX 10.10, iOS 8.0, *) {
- if self.contains(.qosUserInteractive) {
- attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue, 0)
- } else if self.contains(.qosUserInitiated) {
- attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue, 0)
- } else if self.contains(.qosDefault) {
- attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_DEFAULT.rawValue, 0)
- } else if self.contains(.qosUtility) {
- attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_UTILITY.rawValue, 0)
- } else if self.contains(.qosBackground) {
- attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_BACKGROUND.rawValue, 0)
- }
- }
- return attr
- }
-}
-
-
public final class DispatchSpecificKey<T> {
public init() {}
}
}
public extension DispatchQueue {
-
- public struct GlobalAttributes : OptionSet {
+ public struct Attributes : OptionSet {
public let rawValue: UInt64
public init(rawValue: UInt64) { self.rawValue = rawValue }
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosUserInteractive = GlobalAttributes(rawValue: 1<<0)
-
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosUserInitiated = GlobalAttributes(rawValue: 1<<1)
+ public static let concurrent = Attributes(rawValue: 1<<1)
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosDefault = GlobalAttributes(rawValue: 1<<2)
+ @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *)
+ public static let initiallyInactive = Attributes(rawValue: 1<<2)
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosUtility = GlobalAttributes(rawValue: 1<<3)
+ fileprivate func _attr() -> dispatch_queue_attr_t? {
+ var attr: dispatch_queue_attr_t? = nil
- @available(OSX 10.10, iOS 8.0, *)
- public static let qosBackground = GlobalAttributes(rawValue: 1<<4)
-
- // Avoid using our own deprecated constants here by declaring
- // non-deprecated constants and then basing the public ones on those.
- internal static let _priorityHigh = GlobalAttributes(rawValue: 1<<5)
- internal static let _priorityDefault = GlobalAttributes(rawValue: 1<<6)
- internal static let _priorityLow = GlobalAttributes(rawValue: 1<<7)
- internal static let _priorityBackground = GlobalAttributes(rawValue: 1<<8)
+ if self.contains(.concurrent) {
+ attr = _swift_dispatch_queue_concurrent()
+ }
+ if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) {
+ if self.contains(.initiallyInactive) {
+ attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr)
+ }
+ }
+ return attr
+ }
+ }
+ public enum GlobalQueuePriority {
@available(OSX, deprecated: 10.10, message: "Use qos attributes instead")
@available(*, deprecated: 8.0, message: "Use qos attributes instead")
- public static let priorityHigh = _priorityHigh
+ case high
@available(OSX, deprecated: 10.10, message: "Use qos attributes instead")
@available(*, deprecated: 8.0, message: "Use qos attributes instead")
- public static let priorityDefault = _priorityDefault
+ case `default`
@available(OSX, deprecated: 10.10, message: "Use qos attributes instead")
@available(*, deprecated: 8.0, message: "Use qos attributes instead")
- public static let priorityLow = _priorityLow
+ case low
@available(OSX, deprecated: 10.10, message: "Use qos attributes instead")
@available(*, deprecated: 8.0, message: "Use qos attributes instead")
- public static let priorityBackground = _priorityBackground
+ case background
internal var _translatedValue: Int {
- if #available(OSX 10.10, iOS 8.0, *) {
- if self.contains(.qosUserInteractive) { return Int(_OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue) }
- else if self.contains(.qosUserInitiated) { return Int(_OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue) }
- else if self.contains(.qosDefault) { return Int(_OSQoSClass.QOS_CLASS_DEFAULT.rawValue) }
- else if self.contains(.qosUtility) { return Int(_OSQoSClass.QOS_CLASS_UTILITY.rawValue) }
- else { return Int(_OSQoSClass.QOS_CLASS_BACKGROUND.rawValue) }
+ switch self {
+ case .high: return 2 // DISPATCH_QUEUE_PRIORITY_HIGH
+ case .default: return 0 // DISPATCH_QUEUE_PRIORITY_DEFAULT
+ case .low: return -2 // DISPATCH_QUEUE_PRIORITY_LOW
+ case .background: return Int(Int16.min) // DISPATCH_QUEUE_PRIORITY_BACKGROUND
}
- if self.contains(._priorityHigh) { return 2 } // DISPATCH_QUEUE_PRIORITY_HIGH
- else if self.contains(._priorityDefault) { return 0 } // DISPATCH_QUEUE_PRIORITY_DEFAULT
- else if self.contains(._priorityLow) { return -2 } // // DISPATCH_QUEUE_PRIORITY_LOW
- else if self.contains(._priorityBackground) { return Int(Int16.min) } // // DISPATCH_QUEUE_PRIORITY_BACKGROUND
- return 0
}
}
- public class func concurrentPerform(iterations: Int, execute work: @noescape (Int) -> Void) {
+ public enum AutoreleaseFrequency {
+ case inherit
+
+ @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *)
+ case workItem
+
+ @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *)
+ case never
+
+ internal func _attr(attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t? {
+ if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) {
+ switch self {
+ case .inherit:
+ // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT
+ return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0))
+ case .workItem:
+ // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM
+ return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1))
+ case .never:
+ // DISPATCH_AUTORELEASE_FREQUENCY_NEVER
+ return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2))
+ }
+ } else {
+ return attr
+ }
+ }
+ }
+
+ public class func concurrentPerform(iterations: Int, execute work: (Int) -> Void) {
_swift_dispatch_apply_current(iterations, work)
}
return DispatchQueue(queue: _swift_dispatch_get_main_queue())
}
- public class func global(attributes: GlobalAttributes = []) -> DispatchQueue {
- // SubOptimal? Should we be caching these global DispatchQueue objects?
- return DispatchQueue(queue:dispatch_get_global_queue(attributes._translatedValue, 0))
+ @available(OSX, deprecated: 10.10, message: "")
+ @available(*, deprecated: 8.0, message: "")
+ public class func global(priority: GlobalQueuePriority) -> DispatchQueue {
+ return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(priority._translatedValue, 0))
+ }
+
+ @available(OSX 10.10, iOS 8.0, *)
+ public class func global(qos: DispatchQoS.QoSClass = .default) -> DispatchQueue {
+ return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(Int(qos.rawValue.rawValue), 0))
}
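// Illustrative usage sketch (not part of the patch): with the accessors above,
// callers select a global queue by QoS class; the priority-based accessor is
// kept only for source compatibility and emits a deprecation warning.
import Dispatch

DispatchQueue.global(qos: .userInitiated).async {
    print("runs on the user-initiated global queue")
}
let legacy = DispatchQueue.global(priority: .background)   // deprecated spelling
legacy.async { print("same global queue family, selected by priority") }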
public class func getSpecific<T>(key: DispatchSpecificKey<T>) -> T? {
public convenience init(
label: String,
- attributes: DispatchQueueAttributes = .serial,
+ qos: DispatchQoS = .unspecified,
+ attributes: Attributes = [],
+ autoreleaseFrequency: AutoreleaseFrequency = .inherit,
target: DispatchQueue? = nil)
{
+ var attr = attributes._attr()
+ if autoreleaseFrequency != .inherit {
+ attr = autoreleaseFrequency._attr(attr: attr)
+ }
+ if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified {
+ attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority))
+ }
+
if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) {
- self.init(__label: label, attr: attributes.attr, queue: target)
+ self.init(__label: label, attr: attr, queue: target)
} else {
- self.init(__label: label, attr: attributes.attr)
+ self.init(__label: label, attr: attr)
if let tq = target { self.setTarget(queue: tq) }
}
}
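// Illustrative usage sketch (not part of the patch): the reworked initializer
// folds QoS and attributes into a single call; the label is hypothetical.
import Dispatch

let worker = DispatchQueue(label: "com.example.worker",
                           qos: .utility,
                           attributes: .concurrent)
worker.async { print("concurrent utility work") }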
@available(OSX 10.10, iOS 8.0, *)
public func sync(execute workItem: DispatchWorkItem) {
- dispatch_sync(self.__wrapped, workItem._block)
+ CDispatch.dispatch_sync(self.__wrapped, workItem._block)
}
@available(OSX 10.10, iOS 8.0, *)
public func async(execute workItem: DispatchWorkItem) {
- // _swift_dispatch_{group,}_async preserves the @convention(block)
- // for work item blocks.
- if let g = workItem._group {
- dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block)
- } else {
- dispatch_async(self.__wrapped, workItem._block)
- }
+ CDispatch.dispatch_async(self.__wrapped, workItem._block)
}
- public func async(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) {
- if group == nil && qos == .unspecified && flags.isEmpty {
+ @available(OSX 10.10, iOS 8.0, *)
+ public func async(group: DispatchGroup, execute workItem: DispatchWorkItem) {
+ CDispatch.dispatch_group_async(group.__wrapped, self.__wrapped, workItem._block)
+ }
+
+ public func async(
+ group: DispatchGroup? = nil,
+ qos: DispatchQoS = .unspecified,
+ flags: DispatchWorkItemFlags = [],
+ execute work: @escaping @convention(block) () -> Void)
+ {
+ if group == nil && qos == .unspecified {
// Fast-path route for the most common API usage
- dispatch_async(self.__wrapped, work)
- return
+ if flags.isEmpty {
+ CDispatch.dispatch_async(self.__wrapped, work)
+ return
+ } else if flags == .barrier {
+ CDispatch.dispatch_barrier_async(self.__wrapped, work)
+ return
+ }
}
+ var block: @convention(block) () -> Void = work
if #available(OSX 10.10, iOS 8.0, *), (qos != .unspecified || !flags.isEmpty) {
let workItem = DispatchWorkItem(qos: qos, flags: flags, block: work)
- if let g = group {
- dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block)
- } else {
- dispatch_async(self.__wrapped, workItem._block)
- }
+ block = workItem._block
+ }
+
+ if let g = group {
+ CDispatch.dispatch_group_async(g.__wrapped, self.__wrapped, block)
} else {
- if let g = group {
- dispatch_group_async(g.__wrapped, self.__wrapped, work)
- } else {
- dispatch_async(self.__wrapped, work)
- }
+ CDispatch.dispatch_async(self.__wrapped, block)
}
}
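// Illustrative sketch (not part of the patch): with the fast paths above, a plain
// async and a barrier async avoid wrapping the block in a DispatchWorkItem when
// no group or QoS override is supplied. Queue label and cache are hypothetical.
import Dispatch

let store = DispatchQueue(label: "com.example.store", attributes: .concurrent)
var cache: [String: Int] = [:]

store.async {
    _ = cache["answer"]          // reader-style work may run concurrently
}
store.async(flags: .barrier) {
    cache["answer"] = 42         // barrier work runs exclusively on the queue
}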
- private func _syncBarrier(block: @noescape () -> ()) {
- dispatch_barrier_sync(self.__wrapped, block)
+ private func _syncBarrier(block: () -> ()) {
+ CDispatch.dispatch_barrier_sync(self.__wrapped, block)
}
private func _syncHelper<T>(
- fn: (@noescape () -> ()) -> (),
- execute work: @noescape () throws -> T,
+ fn: (() -> ()) -> (),
+ execute work: () throws -> T,
rescue: ((Swift.Error) throws -> (T))) rethrows -> T
{
var result: T?
@available(OSX 10.10, iOS 8.0, *)
private func _syncHelper<T>(
- fn: (DispatchWorkItem) -> (),
+ fn: (DispatchWorkItem) -> (),
flags: DispatchWorkItemFlags,
- execute work: @noescape () throws -> T,
- rescue: ((Swift.Error) throws -> (T))) rethrows -> T
+ execute work: () throws -> T,
+ rescue: @escaping ((Swift.Error) throws -> (T))) rethrows -> T
{
var result: T?
var error: Swift.Error?
do {
result = try work()
} catch let e {
- error = e
+ error = e
}
})
fn(workItem)
}
}
- public func sync<T>(execute work: @noescape () throws -> T) rethrows -> T {
+ public func sync<T>(execute work: () throws -> T) rethrows -> T {
return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 })
}
- public func sync<T>(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T {
+ public func sync<T>(flags: DispatchWorkItemFlags, execute work: () throws -> T) rethrows -> T {
if flags == .barrier {
return try self._syncHelper(fn: _syncBarrier, execute: work, rescue: { throw $0 })
} else if #available(OSX 10.10, iOS 8.0, *), !flags.isEmpty {
}
}
- public func after(when: DispatchTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) {
+ public func asyncAfter(
+ deadline: DispatchTime,
+ qos: DispatchQoS = .unspecified,
+ flags: DispatchWorkItemFlags = [],
+ execute work: @escaping @convention(block) () -> Void)
+ {
if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty {
let item = DispatchWorkItem(qos: qos, flags: flags, block: work)
- dispatch_after(when.rawValue, self.__wrapped, item._block)
+ CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, item._block)
} else {
- dispatch_after(when.rawValue, self.__wrapped, work)
+ CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, work)
}
}
- @available(OSX 10.10, iOS 8.0, *)
- public func after(when: DispatchTime, execute: DispatchWorkItem) {
- dispatch_after(when.rawValue, self.__wrapped, execute._block)
- }
-
- public func after(walltime when: DispatchWallTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) {
+ public func asyncAfter(
+ wallDeadline: DispatchWallTime,
+ qos: DispatchQoS = .unspecified,
+ flags: DispatchWorkItemFlags = [],
+ execute work: @escaping @convention(block) () -> Void)
+ {
if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty {
let item = DispatchWorkItem(qos: qos, flags: flags, block: work)
- dispatch_after(when.rawValue, self.__wrapped, item._block)
+ CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, item._block)
} else {
- dispatch_after(when.rawValue, self.__wrapped, work)
+ CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, work)
}
}
@available(OSX 10.10, iOS 8.0, *)
- public func after(walltime when: DispatchWallTime, execute: DispatchWorkItem) {
- dispatch_after(when.rawValue, self.__wrapped, execute._block)
+ public func asyncAfter(deadline: DispatchTime, execute: DispatchWorkItem) {
+ CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, execute._block)
+ }
+
+ @available(OSX 10.10, iOS 8.0, *)
+ public func asyncAfter(wallDeadline: DispatchWallTime, execute: DispatchWorkItem) {
+ CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, execute._block)
}
@available(OSX 10.10, iOS 8.0, *)
public var qos: DispatchQoS {
var relPri: Int32 = 0
- let cls = DispatchQoS.QoSClass(qosClass: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)!
+ let cls = DispatchQoS.QoSClass(rawValue: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)!
return DispatchQoS(qosClass: cls, relativePriority: Int(relPri))
}
}
}
-extension DispatchQueue {
- @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)")
- public func synchronously(execute work: @noescape () -> ()) {
- sync(execute: work)
- }
-
- @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.sync(self:execute:)")
- @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.sync(self:execute:)")
- @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)")
- public func synchronously(execute workItem: DispatchWorkItem) {
- sync(execute: workItem)
- }
-
- @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.async(self:execute:)")
- @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.async(self:execute:)")
- @available(*, deprecated, renamed: "DispatchQueue.async(self:execute:)")
- public func asynchronously(execute workItem: DispatchWorkItem) {
- async(execute: workItem)
- }
-
- @available(*, deprecated, renamed: "DispatchQueue.async(self:group:qos:flags:execute:)")
- public func asynchronously(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) {
- async(group: group, qos: qos, flags: flags, execute: work)
- }
-
- @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)")
- public func synchronously<T>(execute work: @noescape () throws -> T) rethrows -> T {
- return try sync(execute: work)
- }
-
- @available(*, deprecated, renamed: "DispatchQueue.sync(self:flags:execute:)")
- public func synchronously<T>(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T {
- return try sync(flags: flags, execute: work)
- }
-
- @available(*, deprecated, renamed: "DispatchQueue.concurrentPerform(iterations:execute:)")
- public func apply(applier iterations: Int, execute block: @noescape (Int) -> Void) {
- DispatchQueue.concurrentPerform(iterations: iterations, execute: block)
- }
-
- @available(*, deprecated, renamed: "DispatchQueue.setTarget(self:queue:)")
- public func setTargetQueue(queue: DispatchQueue) {
- self.setTarget(queue: queue)
- }
-}
-
-private func _destructDispatchSpecificValue(ptr: UnsafeMutablePointer<Void>?) {
+private func _destructDispatchSpecificValue(ptr: UnsafeMutableRawPointer?) {
if let p = ptr {
Unmanaged<AnyObject>.fromOpaque(p).release()
}
internal func _swift_dispatch_apply_current_root_queue() -> dispatch_queue_t
@_silgen_name("_swift_dispatch_apply_current")
-internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) @noescape (Int) -> Void)
+internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) (Int) -> Void)
import CDispatch
-public extension DispatchSourceType {
+public extension DispatchSourceProtocol {
public func setEventHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) {
- if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty {
+ if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty {
let item = DispatchWorkItem(qos: qos, flags: flags, block: h)
CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, item._block)
} else {
}
public func setCancelHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) {
- if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty {
+ if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty {
let item = DispatchWorkItem(qos: qos, flags: flags, block: h)
CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, item._block)
} else {
}
public func setRegistrationHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) {
- if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty {
+ if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty {
let item = DispatchWorkItem(qos: qos, flags: flags, block: h)
CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, item._block)
} else {
}
#if HAVE_MACH
- public class func machSend(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend {
+ public class func makeMachSendSource(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend {
let source = dispatch_source_create(_swift_dispatch_source_type_mach_send(), UInt(port), eventMask.rawValue, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceMachSend
}
#endif
#if HAVE_MACH
- public class func machReceive(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive {
+ public class func makeMachReceiveSource(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive {
let source = dispatch_source_create(_swift_dispatch_source_type_mach_recv(), UInt(port), 0, queue?.__wrapped)
return DispatchSource(source) as DispatchSourceMachReceive
}
#endif
#if HAVE_MACH
- public class func memoryPressure(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure {
+ public class func makeMemoryPressureSource(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure {
let source = dispatch_source_create(_swift_dispatch_source_type_memorypressure(), 0, eventMask.rawValue, queue.__wrapped)
return DispatchSourceMemoryPressure(source)
}
#endif
#if !os(Linux)
- public class func process(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess {
+ public class func makeProcessSource(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess {
let source = dispatch_source_create(_swift_dispatch_source_type_proc(), UInt(identifier), eventMask.rawValue, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceProcess
}
#endif
- public class func read(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead {
+ public class func makeReadSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead {
let source = dispatch_source_create(_swift_dispatch_source_type_read(), UInt(fileDescriptor), 0, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceRead
}
- public class func signal(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal {
+ public class func makeSignalSource(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal {
let source = dispatch_source_create(_swift_dispatch_source_type_signal(), UInt(signal), 0, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceSignal
}
- public class func timer(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer {
+ public class func makeTimerSource(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer {
let source = dispatch_source_create(_swift_dispatch_source_type_timer(), 0, flags.rawValue, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceTimer
}
- public class func userDataAdd(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd {
+ public class func makeUserDataAddSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd {
let source = dispatch_source_create(_swift_dispatch_source_type_data_add(), 0, 0, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceUserDataAdd
}
- public class func userDataOr(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr {
+ public class func makeUserDataOrSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr {
let source = dispatch_source_create(_swift_dispatch_source_type_data_or(), 0, 0, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceUserDataOr
}
#if !os(Linux)
- public class func fileSystemObject(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject {
+ public class func makeFileSystemObjectSource(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject {
let source = dispatch_source_create(_swift_dispatch_source_type_vnode(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceFileSystemObject
}
#endif
- public class func write(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite {
+ public class func makeWriteSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite {
let source = dispatch_source_create(_swift_dispatch_source_type_write(), UInt(fileDescriptor), 0, queue?.__wrapped)
return DispatchSource(source: source) as DispatchSourceWrite
}
}
public func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.rawValue, UInt64(leeway.rawValue))
+ dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue))
}
public func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
}
public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.rawValue, UInt64(leeway.rawValue))
+ dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue))
}
public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
}
}
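// Illustrative sketch (not part of the patch): the make*Source factory methods
// above replace the old noun-style constructors; a repeating timer now reads as
// follows (queue label is hypothetical).
import Dispatch

let timerQueue = DispatchQueue(label: "com.example.timer-source")
let timer = DispatchSource.makeTimerSource(queue: timerQueue)
timer.scheduleRepeating(deadline: .now() + .seconds(1),
                        interval: .seconds(1),
                        leeway: .milliseconds(10))
timer.setEventHandler {
    print("tick")
}
timer.resume()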
-public extension DispatchSourceTimer {
- @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:deadline:leeway:)")
- public func setTimer(start: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- scheduleOneshot(deadline: start, leeway: leeway)
- }
-
- @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:wallDeadline:leeway:)")
- public func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- scheduleOneshot(wallDeadline: start, leeway: leeway)
- }
-
- @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)")
- public func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- scheduleRepeating(deadline: start, interval: interval, leeway: leeway)
- }
-
- @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)")
- public func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- scheduleRepeating(deadline: start, interval: interval, leeway: leeway)
- }
-
- @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)")
- public func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway)
- }
-
- @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)")
- public func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) {
- scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway)
- }
-}
-
#if !os(Linux)
public extension DispatchSourceFileSystemObject {
public var handle: Int32 {
/// The value to coalesce with the pending data using a logical OR or an ADD
/// as specified by the dispatch source type. A value of zero has no effect
/// and will not result in the submission of the event handler block.
- public func mergeData(value: UInt) {
- dispatch_source_merge_data((self as! DispatchSource).__wrapped, value)
+ public func add(data: UInt) {
+ dispatch_source_merge_data((self as! DispatchSource).__wrapped, data)
}
}
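// Illustrative sketch (not part of the patch): merging counter data into a
// user-data-add source with the renamed add(data:). Assumes the usual `data`
// accessor on DispatchSourceUserDataAdd; the queue label is hypothetical.
import Dispatch

let adderQueue = DispatchQueue(label: "com.example.adder")
let adder = DispatchSource.makeUserDataAddSource(queue: adderQueue)
adder.setEventHandler {
    print("coalesced value: \(adder.data)")
}
adder.resume()
adder.add(data: 1)      // a value of zero would not fire the handler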
public extension DispatchSourceUserDataOr {
-#if false /*FIXME: clashes with UserDataAdd?? */
/// @function mergeData
///
/// @abstract
/// The value to coalesce with the pending data using a logical OR or an ADD
/// as specified by the dispatch source type. A value of zero has no effect
/// and will not result in the submission of the event handler block.
- public func mergeData(value: UInt) {
- dispatch_source_merge_data((self as! DispatchSource).__wrapped, value)
+ public func or(data: UInt) {
+ dispatch_source_merge_data((self as! DispatchSource).__wrapped, data)
}
-#endif
}
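// Illustrative sketch (not part of the patch): the OR-coalescing counterpart,
// previously compiled out, is now exposed as or(data:). Assumes the usual `data`
// accessor on DispatchSourceUserDataOr; the queue label is hypothetical.
import Dispatch

let flagsQueue = DispatchQueue(label: "com.example.flags")
let flagsSource = DispatchSource.makeUserDataOrSource(queue: flagsQueue)
flagsSource.setEventHandler {
    print("pending bits: \(String(flagsSource.data, radix: 2))")
}
flagsSource.resume()
flagsSource.or(data: 0b100)     // bits are ORed into the pending data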
@_silgen_name("_swift_dispatch_source_type_DATA_ADD")
import CDispatch
-public struct DispatchTime {
+public struct DispatchTime : Comparable {
public let rawValue: dispatch_time_t
public static func now() -> DispatchTime {
public static let distantFuture = DispatchTime(rawValue: ~0)
- private init(rawValue: dispatch_time_t) {
+ fileprivate init(rawValue: dispatch_time_t) {
self.rawValue = rawValue
}
+
+ /// Creates a `DispatchTime` relative to the system clock that
+ /// ticks since boot.
+ ///
+ /// - Parameters:
+ /// - uptimeNanoseconds: The number of nanoseconds since boot, excluding
+ /// time the system spent asleep
+ /// - Returns: A new `DispatchTime`
+ public init(uptimeNanoseconds: UInt64) {
+ self.rawValue = dispatch_time_t(uptimeNanoseconds)
+ }
+
+ public var uptimeNanoseconds: UInt64 {
+ return UInt64(self.rawValue)
+ }
+}
+
+public func <(a: DispatchTime, b: DispatchTime) -> Bool {
+ if a.rawValue == ~0 || b.rawValue == ~0 { return false }
+ return a.rawValue < b.rawValue
}
-public struct DispatchWallTime {
+public func ==(a: DispatchTime, b: DispatchTime) -> Bool {
+ return a.rawValue == b.rawValue
+}
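// Illustrative sketch (not part of the patch): DispatchTime now round-trips through
// uptimeNanoseconds and participates in Comparable; note that the < operator above
// deliberately treats the ~0 sentinel (distantFuture) as incomparable.
import Dispatch

let start = DispatchTime.now()
let later = DispatchTime(uptimeNanoseconds: start.uptimeNanoseconds + 5_000_000)
print(start < later)                          // true: 5ms further along the uptime clock
print(later < DispatchTime.distantFuture)     // false: comparisons against ~0 return false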
+
+public struct DispatchWallTime : Comparable {
public let rawValue: dispatch_time_t
public static func now() -> DispatchWallTime {
public static let distantFuture = DispatchWallTime(rawValue: ~0)
- private init(rawValue: dispatch_time_t) {
+ fileprivate init(rawValue: dispatch_time_t) {
self.rawValue = rawValue
}
- public init(time: timespec) {
- var t = time
+ public init(timespec: timespec) {
+ var t = timespec
self.rawValue = CDispatch.dispatch_walltime(&t, 0)
}
}
-@available(*, deprecated, renamed: "DispatchWallTime")
-public typealias DispatchWalltime = DispatchWallTime
+public func <(a: DispatchWallTime, b: DispatchWallTime) -> Bool {
+ if a.rawValue == ~0 || b.rawValue == ~0 { return false }
+ return -Int64(a.rawValue) < -Int64(b.rawValue)
+}
+
+public func ==(a: DispatchWallTime, b: DispatchWallTime) -> Bool {
+ return a.rawValue == b.rawValue
+}
public enum DispatchTimeInterval {
case seconds(Int)
case microseconds(Int)
case nanoseconds(Int)
- internal var rawValue: UInt64 {
+ internal var rawValue: Int64 {
switch self {
- case .seconds(let s): return UInt64(s) * NSEC_PER_SEC
- case .milliseconds(let ms): return UInt64(ms) * NSEC_PER_MSEC
- case .microseconds(let us): return UInt64(us) * NSEC_PER_USEC
- case .nanoseconds(let ns): return UInt64(ns)
+ case .seconds(let s): return Int64(s) * Int64(NSEC_PER_SEC)
+ case .milliseconds(let ms): return Int64(ms) * Int64(NSEC_PER_MSEC)
+ case .microseconds(let us): return Int64(us) * Int64(NSEC_PER_USEC)
+ case .nanoseconds(let ns): return Int64(ns)
}
}
}
public func +(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime {
- let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue))
+ let t = CDispatch.dispatch_time(time.rawValue, interval.rawValue)
return DispatchTime(rawValue: t)
}
public func -(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime {
- let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue))
+ let t = CDispatch.dispatch_time(time.rawValue, -interval.rawValue)
return DispatchTime(rawValue: t)
}
}
public func +(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime {
- let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue))
+ let t = CDispatch.dispatch_time(time.rawValue, interval.rawValue)
return DispatchWallTime(rawValue: t)
}
public func -(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime {
- let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue))
+ let t = CDispatch.dispatch_time(time.rawValue, -interval.rawValue)
return DispatchWallTime(rawValue: t)
}
// This file contains declarations that are provided by the
// importer via Dispatch.apinote when the platform has Objective-C support
+public func dispatchMain() -> Never {
+ CDispatch.dispatch_main()
+}
+
public class DispatchObject {
internal func wrapped() -> dispatch_object_t {
}
public func leave() {
- dispatch_group_enter(__wrapped)
+ dispatch_group_leave(__wrapped)
}
}
}
internal init(__type: UInt, fd: Int32, queue: DispatchQueue,
- handler: (error: Int32) -> Void) {
+ handler: @escaping (_ error: Int32) -> Void) {
__wrapped = dispatch_io_create(__type, fd, queue.__wrapped, handler)
}
internal init(__type: UInt, path: UnsafePointer<Int8>, oflag: Int32,
- mode: mode_t, queue: DispatchQueue, handler: (error: Int32) -> Void) {
+ mode: mode_t, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) {
__wrapped = dispatch_io_create_with_path(__type, path, oflag, mode, queue.__wrapped, handler)
}
internal init(__type: UInt, io: DispatchIO,
- queue: DispatchQueue, handler: (error: Int32) -> Void) {
+ queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) {
__wrapped = dispatch_io_create_with_io(__type, io.__wrapped, queue.__wrapped, handler)
}
- internal init(queue:dispatch_queue_t) {
- __wrapped = queue
- }
-
deinit {
_swift_dispatch_release(wrapped())
}
- public func barrier(execute: () -> ()) {
+ public func barrier(execute: @escaping () -> ()) {
dispatch_io_barrier(self.__wrapped, execute)
}
_swift_dispatch_release(wrapped())
}
- public func sync(execute workItem: @noescape ()->()) {
+ public func sync(execute workItem: ()->()) {
dispatch_sync(self.__wrapped, workItem)
}
}
public class DispatchSource : DispatchObject,
- DispatchSourceType, DispatchSourceRead,
+ DispatchSourceProtocol, DispatchSourceRead,
DispatchSourceSignal, DispatchSourceTimer,
DispatchSourceUserDataAdd, DispatchSourceUserDataOr,
DispatchSourceWrite {
}
#endif
+internal class __DispatchData : DispatchObject {
+ internal let __wrapped:dispatch_data_t
+ internal let __owned:Bool
+
+ final internal override func wrapped() -> dispatch_object_t {
+ return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+ }
+
+ internal init(data:dispatch_data_t, owned:Bool) {
+ __wrapped = data
+ __owned = owned
+ }
+
+ deinit {
+ if __owned {
+ _swift_dispatch_release(wrapped())
+ }
+ }
+}
+
public typealias DispatchSourceHandler = @convention(block) () -> Void
-public protocol DispatchSourceType {
+public protocol DispatchSourceProtocol {
func setEventHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?)
func setEventHandler(handler: DispatchWorkItem)
var isCancelled: Bool { get }
}
-public protocol DispatchSourceUserDataAdd : DispatchSourceType {
- func mergeData(value: UInt)
+public protocol DispatchSourceUserDataAdd : DispatchSourceProtocol {
+ func add(data: UInt)
}
-public protocol DispatchSourceUserDataOr {
-#if false /*FIXME: clashes with UserDataAdd?? */
- func mergeData(value: UInt)
-#endif
+public protocol DispatchSourceUserDataOr : DispatchSourceProtocol {
+ func or(data: UInt)
}
#if HAVE_MACH
-public protocol DispatchSourceMachSend : DispatchSourceType {
+public protocol DispatchSourceMachSend : DispatchSourceProtocol {
public var handle: mach_port_t { get }
public var data: DispatchSource.MachSendEvent { get }
#endif
#if HAVE_MACH
-public protocol DispatchSourceMachReceive : DispatchSourceType {
+public protocol DispatchSourceMachReceive : DispatchSourceProtocol {
var handle: mach_port_t { get }
}
#endif
#if HAVE_MACH
-public protocol DispatchSourceMemoryPressure : DispatchSourceType {
+public protocol DispatchSourceMemoryPressure : DispatchSourceProtocol {
public var data: DispatchSource.MemoryPressureEvent { get }
public var mask: DispatchSource.MemoryPressureEvent { get }
#endif
#if !os(Linux)
-public protocol DispatchSourceProcess : DispatchSourceType {
+public protocol DispatchSourceProcess : DispatchSourceProtocol {
var handle: pid_t { get }
var data: DispatchSource.ProcessEvent { get }
}
#endif
-public protocol DispatchSourceRead : DispatchSourceType {
+public protocol DispatchSourceRead : DispatchSourceProtocol {
}
-public protocol DispatchSourceSignal : DispatchSourceType {
+public protocol DispatchSourceSignal : DispatchSourceProtocol {
}
-public protocol DispatchSourceTimer : DispatchSourceType {
- func setTimer(start: DispatchTime, leeway: DispatchTimeInterval)
+public protocol DispatchSourceTimer : DispatchSourceProtocol {
+ func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval)
- func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval)
+ func scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval)
- func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
+ func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
- func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval)
+ func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval)
- func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
+ func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval)
- func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval)
+ func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval)
}
#if !os(Linux)
-public protocol DispatchSourceFileSystemObject : DispatchSourceType {
+public protocol DispatchSourceFileSystemObject : DispatchSourceProtocol {
var handle: Int32 { get }
var data: DispatchSource.FileSystemEvent { get }
}
#endif
-public protocol DispatchSourceWrite : DispatchSourceType {
+public protocol DispatchSourceWrite : DispatchSourceProtocol {
}
case 0x21: self = .QOS_CLASS_USER_INTERACTIVE
case 0x19: self = .QOS_CLASS_USER_INITIATED
case 0x15: self = .QOS_CLASS_DEFAULT
- case 0x11: self = QOS_CLASS_UTILITY
- case 0x09: self = QOS_CLASS_BACKGROUND
- case 0x00: self = QOS_CLASS_UNSPECIFIED
+ case 0x11: self = .QOS_CLASS_UTILITY
+ case 0x09: self = .QOS_CLASS_BACKGROUND
+ case 0x00: self = .QOS_CLASS_UNSPECIFIED
default: return nil
}
}
#include "internal.h"
-uint64_t
-_dispatch_get_nanoseconds(void)
-{
-#if !TARGET_OS_WIN32
- struct timeval now;
- int r = gettimeofday(&now, NULL);
- dispatch_assert_zero(r);
- dispatch_assert(sizeof(NSEC_PER_SEC) == 8);
- dispatch_assert(sizeof(NSEC_PER_USEC) == 8);
- return (uint64_t)now.tv_sec * NSEC_PER_SEC +
- (uint64_t)now.tv_usec * NSEC_PER_USEC;
-#else /* TARGET_OS_WIN32 */
- // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
- FILETIME ft;
- ULARGE_INTEGER li;
- GetSystemTimeAsFileTime(&ft);
- li.LowPart = ft.dwLowDateTime;
- li.HighPart = ft.dwHighDateTime;
- return li.QuadPart * 100ull;
-#endif /* TARGET_OS_WIN32 */
-}
-
#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \
|| TARGET_OS_WIN32
DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = {
{
int64_t nsec;
if (inval) {
- nsec = inval->tv_sec * 1000000000ll + inval->tv_nsec;
+ nsec = (int64_t)_dispatch_timespec_to_nano(*inval);
} else {
nsec = (int64_t)_dispatch_get_nanoseconds();
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_trace_timer_params_t
-_dispatch_trace_timer_params(uintptr_t ident,
+_dispatch_trace_timer_params(dispatch_clock_t clock,
struct dispatch_timer_source_s *values, uint64_t deadline,
dispatch_trace_timer_params_t params)
{
- #define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \
- == DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t))
+ #define _dispatch_trace_time2nano3(t) \
+ (clock == DISPATCH_CLOCK_MACH ? _dispatch_time_mach2nano(t) : (t))
#define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \
(v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);})
#define _dispatch_trace_time2nano(v) ({ uint64_t _t; \
if (deadline) {
params->deadline = (int64_t)deadline;
} else {
- uint64_t now = (DISPATCH_TIMER_KIND(ident) ==
- DISPATCH_TIMER_KIND_MACH ? _dispatch_absolute_time() :
- _dispatch_get_nanoseconds());
+ uint64_t now = _dispatch_time_now(clock);
params->deadline = _dispatch_trace_time2nano2(values->target,
values->target < now ? 0 : values->target - now);
}
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident,
+_dispatch_trace_timer_configure(dispatch_source_t ds, dispatch_clock_t clock,
struct dispatch_timer_source_s *values)
{
struct dispatch_trace_timer_params_s params;
DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs),
- _dispatch_trace_timer_params(ident, values, 0,
- &params));
+ _dispatch_trace_timer_params(clock, values, 0, &params));
}
DISPATCH_ALWAYS_INLINE
if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) {
if (deadline && dr) {
dispatch_source_t ds = _dispatch_source_from_refs(dr);
+ dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(ds->ds_ident_hack);
struct dispatch_trace_timer_params_s params;
DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr),
- _dispatch_trace_timer_params(ds->ds_ident_hack,
- &ds_timer(dr), deadline, &params));
+ _dispatch_trace_timer_params(clock, &ds_timer(dr),
+ deadline, &params));
}
}
}
#else
#define _dispatch_trace_timer_configure_enabled() false
-#define _dispatch_trace_timer_configure(ds, ident, values) \
- do { (void)(ds); (void)(ident); (void)(values); } while(0)
+#define _dispatch_trace_timer_configure(ds, clock, values) \
+ do { (void)(ds); (void)(clock); (void)(values); } while(0)
#define _dispatch_trace_timer_program(dr, deadline) \
do { (void)(dr); (void)(deadline); } while(0)
#define _dispatch_trace_timer_wake(dr) \
{
dispatch_mach_handler_function_t handler = NULL;
- if (_voucher_libtrace_hooks && _voucher_libtrace_hooks->vah_version >= 2) {
+ if (_voucher_libtrace_hooks) {
handler = _voucher_libtrace_hooks->vah_debug_channel_handler;
}
-
if (!handler) return;
dispatch_mach_t dm;
void
voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks)
{
+ if (hooks->vah_version < 3) {
+ DISPATCH_CLIENT_CRASH(hooks->vah_version, "unsupported vah_version");
+ }
if (!os_atomic_cmpxchg(&_voucher_libtrace_hooks, NULL,
hooks, relaxed)) {
DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks,
// firehose_buffer_create always consumes the send-right
_firehose_task_buffer = firehose_buffer_create(logd_port,
_voucher_unique_pid, flags);
+ if (_voucher_libtrace_hooks->vah_version >= 4 &&
+ _voucher_libtrace_hooks->vah_metadata_init) {
+ firehose_buffer_t fb = _firehose_task_buffer;
+ size_t meta_sz = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE;
+ void *meta = (void *)((uintptr_t)(&fb->fb_header + 1) - meta_sz);
+ _voucher_libtrace_hooks->vah_metadata_init(meta, meta_sz);
+ }
}
}
}
voucher_t
-voucher_activity_create(firehose_tracepoint_id_t trace_id,
- voucher_t base, firehose_activity_flags_t flags, uint64_t location)
-{
- return voucher_activity_create_with_location(&trace_id, base, flags, location);
-}
-
-voucher_t
-voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
- voucher_t base, firehose_activity_flags_t flags, uint64_t location)
+voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id,
+ voucher_t base, firehose_activity_flags_t flags,
+ const void *pubdata, size_t publen)
{
firehose_activity_id_t va_id = 0, current_id = 0, parent_id = 0;
firehose_tracepoint_id_u ftid = { .ftid_value = *trace_id };
- uint16_t pubsize = sizeof(va_id) + sizeof(location);
uint64_t creator_id = 0;
+ uint16_t pubsize;
voucher_t ov = _voucher_get();
voucher_t v;
+ if (os_add_overflow(sizeof(va_id), publen, &pubsize) || pubsize > 128) {
+ DISPATCH_CLIENT_CRASH(pubsize, "Absurd publen");
+ }
if (base == VOUCHER_CURRENT) {
base = ov;
}
pubptr = _dispatch_memappend(pubptr, &parent_id);
}
pubptr = _dispatch_memappend(pubptr, &va_id);
- pubptr = _dispatch_memappend(pubptr, &location);
+ pubptr = _dispatch_mempcpy(pubptr, pubdata, publen);
_voucher_activity_tracepoint_flush(ft, ftid);
}
*trace_id = ftid.ftid_value;
return v;
}
+voucher_t
+voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
+ voucher_t base, firehose_activity_flags_t flags, uint64_t loc)
+{
+ return voucher_activity_create_with_data(trace_id, base, flags,
+ &loc, sizeof(loc));
+}
+
+#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
void
_voucher_activity_swap(firehose_activity_id_t old_id,
firehose_activity_id_t new_id)
if (new_id) pubptr = _dispatch_memappend(pubptr, &new_id);
_voucher_activity_tracepoint_flush(ft, ftid);
}
+#endif
firehose_activity_id_t
voucher_get_activity_id_and_creator(voucher_t v, uint64_t *creator_pid,
firehose_buffer_stream_flush(_firehose_task_buffer, stream);
}
-DISPATCH_ALWAYS_INLINE
-static inline firehose_tracepoint_id_t
-_voucher_activity_trace(firehose_stream_t stream,
- firehose_tracepoint_id_u ftid, uint64_t stamp,
- const void *pubdata, size_t publen,
- const void *privdata, size_t privlen)
+DISPATCH_NOINLINE
+firehose_tracepoint_id_t
+voucher_activity_trace_v(firehose_stream_t stream,
+ firehose_tracepoint_id_t trace_id, uint64_t stamp,
+ const struct iovec *iov, size_t publen, size_t privlen)
{
+ firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
const size_t _firehose_chunk_payload_size =
- sizeof(((struct firehose_buffer_chunk_s *)0)->fbc_data);
+ sizeof(((struct firehose_chunk_s *)0)->fc_data);
if (_voucher_activity_disabled()) return 0;
firehose_tracepoint_t ft;
firehose_activity_id_t va_id = 0;
- firehose_buffer_chunk_t fbc;
+ firehose_chunk_t fc;
uint8_t *privptr, *pubptr;
size_t pubsize = publen;
voucher_t ov = _voucher_get();
pubptr = _dispatch_memappend(pubptr, &creator_pid);
}
if (privlen) {
- fbc = firehose_buffer_chunk_for_address(ft);
+ fc = firehose_buffer_chunk_for_address(ft);
struct firehose_buffer_range_s range = {
- .fbr_offset = (uint16_t)(privptr - fbc->fbc_start),
+ .fbr_offset = (uint16_t)(privptr - fc->fc_start),
.fbr_length = (uint16_t)privlen,
};
pubptr = _dispatch_memappend(pubptr, &range);
- _dispatch_mempcpy(privptr, privdata, privlen);
}
- _dispatch_mempcpy(pubptr, pubdata, publen);
+ while (publen > 0) {
+ pubptr = _dispatch_mempcpy(pubptr, iov->iov_base, iov->iov_len);
+ if (unlikely(os_sub_overflow(publen, iov->iov_len, &publen))) {
+ DISPATCH_CLIENT_CRASH(0, "Invalid arguments");
+ }
+ iov++;
+ }
+ while (privlen > 0) {
+ privptr = _dispatch_mempcpy(privptr, iov->iov_base, iov->iov_len);
+ if (unlikely(os_sub_overflow(privlen, iov->iov_len, &privlen))) {
+ DISPATCH_CLIENT_CRASH(0, "Invalid arguments");
+ }
+ iov++;
+ }
_voucher_activity_tracepoint_flush(ft, ftid);
return ftid.ftid_value;
}
firehose_tracepoint_id_t
voucher_activity_trace(firehose_stream_t stream,
- firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+ firehose_tracepoint_id_t trace_id, uint64_t stamp,
const void *pubdata, size_t publen)
{
- firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
- return _voucher_activity_trace(stream, ftid, timestamp, pubdata, publen,
- NULL, 0);
+ struct iovec iov = { (void *)pubdata, publen };
+ return voucher_activity_trace_v(stream, trace_id, stamp, &iov, publen, 0);
}
firehose_tracepoint_id_t
voucher_activity_trace_with_private_strings(firehose_stream_t stream,
- firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+ firehose_tracepoint_id_t trace_id, uint64_t stamp,
const void *pubdata, size_t publen,
const void *privdata, size_t privlen)
{
- firehose_tracepoint_id_u ftid = { .ftid_value = trace_id };
- return _voucher_activity_trace(stream, ftid, timestamp,
- pubdata, publen, privdata, privlen);
+ struct iovec iov[2] = {
+ { (void *)pubdata, publen },
+ { (void *)privdata, privlen },
+ };
+ return voucher_activity_trace_v(stream, trace_id, stamp,
+ iov, publen, privlen);
}
#pragma mark -
{
}
+#if OS_VOUCHER_ACTIVITY_SPI
void*
voucher_activity_get_metadata_buffer(size_t *length)
{
return 0;
}
+firehose_tracepoint_id_t
+voucher_activity_trace_v(firehose_stream_t stream,
+ firehose_tracepoint_id_t trace_id, uint64_t timestamp,
+ const struct iovec *iov, size_t publen, size_t privlen)
+{
+ (void)stream; (void)trace_id; (void)timestamp;
+ (void)iov; (void)publen; (void)privlen;
+ return 0;
+}
+
void
voucher_activity_flush(firehose_stream_t stream)
{
{
(void)hooks;
}
+#endif // OS_VOUCHER_ACTIVITY_SPI
size_t
_voucher_debug(voucher_t v, char* buf, size_t bufsiz)
void _voucher_init(void);
void _voucher_atfork_child(void);
void _voucher_activity_debug_channel_init(void);
+#if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
void _voucher_activity_swap(firehose_activity_id_t old_id,
firehose_activity_id_t new_id);
+#endif
void _voucher_xref_dispose(voucher_t voucher);
void _voucher_dispose(voucher_t voucher);
size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz);
_dispatch_thread_setspecific(dispatch_voucher_key, voucher);
mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL;
mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL;
+#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
firehose_activity_id_t aid = voucher ? voucher->v_activity : 0;
firehose_activity_id_t oaid = ov ? ov->v_activity : 0;
if (aid != oaid) _voucher_activity_swap(aid, oaid);
+#endif
return (kv != okv) ? kv : VOUCHER_NO_MACH_VOUCHER;
}
ln -f dispatch_group_create.3 ${m}.3
done
-for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume \
+for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume dispatch_activate \
dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do
ln -f dispatch_object.3 ${m}.3
done