From 517da941284910bcce6aed25a1e923708f0ed33f Mon Sep 17 00:00:00 2001 From: Apple Date: Thu, 10 Oct 2013 23:37:40 +0000 Subject: [PATCH] libdispatch-339.1.9.tar.gz --- config/config.h | 15 +- configure.ac | 4 +- dispatch/Makefile.am | 1 + dispatch/base.h | 27 +- dispatch/data.h | 36 +- dispatch/dispatch.h | 4 +- dispatch/group.h | 4 +- dispatch/introspection.h | 163 + dispatch/io.h | 94 +- dispatch/object.h | 10 +- dispatch/queue.h | 31 +- dispatch/semaphore.h | 4 +- dispatch/source.h | 165 +- libdispatch.xcodeproj/project.pbxproj | 383 +- .../contents.xcworkspacedata | 6 - man/dispatch.3 | 2 +- man/dispatch_async.3 | 4 +- man/dispatch_data_create.3 | 2 +- man/dispatch_group_create.3 | 2 +- man/dispatch_io_create.3 | 28 +- man/dispatch_queue_create.3 | 139 +- man/dispatch_semaphore_create.3 | 2 +- man/dispatch_source_create.3 | 124 +- man/dispatch_time.3 | 23 +- os/object.h | 25 +- os/object_private.h | 11 +- private/Makefile.am | 4 +- private/data_private.h | 176 +- private/dispatch.h | 39 - private/introspection_private.h | 727 +++ private/io_private.h | 411 ++ private/mach_private.h | 547 +++ private/private.h | 155 +- private/queue_private.h | 204 +- private/source_private.h | 170 +- resolver/resolved.h | 2 +- resolver/resolver.c | 2 +- src/Makefile.am | 1 - src/allocator.c | 764 ++++ src/allocator_internal.h | 268 ++ src/apply.c | 165 +- src/benchmark.c | 16 +- src/data.c | 365 +- src/data.m | 177 + src/data_internal.h | 73 +- src/init.c | 443 +- src/internal.h | 335 +- src/introspection.c | 595 +++ src/introspection_internal.h | 119 + src/io.c | 457 +- src/io_internal.h | 26 +- src/object.c | 61 +- src/object.m | 178 +- src/object_internal.h | 104 +- src/once.c | 21 +- src/protocol.defs | 4 +- src/provider.d | 63 +- src/queue.c | 2135 ++++----- src/queue_internal.h | 245 +- src/semaphore.c | 440 +- src/semaphore_internal.h | 61 +- src/shims.h | 25 +- src/shims/atomic.h | 415 +- src/shims/atomic_sfb.h | 115 + src/shims/hw_config.h | 36 +- 
src/shims/malloc_zone.h | 98 - src/shims/perfmon.h | 26 +- src/shims/time.h | 92 +- src/shims/tsd.h | 37 +- src/source.c | 3916 +++++++++++++---- src/source_internal.h | 199 +- src/time.c | 72 +- src/trace.h | 178 +- src/transform.c | 154 +- tools/dispatch_timers.d | 89 + tools/dispatch_trace.d | 42 +- .../libdispatch-introspection.xcconfig | 26 + xcodeconfig/libdispatch-static.xcconfig | 25 + xcodeconfig/libdispatch.aliases | 26 +- xcodeconfig/libdispatch.interposable | 28 + xcodeconfig/libdispatch.order | 67 +- xcodeconfig/libdispatch.unexport | 24 +- xcodeconfig/libdispatch.xcconfig | 31 +- xcodeconfig/libdispatch_macosx.aliases | 21 + xcodescripts/install-dtrace.sh | 30 + xcodescripts/install-manpages.sh | 2 +- 86 files changed, 13305 insertions(+), 3331 deletions(-) create mode 100644 dispatch/introspection.h delete mode 100644 libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata delete mode 100644 private/dispatch.h create mode 100644 private/introspection_private.h create mode 100644 private/io_private.h create mode 100644 private/mach_private.h create mode 100644 src/allocator.c create mode 100644 src/allocator_internal.h create mode 100644 src/data.m create mode 100644 src/introspection.c create mode 100644 src/introspection_internal.h create mode 100644 src/shims/atomic_sfb.h delete mode 100644 src/shims/malloc_zone.h create mode 100755 tools/dispatch_timers.d create mode 100644 xcodeconfig/libdispatch-introspection.xcconfig create mode 100644 xcodeconfig/libdispatch-static.xcconfig create mode 100644 xcodeconfig/libdispatch.interposable create mode 100644 xcodeconfig/libdispatch_macosx.aliases create mode 100644 xcodescripts/install-dtrace.sh diff --git a/config/config.h b/config/config.h index 0818e1e..d2ad0ff 100644 --- a/config/config.h +++ b/config/config.h @@ -48,6 +48,9 @@ /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 +/* Define to 1 if you have the header file. 
*/ +#define HAVE_FCNTL_H 1 + /* Define to 1 if you have the `getprogname' function. */ #define HAVE_GETPROGNAME 1 @@ -63,6 +66,9 @@ /* Define to 1 if you have the header file. */ #define HAVE_LIBKERN_OSCROSSENDIAN_H 1 +/* Define to 1 if you have the header file. */ +#define HAVE_LIBPROC_INTERNAL_H 1 + /* Define if mach is present */ #define HAVE_MACH 1 @@ -81,6 +87,9 @@ /* Define if __builtin_trap marked noreturn */ #define HAVE_NORETURN_BUILTIN_TRAP 1 +/* Define if you have the Objective-C runtime */ +#define HAVE_OBJC 1 + /* Define to 1 if you have the `pthread_key_init_np' function. */ #define HAVE_PTHREAD_KEY_INIT_NP 1 @@ -117,6 +126,9 @@ /* Define to 1 if you have the header file. */ #define HAVE_SYS_CDEFS_H 1 +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_GUARDED_H 1 + /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 @@ -163,9 +175,6 @@ /* Define to use Mach semaphores */ #define USE_MACH_SEM 1 -/* Define to use Objective-C runtime */ -#define USE_OBJC 1 - /* Define to use POSIX semaphores */ /* #undef USE_POSIX_SEM */ diff --git a/configure.ac b/configure.ac index d1fe9c4..223084c 100644 --- a/configure.ac +++ b/configure.ac @@ -128,7 +128,7 @@ AC_CHECK_HEADER(sys/event.h, [], # Checks for header files. 
# AC_HEADER_STDC -AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h]) +AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h sys/guarded.h libproc_internal.h]) # hack for pthread_machdep.h's #include AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ @@ -169,7 +169,7 @@ AS_IF([test -n "$apple_objc4_source_runtime_path"], [ ln -fsh "$apple_objc4_source_runtime_path" objc ]) AC_CHECK_HEADER([objc/objc-internal.h], [ - AC_DEFINE(USE_OBJC, 1, [Define to use Objective-C runtime]) + AC_DEFINE(HAVE_OBJC, 1, [Define if you have the Objective-C runtime]) have_objc=true], [have_objc=false], [#include ] ) diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am index 5cba713..6dc850b 100644 --- a/dispatch/Makefile.am +++ b/dispatch/Makefile.am @@ -9,6 +9,7 @@ dispatch_HEADERS= \ data.h \ dispatch.h \ group.h \ + introspection.h \ io.h \ object.h \ once.h \ diff --git a/dispatch/base.h b/dispatch/base.h index 2af340e..af17ccf 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2012 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -82,7 +82,17 @@ #define DISPATCH_ALWAYS_INLINE #endif -#if __GNUC__ + +#if TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) && \ + defined(__cplusplus) +#define DISPATCH_EXPORT extern "C" extern __declspec(dllexport) +#elif TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) +#define DISPATCH_EXPORT extern __declspec(dllexport) +#elif TARGET_OS_WIN32 && defined(__cplusplus) +#define DISPATCH_EXPORT extern "C" extern __declspec(dllimport) +#elif TARGET_OS_WIN32 +#define DISPATCH_EXPORT extern __declspec(dllimport) +#elif __GNUC__ #define DISPATCH_EXPORT extern __attribute__((visibility("default"))) #else #define DISPATCH_EXPORT extern @@ -100,6 +110,19 @@ #define DISPATCH_EXPECT(x, v) (x) #endif +#if defined(__has_feature) +#if __has_feature(objc_fixed_enum) +#define DISPATCH_ENUM(name, type, ...) \ + typedef enum : type { __VA_ARGS__ } name##_t +#else +#define DISPATCH_ENUM(name, type, ...) \ + enum { __VA_ARGS__ }; typedef type name##_t +#endif +#else +#define DISPATCH_ENUM(name, type, ...) \ + enum { __VA_ARGS__ }; typedef type name##_t +#endif + typedef void (*dispatch_function_t)(void *); #endif diff --git a/dispatch/data.h b/dispatch/data.h index ddba5dc..d656584 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -51,8 +51,6 @@ DISPATCH_DECL(dispatch_data); __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; -#ifdef __BLOCKS__ - /*! * @const DISPATCH_DATA_DESTRUCTOR_DEFAULT * @discussion The default destructor for dispatch data objects. @@ -61,6 +59,21 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; */ #define DISPATCH_DATA_DESTRUCTOR_DEFAULT NULL +#ifdef __BLOCKS__ +#if !TARGET_OS_WIN32 +/*! 
@parseOnly */ +#define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ + DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_##name +#else +#define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ + DISPATCH_EXPORT dispatch_block_t _dispatch_data_destructor_##name +#endif +#else +#define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ + DISPATCH_EXPORT const dispatch_function_t \ + _dispatch_data_destructor_##name +#endif /* __BLOCKS__ */ + /*! * @const DISPATCH_DATA_DESTRUCTOR_FREE * @discussion The destructor for dispatch data objects created from a malloc'd @@ -69,8 +82,18 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; */ #define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free) __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_free; +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); +/*! + * @const DISPATCH_DATA_DESTRUCTOR_MUNMAP + * @discussion The destructor for dispatch data objects that have been created + * from buffers that require deallocation with munmap(2). + */ +#define DISPATCH_DATA_DESTRUCTOR_MUNMAP (_dispatch_data_destructor_munmap) +__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); + +#ifdef __BLOCKS__ /*! * @function dispatch_data_create * Creates a dispatch data object from the given contiguous buffer of memory. If @@ -99,6 +122,7 @@ dispatch_data_create(const void *buffer, size_t size, dispatch_queue_t queue, dispatch_block_t destructor); +#endif /* __BLOCKS__ */ /*! * @function dispatch_data_get_size @@ -184,6 +208,7 @@ dispatch_data_create_subrange(dispatch_data_t data, size_t offset, size_t length); +#ifdef __BLOCKS__ /*! * @typedef dispatch_data_applier_t * A block to be invoked for every contiguous memory region in a data object. 
@@ -224,6 +249,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); +#endif /* __BLOCKS__ */ /*! * @function dispatch_data_copy_region @@ -247,8 +273,6 @@ dispatch_data_copy_region(dispatch_data_t data, size_t location, size_t *offset_ptr); -#endif /* __BLOCKS__ */ - __END_DECLS #endif /* __DISPATCH_DATA__ */ diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 119b413..cb5af23 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -37,7 +37,7 @@ #define __OSX_AVAILABLE_STARTING(x, y) #endif -#define DISPATCH_API_VERSION 20111201 +#define DISPATCH_API_VERSION 20130520 #ifndef __DISPATCH_BUILDING_DISPATCH__ diff --git a/dispatch/group.h b/dispatch/group.h index 88e8087..77420c1 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -138,7 +138,7 @@ dispatch_group_async_f(dispatch_group_t group, * This function will return immediately if there are no blocks associated * with the dispatch group (i.e. the group is empty). * - * The result of calling this function from mulitple threads simultaneously + * The result of calling this function from multiple threads simultaneously * with the same dispatch group is undefined. * * After the successful return of this function, the dispatch group is empty. diff --git a/dispatch/introspection.h b/dispatch/introspection.h new file mode 100644 index 0000000..9e96341 --- /dev/null +++ b/dispatch/introspection.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2013 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_INTROSPECTION__ +#define __DISPATCH_INTROSPECTION__ + +#include + +/*! + * @header + * + * @abstract + * Interposable introspection hooks for libdispatch. + * + * @discussion + * These hooks are only available in the introspection version of the library, + * loaded by running a process with the environment variable + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection + */ + +__BEGIN_DECLS + +/*! + * @function dispatch_introspection_hook_queue_create + * + * @abstract + * Interposable hook function called when a dispatch queue was created. + * + * @param queue + * The newly created dispatch queue. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_create(dispatch_queue_t queue); + +/*! + * @function dispatch_introspection_hook_queue_destroy + * + * @abstract + * Interposable hook function called when a dispatch queue is about to be + * destroyed. + * + * @param queue + * The dispatch queue about to be destroyed. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue); + +/*! 
+ * @function dispatch_introspection_hook_queue_item_enqueue + * + * @abstract + * Interposable hook function called when an item is about to be enqueued onto + * a dispatch queue. + * + * @param queue + * The dispatch queue enqueued onto. + * + * @param item + * The object about to be enqueued. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue, + dispatch_object_t item); + +/*! + * @function dispatch_introspection_hook_queue_item_dequeue + * + * @abstract + * Interposable hook function called when an item was dequeued from a dispatch + * queue. + * + * @param queue + * The dispatch queue dequeued from. + * + * @param item + * The dequeued object. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue, + dispatch_object_t item); + +/*! + * @function dispatch_introspection_hook_queue_callout_begin + * + * @abstract + * Interposable hook function called when a client function is about to be + * called out to on a dispatch queue. + * + * @param queue + * The dispatch queue the callout is performed on. + * + * @param context + * The context parameter passed to the function. For a callout to a block, + * this is a pointer to the block object. + * + * @param function + * The client function about to be called out to. For a callout to a block, + * this is the block object's invoke function. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, + void *context, dispatch_function_t function); + +/*! + * @function dispatch_introspection_hook_queue_callout_end + * + * @abstract + * Interposable hook function called after a client function has returned from + * a callout on a dispatch queue. + * + * @param queue + * The dispatch queue the callout was performed on. 
+ * + * @param context + * The context parameter passed to the function. For a callout to a block, + * this is a pointer to the block object. + * + * @param function + * The client function that was called out to. For a callout to a block, + * this is the block object's invoke function. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue, + void *context, dispatch_function_t function); + +__END_DECLS + +#endif diff --git a/dispatch/io.h b/dispatch/io.h index dd83e7d..569dbdb 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2010 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -50,8 +50,6 @@ __BEGIN_DECLS */ typedef int dispatch_fd_t; -#ifdef __BLOCKS__ - /*! * @functiongroup Dispatch I/O Convenience API * Convenience wrappers around the dispatch I/O channel API, with simpler @@ -61,6 +59,7 @@ typedef int dispatch_fd_t; * may incur more overhead than by using the dispatch I/O channel API directly. */ +#ifdef __BLOCKS__ /*! * @function dispatch_read * Schedule a read operation for asynchronous execution on the specified file @@ -147,6 +146,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, void (^handler)(dispatch_data_t data, int error)); +#endif /* __BLOCKS__ */ /*! * @functiongroup Dispatch I/O Channel API @@ -160,17 +160,6 @@ dispatch_write(dispatch_fd_t fd, */ DISPATCH_DECL(dispatch_io); -/*! - * @typedef dispatch_io_handler_t - * The prototype of I/O handler blocks for dispatch I/O operations. - * - * @param done A flag indicating whether the operation is complete. - * @param data The data object to be handled. - * @param error An errno condition for the operation. - */ -typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, - int error); - /*! 
* @typedef dispatch_io_type_t * The type of a dispatch I/O channel: @@ -194,6 +183,7 @@ typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, typedef unsigned long dispatch_io_type_t; +#ifdef __BLOCKS__ /*! * @function dispatch_io_create * Create a dispatch I/O channel associated with a file descriptor. The system @@ -217,7 +207,7 @@ typedef unsigned long dispatch_io_type_t; * @param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error - * occurred. + * occurred (invalid type specified). */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT @@ -229,32 +219,32 @@ dispatch_io_create(dispatch_io_type_t type, void (^cleanup_handler)(int error)); /*! -* @function dispatch_io_create_with_path -* Create a dispatch I/O channel associated with a path name. The specified -* path, oflag and mode parameters will be passed to open(2) when the first I/O -* operation on the channel is ready to execute and the resulting file -* descriptor will remain open and under the control of the system until the -* channel is closed, an error occurs on the file descriptor or all references -* to the channel are released. At that time the file descriptor will be closed -* and the specified cleanup handler will be enqueued. -* -* @param type The desired type of I/O channel (DISPATCH_IO_STREAM -* or DISPATCH_IO_RANDOM). -* @param path The path to associate with the I/O channel. -* @param oflag The flags to pass to open(2) when opening the file at -* path. -* @param mode The mode to pass to open(2) when creating the file at -* path (i.e. with flag O_CREAT), zero otherwise. -* @param queue The dispatch queue to which the handler should be -* submitted. -* @param cleanup_handler The handler to enqueue when the system -* has closed the file at path. 
-* @param error An errno condition if control is relinquished -* because channel creation or opening of the -* specified file failed, zero otherwise. -* @result The newly created dispatch I/O channel or NULL if an error -* occurred. -*/ + * @function dispatch_io_create_with_path + * Create a dispatch I/O channel associated with a path name. The specified + * path, oflag and mode parameters will be passed to open(2) when the first I/O + * operation on the channel is ready to execute and the resulting file + * descriptor will remain open and under the control of the system until the + * channel is closed, an error occurs on the file descriptor or all references + * to the channel are released. At that time the file descriptor will be closed + * and the specified cleanup handler will be enqueued. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param path The absolute path to associate with the I/O channel. + * @param oflag The flags to pass to open(2) when opening the file at + * path. + * @param mode The mode to pass to open(2) when creating the file at + * path (i.e. with flag O_CREAT), zero otherwise. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param cleanup_handler The handler to enqueue when the system + * has closed the file at path. + * @param error An errno condition if control is relinquished + * because channel creation or opening of the + * specified file failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type or non-absolute path specified). + */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW @@ -293,7 +283,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, * @param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. 
* @result The newly created dispatch I/O channel or NULL if an error - * occurred. + * occurred (invalid type specified). */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED @@ -304,6 +294,17 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_queue_t queue, void (^cleanup_handler)(int error)); +/*! + * @typedef dispatch_io_handler_t + * The prototype of I/O handler blocks for dispatch I/O operations. + * + * @param done A flag indicating whether the operation is complete. + * @param data The data object to be handled. + * @param error An errno condition for the operation. + */ +typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, + int error); + /*! * @function dispatch_io_read * Schedule a read operation for asynchronous execution on the specified I/O @@ -408,6 +409,7 @@ dispatch_io_write(dispatch_io_t channel, dispatch_data_t data, dispatch_queue_t queue, dispatch_io_handler_t io_handler); +#endif /* __BLOCKS__ */ /*! * @typedef dispatch_io_close_flags_t @@ -442,6 +444,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); +#ifdef __BLOCKS__ /*! * @function dispatch_io_barrier * Schedule a barrier operation on the specified I/O channel; all previously @@ -460,13 +463,14 @@ dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); * While the barrier block is running, it may safely operate on the channel's * underlying file descriptor with fsync(2), lseek(2) etc. (but not close(2)). * - * @param channel The dispatch I/O channel to close. - * @param barrier The flags for the close operation. + * @param channel The dispatch I/O channel to schedule the barrier on. + * @param barrier The barrier block. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); +#endif /* __BLOCKS__ */ /*! * @function dispatch_io_get_descriptor @@ -580,8 +584,6 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, dispatch_io_interval_flags_t flags); -#endif /* __BLOCKS__ */ - __END_DECLS #endif /* __DISPATCH_IO__ */ diff --git a/dispatch/object.h b/dispatch/object.h index bc7257a..4ae0ab6 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -81,6 +81,9 @@ typedef union { struct dispatch_queue_attr_s *_dqa; struct dispatch_group_s *_dg; struct dispatch_source_s *_ds; + struct dispatch_mach_s *_dm; + struct dispatch_mach_msg_s *_dmsg; + struct dispatch_timer_aggregate_s *_dta; struct dispatch_source_attr_s *_dsa; struct dispatch_semaphore_s *_dsema; struct dispatch_data_s *_ddata; @@ -111,19 +114,22 @@ __BEGIN_DECLS * The log output destination can be configured via the LIBDISPATCH_LOG * environment variable, valid values are: YES, NO, syslog, stderr, file. * + * This function is deprecated and will be removed in a future release. + * Objective-C callers may use -debugDescription instead. + * * @param object * The object to introspect. * * @param message * The message to log above and beyond the introspection. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,3))) void dispatch_debug(dispatch_object_t object, const char *message, ...); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,0))) void diff --git a/dispatch/queue.h b/dispatch/queue.h index b8050f9..9090676 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2012 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -272,7 +272,7 @@ dispatch_sync_f(dispatch_queue_t queue, */ #ifdef __BLOCKS__ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t queue, void (^block)(size_t)); @@ -305,7 +305,7 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t queue, void *context, @@ -335,10 +335,12 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * the two is not a valid way to test whether code is executing on the * main thread. * + * This function is deprecated and will be removed in a future release. + * * @result * Returns the current queue. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_current_queue(void); @@ -483,22 +485,31 @@ DISPATCH_NOTHROW dispatch_queue_t dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); +/*! + * @const DISPATCH_CURRENT_QUEUE_LABEL + * @discussion Constant to pass to the dispatch_queue_get_label() function to + * retrieve the label of the current queue. + */ +#define DISPATCH_CURRENT_QUEUE_LABEL NULL + /*! * @function dispatch_queue_get_label * * @abstract - * Returns the label of the queue that was specified when the - * queue was created. + * Returns the label of the given queue, as specified when the queue was + * created, or the empty string if a NULL label was specified. + * + * Passing DISPATCH_CURRENT_QUEUE_LABEL will return the label of the current + * queue. * * @param queue - * The result of passing NULL in this parameter is undefined. + * The queue to query, or DISPATCH_CURRENT_QUEUE_LABEL. * * @result - * The label of the queue. The result may be NULL. + * The label of the queue. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT -DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW const char * dispatch_queue_get_label(dispatch_queue_t queue); diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index 312a2c2..8f68407 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -69,7 +69,7 @@ dispatch_semaphore_create(long value); * * @discussion * Decrement the counting semaphore. 
If the resulting value is less than zero, - * this function waits in FIFO order for a signal to occur before returning. + * this function waits for a signal to occur before returning. * * @param dsema * The semaphore. The result of passing NULL in this parameter is undefined. diff --git a/dispatch/source.h b/dispatch/source.h index e37ecec..ebbf8b9 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -30,7 +30,10 @@ #include #include #endif + +#if !TARGET_OS_WIN32 #include +#endif /*! * @header @@ -64,6 +67,16 @@ DISPATCH_DECL(dispatch_source); */ typedef const struct dispatch_source_type_s *dispatch_source_type_t; +#if !TARGET_OS_WIN32 +/*! @parseOnly */ +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT const struct dispatch_source_type_s \ + _dispatch_source_type_##name +#else +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_source_type_s _dispatch_source_type_##name +#endif + /*! * @const DISPATCH_SOURCE_TYPE_DATA_ADD * @discussion A dispatch source that coalesces data obtained via calls to @@ -73,21 +86,18 @@ typedef const struct dispatch_source_type_s *dispatch_source_type_t; */ #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_data_add; +DISPATCH_SOURCE_TYPE_DECL(data_add); /*! * @const DISPATCH_SOURCE_TYPE_DATA_OR * @discussion A dispatch source that coalesces data obtained via calls to - * dispatch_source_merge_data(). A logical OR is used to coalesce the data. + * dispatch_source_merge_data(). A bitwise OR is used to coalesce the data. * The handle is unused (pass zero for now). - * The mask is used to perform a logical AND with the value passed to - * dispatch_source_merge_data(). 
+ * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_data_or; +DISPATCH_SOURCE_TYPE_DECL(data_or); /*! * @const DISPATCH_SOURCE_TYPE_MACH_SEND @@ -98,8 +108,7 @@ const struct dispatch_source_type_s _dispatch_source_type_data_or; */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_mach_send; +DISPATCH_SOURCE_TYPE_DECL(mach_send); /*! * @const DISPATCH_SOURCE_TYPE_MACH_RECV @@ -109,8 +118,20 @@ const struct dispatch_source_type_s _dispatch_source_type_mach_send; */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_mach_recv; +DISPATCH_SOURCE_TYPE_DECL(mach_recv); + +/*! + * @const DISPATCH_SOURCE_TYPE_MEMORYPRESSURE + * @discussion A dispatch source that monitors the system for changes in + * memory pressure condition. + * The handle is unused (pass zero for now). + * The mask is a mask of desired events from + * dispatch_source_memorypressure_flags_t. + */ +#define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ + (&_dispatch_source_type_memorypressure) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA) +DISPATCH_SOURCE_TYPE_DECL(memorypressure); /*! * @const DISPATCH_SOURCE_TYPE_PROC @@ -121,8 +142,7 @@ const struct dispatch_source_type_s _dispatch_source_type_mach_recv; */ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_proc; +DISPATCH_SOURCE_TYPE_DECL(proc); /*! 
* @const DISPATCH_SOURCE_TYPE_READ @@ -133,8 +153,7 @@ const struct dispatch_source_type_s _dispatch_source_type_proc; */ #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_read; +DISPATCH_SOURCE_TYPE_DECL(read); /*! * @const DISPATCH_SOURCE_TYPE_SIGNAL @@ -144,20 +163,18 @@ const struct dispatch_source_type_s _dispatch_source_type_read; */ #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_signal; +DISPATCH_SOURCE_TYPE_DECL(signal); /*! * @const DISPATCH_SOURCE_TYPE_TIMER * @discussion A dispatch source that submits the event handler block based * on a timer. * The handle is unused (pass zero for now). - * The mask is unused (pass zero for now). + * The mask specifies which flags from dispatch_source_timer_flags_t to apply. */ #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_timer; +DISPATCH_SOURCE_TYPE_DECL(timer); /*! * @const DISPATCH_SOURCE_TYPE_VNODE @@ -168,8 +185,7 @@ const struct dispatch_source_type_s _dispatch_source_type_timer; */ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_vnode; +DISPATCH_SOURCE_TYPE_DECL(vnode); /*! * @const DISPATCH_SOURCE_TYPE_WRITE @@ -180,8 +196,7 @@ const struct dispatch_source_type_s _dispatch_source_type_vnode; */ #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_write; +DISPATCH_SOURCE_TYPE_DECL(write); /*! 
* @typedef dispatch_source_mach_send_flags_t @@ -194,6 +209,36 @@ const struct dispatch_source_type_s _dispatch_source_type_write; typedef unsigned long dispatch_source_mach_send_flags_t; +/*! + * @typedef dispatch_source_memorypressure_flags_t + * Type of dispatch_source_memorypressure flags + * + * @constant DISPATCH_MEMORYPRESSURE_NORMAL + * The system memory pressure condition has returned to normal. + * + * @constant DISPATCH_MEMORYPRESSURE_WARN + * The system memory pressure condition has changed to warning. + * + * @constant DISPATCH_MEMORYPRESSURE_CRITICAL + * The system memory pressure condition has changed to critical. + * + * @discussion + * Elevated memory pressure is a system-wide condition that applications + * registered for this source should react to by changing their future memory + * use behavior, e.g. by reducing cache sizes of newly initiated operations + * until memory pressure returns back to normal. + * NOTE: applications should NOT traverse and discard existing caches for past + * operations when the system memory pressure enters an elevated state, as that + * is likely to trigger VM operations that will further aggravate system memory + * pressure. + */ + +#define DISPATCH_MEMORYPRESSURE_NORMAL 0x01 +#define DISPATCH_MEMORYPRESSURE_WARN 0x02 +#define DISPATCH_MEMORYPRESSURE_CRITICAL 0x04 + +typedef unsigned long dispatch_source_memorypressure_flags_t; + /*! * @typedef dispatch_source_proc_flags_t * Type of dispatch_source_proc flags @@ -254,6 +299,26 @@ typedef unsigned long dispatch_source_proc_flags_t; typedef unsigned long dispatch_source_vnode_flags_t; +/*! 
+ * @typedef dispatch_source_timer_flags_t + * Type of dispatch_source_timer flags + * + * @constant DISPATCH_TIMER_STRICT + * Specifies that the system should make a best effort to strictly observe the + * leeway value specified for the timer via dispatch_source_set_timer(), even + * if that value is smaller than the default leeway value that would be applied + * to the timer otherwise. A minimal amount of leeway will be applied to the + * timer even if this flag is specified. + * + * CAUTION: Use of this flag may override power-saving techniques employed by + * the system and cause higher power consumption, so it must be used with care + * and only when absolutely necessary. + */ + +#define DISPATCH_TIMER_STRICT 0x1 + +typedef unsigned long dispatch_source_timer_flags_t; + __BEGIN_DECLS /*! @@ -461,6 +526,7 @@ dispatch_source_testcancel(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: n/a * DISPATCH_SOURCE_TYPE_MACH_SEND: mach port (mach_port_t) * DISPATCH_SOURCE_TYPE_MACH_RECV: mach port (mach_port_t) + * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE n/a * DISPATCH_SOURCE_TYPE_PROC: process identifier (pid_t) * DISPATCH_SOURCE_TYPE_READ: file descriptor (int) * DISPATCH_SOURCE_TYPE_SIGNAL: signal number (int) @@ -491,10 +557,11 @@ dispatch_source_get_handle(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: n/a * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t * DISPATCH_SOURCE_TYPE_READ: n/a * DISPATCH_SOURCE_TYPE_SIGNAL: n/a - * DISPATCH_SOURCE_TYPE_TIMER: n/a + * DISPATCH_SOURCE_TYPE_TIMER: dispatch_source_timer_flags_t * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: n/a */ @@ -526,6 +593,7 @@ dispatch_source_get_mask(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: application defined data * 
DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t * DISPATCH_SOURCE_TYPE_READ: estimated bytes available to read * DISPATCH_SOURCE_TYPE_SIGNAL: number of signals delivered since @@ -569,30 +637,45 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value); * Sets a start time, interval, and leeway value for a timer source. * * @discussion - * Calling this function has no effect if the timer source has already been - * canceled. Once this function returns, any pending timer data accumulated - * for the previous timer values has been cleared + * Once this function returns, any pending source data accumulated for the + * previous timer values has been cleared; the next fire of the timer will + * occur at 'start', and every 'interval' nanoseconds thereafter until the + * timer source is canceled. + * + * Any fire of the timer may be delayed by the system in order to improve power + * consumption and system performance. The upper limit to the allowable delay + * may be configured with the 'leeway' argument, the lower limit is under the + * control of the system. + * + * For the initial timer fire at 'start', the upper limit to the allowable + * delay is set to 'leeway' nanoseconds. For the subsequent timer fires at + * 'start' + N * 'interval', the upper limit is MIN('leeway','interval'/2). + * + * The lower limit to the allowable delay may vary with process state such as + * visibility of application UI. If the specified timer source was created with + * a mask of DISPATCH_TIMER_STRICT, the system will make a best effort to + * strictly observe the provided 'leeway' value even if it is smaller than the + * current lower limit. Note that a minimal amount of delay is to be expected + * even if this flag is specified. 
+ * + * The 'start' argument also determines which clock will be used for the timer: + * If 'start' is DISPATCH_TIME_NOW or was created with dispatch_time(3), the + * timer is based on mach_absolute_time(). If 'start' was created with + * dispatch_walltime(3), the timer is based on gettimeofday(3). * - * The start time argument also determines which clock will be used for the - * timer. If the start time is DISPATCH_TIME_NOW or created with - * dispatch_time() then the timer is based on mach_absolute_time(). Otherwise, - * if the start time of the timer is created with dispatch_walltime() then the - * timer is based on gettimeofday(3). + * Calling this function has no effect if the timer source has already been + * canceled. * * @param start * The start time of the timer. See dispatch_time() and dispatch_walltime() * for more information. * * @param interval - * The nanosecond interval for the timer. + * The nanosecond interval for the timer. Use DISPATCH_TIME_FOREVER for a + * one-shot timer. * * @param leeway - * A hint given to the system by the application for the amount of leeway, in - * nanoseconds, that the system may defer the timer in order to align with other - * system activity for improved system performance or power consumption. (For - * example, an application might perform a periodic task every 5 minutes, with - * a leeway of up to 30 seconds.) Note that some latency is to be expected for - * all timers even when a leeway value of zero is specified. + * The nanosecond leeway for the timer. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 59d706e..b465ba7 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -32,6 +32,13 @@ /* End PBXAggregateTarget section */ /* Begin PBXBuildFile section */ + 2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; 5A27262610F26F1900751FBC /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; @@ -65,19 +72,63 @@ E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E417A38412A472C4004D659D /* provider.d in 
Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E417A38512A472C5004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E420867016027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E420867116027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E420867216027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E420867316027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E421E5F91716ADA10090DC9B /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = E421E5F81716ADA10090DC9B /* introspection.h */; settings = {ATTRIBUTES = (Public, ); }; }; E422A0D512A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; E422A0D612A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; - E4407FAE143CC984003A9E80 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = E4407FAD143CC984003A9E80 /* dispatch.h */; settings = {ATTRIBUTES = (Private, ); }; }; - E4407FAF143CC984003A9E80 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = E4407FAD143CC984003A9E80 /* dispatch.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43A710615783F7E0012D38D /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E44EBE3E1251659900645D88 /* init.c in 
Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5412517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5512517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5612517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5712517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E44F9DAC1654400D001DCD38 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E44F9DAD1654400E001DCD38 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E44F9DAE16544022001DCD38 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + E44F9DAF16544026001DCD38 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; }; + E44F9DB01654402B001DCD38 /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E44F9DB11654402E001DCD38 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; + E44F9DB216544032001DCD38 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; + E44F9DB316544037001DCD38 /* queue_internal.h in Headers */ = {isa = PBXBuildFile; 
fileRef = 96929D950F3EA2170041FF5D /* queue_internal.h */; }; + E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + E44F9DB51654403F001DCD38 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + E44F9DB616544043001DCD38 /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; + E44F9DB71654404F001DCD38 /* shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* shims.h */; }; + E44F9DB816544053001DCD38 /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* atomic.h */; }; + E44F9DB916544056001DCD38 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; + E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; + E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; + E44F9DBE1654405B001DCD38 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; + E44F9DBF165440EF001DCD38 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; + E44F9DC016544115001DCD38 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E44F9DC116544115001DCD38 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; }; E454569314746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; E454569414746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* 
object_private.h */; settings = {ATTRIBUTES = (); }; }; + E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E4630252176162D300E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E4630253176162D400E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E46DBC4214EE10C80001F9F6 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E46DBC4314EE10C80001F9F6 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E46DBC4514EE10C80001F9F6 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E46DBC4614EE10C80001F9F6 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E46DBC4714EE10C80001F9F6 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E46DBC4914EE10C80001F9F6 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E46DBC4A14EE10C80001F9F6 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E46DBC4B14EE10C80001F9F6 /* data.c in 
Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E46DBC4C14EE10C80001F9F6 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; @@ -121,10 +172,31 @@ E49F24D2125D57FA0057C971 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E49F24D3125D57FA0057C971 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; E49F24D4125D57FA0057C971 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; + E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; + E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; + E4B515BD164B2DA300E003AF /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = 
E43570B8126E93380097AB9F /* provider.d */; }; + E4B515BE164B2DA300E003AF /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E4B515BF164B2DA300E003AF /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E4B515C0164B2DA300E003AF /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E4B515C1164B2DA300E003AF /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E4B515C2164B2DA300E003AF /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E4B515C3164B2DA300E003AF /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E4B515C4164B2DA300E003AF /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E4B515C5164B2DA300E003AF /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E4B515C6164B2DA300E003AF /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E4B515C7164B2DA300E003AF /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E4B515C8164B2DA300E003AF /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E4B515C9164B2DA300E003AF /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E4B515CA164B2DA300E003AF /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4B515CB164B2DA300E003AF /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E4B515CC164B2DA300E003AF /* object.m in Sources */ = {isa = PBXBuildFile; 
fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E4B515CD164B2DA300E003AF /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + E4B515CE164B2DA300E003AF /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B515D7164B2DFB00E003AF /* introspection_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E4B515DD164B32E000E003AF /* introspection.c in Sources */ = {isa = PBXBuildFile; fileRef = E4B515DC164B32E000E003AF /* introspection.c */; }; E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; - E4BA743F13A8911B0095BDF1 /* malloc_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743A13A8911B0095BDF1 /* malloc_zone.h */; }; - E4BA744013A8911B0095BDF1 /* malloc_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743A13A8911B0095BDF1 /* malloc_zone.h */; }; E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; E4C1ED701263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; @@ -149,6 +221,8 @@ E4EC122112514715000DDBD1 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E4EC122312514715000DDBD1 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; E4EC122412514715000DDBD1 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 
5A27262510F26F1900751FBC /* io.c */; }; + E4ECBAA515253C25002C313C /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E4ECBAA615253D17002C313C /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E4FC3264145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; E4FC3265145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; E4FC3266145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; @@ -191,6 +265,13 @@ remoteGlobalIDString = D2AAC045055464E500DB518D; remoteInfo = libdispatch; }; + E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E46DBC1A14EE10C80001F9F6; + remoteInfo = libdispatch_static; + }; E47D6ECA125FEB9D0070D91C /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -205,9 +286,18 @@ remoteGlobalIDString = E4EC121612514715000DDBD1; remoteInfo = "libdispatch mp resolved"; }; + E4B515DA164B317700E003AF /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E4B51595164B2DA300E003AF; + remoteInfo = "libdispatch introspection"; + }; /* End PBXContainerItemProxy section */ /* Begin PBXFileReference section */ + 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = ""; }; + 2BBF5A62154B64F5002B20F9 /* allocator.c */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = ""; }; 5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = ""; }; 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; @@ -222,11 +312,11 @@ 72CC942F0ECCD8750031B751 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = ""; }; 96032E4A0F5CC8C700241C5F /* time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = time.c; sourceTree = ""; }; 96032E4C0F5CC8D100241C5F /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; - 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_apply.3; sourceTree = ""; }; - 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_once.3; sourceTree = ""; }; + 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_apply.3; sourceTree = ""; }; + 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_once.3; sourceTree = ""; }; 961B99350F3E83980006BC96 /* benchmark.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = benchmark.h; sourceTree = ""; }; 
961B994F0F3E85C30006BC96 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; - 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_semaphore_create.3; sourceTree = ""; }; + 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_semaphore_create.3; sourceTree = ""; }; 965CD6340F3E806200D4E28D /* benchmark.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = benchmark.c; sourceTree = ""; }; 965ECC200F3EAB71004DDD89 /* object_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_internal.h; sourceTree = ""; }; 9661E56A0F3E7DDF00749F3E /* object.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = object.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; @@ -239,40 +329,54 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; - C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; }; + C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; 
sourceTree = ""; tabWidth = 8; }; C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = ""; }; D2AAC046055464E500DB518D /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolved.xcconfig"; sourceTree = ""; }; E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolver.xcconfig"; sourceTree = ""; }; E4128ED513BA9A1700ABB2CB /* hw_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hw_config.h; sourceTree = ""; }; + E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_macosx.aliases; sourceTree = ""; }; + E420866F16027AE500EEE210 /* data.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = data.m; sourceTree = ""; }; + E421E5F81716ADA10090DC9B /* introspection.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection.h; sourceTree = ""; }; + E421E5FB1716B8730090DC9B /* install-dtrace.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-dtrace.sh"; sourceTree = ""; }; + E421E5FD1716BEA70090DC9B /* libdispatch.interposable */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch.interposable; sourceTree = ""; }; 
E422A0D412A557B5005E5BDB /* trace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trace.h; sourceTree = ""; }; E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = ""; }; E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = ""; }; - E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; path = provider.d; sourceTree = ""; }; + E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = ""; }; E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; - E4407FAD143CC984003A9E80 /* dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dispatch.h; sourceTree = ""; }; E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = ""; }; E44EBE331251654000645D88 /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = ""; }; E44EBE371251656400645D88 /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = ""; }; E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = ""; }; + E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; 
sourceTree = ""; }; E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; + E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; + E46DBC5714EE10C80001F9F6 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-static.xcconfig"; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; + E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; - E4BA743513A88FE10095BDF1 /* dispatch_data_create.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_data_create.3; 
sourceTree = ""; }; - E4BA743613A88FF30095BDF1 /* dispatch_io_create.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_io_create.3; sourceTree = ""; }; - E4BA743713A88FF30095BDF1 /* dispatch_io_read.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_io_read.3; sourceTree = ""; }; - E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_read.3; sourceTree = ""; }; + E4A2C9C4176019760000F809 /* atomic_llsc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_llsc.h; sourceTree = ""; }; + E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = ""; }; + E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-introspection.xcconfig"; sourceTree = ""; }; + E4B515DC164B32E000E003AF /* introspection.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = introspection.c; sourceTree = ""; }; + E4BA743513A88FE10095BDF1 /* dispatch_data_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_data_create.3; sourceTree = ""; }; + E4BA743613A88FF30095BDF1 /* dispatch_io_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_io_create.3; sourceTree = ""; }; + E4BA743713A88FF30095BDF1 /* dispatch_io_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_io_read.3; sourceTree = ""; }; + E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = 
dispatch_read.3; sourceTree = ""; }; E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; - E4BA743A13A8911B0095BDF1 /* malloc_zone.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = malloc_zone.h; sourceTree = ""; }; E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; E4EC122D12514715000DDBD1 /* libdispatch_mp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_mp.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E4ECBAA415253C25002C313C /* mach_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_private.h; sourceTree = ""; }; E4FC3263145F46C9002FBDDB /* object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = object.m; sourceTree = ""; }; FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_internal.h; sourceTree = ""; }; FC1832A2109923C7003403D5 /* perfmon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = perfmon.h; sourceTree = ""; }; @@ -280,11 +384,11 @@ FC1832A4109923C7003403D5 /* tsd.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tsd.h; sourceTree = ""; }; FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_queue_create.3; sourceTree = ""; }; FC5C9C1D0EADABE3006E462D /* group.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = group.h; sourceTree = ""; }; - FC678DE80F97E0C300AB5993 /* dispatch_after.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_after.3; sourceTree = ""; }; - FC678DE90F97E0C300AB5993 /* dispatch_api.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_api.3; sourceTree = ""; }; - FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_async.3; sourceTree = ""; }; - FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_group_create.3; sourceTree = ""; }; - FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_time.3; sourceTree = ""; }; + FC678DE80F97E0C300AB5993 /* dispatch_after.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_after.3; sourceTree = ""; }; + FC678DE90F97E0C300AB5993 /* dispatch_api.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_api.3; sourceTree = ""; }; + FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_async.3; sourceTree = ""; }; + FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_group_create.3; sourceTree = ""; }; + FC678DEC0F97E0C300AB5993 /* 
dispatch_time.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_time.3; sourceTree = ""; }; FC7BED8A0E8361E600161930 /* queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = queue.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; FC7BED8B0E8361E600161930 /* queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue.h; sourceTree = ""; }; FC7BED8D0E8361E600161930 /* source.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source.h; sourceTree = ""; }; @@ -335,10 +439,13 @@ 08FB7795FE84155DC02AAC07 /* Source */ = { isa = PBXGroup; children = ( + 2BBF5A62154B64F5002B20F9 /* allocator.c */, 9676A0E00F3E755D00713ADB /* apply.c */, 965CD6340F3E806200D4E28D /* benchmark.c */, 5AAB45BF10D30B79004407EA /* data.c */, + E420866F16027AE500EEE210 /* data.m */, E44EBE3B1251659900645D88 /* init.c */, + E4B515DC164B32E000E003AF /* introspection.c */, 5A27262510F26F1900751FBC /* io.c */, 9661E56A0F3E7DDF00749F3E /* object.c */, E4FC3263145F46C9002FBDDB /* object.m */, @@ -362,6 +469,8 @@ E4EC11C312514302000DDBD1 /* libdispatch_up.a */, E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, E49F24DF125D57FA0057C971 /* libdispatch.dylib */, + E46DBC5714EE10C80001F9F6 /* libdispatch.a */, + E4B515D6164B2DA300E003AF /* libdispatch.dylib */, ); name = Products; sourceTree = ""; @@ -405,9 +514,13 @@ E43D93F11097917E004F6A62 /* libdispatch.xcconfig */, E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, + E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */, + E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, + E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */, E448727914C6215D00BB45C2 /* libdispatch.order */, 
E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */, + E421E5FD1716BEA70090DC9B /* libdispatch.interposable */, ); path = xcodeconfig; sourceTree = ""; @@ -439,6 +552,7 @@ children = ( E49F251D125D630A0057C971 /* install-manpages.sh */, E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */, + E421E5FB1716B8730090DC9B /* install-dtrace.sh */, E49F251E125D631D0057C971 /* mig-headers.sh */, E482F1CD12DBAB590030614D /* postprocess-headers.sh */, ); @@ -467,9 +581,10 @@ isa = PBXGroup; children = ( 96929D820F3EA1020041FF5D /* atomic.h */, + E4A2C9C4176019760000F809 /* atomic_llsc.h */, + E463024F1761603C00E11F4C /* atomic_sfb.h */, E4BA743913A8911B0095BDF1 /* getprogname.h */, E4128ED513BA9A1700ABB2CB /* hw_config.h */, - E4BA743A13A8911B0095BDF1 /* malloc_zone.h */, FC1832A2109923C7003403D5 /* perfmon.h */, FC1832A3109923C7003403D5 /* time.h */, FC1832A4109923C7003403D5 /* tsd.h */, @@ -491,6 +606,7 @@ 721F5C5C0F15520500FF03A6 /* semaphore.h */, FC7BED8D0E8361E600161930 /* source.h */, 96032E4C0F5CC8D100241C5F /* time.h */, + E421E5F81716ADA10090DC9B /* introspection.h */, ); name = "Public Headers"; path = dispatch; @@ -499,12 +615,14 @@ FC7BEDAF0E83626100161930 /* Private Headers */ = { isa = PBXGroup; children = ( - E4407FAD143CC984003A9E80 /* dispatch.h */, FC7BED930E8361E600161930 /* private.h */, + C913AC0E143BD34800B78976 /* data_private.h */, + E48AF55916E70FD9004105FF /* io_private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, FCEF047F0F5661960067401F /* source_private.h */, + E4ECBAA415253C25002C313C /* mach_private.h */, 961B99350F3E83980006BC96 /* benchmark.h */, - C913AC0E143BD34800B78976 /* data_private.h */, + E4B515D7164B2DFB00E003AF /* introspection_private.h */, ); name = "Private Headers"; path = private; @@ -513,6 +631,7 @@ FC7BEDB60E8363DC00161930 /* Project Headers */ = { isa = PBXGroup; children = ( + 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */, FC7BED8F0E8361E600161930 /* internal.h */, E4C1ED6E1263E714000D3C8B /* data_internal.h */, 
5A0095A110F274B0000E2A31 /* io_internal.h */, @@ -521,6 +640,7 @@ 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, E422A0D412A557B5005E5BDB /* trace.h */, + E44F9DA816543F79001DCD38 /* introspection_internal.h */, 96929D830F3EA1020041FF5D /* shims.h */, FC1832A0109923B3003403D5 /* shims */, ); @@ -544,9 +664,11 @@ FC5C9C1E0EADABE3006E462D /* group.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, + E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, 5AAB45C610D30D0C004407EA /* data.h in Headers */, 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, + E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */, C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, @@ -566,11 +688,13 @@ FC9C70E8105EC9620074F9CA /* config.h in Headers */, E422A0D512A557B5005E5BDB /* trace.h in Headers */, E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */, - E4BA743F13A8911B0095BDF1 /* malloc_zone.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, - E4407FAE143CC984003A9E80 /* dispatch.h in Headers */, E454569314746F1B00106147 /* object_private.h in Headers */, E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, + E48AF55A16E70FD9004105FF /* io_private.h in Headers */, + E4ECBAA515253C25002C313C /* mach_private.h in Headers */, + 2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */, + E44F9DAC1654400D001DCD38 /* introspection_internal.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -587,9 +711,11 @@ E49F24B1125D57FA0057C971 /* group.h in Headers */, E49F24B2125D57FA0057C971 /* once.h in Headers */, E49F24B3125D57FA0057C971 /* io.h in Headers */, + E4630252176162D300E11F4C /* atomic_sfb.h in Headers */, E49F24B4125D57FA0057C971 /* data.h in 
Headers */, E49F24B5125D57FA0057C971 /* time.h in Headers */, E49F24B6125D57FA0057C971 /* private.h in Headers */, + E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */, E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, E49F24B8125D57FA0057C971 /* source_private.h in Headers */, E49F24B9125D57FA0057C971 /* benchmark.h in Headers */, @@ -608,11 +734,44 @@ E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, - E4BA744013A8911B0095BDF1 /* malloc_zone.h in Headers */, E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */, - E4407FAF143CC984003A9E80 /* dispatch.h in Headers */, E454569414746F1B00106147 /* object_private.h in Headers */, E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */, + E4ECBAA615253D17002C313C /* mach_private.h in Headers */, + E48AF55B16E72D44004105FF /* io_private.h in Headers */, + 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */, + E43A710615783F7E0012D38D /* data_private.h in Headers */, + E44F9DAD1654400E001DCD38 /* introspection_internal.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E4B51596164B2DA300E003AF /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */, + E44F9DAF16544026001DCD38 /* internal.h in Headers */, + E421E5F91716ADA10090DC9B /* introspection.h in Headers */, + E44F9DB216544032001DCD38 /* object_internal.h in Headers */, + E44F9DB316544037001DCD38 /* queue_internal.h in Headers */, + E44F9DB51654403F001DCD38 /* source_internal.h in Headers */, + E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */, + E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, + E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, + E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, + E44F9DBE1654405B001DCD38 /* tsd.h in Headers */, + E44F9DB816544053001DCD38 /* 
atomic.h in Headers */, + E44F9DB71654404F001DCD38 /* shims.h in Headers */, + E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */, + E44F9DBF165440EF001DCD38 /* config.h in Headers */, + E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */, + E44F9DB616544043001DCD38 /* trace.h in Headers */, + E44F9DB916544056001DCD38 /* getprogname.h in Headers */, + E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */, + E44F9DC116544115001DCD38 /* object_private.h in Headers */, + E44F9DC016544115001DCD38 /* object.h in Headers */, + E44F9DAE16544022001DCD38 /* allocator_internal.h in Headers */, + E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -635,12 +794,29 @@ dependencies = ( E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */, E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, + E4B515DB164B317700E003AF /* PBXTargetDependency */, + E437F0D614F7441F00F0B997 /* PBXTargetDependency */, ); name = libdispatch; productName = libdispatch; productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */; productType = "com.apple.product-type.library.dynamic"; }; + E46DBC1A14EE10C80001F9F6 /* libdispatch static */ = { + isa = PBXNativeTarget; + buildConfigurationList = E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch static" */; + buildPhases = ( + E46DBC3E14EE10C80001F9F6 /* Sources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch static"; + productName = libdispatch; + productReference = E46DBC5714EE10C80001F9F6 /* libdispatch.a */; + productType = "com.apple.product-type.library.static"; + }; E49F24A9125D57FA0057C971 /* libdispatch no resolver */ = { isa = PBXNativeTarget; buildConfigurationList = E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */; @@ -661,6 +837,23 @@ productReference = E49F24DF125D57FA0057C971 /* libdispatch.dylib */; productType = 
"com.apple.product-type.library.dynamic"; }; + E4B51595164B2DA300E003AF /* libdispatch introspection */ = { + isa = PBXNativeTarget; + buildConfigurationList = E4B515D3164B2DA300E003AF /* Build configuration list for PBXNativeTarget "libdispatch introspection" */; + buildPhases = ( + E4B51596164B2DA300E003AF /* Headers */, + E4B515BC164B2DA300E003AF /* Sources */, + E421E5FC1716B8E10090DC9B /* Install DTrace Header */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch introspection"; + productName = libdispatch; + productReference = E4B515D6164B2DA300E003AF /* libdispatch.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; E4EC118F12514302000DDBD1 /* libdispatch up resolved */ = { isa = PBXNativeTarget; buildConfigurationList = E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */; @@ -702,7 +895,7 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0420; + LastUpgradeCheck = 0500; }; buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */; compatibilityVersion = "Xcode 3.2"; @@ -726,8 +919,10 @@ targets = ( D2AAC045055464E500DB518D /* libdispatch */, E49F24A9125D57FA0057C971 /* libdispatch no resolver */, - E4EC118F12514302000DDBD1 /* libdispatch up resolved */, E4EC121612514715000DDBD1 /* libdispatch mp resolved */, + E4EC118F12514302000DDBD1 /* libdispatch up resolved */, + E4B51595164B2DA300E003AF /* libdispatch introspection */, + E46DBC1A14EE10C80001F9F6 /* libdispatch static */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, ); @@ -777,6 +972,24 @@ shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; + E421E5FC1716B8E10090DC9B /* Install DTrace Header */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/install-dtrace.sh", + "$(SRCROOT)/src/provider.d", + ); + name = "Install DTrace Header"; + outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(PUBLIC_HEADERS_FOLDER_PATH)/introspection.d", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; E482F1C512DBAA110030614D /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; @@ -942,6 +1155,30 @@ 5A27262610F26F1900751FBC /* io.c in Sources */, C9C5F80E143C1771006DC718 /* transform.c in Sources */, E4FC3264145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */, + E420867016027AE500EEE210 /* data.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E46DBC3E14EE10C80001F9F6 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */, + E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, + E46DBC4214EE10C80001F9F6 /* init.c in Sources */, + E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, + E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, + E46DBC4514EE10C80001F9F6 /* once.c in Sources */, + E46DBC4614EE10C80001F9F6 /* apply.c in Sources */, + E46DBC4714EE10C80001F9F6 /* object.c in Sources */, + E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */, + E46DBC4914EE10C80001F9F6 /* source.c in Sources */, + E46DBC4A14EE10C80001F9F6 /* time.c in Sources */, + E46DBC4B14EE10C80001F9F6 /* data.c in Sources */, + E46DBC4C14EE10C80001F9F6 /* io.c in Sources */, + E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */, + 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */, ); runOnlyForDeploymentPostprocessing = 
0; }; @@ -965,6 +1202,34 @@ E49F24D4125D57FA0057C971 /* io.c in Sources */, C93D6165143E190E00EB9023 /* transform.c in Sources */, E4FC3265145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */, + E420867116027AE500EEE210 /* data.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E4B515BC164B2DA300E003AF /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E4B515BD164B2DA300E003AF /* provider.d in Sources */, + E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, + E4B515BF164B2DA300E003AF /* resolver.c in Sources */, + E4B515C0164B2DA300E003AF /* init.c in Sources */, + E4B515C1164B2DA300E003AF /* queue.c in Sources */, + E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, + E4B515C3164B2DA300E003AF /* once.c in Sources */, + E4B515C4164B2DA300E003AF /* apply.c in Sources */, + E4B515C5164B2DA300E003AF /* object.c in Sources */, + E4B515C6164B2DA300E003AF /* benchmark.c in Sources */, + E4B515C7164B2DA300E003AF /* source.c in Sources */, + E4B515C8164B2DA300E003AF /* time.c in Sources */, + E4B515C9164B2DA300E003AF /* data.c in Sources */, + E4B515CA164B2DA300E003AF /* io.c in Sources */, + E4B515CB164B2DA300E003AF /* transform.c in Sources */, + E4B515CC164B2DA300E003AF /* object.m in Sources */, + E4B515CD164B2DA300E003AF /* allocator.c in Sources */, + E4B515CE164B2DA300E003AF /* data.m in Sources */, + E4B515DD164B32E000E003AF /* introspection.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -988,6 +1253,8 @@ E4EC11B812514302000DDBD1 /* io.c in Sources */, C93D6166143E190F00EB9023 /* transform.c in Sources */, E4FC3266145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */, + E420867316027AE500EEE210 /* data.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1011,6 +1278,8 @@ E4EC122412514715000DDBD1 /* io.c in Sources */, C93D6167143E190F00EB9023 /* transform.c in Sources 
*/, E4FC3267145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */, + E420867216027AE500EEE210 /* data.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1027,6 +1296,11 @@ target = D2AAC045055464E500DB518D /* libdispatch */; targetProxy = E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */; }; + E437F0D614F7441F00F0B997 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E46DBC1A14EE10C80001F9F6 /* libdispatch static */; + targetProxy = E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */; + }; E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = E4EC118F12514302000DDBD1 /* libdispatch up resolved */; @@ -1037,6 +1311,11 @@ target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; targetProxy = E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */; }; + E4B515DB164B317700E003AF /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4B51595164B2DA300E003AF /* libdispatch introspection */; + targetProxy = E4B515DA164B317700E003AF /* PBXContainerItemProxy */; + }; /* End PBXTargetDependency section */ /* Begin XCBuildConfiguration section */ @@ -1078,6 +1357,20 @@ }; name = Debug; }; + E46DBC5514EE10C80001F9F6 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + buildSettings = { + }; + name = Release; + }; + E46DBC5614EE10C80001F9F6 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + buildSettings = { + }; + name = Debug; + }; E49F24D9125D57FA0057C971 /* Release */ = { isa = XCBuildConfiguration; buildSettings = { @@ -1090,6 +1383,20 @@ }; name = Debug; }; + E4B515D4164B2DA300E003AF /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */; + buildSettings = { + }; + name 
= Release; + }; + E4B515D5164B2DA300E003AF /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */; + buildSettings = { + }; + name = Debug; + }; E4EB382D1089033000C33AD4 /* Debug */ = { isa = XCBuildConfiguration; baseConfigurationReference = E43D93F11097917E004F6A62 /* libdispatch.xcconfig */; @@ -1177,6 +1484,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E46DBC5514EE10C80001F9F6 /* Release */, + E46DBC5614EE10C80001F9F6 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -1186,6 +1502,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + E4B515D3164B2DA300E003AF /* Build configuration list for PBXNativeTarget "libdispatch introspection" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E4B515D4164B2DA300E003AF /* Release */, + E4B515D5164B2DA300E003AF /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata deleted file mode 100644 index 23ad996..0000000 --- a/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata +++ /dev/null @@ -1,6 +0,0 @@ - - - - - diff --git a/man/dispatch.3 b/man/dispatch.3 index d25e083..6e5cfed 100644 --- a/man/dispatch.3 +++ b/man/dispatch.3 @@ -1,4 +1,4 @@ -.\" 
Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch 3 .Os Darwin diff --git a/man/dispatch_async.3 b/man/dispatch_async.3 index 9c09bb2..99c532d 100644 --- a/man/dispatch_async.3 +++ b/man/dispatch_async.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_async 3 .Os Darwin @@ -85,7 +85,7 @@ dispatch_async(my_queue, ^{ .Sh BACKGROUND CONCURRENCY .The .Fn dispatch_async -function may be used to execute trivial backgound tasks on a global concurrent +function may be used to execute trivial background tasks on a global concurrent queue. For example: .Bd -literal dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), ^{ diff --git a/man/dispatch_data_create.3 b/man/dispatch_data_create.3 index b941b34..b3a216e 100644 --- a/man/dispatch_data_create.3 +++ b/man/dispatch_data_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2010-2012 Apple Inc. All rights reserved. .Dd December 1, 2010 .Dt dispatch_data_create 3 .Os Darwin diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index 4b8063c..d82391e 100644 --- a/man/dispatch_group_create.3 +++ b/man/dispatch_group_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_group_create 3 .Os Darwin diff --git a/man/dispatch_io_create.3 b/man/dispatch_io_create.3 index 7af5b6d..83e5514 100644 --- a/man/dispatch_io_create.3 +++ b/man/dispatch_io_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2010-2013 Apple Inc. All rights reserved. 
.Dd December 1, 2010 .Dt dispatch_io_create 3 .Os Darwin @@ -102,32 +102,40 @@ functions create a dispatch I/O channel of provided .Fa type from a file descriptor .Fa fd -or a pathname, respectively. They can be thought of as -analogous to the +or an absolute pathname, respectively. They can be thought of as analogous to +the .Xr fdopen 3 POSIX function and the .Xr fopen 3 -function in the standard C library. For a channel created from a -pathname, the provided +function in the standard C library. For a channel created from a pathname, the +provided .Fa path , .Fa oflag and .Fa mode parameters will be passed to .Xr open 2 -when the first I/O operation on the channel is ready to execute. The provided +when the first I/O operation on the channel is ready to execute. +.Pp +The provided .Fa cleanup_handler block will be submitted to the specified .Fa queue -when all I/O operations on the channel have completed and is is closed or +when all I/O operations on the channel have completed and it is closed or reaches the end of its lifecycle. If an error occurs during channel creation, the .Fa cleanup_handler block will be submitted immediately and passed an .Fa error -parameter with the POSIX error encountered. After creating a dispatch I/O -channel from a file descriptor, the application must take care not to modify -that file descriptor until the associated +parameter with the POSIX error encountered. If an invalid +.Fa type +or a non-absolute +.Fa path +argument is specified, these functions will return NULL and the +.Fa cleanup_handler +will not be invoked. 
After successfully creating a dispatch I/O channel from a +file descriptor, the application must take care not to modify that file +descriptor until the associated .Fa cleanup_handler is invoked, see .Sx "FILEDESCRIPTOR OWNERSHIP" diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index b657abf..0ca0648 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2008 .Dt dispatch_queue_create 3 .Os Darwin @@ -22,10 +22,6 @@ .Fa "dispatch_queue_t queue" .Fc .Ft dispatch_queue_t -.Fo dispatch_get_current_queue -.Fa void -.Fc -.Ft dispatch_queue_t .Fo dispatch_get_global_queue .Fa "long priority" .Fa "unsigned long flags" @@ -50,35 +46,53 @@ the framework. .Pp All blocks submitted to dispatch queues are dequeued in FIFO order. -By default, queues created with +Queues created with the +.Dv DISPATCH_QUEUE_SERIAL +attribute wait for the previously dequeued block to complete before dequeuing +the next block. A queue with this FIFO completion behavior is usually simply +described as a "serial queue." All memory writes performed by a block dispatched +to a serial queue are guaranteed to be visible to subsequent blocks dispatched +to the same queue. Queues are not bound to any specific thread of execution and +blocks submitted to independent queues may execute concurrently. +.Pp +Queues created with the +.Dv DISPATCH_QUEUE_CONCURRENT +attribute may execute dequeued blocks concurrently and support barrier blocks +submitted with the dispatch barrier API. +.Sh CREATION +Queues are created with the .Fn dispatch_queue_create -wait for the previously dequeued block to complete before dequeuing the next -block. This FIFO completion behavior is sometimes simply described as a "serial -queue." 
All memory writes performed by a block dispatched to a serial queue are -guaranteed to be visible to subsequent blocks dispatched to the same queue. -Queues are not bound to any specific thread of execution and blocks submitted -to independent queues may execute concurrently. Queues, like all dispatch -objects, are reference counted and newly created queues have a reference count -of one. +function. Queues, like all dispatch objects, are reference counted and newly +created queues have a reference count of one. .Pp The optional .Fa label argument is used to describe the purpose of the queue and is useful during -debugging and performance analysis. By convention, clients should pass a -reverse DNS style label. -If a label is provided, it is copied. If a label is not provided, then -.Fn dispatch_queue_get_label -returns an empty C string. -For example: +debugging and performance analysis. If a label is provided, it is copied. +By convention, clients should pass a reverse DNS style label. For example: .Pp -.Bd -literal +.Bd -literal -offset indent my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", NULL); .Ed .Pp The .Fa attr -argument is reserved for future use and must be NULL. +argument specifies the type of queue to create and must be either +.Dv DISPATCH_QUEUE_SERIAL +or +.Dv DISPATCH_QUEUE_CONCURRENT . .Pp +The +.Fn dispatch_queue_get_label +function returns the label provided when the given +.Fa queue +was created (or an empty C string if no label was provided at creation). +Passing the constant +.Dv DISPATCH_CURRENT_QUEUE_LABEL +to +.Fn dispatch_queue_get_label +returns the label of the current queue. +.Sh SUSPENSION Queues may be temporarily suspended and resumed with the functions .Fn dispatch_suspend and @@ -88,14 +102,19 @@ respectively. Suspension is checked prior to block execution and is preemptive. .Sh MAIN QUEUE The dispatch framework provides a default serial queue for the application to -use. 
This queue is accessed via -.Fn dispatch_get_main_queue . +use. This queue is accessed via the +.Fn dispatch_get_main_queue +function. +.Pp Programs must call .Fn dispatch_main at the end of .Fn main -in order to process blocks submitted to the main queue. (See the compatibility -section for exceptions.) +in order to process blocks submitted to the main queue. (See the +.Sx COMPATIBILITY +section for exceptions.) The +.Fn dispatch_main +function never returns. .Sh GLOBAL CONCURRENT QUEUES Unlike the main queue or queues allocated with .Fn dispatch_queue_create , @@ -129,38 +148,6 @@ function to obtain the global queue of given priority. The .Fa flags argument is reserved for future use and must be zero. Passing any value other than zero may result in a NULL return value. -.Pp -.Sh RETURN VALUES -The -.Fn dispatch_queue_create -function returns NULL on failure. -.Pp -The -.Fn dispatch_queue_get_label -function always returns a valid C string. An empty C string is returned if the -.Fa label -was NULL creation time. -.Pp -The -.Fn dispatch_get_main_queue -function returns the default main queue. -.Pp -The -.Fn dispatch_get_current_queue -function always returns a valid queue. When called from within a block -submitted to a dispatch queue, that queue will be returned. If this function is -called from the main thread before -.Fn dispatch_main -is called, then the result of -.Fn dispatch_get_main_queue -is returned. In all other cases, the default target queue will be returned. See -the -.Sx CAVEATS -section below. -.Pp -The -.Fn dispatch_main -function never returns. .Sh TARGET QUEUE The .Fn dispatch_set_target_queue @@ -210,16 +197,33 @@ is undefined. .Pp Directly or indirectly setting the target queue of a dispatch queue to itself is undefined. 
-.Sh CAVEATS -The +.Sh DEPRECATED FUNCTIONS +The following functions are deprecated and will be removed in a future release: +.Bl -item +.It +.Ft dispatch_queue_t +.Fn dispatch_get_current_queue void ; +.El +.Pp +.Fn dispatch_get_current_queue +always returns a valid queue. When called from within a block +submitted to a dispatch queue, that queue will be returned. If this function is +called from the main thread before +.Fn dispatch_main +is called, then the result of +.Fn dispatch_get_main_queue +is returned. In all other cases, the default target queue will be returned. +.Pp +The use of .Fn dispatch_get_current_queue -function is only recommended for debugging and logging purposes. Code must not +is strongly discouraged except for debugging and logging purposes. Code must not make any assumptions about the queue returned, unless it is one of the global queues or a queue the code has itself created. The returned queue may have arbitrary policies that may surprise code that tries to schedule work with the queue. The list of policies includes, but is not limited to, queue width (i.e. serial vs. concurrent), scheduling priority, security credential or filesystem -configuration. +configuration. This function is deprecated and will be removed in a future +release. .Pp It is equally unsafe for code to assume that synchronous execution onto a queue is safe from deadlock if that queue is not the one returned by @@ -233,6 +237,17 @@ when called on the main thread. Comparing the two is not a valid way to test whether code is executing on the main thread. Foundation/AppKit programs should use [NSThread isMainThread]. POSIX programs may use .Xr pthread_main_np 3 . +.Pp +.Fn dispatch_get_current_queue +may return a queue owned by a different subsystem which has already had all +external references to it released. While such a queue will continue to exist +until all blocks submitted to it have completed, attempting to retain it is +forbidden and will trigger an assertion. 
If Objective-C Automatic Reference +Counting is enabled, any use of the object returned by +.Fn dispatch_get_current_queue +will cause retain calls to be automatically generated, so the use of +.Fn dispatch_get_current_queue +for any reason in code built with ARC is particularly strongly discouraged. .Sh COMPATIBILITY Cocoa applications need not call .Fn dispatch_main . diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index 896412b..81c2915 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_semaphore_create 3 .Os Darwin diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 89e7d51..a17e868 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2013 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_source_create 3 .Os Darwin @@ -117,6 +117,8 @@ DISPATCH_SOURCE_TYPE_MACH_SEND .It DISPATCH_SOURCE_TYPE_MACH_RECV .It +DISPATCH_SOURCE_TYPE_MEMORYPRESSURE +.It DISPATCH_SOURCE_TYPE_PROC .It DISPATCH_SOURCE_TYPE_READ @@ -170,7 +172,7 @@ function is intended for use with the and .Vt DISPATCH_SOURCE_TYPE_DATA_OR source types. The result of using this function with any other source type is -undefined. Calling this function will atomically add or logical OR the data +undefined. Calling this function will atomically add or bitwise OR the data into the source's data, and trigger the delivery of the source's event handler. .Pp .Sh SOURCE EVENT HANDLERS @@ -308,6 +310,40 @@ is unused and should be zero. The event handler block will be submitted to the target queue when a message on the mach port is waiting to be received. 
.Pp +.Vt DISPATCH_SOURCE_TYPE_MEMORYPRESSURE +.Pp +Sources of this type monitor the system memory pressure condition for state changes. +The +.Fa handle +is unused and should be zero. The +.Fa mask +may be one or more of the following: +.Bl -tag -width "XXDISPATCH_MEMORYPRESSURE_CRITICAL" -compact -offset indent +.It \(bu DISPATCH_MEMORYPRESSURE_NORMAL +The system memory pressure condition has returned to normal. +.It \(bu DISPATCH_MEMORYPRESSURE_WARN +The system memory pressure condition has changed to warning. +.It \(bu DISPATCH_MEMORYPRESSURE_CRITICAL +The system memory pressure condition has changed to critical. +.El +.Pp +The data returned by +.Fn dispatch_source_get_data +indicates which of the events in the +.Fa mask +were observed. +.Pp +Elevated memory pressure is a system-wide condition that applications +registered for this source should react to by changing their future memory use +behavior, e.g. by reducing cache sizes of newly initiated operations until +memory pressure returns back to normal. +.Pp +However, applications should +.Em NOT +traverse and discard existing caches for past operations when the system memory +pressure enters an elevated state, as that is likely to trigger VM operations +that will further aggravate system memory pressure. +.Pp .Vt DISPATCH_SOURCE_TYPE_PROC .Pp Sources of this type monitor processes for state changes. @@ -327,9 +363,6 @@ The process has become another executable image via a call to .Xr execve 2 or .Xr posix_spawn 2 . -.It \(bu DISPATCH_PROC_REAP -The process status has been collected by its parent process via -.Xr wait 2 . .It \(bu DISPATCH_PROC_SIGNAL A signal was delivered to the process. .El @@ -397,44 +430,71 @@ signal(SIGTERM, SIG_IGN); .Vt DISPATCH_SOURCE_TYPE_TIMER .Pp Sources of this type periodically submit the event handler block to the target -queue on an interval specified by -.Fn dispatch_source_set_timer . -The +queue. The .Fa handle -and -.Fa mask -arguments are unused and should be zero. 
-.Pp -A best effort attempt is made to submit the event handler block to the target -queue at the specified time; however, actual invocation may occur at a later -time. +argument is unused and should be zero. .Pp The data returned by .Fn dispatch_source_get_data is the number of times the timer has fired since the last invocation of the event handler block. .Pp -The function +The timer parameters are configured with the .Fn dispatch_source_set_timer -takes as an argument the +function. Once this function returns, any pending source data accumulated for +the previous timer parameters has been cleared; the next fire of the timer will +occur at +.Fa start , +and every +.Fa interval +nanoseconds thereafter until the timer source is canceled. +.Pp +Any fire of the timer may be delayed by the system in order to improve power +consumption and system performance. The upper limit to the allowable delay may +be configured with the +.Fa leeway +argument, the lower limit is under the control of the system. +.Pp +For the initial timer fire at +.Fa start , +the upper limit to the allowable delay is set to +.Fa leeway +nanoseconds. For the subsequent timer fires at .Fa start -time of the timer (initial fire time) represented as a -.Vt dispatch_time_t . -The timer dispatch source will use the same clock as the function used to -create this value. (See -.Xr dispatch_time 3 -for more information.) The +.Li "+ N *" .Fa interval , -in nanoseconds, specifies the period at which the timer should repeat. All -timers will repeat indefinitely until -.Fn dispatch_source_cancel -is called. The +the upper limit is +.Li MIN( .Fa leeway , -in nanoseconds, is a hint to the system that it may defer the timer in order to -align with other system activity for improved system performance or reduced -power consumption. (For example, an application might perform a periodic task -every 5 minutes with a leeway of up to 30 seconds.) 
Note that some latency is -to be expected for all timers even when a value of zero is used. +.Fa interval +.Li "/ 2 )" . +.Pp +The lower limit to the allowable delay may vary with process state such as +visibility of application UI. If the specified timer source was created with a +.Fa mask +of +.Vt DISPATCH_TIMER_STRICT , +the system will make a best effort to strictly observe the provided +.Fa leeway +value even if it is smaller than the current lower limit. Note that a minimal +amount of delay is to be expected even if this flag is specified. +.Pp +The +.Fa start +argument also determines which clock will be used for the timer: If +.Fa start +is +.Vt DISPATCH_TIME_NOW +or was created with +.Xr dispatch_time 3 , +the timer is based on +.Fn mach_absolute_time . +If +.Fa start +was created with +.Xr dispatch_walltime 3 , +the timer is based on +.Xr gettimeofday 3 . .Pp .Em Note : Under the C language, untyped numbers default to the diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index cb65dc5..e318e90 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2013 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_time 3 .Os Darwin @@ -80,6 +80,24 @@ parameter is ignored. .Pp Underflow causes the smallest representable value to be returned for a given clock. +.Sh CAVEATS +Under the C language, untyped numbers default to the +.Vt int +type. This can lead to truncation bugs when arithmetic operations with other +numbers are expected to generate a +.Vt int64_t +sized result, such as the +.Fa offset +argument to +.Fn dispatch_time +and +.Fn dispatch_walltime . +When in doubt, use +.Vt ull +as a suffix. 
For example: +.Bd -literal -offset indent +3ull * NSEC_PER_SEC +.Ed .Sh EXAMPLES Create a milestone two seconds in the future: .Bd -literal -offset indent @@ -102,8 +120,9 @@ milestone = dispatch_walltime(&ts, 0); These functions return an abstract value for use with .Fn dispatch_after , .Fn dispatch_group_wait , +.Fn dispatch_semaphore_wait , or -.Fn dispatch_semaphore_wait . +.Fn dispatch_source_set_timer . .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_after 3 , diff --git a/os/object.h b/os/object.h index 4ddfa89..f8d23a3 100644 --- a/os/object.h +++ b/os/object.h @@ -45,7 +45,7 @@ * * This mode requires a platform with the modern Objective-C runtime, the * Objective-C GC compiler option to be disabled, and at least a Mac OS X 10.8 - * deployment target. + * or iOS 6.0 deployment target. */ #ifndef OS_OBJECT_HAVE_OBJC_SUPPORT @@ -71,7 +71,7 @@ #endif #if OS_OBJECT_USE_OBJC -#import +#import #define OS_OBJECT_CLASS(name) OS_##name #define OS_OBJECT_DECL(name, ...) \ @protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \ @@ -79,23 +79,36 @@ typedef NSObject *name##_t #define OS_OBJECT_DECL_SUBCLASS(name, super) \ OS_OBJECT_DECL(name, ) -#if defined(__has_attribute) && __has_attribute(ns_returns_retained) +#if defined(__has_attribute) +#if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) #else #define OS_OBJECT_RETURNS_RETAINED #endif -#if defined(__has_feature) && __has_feature(objc_arc) +#else +#define OS_OBJECT_RETURNS_RETAINED +#endif +#if defined(__has_feature) +#if __has_feature(objc_arc) #define OS_OBJECT_BRIDGE __bridge #else #define OS_OBJECT_BRIDGE #endif +#else +#define OS_OBJECT_BRIDGE +#endif #ifndef OS_OBJECT_USE_OBJC_RETAIN_RELEASE -#if defined(__has_feature) && __has_feature(objc_arc) || \ - defined(__clang_analyzer__) +#if defined(__clang_analyzer__) +#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1 +#elif defined(__has_feature) +#if __has_feature(objc_arc) #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1 
#else #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 #endif +#else +#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 +#endif #endif #else /*! @parseOnly */ diff --git a/os/object_private.h b/os/object_private.h index 235e0d3..f5d3268 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -53,11 +53,15 @@ #define OS_OBJECT_EXPORT extern #endif -#if OS_OBJECT_USE_OBJC && defined(__has_feature) && __has_feature(objc_arc) +#if OS_OBJECT_USE_OBJC && defined(__has_feature) +#if __has_feature(objc_arc) #define _OS_OBJECT_OBJC_ARC 1 #else #define _OS_OBJECT_OBJC_ARC 0 #endif +#else +#define _OS_OBJECT_OBJC_ARC 0 +#endif #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX @@ -103,6 +107,11 @@ OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW _os_object_t _os_object_alloc(const void *cls, size_t size); +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW +_os_object_t +_os_object_alloc_realized(const void *cls, size_t size); + __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW void _os_object_dealloc(_os_object_t object); diff --git a/private/Makefile.am b/private/Makefile.am index f8d0ec7..de12391 100644 --- a/private/Makefile.am +++ b/private/Makefile.am @@ -5,7 +5,9 @@ noinst_HEADERS= \ benchmark.h \ data_private.h \ - dispatch.h \ + introspection_private.h \ + io_private.h \ + mach_private.h \ private.h \ queue_private.h \ source_private.h diff --git a/private/data_private.h b/private/data_private.h index 6562b37..df60d28 100644 --- a/private/data_private.h +++ b/private/data_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Inc. All rights reserved. + * Copyright (c) 2011-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -34,17 +34,15 @@ __BEGIN_DECLS -#ifdef __BLOCKS__ - /*! 
* @const DISPATCH_DATA_DESTRUCTOR_NONE - * @discussion The destructor for dispatch data objects that require no - * management. This can be used to allow a data object to efficiently - * encapsulate data that should not be copied or freed by the system. + * @discussion The destructor for dispatch data objects that require no buffer + * memory management. This can be used to allow a data object to efficiently + * encapsulate buffers that should not be copied or freed by the system. */ #define DISPATCH_DATA_DESTRUCTOR_NONE (_dispatch_data_destructor_none) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_none; +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none); /*! * @const DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE @@ -54,7 +52,124 @@ DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_none; #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \ (_dispatch_data_destructor_vm_deallocate) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_vm_deallocate; +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate); + +/*! + * @function dispatch_data_create_f + * Creates a dispatch data object from the given contiguous buffer of memory. If + * a non-default destructor is provided, ownership of the buffer remains with + * the caller (i.e. the bytes will not be copied). The last release of the data + * object will result in the invocation of the specified destructor function on + * specified queue to free the buffer (passed as the context parameter). + * + * If the DISPATCH_DATA_DESTRUCTOR_FREE destructor is provided the buffer will + * be freed via free(3) and the queue argument ignored. + * + * If the DISPATCH_DATA_DESTRUCTOR_DEFAULT destructor is provided, data object + * creation will copy the buffer into internal memory managed by the system. + * + * @param buffer A contiguous buffer of data. + * @param size The size of the contiguous buffer of data. 
+ * @param queue The queue to which the destructor should be submitted. + * @param destructor The destructor function responsible for freeing the + * data buffer when it is no longer needed. + * @result A newly created dispatch data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_f(const void *buffer, + size_t size, + dispatch_queue_t queue, + dispatch_function_t destructor); + +/*! + * @function dispatch_data_create_alloc + * Creates a dispatch data object representing a newly allocated memory region + * of the given size. If a non-NULL reference to a pointer is provided, it is + * filled with the location of the memory region. + * + * It is the responsibility of the application to ensure that the data object + * becomes immutable (i.e. the returned memory region is not further modified) + * once the dispatch data object is passed to other API. + * + * @param size The size of the required allocation. + * @param buffer_ptr A pointer to a pointer variable to be filled with the + * location of the newly allocated memory region, or NULL. + * @result A newly created dispatch data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_alloc(size_t size, void** buffer_ptr); + +/*! + * @typedef dispatch_data_applier_function_t + * A function to be invoked for every contiguous memory region in a data object. + * + * @param context Application-defined context parameter. + * @param region A data object representing the current region. + * @param offset The logical offset of the current region to the start + * of the data object. + * @param buffer The location of the memory for the current region. + * @param size The size of the memory for the current region. 
+ * @result A Boolean indicating whether traversal should continue. + */ +typedef bool (*dispatch_data_applier_function_t)(void *context, + dispatch_data_t region, size_t offset, const void *buffer, size_t size); + +/*! + * @function dispatch_data_apply_f + * Traverse the memory regions represented by the specified dispatch data object + * in logical order and invoke the specified function once for every contiguous + * memory region encountered. + * + * Each invocation of the function is passed a data object representing the + * current region and its logical offset, along with the memory location and + * extent of the region. These allow direct read access to the memory region, + * but are only valid until the passed-in region object is released. Note that + * the region object is released by the system when the function returns, it is + * the responsibility of the application to retain it if the region object or + * the associated memory location are needed after the function returns. + * + * @param data The data object to traverse. + * @param context The application-defined context to pass to the function. + * @param applier The function to be invoked for every contiguous memory + * region in the data object. + * @result A Boolean indicating whether traversal completed + * successfully. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +bool +dispatch_data_apply_f(dispatch_data_t data, void *context, + dispatch_data_applier_function_t applier); + +#if TARGET_OS_MAC +/*! + * @function dispatch_data_make_memory_entry + * Return a mach memory entry for the memory regions represented by the + * specified dispatch data object. + * + * For data objects created with the DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE + * destructor, directly makes a memory entry from the represented region; + * otherwise, makes a memory entry from newly allocated pages containing a copy + * of the represented memory regions. 
+ * + * @param data The data object to make a memory entry for. + * @result A mach port for the newly made memory entry, or + * MACH_PORT_NULL if an error occurred. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +mach_port_t +dispatch_data_make_memory_entry(dispatch_data_t dd); +#endif + +/*! + * @functiongroup Dispatch data transform SPI + */ /*! * @typedef dispatch_data_format_type_t @@ -65,6 +180,16 @@ DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_vm_deallocate; */ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; +#if !TARGET_OS_WIN32 +#define DISPATCH_DATA_FORMAT_TYPE_DECL(name) \ + DISPATCH_EXPORT const struct dispatch_data_format_type_s \ + _dispatch_data_format_type_##name +#else +#define DISPATCH_DATA_FORMAT_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_data_format_type_s \ + _dispatch_data_format_type_##name +#endif + /*! * @const DISPATCH_DATA_FORMAT_TYPE_NONE * @discussion A data format denoting that the given input or output format is, @@ -72,8 +197,7 @@ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; */ #define DISPATCH_DATA_FORMAT_TYPE_NONE (&_dispatch_data_format_type_none) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_none; +DISPATCH_DATA_FORMAT_TYPE_DECL(none); /*! * @const DISPATCH_DATA_FORMAT_TYPE_BASE32 @@ -84,8 +208,19 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_none; */ #define DISPATCH_DATA_FORMAT_TYPE_BASE32 (&_dispatch_data_format_type_base32) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_base32; +DISPATCH_DATA_FORMAT_TYPE_DECL(base32); + +/*!
+ * @const DISPATCH_DATA_FORMAT_TYPE_BASE32HEX + * @discussion A data format denoting that the given input or output format is, + * or should be, encoded in Base32Hex (RFC 4648) format. On input, this format + * will skip whitespace characters. Cannot be used in conjunction with UTF + * format types. + */ +#define DISPATCH_DATA_FORMAT_TYPE_BASE32HEX \ + (&_dispatch_data_format_type_base32hex) +__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex); /*! * @const DISPATCH_DATA_FORMAT_TYPE_BASE64 @@ -96,8 +231,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_base32; */ #define DISPATCH_DATA_FORMAT_TYPE_BASE64 (&_dispatch_data_format_type_base64) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_base64; +DISPATCH_DATA_FORMAT_TYPE_DECL(base64); /*! * @const DISPATCH_DATA_FORMAT_TYPE_UTF8 @@ -107,8 +241,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_base64; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF8 (&_dispatch_data_format_type_utf8) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf8; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf8); /*! * @const DISPATCH_DATA_FORMAT_TYPE_UTF16LE @@ -118,8 +251,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_utf8; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF16LE (&_dispatch_data_format_type_utf16le) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16le; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le); /*! 
* @const DISPATCH_DATA_FORMAT_TYPE_UTF16BE @@ -129,8 +261,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16le; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF16BE (&_dispatch_data_format_type_utf16be) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16be; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be); /*! * @const DISPATCH_DATA_FORMAT_TYPE_UTFANY @@ -142,8 +273,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16be; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF_ANY (&_dispatch_data_format_type_utf_any) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf_any; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); /*! * @function dispatch_data_create_transform @@ -171,8 +301,6 @@ dispatch_data_create_with_transform(dispatch_data_t data, dispatch_data_format_type_t input_type, dispatch_data_format_type_t output_type); -#endif /* __BLOCKS__ */ - __END_DECLS #endif // __DISPATCH_DATA_PRIVATE__ diff --git a/private/dispatch.h b/private/dispatch.h deleted file mode 100644 index 3f1f374..0000000 --- a/private/dispatch.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2011 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -/* - * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch - * which are subject to change in future releases of Mac OS X. Any applications - * relying on these interfaces WILL break. - */ - -#ifndef __DISPATCH_PRIVATE_LEGACY__ -#define __DISPATCH_PRIVATE_LEGACY__ - -#define DISPATCH_NO_LEGACY 1 -#ifdef DISPATCH_LEGACY // -#error "Dispatch legacy API unavailable." -#endif - -#ifndef __DISPATCH_BUILDING_DISPATCH__ -#include_next -#endif - -#endif // __DISPATCH_PRIVATE_LEGACY__ diff --git a/private/introspection_private.h b/private/introspection_private.h new file mode 100644 index 0000000..727d971 --- /dev/null +++ b/private/introspection_private.h @@ -0,0 +1,727 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_INTROSPECTION_PRIVATE__ +#define __DISPATCH_INTROSPECTION_PRIVATE__ + +/*! + * @header + * + * @abstract + * Introspection SPI for libdispatch. 
+ * + * @discussion + * This SPI is only available in the introspection version of the library, + * loaded by running a process with the environment variable + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection + * + * NOTE: these functions are _not_ exported from the shared library, they are + * only intended to be called from a debugger context while the rest of the + * process is suspended. + */ + +#ifndef __BEGIN_DECLS +#if defined(__cplusplus) +#define __BEGIN_DECLS extern "C" { +#define __END_DECLS } +#else +#define __BEGIN_DECLS +#define __END_DECLS +#endif +#endif + +__BEGIN_DECLS + +#ifndef __DISPATCH_INDIRECT__ +/* + * Typedefs of opaque types, for direct inclusion of header in lldb expressions + */ +typedef __typeof__(sizeof(int)) size_t; +typedef struct _opaque_pthread_t *pthread_t; +typedef void (*dispatch_function_t)(void *); +typedef struct Block_layout *dispatch_block_t; +typedef struct dispatch_continuation_s *dispatch_continuation_t; +typedef struct dispatch_queue_s *dispatch_queue_t; +typedef struct dispatch_source_s *dispatch_source_t; +typedef struct dispatch_group_s *dispatch_group_t; +typedef struct dispatch_object_s *dispatch_object_t; +#endif + +/*! + * @typedef dispatch_introspection_versions_s + * + * @abstract + * A structure of version and size information of introspection structures. + * + * @field introspection_version + * Version of overall dispatch_introspection SPI. + * + * @field queue_version + * Version of dispatch_introspection_queue_s structure. + * + * @field queue_size + * Size of dispatch_introspection_queue_s structure. + * + * @field source_version + * Version of dispatch_introspection_source_s structure. + * + * @field source_size + * Size of dispatch_introspection_source_s structure. 
+ */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT const struct dispatch_introspection_versions_s { + unsigned long introspection_version; + unsigned long hooks_version; + size_t hooks_size; + unsigned long queue_item_version; + size_t queue_item_size; + unsigned long queue_block_version; + size_t queue_block_size; + unsigned long queue_function_version; + size_t queue_function_size; + unsigned long queue_thread_version; + size_t queue_thread_size; + unsigned long object_version; + size_t object_size; + unsigned long queue_version; + size_t queue_size; + unsigned long source_version; + size_t source_size; +} dispatch_introspection_versions; + +/*! + * @typedef dispatch_introspection_queue_block_s + * + * @abstract + * A structure of introspection information for a block item enqueued on a + * dispatch queue. + * + * @field continuation + * Pointer to enqueued item. + * + * @field target_queue + * Target queue of item (may be different to the queue the item is currently + * enqueued on). + * + * @field block + * Block for enqueued item. + * + * @field block_invoke + * Function pointer of block for enqueued item. + * + * @field group + * Group containing enqueued item (may be NULL). + * + * @field waiter + * Thread waiting for completion of enqueued item (NULL if sync == 0). + * + * @field barrier + * Item is a barrier on the queue (all items on serial queues are barriers). + * + * @field sync + * Item was enqueued by a dispatch_sync/dispatch_barrier_sync. + * + * @field apply + * Item is part of a dispatch_apply. + */ +typedef struct dispatch_introspection_queue_block_s { + dispatch_continuation_t continuation; + dispatch_queue_t target_queue; + dispatch_block_t block; + dispatch_function_t block_invoke; + dispatch_group_t group; + pthread_t waiter; + unsigned long barrier:1, + sync:1, + apply:1; +} dispatch_introspection_queue_block_s; +typedef dispatch_introspection_queue_block_s + *dispatch_introspection_queue_block_t; + +/*! 
+ * @typedef dispatch_introspection_queue_function_s + * + * @abstract + * A structure of introspection information for a function & context pointer + * item enqueued on a dispatch queue. + * + * @field continuation + * Pointer to enqueued item. + * + * @field target_queue + * Target queue of item (may be different to the queue the item is currently + * enqueued on). + * + * @field context + * Context in enqueued item. + * + * @field block_invoke + * Function pointer in enqueued item. + * + * @field group + * Group containing enqueued item (may be NULL). + * + * @field waiter + * Thread waiting for completion of enqueued item (NULL if sync == 0). + * + * @field barrier + * Item is a barrier on the queue (all items on serial queues are barriers). + * + * @field sync + * Item was enqueued by a dispatch_sync_f/dispatch_barrier_sync_f. + * + * @field apply + * Item is part of a dispatch_apply_f. + */ +typedef struct dispatch_introspection_queue_function_s { + dispatch_continuation_t continuation; + dispatch_queue_t target_queue; + void *context; + dispatch_function_t function; + dispatch_group_t group; + pthread_t waiter; + unsigned long barrier:1, + sync:1, + apply:1; +} dispatch_introspection_queue_function_s; +typedef dispatch_introspection_queue_function_s + *dispatch_introspection_queue_function_t; + +/*! + * @typedef dispatch_introspection_object_s + * + * @abstract + * A structure of introspection information for a generic dispatch object. + * + * @field object + * Pointer to object. + * + * @field target_queue + * Target queue of object (may be different to the queue the object is + * currently enqueued on). + * + * @field type + * Object class pointer. + * + * @field kind + * String describing the object type. 
+ */ +typedef struct dispatch_introspection_object_s { + dispatch_continuation_t object; + dispatch_queue_t target_queue; + void *type; + const char *kind; +} dispatch_introspection_object_s; +typedef dispatch_introspection_object_s *dispatch_introspection_object_t; + +/*! + * @typedef dispatch_introspection_queue_s + * + * @abstract + * A structure of introspection information for a dispatch queue. + * + * @field queue + * Pointer to queue object. + * + * @field target_queue + * Target queue of queue (may be different to the queue the queue is currently + * enqueued on). NULL indicates queue is a root queue. + * + * @field label + * Pointer to queue label. + * + * @field serialnum + * Queue serial number (unique per process). + * + * @field width + * Queue width (1: serial queue, UINT_MAX: concurrent queue). + * + * @field suspend_count + * Number of times the queue has been suspended. + * + * @field enqueued + * Queue is enqueued on another queue. + * + * @field barrier + * Queue is executing a barrier item. + * + * @field draining + * Queue is being drained (cannot get queue items). + * + * @field global + * Queue is a global queue. + * + * @field main + * Queue is the main queue. + */ +typedef struct dispatch_introspection_queue_s { + dispatch_queue_t queue; + dispatch_queue_t target_queue; + const char *label; + unsigned long serialnum; + unsigned int width; + unsigned int suspend_count; + unsigned long enqueued:1, + barrier:1, + draining:1, + global:1, + main:1; +} dispatch_introspection_queue_s; +typedef dispatch_introspection_queue_s *dispatch_introspection_queue_t; + +/*! + * @typedef dispatch_introspection_source_s + * + * @abstract + * A structure of introspection information for a dispatch source. + * + * @field source + * Pointer to source object. + * + * @field target_queue + * Target queue of source (may be different to the queue the source is currently + * enqueued on). 
+ * + * @field type + * Source type (kevent filter) + * + * @field handle + * Source handle (monitored entity). + * + * @field context + * Context pointer passed to source handler. Pointer to handler block if + * handler_is_block == 1. + * + * @field handler + * Source handler function. Function pointer of handler block if + * handler_is_block == 1. + * + * @field suspend_count + * Number of times the source has been suspended. + * + * @field enqueued + * Source is enqueued on a queue. + * + * @field handler_is_block + * Source handler is a block. + * + * @field timer + * Source is a timer. + * + * @field after + * Source is a dispatch_after timer. + */ +typedef struct dispatch_introspection_source_s { + dispatch_source_t source; + dispatch_queue_t target_queue; + unsigned long type; + unsigned long handle; + void *context; + dispatch_function_t handler; + unsigned int suspend_count; + unsigned long enqueued:1, + handler_is_block:1, + timer:1, + after:1; +} dispatch_introspection_source_s; +typedef dispatch_introspection_source_s *dispatch_introspection_source_t; + +/*! + * @typedef dispatch_introspection_queue_thread_s + * + * @abstract + * A structure of introspection information about a thread executing items for + * a dispatch queue. + * + * @field object + * Pointer to thread object. + * + * @field thread + * Thread executing items for a queue. + * + * @field queue + * Queue introspection information. The queue.queue field is NULL if this thread + * is not currently executing items for a queue. + */ +typedef struct dispatch_introspection_queue_thread_s { + dispatch_continuation_t object; + pthread_t thread; + dispatch_introspection_queue_s queue; +} dispatch_introspection_queue_thread_s; +typedef dispatch_introspection_queue_thread_s + *dispatch_introspection_queue_thread_t; + +/*! + * @enum dispatch_introspection_queue_item_type + * + * @abstract + * Types of items enqueued on a dispatch queue. 
 + */ +enum dispatch_introspection_queue_item_type { + dispatch_introspection_queue_item_type_none = 0x0, + dispatch_introspection_queue_item_type_block = 0x11, + dispatch_introspection_queue_item_type_function = 0x12, + dispatch_introspection_queue_item_type_object = 0x100, + dispatch_introspection_queue_item_type_queue = 0x101, + dispatch_introspection_queue_item_type_source = 0x102, +}; + +/*! + * @typedef dispatch_introspection_queue_item_s + * + * @abstract + * A structure of introspection information about an item enqueued on a + * dispatch queue. + * + * @field type + * Indicates which of the union members applies to this item. + */ +typedef struct dispatch_introspection_queue_item_s { + unsigned long type; // dispatch_introspection_queue_item_type + union { + dispatch_introspection_queue_block_s block; + dispatch_introspection_queue_function_s function; + dispatch_introspection_object_s object; + dispatch_introspection_queue_s queue; + dispatch_introspection_source_s source; + }; +} dispatch_introspection_queue_item_s; +typedef dispatch_introspection_queue_item_s + *dispatch_introspection_queue_item_t; + +/*! + * @typedef dispatch_introspection_hook_queue_create_t + * + * @abstract + * A function pointer called when a dispatch queue is created. + * + * @param queue_info + * Pointer to queue introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_create_t)( + dispatch_introspection_queue_t queue_info); + +/*! + * @typedef dispatch_introspection_hook_queue_dispose_t + * + * @abstract + * A function pointer called when a dispatch queue is destroyed. + * + * @param queue_info + * Pointer to queue introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_dispose_t)( + dispatch_introspection_queue_t queue_info); + +/*! + * @typedef dispatch_introspection_hook_queue_item_enqueue_t + * + * @abstract + * A function pointer called when an item is enqueued onto a dispatch queue. + * + * @param queue + * Pointer to queue. 
 + * + * @param item + * Pointer to item introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_item_enqueue_t)( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +/*! + * @typedef dispatch_introspection_hook_queue_item_dequeue_t + * + * @abstract + * A function pointer called when an item is dequeued from a dispatch queue. + * + * @param queue + * Pointer to queue. + * + * @param item + * Pointer to item introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_item_dequeue_t)( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +/*! + * @typedef dispatch_introspection_hooks_s + * + * @abstract + * A structure of function pointer hooks into libdispatch. + */ + +typedef struct dispatch_introspection_hooks_s { + dispatch_introspection_hook_queue_create_t queue_create; + dispatch_introspection_hook_queue_dispose_t queue_dispose; + dispatch_introspection_hook_queue_item_enqueue_t queue_item_enqueue; + dispatch_introspection_hook_queue_item_dequeue_t queue_item_dequeue; + void *_reserved[6]; +} dispatch_introspection_hooks_s; +typedef dispatch_introspection_hooks_s *dispatch_introspection_hooks_t; + +/*! + * @function dispatch_introspection_get_queues + * + * @abstract + * Retrieve introspection information about all dispatch queues in the process, + * in batches of specified size. + * + * @discussion + * Retrieving queue information and iterating through the list of all queues + * must take place from a debugger context (while the rest of the process is + * suspended). + * + * @param start + * Starting point for this batch of queue information, as returned by a previous + * call to _dispatch_introspection_get_queues(). + * Pass NULL to retrieve the initial batch. + * + * @param count + * Number of queues to introspect. + * + * @param queues + * Array to fill with queue information. 
If less than 'count' queues are left + * in this batch, the end of valid entries in the array will be indicated + * by an entry with NULL queue member. + * + * @result + * Queue to pass to another call to _dispatch_introspection_get_queues() to + * retrieve information about the next batch of queues. May be NULL if there + * are no more queues to iterate over. + */ +extern dispatch_queue_t +dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, + dispatch_introspection_queue_t queues); + +/*! + * @function dispatch_introspection_get_queue_threads + * + * @abstract + * Retrieve introspection information about all threads in the process executing + * items for dispatch queues, in batches of specified size. + * + * @discussion + * Retrieving thread information and iterating through the list of all queue + * threads must take place from a debugger context (while the rest of the + * process is suspended). + * + * @param start + * Starting point for this batch of thread information, as returned by a + * previous call to _dispatch_introspection_get_queue_threads(). + * Pass NULL to retrieve the initial batch. + * + * @param count + * Number of queue threads to introspect. + * + * @param threads + * Array to fill with queue thread information. If less than 'count' threads are + * left in this batch, the end of valid entries in the array will be indicated + * by an entry with NULL object member. + * + * @result + * Object to pass to another call to _dispatch_introspection_get_queues() to + * retrieve information about the next batch of queues. May be NULL if there + * are no more queues to iterate over. + */ +extern dispatch_continuation_t +dispatch_introspection_get_queue_threads(dispatch_continuation_t start, + size_t count, dispatch_introspection_queue_thread_t threads); + +/*! 
+ * @function dispatch_introspection_queue_get_items + * + * @abstract + * Retrieve introspection information about all items enqueued on a queue, in + * batches of specified size. + * + * @discussion + * Retrieving queue item information and iterating through a queue must take + * place from a debugger context (while the rest of the process is suspended). + * + * @param queue + * Queue to introspect. + * + * @param start + * Starting point for this batch of queue item information, as returned by a + * previous call to _dispatch_introspection_queue_get_items(). + * Pass NULL to retrieve the initial batch. + * + * @param count + * Number of items to introspect. + * + * @param items + * Array to fill with queue item information. If less than 'count' queues are + * left in this batch, the end of valid entries in the array will be indicated + * by an entry with type dispatch_introspection_queue_item_type_none. + * + * @result + * Item to pass to another call to _dispatch_introspection_queue_get_items() to + * retrieve information about the next batch of queue items. May be NULL if + * there are no more items to iterate over. + */ +extern dispatch_continuation_t +dispatch_introspection_queue_get_items(dispatch_queue_t queue, + dispatch_continuation_t start, size_t count, + dispatch_introspection_queue_item_t items); + +/*! + * @function dispatch_introspection_queue_get_info + * + * @abstract + * Retrieve introspection information about a specified dispatch queue. + * + * @discussion + * Retrieving queue information must take place from a debugger context (while + * the rest of the process is suspended). + * + * @param queue + * Queue to introspect. + * + * @result + * Queue information struct. + */ +extern dispatch_introspection_queue_s +dispatch_introspection_queue_get_info(dispatch_queue_t queue); + +/*! + * @function dispatch_introspection_queue_item_get_info + * + * @abstract + * Retrieve introspection information about a specified dispatch queue item. 
+ * + * @discussion + * Retrieving queue item information must take place from a debugger context + * (while the rest of the process is suspended). + * + * @param queue + * Queue to introspect. + * + * @param item + * Item to introspect. + * + * @result + * Queue item information struct. + */ +extern dispatch_introspection_queue_item_s +dispatch_introspection_queue_item_get_info(dispatch_queue_t queue, + dispatch_continuation_t item); + +/*! + * @function dispatch_introspection_hooks_install + * + * @abstract + * Install hook functions into libdispatch. + * + * @discussion + * Installing hook functions must take place from a debugger context (while the + * rest of the process is suspended). + * + * The caller is responsible for implementing chaining to the hooks that were + * previously installed (if any). + * + * @param hooks + * Pointer to structure of hook function pointers. Any of the structure members + * may be NULL to indicate that the hook in question should not be installed. + * The structure is copied on input and filled with the previously installed + * hooks on output. + */ + +extern void +dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks); + +/*! + * @function dispatch_introspection_hook_callouts_enable + * + * @abstract + * Enable hook callout functions in libdispatch that a debugger can break on + * and get introspection arguments even if there are no hook functions + * installed via dispatch_introspection_hooks_install(). + * + * @discussion + * Enabling hook callout functions must take place from a debugger context + * (while the rest of the process is suspended). + * + * @param enable + * Pointer to dispatch_introspection_hooks_s structure. For every structure + * member with (any) non-NULL value, the corresponding hook callout will be + * enabled; for every NULL member the hook callout will be disabled (if there + * is no hook function installed). 
+ * As a convenience, the 'enable' pointer may itself be NULL to indicate that + * all hook callouts should be enabled. + */ + +extern void +dispatch_introspection_hook_callouts_enable( + dispatch_introspection_hooks_t enable); + +/*! + * @function dispatch_introspection_hook_callout_queue_create + * + * @abstract + * Callout to queue creation hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_create( + dispatch_introspection_queue_t queue_info); + +/*! + * @function dispatch_introspection_hook_callout_queue_dispose + * + * @abstract + * Callout to queue destruction hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_dispose( + dispatch_introspection_queue_t queue_info); + +/*! + * @function dispatch_introspection_hook_callout_queue_item_enqueue + * + * @abstract + * Callout to queue enqueue hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_item_enqueue( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +/*! + * @function dispatch_introspection_hook_callout_queue_item_dequeue + * + * @abstract + * Callout to queue dequeue hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_item_dequeue( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +__END_DECLS + +#endif diff --git a/private/io_private.h b/private/io_private.h new file mode 100644 index 0000000..c35b41f --- /dev/null +++ b/private/io_private.h @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_IO_PRIVATE__ +#define __DISPATCH_IO_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +/*! + * @function dispatch_read_f + * Schedule a read operation for asynchronous execution on the specified file + * descriptor. The specified handler is enqueued with the data read from the + * file descriptor when the operation has completed or an error occurs. + * + * The data object passed to the handler will be automatically released by the + * system when the handler returns. It is the responsibility of the application + * to retain, concatenate or copy the data object if it is needed after the + * handler returns. + * + * The data object passed to the handler will only contain as much data as is + * currently available from the file descriptor (up to the specified length). + * + * If an unrecoverable error occurs on the file descriptor, the handler will be + * enqueued with the appropriate error code along with a data object of any data + * that could be read successfully. + * + * An invocation of the handler with an error code of zero and an empty data + * object indicates that EOF was reached. 
+ * + * The system takes control of the file descriptor until the handler is + * enqueued, and during this time file descriptor flags such as O_NONBLOCK will + * be modified by the system on behalf of the application. It is an error for + * the application to modify a file descriptor directly while it is under the + * control of the system, but it may create additional dispatch I/O convenience + * operations or dispatch I/O channels associated with that file descriptor. + * + * @param fd The file descriptor from which to read the data. + * @param length The length of data to read from the file descriptor, + * or SIZE_MAX to indicate that all of the data currently + * available from the file descriptor should be read. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param handler The handler to enqueue when data is ready to be + * delivered. + * @param context Application-defined context parameter. + * @param data The data read from the file descriptor. + * @param error An errno condition for the read operation or + * zero if the read was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW +void +dispatch_read_f(dispatch_fd_t fd, + size_t length, + dispatch_queue_t queue, + void *context, + void (*handler)(void *context, dispatch_data_t data, int error)); + +/*! + * @function dispatch_write_f + * Schedule a write operation for asynchronous execution on the specified file + * descriptor. The specified handler is enqueued when the operation has + * completed or an error occurs. + * + * If an unrecoverable error occurs on the file descriptor, the handler will be + * enqueued with the appropriate error code along with the data that could not + * be successfully written. 
+ * + * An invocation of the handler with an error code of zero indicates that the + * data was fully written to the channel. + * + * The system takes control of the file descriptor until the handler is + * enqueued, and during this time file descriptor flags such as O_NONBLOCK will + * be modified by the system on behalf of the application. It is an error for + * the application to modify a file descriptor directly while it is under the + * control of the system, but it may create additional dispatch I/O convenience + * operations or dispatch I/O channels associated with that file descriptor. + * + * @param fd The file descriptor to which to write the data. + * @param data The data object to write to the file descriptor. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param handler The handler to enqueue when the data has been written. + * @param context Application-defined context parameter. + * @param data The data that could not be written to the I/O + * channel, or NULL. + * @param error An errno condition for the write operation or + * zero if the write was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL5 +DISPATCH_NOTHROW +void +dispatch_write_f(dispatch_fd_t fd, + dispatch_data_t data, + dispatch_queue_t queue, + void *context, + void (*handler)(void *context, dispatch_data_t data, int error)); + +/*! + * @function dispatch_io_create_f + * Create a dispatch I/O channel associated with a file descriptor. The system + * takes control of the file descriptor until the channel is closed, an error + * occurs on the file descriptor or all references to the channel are released. + * At that time the specified cleanup handler will be enqueued and control over + * the file descriptor relinquished. 
+ * + * While a file descriptor is under the control of a dispatch I/O channel, file + * descriptor flags such as O_NONBLOCK will be modified by the system on behalf + * of the application. It is an error for the application to modify a file + * descriptor directly while it is under the control of a dispatch I/O channel, + * but it may create additional channels associated with that file descriptor. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param fd The file descriptor to associate with the I/O channel. + * @param queue The dispatch queue to which the handler should be submitted. + * @param context The application-defined context parameter to pass to + * the cleanup handler function. + * @param cleanup_handler The handler to enqueue when the system + * relinquishes control over the file descriptor. + * @param context Application-defined context parameter. + * @param error An errno condition if control is relinquished + * because channel creation failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type specified). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_f(dispatch_io_type_t type, + dispatch_fd_t fd, + dispatch_queue_t queue, + void *context, + void (*cleanup_handler)(void *context, int error)); + +/*! + * @function dispatch_io_create_with_path_f + * Create a dispatch I/O channel associated with a path name. The specified + * path, oflag and mode parameters will be passed to open(2) when the first I/O + * operation on the channel is ready to execute and the resulting file + * descriptor will remain open and under the control of the system until the + * channel is closed, an error occurs on the file descriptor or all references + * to the channel are released. 
At that time the file descriptor will be closed + * and the specified cleanup handler will be enqueued. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param path The absolute path to associate with the I/O channel. + * @param oflag The flags to pass to open(2) when opening the file at + * path. + * @param mode The mode to pass to open(2) when creating the file at + * path (i.e. with flag O_CREAT), zero otherwise. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the cleanup handler function. + * @param cleanup_handler The handler to enqueue when the system + * has closed the file at path. + * @param context Application-defined context parameter. + * @param error An errno condition if control is relinquished + * because channel creation or opening of the + * specified file failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type or non-absolute path specified). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_with_path_f(dispatch_io_type_t type, + const char *path, int oflag, mode_t mode, + dispatch_queue_t queue, + void *context, + void (*cleanup_handler)(void *context, int error)); + +/*! + * @function dispatch_io_create_with_io_f + * Create a new dispatch I/O channel from an existing dispatch I/O channel. + * The new channel inherits the file descriptor or path name associated with + * the existing channel, but not its channel type or policies. 
+ * + * If the existing channel is associated with a file descriptor, control by the + * system over that file descriptor is extended until the new channel is also + * closed, an error occurs on the file descriptor, or all references to both + * channels are released. At that time the specified cleanup handler will be + * enqueued and control over the file descriptor relinquished. + * + * While a file descriptor is under the control of a dispatch I/O channel, file + * descriptor flags such as O_NONBLOCK will be modified by the system on behalf + * of the application. It is an error for the application to modify a file + * descriptor directly while it is under the control of a dispatch I/O channel, + * but it may create additional channels associated with that file descriptor. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param io The existing channel to create the new I/O channel from. + * @param queue The dispatch queue to which the handler should be submitted. + * @param context The application-defined context parameter to pass to + * the cleanup handler function. + * @param cleanup_handler The handler to enqueue when the system + * relinquishes control over the file descriptor + * (resp. closes the file at path) associated with + * the existing channel. + * @param context Application-defined context parameter. + * @param error An errno condition if control is relinquished + * because channel creation failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type specified). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_with_io_f(dispatch_io_type_t type, + dispatch_io_t io, + dispatch_queue_t queue, + void *context, + void (*cleanup_handler)(void *context, int error)); + +/*! 
+ * @typedef dispatch_io_handler_function_t + * The prototype of I/O handler functions for dispatch I/O operations. + * + * @param context Application-defined context parameter. + * @param done A flag indicating whether the operation is complete. + * @param data The data object to be handled. + * @param error An errno condition for the operation. + */ +typedef void (*dispatch_io_handler_function_t)(void *context, bool done, + dispatch_data_t data, int error); + +/*! + * @function dispatch_io_read_f + * Schedule a read operation for asynchronous execution on the specified I/O + * channel. The I/O handler is enqueued one or more times depending on the + * general load of the system and the policy specified on the I/O channel. + * + * Any data read from the channel is described by the dispatch data object + * passed to the I/O handler. This object will be automatically released by the + * system when the I/O handler returns. It is the responsibility of the + * application to retain, concatenate or copy the data object if it is needed + * after the I/O handler returns. + * + * Dispatch I/O handlers are not reentrant. The system will ensure that no new + * I/O handler instance is invoked until the previously enqueued handler + * function has returned. + * + * An invocation of the I/O handler with the done flag set indicates that the + * read operation is complete and that the handler will not be enqueued again. + * + * If an unrecoverable error occurs on the I/O channel's underlying file + * descriptor, the I/O handler will be enqueued with the done flag set, the + * appropriate error code and a NULL data object. + * + * An invocation of the I/O handler with the done flag set, an error code of + * zero and an empty data object indicates that EOF was reached. + * + * @param channel The dispatch I/O channel from which to read the data. + * @param offset The offset relative to the channel position from which + * to start reading (only for DISPATCH_IO_RANDOM). 
+ * @param length The length of data to read from the I/O channel, or + * SIZE_MAX to indicate that data should be read until EOF + * is reached. + * @param queue The dispatch queue to which the I/O handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param io_handler The I/O handler to enqueue when data is ready to be + * delivered. + * @param context Application-defined context parameter. + * @param done A flag indicating whether the operation is complete. + * @param data An object with the data most recently read from the + * I/O channel as part of this read operation, or NULL. + * @param error An errno condition for the read operation or zero if + * the read was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL6 +DISPATCH_NOTHROW +void +dispatch_io_read_f(dispatch_io_t channel, + off_t offset, + size_t length, + dispatch_queue_t queue, + void *context, + dispatch_io_handler_function_t io_handler); + +/*! + * @function dispatch_io_write_f + * Schedule a write operation for asynchronous execution on the specified I/O + * channel. The I/O handler is enqueued one or more times depending on the + * general load of the system and the policy specified on the I/O channel. + * + * Any data remaining to be written to the I/O channel is described by the + * dispatch data object passed to the I/O handler. This object will be + * automatically released by the system when the I/O handler returns. It is the + * responsibility of the application to retain, concatenate or copy the data + * object if it is needed after the I/O handler returns. + * + * Dispatch I/O handlers are not reentrant. The system will ensure that no new + * I/O handler instance is invoked until the previously enqueued handler + * function has returned. 
+ * + * An invocation of the I/O handler with the done flag set indicates that the + * write operation is complete and that the handler will not be enqueued again. + * + * If an unrecoverable error occurs on the I/O channel's underlying file + * descriptor, the I/O handler will be enqueued with the done flag set, the + * appropriate error code and an object containing the data that could not be + * written. + * + * An invocation of the I/O handler with the done flag set and an error code of + * zero indicates that the data was fully written to the channel. + * + * @param channel The dispatch I/O channel on which to write the data. + * @param offset The offset relative to the channel position from which + * to start writing (only for DISPATCH_IO_RANDOM). + * @param data The data to write to the I/O channel. The data object + * will be retained by the system until the write operation + * is complete. + * @param queue The dispatch queue to which the I/O handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param io_handler The I/O handler to enqueue when data has been delivered. + * @param context Application-defined context parameter. + * @param done A flag indicating whether the operation is complete. + * @param data An object of the data remaining to be + * written to the I/O channel as part of this write + * operation, or NULL. + * @param error An errno condition for the write operation or zero + * if the write was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 +DISPATCH_NONNULL6 DISPATCH_NOTHROW +void +dispatch_io_write_f(dispatch_io_t channel, + off_t offset, + dispatch_data_t data, + dispatch_queue_t queue, + void *context, + dispatch_io_handler_function_t io_handler); + +/*! 
+ * @function dispatch_io_barrier_f + * Schedule a barrier operation on the specified I/O channel; all previously + * scheduled operations on the channel will complete before the provided + * barrier function is enqueued onto the global queue determined by the + * channel's target queue, and no subsequently scheduled operations will start + * until the barrier function has returned. + * + * If multiple channels are associated with the same file descriptor, a barrier + * operation scheduled on any of these channels will act as a barrier across all + * channels in question, i.e. all previously scheduled operations on any of the + * channels will complete before the barrier function is enqueued, and no + * operations subsequently scheduled on any of the channels will start until the + * barrier function has returned. + * + * While the barrier function is running, it may safely operate on the channel's + * underlying file descriptor with fsync(2), lseek(2) etc. (but not close(2)). + * + * @param channel The dispatch I/O channel to schedule the barrier on. + * @param context The application-defined context parameter to pass to + * the barrier function. + * @param barrier The barrier function. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_io_barrier_f(dispatch_io_t channel, + void *context, + dispatch_function_t barrier); + +__END_DECLS + +#endif /* __DISPATCH_IO_PRIVATE__ */ diff --git a/private/mach_private.h b/private/mach_private.h new file mode 100644 index 0000000..603330d --- /dev/null +++ b/private/mach_private.h @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_MACH_PRIVATE__
+#define __DISPATCH_MACH_PRIVATE__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include instead of this file directly."
+#include // for HeaderDoc
+#endif
+
+__BEGIN_DECLS
+
+#if DISPATCH_MACH_SPI
+
+#include
+
+/*!
+ * @functiongroup Dispatch Mach Channel SPI
+ *
+ * IMPORTANT: This is Libsystem-internal SPI not intended for general use and
+ * is subject to change at any time without warning.
+ */
+
+/*!
+ * @typedef dispatch_mach_t
+ * A dispatch mach channel asynchronously receives and sends mach messages.
+ */
+DISPATCH_DECL(dispatch_mach);
+
+/*!
+ * @typedef dispatch_mach_reason_t
+ * Reasons for a mach channel handler to be invoked.
+ *
+ * @const DISPATCH_MACH_CONNECTED
+ * The channel has been connected. The first handler invocation on a channel
+ * after calling dispatch_mach_connect() will have this reason.
+ *
+ * @const DISPATCH_MACH_MESSAGE_RECEIVED
+ * A message was received, it is passed in the message parameter.
+ *
+ * @const DISPATCH_MACH_MESSAGE_SENT
+ * A message was sent, it is passed in the message parameter (so that associated
+ * resources can be disposed of).
+ * + * @const DISPATCH_MACH_MESSAGE_SEND_FAILED + * A message failed to be sent, it is passed in the message parameter (so that + * associated resources can be disposed of), along with the error code from + * mach_msg(). + * + * @const DISPATCH_MACH_MESSAGE_NOT_SENT + * A message was not sent due to the channel being canceled or reconnected, it + * is passed in the message parameter (so that associated resources can be + * disposed of). + * + * @const DISPATCH_MACH_BARRIER_COMPLETED + * A barrier block has finished executing. + * + * @const DISPATCH_MACH_DISCONNECTED + * The channel has been disconnected by a call to dispatch_mach_reconnect() or + * dispatch_mach_cancel(), an empty message is passed in the message parameter + * (so that associated port rights can be disposed of). + * The message header will contain either a remote port with a previously + * connected send right, or a local port with a previously connected receive + * right (if the channel was canceled), or a local port with a receive right + * that was being monitored for a direct reply to a message previously sent to + * the channel (if no reply was received). + * + * @const DISPATCH_MACH_CANCELED + * The channel has been canceled. + */ +DISPATCH_ENUM(dispatch_mach_reason, unsigned long, + DISPATCH_MACH_CONNECTED = 1, + DISPATCH_MACH_MESSAGE_RECEIVED, + DISPATCH_MACH_MESSAGE_SENT, + DISPATCH_MACH_MESSAGE_SEND_FAILED, + DISPATCH_MACH_MESSAGE_NOT_SENT, + DISPATCH_MACH_BARRIER_COMPLETED, + DISPATCH_MACH_DISCONNECTED, + DISPATCH_MACH_CANCELED, + DISPATCH_MACH_REASON_LAST, /* unused */ +); + +/*! + * @typedef dispatch_mach_trailer_t + * Trailer type of mach message received by dispatch mach channels + */ + +typedef mach_msg_context_trailer_t dispatch_mach_trailer_t; + +/*! + * @constant DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + * Maximum size of a message that can be received inline by a dispatch mach + * channel, reception of larger messages requires an extra roundtrip through + * the kernel. 
+ */
+
+#define DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE \
+ ((PAGE_SIZE > 0x1000 ? 1 : 3) * PAGE_SIZE - \
+ sizeof(dispatch_mach_trailer_t))
+
+/*!
+ * @typedef dispatch_mach_msg_t
+ * A dispatch mach message encapsulates messages received or sent with dispatch
+ * mach channels.
+ */
+DISPATCH_DECL(dispatch_mach_msg);
+
+/*!
+ * @typedef dispatch_mach_msg_destructor_t
+ * Dispatch mach message object destructors.
+ *
+ * @const DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT
+ * Message buffer storage is internal to the object, if a buffer is supplied
+ * during object creation, its contents are copied.
+ *
+ * @const DISPATCH_MACH_MSG_DESTRUCTOR_FREE
+ * Message buffer will be deallocated with free(3).
+ *
+ * @const DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE
+ * Message buffer will be deallocated with vm_deallocate.
+ */
+DISPATCH_ENUM(dispatch_mach_msg_destructor, unsigned int,
+ DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT = 0,
+ DISPATCH_MACH_MSG_DESTRUCTOR_FREE,
+ DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE,
+);
+
+/*!
+ * @function dispatch_mach_msg_create
+ * Creates a dispatch mach message object, either with a newly allocated message
+ * buffer of given size, or from an existing message buffer that will be
+ * deallocated with the specified destructor when the object is released.
+ *
+ * If a non-NULL reference to a pointer is provided in 'msg_ptr', it is filled
+ * with the location of the (possibly newly allocated) message buffer.
+ *
+ * It is the responsibility of the application to ensure that it does not modify
+ * the underlying message buffer once the dispatch mach message object is passed
+ * to other dispatch mach API.
+ *
+ * @param msg The message buffer to create the message object from.
+ * If 'destructor' is DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT,
+ * this argument may be NULL to leave the newly allocated
+ * message buffer zero-initialized.
+ * @param size The size of the message buffer.
+ * Must be >= sizeof(mach_msg_header_t) + * @param destructor The destructor to use to deallocate the message buffer + * when the object is released. + * @param msg_ptr A pointer to a pointer variable to be filled with the + * location of the (possibly newly allocated) message + * buffer, or NULL. + * @result A newly created dispatch mach message object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_mach_msg_t +dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, + dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr); + +/*! + * @function dispatch_mach_msg_get_msg + * Returns the message buffer underlying a dispatch mach message object. + * + * @param message The dispatch mach message object to query. + * @param size_ptr A pointer to a size_t variable to be filled with the + * size of the message buffer, or NULL. + * @result Pointer to message buffer underlying the object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +mach_msg_header_t* +dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, size_t *size_ptr); + +#ifdef __BLOCKS__ +/*! + * @typedef dispatch_mach_handler_t + * Prototype of dispatch mach channel handler blocks. + * + * @param reason Reason the handler was invoked. + * @param message Message object that was sent or received. + * @param error Mach error code for the send operation. + */ +typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason_t reason, + dispatch_mach_msg_t message, mach_error_t error); + +/*! + * @function dispatch_mach_create + * Create a dispatch mach channel to asynchronously receive and send mach + * messages. 
+ * + * The specified handler will be called with the corresponding reason parameter + * for each message received and for each message that was successfully sent, + * that failed to be sent, or was not sent; as well as when a barrier block + * has completed, or when channel connection, reconnection or cancellation has + * taken effect. + * + * Dispatch mach channels are created in a disconnected state, they must be + * connected via dispatch_mach_connect() to begin receiving and sending + * messages. + * + * @param label + * An optional string label to attach to the channel. The string is not copied, + * if it is non-NULL it must point to storage that remains valid for the + * lifetime of the channel object. May be NULL. + * + * @param queue + * The target queue of the channel, where the handler and barrier blocks will + * be submitted. + * + * @param handler + * The handler block to submit when a message has been sent or received. + * + * @result + * The newly created dispatch mach channel. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL3 DISPATCH_NOTHROW +dispatch_mach_t +dispatch_mach_create(const char *label, dispatch_queue_t queue, + dispatch_mach_handler_t handler); +#endif + +/*! + * @typedef dispatch_mach_handler_function_t + * Prototype of dispatch mach channel handler functions. + * + * @param context Application-defined context parameter. + * @param reason Reason the handler was invoked. + * @param message Message object that was sent or received. + * @param error Mach error code for the send operation. + */ +typedef void (*dispatch_mach_handler_function_t)(void *context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t message, + mach_error_t error); + +/*! + * @function dispatch_mach_create_f + * Create a dispatch mach channel to asynchronously receive and send mach + * messages. 
+ * + * The specified handler will be called with the corresponding reason parameter + * for each message received and for each message that was successfully sent, + * that failed to be sent, or was not sent; as well as when a barrier block + * has completed, or when channel connection, reconnection or cancellation has + * taken effect. + * + * Dispatch mach channels are created in a disconnected state, they must be + * connected via dispatch_mach_connect() to begin receiving and sending + * messages. + * + * @param label + * An optional string label to attach to the channel. The string is not copied, + * if it is non-NULL it must point to storage that remains valid for the + * lifetime of the channel object. May be NULL. + * + * @param queue + * The target queue of the channel, where the handler and barrier blocks will + * be submitted. + * + * @param context + * The application-defined context to pass to the handler. + * + * @param handler + * The handler function to submit when a message has been sent or received. + * + * @result + * The newly created dispatch mach channel. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_mach_t +dispatch_mach_create_f(const char *label, dispatch_queue_t queue, void *context, + dispatch_mach_handler_function_t handler); + +/*! + * @function dispatch_mach_connect + * Connect a mach channel to the specified receive and send rights. + * + * This function must only be called once during the lifetime of a channel, it + * will initiate message reception and perform any already submitted message + * sends or barrier operations. + * + * @param channel + * The mach channel to connect. + * + * @param receive + * The receive right to associate with the channel. May be MACH_PORT_NULL. + * + * @param send + * The send right to associate with the channel. May be MACH_PORT_NULL. 
+ *
+ * @param checkin
+ * An optional message object encapsulating the initial check-in message to send
+ * upon channel connection. The check-in message is sent immediately before the
+ * first message submitted via dispatch_mach_send(). The message object will be
+ * retained until the initial send operation is complete (or not performed due
+ * to channel cancellation or reconnection) and the channel handler has
+ * returned. May be NULL.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive,
+ mach_port_t send, dispatch_mach_msg_t checkin);
+
+/*!
+ * @function dispatch_mach_reconnect
+ * Reconnect a mach channel to the specified send right.
+ *
+ * Disconnects the channel from the current send right, interrupts any pending
+ * message sends (and returns the messages as unsent), and reconnects the
+ * channel to a new send right.
+ *
+ * The application must wait for the channel handler to be invoked with
+ * DISPATCH_MACH_DISCONNECTED before releasing the previous send right.
+ *
+ * @param channel
+ * The mach channel to reconnect.
+ *
+ * @param send
+ * The new send right to associate with the channel. May be MACH_PORT_NULL.
+ *
+ * @param checkin
+ * An optional message object encapsulating the initial check-in message to send
+ * upon channel reconnection. The check-in message is sent immediately before
+ * the first message submitted via dispatch_mach_send() after this function
+ * returns. The message object will be retained until the initial send operation
+ * is complete (or not performed due to channel cancellation or reconnection)
+ * and the channel handler has returned. May be NULL.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send,
+ dispatch_mach_msg_t checkin);
+
+/*!
+ * @function dispatch_mach_cancel
+ * Cancel a mach channel, preventing any further messages from being sent or
+ * received.
+ *
+ * The application must wait for the channel handler to be invoked with
+ * DISPATCH_MACH_DISCONNECTED before releasing the underlying send and receive
+ * rights.
+ *
+ * Note: explicit cancellation of mach channels is required, no implicit
+ * cancellation takes place on release of the last application reference
+ * to the channel object. Failure to cancel will cause the channel and
+ * its associated resources to be leaked.
+ *
+ * @param channel
+ * The mach channel to cancel.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_mach_cancel(dispatch_mach_t channel);
+
+/*!
+ * @function dispatch_mach_send
+ * Asynchronously send a message encapsulated in a dispatch mach message object
+ * to the specified mach channel.
+ *
+ * Unless the message is being sent to a send-once right (as determined by the
+ * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits),
+ * the message header remote port is set to the channel send right before the
+ * send operation is performed.
+ *
+ * If the message expects a direct reply (as determined by the presence of
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits) the receive
+ * right specified in the message header local port will be monitored until a
+ * reply message (or a send-once notification) is received, or the channel is
+ * canceled. Hence the application must wait for the channel handler to be
+ * invoked with a DISPATCH_MACH_DISCONNECTED message before releasing that
+ * receive right.
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation. If the message expected a direct reply, the
+ * receive right originally specified in the message header local port will be
+ * returned in a DISPATCH_MACH_DISCONNECTED message.
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send. The object will be
+ * retained until the send operation is complete and the channel handler has
+ * returned. The storage underlying the message object may be modified by the
+ * send operation.
+ *
+ * @param options
+ * Additional send options to pass to mach_msg() when performing the send
+ * operation.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
+void
+dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message,
+ mach_msg_option_t options);
+
+#ifdef __BLOCKS__
+/*!
+ * @function dispatch_mach_send_barrier
+ * Submit a send barrier to the specified mach channel. Messages submitted to
+ * the channel before the barrier will be sent before the barrier block is
+ * executed, and messages submitted to the channel after the barrier will only
+ * be sent once the barrier block has completed and the channel handler
+ * invocation for the barrier has returned.
+ *
+ * @param channel
+ * The mach channel to which to submit the barrier.
+ *
+ * @param barrier
+ * The barrier block to submit to the channel target queue.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier);
+#endif
+
+/*!
+ * @function dispatch_mach_send_barrier_f
+ * Submit a send barrier to the specified mach channel.
Messages submitted to + * the channel before the barrier will be sent before the barrier block is + * executed, and messages submitted to the channel after the barrier will only + * be sent once the barrier block has completed and the channel handler + * invocation for the barrier has returned. + * + * @param channel + * The mach channel to which to submit the barrier. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param barrier + * The barrier function to submit to the channel target queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *context, + dispatch_function_t barrier); + +#ifdef __BLOCKS__ +/*! + * @function dispatch_mach_receive_barrier + * Submit a receive barrier to the specified mach channel. Channel handlers for + * messages received by the channel after the receive barrier has been + * submitted will only be invoked once the barrier block has completed and the + * channel handler invocation for the barrier has returned. + * + * @param channel + * The mach channel to which to submit the receive barrier. + * + * @param barrier + * The barrier block to submit to the channel target queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_mach_receive_barrier(dispatch_mach_t channel, + dispatch_block_t barrier); +#endif + +/*! + * @function dispatch_mach_receive_barrier_f + * Submit a receive barrier to the specified mach channel. Channel handlers for + * messages received by the channel after the receive barrier has been + * submitted will only be invoked once the barrier block has completed and the + * channel handler invocation for the barrier has returned. + * + * @param channel + * The mach channel to which to submit the receive barrier. 
+ * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param barrier + * The barrier function to submit to the channel target queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *context, + dispatch_function_t barrier); + +/*! + * @function dispatch_mach_get_checkin_port + * Returns the port specified in the message header remote port of the check-in + * message passed to the most recent invocation of dispatch_mach_connect() or + * dispatch_mach_reconnect() for the provided mach channel (irrespective of the + * completion of the (re)connect or check-in operations in question). + * + * Returns MACH_PORT_NULL if dispatch_mach_connect() has not yet been called or + * if the most recently specified check-in message was NULL, and MACH_PORT_DEAD + * if the channel has been canceled. + * + * It is the responsibility of the application to ensure that the port + * specified in a check-in message remains valid at the time this function is + * called. + * + * @param channel + * The mach channel to query. + * + * @result + * The most recently specified check-in port for the channel. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +mach_port_t +dispatch_mach_get_checkin_port(dispatch_mach_t channel); + +#endif // DISPATCH_MACH_SPI + +__END_DECLS + +#endif diff --git a/private/private.h b/private/private.h index 08a14ce..4e32e73 100644 --- a/private/private.h +++ b/private/private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -54,27 +54,90 @@ #include #include #include +#include #include +#include #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20111201 // Keep in sync with +#if DISPATCH_API_VERSION != 20130520 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif __BEGIN_DECLS +/*! + * @function _dispatch_is_multithreaded + * + * @abstract + * Returns true if the current process has become multithreaded by the use + * of libdispatch functionality. + * + * @discussion + * This SPI is intended for use by low-level system components that need to + * ensure that they do not make a single-threaded process multithreaded, to + * avoid negatively affecting child processes of a fork (without exec). + * + * Such components must not use any libdispatch functionality if this function + * returns false. + * + * @result + * Boolean indicating whether the process has used libdispatch and become + * multithreaded. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NOTHROW -void -libdispatch_init(void); +bool _dispatch_is_multithreaded(void); + +/*! + * @function _dispatch_is_fork_of_multithreaded_parent + * + * @abstract + * Returns true if the current process is a child of a parent process that had + * become multithreaded by the use of libdispatch functionality at the time of + * fork (without exec). + * + * @discussion + * This SPI is intended for use by (rare) low-level system components that need + * to continue working on the child side of a fork (without exec) of a + * multithreaded process. + * + * Such components must not use any libdispatch functionality if this function + * returns true. + * + * @result + * Boolean indicating whether the parent process had used libdispatch and + * become multithreaded at the time of fork. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +bool _dispatch_is_fork_of_multithreaded_parent(void); + +/* + * dispatch_time convenience macros + */ + +#define _dispatch_time_after_nsec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t)) +#define _dispatch_time_after_usec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_USEC) +#define _dispatch_time_after_msec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_MSEC) +#define _dispatch_time_after_sec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) + +/* + * SPI for CoreFoundation/Foundation/libauto ONLY + */ + +#define DISPATCH_COCOA_COMPAT (TARGET_OS_MAC || TARGET_OS_WIN32) -#if TARGET_OS_MAC -#define DISPATCH_COCOA_COMPAT 1 #if DISPATCH_COCOA_COMPAT +#if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW mach_port_t @@ -84,6 +147,46 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_main_queue_callback_4CF(mach_msg_header_t *msg); +#elif TARGET_OS_WIN32 +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +HANDLE +_dispatch_get_main_queue_handle_4CF(void); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_main_queue_callback_4CF(void); +#endif // TARGET_OS_WIN32 + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags); + +#if TARGET_OS_MAC +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +mach_port_t +_dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); +#endif + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void 
+_dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t queue); + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +_dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t queue); + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +_dispatch_source_set_runloop_timer_4CF(dispatch_source_t source, + dispatch_time_t start, uint64_t interval, uint64_t leeway); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT @@ -92,11 +195,6 @@ void (*dispatch_begin_thread_4GC)(void); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT void (*dispatch_end_thread_4GC)(void); - -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) -DISPATCH_EXPORT -void (*dispatch_no_worker_threads_4GC)(void); - __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT void *(*_dispatch_begin_NSAutoReleasePool)(void); @@ -105,39 +203,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT void (*_dispatch_end_NSAutoReleasePool)(void *); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -bool _dispatch_is_multithreaded(void); - -#define _dispatch_time_after_nsec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t)) -#define _dispatch_time_after_usec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_USEC) -#define _dispatch_time_after_msec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_MSEC) -#define _dispatch_time_after_sec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) - -#endif -#endif /* TARGET_OS_MAC */ - -/* pthreads magic */ - -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); - -#if TARGET_OS_MAC -/* - * Extract the context pointer from a mach message trailer. 
- */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL -DISPATCH_NOTHROW -void * -dispatch_mach_msg_get_context(mach_msg_header_t *msg); -#endif /* TARGET_OS_MAC */ +#endif /* DISPATCH_COCOA_COMPAT */ __END_DECLS -#endif +#endif // __DISPATCH_PRIVATE__ diff --git a/private/queue_private.h b/private/queue_private.h index bdfb5b8..dfef785 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -34,7 +34,6 @@ __BEGIN_DECLS - /*! * @enum dispatch_queue_flags_t * @@ -48,6 +47,17 @@ enum { #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) +/*! + * @typedef dispatch_queue_priority_t + * + * @constant DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE + * Items dispatched to the queue will run at non-interactive priority. + * This priority level is intended for user-initiated application activity that + * is long-running and CPU or IO intensive and that the user is actively waiting + * on, but that should not interfere with interactive use of the application. + */ +#define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN + /*! * @function dispatch_queue_set_width * @@ -84,31 +94,122 @@ void dispatch_queue_set_width(dispatch_queue_t dq, long width); // DEPRECATED /*! - * @function dispatch_set_current_target_queue + * @function dispatch_queue_create_with_target * * @abstract - * Synchronously sets the target queue of the current serial queue. + * Creates a new dispatch queue with a specified target queue. * * @discussion - * This SPI is provided for a limited purpose case when calling - * dispatch_set_target_queue() is not sufficient. 
It works similarly to - * dispatch_set_target_queue() except the target queue of the current queue - * is immediately changed so that pending blocks on the queue will run on the - * new target queue. Calling this from outside of a block executing on a serial - * queue is undefined. + * Dispatch queues created with the DISPATCH_QUEUE_SERIAL or a NULL attribute + * invoke blocks serially in FIFO order. * - * @param queue - * The new target queue for the object. The queue is retained, and the - * previous target queue, if any, is released. - * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue - * to the default target queue for the given object type. + * Dispatch queues created with the DISPATCH_QUEUE_CONCURRENT attribute may + * invoke blocks concurrently (similarly to the global concurrent queues, but + * potentially with more overhead), and support barrier blocks submitted with + * the dispatch barrier API, which e.g. enables the implementation of efficient + * reader-writer schemes. + * + * When a dispatch queue is no longer needed, it should be released with + * dispatch_release(). Note that any pending blocks submitted to a queue will + * hold a reference to that queue. Therefore a queue will not be deallocated + * until all pending blocks have finished. + * + * @param label + * A string label to attach to the queue. + * This parameter is optional and may be NULL. + * + * @param attr + * DISPATCH_QUEUE_SERIAL or DISPATCH_QUEUE_CONCURRENT. + * + * @param target + * The target queue for the newly created queue. The target queue is retained. + * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the queue's target + * queue to the default target queue for the given queue type. + * + * @result + * The newly created dispatch queue. 
*/ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +dispatch_queue_create_with_target(const char *label, + dispatch_queue_attr_t attr, dispatch_queue_t target); -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -dispatch_set_current_target_queue(dispatch_queue_t queue); +#ifdef __BLOCKS__ +/*! + * @function dispatch_pthread_root_queue_create + * + * @abstract + * Creates a new concurrent dispatch root queue with a pthread-based pool of + * worker threads owned by the application. + * + * @discussion + * Dispatch pthread root queues are similar to the global concurrent dispatch + * queues in that they invoke blocks concurrently, however the blocks are not + * executed on ordinary worker threads but use a dedicated pool of pthreads not + * shared with the global queues or any other pthread root queues. + * + * NOTE: this is a special-purpose facility that should only be used in very + * limited circumstances, in almost all cases the global concurrent queues + * should be preferred. While this facility allows for more flexibility in + * configuring worker threads for special needs it comes at the cost of + * increased overall memory usage due to reduced thread sharing and higher + * latency in worker thread bringup. + * + * Dispatch pthread root queues do not support suspension, application context + * and change of width or of target queue. They can however be used as the + * target queue for serial or concurrent queues obtained via + * dispatch_queue_create() or dispatch_queue_create_with_target(), which + * enables the blocks submitted to those queues to be processed on the root + * queue's pthread pool. + * + * When a dispatch pthread root queue is no longer needed, it should be + * released with dispatch_release(). 
Existing worker pthreads and pending blocks + * submitted to the root queue will hold a reference to the queue so it will not + * be deallocated until all blocks have finished and worker threads exited. + * + * @param label + * A string label to attach to the queue. + * This parameter is optional and may be NULL. + * + * @param flags + * Reserved for future use. Passing any value other than zero may result in + * a NULL return value. + * + * @param attr + * Attributes passed to pthread_create(3) when creating worker pthreads. This + * parameter is copied and can be destroyed after this call returns. + * This parameter is optional and may be NULL. + * + * @param configure + * Configuration block called on newly created worker pthreads before any blocks + * for the root queue are executed. The block may configure the current thread + * as needed. + * This parameter is optional and may be NULL. + * + * @result + * The newly created dispatch pthread root queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure); +#endif /* __BLOCKS__ */ + +/*! + * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE + * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f() + * functions to indicate that the root queue for the current thread should be + * used (i.e. one of the global concurrent queues or a queue created with + * dispatch_pthread_root_queue_create()). If there is no such queue, the + * default priority global concurrent queue will be used. 
+ */ +#define DISPATCH_APPLY_CURRENT_ROOT_QUEUE NULL +#if !TARGET_OS_WIN32 __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT const struct dispatch_queue_offsets_s { // always add new fields at the end @@ -124,22 +225,73 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s { const uint16_t dqo_running; const uint16_t dqo_running_size; } dispatch_queue_offsets; +#endif /*! - * @function dispatch_flush_continuation_cache + * @function dispatch_assert_queue * * @abstract - * Flushes the current thread's cache of continuation objects, if any. + * Verifies that the current block is executing on a certain dispatch queue. * * @discussion - * Warning: this function is subject to change in a future release. - * Please contact the GCD team before using it in your code. + * Some code expects to be run on a specific dispatch queue. This function + * verifies that expectation for debugging. + * + * This function will only return if the currently executing block was submitted + * to the specified queue or to any queue targeting it (see + * dispatch_set_target_queue()). Otherwise, it logs an explanation to the system + * log, then terminates the application. + * + * When dispatch_assert_queue() is called outside of the context of a + * submitted block, its behavior is undefined. + * + * Passing the result of dispatch_get_main_queue() to this function verifies + * that the current block was submitted to the main queue or to a queue + * targeting it. + * IMPORTANT: this is NOT the same as verifying that the current block is + * executing on the main thread. + * + * The variant dispatch_assert_queue_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected to run on. + * The result of passing NULL in this parameter is undefined. 
 */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_assert_queue(dispatch_queue_t queue); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_NA) -DISPATCH_EXPORT DISPATCH_NOTHROW +/*! + * @function dispatch_assert_queue_not + * + * @abstract + * Verifies that the current block is not executing on a certain dispatch queue. + * + * @discussion + * This function is the equivalent of dispatch_assert_queue() with the test for + * equality inverted. See discussion there. + * + * The variant dispatch_assert_queue_not_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected not to run on. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 void -dispatch_flush_continuation_cache(void); +dispatch_assert_queue_not(dispatch_queue_t queue); + +#ifdef NDEBUG +#define dispatch_assert_queue_debug(q) ((void)0) +#define dispatch_assert_queue_not_debug(q) ((void)0) +#else +#define dispatch_assert_queue_debug(q) dispatch_assert_queue(q) +#define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q) +#endif __END_DECLS diff --git a/private/source_private.h b/private/source_private.h index 8de7308..0f44e27 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,6 +32,42 @@ #include // for HeaderDoc #endif +/*! + * @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE + * @discussion A dispatch timer source that is part of a timer aggregate. + * The handle is the dispatch timer aggregate object. + * The mask specifies which flags from dispatch_source_timer_flags_t to apply.
+ */ +#define DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE \ + (&_dispatch_source_type_timer_with_aggregate) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate); + +/*! + * @const DISPATCH_SOURCE_TYPE_INTERVAL + * @discussion A dispatch source that submits the event handler block at a + * specified time interval, phase-aligned with all other interval sources on + * the system that have the same interval value. + * + * The initial submission of the event handler will occur at some point during + * the first time interval after the source is created (assuming the source is + * resumed at that time). + * + * By default, the unit for the interval value is milliseconds and the leeway + * (maximum amount of time any individual handler submission may be deferred to + * align with other system activity) for the source is fixed at interval/2. + * + * If the DISPATCH_INTERVAL_UI_ANIMATION flag is specified, the unit for the + * interval value is animation frames (1/60th of a second) and the leeway is + * fixed at one frame. + * + * The handle is the interval value in milliseconds or frames. + * The mask specifies which flags from dispatch_source_timer_flags_t to apply. + */ +#define DISPATCH_SOURCE_TYPE_INTERVAL (&_dispatch_source_type_interval) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_SOURCE_TYPE_DECL(interval); + /*! * @const DISPATCH_SOURCE_TYPE_VFS * @discussion Apple-internal dispatch source that monitors for vfs events @@ -51,6 +87,17 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; +/*! + * @const DISPATCH_SOURCE_TYPE_MEMORYSTATUS + * @discussion A dispatch source that monitors memory status + * The mask is a mask of desired events from + * dispatch_source_memorystatus_flags_t. 
+ */ +#define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT const struct dispatch_source_type_s + _dispatch_source_type_memorystatus; + /*! * @const DISPATCH_SOURCE_TYPE_SOCK * @discussion A dispatch source that monitors events on socket state changes. @@ -89,6 +136,14 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; * @constant DISPATCH_SOCK_KEEPALIVE * TCP Keepalive received * + * @constant DISPATCH_SOCK_CONNECTED + * Socket is connected + * + * @constant DISPATCH_SOCK_DISCONNECTED + * Socket is disconnected + * + * @constant DISPATCH_SOCK_CONNINFO_UPDATED + * Connection info was updated */ enum { DISPATCH_SOCK_CONNRESET = 0x00000001, @@ -100,6 +155,11 @@ enum { DISPATCH_SOCK_SUSPEND = 0x00000040, DISPATCH_SOCK_RESUME = 0x00000080, DISPATCH_SOCK_KEEPALIVE = 0x00000100, + DISPATCH_SOCK_ADAPTIVE_WTIMO = 0x00000200, + DISPATCH_SOCK_ADAPTIVE_RTIMO = 0x00000400, + DISPATCH_SOCK_CONNECTED = 0x00000800, + DISPATCH_SOCK_DISCONNECTED = 0x00001000, + DISPATCH_SOCK_CONNINFO_UPDATED = 0x00002000, }; /*! @@ -148,6 +208,24 @@ enum { DISPATCH_VFS_VERYLOWDISK = 0x0200, }; +/*! + * @enum dispatch_source_timer_flags_t + * + * @constant DISPATCH_TIMER_BACKGROUND + * Specifies that the timer is used to trigger low priority maintenance-level + * activity and that the system may apply larger minimum leeway values to the + * timer in order to align it with other system activity. + * + * @constant DISPATCH_INTERVAL_UI_ANIMATION + * Specifies that the interval source is used for UI animation. The unit for + * the interval value of such sources is frames (1/60th of a second) and the + * leeway is fixed at one frame. + */ +enum { + DISPATCH_TIMER_BACKGROUND = 0x2, + DISPATCH_INTERVAL_UI_ANIMATION = 0x20, +}; + /*! 
* @enum dispatch_source_mach_send_flags_t * @@ -168,11 +246,12 @@ enum { * @enum dispatch_source_proc_flags_t * * @constant DISPATCH_PROC_REAP - * The process has been reaped by the parent process via - * wait*(). + * The process has been reaped by the parent process via wait*(). + * This flag is deprecated and will be removed in a future release. */ enum { - DISPATCH_PROC_REAP = 0x10000000, + DISPATCH_PROC_REAP __OSX_AVAILABLE_BUT_DEPRECATED( + __MAC_10_6, __MAC_10_9, __IPHONE_4_0, __IPHONE_7_0) = 0x10000000, }; /*! @@ -186,12 +265,83 @@ enum { DISPATCH_VM_PRESSURE = 0x80000000, }; +/*! + * @enum dispatch_source_memorystatus_flags_t + * + * @constant DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL + * The system's memory pressure state has returned to normal. + * @constant DISPATCH_MEMORYSTATUS_PRESSURE_WARN + * The system's memory pressure state has changed to warning. + * @constant DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL + * The system's memory pressure state has changed to critical. + */ + +enum { + DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL = 0x01, + DISPATCH_MEMORYSTATUS_PRESSURE_WARN = 0x02, +#if !TARGET_OS_EMBEDDED + DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL = 0x04, +#endif +}; + #if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 #define DISPATCH_VM_PRESSURE DISPATCH_VNODE_ATTRIB #endif __BEGIN_DECLS +/*! + * @typedef dispatch_timer_aggregate_t + * + * @abstract + * Dispatch timer aggregates are sets of related timers. + */ +DISPATCH_DECL(dispatch_timer_aggregate); + +/*! + * @function dispatch_timer_aggregate_create + * + * @abstract + * Creates a new dispatch timer aggregate. + * + * @discussion + * A dispatch timer aggregate is a set of related timers whose overall timing + * parameters can be queried. + * + * Timers are added to an aggregate when a timer source is created with type + * DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE. + * + * @result + * The newly created dispatch timer aggregate. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_timer_aggregate_t +dispatch_timer_aggregate_create(void); + +/*! + * @function dispatch_timer_aggregate_get_delay + * + * @abstract + * Retrieves the delay until a timer in the given aggregate will next fire. + * + * @param aggregate + * The dispatch timer aggregate to query. + * + * @param leeway_ptr + * Optional pointer to a variable filled with the leeway (in ns) that will be + * applied to the return value. May be NULL. + * + * @result + * Delay in ns from now. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +uint64_t +dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, + uint64_t *leeway_ptr); + #if TARGET_OS_MAC /*! * @typedef dispatch_mig_callback_t @@ -207,6 +357,18 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback); + +/*! + * @function dispatch_mach_msg_get_context + * + * @abstract + * Extract the context pointer from a mach message trailer. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL +DISPATCH_NOTHROW +void * +dispatch_mach_msg_get_context(mach_msg_header_t *msg); #endif __END_DECLS diff --git a/resolver/resolved.h b/resolver/resolved.h index bb9a82d..a481a20 100644 --- a/resolver/resolved.h +++ b/resolver/resolved.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * diff --git a/resolver/resolver.c b/resolver/resolver.c index 8b390b4..9afc893 100644 --- a/resolver/resolver.c +++ b/resolver/resolver.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Apple Inc. All rights reserved. 
+ * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * diff --git a/src/Makefile.am b/src/Makefile.am index f6b95e3..630a480 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -31,7 +31,6 @@ libdispatch_la_SOURCES= \ shims/atomic.h \ shims/getprogname.h \ shims/hw_config.h \ - shims/malloc_zone.h \ shims/perfmon.h \ shims/time.h \ shims/tsd.h diff --git a/src/allocator.c b/src/allocator.c new file mode 100644 index 0000000..7b4c165 --- /dev/null +++ b/src/allocator.c @@ -0,0 +1,764 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#include "allocator_internal.h" + +#if DISPATCH_ALLOCATOR + +#ifndef VM_MEMORY_LIBDISPATCH +#define VM_MEMORY_LIBDISPATCH 74 +#endif + +// _dispatch_main_heap is the first heap in the linked list, where searches +// always begin. +// +// _dispatch_main_heap, and dh_next, are read normally but only written (in +// try_create_heap) by cmpxchg. They start life at 0, and are only written +// once to non-zero. They are not marked volatile. There is a small risk that +// some thread may see a stale 0 value and enter try_create_heap. It will +// waste some time in an allocate syscall, but eventually it will try to +// cmpxchg, expecting to overwrite 0 with an address.
This will fail +// (because another thread already did this), the thread will deallocate the +// unused allocated memory, and continue with the new value. +// +// If something goes wrong here, the symptom would be a NULL dereference +// in alloc_continuation_from_heap or _magazine when derefing the magazine ptr. +static dispatch_heap_t _dispatch_main_heap; + +DISPATCH_ALWAYS_INLINE +static void +set_last_found_page(bitmap_t *val) +{ + dispatch_assert(_dispatch_main_heap); + unsigned int cpu = _dispatch_cpu_number(); + _dispatch_main_heap[cpu].header.last_found_page = val; +} + +DISPATCH_ALWAYS_INLINE +static bitmap_t * +last_found_page(void) +{ + dispatch_assert(_dispatch_main_heap); + unsigned int cpu = _dispatch_cpu_number(); + return _dispatch_main_heap[cpu].header.last_found_page; +} + +#pragma mark - +#pragma mark dispatch_alloc_bitmaps + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bitmap_t * +supermap_address(struct dispatch_magazine_s *magazine, unsigned int supermap) +{ + return &magazine->supermaps[supermap]; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bitmap_t * +bitmap_address(struct dispatch_magazine_s *magazine, unsigned int supermap, + unsigned int map) +{ + return &magazine->maps[supermap][map]; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static dispatch_continuation_t +continuation_address(struct dispatch_magazine_s *magazine, + unsigned int supermap, unsigned int map, unsigned int index) +{ +#if DISPATCH_DEBUG + dispatch_assert(supermap < SUPERMAPS_PER_MAGAZINE); + dispatch_assert(map < BITMAPS_PER_SUPERMAP); + dispatch_assert(index < CONTINUATIONS_PER_BITMAP); +#endif + return (dispatch_continuation_t)&magazine->conts[supermap][map][index]; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static struct dispatch_magazine_s * +magazine_for_continuation(dispatch_continuation_t c) +{ + return (struct dispatch_magazine_s *)((uintptr_t)c & MAGAZINE_MASK); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void 
+get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap, + unsigned int index, dispatch_continuation_t *continuation_out, + bitmap_t **supermap_out, unsigned int *bitmap_index_out) +{ + // m_for_c wants a continuation not a bitmap, but it works because it + // just masks off the bottom bits of the address. + struct dispatch_magazine_s *m = magazine_for_continuation((void *)bitmap); + unsigned int mindex = (unsigned int)(bitmap - m->maps[0]); + unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP; + unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP; + dispatch_assert(&m->maps[sindex][bindex] == bitmap); + if (fastpath(continuation_out)) { + *continuation_out = continuation_address(m, sindex, bindex, index); + } + if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex); + if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bool +continuation_is_in_first_page(dispatch_continuation_t c) +{ +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // (the base of c's magazine == the base of c's page) + // => c is in first page of magazine + return (((uintptr_t)c & MAGAZINE_MASK) == + ((uintptr_t)c & ~(uintptr_t)PAGE_MASK)); +#else + (void)c; + return false; +#endif +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void +get_maps_and_indices_for_continuation(dispatch_continuation_t c, + bitmap_t **supermap_out, unsigned int *bitmap_index_out, + bitmap_t **bitmap_out, unsigned int *index_out) +{ + unsigned int cindex, sindex, index, mindex; + padded_continuation *p = (padded_continuation *)c; + struct dispatch_magazine_s *m = magazine_for_continuation(c); +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + if (fastpath(continuation_is_in_first_page(c))) { + cindex = (unsigned int)(p - m->fp_conts); + index = cindex % CONTINUATIONS_PER_BITMAP; + mindex = cindex / CONTINUATIONS_PER_BITMAP; + if (fastpath(supermap_out)) *supermap_out = NULL; + if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex; + if 
(fastpath(bitmap_out)) *bitmap_out = &m->fp_maps[mindex]; + if (fastpath(index_out)) *index_out = index; + return; + } +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + cindex = (unsigned int)(p - (padded_continuation *)m->conts); + sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP); + mindex = (cindex / CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP; + index = cindex % CONTINUATIONS_PER_BITMAP; + if (fastpath(supermap_out)) *supermap_out = &m->supermaps[sindex]; + if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex; + if (fastpath(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex]; + if (fastpath(index_out)) *index_out = index; +} + +// Base address of page, or NULL if this page shouldn't be madvise()d +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static void * +madvisable_page_base_for_continuation(dispatch_continuation_t c) +{ + if (fastpath(continuation_is_in_first_page(c))) { + return NULL; + } + void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)PAGE_MASK); +#if DISPATCH_DEBUG + struct dispatch_magazine_s *m = magazine_for_continuation(c); + if (slowpath(page_base < (void *)&m->conts)) { + DISPATCH_CRASH("madvisable continuation too low"); + } + if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1] + [BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) { + DISPATCH_CRASH("madvisable continuation too high"); + } +#endif + return page_base; +} + +// Bitmap that controls the first few continuations in the same page as +// the continuations controlled by the passed bitmap. Undefined results if the +// passed bitmap controls continuations in the first page. 
+DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bitmap_t * +first_bitmap_in_same_page(bitmap_t *b) +{ +#if DISPATCH_DEBUG + struct dispatch_magazine_s *m; + m = magazine_for_continuation((void*)b); + dispatch_assert(b >= &m->maps[0][0]); + dispatch_assert(b < &m->maps[SUPERMAPS_PER_MAGAZINE] + [BITMAPS_PER_SUPERMAP]); +#endif + const uintptr_t PAGE_BITMAP_MASK = (BITMAPS_PER_PAGE * + BYTES_PER_BITMAP) - 1; + return (bitmap_t *)((uintptr_t)b & ~PAGE_BITMAP_MASK); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bool +bitmap_is_full(bitmap_t bits) +{ + return (bits == BITMAP_ALL_ONES); +} + +#define NO_BITS_WERE_UNSET (UINT_MAX) + +// max_index is the 0-based position of the most significant bit that is +// allowed to be set. +DISPATCH_ALWAYS_INLINE_NDEBUG +static unsigned int +bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap, + unsigned int max_index) +{ + // No barriers needed in acquire path: the just-allocated + // continuation is "uninitialized", so the caller shouldn't + // load from it before storing, so we don't need to guard + // against reordering those loads. 
+#if defined(__x86_64__) // TODO rdar://problem/11477843 + dispatch_assert(sizeof(*bitmap) == sizeof(uint64_t)); + return dispatch_atomic_set_first_bit((volatile uint64_t *)bitmap,max_index); +#else + dispatch_assert(sizeof(*bitmap) == sizeof(uint32_t)); + return dispatch_atomic_set_first_bit((volatile uint32_t *)bitmap,max_index); +#endif +} + +DISPATCH_ALWAYS_INLINE +static unsigned int +bitmap_set_first_unset_bit(volatile bitmap_t *bitmap) +{ + return bitmap_set_first_unset_bit_upto_index(bitmap, UINT_MAX); +} + +#define CLEAR_EXCLUSIVELY true +#define CLEAR_NONEXCLUSIVELY false + +// Return true if this bit was the last in the bitmap, and it is now all zeroes +DISPATCH_ALWAYS_INLINE_NDEBUG +static bool +bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index, + bool exclusively) +{ +#if DISPATCH_DEBUG + dispatch_assert(index < CONTINUATIONS_PER_BITMAP); +#endif + const bitmap_t mask = BITMAP_C(1) << index; + bitmap_t b; + + b = *bitmap; + if (exclusively == CLEAR_EXCLUSIVELY) { + if (slowpath((b & mask) == 0)) { + DISPATCH_CRASH("Corruption: failed to clear bit exclusively"); + } + } + + // and-and-fetch + b = dispatch_atomic_and(bitmap, ~mask, release); + return b == 0; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void +mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap, + unsigned int bitmap_index, volatile bitmap_t *bitmap) +{ +#if DISPATCH_DEBUG + dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP); +#endif + const bitmap_t mask = BITMAP_C(1) << bitmap_index; + bitmap_t s, s_new, s_masked; + + if (!bitmap_is_full(*bitmap)) { + return; + } + s_new = *supermap; + for (;;) { + // No barriers because supermaps are only advisory, they + // don't protect access to other memory. 
+ s = s_new; + s_masked = s | mask; + if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || + !bitmap_is_full(*bitmap)) { + return; + } + } +} + +#pragma mark - +#pragma mark dispatch_alloc_continuation_alloc + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS +DISPATCH_ALWAYS_INLINE_NDEBUG +static dispatch_continuation_t +alloc_continuation_from_first_page(struct dispatch_magazine_s *magazine) +{ + unsigned int i, index, continuation_index; + + // TODO: unroll if this is hot? + for (i = 0; i < FULL_BITMAPS_IN_FIRST_PAGE; i++) { + index = bitmap_set_first_unset_bit(&magazine->fp_maps[i]); + if (fastpath(index != NO_BITS_WERE_UNSET)) goto found; + } + if (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE) { + index = bitmap_set_first_unset_bit_upto_index(&magazine->fp_maps[i], + REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE - 1); + if (fastpath(index != NO_BITS_WERE_UNSET)) goto found; + } + return NULL; + +found: + continuation_index = (i * CONTINUATIONS_PER_BITMAP) + index; + return (dispatch_continuation_t)&magazine->fp_conts[continuation_index]; +} +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +DISPATCH_ALWAYS_INLINE_NDEBUG +static dispatch_continuation_t +alloc_continuation_from_magazine(struct dispatch_magazine_s *magazine) +{ + unsigned int s, b, index; + + for (s = 0; s < SUPERMAPS_PER_MAGAZINE; s++) { + volatile bitmap_t *supermap = supermap_address(magazine, s); + if (bitmap_is_full(*supermap)) { + continue; + } + for (b = 0; b < BITMAPS_PER_SUPERMAP; b++) { + volatile bitmap_t *bitmap = bitmap_address(magazine, s, b); + index = bitmap_set_first_unset_bit(bitmap); + if (index != NO_BITS_WERE_UNSET) { + set_last_found_page( + first_bitmap_in_same_page((bitmap_t *)bitmap)); + mark_bitmap_as_full_if_still_full(supermap, b, bitmap); + return continuation_address(magazine, s, b, index); + } + } + } + return NULL; +} + +DISPATCH_NOINLINE +static void +_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) +{ +#if HAVE_MACH + kern_return_t kr; + mach_vm_size_t 
vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE; + mach_vm_offset_t vm_mask = ~MAGAZINE_MASK; + mach_vm_address_t vm_addr = vm_page_size; + while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, + vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), + MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT))) { + if (kr != KERN_NO_SPACE) { + (void)dispatch_assume_zero(kr); + DISPATCH_CLIENT_CRASH("Could not allocate heap"); + } + _dispatch_temporary_resource_shortage(); + vm_addr = vm_page_size; + } + uintptr_t aligned_region = (uintptr_t)vm_addr; +#else // HAVE_MACH + const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE; + void *region_p; + while (!dispatch_assume((region_p = mmap(NULL, region_sz, + PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE, + VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) { + _dispatch_temporary_resource_shortage(); + } + uintptr_t region = (uintptr_t)region_p; + uintptr_t region_end = region + region_sz; + uintptr_t aligned_region, aligned_region_end; + uintptr_t bottom_slop_len, top_slop_len; + // Realign if needed; find the slop at top/bottom to unmap + if ((region & ~(MAGAZINE_MASK)) == 0) { + bottom_slop_len = 0; + aligned_region = region; + aligned_region_end = region_end - BYTES_PER_MAGAZINE; + top_slop_len = BYTES_PER_MAGAZINE; + } else { + aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE; + aligned_region_end = aligned_region + + (MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE); + bottom_slop_len = aligned_region - region; + top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len; + } +#if DISPATCH_DEBUG + // Double-check our math. 
+ dispatch_assert(aligned_region % PAGE_SIZE == 0); + dispatch_assert(aligned_region_end % PAGE_SIZE == 0); + dispatch_assert(aligned_region_end > aligned_region); + dispatch_assert(top_slop_len % PAGE_SIZE == 0); + dispatch_assert(bottom_slop_len % PAGE_SIZE == 0); + dispatch_assert(aligned_region_end + top_slop_len == region_end); + dispatch_assert(region + bottom_slop_len == aligned_region); + dispatch_assert(region_sz == bottom_slop_len + top_slop_len + + MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE); + if (bottom_slop_len) { + (void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len, + PROT_NONE)); + } + if (top_slop_len) { + (void)dispatch_assume_zero(mprotect((void *)aligned_region_end, + top_slop_len, PROT_NONE)); + } +#else + if (bottom_slop_len) { + (void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len)); + } + if (top_slop_len) { + (void)dispatch_assume_zero(munmap((void *)aligned_region_end, + top_slop_len)); + } +#endif // DISPATCH_DEBUG +#endif // HAVE_MACH + + if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region, + relaxed)) { + // If we lost the race to link in the new region, unmap the whole thing. 
+#if DISPATCH_DEBUG + (void)dispatch_assume_zero(mprotect((void *)aligned_region, + MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE)); +#else + (void)dispatch_assume_zero(munmap((void *)aligned_region, + MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE)); +#endif + } +} + +DISPATCH_NOINLINE +static dispatch_continuation_t +_dispatch_alloc_continuation_from_heap(dispatch_heap_t heap) +{ + dispatch_continuation_t cont; + + unsigned int cpu_number = _dispatch_cpu_number(); +#ifdef DISPATCH_DEBUG + dispatch_assert(cpu_number < NUM_CPU); +#endif + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // First try the continuations in the first page for this CPU + cont = alloc_continuation_from_first_page(&(heap[cpu_number])); + if (fastpath(cont)) { + return cont; + } +#endif + // Next, try the rest of the magazine for this CPU + cont = alloc_continuation_from_magazine(&(heap[cpu_number])); + return cont; +} + +DISPATCH_NOINLINE +static dispatch_continuation_t +_dispatch_alloc_continuation_from_heap_slow(void) +{ + dispatch_heap_t *heap = &_dispatch_main_heap; + dispatch_continuation_t cont; + + for (;;) { + if (!fastpath(*heap)) { + _dispatch_alloc_try_create_heap(heap); + } + cont = _dispatch_alloc_continuation_from_heap(*heap); + if (fastpath(cont)) { + return cont; + } + // If we have tuned our parameters right, 99.999% of apps should + // never reach this point! The ones that do have gone off the rails... + // + // Magazine is full? Onto the next heap! + // We tried 'stealing' from other CPUs' magazines. The net effect + // was worse performance from more wasted search time and more + // cache contention. + + // rdar://11378331 + // Future optimization: start at the page we last used, start + // in the *zone* we last used. 
But this would only improve deeply + // pathological cases like dispatch_starfish + heap = &(*heap)->header.dh_next; + } +} + +DISPATCH_ALLOC_NOINLINE +static dispatch_continuation_t +_dispatch_alloc_continuation_alloc(void) +{ + dispatch_continuation_t cont; + + if (fastpath(_dispatch_main_heap)) { + // Start looking in the same page where we found a continuation + // last time. + bitmap_t *last = last_found_page(); + if (fastpath(last)) { + unsigned int i; + for (i = 0; i < BITMAPS_PER_PAGE; i++) { + bitmap_t *cur = last + i; + unsigned int index = bitmap_set_first_unset_bit(cur); + if (fastpath(index != NO_BITS_WERE_UNSET)) { + bitmap_t *supermap; + unsigned int bindex; + get_cont_and_indices_for_bitmap_and_index(cur, + index, &cont, &supermap, &bindex); + mark_bitmap_as_full_if_still_full(supermap, bindex, + cur); + return cont; + } + } + } + + cont = _dispatch_alloc_continuation_from_heap(_dispatch_main_heap); + if (fastpath(cont)) { + return cont; + } + } + return _dispatch_alloc_continuation_from_heap_slow(); +} + +#pragma mark - +#pragma mark dispatch_alloc_continuation_free + +DISPATCH_NOINLINE +static void +_dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c) +{ + void *page = madvisable_page_base_for_continuation(c); + if (!page) { + // page can't be madvised; maybe it contains non-continuations + return; + } + // Are all the continuations in this page unallocated? + volatile bitmap_t *page_bitmaps; + get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL, + NULL, (bitmap_t **)&page_bitmaps, NULL); + unsigned int i; + for (i = 0; i < BITMAPS_PER_PAGE; i++) { + if (page_bitmaps[i] != 0) { + return; + } + } + // They are all unallocated, so we could madvise the page. Try to + // take ownership of them all. + int last_locked = 0; + do { + if (!dispatch_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0), + BITMAP_ALL_ONES, relaxed)) { + // We didn't get one; since there is a cont allocated in + // the page, we can't madvise. 
Give up and unlock all. + goto unlock; + } + } while (++last_locked < (signed)BITMAPS_PER_PAGE); +#if DISPATCH_DEBUG + //fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), " + // "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next, + // last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]); + // Scribble to expose use-after-free bugs + // madvise (syscall) flushes these stores + memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, PAGE_SIZE); +#endif + (void)dispatch_assume_zero(madvise(page, PAGE_SIZE, MADV_FREE)); + +unlock: + while (last_locked > 1) { + page_bitmaps[--last_locked] = BITMAP_C(0); + } + if (last_locked) { + dispatch_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed); + } + return; +} + +DISPATCH_ALLOC_NOINLINE +static void +_dispatch_alloc_continuation_free(dispatch_continuation_t c) +{ + bitmap_t *b, *s; + unsigned int b_idx, idx; + + get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx); + bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY); + if (slowpath(s)) { + (void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY); + } + // We only try to madvise(2) pages outside of the first page. + // (Allocations in the first page do not have a supermap entry.) + if (slowpath(bitmap_now_empty) && slowpath(s)) { + return _dispatch_alloc_maybe_madvise_page(c); + } +} + +#pragma mark - +#pragma mark dispatch_alloc_init + +#if DISPATCH_DEBUG +static void +_dispatch_alloc_init(void) +{ + // Double-check our math. These are all compile time checks and don't + // generate code. + + dispatch_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP); + dispatch_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP); + dispatch_assert(sizeof(struct dispatch_magazine_header_s) == + SIZEOF_HEADER); + + dispatch_assert(sizeof(struct dispatch_continuation_s) <= + DISPATCH_CONTINUATION_SIZE); + + // Magazines should be the right size, so they pack neatly into an array of + // heaps. 
+ dispatch_assert(sizeof(struct dispatch_magazine_s) == BYTES_PER_MAGAZINE); + + // The header and maps sizes should match what we computed. + dispatch_assert(SIZEOF_HEADER == + sizeof(((struct dispatch_magazine_s *)0x0)->header)); + dispatch_assert(SIZEOF_MAPS == + sizeof(((struct dispatch_magazine_s *)0x0)->maps)); + + // The main array of continuations should start at the second page, + // self-aligned. + dispatch_assert(offsetof(struct dispatch_magazine_s, conts) % + (CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0); + dispatch_assert(offsetof(struct dispatch_magazine_s, conts) == PAGE_SIZE); + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // The continuations in the first page should actually fit within the first + // page. + dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) < PAGE_SIZE); + dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) % + DISPATCH_CONTINUATION_SIZE == 0); + dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) + + sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == PAGE_SIZE); +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS +} +#else +static inline void _dispatch_alloc_init(void) {} +#endif + +#endif // DISPATCH_ALLOCATOR + +#pragma mark - +#pragma mark dispatch_malloc + +#if DISPATCH_CONTINUATION_MALLOC + +#if DISPATCH_USE_MALLOCZONE +static malloc_zone_t *_dispatch_ccache_zone; + +#define calloc(n, s) malloc_zone_calloc(_dispatch_ccache_zone, (n), (s)) +#define free(c) malloc_zone_free(_dispatch_ccache_zone, (c)) + +static void +_dispatch_malloc_init(void) +{ + _dispatch_ccache_zone = malloc_create_zone(0, 0); + dispatch_assert(_dispatch_ccache_zone); + malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); +} +#else +static inline void _dispatch_malloc_init(void) {} +#endif // DISPATCH_USE_MALLOCZONE + +static dispatch_continuation_t +_dispatch_malloc_continuation_alloc(void) +{ + dispatch_continuation_t dc; + while (!(dc = fastpath(calloc(1, + 
ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { + _dispatch_temporary_resource_shortage(); + } + return dc; +} + +static inline void +_dispatch_malloc_continuation_free(dispatch_continuation_t c) +{ + free(c); +} +#endif // DISPATCH_CONTINUATION_MALLOC + +#pragma mark - +#pragma mark dispatch_continuation_alloc + +#if DISPATCH_ALLOCATOR +#if DISPATCH_CONTINUATION_MALLOC +#if DISPATCH_USE_NANOZONE +extern boolean_t malloc_engaged_nano(void); +#else +#define malloc_engaged_nano() false +#endif // DISPATCH_USE_NANOZONE +static int _dispatch_use_dispatch_alloc; +#else +#define _dispatch_use_dispatch_alloc 1 +#endif // DISPATCH_CONTINUATION_MALLOC +#endif // DISPATCH_ALLOCATOR + +#if (DISPATCH_ALLOCATOR && (DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG)) \ + || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE) +static void +_dispatch_continuation_alloc_init(void *ctxt DISPATCH_UNUSED) +{ +#if DISPATCH_ALLOCATOR +#if DISPATCH_CONTINUATION_MALLOC + bool use_dispatch_alloc = !malloc_engaged_nano(); + char *e = getenv("LIBDISPATCH_CONTINUATION_ALLOCATOR"); + if (e) { + use_dispatch_alloc = atoi(e); + } + _dispatch_use_dispatch_alloc = use_dispatch_alloc; +#endif // DISPATCH_CONTINUATION_MALLOC + if (_dispatch_use_dispatch_alloc) + return _dispatch_alloc_init(); +#endif // DISPATCH_ALLOCATOR +#if DISPATCH_CONTINUATION_MALLOC + return _dispatch_malloc_init(); +#endif // DISPATCH_ALLOCATOR +} + +static void +_dispatch_continuation_alloc_once() +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_continuation_alloc_init); +} +#else +static inline void _dispatch_continuation_alloc_once(void) {} +#endif // DISPATCH_ALLOCATOR ... || DISPATCH_CONTINUATION_MALLOC ... 
+ +dispatch_continuation_t +_dispatch_continuation_alloc_from_heap(void) +{ + _dispatch_continuation_alloc_once(); +#if DISPATCH_ALLOCATOR + if (_dispatch_use_dispatch_alloc) + return _dispatch_alloc_continuation_alloc(); +#endif +#if DISPATCH_CONTINUATION_MALLOC + return _dispatch_malloc_continuation_alloc(); +#endif +} + +void +_dispatch_continuation_free_to_heap(dispatch_continuation_t c) +{ +#if DISPATCH_ALLOCATOR + if (_dispatch_use_dispatch_alloc) + return _dispatch_alloc_continuation_free(c); +#endif +#if DISPATCH_CONTINUATION_MALLOC + return _dispatch_malloc_continuation_free(c); +#endif +} + diff --git a/src/allocator_internal.h b/src/allocator_internal.h new file mode 100644 index 0000000..5f223f6 --- /dev/null +++ b/src/allocator_internal.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_ALLOCATOR_INTERNAL__ +#define __DISPATCH_ALLOCATOR_INTERNAL__ + +#ifndef DISPATCH_ALLOCATOR +#if TARGET_OS_MAC && (defined(__LP64__) || TARGET_OS_EMBEDDED) +#define DISPATCH_ALLOCATOR 1 +#endif +#endif + +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 +#undef DISPATCH_USE_NANOZONE +#define DISPATCH_USE_NANOZONE 0 +#endif +#ifndef DISPATCH_USE_NANOZONE +#if TARGET_OS_MAC && defined(__LP64__) && \ + (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1090 || \ + __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000) +#define DISPATCH_USE_NANOZONE 1 +#endif +#endif + +#ifndef DISPATCH_USE_MALLOCZONE +#if (TARGET_OS_MAC && !DISPATCH_USE_NANOZONE) || \ + (!TARGET_OS_MAC && HAVE_MALLOC_CREATE_ZONE) +#define DISPATCH_USE_MALLOCZONE 1 +#endif +#endif + +#ifndef DISPATCH_CONTINUATION_MALLOC +#if DISPATCH_USE_NANOZONE || !DISPATCH_ALLOCATOR +#define DISPATCH_CONTINUATION_MALLOC 1 +#endif +#endif + +#if !DISPATCH_ALLOCATOR && !DISPATCH_CONTINUATION_MALLOC +#error Invalid allocator configuration +#endif + +#if DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC +#define DISPATCH_ALLOC_NOINLINE DISPATCH_NOINLINE +#else +#define DISPATCH_ALLOC_NOINLINE +#endif + +#pragma mark - +#pragma mark DISPATCH_ALLOCATOR + +#if DISPATCH_ALLOCATOR + +// Configuration here! +#define NUM_CPU _dispatch_hw_config.cc_max_logical +#define MAGAZINES_PER_HEAP (NUM_CPU) + +// Do you care about compaction or performance? +#if TARGET_OS_EMBEDDED +#define PACK_FIRST_PAGE_WITH_CONTINUATIONS 1 +#else +#define PACK_FIRST_PAGE_WITH_CONTINUATIONS 0 +#endif + +#if TARGET_OS_EMBEDDED +#define PAGES_PER_MAGAZINE 64 +#else +#define PAGES_PER_MAGAZINE 512 +#endif + +// Use the largest type your platform is comfortable doing atomic ops with. 
+#if defined(__x86_64__) // TODO: rdar://11477843 +typedef unsigned long bitmap_t; +#define BYTES_PER_BITMAP 8 +#else +typedef uint32_t bitmap_t; +#define BYTES_PER_BITMAP 4 +#endif + +#define BITMAP_C(v) ((bitmap_t)(v)) +#define BITMAP_ALL_ONES (~BITMAP_C(0)) + +// Stop configuring. + +#define CONTINUATIONS_PER_BITMAP (BYTES_PER_BITMAP * 8) +#define BITMAPS_PER_SUPERMAP (BYTES_PER_SUPERMAP * 8) + +#define BYTES_PER_MAGAZINE (PAGES_PER_MAGAZINE * PAGE_SIZE) +#define CONSUMED_BYTES_PER_BITMAP (BYTES_PER_BITMAP + \ + (DISPATCH_CONTINUATION_SIZE * CONTINUATIONS_PER_BITMAP)) + +#define BYTES_PER_SUPERMAP BYTES_PER_BITMAP +#define CONSUMED_BYTES_PER_SUPERMAP (BYTES_PER_SUPERMAP + \ + (BITMAPS_PER_SUPERMAP * CONSUMED_BYTES_PER_BITMAP)) + +#define BYTES_PER_HEAP (BYTES_PER_MAGAZINE * MAGAZINES_PER_HEAP) + +#define BYTES_PER_PAGE PAGE_SIZE +#define CONTINUATIONS_PER_PAGE (BYTES_PER_PAGE / DISPATCH_CONTINUATION_SIZE) +#define BITMAPS_PER_PAGE (CONTINUATIONS_PER_PAGE / CONTINUATIONS_PER_BITMAP) + +// Assumption: metadata will be only in the first page. 
+#define SUPERMAPS_PER_MAGAZINE ((BYTES_PER_MAGAZINE - BYTES_PER_PAGE) / \ + CONSUMED_BYTES_PER_SUPERMAP) +#define BITMAPS_PER_MAGAZINE (SUPERMAPS_PER_MAGAZINE * BITMAPS_PER_SUPERMAP) +#define CONTINUATIONS_PER_MAGAZINE \ + (BITMAPS_PER_MAGAZINE * CONTINUATIONS_PER_BITMAP) + +#define HEAP_MASK (~(uintptr_t)(BYTES_PER_HEAP - 1)) +#define MAGAZINE_MASK (~(uintptr_t)(BYTES_PER_MAGAZINE - 1)) + +#define PADDING_TO_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(x) - (x)) + +#if defined(__LP64__) +#define SIZEOF_HEADER 16 +#else +#define SIZEOF_HEADER 8 +#endif + +#define SIZEOF_SUPERMAPS (BYTES_PER_SUPERMAP * SUPERMAPS_PER_MAGAZINE) +#define SIZEOF_MAPS (BYTES_PER_BITMAP * BITMAPS_PER_SUPERMAP * \ + SUPERMAPS_PER_MAGAZINE) + +// header is expected to end on supermap's required alignment +#define HEADER_TO_SUPERMAPS_PADDING 0 +#define SUPERMAPS_TO_MAPS_PADDING (PADDING_TO_CONTINUATION_SIZE( \ + SIZEOF_SUPERMAPS + HEADER_TO_SUPERMAPS_PADDING + SIZEOF_HEADER)) +#define MAPS_TO_FPMAPS_PADDING (PADDING_TO_CONTINUATION_SIZE(SIZEOF_MAPS)) + +#define BYTES_LEFT_IN_FIRST_PAGE (BYTES_PER_PAGE - \ + (SIZEOF_HEADER + HEADER_TO_SUPERMAPS_PADDING + SIZEOF_SUPERMAPS + \ + SUPERMAPS_TO_MAPS_PADDING + SIZEOF_MAPS + MAPS_TO_FPMAPS_PADDING)) + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#define FULL_BITMAPS_IN_FIRST_PAGE \ + (BYTES_LEFT_IN_FIRST_PAGE / CONSUMED_BYTES_PER_BITMAP) +#define REMAINDER_IN_FIRST_PAGE (BYTES_LEFT_IN_FIRST_PAGE - \ + (FULL_BITMAPS_IN_FIRST_PAGE * CONSUMED_BYTES_PER_BITMAP) - \ + (FULL_BITMAPS_IN_FIRST_PAGE ? 0 : ROUND_UP_TO_CONTINUATION_SIZE(BYTES_PER_BITMAP))) + +#define REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE \ + (REMAINDER_IN_FIRST_PAGE / DISPATCH_CONTINUATION_SIZE) +#define CONTINUATIONS_IN_FIRST_PAGE (FULL_BITMAPS_IN_FIRST_PAGE * \ + CONTINUATIONS_PER_BITMAP) + REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE +#define BITMAPS_IN_FIRST_PAGE (FULL_BITMAPS_IN_FIRST_PAGE + \ + (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE == 0 ? 
0 : 1)) + +#define FPMAPS_TO_FPCONTS_PADDING (PADDING_TO_CONTINUATION_SIZE(\ + BYTES_PER_BITMAP * BITMAPS_IN_FIRST_PAGE)) + +#else // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#define MAPS_TO_CONTS_PADDING BYTES_LEFT_IN_FIRST_PAGE + +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#define AFTER_CONTS_PADDING (BYTES_PER_MAGAZINE - (BYTES_PER_PAGE + \ + (DISPATCH_CONTINUATION_SIZE * CONTINUATIONS_PER_MAGAZINE))) + +// This is the object our allocator allocates: a chunk of memory rounded up +// from sizeof(struct dispatch_continuation_s) to the cacheline size, so +// unrelated continuations don't share cachelines. It'd be nice if +// dispatch_continuation_s included this rounding/padding, but it doesn't. +typedef char padded_continuation[DISPATCH_CONTINUATION_SIZE]; + +// A dispatch_heap_t is the base address of an array of dispatch_magazine_s, +// one magazine per CPU. +typedef struct dispatch_magazine_s * dispatch_heap_t; + +struct dispatch_magazine_header_s { + // Link to the next heap in the chain. Only used in magazine 0's header + dispatch_heap_t dh_next; + + // Points to the first bitmap in the page where this CPU succesfully + // allocated a continuation last time. Only used in the first heap. + bitmap_t *last_found_page; +}; + +// A magazine is a complex data structure. It must be exactly +// PAGES_PER_MAGAZINE * PAGE_SIZE bytes long, and that value must be a +// power of 2. (See magazine_for_continuation()). +struct dispatch_magazine_s { + // See above. + struct dispatch_magazine_header_s header; + + // Align supermaps as needed. +#if HEADER_TO_SUPERMAPS_PADDING > 0 + char _pad0[HEADER_TO_SUPERMAPS_PADDING]; +#endif + + // Second-level bitmap; each set bit means a bitmap_t in maps[][] + // is completely full (and can be skipped while searching). + bitmap_t supermaps[SUPERMAPS_PER_MAGAZINE]; + + // Align maps to a cacheline. 
+#if SUPERMAPS_TO_MAPS_PADDING > 0 + char _pad1[SUPERMAPS_TO_MAPS_PADDING]; +#endif + + // Each bit in maps[][] is the free/used state of a member of conts[][][]. + bitmap_t maps[SUPERMAPS_PER_MAGAZINE][BITMAPS_PER_SUPERMAP]; + + // Align fp_maps to a cacheline. +#if MAPS_TO_FPMAPS_PADDING > 0 + char _pad2[MAPS_TO_FPMAPS_PADDING]; +#endif + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // Bitmaps for the continuations that live in the first page, which + // are treated specially (they have faster search code). + bitmap_t fp_maps[BITMAPS_IN_FIRST_PAGE]; + + // Align fp_conts to cacheline. +#if FPMAPS_TO_FPCONTS_PADDING > 0 + char _pad3[FPMAPS_TO_FPCONTS_PADDING]; +#endif + + // Continuations that live in the first page. + padded_continuation fp_conts[CONTINUATIONS_IN_FIRST_PAGE]; + +#else // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#if MAPS_TO_CONTS_PADDING > 0 + char _pad4[MAPS_TO_CONTS_PADDING]; +#endif +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + + // This is the big array of continuations. + // This must start on a page boundary. + padded_continuation conts[SUPERMAPS_PER_MAGAZINE][BITMAPS_PER_SUPERMAP] + [CONTINUATIONS_PER_BITMAP]; + + // Fill the unused space to exactly BYTES_PER_MAGAZINE +#if AFTER_CONTS_PADDING > 0 + char _pad5[AFTER_CONTS_PADDING]; +#endif +}; + +#if DISPATCH_DEBUG +#define DISPATCH_ALLOCATOR_SCRIBBLE ((uintptr_t)0xAFAFAFAFAFAFAFAF) +#endif + +#endif // DISPATCH_ALLOCATOR + +#endif // __DISPATCH_ALLOCATOR_INTERNAL__ diff --git a/src/apply.c b/src/apply.c index 1a77114..aa187a0 100644 --- a/src/apply.c +++ b/src/apply.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -20,77 +20,86 @@ #include "internal.h" +typedef void (*dispatch_apply_function_t)(void *, size_t); + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_invoke(void *ctxt) +_dispatch_apply_invoke2(void *ctxt) { - dispatch_apply_t da = ctxt; + dispatch_apply_t da = (dispatch_apply_t)ctxt; size_t const iter = da->da_iterations; - typeof(da->da_func) const func = da->da_func; - void *const da_ctxt = da->da_ctxt; size_t idx, done = 0; - _dispatch_workitem_dec(); // this unit executes many items + idx = dispatch_atomic_inc_orig2o(da, da_index, acquire); + if (!fastpath(idx < iter)) goto out; + + // da_dc is only safe to access once the 'index lock' has been acquired + dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; + void *const da_ctxt = da->da_dc->dc_ctxt; + + _dispatch_perfmon_workitem_dec(); // this unit executes many items + + // Handle nested dispatch_apply rdar://problem/9294578 + size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); + _dispatch_thread_setspecific(dispatch_apply_key, (void*)da->da_nested); - // Make nested dispatch_apply fall into serial case rdar://problem/9294578 - _dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul); // Striding is the responsibility of the caller. - while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) { + do { _dispatch_client_callout2(da_ctxt, idx, func); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); done++; - } - _dispatch_thread_setspecific(dispatch_apply_key, NULL); + idx = dispatch_atomic_inc_orig2o(da, da_index, relaxed); + } while (fastpath(idx < iter)); + _dispatch_thread_setspecific(dispatch_apply_key, (void*)nested); - dispatch_atomic_release_barrier(); - - // The thread that finished the last workitem wakes up the (possibly waiting) + // The thread that finished the last workitem wakes up the possibly waiting // thread that called dispatch_apply. They could be one and the same. 
- if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) { + if (!dispatch_atomic_sub2o(da, da_todo, done, release)) { _dispatch_thread_semaphore_signal(da->da_sema); } - - if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) { +out: + if (dispatch_atomic_dec2o(da, da_thr_cnt, release) == 0) { _dispatch_continuation_free((dispatch_continuation_t)da); } } DISPATCH_NOINLINE -static void -_dispatch_apply2(void *ctxt) +void +_dispatch_apply_invoke(void *ctxt) { - _dispatch_apply_invoke(ctxt); + _dispatch_apply_invoke2(ctxt); } -static void -_dispatch_apply3(void *ctxt) +DISPATCH_NOINLINE +void +_dispatch_apply_redirect_invoke(void *ctxt) { - dispatch_apply_t da = ctxt; - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + dispatch_apply_t da = (dispatch_apply_t)ctxt; + dispatch_queue_t old_dq; + old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, da->da_queue); - _dispatch_apply_invoke(ctxt); + _dispatch_thread_setspecific(dispatch_queue_key, da->da_dc->dc_data); + _dispatch_apply_invoke2(ctxt); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } static void _dispatch_apply_serial(void *ctxt) { - dispatch_apply_t da = ctxt; + dispatch_apply_t da = (dispatch_apply_t)ctxt; + dispatch_continuation_t dc = da->da_dc; + size_t const iter = da->da_iterations; size_t idx = 0; - _dispatch_workitem_dec(); // this unit executes many items + _dispatch_perfmon_workitem_dec(); // this unit executes many items do { - _dispatch_client_callout2(da->da_ctxt, idx, da->da_func); - _dispatch_workitem_inc(); - } while (++idx < da->da_iterations); + _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); + _dispatch_perfmon_workitem_inc(); + } while (++idx < iter); _dispatch_continuation_free((dispatch_continuation_t)da); } -// 64 threads should be good enough for the short to mid term -#define DISPATCH_APPLY_MAX_CPUS 64 - DISPATCH_ALWAYS_INLINE static inline void 
_dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, @@ -123,8 +132,8 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, _dispatch_queue_push_list(dq, head, tail, continuation_cnt); // Call the first element directly - _dispatch_apply2(da); - _dispatch_workitem_inc(); + _dispatch_apply_invoke(da); + _dispatch_perfmon_workitem_inc(); _dispatch_thread_semaphore_wait(sema); _dispatch_put_thread_semaphore(sema); @@ -134,17 +143,17 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, static void _dispatch_apply_redirect(void *ctxt) { - dispatch_apply_t da = ctxt; + dispatch_apply_t da = (dispatch_apply_t)ctxt; uint32_t da_width = 2 * (da->da_thr_cnt - 1); - dispatch_queue_t dq = da->da_queue, rq = dq, tq; + dispatch_queue_t dq = da->da_dc->dc_data, rq = dq, tq; do { - uint32_t running = dispatch_atomic_add2o(rq, dq_running, da_width); - uint32_t width = rq->dq_width; + uint32_t running, width = rq->dq_width; + running = dispatch_atomic_add2o(rq, dq_running, da_width, relaxed); if (slowpath(running > width)) { uint32_t excess = width > 1 ? 
running - width : da_width; for (tq = dq; 1; tq = tq->do_targetq) { - (void)dispatch_atomic_sub2o(tq, dq_running, excess); + (void)dispatch_atomic_sub2o(tq, dq_running, excess, relaxed); if (tq == rq) { break; } @@ -157,13 +166,15 @@ _dispatch_apply_redirect(void *ctxt) } rq = rq->do_targetq; } while (slowpath(rq->do_targetq)); - _dispatch_apply_f2(rq, da, _dispatch_apply3); + _dispatch_apply_f2(rq, da, _dispatch_apply_redirect_invoke); do { - (void)dispatch_atomic_sub2o(dq, dq_running, da_width); + (void)dispatch_atomic_sub2o(dq, dq_running, da_width, relaxed); dq = dq->do_targetq; } while (slowpath(dq->do_targetq)); } +#define DISPATCH_APPLY_MAX UINT16_MAX // must be < sqrt(SIZE_MAX) + DISPATCH_NOINLINE void dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, @@ -172,39 +183,51 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, if (slowpath(iterations == 0)) { return; } - + uint32_t thr_cnt = _dispatch_hw_config.cc_max_active; + size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); + if (!slowpath(nested)) { + nested = iterations; + } else { + thr_cnt = nested < thr_cnt ? thr_cnt / nested : 1; + nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX + ? 
nested * iterations : DISPATCH_APPLY_MAX; + } + if (iterations < thr_cnt) { + thr_cnt = (uint32_t)iterations; + } + struct dispatch_continuation_s dc = { + .dc_func = (void*)func, + .dc_ctxt = ctxt, + }; dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc(); - - da->da_func = func; - da->da_ctxt = ctxt; - da->da_iterations = iterations; da->da_index = 0; - da->da_thr_cnt = _dispatch_hw_config.cc_max_active; - da->da_done = 0; - da->da_queue = NULL; - - if (da->da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) { - da->da_thr_cnt = DISPATCH_APPLY_MAX_CPUS; - } - if (iterations < da->da_thr_cnt) { - da->da_thr_cnt = (uint32_t)iterations; + da->da_todo = iterations; + da->da_iterations = iterations; + da->da_nested = nested; + da->da_thr_cnt = thr_cnt; + da->da_dc = &dc; + + dispatch_queue_t old_dq; + old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); + if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { + dq = old_dq ? old_dq : _dispatch_get_root_queue(0, 0); + while (slowpath(dq->do_targetq)) { + dq = dq->do_targetq; + } } - if (slowpath(dq->dq_width <= 2) || slowpath(da->da_thr_cnt <= 1) || - slowpath(_dispatch_thread_getspecific(dispatch_apply_key))) { + if (slowpath(dq->dq_width <= 2) || slowpath(thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); if (slowpath(dq->do_targetq)) { if (slowpath(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } else { - da->da_queue = dq; + dc.dc_data = dq; return dispatch_sync_f(dq, da, _dispatch_apply_redirect); } } - dispatch_atomic_acquire_barrier(); _dispatch_thread_setspecific(dispatch_queue_key, dq); - _dispatch_apply_f2(dq, da, _dispatch_apply2); + _dispatch_apply_f2(dq, da, _dispatch_apply_invoke); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } @@ -215,8 +238,9 @@ static void _dispatch_apply_slow(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) { 
- struct Block_basic *bb = (void *)_dispatch_Block_copy((void *)work); - dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); + dispatch_block_t bb = _dispatch_Block_copy((void *)work); + dispatch_apply_f(iterations, dq, bb, + (dispatch_apply_function_t)_dispatch_Block_invoke(bb)); Block_release(bb); } #endif @@ -231,8 +255,8 @@ dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) return _dispatch_apply_slow(iterations, dq, work); } #endif - struct Block_basic *bb = (void *)work; - dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); + dispatch_apply_f(iterations, dq, work, + (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } #endif @@ -242,9 +266,8 @@ void dispatch_stride(size_t offset, size_t stride, size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) { - struct Block_basic *bb = (void *)work; - dispatch_stride_f(offset, stride, iterations, dq, bb, - (void *)bb->Block_invoke); + dispatch_stride_f(offset, stride, iterations, dq, work, + (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } #endif diff --git a/src/benchmark.c b/src/benchmark.c index f340b44..49a4faa 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -67,24 +67,28 @@ _dispatch_benchmark_init(void *context) #endif lcost /= cnt; - bdata->loop_cost = lcost; + bdata->loop_cost = lcost > UINT64_MAX ? 
UINT64_MAX : (uint64_t)lcost; } #ifdef __BLOCKS__ uint64_t dispatch_benchmark(size_t count, void (^block)(void)) { - struct Block_basic *bb = (void *)block; - return dispatch_benchmark_f(count, block, (void *)bb->Block_invoke); + return dispatch_benchmark_f(count, block, _dispatch_Block_invoke(block)); } #endif +static void +_dispatch_benchmark_dummy_function(void *ctxt DISPATCH_UNUSED) +{ +} + uint64_t dispatch_benchmark_f(size_t count, register void *ctxt, register void (*func)(void *)) { static struct __dispatch_benchmark_data_s bdata = { - .func = (void *)dummy_function, + .func = _dispatch_benchmark_dummy_function, .count = 10000000ul, // ten million }; static dispatch_once_t pred; @@ -118,7 +122,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, #endif big_denom *= count; conversion /= big_denom; - ns = conversion; + ns = conversion > UINT64_MAX ? UINT64_MAX : (uint64_t)conversion; return ns - bdata.loop_cost; } diff --git a/src/data.c b/src/data.c index 8048964..feb6012 100644 --- a/src/data.c +++ b/src/data.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -26,22 +26,15 @@ // a pointer to represented memory. A composite data object specifies the total // size of data it represents and list of constituent records. // -// A leaf data object has a single entry in records[], the object size is the -// same as records[0].length and records[0].from is always 0. In other words, a -// leaf data object always points to a full represented buffer, so a composite +// A leaf data object always points to a full represented buffer, a composite // dispatch data object is needed to represent a subrange of a memory region. 
+#if USE_OBJC +#define _dispatch_data_retain(x) _dispatch_objc_retain(x) +#define _dispatch_data_release(x) _dispatch_objc_release(x) +#else #define _dispatch_data_retain(x) dispatch_retain(x) #define _dispatch_data_release(x) dispatch_release(x) - -#if DISPATCH_DATA_MOVABLE -#if DISPATCH_USE_RESOLVERS && !defined(DISPATCH_RESOLVED_VARIANT) -#error Resolved variant required for movable -#endif -static const dispatch_block_t _dispatch_data_destructor_unlock = ^{ - DISPATCH_CRASH("unlock destructor called"); -}; -#define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock) #endif const dispatch_block_t _dispatch_data_destructor_free = ^{ @@ -56,22 +49,32 @@ const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{ DISPATCH_CRASH("vmdeallocate destructor called"); }; +const dispatch_block_t _dispatch_data_destructor_inline = ^{ + DISPATCH_CRASH("inline destructor called"); +}; + struct dispatch_data_s _dispatch_data_empty = { - .do_vtable = DISPATCH_VTABLE(data), + .do_vtable = DISPATCH_DATA_EMPTY_CLASS, +#if !USE_OBJC .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_next = DISPATCH_OBJECT_LISTLESS, +#endif }; -static dispatch_data_t -_dispatch_data_init(size_t n) +DISPATCH_ALWAYS_INLINE +static inline dispatch_data_t +_dispatch_data_alloc(size_t n, size_t extra) { - dispatch_data_t data = _dispatch_alloc(DISPATCH_VTABLE(data), - sizeof(struct dispatch_data_s) + n * sizeof(range_record)); + dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS, + sizeof(struct dispatch_data_s) + extra + + (n ? 
n * sizeof(range_record) - sizeof(data->buf) : 0)); data->num_records = n; +#if !USE_OBJC data->do_targetq = dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); data->do_next = DISPATCH_OBJECT_LISTLESS; +#endif return data; } @@ -84,7 +87,9 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, } else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) { // do nothing } else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) { - vm_deallocate(mach_task_self(), (vm_address_t)buffer, size); + mach_vm_size_t vm_size = size; + mach_vm_address_t vm_addr = (uintptr_t)buffer; + mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); } else { if (!queue) { queue = dispatch_get_global_queue( @@ -94,11 +99,46 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, } } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, + dispatch_queue_t queue, dispatch_block_t destructor) +{ + data->buf = buffer; + data->size = size; + data->destructor = destructor; +#if DISPATCH_DATA_USE_LEAF_MEMBER + data->leaf = true; + data->num_records = 1; +#endif + if (queue) { + _dispatch_retain(queue); + data->do_targetq = queue; + } +} + +void +dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, + dispatch_block_t destructor) +{ + if (!buffer || !size) { + if (destructor) { + _dispatch_data_destroy_buffer(buffer, size, NULL, + _dispatch_Block_copy(destructor)); + } + buffer = NULL; + size = 0; + destructor = DISPATCH_DATA_DESTRUCTOR_NONE; + } + _dispatch_data_init(data, buffer, size, NULL, destructor); +} + dispatch_data_t dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, dispatch_block_t destructor) { dispatch_data_t data; + void *data_buf = NULL; if (!buffer || !size) { // Empty data requested so return the singleton empty object. 
Call // destructor immediately in this case to ensure any unused associated @@ -109,35 +149,60 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, } return dispatch_data_empty; } - data = _dispatch_data_init(1); - // Leaf objects always point to the entirety of the memory region - data->leaf = true; - data->size = size; - data->records[0].from = 0; - data->records[0].length = size; if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) { // The default destructor was provided, indicating the data should be // copied. - void *data_buf = malloc(size); + data_buf = malloc(size); if (slowpath(!data_buf)) { - free(data); return NULL; } buffer = memcpy(data_buf, buffer, size); - data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE; + data = _dispatch_data_alloc(0, 0); + destructor = DISPATCH_DATA_DESTRUCTOR_FREE; + } else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) { + data = _dispatch_data_alloc(0, size); + buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer, + size); + destructor = DISPATCH_DATA_DESTRUCTOR_NONE; } else { - data->destructor = _dispatch_Block_copy(destructor); -#if DISPATCH_DATA_MOVABLE - // A non-default destructor was provided, indicating the system does not - // own the buffer. Mark the object as locked since the application has - // direct access to the buffer and it cannot be reallocated/moved. 
- data->locked = 1; -#endif + data = _dispatch_data_alloc(0, 0); + destructor = _dispatch_Block_copy(destructor); } - data->records[0].data_object = (void*)buffer; - if (queue) { - _dispatch_retain(queue); - data->do_targetq = queue; + _dispatch_data_init(data, buffer, size, queue, destructor); + return data; +} + +dispatch_data_t +dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue, + dispatch_function_t destructor_function) +{ + dispatch_block_t destructor = (dispatch_block_t)destructor_function; + if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT && + destructor != DISPATCH_DATA_DESTRUCTOR_FREE && + destructor != DISPATCH_DATA_DESTRUCTOR_NONE && + destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE && + destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) { + destructor = ^{ destructor_function((void*)buffer); }; + } + return dispatch_data_create(buffer, size, queue, destructor); +} + +dispatch_data_t +dispatch_data_create_alloc(size_t size, void** buffer_ptr) +{ + dispatch_data_t data = dispatch_data_empty; + void *buffer = NULL; + + if (slowpath(!size)) { + goto out; + } + data = _dispatch_data_alloc(0, size); + buffer = (void*)data + sizeof(struct dispatch_data_s); + _dispatch_data_init(data, buffer, size, NULL, + DISPATCH_DATA_DESTRUCTOR_NONE); +out: + if (buffer_ptr) { + *buffer_ptr = buffer; } return data; } @@ -148,18 +213,12 @@ _dispatch_data_dispose(dispatch_data_t dd) dispatch_block_t destructor = dd->destructor; if (destructor == NULL) { size_t i; - for (i = 0; i < dd->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd); ++i) { _dispatch_data_release(dd->records[i].data_object); } -#if DISPATCH_DATA_MOVABLE - } else if (destructor == DISPATCH_DATA_DESTRUCTOR_UNLOCK) { - dispatch_data_t data = (dispatch_data_t)dd->records[0].data_object; - (void)dispatch_atomic_dec2o(data, locked); - _dispatch_data_release(data); -#endif } else { - _dispatch_data_destroy_buffer(dd->records[0].data_object, - dd->records[0].length, 
dd->do_targetq, destructor); + _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq, + destructor); } } @@ -167,22 +226,23 @@ size_t _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz) { size_t offset = 0; - if (dd->leaf) { - offset += snprintf(&buf[offset], bufsiz - offset, - "leaf: %d, size: %zd, data: %p", dd->leaf, dd->size, - dd->records[0].data_object); + offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd); + if (_dispatch_data_leaf(dd)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, + "leaf, size = %zd, buf = %p ", dd->size, dd->buf); } else { - offset += snprintf(&buf[offset], bufsiz - offset, - "leaf: %d, size: %zd, num_records: %zd", dd->leaf, - dd->size, dd->num_records); + offset += dsnprintf(&buf[offset], bufsiz - offset, + "composite, size = %zd, num_records = %zd ", dd->size, + _dispatch_data_num_records(dd)); size_t i; - for (i = 0; i < dd->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd); ++i) { range_record r = dd->records[i]; - offset += snprintf(&buf[offset], bufsiz - offset, - "records[%zd] from: %zd, length %zd, data_object: %p", i, + offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = " + "{ from = %zd, length = %zd, data_object = %p }, ", i, r.from, r.length, r.data_object); } } + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); return offset; } @@ -204,22 +264,29 @@ dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) _dispatch_data_retain(dd1); return dd1; } - data = _dispatch_data_init(dd1->num_records + dd2->num_records); + data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) + + _dispatch_data_num_records(dd2), 0); data->size = dd1->size + dd2->size; // Copy the constituent records into the newly created data object - memcpy(data->records, dd1->records, dd1->num_records * - sizeof(range_record)); - memcpy(data->records + dd1->num_records, dd2->records, dd2->num_records * - sizeof(range_record)); // Reference leaf 
objects as sub-objects - if (dd1->leaf) { + if (_dispatch_data_leaf(dd1)) { + data->records[0].from = 0; + data->records[0].length = dd1->size; data->records[0].data_object = dd1; + } else { + memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) * + sizeof(range_record)); } - if (dd2->leaf) { - data->records[dd1->num_records].data_object = dd2; + if (_dispatch_data_leaf(dd2)) { + data->records[_dispatch_data_num_records(dd1)].from = 0; + data->records[_dispatch_data_num_records(dd1)].length = dd2->size; + data->records[_dispatch_data_num_records(dd1)].data_object = dd2; + } else { + memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records, + _dispatch_data_num_records(dd2) * sizeof(range_record)); } size_t i; - for (i = 0; i < data->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(data); ++i) { _dispatch_data_retain(data->records[i].data_object); } return data; @@ -238,8 +305,8 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, _dispatch_data_retain(dd); return dd; } - if (dd->leaf) { - data = _dispatch_data_init(1); + if (_dispatch_data_leaf(dd)) { + data = _dispatch_data_alloc(1, 0); data->size = length; data->records[0].from = offset; data->records[0].length = length; @@ -251,10 +318,11 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // the specified offset data = dispatch_data_empty; size_t i = 0, bytes_left = length; - while (i < dd->num_records && offset >= dd->records[i].length) { + while (i < _dispatch_data_num_records(dd) && + offset >= dd->records[i].length) { offset -= dd->records[i++].length; } - while (i < dd->num_records) { + while (i < _dispatch_data_num_records(dd)) { size_t record_len = dd->records[i].length - offset; if (record_len > bytes_left) { record_len = bytes_left; @@ -287,32 +355,20 @@ dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, size_t *size_ptr) { dispatch_data_t data = dd; - void *buffer = NULL; + const void *buffer = NULL; size_t 
size = dd->size, offset = 0; if (!size) { data = dispatch_data_empty; goto out; } - if (!dd->leaf && dd->num_records == 1 && - ((dispatch_data_t)dd->records[0].data_object)->leaf) { + if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(dd->records[0].data_object)) { offset = dd->records[0].from; - dd = (dispatch_data_t)(dd->records[0].data_object); - } - if (dd->leaf) { -#if DISPATCH_DATA_MOVABLE - data = _dispatch_data_init(1); - // Make sure the underlying leaf object does not move the backing buffer - (void)dispatch_atomic_inc2o(dd, locked); - data->size = size; - data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK; - data->records[0].data_object = dd; - data->records[0].from = offset; - data->records[0].length = size; - _dispatch_data_retain(dd); -#else + dd = dd->records[0].data_object; + } + if (_dispatch_data_leaf(dd)) { _dispatch_data_retain(data); -#endif - buffer = dd->records[0].data_object + offset; + buffer = dd->buf + offset; goto out; } // Composite data object, copy the represented buffers @@ -324,7 +380,7 @@ dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, } dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED, size_t off, const void* buf, size_t len) { - memcpy(buffer + off, buf, len); + memcpy((void*)buffer + off, buf, len); return (bool)true; }); data = dispatch_data_create(buffer, size, NULL, @@ -341,56 +397,50 @@ out: static bool _dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from, - size_t size, dispatch_data_applier_t applier) + size_t size, void *ctxt, dispatch_data_applier_function_t applier) { bool result = true; dispatch_data_t data = dd; const void *buffer; dispatch_assert(dd->size); -#if DISPATCH_DATA_MOVABLE - if (dd->leaf) { - data = _dispatch_data_init(1); - // Make sure the underlying leaf object does not move the backing buffer - (void)dispatch_atomic_inc2o(dd, locked); - data->size = size; - data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK; 
- data->records[0].data_object = dd; - data->records[0].from = from; - data->records[0].length = size; - _dispatch_data_retain(dd); - buffer = dd->records[0].data_object + from; - result = applier(data, offset, buffer, size); - _dispatch_data_release(data); - return result; - } -#else - if (!dd->leaf && dd->num_records == 1 && - ((dispatch_data_t)dd->records[0].data_object)->leaf) { + if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(dd->records[0].data_object)) { from = dd->records[0].from; - dd = (dispatch_data_t)(dd->records[0].data_object); + dd = dd->records[0].data_object; } - if (dd->leaf) { - buffer = dd->records[0].data_object + from; - return applier(data, offset, buffer, size); + if (_dispatch_data_leaf(dd)) { + buffer = dd->buf + from; + return _dispatch_client_callout3(ctxt, data, offset, buffer, size, + applier); } -#endif size_t i; - for (i = 0; i < dd->num_records && result; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) { result = _dispatch_data_apply(dd->records[i].data_object, - offset, dd->records[i].from, dd->records[i].length, + offset, dd->records[i].from, dd->records[i].length, ctxt, applier); offset += dd->records[i].length; } return result; } +bool +dispatch_data_apply_f(dispatch_data_t dd, void *ctxt, + dispatch_data_applier_function_t applier) +{ + if (!dd->size) { + return true; + } + return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier); +} + bool dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier) { if (!dd->size) { return true; } - return _dispatch_data_apply(dd, 0, 0, dd->size, applier); + return _dispatch_data_apply(dd, 0, 0, dd->size, applier, + (dispatch_data_applier_function_t)_dispatch_Block_invoke(applier)); } // Returs either a leaf object or an object composed of a single leaf object @@ -405,14 +455,14 @@ dispatch_data_copy_region(dispatch_data_t dd, size_t location, dispatch_data_t data; size_t size = dd->size, offset = 0, from 
= 0; while (true) { - if (dd->leaf) { + if (_dispatch_data_leaf(dd)) { _dispatch_data_retain(dd); *offset_ptr = offset; if (size == dd->size) { return dd; } else { // Create a new object for the requested subrange of the leaf - data = _dispatch_data_init(1); + data = _dispatch_data_alloc(1, 0); data->size = size; data->records[0].from = from; data->records[0].length = size; @@ -422,13 +472,14 @@ dispatch_data_copy_region(dispatch_data_t dd, size_t location, } else { // Find record at the specified location size_t i, pos; - for (i = 0; i < dd->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd); ++i) { pos = offset + dd->records[i].length; if (location < pos) { size = dd->records[i].length; from = dd->records[i].from; - data = (dispatch_data_t)(dd->records[i].data_object); - if (dd->num_records == 1 && data->leaf) { + data = dd->records[i].data_object; + if (_dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(data)) { // Return objects composed of a single leaf node *offset_ptr = offset; _dispatch_data_retain(dd); @@ -445,3 +496,69 @@ dispatch_data_copy_region(dispatch_data_t dd, size_t location, } } } + +#if HAVE_MACH + +#ifndef MAP_MEM_VM_COPY +#define MAP_MEM_VM_COPY 0x200000 // +#endif + +mach_port_t +dispatch_data_make_memory_entry(dispatch_data_t dd) +{ + mach_port_t mep = MACH_PORT_NULL; + memory_object_size_t mos; + mach_vm_size_t vm_size = dd->size; + mach_vm_address_t vm_addr; + vm_prot_t flags; + kern_return_t kr; + bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE); + +retry: + if (copy) { + vm_addr = vm_page_size; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE); + if (kr) { + if (kr != KERN_NO_SPACE) { + (void)dispatch_assume_zero(kr); + } + return mep; + } + dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED, + size_t off, const void* buf, size_t len) { + memcpy((void*)(vm_addr + off), buf, len); + return (bool)true; + }); + } else { + vm_addr = 
(uintptr_t)dd->buf; + } + flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY; + mos = vm_size; + kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags, + &mep, MACH_PORT_NULL); + if (kr == KERN_INVALID_VALUE) { + // Fallback in case MAP_MEM_VM_COPY is not supported + flags &= ~MAP_MEM_VM_COPY; + kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags, + &mep, MACH_PORT_NULL); + } + if (dispatch_assume_zero(kr)) { + mep = MACH_PORT_NULL; + } else if (mos < vm_size) { + // Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY + kr = mach_port_deallocate(mach_task_self(), mep); + (void)dispatch_assume_zero(kr); + if (!copy) { + copy = true; + goto retry; + } + mep = MACH_PORT_NULL; + } + if (copy) { + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + (void)dispatch_assume_zero(kr); + } + return mep; +} +#endif // HAVE_MACH diff --git a/src/data.m b/src/data.m new file mode 100644 index 0000000..3e3eee1 --- /dev/null +++ b/src/data.m @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#if USE_OBJC + +#if !__OBJC2__ +#error "Cannot build with legacy ObjC runtime" +#endif +#if _OS_OBJECT_OBJC_ARC +#error "Cannot build with ARC" +#endif + +#include + +@interface DISPATCH_CLASS(data) () +- (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy + freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm; +- (BOOL)_bytesAreVM; +@end + +@interface DISPATCH_CLASS(data_empty) : DISPATCH_CLASS(data) +@end + +@implementation DISPATCH_CLASS(data) + ++ (id)allocWithZone:(NSZone *) DISPATCH_UNUSED zone { + return _dispatch_objc_alloc(self, sizeof(struct dispatch_data_s)); +} + +- (id)init { + return [self initWithBytes:NULL length:0 copy:NO freeWhenDone:NO + bytesAreVM:NO]; +} + +- (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy + freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm { + dispatch_block_t destructor; + if (copy) { + destructor = DISPATCH_DATA_DESTRUCTOR_DEFAULT; + } else if (freeBytes) { + if (vm) { + destructor = DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE; + } else { + destructor = DISPATCH_DATA_DESTRUCTOR_FREE; + } + } else { + destructor = DISPATCH_DATA_DESTRUCTOR_NONE; + } + dispatch_data_init(self, bytes, length, destructor); + return self; +} + +#define _dispatch_data_objc_dispose(selector) \ + struct dispatch_data_s *dd = (void*)self; \ + _dispatch_data_dispose(self); \ + dispatch_queue_t tq = dd->do_targetq; \ + dispatch_function_t func = dd->finalizer; \ + void *ctxt = dd->ctxt; \ + [super selector]; \ + if (func && ctxt) { \ + if (!tq) { \ + tq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0);\ + } \ + dispatch_async_f(tq, ctxt, func); \ + } \ + if (tq) { \ + _dispatch_release(tq); \ + } + +- (void)dealloc { + _dispatch_data_objc_dispose(dealloc); +} + +- (void)finalize { + _dispatch_data_objc_dispose(finalize); +} + +- (BOOL)_bytesAreVM { + struct dispatch_data_s *dd = (void*)self; + return dd->destructor == 
DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE; +} + +- (void)_setContext:(void*)context { + struct dispatch_data_s *dd = (void*)self; + dd->ctxt = context; +} + +- (void*)_getContext { + struct dispatch_data_s *dd = (void*)self; + return dd->ctxt; +} + +- (void)_setFinalizer:(dispatch_function_t)finalizer { + struct dispatch_data_s *dd = (void*)self; + dd->finalizer = finalizer; +} + +- (void)_setTargetQueue:(dispatch_queue_t)queue { + struct dispatch_data_s *dd = (void*)self; + _dispatch_retain(queue); + dispatch_queue_t prev; + prev = dispatch_atomic_xchg2o(dd, do_targetq, queue, release); + if (prev) _dispatch_release(prev); +} + +- (NSString *)debugDescription { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + char buf[2048]; + _dispatch_data_debug(self, buf, sizeof(buf)); + return [nsstring stringWithFormat: + [nsstring stringWithUTF8String:"<%s: %s>"], + class_getName([self class]), buf]; +} + +@end + +@implementation DISPATCH_CLASS(data_empty) + +// Force non-lazy class realization rdar://10640168 ++ (void)load { +} + +- (id)retain { + return (id)self; +} + +- (oneway void)release { +} + +- (id)autorelease { + return (id)self; +} + +- (NSUInteger)retainCount { + return ULONG_MAX; +} + ++ (id)allocWithZone:(NSZone *) DISPATCH_UNUSED zone { + return (id)&_dispatch_data_empty; +} + +- (void)_setContext:(void*) DISPATCH_UNUSED context { +} + +- (void*)_getContext { + return NULL; +} + +- (void)_setFinalizer:(dispatch_function_t) DISPATCH_UNUSED finalizer { +} + +- (void)_setTargetQueue:(dispatch_queue_t) DISPATCH_UNUSED queue { +} + +@end + +#endif // USE_OBJC diff --git a/src/data_internal.h b/src/data_internal.h index 2dec5f0..d0de8bb 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2012 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,24 +32,61 @@ #include // for HeaderDoc #endif +#if defined(__LP64__) && !defined(DISPATCH_DATA_USE_LEAF_MEMBER) && !USE_OBJC +// explicit leaf member is free on 64bit due to padding +#define DISPATCH_DATA_USE_LEAF_MEMBER 1 +#endif + typedef struct range_record_s { - void* data_object; + dispatch_data_t data_object; size_t from; size_t length; } range_record; +#if USE_OBJC +#if OS_OBJECT_USE_OBJC +@interface DISPATCH_CLASS(data) : NSObject +@end +#endif +DISPATCH_OBJC_CLASS_DECL(data); +DISPATCH_OBJC_CLASS_DECL(data_empty); +#define DISPATCH_DATA_CLASS DISPATCH_OBJC_CLASS(data) +#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_OBJC_CLASS(data_empty) +#else // USE_OBJC DISPATCH_CLASS_DECL(data); +#define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) +#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_VTABLE(data) +#endif // USE_OBJC + struct dispatch_data_s { +#if USE_OBJC + const void *do_vtable; + dispatch_queue_t do_targetq; + void *ctxt; + void *finalizer; +#else // USE_OBJC DISPATCH_STRUCT_HEADER(data); -#if DISPATCH_DATA_MOVABLE - unsigned int locked; -#endif +#endif // USE_OBJC +#if DISPATCH_DATA_USE_LEAF_MEMBER bool leaf; +#endif dispatch_block_t destructor; size_t size, num_records; - range_record records[]; + union { + const void* buf; + range_record records[0]; + }; }; +#if DISPATCH_DATA_USE_LEAF_MEMBER +#define _dispatch_data_leaf(d) ((d)->leaf) +#define _dispatch_data_num_records(d) ((d)->num_records) +#else +#define _dispatch_data_leaf(d) ((d)->num_records ? 0 : ((d)->size ? 1 : 0)) +#define _dispatch_data_num_records(d) \ + (_dispatch_data_leaf(d) ? 
1 : (d)->num_records) +#endif // DISPATCH_DATA_USE_LEAF_MEMBER + typedef dispatch_data_t (*dispatch_transform_t)(dispatch_data_t data); struct dispatch_data_format_type_s { @@ -60,7 +97,31 @@ struct dispatch_data_format_type_s { dispatch_transform_t encode; }; +void dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, + dispatch_block_t destructor); void _dispatch_data_dispose(dispatch_data_t data); size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz); +const dispatch_block_t _dispatch_data_destructor_inline; +#define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline) + +#if !__OBJC2__ + +static inline const void* +_dispatch_data_map_direct(dispatch_data_t dd) +{ + size_t offset = 0; + if (slowpath(!dd->size)) { + return NULL; + } + if (slowpath(!_dispatch_data_leaf(dd)) && + _dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(dd->records[0].data_object)) { + offset = dd->records[0].from; + dd = dd->records[0].data_object; + } + return fastpath(_dispatch_data_leaf(dd)) ? (dd->buf + offset) : NULL; +} + +#endif // !__OBJC2__ #endif // __DISPATCH_DATA_INTERNAL__ diff --git a/src/init.c b/src/init.c index 0cdaeb0..5a8b4bb 100644 --- a/src/init.c +++ b/src/init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -55,24 +55,12 @@ dispatch_atfork_parent(void) { } -void -dummy_function(void) -{ -} - -long -dummy_function_r0(void) -{ - return 0; -} - #pragma mark - #pragma mark dispatch_globals #if DISPATCH_COCOA_COMPAT void (*dispatch_begin_thread_4GC)(void); void (*dispatch_end_thread_4GC)(void); -void (*dispatch_no_worker_threads_4GC)(void); void *(*_dispatch_begin_NSAutoReleasePool)(void); void (*_dispatch_end_NSAutoReleasePool)(void *); #endif @@ -83,13 +71,15 @@ pthread_key_t dispatch_sema4_key; pthread_key_t dispatch_cache_key; pthread_key_t dispatch_io_key; pthread_key_t dispatch_apply_key; -#if DISPATCH_PERF_MON +#if DISPATCH_INTROSPECTION +pthread_key_t dispatch_introspection_key; +#elif DISPATCH_PERF_MON pthread_key_t dispatch_bcounter_key; #endif #endif // !DISPATCH_USE_DIRECT_TSD struct _dispatch_hw_config_s _dispatch_hw_config; -bool _dispatch_safe_fork = true; +bool _dispatch_safe_fork = true, _dispatch_child_of_unsafe_fork; DISPATCH_NOINLINE bool @@ -98,8 +88,16 @@ _dispatch_is_multithreaded(void) return !_dispatch_safe_fork; } + +DISPATCH_NOINLINE +bool +_dispatch_is_fork_of_multithreaded_parent(void) +{ + return _dispatch_child_of_unsafe_fork; +} + const struct dispatch_queue_offsets_s dispatch_queue_offsets = { - .dqo_version = 3, + .dqo_version = 4, .dqo_label = offsetof(struct dispatch_queue_s, dq_label), .dqo_label_size = sizeof(((dispatch_queue_t)NULL)->dq_label), .dqo_flags = 0, @@ -127,6 +125,7 @@ struct dispatch_queue_s _dispatch_main_q = { .dq_label = "com.apple.main-thread", .dq_running = 1, .dq_width = 1, + .dq_is_thread_bound = 1, .dq_serialnum = 1, }; @@ -158,32 +157,42 @@ DISPATCH_VTABLE_INSTANCE(queue, .do_type = DISPATCH_QUEUE_TYPE, .do_kind = "queue", .do_dispose = _dispatch_queue_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, + .do_invoke = _dispatch_queue_invoke, + .do_probe = _dispatch_queue_probe, .do_debug = dispatch_queue_debug, ); 
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, - .do_type = DISPATCH_QUEUE_GLOBAL_TYPE, + .do_type = DISPATCH_QUEUE_ROOT_TYPE, .do_kind = "global-queue", + .do_dispose = _dispatch_pthread_root_queue_dispose, + .do_probe = _dispatch_root_queue_probe, + .do_debug = dispatch_queue_debug, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, + .do_type = DISPATCH_QUEUE_ROOT_TYPE, + .do_kind = "runloop-queue", + .do_dispose = _dispatch_runloop_queue_dispose, + .do_invoke = _dispatch_queue_invoke, + .do_probe = _dispatch_runloop_queue_probe, .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_queue_probe_root, ); DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, .do_type = DISPATCH_QUEUE_MGR_TYPE, .do_kind = "mgr-queue", .do_invoke = _dispatch_mgr_thread, + .do_probe = _dispatch_mgr_queue_probe, .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_mgr_wakeup, ); DISPATCH_VTABLE_INSTANCE(queue_specific_queue, .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, .do_kind = "queue-context", .do_dispose = _dispatch_queue_specific_queue_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, + .do_invoke = (void*)_dispatch_queue_invoke, + .do_probe = (void *)_dispatch_queue_probe, .do_debug = (void *)dispatch_queue_debug, ); @@ -195,46 +204,56 @@ DISPATCH_VTABLE_INSTANCE(queue_attr, DISPATCH_VTABLE_INSTANCE(source, .do_type = DISPATCH_SOURCE_KEVENT_TYPE, .do_kind = "kevent-source", - .do_invoke = _dispatch_source_invoke, .do_dispose = _dispatch_source_dispose, + .do_invoke = _dispatch_source_invoke, .do_probe = _dispatch_source_probe, .do_debug = _dispatch_source_debug, ); +DISPATCH_VTABLE_INSTANCE(mach, + .do_type = DISPATCH_MACH_CHANNEL_TYPE, + .do_kind = "mach-channel", + .do_dispose = _dispatch_mach_dispose, + .do_invoke = _dispatch_mach_invoke, + .do_probe = _dispatch_mach_probe, + .do_debug = _dispatch_mach_debug, +); + +DISPATCH_VTABLE_INSTANCE(mach_msg, + .do_type = DISPATCH_MACH_MSG_TYPE, + .do_kind = "mach-msg", + .do_dispose = 
_dispatch_mach_msg_dispose, + .do_invoke = _dispatch_mach_msg_invoke, + .do_debug = _dispatch_mach_msg_debug, +); + +#if !USE_OBJC DISPATCH_VTABLE_INSTANCE(data, .do_type = DISPATCH_DATA_TYPE, .do_kind = "data", .do_dispose = _dispatch_data_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, .do_debug = _dispatch_data_debug, ); +#endif DISPATCH_VTABLE_INSTANCE(io, .do_type = DISPATCH_IO_TYPE, .do_kind = "channel", .do_dispose = _dispatch_io_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, + .do_debug = _dispatch_io_debug, ); DISPATCH_VTABLE_INSTANCE(operation, .do_type = DISPATCH_OPERATION_TYPE, .do_kind = "operation", .do_dispose = _dispatch_operation_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, + .do_debug = _dispatch_operation_debug, ); DISPATCH_VTABLE_INSTANCE(disk, .do_type = DISPATCH_DISK_TYPE, .do_kind = "disk", .do_dispose = _dispatch_disk_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, ); void @@ -243,15 +262,13 @@ _dispatch_vtable_init(void) #if USE_OBJC // ObjC classes and dispatch vtables are co-located via linker order and // alias files, verify correct layout during initialization rdar://10640168 - #define DISPATCH_OBJC_CLASS(name) \ - DISPATCH_CONCAT(OBJC_CLASS_$_,DISPATCH_CLASS(name)) - extern void *DISPATCH_OBJC_CLASS(semaphore); + DISPATCH_OBJC_CLASS_DECL(semaphore); dispatch_assert((char*)DISPATCH_VTABLE(semaphore) - - (char*)&DISPATCH_OBJC_CLASS(semaphore) == 0); + (char*)DISPATCH_OBJC_CLASS(semaphore) == 0); dispatch_assert((char*)&DISPATCH_CONCAT(_,DISPATCH_CLASS(semaphore_vtable)) - - (char*)&DISPATCH_OBJC_CLASS(semaphore) == + - (char*)DISPATCH_OBJC_CLASS(semaphore) == sizeof(_os_object_class_s)); -#endif +#endif // USE_OBJC } #pragma mark - @@ -275,21 +292,28 @@ _dispatch_build_init(void *context DISPATCH_UNUSED) #endif } +static 
dispatch_once_t _dispatch_build_pred; + +char* +_dispatch_get_build(void) +{ + dispatch_once_f(&_dispatch_build_pred, NULL, _dispatch_build_init); + return _dispatch_build; +} + #define _dispatch_bug_log(msg, ...) do { \ static void *last_seen; \ void *ra = __builtin_return_address(0); \ if (last_seen != ra) { \ last_seen = ra; \ - _dispatch_log((msg), ##__VA_ARGS__); \ + _dispatch_log(msg, ##__VA_ARGS__); \ } \ } while(0) void _dispatch_bug(size_t line, long val) { - static dispatch_once_t pred; - - dispatch_once_f(&pred, NULL, _dispatch_build_init); + dispatch_once_f(&_dispatch_build_pred, NULL, _dispatch_build_init); _dispatch_bug_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, (unsigned long)line, val); } @@ -307,6 +331,14 @@ _dispatch_bug_mach_client(const char* msg, mach_msg_return_t kr) mach_error_string(kr), kr); } +void +_dispatch_bug_kevent_client(const char* msg, const char* filter, + const char *operation, int err) +{ + _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s: \"%s\" - 0x%x", + msg, filter, operation, strerror(err), err); +} + void _dispatch_abort(size_t line, long val) { @@ -314,10 +346,12 @@ _dispatch_abort(size_t line, long val) abort(); } +#if !DISPATCH_USE_OS_TRACE + #pragma mark - #pragma mark dispatch_log -static FILE *dispatch_logfile; +static int dispatch_logfile = -1; static bool dispatch_log_disabled; static dispatch_once_t _dispatch_logv_pred; @@ -341,52 +375,72 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) log_to_file = true; } else if (strcmp(e, "stderr") == 0) { log_to_file = true; - dispatch_logfile = stderr; + dispatch_logfile = STDERR_FILENO; } } if (!dispatch_log_disabled) { - if (log_to_file && !dispatch_logfile) { + if (log_to_file && dispatch_logfile == -1) { char path[PATH_MAX]; snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", getpid()); - dispatch_logfile = fopen(path, "a"); + dispatch_logfile = open(path, O_WRONLY | O_APPEND | O_CREAT | + O_NOFOLLOW | O_CLOEXEC, 0666); } - if 
(dispatch_logfile) { + if (dispatch_logfile != -1) { struct timeval tv; gettimeofday(&tv, NULL); - fprintf(dispatch_logfile, "=== log file opened for %s[%u] at " + dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), tv.tv_sec, tv.tv_usec); - fflush(dispatch_logfile); } } } +static inline void +_dispatch_log_file(char *buf, size_t len) +{ + ssize_t r; + + buf[len++] = '\n'; +retry: + r = write(dispatch_logfile, buf, len); + if (slowpath(r == -1) && errno == EINTR) { + goto retry; + } +} + DISPATCH_NOINLINE static void _dispatch_logv_file(const char *msg, va_list ap) { - char *buf; - size_t len; - - len = vasprintf(&buf, msg, ap); - buf[len++] = '\n'; - fwrite(buf, 1, len, dispatch_logfile); - fflush(dispatch_logfile); - free(buf); + char buf[2048]; + int r = vsnprintf(buf, sizeof(buf), msg, ap); + if (r < 0) return; + size_t len = (size_t)r; + if (len > sizeof(buf) - 1) { + len = sizeof(buf) - 1; + } + _dispatch_log_file(buf, len); } +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_logv(const char *msg, va_list ap) +_dispatch_logv(const char *msg, size_t len, va_list ap) { dispatch_once_f(&_dispatch_logv_pred, NULL, _dispatch_logv_init); if (slowpath(dispatch_log_disabled)) { return; } - if (slowpath(dispatch_logfile)) { + if (slowpath(dispatch_logfile != -1)) { + if (!ap) { + return _dispatch_log_file((char*)msg, len); + } return _dispatch_logv_file(msg, ap); } - vsyslog(LOG_NOTICE, msg, ap); + if (!ap) { + return syslog(LOG_NOTICE, "%s", msg); + } + return vsyslog(LOG_NOTICE, msg, ap); } DISPATCH_NOINLINE @@ -396,28 +450,58 @@ _dispatch_log(const char *msg, ...) 
va_list ap; va_start(ap, msg); - _dispatch_logv(msg, ap); + _dispatch_logv(msg, 0, ap); va_end(ap); } +#endif // DISPATCH_USE_OS_TRACE + #pragma mark - #pragma mark dispatch_debug +static size_t +_dispatch_object_debug2(dispatch_object_t dou, char* buf, size_t bufsiz) +{ + DISPATCH_OBJECT_TFB(_dispatch_objc_debug, dou, buf, bufsiz); + if (dou._do->do_vtable->do_debug) { + return dx_debug(dou._do, buf, bufsiz); + } + return strlcpy(buf, "NULL vtable slot: ", bufsiz); +} + DISPATCH_NOINLINE -void -dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) +static void +_dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) { - char buf[4096]; + char buf[2048]; + int r; size_t offs; - - if (dou._do && dou._do->do_vtable->do_debug) { - offs = dx_debug(dou._do, buf, sizeof(buf)); + if (dou._do) { + offs = _dispatch_object_debug2(dou, buf, sizeof(buf)); + dispatch_assert(offs + 2 < sizeof(buf)); + buf[offs++] = ':'; + buf[offs++] = ' '; + buf[offs] = '\0'; } else { - offs = snprintf(buf, sizeof(buf), "NULL vtable slot"); + offs = strlcpy(buf, "NULL: ", sizeof(buf)); + } + r = vsnprintf(buf + offs, sizeof(buf) - offs, msg, ap); +#if !DISPATCH_USE_OS_TRACE + size_t len = offs + (r < 0 ? 0 : (size_t)r); + if (len > sizeof(buf) - 1) { + len = sizeof(buf) - 1; } + _dispatch_logv(buf, len, NULL); +#else + _dispatch_log("%s", buf); +#endif +} - snprintf(buf + offs, sizeof(buf) - offs, ": %s", msg); - _dispatch_logv(buf, ap); +DISPATCH_NOINLINE +void +dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) +{ + _dispatch_debugv(dou, msg, ap); } DISPATCH_NOINLINE @@ -427,10 +511,43 @@ dispatch_debug(dispatch_object_t dou, const char *msg, ...) va_list ap; va_start(ap, msg); - dispatch_debugv(dou._do, msg, ap); + _dispatch_debugv(dou, msg, ap); va_end(ap); } +#if DISPATCH_DEBUG +DISPATCH_NOINLINE +void +_dispatch_object_debug(dispatch_object_t dou, const char *msg, ...) 
+{ + va_list ap; + + va_start(ap, msg); + _dispatch_debugv(dou._do, msg, ap); + va_end(ap); +} +#endif + +#pragma mark - +#pragma mark dispatch_calloc + +DISPATCH_NOINLINE +void +_dispatch_temporary_resource_shortage(void) +{ + sleep(1); +} + +void * +_dispatch_calloc(size_t num_items, size_t size) +{ + void *buf; + while (!fastpath(buf = calloc(num_items, size))) { + _dispatch_temporary_resource_shortage(); + } + return buf; +} + #pragma mark - #pragma mark dispatch_block_t @@ -442,9 +559,9 @@ _dispatch_Block_copy(dispatch_block_t db) { dispatch_block_t rval; - if (fastpath(db)) { + if (fastpath(db)) { while (!fastpath(rval = Block_copy(db))) { - sleep(1); + _dispatch_temporary_resource_shortage(); } return rval; } @@ -481,6 +598,7 @@ _dispatch_client_callout(void *ctxt, dispatch_function_t f) { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt); _dispatch_set_unwind_tsd(NULL); f(ctxt); _dispatch_free_unwind_tsd(); @@ -494,12 +612,43 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, i); _dispatch_set_unwind_tsd(NULL); f(ctxt, i); _dispatch_free_unwind_tsd(); _dispatch_set_unwind_tsd(u); } +#undef _dispatch_client_callout3 +bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f) +{ + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, region, offset, buffer, size); + _dispatch_set_unwind_tsd(NULL); + bool res = f(ctxt, region, offset, buffer, size); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); + return res; +} + +#undef _dispatch_client_callout4 +void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f) +{ + 
_dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, reason, dmsg, error); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, reason, dmsg, error); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #pragma mark - @@ -515,19 +664,25 @@ _os_object_init(void) return; } -_os_object_t -_os_object_alloc(const void *cls, size_t size) +inline _os_object_t +_os_object_alloc_realized(const void *cls, size_t size) { _os_object_t obj; dispatch_assert(size >= sizeof(struct _os_object_s)); - if (!cls) cls = &_os_object_class; while (!fastpath(obj = calloc(1u, size))) { - sleep(1); // Temporary resource shortage + _dispatch_temporary_resource_shortage(); } obj->os_obj_isa = cls; return obj; } +_os_object_t +_os_object_alloc(const void *cls, size_t size) +{ + if (!cls) cls = &_os_object_class; + return _os_object_alloc_realized(cls, size); +} + void _os_object_dealloc(_os_object_t obj) { @@ -583,12 +738,19 @@ dispatch_source_type_timer_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, unsigned long mask, - dispatch_queue_t q DISPATCH_UNUSED) + dispatch_queue_t q) { - ds->ds_refs = calloc(1ul, sizeof(struct dispatch_timer_source_refs_s)); - if (slowpath(!ds->ds_refs)) return; + if (fastpath(!ds->ds_refs)) { + ds->ds_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_timer_source_refs_s)); + } ds->ds_needs_rearm = true; ds->ds_is_timer = true; + if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0) + || q == dispatch_get_global_queue( + DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){ + mask |= DISPATCH_TIMER_BACKGROUND; // + } ds_timer(ds->ds_refs).flags = mask; } @@ -596,10 +758,55 @@ const struct dispatch_source_type_s _dispatch_source_type_timer = { .ke = { .filter = DISPATCH_EVFILT_TIMER, }, - .mask = DISPATCH_TIMER_WALL_CLOCK, + .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| + 
DISPATCH_TIMER_WALL_CLOCK, .init = dispatch_source_type_timer_init, }; +static void +dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds, + dispatch_source_type_t type, uintptr_t handle, unsigned long mask, + dispatch_queue_t q) +{ + ds->ds_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_timer_source_aggregate_refs_s)); + dispatch_source_type_timer_init(ds, type, handle, mask, q); + ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE; + ds->dq_specific_q = (void*)handle; + _dispatch_retain(ds->dq_specific_q); +} + +const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={ + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + .ident = ~0ull, + }, + .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, + .init = dispatch_source_type_timer_with_aggregate_init, +}; + +static void +dispatch_source_type_interval_init(dispatch_source_t ds, + dispatch_source_type_t type, uintptr_t handle, unsigned long mask, + dispatch_queue_t q) +{ + dispatch_source_type_timer_init(ds, type, handle, mask, q); + ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL; + unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs); + ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident; + _dispatch_source_set_interval(ds, handle); +} + +const struct dispatch_source_type_s _dispatch_source_type_interval = { + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + .ident = ~0ull, + }, + .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| + DISPATCH_INTERVAL_UI_ANIMATION, + .init = dispatch_source_type_interval_init, +}; + const struct dispatch_source_type_s _dispatch_source_type_read = { .ke = { .filter = EVFILT_READ, @@ -636,7 +843,7 @@ dispatch_source_type_vm_init(dispatch_source_t ds, { static dispatch_once_t pred; dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_vm_source_init); - ds->ds_dkev->dk_kevent.ident = (mask & DISPATCH_VM_PRESSURE ? + ds->ds_dkev->dk_kevent.ident = (uint64_t)(mask & DISPATCH_VM_PRESSURE ? 
_dispatch_ios_simulator_memory_warnings_fd : -1); } @@ -670,6 +877,31 @@ const struct dispatch_source_type_s _dispatch_source_type_vm = { #endif #endif +#ifdef DISPATCH_USE_MEMORYSTATUS +static void +dispatch_source_type_memorystatus_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_is_level = false; +} + +const struct dispatch_source_type_s _dispatch_source_type_memorystatus = { + .ke = { + .filter = EVFILT_MEMORYSTATUS, + .flags = EV_DISPATCH, + }, + .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN +#ifdef NOTE_MEMORYSTATUS_PRESSURE_CRITICAL + |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL +#endif + , + .init = dispatch_source_type_memorystatus_init, +}; +#endif + const struct dispatch_source_type_s _dispatch_source_type_proc = { .ke = { .filter = EVFILT_PROC, @@ -720,6 +952,25 @@ const struct dispatch_source_type_s _dispatch_source_type_vfs = { , }; +const struct dispatch_source_type_s _dispatch_source_type_sock = { +#ifdef EVFILT_SOCK + .ke = { + .filter = EVFILT_SOCK, + .flags = EV_CLEAR, + }, + .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | + NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | + NOTE_RESUME | NOTE_KEEPALIVE +#ifdef NOTE_ADAPTIVE_WTIMO + | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO +#endif +#ifdef NOTE_CONNECTED + | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED +#endif + , +#endif // EVFILT_SOCK +}; + const struct dispatch_source_type_s _dispatch_source_type_data_add = { .ke = { .filter = DISPATCH_EVFILT_CUSTOM_ADD, @@ -730,7 +981,7 @@ const struct dispatch_source_type_s _dispatch_source_type_data_or = { .ke = { .filter = DISPATCH_EVFILT_CUSTOM_OR, .flags = EV_CLEAR, - .fflags = ~0, + .fflags = ~0u, }, }; @@ -742,8 +993,6 @@ dispatch_source_type_mach_send_init(dispatch_source_t ds, uintptr_t handle DISPATCH_UNUSED, unsigned long mask, 
dispatch_queue_t q DISPATCH_UNUSED) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_notify_source_init); if (!mask) { // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD; @@ -753,7 +1002,7 @@ dispatch_source_type_mach_send_init(dispatch_source_t ds, const struct dispatch_source_type_s _dispatch_source_type_mach_send = { .ke = { - .filter = EVFILT_MACHPORT, + .filter = DISPATCH_EVFILT_MACH_NOTIFICATION, .flags = EV_CLEAR, }, .mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, @@ -779,18 +1028,6 @@ const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { .init = dispatch_source_type_mach_recv_init, }; -const struct dispatch_source_type_s _dispatch_source_type_sock = { -#ifdef EVFILT_SOCK - .ke = { - .filter = EVFILT_SOCK, - .flags = EV_CLEAR, - }, - .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | - NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | - NOTE_RESUME | NOTE_KEEPALIVE, -#endif -}; - #pragma mark - #pragma mark dispatch_mig @@ -810,9 +1047,9 @@ dispatch_mach_msg_get_context(mach_msg_header_t *msg) } kern_return_t -_dispatch_wakeup_main_thread(mach_port_t mp DISPATCH_UNUSED) +_dispatch_wakeup_runloop_thread(mach_port_t mp DISPATCH_UNUSED) { - // dummy function just to pop out the main thread out of mach_msg() + // dummy function just to pop a runloop thread out of mach_msg() return 0; } diff --git a/src/internal.h b/src/internal.h index a90f93f..ed1a9c7 100644 --- a/src/internal.h +++ b/src/internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -38,6 +38,14 @@ #endif +#if !defined(DISPATCH_MACH_SPI) && TARGET_OS_MAC +#define DISPATCH_MACH_SPI 1 +#endif + +#if !defined(USE_OBJC) && HAVE_OBJC +#define USE_OBJC 1 +#endif + #if USE_OBJC && ((!TARGET_IPHONE_SIMULATOR && defined(__i386__)) || \ (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080)) // Disable Objective-C support on platforms with legacy objc runtime @@ -69,16 +77,60 @@ #include #include #include +#if !TARGET_OS_WIN32 #include +#endif + +#define DISPATCH_STRUCT_DECL(type, name, ...) \ + struct type __VA_ARGS__ name + +// Visual Studio C++ does not support C99 designated initializers. +// This means that static declarations should be zero initialized and cannot +// be const since we must fill in the values during DLL initialization. +#if !TARGET_OS_WIN32 +#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ +struct type name = { \ +__VA_ARGS__ \ +} +#else +#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ +struct type name = { 0 } +#endif + +#if !TARGET_OS_WIN32 +#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ + const DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) + +#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ + const DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#else +#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ + DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) + +#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ + DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#endif /* private.h must be included last to avoid picking up installed headers. 
*/ #include "object_private.h" #include "queue_private.h" #include "source_private.h" +#include "mach_private.h" #include "data_private.h" +#if !TARGET_OS_WIN32 +#include "io_private.h" +#endif #include "benchmark.h" #include "private.h" +/* SPI for Libsystem-internal use */ +DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void); +#if !TARGET_OS_WIN32 +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); +#endif + /* More #includes at EOF (dependent on the contents of internal.h) ... */ // Abort on uncaught exceptions thrown from client callouts rdar://8577499 @@ -95,10 +147,16 @@ #define DISPATCH_PROFILE 0 #endif -#if (DISPATCH_DEBUG || DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE) +#if (!TARGET_OS_EMBEDDED || DISPATCH_DEBUG || DISPATCH_PROFILE) && \ + !defined(DISPATCH_USE_DTRACE) #define DISPATCH_USE_DTRACE 1 #endif +#if ((!TARGET_OS_EMBEDDED && DISPATCH_INTROSPECTION) || DISPATCH_DEBUG || \ + DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE_INTROSPECTION) +#define DISPATCH_USE_DTRACE_INTROSPECTION 1 +#endif + #if HAVE_LIBKERN_OSCROSSENDIAN_H #include #endif @@ -120,18 +178,27 @@ #include #include #include +#include +#include #endif /* HAVE_MACH */ #if HAVE_MALLOC_MALLOC_H #include #endif + +#include + +#if !TARGET_OS_WIN32 #include #include #include -#include #include #include #include +#include #include +#else +#include "sys_queue.h" +#endif #ifdef __BLOCKS__ #include @@ -140,7 +207,9 @@ #include #include +#if HAVE_FCNTL_H #include +#endif #include #include #if USE_POSIX_SEM @@ -153,7 +222,9 @@ #include #include #include +#if !TARGET_OS_WIN32 #include +#endif #if HAVE_UNISTD_H #include #endif @@ -171,15 +242,25 @@ #define __has_attribute(x) 0 #endif +#if __GNUC__ #define DISPATCH_NOINLINE __attribute__((__noinline__)) #define DISPATCH_USED __attribute__((__used__)) #define DISPATCH_UNUSED 
__attribute__((__unused__)) #define DISPATCH_WEAK __attribute__((__weak__)) +#define DISPATCH_OVERLOADABLE __attribute__((__overloadable__)) #if DISPATCH_DEBUG #define DISPATCH_ALWAYS_INLINE_NDEBUG #else #define DISPATCH_ALWAYS_INLINE_NDEBUG __attribute__((__always_inline__)) #endif +#else /* __GNUC__ */ +#define DISPATCH_NOINLINE +#define DISPATCH_USED +#define DISPATCH_UNUSED +#define DISPATCH_WEAK +#define DISPATCH_ALWAYS_INLINE_NDEBUG +#endif /* __GNUC__ */ + #define DISPATCH_CONCAT(x,y) DISPATCH_CONCAT1(x,y) #define DISPATCH_CONCAT1(x,y) x ## y @@ -198,24 +279,52 @@ #define NSEC_PER_USEC 1000ull /* I wish we had __builtin_expect_range() */ +#if __GNUC__ #define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) #define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) +#else +#define fastpath(x) (x) +#define slowpath(x) (x) +#endif // __GNUC__ DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); + +#if HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_client(const char* msg); DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); +DISPATCH_NOINLINE +void _dispatch_bug_kevent_client(const char* msg, const char* filter, + const char *operation, int err); +#endif + DISPATCH_NOINLINE DISPATCH_NORETURN void _dispatch_abort(size_t line, long val); + +#if !defined(DISPATCH_USE_OS_TRACE) && DISPATCH_DEBUG +#if __has_include() +#define DISPATCH_USE_OS_TRACE 1 +#include +#endif +#endif // DISPATCH_USE_OS_TRACE + +#if DISPATCH_USE_OS_TRACE +#define _dispatch_log(msg, ...) os_trace("libdispatch", msg, ## __VA_ARGS__) +#else DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) void _dispatch_log(const char *msg, ...); +#endif // DISPATCH_USE_OS_TRACE + +#define dsnprintf(...) \ + ({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; }) /* * For reporting bugs within libdispatch when using the "_debug" version of the * library. 
*/ +#if __GNUC__ #define dispatch_assert(e) do { \ if (__builtin_constant_p(e)) { \ char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ @@ -226,6 +335,14 @@ void _dispatch_log(const char *msg, ...); } \ } \ } while (0) +#else +static inline void _dispatch_assert(long e, long line) { + if (DISPATCH_DEBUG && !e) _dispatch_abort(line, e); +} +#define dispatch_assert(e) _dispatch_assert((long)(e), __LINE__) +#endif /* __GNUC__ */ + +#if __GNUC__ /* * A lot of API return zero upon success and not-zero on fail. Let's capture * and log the non-zero value @@ -240,6 +357,12 @@ void _dispatch_log(const char *msg, ...); } \ } \ } while (0) +#else +static inline void _dispatch_assert_zero(long e, long line) { + if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); +} +#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__) +#endif /* __GNUC__ */ /* * For reporting bugs or impedance mismatches between libdispatch and external @@ -247,6 +370,7 @@ void _dispatch_log(const char *msg, ...); * * In particular, we wrap all system-calls with assume() macros. */ +#if __GNUC__ #define dispatch_assume(e) ({ \ typeof(e) _e = fastpath(e); /* always eval 'e' */ \ if (!_e) { \ @@ -258,10 +382,19 @@ void _dispatch_log(const char *msg, ...); } \ _e; \ }) +#else +static inline long _dispatch_assume(long e, long line) { + if (!e) _dispatch_bug(line, e); + return e; +} +#define dispatch_assume(e) _dispatch_assume((long)(e), __LINE__) +#endif /* __GNUC__ */ + /* * A lot of API return zero upon success and not-zero on fail. 
Let's capture * and log the non-zero value */ +#if __GNUC__ #define dispatch_assume_zero(e) ({ \ typeof(e) _e = slowpath(e); /* always eval 'e' */ \ if (_e) { \ @@ -273,10 +406,18 @@ void _dispatch_log(const char *msg, ...); } \ _e; \ }) +#else +static inline long _dispatch_assume_zero(long e, long line) { + if (e) _dispatch_bug(line, e); + return e; +} +#define dispatch_assume_zero(e) _dispatch_assume_zero((long)(e), __LINE__) +#endif /* __GNUC__ */ /* * For reporting bugs in clients when using the "_debug" version of the library. */ +#if __GNUC__ #define dispatch_debug_assert(e, msg, args...) do { \ if (__builtin_constant_p(e)) { \ char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ @@ -288,38 +429,56 @@ void _dispatch_log(const char *msg, ...); } \ } \ } while (0) +#else +#define dispatch_debug_assert(e, msg, args...) do { \ + long _e = (long)fastpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && !_e) { \ + _dispatch_log("%s() 0x%lx: " msg, __FUNCTION__, _e, ##args); \ + abort(); \ + } \ +} while (0) +#endif /* __GNUC__ */ /* Make sure the debug statments don't get too stale */ -#define _dispatch_debug(x, args...) \ -({ \ +#define _dispatch_debug(x, args...) 
do { \ if (DISPATCH_DEBUG) { \ - _dispatch_log("libdispatch: %u\t%p\t" x, __LINE__, \ + _dispatch_log("%u\t%p\t" x, __LINE__, \ (void *)_dispatch_thread_self(), ##args); \ } \ -}) +} while (0) #if DISPATCH_DEBUG #if HAVE_MACH DISPATCH_NOINLINE DISPATCH_USED void dispatch_debug_machport(mach_port_t name, const char* str); #endif -DISPATCH_NOINLINE DISPATCH_USED -void dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str); -#else -static inline void -dispatch_debug_kevents(struct kevent* kev DISPATCH_UNUSED, - size_t count DISPATCH_UNUSED, - const char* str DISPATCH_UNUSED) {} #endif +#if DISPATCH_DEBUG +/* This is the private version of the deprecated dispatch_debug() */ +DISPATCH_NONNULL2 DISPATCH_NOTHROW +__attribute__((__format__(printf,2,3))) +void +_dispatch_object_debug(dispatch_object_t object, const char *message, ...); +#else +#define _dispatch_object_debug(object, message, ...) +#endif // DISPATCH_DEBUG + #if DISPATCH_USE_CLIENT_CALLOUT DISPATCH_NOTHROW void _dispatch_client_callout(void *ctxt, dispatch_function_t f); DISPATCH_NOTHROW void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); +DISPATCH_NOTHROW bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f); +DISPATCH_NOTHROW void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f); -#else +#else // !DISPATCH_USE_CLIENT_CALLOUT DISPATCH_ALWAYS_INLINE static inline void @@ -335,36 +494,53 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) return f(ctxt, i); } -#endif +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f) +{ + return f(ctxt, region, offset, buffer, size); +} + +DISPATCH_ALWAYS_INLINE 
+static inline void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f); +{ + return f(ctxt, reason, dmsg, error); +} + +#endif // !DISPATCH_USE_CLIENT_CALLOUT #ifdef __BLOCKS__ +#define _dispatch_Block_invoke(bb) \ + ((dispatch_function_t)((struct Block_layout *)bb)->invoke) DISPATCH_ALWAYS_INLINE static inline void _dispatch_client_callout_block(dispatch_block_t b) { - struct Block_basic *bb = (void*)b; - return _dispatch_client_callout(b, (dispatch_function_t)bb->Block_invoke); + return _dispatch_client_callout(b, _dispatch_Block_invoke(b)); } +#if __GNUC__ dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); #define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) +#else +dispatch_block_t _dispatch_Block_copy(const void *block); +#endif + void _dispatch_call_block_and_release(void *block); #endif /* __BLOCKS__ */ -void dummy_function(void); -long dummy_function_r0(void); +void _dispatch_temporary_resource_shortage(void); +void *_dispatch_calloc(size_t num_items, size_t size); void _dispatch_vtable_init(void); - -void _dispatch_source_drain_kevent(struct kevent *); - -long _dispatch_update_kq(const struct kevent *); -void _dispatch_run_timers(void); -// Returns howsoon with updated time value, or NULL if no timers active. 
-struct timespec *_dispatch_get_next_timer_fire(struct timespec *howsoon); +char *_dispatch_get_build(void); uint64_t _dispatch_timeout(dispatch_time_t when); -extern bool _dispatch_safe_fork; +extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; extern struct _dispatch_hw_config_s { uint32_t cc_max_active; @@ -372,10 +548,18 @@ extern struct _dispatch_hw_config_s { uint32_t cc_max_physical; } _dispatch_hw_config; +#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#if __has_include() +#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 +#include +#endif +#endif + /* #includes dependent on internal.h */ #include "shims.h" -// SnowLeopard and iOS Simulator fallbacks +// Older Mac OS X and iOS Simulator fallbacks #if HAVE_PTHREAD_WORKQUEUES #ifndef WORKQ_BG_PRIOQUEUE @@ -384,12 +568,12 @@ extern struct _dispatch_hw_config_s { #ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT #define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001 #endif -#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 #ifndef DISPATCH_NO_BG_PRIORITY #define DISPATCH_NO_BG_PRIORITY 1 #endif #endif -#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080 +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1080 #ifndef DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 #endif @@ -401,28 +585,60 @@ extern struct _dispatch_hw_config_s { #endif // HAVE_PTHREAD_WORKQUEUES #if HAVE_MACH -#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || \ - (TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070) +#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070) #undef MACH_NOTIFY_SEND_POSSIBLE #define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME #endif #endif // HAVE_MACH #ifdef EVFILT_VM 
-#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 -#undef DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE -#define DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE 0 -#endif #ifndef DISPATCH_USE_VM_PRESSURE #define DISPATCH_USE_VM_PRESSURE 1 #endif -#ifndef DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE -#define DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE 1 -#endif #endif // EVFILT_VM +#ifdef EVFILT_MEMORYSTATUS +#ifndef DISPATCH_USE_MEMORYSTATUS +#define DISPATCH_USE_MEMORYSTATUS 1 +#endif +#endif // EVFILT_MEMORYSTATUS + +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 +#undef DISPATCH_USE_VM_PRESSURE_SOURCE +#define DISPATCH_USE_VM_PRESSURE_SOURCE 0 +#endif // TARGET_IPHONE_SIMULATOR +#if TARGET_OS_EMBEDDED +#if !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE +#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 +#endif +#else // !TARGET_OS_EMBEDDED +#if !defined(DISPATCH_USE_MEMORYSTATUS_SOURCE) && DISPATCH_USE_MEMORYSTATUS +#define DISPATCH_USE_MEMORYSTATUS_SOURCE 1 +#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE +#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 +#endif +#endif // TARGET_OS_EMBEDDED + +#if !defined(NOTE_LEEWAY) || (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#undef NOTE_LEEWAY +#define NOTE_LEEWAY 0 +#undef NOTE_CRITICAL +#define NOTE_CRITICAL 0 +#undef NOTE_BACKGROUND +#define NOTE_BACKGROUND 0 +#endif // NOTE_LEEWAY + +#if HAVE_DECL_NOTE_REAP +#if defined(NOTE_REAP) && defined(__APPLE__) +#undef NOTE_REAP +#define NOTE_REAP 0x10000000 // +#endif +#endif // HAVE_DECL_NOTE_REAP + #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) -#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 #undef DISPATCH_USE_SETNOSIGPIPE #define DISPATCH_USE_SETNOSIGPIPE 0 #endif @@ -432,6 +648,40 @@ extern struct _dispatch_hw_config_s { #endif // F_SETNOSIGPIPE +#if 
HAVE_LIBPROC_INTERNAL_H +#include +#include +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 +#undef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 +#endif +#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 +#undef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 +#endif +#ifndef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 1 +#endif +#endif // HAVE_LIBPROC_INTERNAL_H + +#if HAVE_SYS_GUARDED_H +#include +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 +#undef DISPATCH_USE_GUARDED_FD +#define DISPATCH_USE_GUARDED_FD 0 +#endif +#ifndef DISPATCH_USE_GUARDED_FD +#define DISPATCH_USE_GUARDED_FD 1 +#endif +// change_fdguard_np() requires GUARD_DUP +#if DISPATCH_USE_GUARDED_FD && RDAR_11814513 +#define DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD 1 +#endif +#endif // HAVE_SYS_GUARDED_H + + +#define _dispatch_hardware_crash() __builtin_trap() + #define _dispatch_set_crash_log_message(x) #if HAVE_MACH @@ -465,10 +715,13 @@ extern struct _dispatch_hw_config_s { /* #includes dependent on internal.h */ #include "object_internal.h" #include "semaphore_internal.h" +#include "introspection_internal.h" #include "queue_internal.h" #include "source_internal.h" #include "data_internal.h" +#if !TARGET_OS_WIN32 #include "io_internal.h" +#endif #include "trace.h" #endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/introspection.c b/src/introspection.c new file mode 100644 index 0000000..5338f25 --- /dev/null +++ b/src/introspection.c @@ -0,0 +1,595 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +// Contains introspection routines that only exist in the version of the +// library with introspection support + +#if DISPATCH_INTROSPECTION + +#include "internal.h" +#include "introspection.h" +#include "introspection_private.h" + +typedef struct dispatch_introspection_thread_s { + void *dit_isa; + TAILQ_ENTRY(dispatch_introspection_thread_s) dit_list; + pthread_t thread; + dispatch_queue_t *queue; +} dispatch_introspection_thread_s; +typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t; + +static TAILQ_HEAD(, dispatch_introspection_thread_s) + _dispatch_introspection_threads = + TAILQ_HEAD_INITIALIZER(_dispatch_introspection_threads); +static volatile OSSpinLock _dispatch_introspection_threads_lock; + +static void _dispatch_introspection_thread_remove(void *ctxt); + +static TAILQ_HEAD(, dispatch_queue_s) _dispatch_introspection_queues = + TAILQ_HEAD_INITIALIZER(_dispatch_introspection_queues); +static volatile OSSpinLock _dispatch_introspection_queues_lock; + +static ptrdiff_t _dispatch_introspection_thread_queue_offset; + +#pragma mark - +#pragma mark dispatch_introspection_init + +void +_dispatch_introspection_init(void) +{ + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + &_dispatch_main_q, diq_list); + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + &_dispatch_mgr_q, diq_list); +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + _dispatch_mgr_q.do_targetq, diq_list); +#endif + for (size_t i = 0; i < 
DISPATCH_ROOT_QUEUE_COUNT; i++) { + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + &_dispatch_root_queues[i], diq_list); + } + + // Hack to determine queue TSD offset from start of pthread structure + uintptr_t thread = _dispatch_thread_self(); + thread_identifier_info_data_t tiid; + mach_msg_type_number_t cnt = THREAD_IDENTIFIER_INFO_COUNT; + kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread), + THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt); + if (!dispatch_assume_zero(kr)) { + _dispatch_introspection_thread_queue_offset = + (void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread; + } + _dispatch_thread_key_create(&dispatch_introspection_key, + _dispatch_introspection_thread_remove); + _dispatch_introspection_thread_add(); // add main thread +} + +const struct dispatch_introspection_versions_s +dispatch_introspection_versions = { + .introspection_version = 1, + .hooks_version = 1, + .hooks_size = sizeof(dispatch_introspection_hooks_s), + .queue_item_version = 1, + .queue_item_size = sizeof(dispatch_introspection_queue_item_s), + .queue_block_version = 1, + .queue_block_size = sizeof(dispatch_introspection_queue_block_s), + .queue_function_version = 1, + .queue_function_size = sizeof(dispatch_introspection_queue_function_s), + .queue_thread_version = 1, + .queue_thread_size = sizeof(dispatch_introspection_queue_thread_s), + .object_version = 1, + .object_size = sizeof(dispatch_introspection_object_s), + .queue_version = 1, + .queue_size = sizeof(dispatch_introspection_queue_s), + .source_version = 1, + .source_size = sizeof(dispatch_introspection_source_s), +}; + +#pragma mark - +#pragma mark dispatch_introspection_threads + +void +_dispatch_introspection_thread_add(void) +{ + if (_dispatch_thread_getspecific(dispatch_introspection_key)) { + return; + } + uintptr_t thread = _dispatch_thread_self(); + dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc(); + dit->dit_isa = (void*)0x41; + dit->thread = (void*)thread; + 
dit->queue = !_dispatch_introspection_thread_queue_offset ? NULL : + (void*)thread + _dispatch_introspection_thread_queue_offset; + _dispatch_thread_setspecific(dispatch_introspection_key, dit); + OSSpinLockLock(&_dispatch_introspection_threads_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection_threads, dit, dit_list); + OSSpinLockUnlock(&_dispatch_introspection_threads_lock); +} + +static void +_dispatch_introspection_thread_remove(void *ctxt) +{ + dispatch_introspection_thread_t dit = ctxt; + OSSpinLockLock(&_dispatch_introspection_threads_lock); + TAILQ_REMOVE(&_dispatch_introspection_threads, dit, dit_list); + OSSpinLockUnlock(&_dispatch_introspection_threads_lock); + _dispatch_continuation_free((void*)dit); + _dispatch_thread_setspecific(dispatch_introspection_key, NULL); +} + +#pragma mark - +#pragma mark dispatch_introspection_info + +static inline +dispatch_introspection_queue_function_s +_dispatch_introspection_continuation_get_info(dispatch_queue_t dq, + dispatch_continuation_t dc, unsigned long *type) +{ + void *ctxt = dc->dc_ctxt; + dispatch_function_t func = dc->dc_func; + pthread_t waiter = NULL; + bool apply = false; + long flags = (long)dc->do_vtable; + if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { + waiter = dc->dc_data; + if (flags & DISPATCH_OBJ_BARRIER_BIT) { + dc = dc->dc_ctxt; + dq = dc->dc_data; + } + ctxt = dc->dc_ctxt; + func = dc->dc_func; + } + if (func == _dispatch_sync_recurse_invoke) { + dc = dc->dc_ctxt; + dq = dc->dc_data; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + } else if (func == _dispatch_async_redirect_invoke) { + dq = dc->dc_data; + dc = dc->dc_other; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + flags = (long)dc->do_vtable; + } else if (func == _dispatch_mach_barrier_invoke) { + dq = dq->do_targetq; + ctxt = dc->dc_data; + func = dc->dc_other; + } else if (func == _dispatch_apply_invoke || + func == _dispatch_apply_redirect_invoke) { + dispatch_apply_t da = ctxt; + if (da->da_todo) { + dc = da->da_dc; + if (func == 
_dispatch_apply_redirect_invoke) { + dq = dc->dc_data; + } + ctxt = dc->dc_ctxt; + func = dc->dc_func; + apply = true; + } + } + if (func == _dispatch_call_block_and_release) { + *type = dispatch_introspection_queue_item_type_block; + func = _dispatch_Block_invoke(ctxt); + } else { + *type = dispatch_introspection_queue_item_type_function; + } + dispatch_introspection_queue_function_s diqf= { + .continuation = dc, + .target_queue = dq, + .context = ctxt, + .function = func, + .group = flags & DISPATCH_OBJ_GROUP_BIT ? dc->dc_data : NULL, + .waiter = waiter, + .barrier = flags & DISPATCH_OBJ_BARRIER_BIT, + .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT, + .apply = apply, + }; + return diqf; +} + +static inline +dispatch_introspection_object_s +_dispatch_introspection_object_get_info(dispatch_object_t dou) +{ + dispatch_introspection_object_s dio = { + .object = dou._dc, + .target_queue = dou._do->do_targetq, + .type = (void*)dou._do->do_vtable, + .kind = dx_kind(dou._do), + }; + return dio; +} + +DISPATCH_USED inline +dispatch_introspection_queue_s +dispatch_introspection_queue_get_info(dispatch_queue_t dq) +{ + bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); + uint32_t width = dq->dq_width; + if (width > 1 && width != UINT32_MAX) width /= 2; + dispatch_introspection_queue_s diq = { + .queue = dq, + .target_queue = dq->do_targetq, + .label = dq->dq_label, + .serialnum = dq->dq_serialnum, + .width = width, + .suspend_count = dq->do_suspend_cnt / 2, + .enqueued = (dq->do_suspend_cnt & 1) && !global, + .barrier = (dq->dq_running & 1) && !global, + .draining = (dq->dq_items_head == (void*)~0ul) || + (!dq->dq_items_head && dq->dq_items_tail), + .global = global, + .main = (dq == &_dispatch_main_q), + }; + return diq; +} + +static inline +dispatch_introspection_source_s +_dispatch_introspection_source_get_info(dispatch_source_t ds) +{ + dispatch_source_refs_t dr = ds->ds_refs; + void *ctxt = 
dr->ds_handler_ctxt; + dispatch_function_t handler = dr->ds_handler_func; + bool handler_is_block = ds->ds_handler_is_block; + bool after = (handler == _dispatch_after_timer_callback); + if (after) { + dispatch_continuation_t dc = ctxt; + ctxt = dc->dc_ctxt; + handler = dc->dc_func; + if (handler == _dispatch_call_block_and_release) { + handler = _dispatch_Block_invoke(ctxt); + handler_is_block = 1; + } + } + dispatch_introspection_source_s dis = { + .source = ds, + .target_queue = ds->do_targetq, + .type = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.filter : 0, + .handle = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.ident : 0, + .context = ctxt, + .handler = handler, + .suspend_count = ds->do_suspend_cnt / 2, + .enqueued = (ds->do_suspend_cnt & 1), + .handler_is_block = handler_is_block, + .timer = ds->ds_is_timer, + .after = after, + }; + return dis; +} + +static inline +dispatch_introspection_queue_thread_s +_dispatch_introspection_thread_get_info(dispatch_introspection_thread_t dit) +{ + dispatch_introspection_queue_thread_s diqt = { + .object = (void*)dit, + .thread = dit->thread, + }; + if (dit->queue && *dit->queue) { + diqt.queue = dispatch_introspection_queue_get_info(*dit->queue); + } + return diqt; +} + +DISPATCH_USED inline +dispatch_introspection_queue_item_s +dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, + dispatch_continuation_t dc) +{ + dispatch_introspection_queue_item_s diqi; + if (DISPATCH_OBJ_IS_VTABLE(dc)) { + dispatch_object_t dou = (dispatch_object_t)dc; + unsigned long type = dx_type(dou._do); + unsigned long metatype = type & _DISPATCH_META_TYPE_MASK; + if (metatype == _DISPATCH_QUEUE_TYPE && + type != DISPATCH_QUEUE_SPECIFIC_TYPE) { + diqi.type = dispatch_introspection_queue_item_type_queue; + diqi.queue = dispatch_introspection_queue_get_info(dou._dq); + } else if (metatype == _DISPATCH_SOURCE_TYPE) { + diqi.type = dispatch_introspection_queue_item_type_source; + diqi.source = 
_dispatch_introspection_source_get_info(dou._ds); + } else { + diqi.type = dispatch_introspection_queue_item_type_object; + diqi.object = _dispatch_introspection_object_get_info(dou._do); + } + } else { + diqi.function = _dispatch_introspection_continuation_get_info(dq, dc, + &diqi.type); + } + return diqi; +} + +#pragma mark - +#pragma mark dispatch_introspection_iterators + +DISPATCH_USED +dispatch_queue_t +dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, + dispatch_introspection_queue_t queues) +{ + dispatch_queue_t next; + next = start ? start : TAILQ_FIRST(&_dispatch_introspection_queues); + while (count--) { + if (!next) { + queues->queue = NULL; + break; + } + *queues++ = dispatch_introspection_queue_get_info(next); + next = TAILQ_NEXT(next, diq_list); + } + return next; +} + +DISPATCH_USED +dispatch_continuation_t +dispatch_introspection_get_queue_threads(dispatch_continuation_t start, + size_t count, dispatch_introspection_queue_thread_t threads) +{ + dispatch_introspection_thread_t next = start ? (void*)start : + TAILQ_FIRST(&_dispatch_introspection_threads); + while (count--) { + if (!next) { + threads->object = NULL; + break; + } + *threads++ = _dispatch_introspection_thread_get_info(next); + next = TAILQ_NEXT(next, dit_list); + } + return (void*)next; +} + +DISPATCH_USED +dispatch_continuation_t +dispatch_introspection_queue_get_items(dispatch_queue_t dq, + dispatch_continuation_t start, size_t count, + dispatch_introspection_queue_item_t items) +{ + dispatch_continuation_t next = start ? start : + dq->dq_items_head == (void*)~0ul ? 
NULL : (void*)dq->dq_items_head; + while (count--) { + if (!next) { + items->type = dispatch_introspection_queue_item_type_none; + break; + } + *items++ = dispatch_introspection_queue_item_get_info(dq, next); + next = next->do_next; + } + return next; +} + +#pragma mark - +#pragma mark dispatch_introspection_hooks + +#define DISPATCH_INTROSPECTION_NO_HOOK ((void*)~0ul) + +dispatch_introspection_hooks_s _dispatch_introspection_hooks; +dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts; +static const +dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = { + .queue_create = DISPATCH_INTROSPECTION_NO_HOOK, + .queue_dispose = DISPATCH_INTROSPECTION_NO_HOOK, + .queue_item_enqueue = DISPATCH_INTROSPECTION_NO_HOOK, + .queue_item_dequeue = DISPATCH_INTROSPECTION_NO_HOOK, +}; + +#define DISPATCH_INTROSPECTION_HOOKS_COUNT (( \ + sizeof(_dispatch_introspection_hook_callouts_enabled) - \ + sizeof(_dispatch_introspection_hook_callouts_enabled._reserved)) / \ + sizeof(dispatch_function_t)) + +#define DISPATCH_INTROSPECTION_HOOK_ENABLED(h) \ + (slowpath(_dispatch_introspection_hooks.h)) + +#define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) 
({ \ + typeof(_dispatch_introspection_hooks.h) _h; \ + _h = _dispatch_introspection_hooks.h; \ + if (slowpath((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \ + _h(__VA_ARGS__); \ + } }) + +#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(h) \ + DISPATCH_EXPORT void _dispatch_introspection_hook_##h(void) \ + asm("_dispatch_introspection_hook_" #h); \ + void _dispatch_introspection_hook_##h(void) {} + +#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(h, ...)\ + dispatch_introspection_hook_##h(__VA_ARGS__) + +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_create); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_destroy); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_enqueue); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_dequeue); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_begin); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_end); + +DISPATCH_USED +void +dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks) +{ + dispatch_introspection_hooks_s old_hooks = _dispatch_introspection_hooks; + _dispatch_introspection_hooks = *hooks; + dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts, + *h = (void*)&_dispatch_introspection_hooks, *oh = (void*)&old_hooks; + for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) { + if (!h[i] && e[i]) { + h[i] = DISPATCH_INTROSPECTION_NO_HOOK; + } + if (oh[i] == DISPATCH_INTROSPECTION_NO_HOOK) { + oh[i] = NULL; + } + } + *hooks = old_hooks; +} + +DISPATCH_USED +void +dispatch_introspection_hook_callouts_enable( + dispatch_introspection_hooks_t enable) +{ + _dispatch_introspection_hook_callouts = enable ? 
*enable : + _dispatch_introspection_hook_callouts_enabled; + dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts, + *h = (void*)&_dispatch_introspection_hooks; + for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) { + if (e[i] && !h[i]) { + h[i] = DISPATCH_INTROSPECTION_NO_HOOK; + } else if (!e[i] && h[i] == DISPATCH_INTROSPECTION_NO_HOOK) { + h[i] = NULL; + } + } +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_create( + dispatch_introspection_queue_t queue_info) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_create, queue_info); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_create_hook(dispatch_queue_t dq) +{ + dispatch_introspection_queue_s diq; + diq = dispatch_introspection_queue_get_info(dq); + dispatch_introspection_hook_callout_queue_create(&diq); +} + +dispatch_queue_t +_dispatch_introspection_queue_create(dispatch_queue_t dq) +{ + OSSpinLockLock(&_dispatch_introspection_queues_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, dq, diq_list); + OSSpinLockUnlock(&_dispatch_introspection_queues_lock); + + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) { + _dispatch_introspection_queue_create_hook(dq); + } + return dq; +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_dispose( + dispatch_introspection_queue_t queue_info) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_dispose, queue_info); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq) +{ + dispatch_introspection_queue_s diq; + diq = dispatch_introspection_queue_get_info(dq); + dispatch_introspection_hook_callout_queue_dispose(&diq); +} + +void +_dispatch_introspection_queue_dispose(dispatch_queue_t dq) +{ + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) { + 
_dispatch_introspection_queue_dispose_hook(dq); + } + + OSSpinLockLock(&_dispatch_introspection_queues_lock); + TAILQ_REMOVE(&_dispatch_introspection_queues, dq, diq_list); + OSSpinLockUnlock(&_dispatch_introspection_queues_lock); +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_item_enqueue(dispatch_queue_t queue, + dispatch_introspection_queue_item_t item) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_enqueue, queue, item); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_item_enqueue_hook(dispatch_queue_t dq, + dispatch_object_t dou) +{ + dispatch_introspection_queue_item_s diqi; + diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc); + dispatch_introspection_hook_callout_queue_item_enqueue(dq, &diqi); +} + +void +_dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq, + dispatch_object_t dou) +{ + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_item_enqueue, dq, dou); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_enqueue)) { + _dispatch_introspection_queue_item_enqueue_hook(dq, dou); + } +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_item_dequeue(dispatch_queue_t queue, + dispatch_introspection_queue_item_t item) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_dequeue, queue, item); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_item_dequeue_hook(dispatch_queue_t dq, + dispatch_object_t dou) +{ + dispatch_introspection_queue_item_s diqi; + diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc); + dispatch_introspection_hook_callout_queue_item_dequeue(dq, &diqi); +} + +void +_dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, + dispatch_object_t dou) +{ + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_item_dequeue, dq, dou); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_dequeue)) { + _dispatch_introspection_queue_item_dequeue_hook(dq, dou); + } +} + +void 
+_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) { + dispatch_queue_t dq = _dispatch_queue_get_current(); + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_callout_begin, dq, ctxt, f); +} + +void +_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) { + dispatch_queue_t dq = _dispatch_queue_get_current(); + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_callout_end, dq, ctxt, f); +} + +#endif // DISPATCH_INTROSPECTION diff --git a/src/introspection_internal.h b/src/introspection_internal.h new file mode 100644 index 0000000..89a9360 --- /dev/null +++ b/src/introspection_internal.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_INTROSPECTION_INTERNAL__ +#define __DISPATCH_INTROSPECTION_INTERNAL__ + +#if DISPATCH_INTROSPECTION + +#define DISPATCH_INTROSPECTION_QUEUE_LIST \ + TAILQ_ENTRY(dispatch_queue_s) diq_list +#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE \ + sizeof(TAILQ_ENTRY(dispatch_queue_s)) + +void _dispatch_introspection_init(void); +void _dispatch_introspection_thread_add(void); +dispatch_queue_t _dispatch_introspection_queue_create(dispatch_queue_t dq); +void _dispatch_introspection_queue_dispose(dispatch_queue_t dq); +void _dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq, + dispatch_object_t dou); +void _dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, + dispatch_object_t dou); +void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f); +void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); + +#if !__OBJC2__ + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push_list(dispatch_queue_t dq, + dispatch_object_t head, dispatch_object_t tail) { + struct dispatch_object_s *dou = head._do; + do { + _dispatch_introspection_queue_item_enqueue(dq, dou); + } while (dou != tail._do && (dou = dou->do_next)); +}; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push(dispatch_queue_t dq, dispatch_object_t dou) { + _dispatch_introspection_queue_item_enqueue(dq, dou); +}; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_pop(dispatch_queue_t dq, dispatch_object_t dou) { + _dispatch_introspection_queue_item_dequeue(dq, dou); +}; + +#endif + +#else + +#define DISPATCH_INTROSPECTION_QUEUE_LIST +#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE 0 + +#define _dispatch_introspection_init() +#define _dispatch_introspection_thread_add() +#define _dispatch_introspection_thread_remove() + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_introspection_queue_create(dispatch_queue_t dq) { return dq; } + 
+DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_dispose(dispatch_queue_t dq) { (void)dq; } + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push_list(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_object_t head DISPATCH_UNUSED, + dispatch_object_t tail DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_object_t dou DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_pop(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_object_t dou DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_callout_entry(void *ctxt DISPATCH_UNUSED, + dispatch_function_t f DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_callout_return(void *ctxt DISPATCH_UNUSED, + dispatch_function_t f DISPATCH_UNUSED) {} + +#endif // DISPATCH_INTROSPECTION + +#endif // __DISPATCH_INTROSPECTION_INTERNAL__ diff --git a/src/io.c b/src/io.c index 4e36015..48683a6 100644 --- a/src/io.c +++ b/src/io.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -20,6 +20,25 @@ #include "internal.h" +#ifndef DISPATCH_IO_DEBUG +#define DISPATCH_IO_DEBUG DISPATCH_DEBUG +#endif + +#if DISPATCH_IO_DEBUG +#define _dispatch_fd_debug(msg, fd, args...) \ + _dispatch_debug("fd[0x%x]: " msg, (fd), ##args) +#else +#define _dispatch_fd_debug(msg, fd, args...) 
+#endif + +#if USE_OBJC +#define _dispatch_io_data_retain(x) _dispatch_objc_retain(x) +#define _dispatch_io_data_release(x) _dispatch_objc_release(x) +#else +#define _dispatch_io_data_retain(x) dispatch_retain(x) +#define _dispatch_io_data_release(x) dispatch_release(x) +#endif + typedef void (^dispatch_fd_entry_init_callback_t)(dispatch_fd_entry_t fd_entry); DISPATCH_EXPORT DISPATCH_NOTHROW @@ -59,6 +78,7 @@ static void _dispatch_stream_cleanup_operations(dispatch_stream_t stream, static void _dispatch_disk_cleanup_operations(dispatch_disk_t disk, dispatch_io_t channel); static void _dispatch_stream_source_handler(void *ctx); +static void _dispatch_stream_queue_handler(void *ctx); static void _dispatch_stream_handler(void *ctx); static void _dispatch_disk_handler(void *ctx); static void _dispatch_disk_perform(void *ctxt); @@ -74,7 +94,8 @@ static void _dispatch_operation_deliver_data(dispatch_operation_t op, case EINTR: continue; \ __VA_ARGS__ \ } \ - } while (0) + break; \ + } while (1) #define _dispatch_io_syscall_switch(__err, __syscall, ...) do { \ _dispatch_io_syscall_switch_noerr(__err, __syscall, \ case 0: break; \ @@ -95,7 +116,8 @@ enum { DISPATCH_OP_FD_ERR, }; -#define _dispatch_io_Block_copy(x) ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) +#define _dispatch_io_Block_copy(x) \ + ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) #pragma mark - #pragma mark dispatch_io_hashtables @@ -105,7 +127,7 @@ enum { #else #define DIO_HASH_SIZE 256u // must be a power of two #endif -#define DIO_HASH(x) ((uintptr_t)((x) & (DIO_HASH_SIZE - 1))) +#define DIO_HASH(x) ((uintptr_t)(x) & (DIO_HASH_SIZE - 1)) // Global hashtable of dev_t -> disk_s mappings DISPATCH_CACHELINE_ALIGN @@ -209,7 +231,7 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, _dispatch_retain(queue); dispatch_async(!err ? 
fd_entry->close_queue : channel->queue, ^{ dispatch_async(queue, ^{ - _dispatch_io_debug("cleanup handler invoke", -1); + _dispatch_fd_debug("cleanup handler invoke", -1); cleanup_handler(err); }); _dispatch_release(queue); @@ -233,7 +255,9 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, void _dispatch_io_dispose(dispatch_io_t channel) { - if (channel->fd_entry && !(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { + _dispatch_object_debug(channel, "%s", __func__); + if (channel->fd_entry && + !(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { if (channel->fd_entry->path_data) { // This modification is safe since path_data->channel is checked // only on close_queue (which is still suspended at this point) @@ -298,7 +322,7 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { return NULL; } - _dispatch_io_debug("io create", fd); + _dispatch_fd_debug("io create", fd); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = fd; channel->fd_actual = fd; @@ -323,12 +347,23 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, _dispatch_fd_entry_retain(fd_entry); _dispatch_io_init(channel, fd_entry, queue, err, cleanup_handler); dispatch_resume(channel->queue); + _dispatch_object_debug(channel, "%s", __func__); _dispatch_release(channel); _dispatch_release(queue); }); + _dispatch_object_debug(channel, "%s", __func__); return channel; } +dispatch_io_t +dispatch_io_create_f(dispatch_io_type_t type, dispatch_fd_t fd, + dispatch_queue_t queue, void *context, + void (*cleanup_handler)(void *context, int error)) +{ + return dispatch_io_create(type, fd, queue, !cleanup_handler ? 
NULL : + ^(int error){ cleanup_handler(context, error); }); +} + dispatch_io_t dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int oflag, mode_t mode, dispatch_queue_t queue, @@ -343,7 +378,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, if (!path_data) { return NULL; } - _dispatch_io_debug("io create with path %s", -1, path); + _dispatch_fd_debug("io create with path %s", -1, path); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = -1; channel->fd_actual = -1; @@ -402,13 +437,25 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, path_data, st.st_dev, st.st_mode); _dispatch_io_init(channel, fd_entry, queue, 0, cleanup_handler); dispatch_resume(channel->queue); + _dispatch_object_debug(channel, "%s", __func__); _dispatch_release(channel); _dispatch_release(queue); }); }); + _dispatch_object_debug(channel, "%s", __func__); return channel; } +dispatch_io_t +dispatch_io_create_with_path_f(dispatch_io_type_t type, const char *path, + int oflag, mode_t mode, dispatch_queue_t queue, void *context, + void (*cleanup_handler)(void *context, int error)) +{ + return dispatch_io_create_with_path(type, path, oflag, mode, queue, + !cleanup_handler ? 
NULL : + ^(int error){ cleanup_handler(context, error); }); +} + dispatch_io_t dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, dispatch_queue_t queue, void (^cleanup_handler)(int error)) @@ -416,7 +463,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { return NULL; } - _dispatch_io_debug("io create with io %p", -1, in_channel); + _dispatch_fd_debug("io create with io %p", -1, in_channel); dispatch_io_t channel = _dispatch_io_create(type); dispatch_suspend(channel->queue); _dispatch_retain(queue); @@ -499,11 +546,23 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, _dispatch_release(queue); } _dispatch_release(in_channel); + _dispatch_object_debug(channel, "%s", __func__); }); }); + _dispatch_object_debug(channel, "%s", __func__); return channel; } +dispatch_io_t +dispatch_io_create_with_io_f(dispatch_io_type_t type, dispatch_io_t in_channel, + dispatch_queue_t queue, void *context, + void (*cleanup_handler)(void *context, int error)) +{ + return dispatch_io_create_with_io(type, in_channel, queue, + !cleanup_handler ? NULL : + ^(int error){ cleanup_handler(context, error); }); +} + #pragma mark - #pragma mark dispatch_io_accessors @@ -512,7 +571,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_io_debug("io set high water", channel->fd); + _dispatch_fd_debug("io set high water", channel->fd); if (channel->params.low > high_water) { channel->params.low = high_water; } @@ -526,7 +585,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_io_debug("io set low water", channel->fd); + _dispatch_fd_debug("io set low water", channel->fd); if (channel->params.high < low_water) { channel->params.high = low_water ? 
low_water : 1; } @@ -541,8 +600,8 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_io_debug("io set interval", channel->fd); - channel->params.interval = interval; + _dispatch_fd_debug("io set interval", channel->fd); + channel->params.interval = interval < INT64_MAX ? interval : INT64_MAX; channel->params.interval_flags = flags; _dispatch_release(channel); }); @@ -557,6 +616,7 @@ _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq) dispatch_queue_t prev_dq = channel->do_targetq; channel->do_targetq = dq; _dispatch_release(prev_dq); + _dispatch_object_debug(channel, "%s", __func__); _dispatch_release(channel); }); } @@ -568,8 +628,8 @@ dispatch_io_get_descriptor(dispatch_io_t channel) return -1; } dispatch_fd_t fd = channel->fd_actual; - if (fd == -1 && - _dispatch_thread_getspecific(dispatch_io_key) == channel) { + if (fd == -1 && _dispatch_thread_getspecific(dispatch_io_key) == channel && + !_dispatch_io_get_error(NULL, channel, false)) { dispatch_fd_entry_t fd_entry = channel->fd_entry; (void)_dispatch_fd_entry_open(fd_entry, channel); } @@ -582,14 +642,15 @@ dispatch_io_get_descriptor(dispatch_io_t channel) static void _dispatch_io_stop(dispatch_io_t channel) { - _dispatch_io_debug("io stop", channel->fd); - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_STOPPED); + _dispatch_fd_debug("io stop", channel->fd); + (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ + _dispatch_object_debug(channel, "%s", __func__); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - _dispatch_io_debug("io stop cleanup", channel->fd); + _dispatch_fd_debug("io stop cleanup", channel->fd); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { channel->fd_entry = NULL; @@ 
-599,7 +660,8 @@ _dispatch_io_stop(dispatch_io_t channel) // Stop after close, need to check if fd_entry still exists _dispatch_retain(channel); dispatch_async(_dispatch_io_fds_lockq, ^{ - _dispatch_io_debug("io stop after close cleanup", + _dispatch_object_debug(channel, "%s", __func__); + _dispatch_fd_debug("io stop after close cleanup", channel->fd); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); @@ -634,14 +696,18 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) _dispatch_retain(channel); dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ - _dispatch_io_debug("io close", channel->fd); + _dispatch_object_debug(channel, "%s", __func__); + _dispatch_fd_debug("io close", channel->fd); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED); + (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED, + relaxed); dispatch_fd_entry_t fd_entry = channel->fd_entry; - if (!fd_entry->path_data) { - channel->fd_entry = NULL; + if (fd_entry) { + if (!fd_entry->path_data) { + channel->fd_entry = NULL; + } + _dispatch_fd_entry_release(fd_entry); } - _dispatch_fd_entry_release(fd_entry); } _dispatch_release(channel); }); @@ -659,6 +725,7 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier) dispatch_async(barrier_queue, ^{ dispatch_suspend(barrier_queue); dispatch_group_notify(barrier_group, io_q, ^{ + _dispatch_object_debug(channel, "%s", __func__); _dispatch_thread_setspecific(dispatch_io_key, channel); barrier(); _dispatch_thread_setspecific(dispatch_io_key, NULL); @@ -669,6 +736,13 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier) }); } +void +dispatch_io_barrier_f(dispatch_io_t channel, void *context, + dispatch_function_t barrier) +{ + return dispatch_io_barrier(channel, ^{ barrier(context); }); +} + void dispatch_io_read(dispatch_io_t channel, off_t offset, size_t length, dispatch_queue_t queue, 
dispatch_io_handler_t handler) @@ -691,6 +765,17 @@ dispatch_io_read(dispatch_io_t channel, off_t offset, size_t length, }); } +void +dispatch_io_read_f(dispatch_io_t channel, off_t offset, size_t length, + dispatch_queue_t queue, void *context, + dispatch_io_handler_function_t handler) +{ + return dispatch_io_read(channel, offset, length, queue, + ^(bool done, dispatch_data_t d, int error){ + handler(context, done, d, error); + }); +} + void dispatch_io_write(dispatch_io_t channel, off_t offset, dispatch_data_t data, dispatch_queue_t queue, dispatch_io_handler_t handler) @@ -716,6 +801,17 @@ dispatch_io_write(dispatch_io_t channel, off_t offset, dispatch_data_t data, }); } +void +dispatch_io_write_f(dispatch_io_t channel, off_t offset, dispatch_data_t data, + dispatch_queue_t queue, void *context, + dispatch_io_handler_function_t handler) +{ + return dispatch_io_write(channel, offset, data, queue, + ^(bool done, dispatch_data_t d, int error){ + handler(context, done, d, error); + }); +} + void dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, void (^handler)(dispatch_data_t, int)) @@ -726,7 +822,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, if (fd_entry->err) { int err = fd_entry->err; dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(dispatch_data_empty, err); }); _dispatch_release(queue); @@ -749,7 +845,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, __block int err = 0; dispatch_async(fd_entry->close_queue, ^{ dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(deliver_data, err); _dispatch_io_data_release(deliver_data); }); @@ -775,6 +871,15 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, }); } +void +dispatch_read_f(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, 
+ void *context, void (*handler)(void *, dispatch_data_t, int)) +{ + return dispatch_read(fd, length, queue, ^(dispatch_data_t d, int error){ + handler(context, d, error); + }); +} + void dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, void (^handler)(dispatch_data_t, int)) @@ -786,7 +891,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, if (fd_entry->err) { int err = fd_entry->err; dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(NULL, err); }); _dispatch_release(queue); @@ -809,7 +914,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, __block int err = 0; dispatch_async(fd_entry->close_queue, ^{ dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(deliver_data, err); if (deliver_data) { _dispatch_io_data_release(deliver_data); @@ -837,6 +942,15 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, }); } +void +dispatch_write_f(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, + void *context, void (*handler)(void *, dispatch_data_t, int)) +{ + return dispatch_write(fd, data, queue, ^(dispatch_data_t d, int error){ + handler(context, d, error); + }); +} + #pragma mark - #pragma mark dispatch_operation_t @@ -848,7 +962,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, { // On channel queue dispatch_assert(direction < DOP_DIR_MAX); - _dispatch_io_debug("operation create", channel->fd); + _dispatch_fd_debug("operation create", channel->fd); #if DISPATCH_IO_DEBUG int fd = channel->fd; #endif @@ -866,7 +980,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } else if (direction == DOP_DIR_WRITE && !err) { d = NULL; } - _dispatch_io_debug("IO handler invoke", fd); + _dispatch_fd_debug("IO handler invoke", fd); 
handler(true, d, err); _dispatch_io_data_release(data); }); @@ -896,12 +1010,14 @@ _dispatch_operation_create(dispatch_op_direction_t direction, targetq = targetq->do_targetq; } op->do_targetq = targetq; + _dispatch_object_debug(op, "%s", __func__); return op; } void _dispatch_operation_dispose(dispatch_operation_t op) { + _dispatch_object_debug(op, "%s", __func__); // Deliver the data if there's any if (op->fd_entry) { _dispatch_operation_deliver_data(op, DOP_DONE); @@ -977,7 +1093,7 @@ _dispatch_operation_should_enqueue(dispatch_operation_t op, dispatch_queue_t tq, dispatch_data_t data) { // On stream queue or disk queue - _dispatch_io_debug("enqueue operation", op->fd_entry->fd); + _dispatch_fd_debug("enqueue operation", op->fd_entry->fd); _dispatch_io_data_retain(data); op->data = data; int err = _dispatch_io_get_error(op, NULL, true); @@ -1003,7 +1119,7 @@ _dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op) dispatch_source_t timer = dispatch_source_create( DISPATCH_SOURCE_TYPE_TIMER, 0, 0, tq); dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW, - op->params.interval), op->params.interval, 0); + (int64_t)op->params.interval), op->params.interval, 0); dispatch_source_set_event_handler(timer, ^{ // On stream queue or pick queue if (dispatch_source_testcancel(timer)) { @@ -1029,6 +1145,79 @@ _dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op) #pragma mark - #pragma mark dispatch_fd_entry_t +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD +static void +_dispatch_fd_entry_guard(dispatch_fd_entry_t fd_entry) +{ + guardid_t guard = fd_entry; + const unsigned int guard_flags = GUARD_CLOSE; + int err, fd_flags = 0; + _dispatch_io_syscall_switch_noerr(err, + change_fdguard_np(fd_entry->fd, NULL, 0, &guard, guard_flags, + &fd_flags), + case 0: + fd_entry->guard_flags = guard_flags; + fd_entry->orig_fd_flags = fd_flags; + break; + case EPERM: break; + default: (void)dispatch_assume_zero(err); break; + ); +} + +static void 
+_dispatch_fd_entry_unguard(dispatch_fd_entry_t fd_entry) +{ + if (!fd_entry->guard_flags) { + return; + } + guardid_t guard = fd_entry; + int err, fd_flags = fd_entry->orig_fd_flags; + _dispatch_io_syscall_switch(err, + change_fdguard_np(fd_entry->fd, &guard, fd_entry->guard_flags, NULL, 0, + &fd_flags), + default: (void)dispatch_assume_zero(err); break; + ); +} +#else +static inline void +_dispatch_fd_entry_guard(dispatch_fd_entry_t fd_entry) { (void)fd_entry; } +static inline void +_dispatch_fd_entry_unguard(dispatch_fd_entry_t fd_entry) { (void)fd_entry; } +#endif // DISPATCH_USE_GUARDED_FD + +static inline int +_dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, + int oflag, mode_t mode) { +#if DISPATCH_USE_GUARDED_FD + guardid_t guard = (uintptr_t)fd_entry; + const unsigned int guard_flags = GUARD_CLOSE | GUARD_DUP | + GUARD_SOCKET_IPC | GUARD_FILEPORT; + int fd = guarded_open_np(path, &guard, guard_flags, oflag | O_CLOEXEC, + mode); + if (fd != -1) { + fd_entry->guard_flags = guard_flags; + return fd; + } + errno = 0; +#endif + return open(path, oflag, mode); + (void)fd_entry; +} + +static inline int +_dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, int fd) { +#if DISPATCH_USE_GUARDED_FD + if (fd_entry->guard_flags) { + guardid_t guard = (uintptr_t)fd_entry; + return guarded_close_np(fd, &guard); + } else +#endif + { + return close(fd); + } + (void)fd_entry; +} + static inline void _dispatch_fd_entry_retain(dispatch_fd_entry_t fd_entry) { dispatch_suspend(fd_entry->close_queue); @@ -1047,7 +1236,7 @@ _dispatch_fd_entry_init_async(dispatch_fd_t fd, dispatch_once_f(&_dispatch_io_fds_lockq_pred, NULL, _dispatch_io_fds_lockq_init); dispatch_async(_dispatch_io_fds_lockq, ^{ - _dispatch_io_debug("fd entry init", fd); + _dispatch_fd_debug("fd entry init", fd); dispatch_fd_entry_t fd_entry = NULL; // Check to see if there is an existing entry for the given fd uintptr_t hash = DIO_HASH(fd); @@ -1064,7 +1253,7 @@ 
_dispatch_fd_entry_init_async(dispatch_fd_t fd, fd_entry = _dispatch_fd_entry_create_with_fd(fd, hash); } dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_io_debug("fd entry init completion", fd); + _dispatch_fd_debug("fd entry init completion", fd); completion_callback(fd_entry); // stat() is complete, release reference to fd_entry _dispatch_fd_entry_release(fd_entry); @@ -1076,7 +1265,7 @@ static dispatch_fd_entry_t _dispatch_fd_entry_create(dispatch_queue_t q) { dispatch_fd_entry_t fd_entry; - fd_entry = calloc(1ul, sizeof(struct dispatch_fd_entry_s)); + fd_entry = _dispatch_calloc(1ul, sizeof(struct dispatch_fd_entry_s)); fd_entry->close_queue = dispatch_queue_create( "com.apple.libdispatch-io.closeq", NULL); // Use target queue to ensure that no concurrent lookups are going on when @@ -1092,7 +1281,7 @@ static dispatch_fd_entry_t _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) { // On fds lock queue - _dispatch_io_debug("fd entry create", fd); + _dispatch_fd_debug("fd entry create", fd); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); fd_entry->fd = fd; @@ -1101,7 +1290,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) "com.apple.libdispatch-io.barrierq", NULL); fd_entry->barrier_group = dispatch_group_create(); dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_io_debug("fd entry stat", fd); + _dispatch_fd_debug("fd entry stat", fd); int err, orig_flags, orig_nosigpipe = -1; struct stat st; _dispatch_io_syscall_switch(err, @@ -1110,6 +1299,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) ); fd_entry->stat.dev = st.st_dev; fd_entry->stat.mode = st.st_mode; + _dispatch_fd_entry_guard(fd_entry); _dispatch_io_syscall_switch(err, orig_flags = fcntl(fd, F_GETFL), default: (void)dispatch_assume_zero(err); break; @@ -1172,7 +1362,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // all operations associated with this entry have been 
freed dispatch_async(fd_entry->close_queue, ^{ if (!fd_entry->disk) { - _dispatch_io_debug("close queue fd_entry cleanup", fd); + _dispatch_fd_debug("close queue fd_entry cleanup", fd); dispatch_op_direction_t dir; for (dir = 0; dir < DOP_DIR_MAX; dir++) { _dispatch_stream_dispose(fd_entry, dir); @@ -1190,11 +1380,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // source cancels it and suspends the close queue. Freeing the fd_entry // structure must happen after the source cancel handler has finished dispatch_async(fd_entry->close_queue, ^{ - _dispatch_io_debug("close queue release", fd); + _dispatch_fd_debug("close queue release", fd); dispatch_release(fd_entry->close_queue); - _dispatch_io_debug("barrier queue release", fd); + _dispatch_fd_debug("barrier queue release", fd); dispatch_release(fd_entry->barrier_queue); - _dispatch_io_debug("barrier group release", fd); + _dispatch_fd_debug("barrier group release", fd); dispatch_release(fd_entry->barrier_group); if (fd_entry->orig_flags != -1) { _dispatch_io_syscall( @@ -1208,6 +1398,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) ); } #endif + _dispatch_fd_entry_unguard(fd_entry); if (fd_entry->convenience_channel) { fd_entry->convenience_channel->fd_entry = NULL; dispatch_release(fd_entry->convenience_channel); @@ -1222,7 +1413,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, dev_t dev, mode_t mode) { // On devs lock queue - _dispatch_io_debug("fd entry create with path %s", -1, path_data->path); + _dispatch_fd_debug("fd entry create with path %s", -1, path_data->path); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( path_data->channel->queue); if (S_ISREG(mode)) { @@ -1243,7 +1434,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, // that the channel associated with this entry has been closed and that // all operations associated with this entry have been freed dispatch_async(fd_entry->close_queue, ^{ 
- _dispatch_io_debug("close queue fd_entry cleanup", -1); + _dispatch_fd_debug("close queue fd_entry cleanup", -1); if (!fd_entry->disk) { dispatch_op_direction_t dir; for (dir = 0; dir < DOP_DIR_MAX; dir++) { @@ -1251,7 +1442,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, } } if (fd_entry->fd != -1) { - close(fd_entry->fd); + _dispatch_fd_entry_guarded_close(fd_entry, fd_entry->fd); } if (fd_entry->path_data->channel) { // If associated channel has not been released yet, mark it as @@ -1262,7 +1453,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, } }); dispatch_async(fd_entry->close_queue, ^{ - _dispatch_io_debug("close queue release", -1); + _dispatch_fd_debug("close queue release", -1); dispatch_release(fd_entry->close_queue); dispatch_release(fd_entry->barrier_queue); dispatch_release(fd_entry->barrier_group); @@ -1285,21 +1476,23 @@ _dispatch_fd_entry_open(dispatch_fd_entry_t fd_entry, dispatch_io_t channel) int oflag = fd_entry->disk ? 
fd_entry->path_data->oflag & ~O_NONBLOCK : fd_entry->path_data->oflag | O_NONBLOCK; open: - fd = open(fd_entry->path_data->path, oflag, fd_entry->path_data->mode); + fd = _dispatch_fd_entry_guarded_open(fd_entry, fd_entry->path_data->path, + oflag, fd_entry->path_data->mode); if (fd == -1) { int err = errno; if (err == EINTR) { goto open; } - (void)dispatch_atomic_cmpxchg2o(fd_entry, err, 0, err); + (void)dispatch_atomic_cmpxchg2o(fd_entry, err, 0, err, relaxed); return err; } - if (!dispatch_atomic_cmpxchg2o(fd_entry, fd, -1, fd)) { + if (!dispatch_atomic_cmpxchg2o(fd_entry, fd, -1, fd, relaxed)) { // Lost the race with another open - close(fd); + _dispatch_fd_entry_guarded_close(fd_entry, fd); } else { channel->fd_actual = fd; } + _dispatch_object_debug(channel, "%s", __func__); return 0; } @@ -1350,9 +1543,10 @@ _dispatch_stream_init(dispatch_fd_entry_t fd_entry, dispatch_queue_t tq) dispatch_op_direction_t direction; for (direction = 0; direction < DOP_DIR_MAX; direction++) { dispatch_stream_t stream; - stream = calloc(1ul, sizeof(struct dispatch_stream_s)); + stream = _dispatch_calloc(1ul, sizeof(struct dispatch_stream_s)); stream->dq = dispatch_queue_create("com.apple.libdispatch-io.streamq", NULL); + dispatch_set_context(stream->dq, stream); _dispatch_retain(tq); stream->dq->do_targetq = tq; TAILQ_INIT(&stream->operations[DISPATCH_IO_RANDOM]); @@ -1379,6 +1573,7 @@ _dispatch_stream_dispose(dispatch_fd_entry_t fd_entry, dispatch_resume(stream->source); dispatch_release(stream->source); } + dispatch_set_context(stream->dq, NULL); dispatch_release(stream->dq); free(stream); } @@ -1388,7 +1583,6 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) { // On devs lock queue dispatch_disk_t disk; - char label_name[256]; // Check to see if there is an existing entry for the given device uintptr_t hash = DIO_HASH(dev); TAILQ_FOREACH(disk, &_dispatch_io_devs[hash], disk_list) { @@ -1410,8 +1604,9 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t 
dev) disk->dev = dev; TAILQ_INIT(&disk->operations); disk->cur_rq = TAILQ_FIRST(&disk->operations); - sprintf(label_name, "com.apple.libdispatch-io.deviceq.%d", dev); - disk->pick_queue = dispatch_queue_create(label_name, NULL); + char label[45]; + snprintf(label, sizeof(label), "com.apple.libdispatch-io.deviceq.%d", dev); + disk->pick_queue = dispatch_queue_create(label, NULL); TAILQ_INSERT_TAIL(&_dispatch_io_devs[hash], disk, disk_list); out: fd_entry->disk = disk; @@ -1448,10 +1643,12 @@ _dispatch_stream_enqueue_operation(dispatch_stream_t stream, if (!_dispatch_operation_should_enqueue(op, stream->dq, data)) { return; } + _dispatch_object_debug(op, "%s", __func__); bool no_ops = !_dispatch_stream_operation_avail(stream); TAILQ_INSERT_TAIL(&stream->operations[op->params.type], op, operation_list); if (no_ops) { - dispatch_async_f(stream->dq, stream, _dispatch_stream_handler); + dispatch_async_f(stream->dq, stream->dq, + _dispatch_stream_queue_handler); } } @@ -1462,6 +1659,7 @@ _dispatch_disk_enqueue_operation(dispatch_disk_t disk, dispatch_operation_t op, if (!_dispatch_operation_should_enqueue(op, disk->pick_queue, data)) { return; } + _dispatch_object_debug(op, "%s", __func__); if (op->params.type == DISPATCH_IO_STREAM) { if (TAILQ_EMPTY(&op->fd_entry->stream_ops)) { TAILQ_INSERT_TAIL(&disk->operations, op, operation_list); @@ -1478,7 +1676,8 @@ _dispatch_stream_complete_operation(dispatch_stream_t stream, dispatch_operation_t op) { // On stream queue - _dispatch_io_debug("complete operation", op->fd_entry->fd); + _dispatch_object_debug(op, "%s", __func__); + _dispatch_fd_debug("complete operation", op->fd_entry->fd); TAILQ_REMOVE(&stream->operations[op->params.type], op, operation_list); if (op == stream->op) { stream->op = NULL; @@ -1494,7 +1693,8 @@ static void _dispatch_disk_complete_operation(dispatch_disk_t disk, dispatch_operation_t op) { // On pick queue - _dispatch_io_debug("complete operation", op->fd_entry->fd); + _dispatch_object_debug(op, "%s", 
__func__); + _dispatch_fd_debug("complete operation", op->fd_entry->fd); // Current request is always the last op returned if (disk->cur_rq == op) { disk->cur_rq = TAILQ_PREV(op, dispatch_disk_operations_s, @@ -1623,14 +1823,14 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) return stream->source; } dispatch_fd_t fd = op->fd_entry->fd; - _dispatch_io_debug("stream source create", fd); + _dispatch_fd_debug("stream source create", fd); dispatch_source_t source = NULL; if (op->direction == DOP_DIR_READ) { - source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, - stream->dq); + source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, + (uintptr_t)fd, 0, stream->dq); } else if (op->direction == DOP_DIR_WRITE) { - source = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, fd, 0, - stream->dq); + source = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, + (uintptr_t)fd, 0, stream->dq); } else { dispatch_assert(op->direction < DOP_DIR_MAX); return NULL; @@ -1642,7 +1842,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) // unregistered dispatch_queue_t close_queue = op->fd_entry->close_queue; dispatch_source_set_cancel_handler(source, ^{ - _dispatch_io_debug("stream source cancel", fd); + _dispatch_fd_debug("stream source cancel", fd); dispatch_resume(close_queue); }); stream->source = source; @@ -1659,6 +1859,18 @@ _dispatch_stream_source_handler(void *ctx) return _dispatch_stream_handler(stream); } +static void +_dispatch_stream_queue_handler(void *ctx) +{ + // On stream queue + dispatch_stream_t stream = (dispatch_stream_t)dispatch_get_context(ctx); + if (!stream) { + // _dispatch_stream_dispose has been called + return; + } + return _dispatch_stream_handler(stream); +} + static void _dispatch_stream_handler(void *ctx) { @@ -1678,17 +1890,18 @@ pick: goto pick; } stream->op = op; - _dispatch_io_debug("stream handler", op->fd_entry->fd); + _dispatch_fd_debug("stream handler", op->fd_entry->fd); 
dispatch_fd_entry_t fd_entry = op->fd_entry; _dispatch_fd_entry_retain(fd_entry); // For performance analysis if (!op->total && dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of the operation - _dispatch_io_debug("initial delivery", op->fd_entry->fd); + _dispatch_fd_debug("initial delivery", op->fd_entry->fd); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // TODO: perform on the operation target queue to get correct priority - int result = _dispatch_operation_perform(op), flags = -1; + int result = _dispatch_operation_perform(op); + dispatch_op_flags_t flags = ~0u; switch (result) { case DISPATCH_OP_DELIVER: flags = DOP_DEFAULT; @@ -1703,7 +1916,8 @@ pick: _dispatch_stream_complete_operation(stream, op); } if (_dispatch_stream_operation_avail(stream)) { - dispatch_async_f(stream->dq, stream, _dispatch_stream_handler); + dispatch_async_f(stream->dq, stream->dq, + _dispatch_stream_queue_handler); } break; case DISPATCH_OP_COMPLETE_RESUME: @@ -1740,7 +1954,7 @@ _dispatch_disk_handler(void *ctx) if (disk->io_active) { return; } - _dispatch_io_debug("disk handler", -1); + _dispatch_fd_debug("disk handler", -1); dispatch_operation_t op; size_t i = disk->free_idx, j = disk->req_idx; if (j <= i) { @@ -1758,6 +1972,7 @@ _dispatch_disk_handler(void *ctx) _dispatch_retain(op); disk->advise_list[i%disk->advise_list_depth] = op; op->active = true; + _dispatch_object_debug(op, "%s", __func__); } else { // No more operations to get break; @@ -1777,7 +1992,7 @@ _dispatch_disk_perform(void *ctxt) { dispatch_disk_t disk = ctxt; size_t chunk_size = dispatch_io_defaults.chunk_pages * PAGE_SIZE; - _dispatch_io_debug("disk perform", -1); + _dispatch_fd_debug("disk perform", -1); dispatch_operation_t op; size_t i = disk->advise_idx, j = disk->free_idx; if (j <= i) { @@ -1801,7 +2016,7 @@ _dispatch_disk_perform(void *ctxt) // For performance analysis if (!op->total && dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of 
the operation - _dispatch_io_debug("initial delivery", op->fd_entry->fd); + _dispatch_fd_debug("initial delivery", op->fd_entry->fd); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // Advise two chunks if the list only has one element and this is the @@ -1859,22 +2074,25 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) struct radvisory advise; // No point in issuing a read advise for the next chunk if we are already // a chunk ahead from reading the bytes - if (op->advise_offset > (off_t)((op->offset+op->total) + chunk_size + - PAGE_SIZE)) { + if (op->advise_offset > (off_t)(((size_t)op->offset + op->total) + + chunk_size + PAGE_SIZE)) { return; } + _dispatch_object_debug(op, "%s", __func__); advise.ra_count = (int)chunk_size; if (!op->advise_offset) { op->advise_offset = op->offset; // If this is the first time through, align the advised range to a // page boundary - size_t pg_fraction = (size_t)((op->offset + chunk_size) % PAGE_SIZE); + size_t pg_fraction = ((size_t)op->offset + chunk_size) % PAGE_SIZE; advise.ra_count += (int)(pg_fraction ? 
PAGE_SIZE - pg_fraction : 0); } advise.ra_offset = op->advise_offset; op->advise_offset += advise.ra_count; _dispatch_io_syscall_switch(err, fcntl(op->fd_entry->fd, F_RDADVISE, &advise), + case EFBIG: break; // advised past the end of the file rdar://10415691 + case ENOTSUP: break; // not all FS support radvise rdar://13484629 // TODO: set disk status on error default: (void)dispatch_assume_zero(err); break; ); @@ -1887,6 +2105,7 @@ _dispatch_operation_perform(dispatch_operation_t op) if (err) { goto error; } + _dispatch_object_debug(op, "%s", __func__); if (!op->buf) { size_t max_buf_siz = op->params.high; size_t chunk_siz = dispatch_io_defaults.chunk_pages * PAGE_SIZE; @@ -1910,7 +2129,7 @@ _dispatch_operation_perform(dispatch_operation_t op) op->buf_siz = max_buf_siz; } op->buf = valloc(op->buf_siz); - _dispatch_io_debug("buffer allocated", op->fd_entry->fd); + _dispatch_fd_debug("buffer allocated", op->fd_entry->fd); } else if (op->direction == DOP_DIR_WRITE) { // Always write the first data piece, if that is smaller than a // chunk, accumulate further data pieces until chunk size is reached @@ -1936,7 +2155,7 @@ _dispatch_operation_perform(dispatch_operation_t op) op->buf_data = dispatch_data_create_map(d, (const void**)&op->buf, NULL); _dispatch_io_data_release(d); - _dispatch_io_debug("buffer mapped", op->fd_entry->fd); + _dispatch_fd_debug("buffer mapped", op->fd_entry->fd); } } if (op->fd_entry->fd == -1) { @@ -1947,7 +2166,7 @@ _dispatch_operation_perform(dispatch_operation_t op) } void *buf = op->buf + op->buf_len; size_t len = op->buf_siz - op->buf_len; - off_t off = op->offset + op->total; + off_t off = (off_t)((size_t)op->offset + op->total); ssize_t processed = -1; syscall: if (op->direction == DOP_DIR_READ) { @@ -1973,11 +2192,11 @@ syscall: } // EOF is indicated by two handler invocations if (processed == 0) { - _dispatch_io_debug("EOF", op->fd_entry->fd); + _dispatch_fd_debug("EOF", op->fd_entry->fd); return DISPATCH_OP_DELIVER_AND_COMPLETE; } - 
op->buf_len += processed; - op->total += processed; + op->buf_len += (size_t)processed; + op->total += (size_t)processed; if (op->total == op->length) { // Finished processing all the bytes requested by the operation return DISPATCH_OP_COMPLETE; @@ -1989,7 +2208,7 @@ error: if (err == EAGAIN) { // For disk based files with blocking I/O we should never get EAGAIN dispatch_assert(!op->fd_entry->disk); - _dispatch_io_debug("EAGAIN %d", op->fd_entry->fd, err); + _dispatch_fd_debug("EAGAIN %d", op->fd_entry->fd, err); if (op->direction == DOP_DIR_READ && op->total && op->channel == op->fd_entry->convenience_channel) { // Convenience read with available data completes on EAGAIN @@ -2002,7 +2221,7 @@ error: case ECANCELED: return DISPATCH_OP_ERR; case EBADF: - (void)dispatch_atomic_cmpxchg2o(op->fd_entry, err, 0, err); + (void)dispatch_atomic_cmpxchg2o(op->fd_entry, err, 0, err, relaxed); return DISPATCH_OP_FD_ERR; default: return DISPATCH_OP_COMPLETE; @@ -2026,7 +2245,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, deliver = true; } else if (op->buf_len < op->buf_siz) { // Request buffer is not yet used up - _dispatch_io_debug("buffer data", op->fd_entry->fd); + _dispatch_fd_debug("buffer data", op->fd_entry->fd); return; } } else { @@ -2080,11 +2299,12 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, } if (!deliver || ((flags & DOP_NO_EMPTY) && !dispatch_data_get_size(data))) { op->undelivered = undelivered; - _dispatch_io_debug("buffer data", op->fd_entry->fd); + _dispatch_fd_debug("buffer data", op->fd_entry->fd); return; } op->undelivered = 0; - _dispatch_io_debug("deliver data", op->fd_entry->fd); + _dispatch_object_debug(op, "%s", __func__); + _dispatch_fd_debug("deliver data", op->fd_entry->fd); dispatch_op_direction_t direction = op->direction; dispatch_io_handler_t handler = op->handler; #if DISPATCH_IO_DEBUG @@ -2101,7 +2321,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, if (done) { if (direction == DOP_DIR_READ && 
err) { if (dispatch_data_get_size(d)) { - _dispatch_io_debug("IO handler invoke", fd); + _dispatch_fd_debug("IO handler invoke", fd); handler(false, d, 0); } d = NULL; @@ -2109,10 +2329,77 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, d = NULL; } } - _dispatch_io_debug("IO handler invoke", fd); + _dispatch_fd_debug("IO handler invoke", fd); handler(done, d, err); _dispatch_release(channel); _dispatch_fd_entry_release(fd_entry); _dispatch_io_data_release(data); }); } + +#pragma mark - +#pragma mark dispatch_io_debug + +static size_t +_dispatch_io_debug_attr(dispatch_io_t channel, char* buf, size_t bufsiz) +{ + dispatch_queue_t target = channel->do_targetq; + return dsnprintf(buf, bufsiz, "type = %s, fd = 0x%x, %sfd_entry = %p, " + "queue = %p, target = %s[%p], barrier_queue = %p, barrier_group = " + "%p, err = 0x%x, low = 0x%zx, high = 0x%zx, interval%s = %llu ", + channel->params.type == DISPATCH_IO_STREAM ? "stream" : "random", + channel->fd_actual, channel->atomic_flags & DIO_STOPPED ? + "stopped, " : channel->atomic_flags & DIO_CLOSED ? "closed, " : "", + channel->fd_entry, channel->queue, target && target->dq_label ? + target->dq_label : "", target, channel->barrier_queue, + channel->barrier_group, channel->err, channel->params.low, + channel->params.high, channel->params.interval_flags & + DISPATCH_IO_STRICT_INTERVAL ? 
"(strict)" : "", + channel->params.interval); +} + +size_t +_dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(channel), channel); + offset += _dispatch_object_debug_attr(channel, &buf[offset], + bufsiz - offset); + offset += _dispatch_io_debug_attr(channel, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + +static size_t +_dispatch_operation_debug_attr(dispatch_operation_t op, char* buf, + size_t bufsiz) +{ + dispatch_queue_t target = op->do_targetq; + dispatch_queue_t oqtarget = op->op_q ? op->op_q->do_targetq : NULL; + return dsnprintf(buf, bufsiz, "type = %s %s, fd = 0x%x, fd_entry = %p, " + "channel = %p, queue = %p -> %s[%p], target = %s[%p], " + "offset = %lld, length = %zu, done = %zu, undelivered = %zu, " + "flags = %u, err = 0x%x, low = 0x%zx, high = 0x%zx, " + "interval%s = %llu ", op->params.type == DISPATCH_IO_STREAM ? + "stream" : "random", op->direction == DOP_DIR_READ ? "read" : + "write", op->fd_entry ? op->fd_entry->fd : -1, op->fd_entry, + op->channel, op->op_q, oqtarget && oqtarget->dq_label ? + oqtarget->dq_label : "", oqtarget, target && target->dq_label ? + target->dq_label : "", target, op->offset, op->length, op->total, + op->undelivered + op->buf_len, op->flags, op->err, op->params.low, + op->params.high, op->params.interval_flags & + DISPATCH_IO_STRICT_INTERVAL ? 
"(strict)" : "", op->params.interval); +} + +size_t +_dispatch_operation_debug(dispatch_operation_t op, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(op), op); + offset += _dispatch_object_debug_attr(op, &buf[offset], bufsiz - offset); + offset += _dispatch_operation_debug_attr(op, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} diff --git a/src/io_internal.h b/src/io_internal.h index dbbb6bf..fbb27c5 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -34,10 +34,6 @@ #define _DISPATCH_IO_LABEL_SIZE 16 -#ifndef DISPATCH_IO_DEBUG -#define DISPATCH_IO_DEBUG 0 -#endif - #if TARGET_OS_EMBEDDED // rdar://problem/9032036 #define DIO_MAX_CHUNK_PAGES 128u // 512kB chunk size #else @@ -66,16 +62,6 @@ typedef unsigned int dispatch_op_flags_t; #define DIO_CLOSED 1u // channel has been closed #define DIO_STOPPED 2u // channel has been stopped (implies closed) -#define _dispatch_io_data_retain(x) dispatch_retain(x) -#define _dispatch_io_data_release(x) dispatch_release(x) - -#if DISPATCH_IO_DEBUG -#define _dispatch_io_debug(msg, fd, args...) \ - _dispatch_debug("fd %d: " msg, (fd), ##args) -#else -#define _dispatch_io_debug(msg, fd, args...) 
-#endif - DISPATCH_DECL_INTERNAL(dispatch_operation); DISPATCH_DECL_INTERNAL(dispatch_disk); @@ -126,6 +112,12 @@ struct dispatch_fd_entry_s { dispatch_fd_t fd; dispatch_io_path_data_t path_data; int orig_flags, orig_nosigpipe, err; +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD + int orig_fd_flags; +#endif +#if DISPATCH_USE_GUARDED_FD + unsigned int guard_flags; +#endif struct dispatch_stat_s stat; dispatch_stream_t streams[2]; dispatch_disk_t disk; @@ -160,7 +152,6 @@ struct dispatch_operation_s { dispatch_fd_entry_t fd_entry; dispatch_source_t timer; bool active; - int count; off_t advise_offset; void* buf; dispatch_op_flags_t flags; @@ -185,7 +176,10 @@ struct dispatch_io_s { }; void _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq); +size_t _dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz); void _dispatch_io_dispose(dispatch_io_t channel); +size_t _dispatch_operation_debug(dispatch_operation_t op, char* buf, + size_t bufsiz); void _dispatch_operation_dispose(dispatch_operation_t operation); void _dispatch_disk_dispose(dispatch_disk_t disk); diff --git a/src/object.c b/src/object.c index 7b94c75..a305039 100644 --- a/src/object.c +++ b/src/object.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -30,9 +30,10 @@ _os_object_retain_count(_os_object_t obj) if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return ULONG_MAX; // global object } - return xref_cnt + 1; + return (unsigned long)(xref_cnt + 1); } +DISPATCH_NOINLINE _os_object_t _os_object_retain_internal(_os_object_t obj) { @@ -40,13 +41,14 @@ _os_object_retain_internal(_os_object_t obj) if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return obj; // global object } - ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt); + ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed); if (slowpath(ref_cnt <= 0)) { DISPATCH_CRASH("Resurrection of an object"); } return obj; } +DISPATCH_NOINLINE void _os_object_release_internal(_os_object_t obj) { @@ -54,7 +56,7 @@ _os_object_release_internal(_os_object_t obj) if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return; // global object } - ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt); + ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed); if (fastpath(ref_cnt >= 0)) { return; } @@ -69,6 +71,7 @@ _os_object_release_internal(_os_object_t obj) return _os_object_dispose(obj); } +DISPATCH_NOINLINE _os_object_t _os_object_retain(_os_object_t obj) { @@ -76,13 +79,14 @@ _os_object_retain(_os_object_t obj) if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return obj; // global object } - xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt); + xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed); if (slowpath(xref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } return obj; } +DISPATCH_NOINLINE void _os_object_release(_os_object_t obj) { @@ -90,7 +94,7 @@ _os_object_release(_os_object_t obj) if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return; // global object } - xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt); + xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed); if (fastpath(xref_cnt >= 0)) { return; } @@ -114,9 +118,8 @@ 
retry: if (slowpath(xref_cnt < -1)) { goto overrelease; } - if (slowpath(!dispatch_atomic_cmpxchg2o(obj, os_obj_xref_cnt, xref_cnt, - xref_cnt + 1))) { - xref_cnt = obj->os_obj_xref_cnt; + if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, + xref_cnt + 1, &xref_cnt, relaxed))) { goto retry; } return true; @@ -143,12 +146,13 @@ _os_object_allows_weak_reference(_os_object_t obj) void * _dispatch_alloc(const void *vtable, size_t size) { - return _os_object_alloc(vtable, size); + return _os_object_alloc_realized(vtable, size); } void dispatch_retain(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_retain, dou); (void)_os_object_retain(dou._os_obj); } @@ -161,6 +165,7 @@ _dispatch_retain(dispatch_object_t dou) void dispatch_release(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou); _os_object_release(dou._os_obj); } @@ -195,6 +200,8 @@ _dispatch_xref_dispose(dispatch_object_t dou) #if !USE_OBJC if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { _dispatch_source_xref_dispose(dou._ds); + } else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) { + _dispatch_runloop_queue_xref_dispose(dou._dq); } return _dispatch_release(dou._os_obj); #endif @@ -213,33 +220,48 @@ _dispatch_dispose(dispatch_object_t dou) void * dispatch_get_context(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + return NULL; + } return dou._do->do_ctxt; } void dispatch_set_context(dispatch_object_t dou, void *context) { - if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT) { - dou._do->do_ctxt = context; + DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + return; } + dou._do->do_ctxt = context; } void 
dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) { + DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + return; + } dou._do->do_finalizer = finalizer; } void dispatch_suspend(dispatch_object_t dou) { - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } // rdar://8181908 explains why we need to do an internal retain at every // suspension. (void)dispatch_atomic_add2o(dou._do, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); _dispatch_retain(dou._do); } @@ -255,19 +277,20 @@ _dispatch_resume_slow(dispatch_object_t dou) void dispatch_resume(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou); // Global objects cannot be suspended or resumed. This also has the // side effect of saturating the suspend count of an object and // guarding against resuming due to overflow. - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } // Check the previous value of the suspend count. If the previous // value was a single suspend interval, the object should be resumed. // If the previous value was less than the suspend interval, the object // has been over-resumed. 
- unsigned int suspend_cnt = dispatch_atomic_sub2o(dou._do, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL) + - DISPATCH_OBJECT_SUSPEND_INTERVAL; + unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do, + do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) { // Balancing the retain() done in suspend() for rdar://8181908 return _dispatch_release(dou._do); @@ -281,7 +304,7 @@ dispatch_resume(dispatch_object_t dou) size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) { - return snprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " + return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " "suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1, dou._do->do_ref_cnt + 1, dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL, diff --git a/src/object.m b/src/object.m index ea69622..e64a4fd 100644 --- a/src/object.m +++ b/src/object.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Inc. All rights reserved. + * Copyright (c) 2011-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -29,7 +29,6 @@ #error "Cannot build with ARC" #endif -#include #include #include @@ -40,12 +39,11 @@ #include #include -static dispatch_once_t _os_object_gc_pred; static bool _os_object_have_gc; static malloc_zone_t *_os_object_gc_zone; static void -_os_object_gc_init(void *ctxt DISPATCH_UNUSED) +_os_object_gc_init(void) { _os_object_have_gc = objc_collectingEnabled(); if (slowpath(_os_object_have_gc)) { @@ -56,7 +54,6 @@ _os_object_gc_init(void *ctxt DISPATCH_UNUSED) static _os_object_t _os_object_make_uncollectable(_os_object_t obj) { - dispatch_once_f(&_os_object_gc_pred, NULL, _os_object_gc_init); if (slowpath(_os_object_have_gc)) { auto_zone_retain(_os_object_gc_zone, obj); } @@ -66,38 +63,64 @@ _os_object_make_uncollectable(_os_object_t obj) static _os_object_t _os_object_make_collectable(_os_object_t obj) { - dispatch_once_f(&_os_object_gc_pred, NULL, _os_object_gc_init); if (slowpath(_os_object_have_gc)) { auto_zone_release(_os_object_gc_zone, obj); } return obj; } -#else + +#define _os_objc_gc_retain(obj) \ + if (slowpath(_os_object_have_gc)) { \ + return auto_zone_retain(_os_object_gc_zone, obj); \ + } + +#define _os_objc_gc_release(obj) \ + if (slowpath(_os_object_have_gc)) { \ + return (void)auto_zone_release(_os_object_gc_zone, obj); \ + } + +#else // __OBJC_GC__ +#define _os_object_gc_init() #define _os_object_make_uncollectable(obj) (obj) #define _os_object_make_collectable(obj) (obj) +#define _os_objc_gc_retain(obj) +#define _os_objc_gc_release(obj) #endif // __OBJC_GC__ #pragma mark - #pragma mark _os_object_t +static inline id +_os_objc_alloc(Class cls, size_t size) +{ + id obj; + size -= sizeof(((struct _os_object_s *)NULL)->os_obj_isa); + while (!fastpath(obj = class_createInstance(cls, size))) { + _dispatch_temporary_resource_shortage(); + } + return obj; +} + void _os_object_init(void) { - return _objc_init(); + _objc_init(); + _os_object_gc_init(); +} + +_os_object_t 
+_os_object_alloc_realized(const void *cls, size_t size) +{ + dispatch_assert(size >= sizeof(struct _os_object_s)); + return _os_object_make_uncollectable(_os_objc_alloc(cls, size)); } _os_object_t _os_object_alloc(const void *_cls, size_t size) { - Class cls = _cls; - _os_object_t obj; dispatch_assert(size >= sizeof(struct _os_object_s)); - size -= sizeof(((struct _os_object_s *)NULL)->os_obj_isa); - if (!cls) cls = [OS_OBJECT_CLASS(object) class]; - while (!fastpath(obj = class_createInstance(cls, size))) { - sleep(1); // Temporary resource shortage - } - return _os_object_make_uncollectable(obj); + Class cls = _cls ? [(id)_cls class] : [OS_OBJECT_CLASS(object) class]; + return _os_object_make_uncollectable(_os_objc_alloc(cls, size)); } void @@ -154,10 +177,82 @@ _os_object_dispose(_os_object_t obj) @end #pragma mark - -#pragma mark _dispatch_object +#pragma mark _dispatch_objc #include +id +_dispatch_objc_alloc(Class cls, size_t size) +{ + return _os_objc_alloc(cls, size); +} + +void +_dispatch_objc_retain(dispatch_object_t dou) +{ + _os_objc_gc_retain(dou); + return (void)[dou retain]; +} + +void +_dispatch_objc_release(dispatch_object_t dou) +{ + _os_objc_gc_release(dou); + return [dou release]; +} + +void +_dispatch_objc_set_context(dispatch_object_t dou, void *context) +{ + return [dou _setContext:context]; +} + +void * +_dispatch_objc_get_context(dispatch_object_t dou) +{ + return [dou _getContext]; +} + +void +_dispatch_objc_set_finalizer_f(dispatch_object_t dou, + dispatch_function_t finalizer) +{ + return [dou _setFinalizer:finalizer]; +} + +void +_dispatch_objc_set_target_queue(dispatch_object_t dou, dispatch_queue_t queue) +{ + return [dou _setTargetQueue:queue]; +} + +void +_dispatch_objc_suspend(dispatch_object_t dou) +{ + return [dou _suspend]; +} + +void +_dispatch_objc_resume(dispatch_object_t dou) +{ + return [dou _resume]; +} + +size_t +_dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) +{ + NSUInteger offset = 0; + NSString 
*desc = [dou debugDescription]; + [desc getBytes:buf maxLength:bufsiz-1 usedLength:&offset + encoding:NSUTF8StringEncoding options:0 + range:NSMakeRange(0, [desc length]) remainingRange:NULL]; + if (offset) buf[offset] = 0; + return offset; +} + +#pragma mark - +#pragma mark _dispatch_object + // Force non-lazy class realization rdar://10640168 #define DISPATCH_OBJC_LOAD() + (void)load {} @@ -182,8 +277,13 @@ _os_object_dispose(_os_object_t obj) - (NSString *)debugDescription { Class nsstring = objc_lookUpClass("NSString"); if (!nsstring) return nil; - char buf[4096]; - dx_debug((struct dispatch_object_s *)self, buf, sizeof(buf)); + char buf[2048]; + struct dispatch_object_s *obj = (struct dispatch_object_s *)self; + if (obj->do_vtable->do_debug) { + dx_debug(obj, buf, sizeof(buf)); + } else { + strlcpy(buf, dx_kind(obj), sizeof(buf)); + } return [nsstring stringWithFormat: [nsstring stringWithUTF8String:"<%s: %s>"], class_getName([self class]), buf]; @@ -214,6 +314,16 @@ DISPATCH_OBJC_LOAD() @end +@implementation DISPATCH_CLASS(queue_runloop) +DISPATCH_OBJC_LOAD() + +- (void)_xref_dispose { + _dispatch_runloop_queue_xref_dispose(self); + [super _xref_dispose]; +} + +@end + #define DISPATCH_CLASS_IMPL(name) \ @implementation DISPATCH_CLASS(name) \ DISPATCH_OBJC_LOAD() \ @@ -225,10 +335,11 @@ DISPATCH_CLASS_IMPL(queue_root) DISPATCH_CLASS_IMPL(queue_mgr) DISPATCH_CLASS_IMPL(queue_specific_queue) DISPATCH_CLASS_IMPL(queue_attr) +DISPATCH_CLASS_IMPL(mach) +DISPATCH_CLASS_IMPL(mach_msg) DISPATCH_CLASS_IMPL(io) DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) -DISPATCH_CLASS_IMPL(data) #pragma mark - #pragma mark dispatch_autorelease_pool @@ -281,6 +392,33 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } } +#undef _dispatch_client_callout3 +bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f) +{ + @try { + return f(ctxt, region, 
offset, buffer, size); + } + @catch (...) { + objc_terminate(); + } +} + +#undef _dispatch_client_callout4 +void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f) +{ + @try { + return f(ctxt, reason, dmsg, error); + } + @catch (...) { + objc_terminate(); + } +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index 8bb6733..b369663 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -53,38 +53,53 @@ } #else #define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ - const struct dispatch_##super##_vtable_s _dispatch_##name##_vtable = { \ + DISPATCH_CONST_STRUCT_INSTANCE(dispatch_##super##_vtable_s, \ + _dispatch_##name##_vtable, \ ._os_obj_xref_dispose = _dispatch_xref_dispose, \ ._os_obj_dispose = _dispatch_dispose, \ - __VA_ARGS__ \ - } + __VA_ARGS__) #endif // USE_OBJC #define DISPATCH_SUBCLASS_DECL(name, super) \ DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, super) \ struct dispatch_##name##_s; \ - extern const struct dispatch_##name##_vtable_s { \ + extern DISPATCH_CONST_STRUCT_DECL(dispatch_##name##_vtable_s, \ + _dispatch_##name##_vtable, \ + { \ _OS_OBJECT_CLASS_HEADER(); \ DISPATCH_VTABLE_HEADER(name); \ - } _dispatch_##name##_vtable + }) #define DISPATCH_CLASS_DECL(name) DISPATCH_SUBCLASS_DECL(name, dispatch_object) #define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_##name, dispatch_##super); \ DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, dispatch_##super) \ - extern const struct dispatch_##super##_vtable_s _dispatch_##name##_vtable + extern DISPATCH_CONST_STRUCT_DECL(dispatch_##super##_vtable_s, \ + _dispatch_##name##_vtable) #define 
DISPATCH_VTABLE_INSTANCE(name, ...) \ DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, name, __VA_ARGS__) #define DISPATCH_VTABLE(name) &_dispatch_##name##_vtable +#if !TARGET_OS_WIN32 #define DISPATCH_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ size_t (*const do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - struct dispatch_queue_s *(*const do_invoke)(struct dispatch_##x##_s *); \ - bool (*const do_probe)(struct dispatch_##x##_s *); \ - void (*const do_dispose)(struct dispatch_##x##_s *) + void (*const do_invoke)(struct dispatch_##x##_s *); \ + unsigned long (*const do_probe)(struct dispatch_##x##_s *); \ + void (*const do_dispose)(struct dispatch_##x##_s *); +#else +// Cannot be const on Win32 because we initialize at runtime. +#define DISPATCH_VTABLE_HEADER(x) \ + unsigned long do_type; \ + const char *do_kind; \ + size_t (*do_debug)(struct dispatch_##x##_s *, char *, size_t); \ + void (*do_invoke)(struct dispatch_##x##_s *); \ + unsigned long (*do_probe)(struct dispatch_##x##_s *); \ + void (*do_dispose)(struct dispatch_##x##_s *); +#endif #define dx_type(x) (x)->do_vtable->do_type +#define dx_metatype(x) ((x)->do_vtable->do_type & _DISPATCH_META_TYPE_MASK) #define dx_kind(x) (x)->do_vtable->do_kind #define dx_debug(x, y, z) (x)->do_vtable->do_debug((x), (y), (z)) #define dx_dispose(x) (x)->do_vtable->do_dispose(x) @@ -131,7 +146,8 @@ enum { DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, - DISPATCH_DATA_TYPE = _DISPATCH_NODE_TYPE, + DISPATCH_DATA_TYPE = 1 | _DISPATCH_NODE_TYPE, + DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, DISPATCH_IO_TYPE = _DISPATCH_IO_TYPE, DISPATCH_OPERATION_TYPE = _DISPATCH_OPERATION_TYPE, @@ -140,7 +156,7 @@ enum { DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE |_DISPATCH_ATTR_TYPE, DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_GLOBAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_ROOT_TYPE = 2 | _DISPATCH_QUEUE_TYPE, DISPATCH_QUEUE_MGR_TYPE = 3 | 
_DISPATCH_QUEUE_TYPE, DISPATCH_QUEUE_SPECIFIC_TYPE = 4 | _DISPATCH_QUEUE_TYPE, @@ -148,6 +164,7 @@ enum { DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, + DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, }; DISPATCH_SUBCLASS_DECL(object, object); @@ -167,6 +184,67 @@ void *_dispatch_autorelease_pool_push(void); void _dispatch_autorelease_pool_pop(void *context); #endif +#if USE_OBJC +#include + +#define OS_OBJC_CLASS_SYMBOL(name) \ + DISPATCH_CONCAT(OBJC_CLASS_$_,name) +#define OS_OBJC_CLASS_DECL(name) \ + extern void *OS_OBJC_CLASS_SYMBOL(name) +#define OS_OBJC_CLASS(name) \ + ((Class)&OS_OBJC_CLASS_SYMBOL(name)) +#define OS_OBJECT_OBJC_CLASS_DECL(name) \ + OS_OBJC_CLASS_DECL(OS_OBJECT_CLASS(name)) +#define OS_OBJECT_OBJC_CLASS(name) \ + OS_OBJC_CLASS(OS_OBJECT_CLASS(name)) +#define DISPATCH_OBJC_CLASS_DECL(name) \ + OS_OBJC_CLASS_DECL(DISPATCH_CLASS(name)) +#define DISPATCH_OBJC_CLASS(name) \ + OS_OBJC_CLASS(DISPATCH_CLASS(name)) + +OS_OBJECT_OBJC_CLASS_DECL(object); +DISPATCH_OBJC_CLASS_DECL(object); + +// ObjC toll-free bridging, keep in sync with libdispatch.order file +#define DISPATCH_OBJECT_TFB(f, o, ...) 
\ + if (slowpath((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ + slowpath((Class)((o)._os_obj->os_obj_isa) < \ + DISPATCH_OBJC_CLASS(object)) || \ + slowpath((Class)((o)._os_obj->os_obj_isa) >= \ + OS_OBJECT_OBJC_CLASS(object))) { \ + return f((o), ##__VA_ARGS__); \ + } + +id _dispatch_objc_alloc(Class cls, size_t size); +void _dispatch_objc_retain(dispatch_object_t dou); +void _dispatch_objc_release(dispatch_object_t dou); +void _dispatch_objc_set_context(dispatch_object_t dou, void *context); +void *_dispatch_objc_get_context(dispatch_object_t dou); +void _dispatch_objc_set_finalizer_f(dispatch_object_t dou, + dispatch_function_t finalizer); +void _dispatch_objc_set_target_queue(dispatch_object_t dou, + dispatch_queue_t queue); +void _dispatch_objc_suspend(dispatch_object_t dou); +void _dispatch_objc_resume(dispatch_object_t dou); +size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); + +#if __OBJC2__ +@interface NSObject (DISPATCH_CONCAT(_,DISPATCH_CLASS(object))) +- (void)_setContext:(void*)context; +- (void*)_getContext; +- (void)_setFinalizer:(dispatch_function_t)finalizer; +- (void)_setTargetQueue:(dispatch_queue_t)queue; +- (void)_suspend; +- (void)_resume; +@end +#endif // __OBJC2__ +#else // USE_OBJC +#define DISPATCH_OBJECT_TFB(f, o, ...) +#endif // USE_OBJC + +#pragma mark - +#pragma mark _os_object_s + typedef struct _os_object_class_s { _OS_OBJECT_CLASS_HEADER(); } _os_object_class_s; @@ -185,4 +263,4 @@ bool _os_object_allows_weak_reference(_os_object_t obj); void _os_object_dispose(_os_object_t obj); void _os_object_xref_dispose(_os_object_t obj); -#endif +#endif // __DISPATCH_OBJECT_INTERNAL__ diff --git a/src/once.c b/src/once.c index ab4a4e8..ef57fc3 100644 --- a/src/once.c +++ b/src/once.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -35,9 +35,7 @@ struct _dispatch_once_waiter_s { void dispatch_once(dispatch_once_t *val, dispatch_block_t block) { - struct Block_basic *bb = (void *)block; - - dispatch_once_f(val, block, (void *)bb->Block_invoke); + dispatch_once_f(val, block, _dispatch_Block_invoke(block)); } #endif @@ -51,8 +49,7 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) struct _dispatch_once_waiter_s *tail, *tmp; _dispatch_thread_semaphore_t sema; - if (dispatch_atomic_cmpxchg(vval, NULL, &dow)) { - dispatch_atomic_acquire_barrier(); + if (dispatch_atomic_cmpxchg(vval, NULL, &dow, acquire)) { _dispatch_client_callout(ctxt, func); // The next barrier must be long and strong. @@ -106,12 +103,12 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) // need to be issued. dispatch_atomic_maximally_synchronizing_barrier(); - //dispatch_atomic_release_barrier(); // assumed contained in above - tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE); + // above assumed to contain release barrier + tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); tail = &dow; while (tail != tmp) { while (!tmp->dow_next) { - _dispatch_hardware_pause(); + dispatch_hardware_pause(); } sema = tmp->dow_sema; tmp = (struct _dispatch_once_waiter_s*)tmp->dow_next; @@ -119,15 +116,15 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) } } else { dow.dow_sema = _dispatch_get_thread_semaphore(); + tmp = *vval; for (;;) { - tmp = *vval; if (tmp == DISPATCH_ONCE_DONE) { break; } - dispatch_atomic_store_barrier(); - if (dispatch_atomic_cmpxchg(vval, tmp, &dow)) { + if (dispatch_atomic_cmpxchgvw(vval, tmp, &dow, &tmp, release)) { dow.dow_next = tmp; _dispatch_thread_semaphore_wait(dow.dow_sema); + break; } } _dispatch_put_thread_semaphore(dow.dow_sema); diff --git a/src/protocol.defs b/src/protocol.defs index bf5fe5b..7a9cf18 100644 --- a/src/protocol.defs +++ b/src/protocol.defs @@ -1,5 
+1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -83,7 +83,7 @@ skip; skip; simpleroutine -wakeup_main_thread( +wakeup_runloop_thread( _port : mach_port_t; WaitTime _waitTimeout : natural_t ); diff --git a/src/provider.d b/src/provider.d index 59fe790..ede3c56 100644 --- a/src/provider.d +++ b/src/provider.d @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -18,23 +18,84 @@ * @APPLE_APACHE_LICENSE_HEADER_END@ */ +/* + * DTrace Probes for libdispatch + * + * Only available in the introspection version of the library, + * loaded by running a process with the environment variable + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection + */ + typedef struct dispatch_object_s *dispatch_object_t; typedef struct dispatch_queue_s *dispatch_queue_t; +typedef struct dispatch_source_s *dispatch_source_t; typedef void (*dispatch_function_t)(void *); +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + provider dispatch { + +/* + * Probes for dispatch queue push and pop operations + * + * dispatch$target:libdispatch*.dylib::queue-push + * dispatch$target:libdispatch*.dylib::queue-pop + */ probe queue__push(dispatch_queue_t queue, const char *label, dispatch_object_t item, const char *kind, dispatch_function_t function, void *context); probe queue__pop(dispatch_queue_t queue, const char *label, dispatch_object_t item, const char *kind, dispatch_function_t function, void *context); + +/* + * Probes for dispatch callouts to client functions + * + * dispatch$target:libdispatch*.dylib::callout-entry + * dispatch$target:libdispatch*.dylib::callout-return + */ probe callout__entry(dispatch_queue_t queue, const char *label, dispatch_function_t function, void *context); 
probe callout__return(dispatch_queue_t queue, const char *label, dispatch_function_t function, void *context); + +/* + * Probes for dispatch timer configuration and programming + * + * Timer configuration indicates that dispatch_source_set_timer() was called. + * Timer programming indicates that the dispatch manager is about to sleep + * for 'deadline' ns (but may wake up earlier if non-timer events occur). + * Time parameters are in nanoseconds, a value of -1 means "forever". + * + * dispatch$target:libdispatch*.dylib::timer-configure + * dispatch$target:libdispatch*.dylib::timer-program + */ + probe timer__configure(dispatch_source_t source, + dispatch_function_t handler, dispatch_trace_timer_params_t params); + probe timer__program(dispatch_source_t source, dispatch_function_t handler, + dispatch_trace_timer_params_t params); + +/* + * Probes for dispatch timer wakes and fires + * + * Timer wakes indicate that the dispatch manager woke up due to expiry of the + * deadline for the specified timer. + * Timer fires indicate that that the dispatch manager scheduled the event + * handler of the specified timer for asynchronous execution (may occur without + * a corresponding timer wake if the manager was awake processing other events + * when the timer deadline expired). + * + * dispatch$target:libdispatch*.dylib::timer-wake + * dispatch$target:libdispatch*.dylib::timer-fire + */ + probe timer__wake(dispatch_source_t source, dispatch_function_t handler); + probe timer__fire(dispatch_source_t source, dispatch_function_t handler); + }; + #pragma D attributes Evolving/Evolving/Common provider dispatch provider #pragma D attributes Private/Private/Common provider dispatch module #pragma D attributes Private/Private/Common provider dispatch function diff --git a/src/queue.c b/src/queue.c index f01d7f8..0568762 100644 --- a/src/queue.c +++ b/src/queue.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. 
All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -27,7 +27,15 @@ !defined(DISPATCH_ENABLE_THREAD_POOL) #define DISPATCH_ENABLE_THREAD_POOL 1 #endif -#if DISPATCH_ENABLE_THREAD_POOL && !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL +#define DISPATCH_USE_PTHREAD_POOL 1 +#endif +#if HAVE_PTHREAD_WORKQUEUES && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ + !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) +#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 +#endif +#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ + !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* #endif @@ -38,28 +46,25 @@ static void _dispatch_queue_cleanup(void *ctxt); static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n); static inline void _dispatch_queue_wakeup_global(dispatch_queue_t dq); -static _dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_queue_t dq); static inline _dispatch_thread_semaphore_t _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq); -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread3(void *context); -#endif #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static void _dispatch_worker_thread2(int priority, int options, void *context); #endif -#if DISPATCH_ENABLE_THREAD_POOL +#endif +#if DISPATCH_USE_PTHREAD_POOL static void *_dispatch_worker_thread(void *context); static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); #endif #if DISPATCH_COCOA_COMPAT -static unsigned int _dispatch_worker_threads; static dispatch_once_t _dispatch_main_q_port_pred; -static mach_port_t main_q_port; - -static void _dispatch_main_q_port_init(void *ctxt); -static dispatch_queue_t _dispatch_queue_wakeup_main(void); -static void _dispatch_main_queue_drain(void); +static dispatch_queue_t _dispatch_main_queue_wakeup(void); +unsigned long 
_dispatch_runloop_queue_wakeup(dispatch_queue_t dq); +static void _dispatch_runloop_queue_port_init(void *ctxt); +static void _dispatch_runloop_queue_port_dispose(dispatch_queue_t dq); #endif #pragma mark - @@ -110,7 +115,7 @@ static struct dispatch_semaphore_s _dispatch_thread_mediator[] = { }; #endif -#define MAX_THREAD_COUNT 255 +#define MAX_PTHREAD_COUNT 255 struct dispatch_root_queue_context_s { union { @@ -118,18 +123,20 @@ struct dispatch_root_queue_context_s { unsigned int volatile dgq_pending; #if HAVE_PTHREAD_WORKQUEUES int dgq_wq_priority, dgq_wq_options; -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL pthread_workqueue_t dgq_kworkqueue; #endif #endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL + void *dgq_ctxt; dispatch_semaphore_t dgq_thread_mediator; - uint32_t dgq_thread_pool_size; + uint32_t volatile dgq_thread_pool_size; #endif }; char _dgq_pad[DISPATCH_CACHELINE_SIZE]; }; }; +typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { @@ -141,7 +148,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {{{ @@ -152,7 +158,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {{{ @@ -163,7 +168,6 @@ static struct dispatch_root_queue_context_s 
_dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {{{ @@ -174,7 +178,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {{{ @@ -185,7 +188,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {{{ @@ -196,7 +198,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {{{ @@ -207,7 +208,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {{{ @@ -218,7 +218,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, }; @@ -351,6 +350,13 @@ static const dispatch_queue_t 
_dispatch_wq2root_queues[][2] = { }; #endif // HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +static struct dispatch_queue_s _dispatch_mgr_root_queue; +#else +#define _dispatch_mgr_root_queue \ + _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] +#endif + // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN @@ -359,29 +365,101 @@ struct dispatch_queue_s _dispatch_mgr_q = { .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_targetq = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], + .do_targetq = &_dispatch_mgr_root_queue, .dq_label = "com.apple.libdispatch-manager", .dq_width = 1, + .dq_is_thread_bound = 1, .dq_serialnum = 2, }; dispatch_queue_t dispatch_get_global_queue(long priority, unsigned long flags) { - if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) { + if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { return NULL; } return _dispatch_get_root_queue(priority, flags & DISPATCH_QUEUE_OVERCOMMIT); } +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_get_current_queue(void) +{ + return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); +} + dispatch_queue_t dispatch_get_current_queue(void) { - return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); + return _dispatch_get_current_queue(); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_targets_queue(dispatch_queue_t dq1, dispatch_queue_t dq2) +{ + while (dq1) { + if (dq1 == dq2) { + return true; + } + dq1 = dq1->do_targetq; + } + return false; +} + +#define DISPATCH_ASSERT_QUEUE_MESSAGE "BUG in client of libdispatch: " \ + "Assertion failed: Block was run on an unexpected queue" + +DISPATCH_NOINLINE +static void +_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) +{ + char *msg; + asprintf(&msg, "%s\n%s 
queue: 0x%p[%s]", DISPATCH_ASSERT_QUEUE_MESSAGE, + expected ? "Expected" : "Unexpected", dq, dq->dq_label ? + dq->dq_label : ""); + _dispatch_log("%s", msg); + _dispatch_set_crash_log_message(msg); + _dispatch_hardware_crash(); + free(msg); +} + +void +dispatch_assert_queue(dispatch_queue_t dq) +{ + if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { + DISPATCH_CLIENT_CRASH("invalid queue passed to " + "dispatch_assert_queue()"); + } + dispatch_queue_t cq = _dispatch_queue_get_current(); + if (fastpath(cq) && fastpath(_dispatch_queue_targets_queue(cq, dq))) { + return; + } + _dispatch_assert_queue_fail(dq, true); } +void +dispatch_assert_queue_not(dispatch_queue_t dq) +{ + if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { + DISPATCH_CLIENT_CRASH("invalid queue passed to " + "dispatch_assert_queue_not()"); + } + dispatch_queue_t cq = _dispatch_queue_get_current(); + if (slowpath(cq) && slowpath(_dispatch_queue_targets_queue(cq, dq))) { + _dispatch_assert_queue_fail(dq, false); + } +} + +#if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG +#define _dispatch_root_queue_debug(...) _dispatch_debug(__VA_ARGS__) +#define _dispatch_debug_root_queue(...) dispatch_debug_queue(__VA_ARGS__) +#else +#define _dispatch_root_queue_debug(...) +#define _dispatch_debug_root_queue(...) 
+#endif + #pragma mark - #pragma mark dispatch_init @@ -399,12 +477,16 @@ _dispatch_root_queues_init_workq(void) bool result = false; #if HAVE_PTHREAD_WORKQUEUES bool disable_wq = false; -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif int r; #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP if (!disable_wq) { +#if PTHREAD_WORKQUEUE_SPI_VERSION >= 20121218 + pthread_workqueue_setdispatchoffset_np( + offsetof(struct dispatch_queue_s, dq_serialnum)); +#endif r = pthread_workqueue_setdispatch_np(_dispatch_worker_thread2); #if !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK (void)dispatch_assume_zero(r); @@ -412,7 +494,7 @@ _dispatch_root_queues_init_workq(void) result = !r; } #endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL if (!result) { #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK pthread_workqueue_attr_t pwq_attr; @@ -424,8 +506,8 @@ _dispatch_root_queues_init_workq(void) int i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { pthread_workqueue_t pwq = NULL; - struct dispatch_root_queue_context_s *qc = - &_dispatch_root_queue_contexts[i]; + dispatch_root_queue_context_t qc; + qc = &_dispatch_root_queue_contexts[i]; #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK if (!disable_wq #if DISPATCH_NO_BG_PRIORITY @@ -436,13 +518,14 @@ _dispatch_root_queues_init_workq(void) qc->dgq_wq_priority); (void)dispatch_assume_zero(r); r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, - qc->dgq_wq_options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); + qc->dgq_wq_options & + WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); (void)dispatch_assume_zero(r); r = pthread_workqueue_create_np(&pwq, &pwq_attr); (void)dispatch_assume_zero(r); result = result || dispatch_assume(pwq); } -#endif +#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK qc->dgq_kworkqueue = pwq ? 
pwq : (void*)(~0ul); } #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK @@ -457,45 +540,51 @@ _dispatch_root_queues_init_workq(void) return result; } +#if DISPATCH_USE_PTHREAD_POOL static inline void -_dispatch_root_queues_init_thread_pool(void) +_dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, + bool overcommit) { -#if DISPATCH_ENABLE_THREAD_POOL - int i; - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { -#if TARGET_OS_EMBEDDED - // some software hangs if the non-overcommitting queues do not - // overcommit when threads block. Someday, this behavior should apply - // to all platforms - if (!(i & 1)) { - _dispatch_root_queue_contexts[i].dgq_thread_pool_size = - _dispatch_hw_config.cc_max_active; - } -#endif + qc->dgq_thread_pool_size = overcommit ? MAX_PTHREAD_COUNT : + _dispatch_hw_config.cc_max_active; #if USE_MACH_SEM - // override the default FIFO behavior for the pool semaphores - kern_return_t kr = semaphore_create(mach_task_self(), - &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - (void)dispatch_assume(_dispatch_thread_mediator[i].dsema_port); + // override the default FIFO behavior for the pool semaphores + kern_return_t kr = semaphore_create(mach_task_self(), + &qc->dgq_thread_mediator->dsema_port, SYNC_POLICY_LIFO, 0); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + (void)dispatch_assume(qc->dgq_thread_mediator->dsema_port); #elif USE_POSIX_SEM - /* XXXRW: POSIX semaphores don't support LIFO? */ - int ret = sem_init(&_dispatch_thread_mediator[i].dsema_sem, 0, 0); - (void)dispatch_assume_zero(ret); + /* XXXRW: POSIX semaphores don't support LIFO? 
*/ + int ret = sem_init(&qc->dgq_thread_mediator->dsema_sem, 0, 0); + (void)dispatch_assume_zero(ret); #endif - } -#else - DISPATCH_CRASH("Thread pool creation failed"); -#endif // DISPATCH_ENABLE_THREAD_POOL } +#endif // DISPATCH_USE_PTHREAD_POOL static void _dispatch_root_queues_init(void *context DISPATCH_UNUSED) { _dispatch_safe_fork = false; if (!_dispatch_root_queues_init_workq()) { - _dispatch_root_queues_init_thread_pool(); +#if DISPATCH_ENABLE_THREAD_POOL + int i; + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + bool overcommit = true; +#if TARGET_OS_EMBEDDED + // some software hangs if the non-overcommitting queues do not + // overcommit when threads block. Someday, this behavior should + // apply to all platforms + if (!(i & 1)) { + overcommit = false; + } +#endif + _dispatch_root_queue_init_pthread_pool( + &_dispatch_root_queue_contexts[i], overcommit); + } +#else + DISPATCH_CRASH("Root queue initialization failed"); +#endif // DISPATCH_ENABLE_THREAD_POOL } } @@ -526,18 +615,17 @@ libdispatch_init(void) #endif dispatch_assert(sizeof(struct dispatch_apply_s) <= - ROUND_UP_TO_CACHELINE_SIZE(sizeof( - struct dispatch_continuation_s))); - dispatch_assert(sizeof(struct dispatch_source_s) == - sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); + DISPATCH_CONTINUATION_SIZE); dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE == 0); dispatch_assert(sizeof(struct dispatch_root_queue_context_s) % DISPATCH_CACHELINE_SIZE == 0); _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); +#if !DISPATCH_USE_OS_SEMAPHORE_CACHE _dispatch_thread_key_create(&dispatch_sema4_key, (void (*)(void *))_dispatch_thread_semaphore_dispose); +#endif _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); _dispatch_thread_key_create(&dispatch_io_key, NULL); _dispatch_thread_key_create(&dispatch_apply_key, NULL); @@ -551,6 +639,7 @@ libdispatch_init(void) #endif 
_dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); + _dispatch_queue_set_bound_thread(&_dispatch_main_q); #if DISPATCH_USE_PTHREAD_ATFORK (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, @@ -560,6 +649,7 @@ libdispatch_init(void) _dispatch_hw_config_init(); _dispatch_vtable_init(); _os_object_init(); + _dispatch_introspection_init(); } DISPATCH_EXPORT DISPATCH_NOTHROW @@ -572,6 +662,7 @@ dispatch_atfork_child(void) if (_dispatch_safe_fork) { return; } + _dispatch_child_of_unsafe_fork = true; _dispatch_main_q.dq_items_head = crash; _dispatch_main_q.dq_items_tail = crash; @@ -591,51 +682,55 @@ dispatch_atfork_child(void) // skip zero // 1 - main_q // 2 - mgr_q -// 3 - _unused_ +// 3 - mgr_root_q // 4,5,6,7,8,9,10,11 - global queues // we use 'xadd' on Intel, so the initial value == next assigned -unsigned long _dispatch_queue_serial_numbers = 12; +unsigned long volatile _dispatch_queue_serial_numbers = 12; dispatch_queue_t -dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) +dispatch_queue_create_with_target(const char *label, + dispatch_queue_attr_t attr, dispatch_queue_t tq) { dispatch_queue_t dq; - size_t label_len; - - if (!label) { - label = ""; - } - - label_len = strlen(label); - if (label_len < (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1)) { - label_len = (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1); - } - // XXX switch to malloc() dq = _dispatch_alloc(DISPATCH_VTABLE(queue), - sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_MIN_LABEL_SIZE - - DISPATCH_QUEUE_CACHELINE_PAD + label_len + 1); + sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); _dispatch_queue_init(dq); - strcpy(dq->dq_label, label); - - if (fastpath(!attr)) { - return dq; + if (label) { + dq->dq_label = strdup(label); } - if (fastpath(attr == DISPATCH_QUEUE_CONCURRENT)) { + + if (attr == DISPATCH_QUEUE_CONCURRENT) { dq->dq_width = UINT32_MAX; - dq->do_targetq = _dispatch_get_root_queue(0, false); + if (!tq) { + tq = _dispatch_get_root_queue(0, 
false); + } } else { - dispatch_debug_assert(!attr, "Invalid attribute"); + if (!tq) { + // Default target queue is overcommit! + tq = _dispatch_get_root_queue(0, true); + } + if (slowpath(attr)) { + dispatch_debug_assert(!attr, "Invalid attribute"); + } } - return dq; + dq->do_targetq = tq; + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_introspection_queue_create(dq); +} + +dispatch_queue_t +dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) +{ + return dispatch_queue_create_with_target(label, attr, + DISPATCH_TARGET_QUEUE_DEFAULT); } -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol void -_dispatch_queue_dispose(dispatch_queue_t dq) +_dispatch_queue_destroy(dispatch_object_t dou) { + dispatch_queue_t dq = dou._dq; if (slowpath(dq == _dispatch_queue_get_current())) { DISPATCH_CRASH("Release of a queue by itself"); } @@ -647,16 +742,32 @@ _dispatch_queue_dispose(dispatch_queue_t dq) dq->dq_items_tail = (void *)0x200; dispatch_queue_t dqsq = dispatch_atomic_xchg2o(dq, dq_specific_q, - (void *)0x200); + (void *)0x200, relaxed); if (dqsq) { _dispatch_release(dqsq); } } +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +void +_dispatch_queue_dispose(dispatch_queue_t dq) +{ + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_queue_dispose(dq); + if (dq->dq_label) { + free((void*)dq->dq_label); + } + _dispatch_queue_destroy(dq); +} + const char * dispatch_queue_get_label(dispatch_queue_t dq) { - return dq->dq_label; + if (slowpath(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { + dq = _dispatch_get_current_queue(); + } + return dq->dq_label ? 
dq->dq_label : ""; } static void @@ -668,10 +779,11 @@ _dispatch_queue_set_width2(void *ctxt) if (w == 1 || w == 0) { dq->dq_width = 1; + _dispatch_object_debug(dq, "%s", __func__); return; } if (w > 0) { - tmp = w; + tmp = (unsigned int)w; } else switch (w) { case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: tmp = _dispatch_hw_config.cc_max_physical; @@ -688,15 +800,17 @@ _dispatch_queue_set_width2(void *ctxt) // multiply by two since the running count is inc/dec by two // (the low bit == barrier) dq->dq_width = tmp * 2; + _dispatch_object_debug(dq, "%s", __func__); } void dispatch_queue_set_width(dispatch_queue_t dq, long width) { - if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } - dispatch_barrier_async_f(dq, (void*)(intptr_t)width, + _dispatch_barrier_trysync_f(dq, (void*)(intptr_t)width, _dispatch_queue_set_width2); } @@ -710,18 +824,18 @@ _dispatch_set_target_queue2(void *ctxt) prev_dq = dq->do_targetq; dq->do_targetq = ctxt; _dispatch_release(prev_dq); + _dispatch_object_debug(dq, "%s", __func__); } void dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) { - dispatch_queue_t prev_dq; - unsigned long type; - - if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, dq); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } - type = dx_type(dou._do) & _DISPATCH_META_TYPE_MASK; + unsigned long type = dx_metatype(dou._do); if (slowpath(!dq)) { bool is_concurrent_q = (type == _DISPATCH_QUEUE_TYPE && slowpath(dou._dq->dq_width > 1)); @@ -732,38 +846,210 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) case _DISPATCH_QUEUE_TYPE: case _DISPATCH_SOURCE_TYPE: _dispatch_retain(dq); - return dispatch_barrier_async_f(dou._dq, 
dq, + return _dispatch_barrier_trysync_f(dou._dq, dq, _dispatch_set_target_queue2); case _DISPATCH_IO_TYPE: return _dispatch_io_set_target_queue(dou._dchannel, dq); - default: + default: { + dispatch_queue_t prev_dq; _dispatch_retain(dq); - dispatch_atomic_store_barrier(); - prev_dq = dispatch_atomic_xchg2o(dou._do, do_targetq, dq); + prev_dq = dispatch_atomic_xchg2o(dou._do, do_targetq, dq, release); if (prev_dq) _dispatch_release(prev_dq); + _dispatch_object_debug(dou._do, "%s", __func__); return; + } } } +#pragma mark - +#pragma mark dispatch_pthread_root_queue + +struct dispatch_pthread_root_queue_context_s { + pthread_attr_t dpq_thread_attr; + dispatch_block_t dpq_thread_configure; + struct dispatch_semaphore_s dpq_thread_mediator; +}; +typedef struct dispatch_pthread_root_queue_context_s * + dispatch_pthread_root_queue_context_t; + +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +static struct dispatch_pthread_root_queue_context_s + _dispatch_mgr_root_queue_pthread_context; +static struct dispatch_root_queue_context_s + _dispatch_mgr_root_queue_context = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_kworkqueue = (void*)(~0ul), +#endif + .dgq_ctxt = &_dispatch_mgr_root_queue_pthread_context, + .dgq_thread_pool_size = 1, +}}}; +static struct dispatch_queue_s _dispatch_mgr_root_queue = { + .do_vtable = DISPATCH_VTABLE(queue_root), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_mgr_root_queue_context, + .dq_label = "com.apple.root.libdispatch-manager", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 3, +}; +static struct { + volatile int prio; + int policy; + pthread_t tid; +} _dispatch_mgr_sched; +static dispatch_once_t _dispatch_mgr_sched_pred; + +static void +_dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) +{ + struct sched_param param; + pthread_attr_t *attr; + attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; + 
(void)dispatch_assume_zero(pthread_attr_init(attr)); + (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, + &_dispatch_mgr_sched.policy)); + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); + // high-priority workq threads are at priority 2 above default + _dispatch_mgr_sched.prio = param.sched_priority + 2; +} + +DISPATCH_NOINLINE +static pthread_t * +_dispatch_mgr_root_queue_init(void) +{ + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + struct sched_param param; + pthread_attr_t *attr; + attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; + (void)dispatch_assume_zero(pthread_attr_setdetachstate(attr, + PTHREAD_CREATE_DETACHED)); +#if !DISPATCH_DEBUG + (void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024)); +#endif + param.sched_priority = _dispatch_mgr_sched.prio; + (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m)); + return &_dispatch_mgr_sched.tid; +} + +static inline void +_dispatch_mgr_priority_apply(void) +{ + struct sched_param param; + do { + param.sched_priority = _dispatch_mgr_sched.prio; + (void)dispatch_assume_zero(pthread_setschedparam( + _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, ¶m)); + } while (_dispatch_mgr_sched.prio > param.sched_priority); +} + +DISPATCH_NOINLINE void -dispatch_set_current_target_queue(dispatch_queue_t dq) +_dispatch_mgr_priority_init(void) +{ + struct sched_param param; + pthread_attr_t *attr; + attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); + if (slowpath(_dispatch_mgr_sched.prio > param.sched_priority)) { + return _dispatch_mgr_priority_apply(); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mgr_priority_raise(const pthread_attr_t *attr) +{ + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + struct sched_param param; + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); + int p = 
_dispatch_mgr_sched.prio; + do if (p >= param.sched_priority) { + return; + } while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched, prio, + p, param.sched_priority, &p, relaxed))); + if (_dispatch_mgr_sched.tid) { + return _dispatch_mgr_priority_apply(); + } +} + +dispatch_queue_t +dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure) { - dispatch_queue_t queue = _dispatch_queue_get_current(); + dispatch_queue_t dq; + dispatch_root_queue_context_t qc; + dispatch_pthread_root_queue_context_t pqc; + size_t dqs; - if (slowpath(!queue)) { - DISPATCH_CLIENT_CRASH("SPI not called from a queue"); + if (slowpath(flags)) { + return NULL; } - if (slowpath(queue->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - DISPATCH_CLIENT_CRASH("SPI not supported on this queue"); + dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; + dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs + + sizeof(struct dispatch_root_queue_context_s) + + sizeof(struct dispatch_pthread_root_queue_context_s)); + qc = (void*)dq + dqs; + pqc = (void*)qc + sizeof(struct dispatch_root_queue_context_s); + + _dispatch_queue_init(dq); + if (label) { + dq->dq_label = strdup(label); } - if (slowpath(queue->dq_width != 1)) { - DISPATCH_CLIENT_CRASH("SPI not called from a serial queue"); + + dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; + dq->do_ctxt = qc; + dq->do_targetq = NULL; + dq->dq_running = 2; + dq->dq_width = UINT32_MAX; + + pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); + qc->dgq_thread_mediator = &pqc->dpq_thread_mediator; + qc->dgq_ctxt = pqc; +#if HAVE_PTHREAD_WORKQUEUES + qc->dgq_kworkqueue = (void*)(~0ul); +#endif + _dispatch_root_queue_init_pthread_pool(qc, true); // rdar://11352331 + + if (attr) { + memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); + _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); + } else { + 
(void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); } - if (slowpath(!dq)) { - dq = _dispatch_get_root_queue(0, true); + (void)dispatch_assume_zero(pthread_attr_setdetachstate( + &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); + if (configure) { + pqc->dpq_thread_configure = _dispatch_Block_copy(configure); } - _dispatch_retain(dq); - _dispatch_set_target_queue2(dq); + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_introspection_queue_create(dq); +} +#endif + +void +_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) +{ + if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + DISPATCH_CRASH("Global root queue disposed"); + } + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_queue_dispose(dq); +#if DISPATCH_USE_PTHREAD_POOL + dispatch_root_queue_context_t qc = dq->do_ctxt; + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + + _dispatch_semaphore_dispose(qc->dgq_thread_mediator); + if (pqc->dpq_thread_configure) { + Block_release(pqc->dpq_thread_configure); + } + dq->do_targetq = _dispatch_get_root_queue(0, false); +#endif + if (dq->dq_label) { + free((void*)dq->dq_label); + } + _dispatch_queue_destroy(dq); } #pragma mark - @@ -772,14 +1058,8 @@ dispatch_set_current_target_queue(dispatch_queue_t dq) struct dispatch_queue_specific_queue_s { DISPATCH_STRUCT_HEADER(queue_specific_queue); DISPATCH_QUEUE_HEADER; - union { - char _dqsq_pad[DISPATCH_QUEUE_MIN_LABEL_SIZE]; - struct { - char dq_label[16]; - TAILQ_HEAD(dispatch_queue_specific_head_s, - dispatch_queue_specific_s) dqsq_contexts; - }; - }; + TAILQ_HEAD(dispatch_queue_specific_head_s, + dispatch_queue_specific_s) dqsq_contexts; }; struct dispatch_queue_specific_s { @@ -803,7 +1083,7 @@ _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) } free(dqs); } - _dispatch_queue_dispose((dispatch_queue_t)dqsq); + _dispatch_queue_destroy((dispatch_queue_t)dqsq); } static void @@ -818,11 +1098,10 @@ 
_dispatch_queue_init_specific(dispatch_queue_t dq) dqsq->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, true); dqsq->dq_width = UINT32_MAX; - strlcpy(dqsq->dq_label, "queue-specific", sizeof(dqsq->dq_label)); + dqsq->dq_label = "queue-specific"; TAILQ_INIT(&dqsq->dqsq_contexts); - dispatch_atomic_store_barrier(); if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, - (dispatch_queue_t)dqsq))) { + (dispatch_queue_t)dqsq, release))) { _dispatch_release((dispatch_queue_t)dqsq); } } @@ -868,14 +1147,14 @@ dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, } dispatch_queue_specific_t dqs; - dqs = calloc(1, sizeof(struct dispatch_queue_specific_s)); + dqs = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_s)); dqs->dqs_key = key; dqs->dqs_ctxt = ctxt; dqs->dqs_destructor = destructor; if (slowpath(!dq->dq_specific_q)) { _dispatch_queue_init_specific(dq); } - dispatch_barrier_async_f(dq->dq_specific_q, dqs, + _dispatch_barrier_trysync_f(dq->dq_specific_q, dqs, _dispatch_queue_set_specific); } @@ -941,21 +1220,28 @@ dispatch_get_specific(const void *key) size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) { + size_t offset = 0; dispatch_queue_t target = dq->do_targetq; - return snprintf(buf, bufsiz, "target = %s[%p], width = 0x%x, " - "running = 0x%x, barrier = %d ", target ? target->dq_label : "", - target, dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1); + offset += dsnprintf(buf, bufsiz, "target = %s[%p], width = 0x%x, " + "running = 0x%x, barrier = %d ", target && target->dq_label ? 
+ target->dq_label : "", target, dq->dq_width / 2, + dq->dq_running / 2, dq->dq_running & 1); + if (dq->dq_is_thread_bound) { + offset += dsnprintf(buf, bufsiz, ", thread = %p ", + _dispatch_queue_get_bound_thread(dq)); + } + return offset; } size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) { size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dq->dq_label, dq); + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dq->dq_label ? dq->dq_label : dx_kind(dq), dq); offset += _dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); offset += _dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += snprintf(&buf[offset], bufsiz - offset, "}"); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); return offset; } @@ -963,7 +1249,7 @@ dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) void dispatch_debug_queue(dispatch_queue_t dq, const char* str) { if (fastpath(dq)) { - dispatch_debug(dq, "%s", str); + _dispatch_object_debug(dq, "%s", str); } else { _dispatch_log("queue[NULL]: %s", str); } @@ -972,7 +1258,6 @@ dispatch_debug_queue(dispatch_queue_t dq, const char* str) { #if DISPATCH_PERF_MON static OSSpinLock _dispatch_stats_lock; -static size_t _dispatch_bad_ratio; static struct { uint64_t time_total; uint64_t count_total; @@ -1009,34 +1294,6 @@ _dispatch_queue_merge_stats(uint64_t start) #pragma mark - #pragma mark dispatch_continuation_t -static malloc_zone_t *_dispatch_ccache_zone; - -static void -_dispatch_ccache_init(void *context DISPATCH_UNUSED) -{ - _dispatch_ccache_zone = malloc_create_zone(0, 0); - dispatch_assert(_dispatch_ccache_zone); - malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); -} - -dispatch_continuation_t -_dispatch_continuation_alloc_from_heap(void) -{ - static dispatch_once_t pred; - dispatch_continuation_t dc; - - dispatch_once_f(&pred, NULL, _dispatch_ccache_init); - - // This is also used for 
allocating struct dispatch_apply_s. If the - // ROUND_UP behavior is changed, adjust the assert in libdispatch_init - while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, - ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { - sleep(1); - } - - return dc; -} - static void _dispatch_force_cache_cleanup(void) { @@ -1048,13 +1305,6 @@ _dispatch_force_cache_cleanup(void) } } -// rdar://problem/11500155 -void -dispatch_flush_continuation_cache(void) -{ - _dispatch_force_cache_cleanup(); -} - DISPATCH_NOINLINE static void _dispatch_cache_cleanup(void *value) @@ -1063,9 +1313,31 @@ _dispatch_cache_cleanup(void *value) while ((dc = next_dc)) { next_dc = dc->do_next; - malloc_zone_free(_dispatch_ccache_zone, dc); + _dispatch_continuation_free_to_heap(dc); + } +} + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE +int _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; + +DISPATCH_NOINLINE +void +_dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) +{ + _dispatch_continuation_free_to_heap(dc); + dispatch_continuation_t next_dc; + dc = _dispatch_thread_getspecific(dispatch_cache_key); + int cnt; + if (!dc || (cnt = dc->do_ref_cnt-_dispatch_continuation_cache_limit) <= 0) { + return; } + do { + next_dc = dc->do_next; + _dispatch_continuation_free_to_heap(dc); + } while (--cnt && (dc = next_dc)); + _dispatch_thread_setspecific(dispatch_cache_key, next_dc); } +#endif DISPATCH_ALWAYS_INLINE_NDEBUG static inline void @@ -1074,12 +1346,11 @@ _dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) dispatch_continuation_t dc = dou._dc; _dispatch_trace_continuation_pop(dq, dou); - (void)dispatch_atomic_add2o(dq, dq_running, 2); + (void)dispatch_atomic_add2o(dq, dq_running, 2, acquire); if (!DISPATCH_OBJ_IS_VTABLE(dc) && (long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { - dispatch_atomic_barrier(); _dispatch_thread_semaphore_signal( - (_dispatch_thread_semaphore_t)dc->dc_ctxt); + (_dispatch_thread_semaphore_t)dc->dc_other); } 
else { _dispatch_async_f_redirect(dq, dc); } @@ -1089,12 +1360,12 @@ DISPATCH_ALWAYS_INLINE_NDEBUG static inline void _dispatch_continuation_pop(dispatch_object_t dou) { - dispatch_continuation_t dc = dou._dc; + dispatch_continuation_t dc = dou._dc, dc1; dispatch_group_t dg; _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou); if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { - return _dispatch_queue_invoke(dou._dq); + return dx_invoke(dou._do); } // Add the item back to the cache before calling the function. This @@ -1104,7 +1375,9 @@ _dispatch_continuation_pop(dispatch_object_t dou) // Therefore, the object has not been reused yet. // This generates better assembly. if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { - _dispatch_continuation_free(dc); + dc1 = _dispatch_continuation_free_cacheonly(dc); + } else { + dc1 = NULL; } if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { dg = dc->dc_data; @@ -1116,6 +1389,9 @@ _dispatch_continuation_pop(dispatch_object_t dou) dispatch_group_leave(dg); _dispatch_release(dg); } + if (slowpath(dc1)) { + _dispatch_continuation_free_to_cache_limit(dc1); + } } #pragma mark - @@ -1166,10 +1442,10 @@ dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) #pragma mark - #pragma mark dispatch_async -static void -_dispatch_async_f_redirect_invoke(void *_ctxt) +void +_dispatch_async_redirect_invoke(void *ctxt) { - struct dispatch_continuation_s *dc = _ctxt; + struct dispatch_continuation_s *dc = ctxt; struct dispatch_continuation_s *other_dc = dc->dc_other; dispatch_queue_t old_dq, dq = dc->dc_data, rq; @@ -1180,24 +1456,39 @@ _dispatch_async_f_redirect_invoke(void *_ctxt) rq = dq->do_targetq; while (slowpath(rq->do_targetq) && rq != old_dq) { - if (dispatch_atomic_sub2o(rq, dq_running, 2) == 0) { + if (dispatch_atomic_sub2o(rq, dq_running, 2, relaxed) == 0) { _dispatch_wakeup(rq); } rq = rq->do_targetq; } - if (dispatch_atomic_sub2o(dq, dq_running, 2) == 0) { + if (dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 
0) { _dispatch_wakeup(dq); } _dispatch_release(dq); } -DISPATCH_NOINLINE -static void -_dispatch_async_f2_slow(dispatch_queue_t dq, dispatch_continuation_t dc) +static inline void +_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc) { - _dispatch_wakeup(dq); - _dispatch_queue_push(dq, dc); + uint32_t running = 2; + + // Find the queue to redirect to + do { + if (slowpath(dq->dq_items_tail) || + slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) || + slowpath(dq->dq_width == 1)) { + break; + } + running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + if (slowpath(running & 1) || slowpath(running > dq->dq_width)) { + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); + break; + } + dq = dq->do_targetq; + } while (slowpath(dq->do_targetq)); + + _dispatch_queue_push_wakeup(dq, dc, running == 0); } DISPATCH_NOINLINE @@ -1205,69 +1496,47 @@ static void _dispatch_async_f_redirect(dispatch_queue_t dq, dispatch_continuation_t other_dc) { - dispatch_continuation_t dc; - dispatch_queue_t rq; - - _dispatch_retain(dq); - - dc = _dispatch_continuation_alloc(); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = _dispatch_async_f_redirect_invoke; + dc->dc_func = _dispatch_async_redirect_invoke; dc->dc_ctxt = dc; dc->dc_data = dq; dc->dc_other = other_dc; - // Find the queue to redirect to - rq = dq->do_targetq; - while (slowpath(rq->do_targetq)) { - uint32_t running; - - if (slowpath(rq->dq_items_tail) || - slowpath(DISPATCH_OBJECT_SUSPENDED(rq)) || - slowpath(rq->dq_width == 1)) { - break; - } - running = dispatch_atomic_add2o(rq, dq_running, 2) - 2; - if (slowpath(running & 1) || slowpath(running + 2 > rq->dq_width)) { - if (slowpath(dispatch_atomic_sub2o(rq, dq_running, 2) == 0)) { - return _dispatch_async_f2_slow(rq, dc); - } - break; - } - rq = rq->do_targetq; + _dispatch_retain(dq); + dq = dq->do_targetq; + if (slowpath(dq->do_targetq)) { + return 
_dispatch_async_f_redirect2(dq, dc); } - _dispatch_queue_push(rq, dc); + + _dispatch_queue_push(dq, dc); } DISPATCH_NOINLINE static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) { - uint32_t running; - bool locked; + uint32_t running = 2; do { if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { break; } - running = dispatch_atomic_add2o(dq, dq_running, 2); + running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); if (slowpath(running > dq->dq_width)) { - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { - return _dispatch_async_f2_slow(dq, dc); - } + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); break; } - locked = running & 1; - if (fastpath(!locked)) { + if (!slowpath(running & 1)) { return _dispatch_async_f_redirect(dq, dc); } - locked = dispatch_atomic_sub2o(dq, dq_running, 2) & 1; + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); // We might get lucky and find that the barrier has ended by now - } while (!locked); + } while (!(running & 1)); - _dispatch_queue_push(dq, dc); + _dispatch_queue_push_wakeup(dq, dc, running == 0); } DISPATCH_NOINLINE @@ -1375,21 +1644,15 @@ _dispatch_function_invoke(dispatch_queue_t dq, void *ctxt, dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); _dispatch_client_callout(ctxt, func); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } -struct dispatch_function_recurse_s { - dispatch_queue_t dfr_dq; - void* dfr_ctxt; - dispatch_function_t dfr_func; -}; - -static void -_dispatch_function_recurse_invoke(void *ctxt) +void +_dispatch_sync_recurse_invoke(void *ctxt) { - struct dispatch_function_recurse_s *dfr = ctxt; - _dispatch_function_invoke(dfr->dfr_dq, dfr->dfr_ctxt, dfr->dfr_func); + dispatch_continuation_t dc = ctxt; + _dispatch_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); 
} DISPATCH_ALWAYS_INLINE @@ -1397,35 +1660,26 @@ static inline void _dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - struct dispatch_function_recurse_s dfr = { - .dfr_dq = dq, - .dfr_func = func, - .dfr_ctxt = ctxt, + struct dispatch_continuation_s dc = { + .dc_data = dq, + .dc_func = func, + .dc_ctxt = ctxt, }; - dispatch_sync_f(dq->do_targetq, &dfr, _dispatch_function_recurse_invoke); + dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke); } #pragma mark - #pragma mark dispatch_barrier_sync -struct dispatch_barrier_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(barrier_sync_slow); -}; - -struct dispatch_barrier_sync_slow2_s { - dispatch_queue_t dbss2_dq; -#if DISPATCH_COCOA_COMPAT - dispatch_function_t dbss2_func; - void *dbss2_ctxt; -#endif - _dispatch_thread_semaphore_t dbss2_sema; -}; +static void _dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func); DISPATCH_ALWAYS_INLINE_NDEBUG static inline _dispatch_thread_semaphore_t _dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, bool lock) { + _dispatch_thread_semaphore_t sema; dispatch_continuation_t dc = dou._dc; if (DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & @@ -1434,42 +1688,44 @@ _dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, return 0; } _dispatch_trace_continuation_pop(dq, dc); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); - struct dispatch_barrier_sync_slow_s *dbssp = (void *)dc; - struct dispatch_barrier_sync_slow2_s *dbss2 = dbssp->dc_ctxt; + dc = dc->dc_ctxt; + dq = dc->dc_data; + sema = (_dispatch_thread_semaphore_t)dc->dc_other; if (lock) { - (void)dispatch_atomic_add2o(dbss2->dbss2_dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + (void)dispatch_atomic_add2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); // rdar://problem/9032024 running lock must be held until sync_f_slow // returns - 
(void)dispatch_atomic_add2o(dbss2->dbss2_dq, dq_running, 2); + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); } - return dbss2->dbss2_sema ? dbss2->dbss2_sema : MACH_PORT_DEAD; + return sema ? sema : MACH_PORT_DEAD; } static void _dispatch_barrier_sync_f_slow_invoke(void *ctxt) { - struct dispatch_barrier_sync_slow2_s *dbss2 = ctxt; + dispatch_continuation_t dc = ctxt; + dispatch_queue_t dq = dc->dc_data; + _dispatch_thread_semaphore_t sema; + sema = (_dispatch_thread_semaphore_t)dc->dc_other; - dispatch_assert(dbss2->dbss2_dq == _dispatch_queue_get_current()); + dispatch_assert(dq == _dispatch_queue_get_current()); #if DISPATCH_COCOA_COMPAT - // When the main queue is bound to the main thread - if (dbss2->dbss2_dq == &_dispatch_main_q && pthread_main_np()) { - dbss2->dbss2_func(dbss2->dbss2_ctxt); - dbss2->dbss2_func = NULL; - dispatch_atomic_barrier(); - _dispatch_thread_semaphore_signal(dbss2->dbss2_sema); + if (slowpath(dq->dq_is_thread_bound)) { + // The queue is bound to a non-dispatch thread (e.g. 
main thread) + dc->dc_func(dc->dc_ctxt); + dispatch_atomic_store2o(dc, dc_func, NULL, release); + _dispatch_thread_semaphore_signal(sema); // release return; } #endif - (void)dispatch_atomic_add2o(dbss2->dbss2_dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + (void)dispatch_atomic_add2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); // rdar://9032024 running lock must be held until sync_f_slow returns - (void)dispatch_atomic_add2o(dbss2->dbss2_dq, dq_running, 2); - dispatch_atomic_barrier(); - _dispatch_thread_semaphore_signal(dbss2->dbss2_sema); + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + _dispatch_thread_semaphore_signal(sema); // release } DISPATCH_NOINLINE @@ -1477,55 +1733,61 @@ static void _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { + if (slowpath(!dq->do_targetq)) { + // the global concurrent queues do not need strict ordering + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + return _dispatch_sync_f_invoke(dq, ctxt, func); + } // It's preferred to execute synchronous blocks on the current thread // due to thread-local side effects, garbage collection, etc. 
However, // blocks submitted to the main thread MUST be run on the main thread - struct dispatch_barrier_sync_slow2_s dbss2 = { - .dbss2_dq = dq, + _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); + struct dispatch_continuation_s dc = { + .dc_data = dq, #if DISPATCH_COCOA_COMPAT - .dbss2_func = func, - .dbss2_ctxt = ctxt, + .dc_func = func, + .dc_ctxt = ctxt, #endif - .dbss2_sema = _dispatch_get_thread_semaphore(), + .dc_other = (void*)sema, }; - struct dispatch_barrier_sync_slow_s dbss = { + struct dispatch_continuation_s dbss = { .do_vtable = (void *)(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT), .dc_func = _dispatch_barrier_sync_f_slow_invoke, - .dc_ctxt = &dbss2, + .dc_ctxt = &dc, +#if DISPATCH_INTROSPECTION + .dc_data = (void*)_dispatch_thread_self(), +#endif }; - _dispatch_queue_push(dq, (void *)&dbss); + _dispatch_queue_push(dq, &dbss); - _dispatch_thread_semaphore_wait(dbss2.dbss2_sema); - _dispatch_put_thread_semaphore(dbss2.dbss2_sema); + _dispatch_thread_semaphore_wait(sema); // acquire + _dispatch_put_thread_semaphore(sema); #if DISPATCH_COCOA_COMPAT - // Main queue bound to main thread - if (dbss2.dbss2_func == NULL) { + // Queue bound to a non-dispatch thread + if (dc.dc_func == NULL) { return; } #endif - dispatch_atomic_acquire_barrier(); - if (slowpath(dq->do_targetq) && slowpath(dq->do_targetq->do_targetq)) { + if (slowpath(dq->do_targetq->do_targetq)) { _dispatch_function_recurse(dq, ctxt, func); } else { _dispatch_function_invoke(dq, ctxt, func); } - dispatch_atomic_release_barrier(); if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL) && dq->dq_running == 2) { // rdar://problem/8290662 "lock transfer" - _dispatch_thread_semaphore_t sema; sema = _dispatch_queue_drain_one_barrier_sync(dq); if (sema) { - _dispatch_thread_semaphore_signal(sema); + _dispatch_thread_semaphore_signal(sema); // release return; } } (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); - if 
(slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1540,15 +1802,15 @@ _dispatch_barrier_sync_f2(dispatch_queue_t dq) sema = _dispatch_queue_drain_one_barrier_sync(dq); if (sema) { (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); // rdar://9032024 running lock must be held until sync_f_slow // returns: increment by 2 and decrement by 1 - (void)dispatch_atomic_inc2o(dq, dq_running); + (void)dispatch_atomic_inc2o(dq, dq_running, relaxed); _dispatch_thread_semaphore_signal(sema); return; } } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1558,13 +1820,11 @@ static void _dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - dispatch_atomic_acquire_barrier(); _dispatch_function_invoke(dq, ctxt, func); - dispatch_atomic_release_barrier(); if (slowpath(dq->dq_items_tail)) { return _dispatch_barrier_sync_f2(dq); } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1574,13 +1834,11 @@ static void _dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - dispatch_atomic_acquire_barrier(); _dispatch_function_recurse(dq, ctxt, func); - dispatch_atomic_release_barrier(); if (slowpath(dq->dq_items_tail)) { return _dispatch_barrier_sync_f2(dq); } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1595,9 +1853,9 @@ dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, if (slowpath(dq->dq_items_tail) || 
slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ return _dispatch_barrier_sync_f_slow(dq, ctxt, func); } - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) { - // global queues and main queue bound to main thread always falls into - // the slow case + if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { + // global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case return _dispatch_barrier_sync_f_slow(dq, ctxt, func); } if (slowpath(dq->do_targetq->do_targetq)) { @@ -1621,8 +1879,7 @@ _dispatch_barrier_sync_slow(dispatch_queue_t dq, void (^work)(void)) return dispatch_barrier_sync_f(dq, block, _dispatch_call_block_and_release); } - struct Block_basic *bb = (void *)work; - dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); + dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif @@ -1630,30 +1887,59 @@ void dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) { #if DISPATCH_COCOA_COMPAT - if (slowpath(dq == &_dispatch_main_q)) { + if (slowpath(dq->dq_is_thread_bound)) { return _dispatch_barrier_sync_slow(dq, work); } #endif - struct Block_basic *bb = (void *)work; - dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); + dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif +DISPATCH_NOINLINE +static void +_dispatch_barrier_trysync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_function_invoke(dq, ctxt, func); + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { + _dispatch_wakeup(dq); + } +} + +DISPATCH_NOINLINE +void +_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + // Use for mutation of queue-/source-internal state only, ignores target + // queue hierarchy! 
+ if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) + || slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, + acquire))) { + return dispatch_barrier_async_f(dq, ctxt, func); + } + _dispatch_barrier_trysync_f_invoke(dq, ctxt, func); +} + #pragma mark - #pragma mark dispatch_sync DISPATCH_NOINLINE static void -_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + bool wakeup) { _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - struct dispatch_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(sync_slow); - } dss = { + struct dispatch_continuation_s dss = { .do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT, - .dc_ctxt = (void*)sema, +#if DISPATCH_INTROSPECTION + .dc_func = func, + .dc_ctxt = ctxt, + .dc_data = (void*)_dispatch_thread_self(), +#endif + .dc_other = (void*)sema, }; - _dispatch_queue_push(dq, (void *)&dss); + _dispatch_queue_push_wakeup(dq, &dss, wakeup); _dispatch_thread_semaphore_wait(sema); _dispatch_put_thread_semaphore(sema); @@ -1663,29 +1949,18 @@ _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) } else { _dispatch_function_invoke(dq, ctxt, func); } - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { _dispatch_wakeup(dq); } } -DISPATCH_NOINLINE -static void -_dispatch_sync_f_slow2(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { - _dispatch_wakeup(dq); - } - _dispatch_sync_f_slow(dq, ctxt, func); -} - DISPATCH_NOINLINE static void _dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { _dispatch_function_invoke(dq, ctxt, func); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { 
_dispatch_wakeup(dq); } } @@ -1696,22 +1971,23 @@ _dispatch_sync_f_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { _dispatch_function_recurse(dq, ctxt, func); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { _dispatch_wakeup(dq); } } -DISPATCH_NOINLINE -static void +static inline void _dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { // 1) ensure that this thread hasn't enqueued anything ahead of this call // 2) the queue is not suspended if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_sync_f_slow(dq, ctxt, func); + return _dispatch_sync_f_slow(dq, ctxt, func, false); } - if (slowpath(dispatch_atomic_add2o(dq, dq_running, 2) & 1)) { - return _dispatch_sync_f_slow2(dq, ctxt, func); + uint32_t running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + if (slowpath(running & 1)) { + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); + return _dispatch_sync_f_slow(dq, ctxt, func, running == 0); } if (slowpath(dq->do_targetq->do_targetq)) { return _dispatch_sync_f_recurse(dq, ctxt, func); @@ -1727,8 +2003,8 @@ dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) return dispatch_barrier_sync_f(dq, ctxt, func); } if (slowpath(!dq->do_targetq)) { - // the global root queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2); + // the global concurrent queues do not need strict ordering + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); return _dispatch_sync_f_invoke(dq, ctxt, func); } _dispatch_sync_f2(dq, ctxt, func); @@ -1748,8 +2024,7 @@ _dispatch_sync_slow(dispatch_queue_t dq, void (^work)(void)) dispatch_block_t block = _dispatch_Block_copy(work); return dispatch_sync_f(dq, block, _dispatch_call_block_and_release); } - struct Block_basic *bb = (void *)work; - dispatch_sync_f(dq, work, 
(dispatch_function_t)bb->Block_invoke); + dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif @@ -1757,37 +2032,29 @@ void dispatch_sync(dispatch_queue_t dq, void (^work)(void)) { #if DISPATCH_COCOA_COMPAT - if (slowpath(dq == &_dispatch_main_q)) { + if (slowpath(dq->dq_is_thread_bound)) { return _dispatch_sync_slow(dq, work); } #endif - struct Block_basic *bb = (void *)work; - dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); + dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif #pragma mark - #pragma mark dispatch_after -struct _dispatch_after_time_s { - void *datc_ctxt; - void (*datc_func)(void *); - dispatch_source_t ds; -}; - -static void +void _dispatch_after_timer_callback(void *ctxt) { - struct _dispatch_after_time_s *datc = ctxt; - - dispatch_assert(datc->datc_func); - _dispatch_client_callout(datc->datc_ctxt, datc->datc_func); - - dispatch_source_t ds = datc->ds; - free(datc); - - dispatch_source_cancel(ds); // Needed until 7287561 gets integrated + dispatch_continuation_t dc = ctxt, dc1; + dispatch_source_t ds = dc->dc_data; + dc1 = _dispatch_continuation_free_cacheonly(dc); + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + dispatch_source_cancel(ds); dispatch_release(ds); + if (slowpath(dc1)) { + _dispatch_continuation_free_to_cache_limit(dc1); + } } DISPATCH_NOINLINE @@ -1795,8 +2062,7 @@ void dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, dispatch_function_t func) { - uint64_t delta; - struct _dispatch_after_time_s *datc = NULL; + uint64_t delta, leeway; dispatch_source_t ds; if (when == DISPATCH_TIME_FOREVER) { @@ -1807,25 +2073,27 @@ dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, return; } - // this function can and should be optimized to not use a dispatch source delta = _dispatch_timeout(when); if (delta == 0) { return dispatch_async_f(queue, ctxt, func); } - // on successful creation, source owns malloc-ed context (which it frees in - // the 
event handler) + leeway = delta / 10; // + if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; + if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; + + // this function can and should be optimized to not use a dispatch source ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); dispatch_assert(ds); - datc = malloc(sizeof(*datc)); - dispatch_assert(datc); - datc->datc_ctxt = ctxt; - datc->datc_func = func; - datc->ds = ds; + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_data = ds; - dispatch_set_context(ds, datc); + dispatch_set_context(ds, dc); dispatch_source_set_event_handler_f(ds, _dispatch_after_timer_callback); - dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, 0); + dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway); dispatch_resume(ds); } @@ -1848,7 +2116,7 @@ dispatch_after(dispatch_time_t when, dispatch_queue_t queue, #endif #pragma mark - -#pragma mark dispatch_wakeup +#pragma mark dispatch_queue_push DISPATCH_NOINLINE static void @@ -1872,8 +2140,8 @@ void _dispatch_queue_push_list_slow(dispatch_queue_t dq, struct dispatch_object_s *obj, unsigned int n) { - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_TYPE) { - dq->dq_items_head = obj; + if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { + dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); return _dispatch_queue_wakeup_global2(dq, n); } _dispatch_queue_push_list_slow2(dq, obj); @@ -1884,61 +2152,89 @@ void _dispatch_queue_push_slow(dispatch_queue_t dq, struct dispatch_object_s *obj) { - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_TYPE) { - dq->dq_items_head = obj; + if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { + dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); return _dispatch_queue_wakeup_global(dq); } _dispatch_queue_push_list_slow2(dq, obj); } 
+#pragma mark - +#pragma mark dispatch_queue_probe + +unsigned long +_dispatch_queue_probe(dispatch_queue_t dq) +{ + return (unsigned long)slowpath(dq->dq_items_tail != NULL); +} + +#if DISPATCH_COCOA_COMPAT +unsigned long +_dispatch_runloop_queue_probe(dispatch_queue_t dq) +{ + if (_dispatch_queue_probe(dq)) { + if (dq->do_xref_cnt == -1) return true; // + return _dispatch_runloop_queue_wakeup(dq); + } + return false; +} +#endif + +unsigned long +_dispatch_mgr_queue_probe(dispatch_queue_t dq) +{ + if (_dispatch_queue_probe(dq)) { + return _dispatch_mgr_wakeup(dq); + } + return false; +} + +unsigned long +_dispatch_root_queue_probe(dispatch_queue_t dq) +{ + _dispatch_queue_wakeup_global(dq); + return false; +} + +#pragma mark - +#pragma mark dispatch_wakeup + // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou) { - dispatch_queue_t tq; - if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { return NULL; } - if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) { + if (!dx_probe(dou._do)) { return NULL; } - - // _dispatch_source_invoke() relies on this testing the whole suspend count - // word, not just the lock bit. In other words, no point taking the lock - // if the source is suspended or canceled. 
if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0, - DISPATCH_OBJECT_SUSPEND_LOCK)) { + DISPATCH_OBJECT_SUSPEND_LOCK, release)) { #if DISPATCH_COCOA_COMPAT if (dou._dq == &_dispatch_main_q) { - return _dispatch_queue_wakeup_main(); + return _dispatch_main_queue_wakeup(); } #endif return NULL; } - dispatch_atomic_acquire_barrier(); _dispatch_retain(dou._do); - tq = dou._do->do_targetq; + dispatch_queue_t tq = dou._do->do_targetq; _dispatch_queue_push(tq, dou._do); return tq; // libdispatch does not need this, but the Instrument DTrace // probe does } #if DISPATCH_COCOA_COMPAT -DISPATCH_NOINLINE -dispatch_queue_t -_dispatch_queue_wakeup_main(void) +static inline void +_dispatch_runloop_queue_wakeup_thread(dispatch_queue_t dq) { - kern_return_t kr; - - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, - _dispatch_main_q_port_init); - if (!main_q_port) { - return NULL; + mach_port_t mp = (mach_port_t)dq->do_ctxt; + if (!mp) { + return; } - kr = _dispatch_send_wakeup_main_thread(main_q_port, 0); - + kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); switch (kr) { case MACH_SEND_TIMEOUT: case MACH_SEND_TIMED_OUT: @@ -1948,6 +2244,27 @@ _dispatch_queue_wakeup_main(void) (void)dispatch_assume_zero(kr); break; } +} + +DISPATCH_NOINLINE DISPATCH_WEAK +unsigned long +_dispatch_runloop_queue_wakeup(dispatch_queue_t dq) +{ + _dispatch_runloop_queue_wakeup_thread(dq); + return false; +} + +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_main_queue_wakeup(void) +{ + dispatch_queue_t dq = &_dispatch_main_q; + if (!dq->dq_is_thread_bound) { + return NULL; + } + dispatch_once_f(&_dispatch_main_q_port_pred, dq, + _dispatch_runloop_queue_port_init); + _dispatch_runloop_queue_wakeup_thread(dq); return NULL; } #endif @@ -1957,22 +2274,24 @@ static void _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) { static dispatch_once_t pred; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + dispatch_root_queue_context_t qc = dq->do_ctxt; 
+ uint32_t i = n; int r; - dispatch_debug_queue(dq, __func__); + _dispatch_debug_root_queue(dq, __func__); dispatch_once_f(&pred, NULL, _dispatch_root_queues_init); #if HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL if (qc->dgq_kworkqueue != (void*)(~0ul)) #endif { - _dispatch_debug("requesting new worker thread"); + _dispatch_root_queue_debug("requesting new worker thread for global " + "queue: %p", dq); #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK if (qc->dgq_kworkqueue) { pthread_workitem_handle_t wh; - unsigned int gen_cnt, i = n; + unsigned int gen_cnt; do { r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, _dispatch_worker_thread3, dq, &wh, &gen_cnt); @@ -1983,55 +2302,70 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, - qc->dgq_wq_options, n); + qc->dgq_wq_options, (int)i); (void)dispatch_assume_zero(r); #endif return; } #endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL - if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { - return; +#if DISPATCH_USE_PTHREAD_POOL + if (fastpath(qc->dgq_thread_mediator)) { + while (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { + if (!--i) { + return; + } + } } - - pthread_t pthr; - int t_count; + uint32_t j, t_count = qc->dgq_thread_pool_size; do { - t_count = qc->dgq_thread_pool_size; if (!t_count) { - _dispatch_debug("The thread pool is full: %p", dq); + _dispatch_root_queue_debug("pthread pool is full for root queue: " + "%p", dq); return; } - } while (!dispatch_atomic_cmpxchg2o(qc, dgq_thread_pool_size, t_count, - t_count - 1)); + j = i > t_count ? 
t_count : i; + } while (!dispatch_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, + t_count - j, &t_count, relaxed)); - while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) { - if (r != EAGAIN) { + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + pthread_attr_t *attr = pqc ? &pqc->dpq_thread_attr : NULL; + pthread_t tid, *pthr = &tid; +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES + if (slowpath(dq == &_dispatch_mgr_root_queue)) { + pthr = _dispatch_mgr_root_queue_init(); + } +#endif + do { + _dispatch_retain(dq); + while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) { + if (r != EAGAIN) { + (void)dispatch_assume_zero(r); + } + _dispatch_temporary_resource_shortage(); + } + if (!attr) { + r = pthread_detach(*pthr); (void)dispatch_assume_zero(r); } - sleep(1); - } - r = pthread_detach(pthr); - (void)dispatch_assume_zero(r); -#endif // DISPATCH_ENABLE_THREAD_POOL + } while (--j); +#endif // DISPATCH_USE_PTHREAD_POOL } static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) { - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; - if (!dq->dq_items_tail) { return; } #if HAVE_PTHREAD_WORKQUEUES + dispatch_root_queue_context_t qc = dq->do_ctxt; if ( -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL (qc->dgq_kworkqueue != (void*)(~0ul)) && #endif - !dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, n)) { - _dispatch_debug("work thread request still pending on global queue: " - "%p", dq); + !dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, n, relaxed)) { + _dispatch_root_queue_debug("worker thread request still pending for " + "global queue: %p", dq); return; } #endif // HAVE_PTHREAD_WORKQUEUES @@ -2044,15 +2378,24 @@ _dispatch_queue_wakeup_global(dispatch_queue_t dq) return _dispatch_queue_wakeup_global2(dq, 1); } -bool -_dispatch_queue_probe_root(dispatch_queue_t dq) +#pragma mark - +#pragma mark dispatch_queue_invoke + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t 
+dispatch_queue_invoke2(dispatch_object_t dou, + _dispatch_thread_semaphore_t *sema_ptr) { - _dispatch_queue_wakeup_global2(dq, 1); - return false; -} + dispatch_queue_t dq = dou._dq; + dispatch_queue_t otq = dq->do_targetq; + *sema_ptr = _dispatch_queue_drain(dq); -#pragma mark - -#pragma mark dispatch_queue_drain + if (slowpath(otq != dq->do_targetq)) { + // An item on the queue changed the target queue + return dq->do_targetq; + } + return NULL; +} // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol @@ -2060,46 +2403,47 @@ DISPATCH_NOINLINE void _dispatch_queue_invoke(dispatch_queue_t dq) { - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && - fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) { - dispatch_atomic_acquire_barrier(); - dispatch_queue_t otq = dq->do_targetq, tq = NULL; - _dispatch_thread_semaphore_t sema = _dispatch_queue_drain(dq); - if (dq->do_vtable->do_invoke) { - // Assume that object invoke checks it is executing on correct queue - tq = dx_invoke(dq); - } else if (slowpath(otq != dq->do_targetq)) { - // An item on the queue changed the target queue - tq = dq->do_targetq; - } - // We do not need to check the result. - // When the suspend-count lock is dropped, then the check will happen. 
- dispatch_atomic_release_barrier(); - (void)dispatch_atomic_dec2o(dq, dq_running); - if (sema) { - _dispatch_thread_semaphore_signal(sema); - } else if (tq) { - return _dispatch_queue_push(tq, dq); - } + _dispatch_queue_class_invoke(dq, dispatch_queue_invoke2); +} + +#pragma mark - +#pragma mark dispatch_queue_drain + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s* +_dispatch_queue_head(dispatch_queue_t dq) +{ + struct dispatch_object_s *dc; + while (!(dc = fastpath(dq->dq_items_head))) { + dispatch_hardware_pause(); } + return dc; +} - dq->do_next = DISPATCH_OBJECT_LISTLESS; - dispatch_atomic_release_barrier(); - if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK)) { - if (dq->dq_running == 0) { - _dispatch_wakeup(dq); // verify that the queue is idle +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s* +_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) +{ + struct dispatch_object_s *next_dc; + next_dc = fastpath(dc->do_next); + dq->dq_items_head = next_dc; + if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL, + relaxed)) { + // Enqueue is TIGHTLY controlled, we won't wait long. 
+ while (!(next_dc = fastpath(dc->do_next))) { + dispatch_hardware_pause(); } + dq->dq_items_head = next_dc; } - _dispatch_release(dq); // added when the queue is put on the list + return next_dc; } -static _dispatch_thread_semaphore_t -_dispatch_queue_drain(dispatch_queue_t dq) +_dispatch_thread_semaphore_t +_dispatch_queue_drain(dispatch_object_t dou) { - dispatch_queue_t orig_tq, old_dq; + dispatch_queue_t dq = dou._dq, orig_tq, old_dq; old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - struct dispatch_object_s *dc = NULL, *next_dc = NULL; + struct dispatch_object_s *dc, *next_dc; _dispatch_thread_semaphore_t sema = 0; // Continue draining sources after target queue change rdar://8928171 @@ -2111,19 +2455,8 @@ _dispatch_queue_drain(dispatch_queue_t dq) //dispatch_debug_queue(dq, __func__); while (dq->dq_items_tail) { - while (!(dc = fastpath(dq->dq_items_head))) { - _dispatch_hardware_pause(); - } - dq->dq_items_head = NULL; + dc = _dispatch_queue_head(dq); do { - next_dc = fastpath(dc->do_next); - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL)) { - // Enqueue is TIGHTLY controlled, we won't wait long. 
- while (!(next_dc = fastpath(dc->do_next))) { - _dispatch_hardware_pause(); - } - } if (DISPATCH_OBJECT_SUSPENDED(dq)) { goto out; } @@ -2133,6 +2466,7 @@ _dispatch_queue_drain(dispatch_queue_t dq) if (slowpath(orig_tq != dq->do_targetq) && check_tq) { goto out; } + bool redirect = false; if (!fastpath(dq->dq_width == 1)) { if (!DISPATCH_OBJ_IS_VTABLE(dc) && (long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { @@ -2140,113 +2474,87 @@ _dispatch_queue_drain(dispatch_queue_t dq) goto out; } } else { - _dispatch_continuation_redirect(dq, dc); - continue; + redirect = true; } } + next_dc = _dispatch_queue_next(dq, dc); + if (redirect) { + _dispatch_continuation_redirect(dq, dc); + continue; + } if ((sema = _dispatch_barrier_sync_f_pop(dq, dc, true))) { - dc = next_dc; goto out; } _dispatch_continuation_pop(dc); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); } while ((dc = next_dc)); } out: - // if this is not a complete drain, we must undo some things - if (slowpath(dc)) { - // 'dc' must NOT be "popped" - // 'dc' might be the last item - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, NULL, dc)) { - // wait for enqueue slow path to finish - while (!(next_dc = fastpath(dq->dq_items_head))) { - _dispatch_hardware_pause(); - } - dc->do_next = next_dc; - } - dq->dq_items_head = dc; - } - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); return sema; } -static void -_dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq) -{ -#if DISPATCH_PERF_MON - uint64_t start = _dispatch_absolute_time(); -#endif - _dispatch_thread_semaphore_t sema = _dispatch_queue_drain(dq); - if (sema) { - dispatch_atomic_barrier(); - _dispatch_thread_semaphore_signal(sema); - } -#if DISPATCH_PERF_MON - _dispatch_queue_merge_stats(start); -#endif - _dispatch_force_cache_cleanup(); -} - #if DISPATCH_COCOA_COMPAT -void +static void _dispatch_main_queue_drain(void) { dispatch_queue_t dq = &_dispatch_main_q; if (!dq->dq_items_tail) { return; } - struct 
dispatch_main_queue_drain_marker_s { - DISPATCH_CONTINUATION_HEADER(main_queue_drain_marker); - } marker = { + struct dispatch_continuation_s marker = { .do_vtable = NULL, }; struct dispatch_object_s *dmarker = (void*)▮ _dispatch_queue_push_notrace(dq, dmarker); -#if DISPATCH_PERF_MON - uint64_t start = _dispatch_absolute_time(); -#endif + _dispatch_perfmon_start(); dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); - struct dispatch_object_s *dc = NULL, *next_dc = NULL; - while (dq->dq_items_tail) { - while (!(dc = fastpath(dq->dq_items_head))) { - _dispatch_hardware_pause(); + struct dispatch_object_s *dc, *next_dc; + dc = _dispatch_queue_head(dq); + do { + next_dc = _dispatch_queue_next(dq, dc); + if (dc == dmarker) { + goto out; } - dq->dq_items_head = NULL; - do { - next_dc = fastpath(dc->do_next); - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL)) { - // Enqueue is TIGHTLY controlled, we won't wait long. 
- while (!(next_dc = fastpath(dc->do_next))) { - _dispatch_hardware_pause(); - } - } - if (dc == dmarker) { - if (next_dc) { - dq->dq_items_head = next_dc; - _dispatch_queue_wakeup_main(); - } - goto out; - } - _dispatch_continuation_pop(dc); - _dispatch_workitem_inc(); - } while ((dc = next_dc)); - } - dispatch_assert(dc); // did not encounter marker + _dispatch_continuation_pop(dc); + _dispatch_perfmon_workitem_inc(); + } while ((dc = next_dc)); + DISPATCH_CRASH("Main queue corruption"); out: + if (next_dc) { + _dispatch_main_queue_wakeup(); + } _dispatch_thread_setspecific(dispatch_queue_key, old_dq); -#if DISPATCH_PERF_MON - _dispatch_queue_merge_stats(start); -#endif + _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); } + +static bool +_dispatch_runloop_queue_drain_one(dispatch_queue_t dq) +{ + if (!dq->dq_items_tail) { + return false; + } + _dispatch_perfmon_start(); + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + + struct dispatch_object_s *dc, *next_dc; + dc = _dispatch_queue_head(dq); + next_dc = _dispatch_queue_next(dq, dc); + _dispatch_continuation_pop(dc); + _dispatch_perfmon_workitem_inc(); + + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_perfmon_end(); + _dispatch_force_cache_cleanup(); + return next_dc; +} #endif DISPATCH_ALWAYS_INLINE_NDEBUG @@ -2254,7 +2562,7 @@ static inline _dispatch_thread_semaphore_t _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) { // rdar://problem/8290662 "lock transfer" - struct dispatch_object_s *dc, *next_dc; + struct dispatch_object_s *dc; _dispatch_thread_semaphore_t sema; // queue is locked, or suspended and not being drained @@ -2263,58 +2571,122 @@ _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) return 0; } // dequeue dc, it is a barrier sync - next_dc = fastpath(dc->do_next); - dq->dq_items_head = next_dc; - if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, 
NULL)) { - // Enqueue is TIGHTLY controlled, we won't wait long. - while (!(next_dc = fastpath(dc->do_next))) { - _dispatch_hardware_pause(); - } - dq->dq_items_head = next_dc; - } + (void)_dispatch_queue_next(dq, dc); return sema; } -#ifndef DISPATCH_HEAD_CONTENTION_SPINS -#define DISPATCH_HEAD_CONTENTION_SPINS 10000 +void +_dispatch_mgr_queue_drain(void) +{ + dispatch_queue_t dq = &_dispatch_mgr_q; + if (!dq->dq_items_tail) { + return _dispatch_force_cache_cleanup(); + } + _dispatch_perfmon_start(); + if (slowpath(_dispatch_queue_drain(dq))) { + DISPATCH_CRASH("Sync onto manager queue"); + } + _dispatch_perfmon_end(); + _dispatch_force_cache_cleanup(); +} + +#pragma mark - +#pragma mark dispatch_root_queue_drain + +#ifndef DISPATCH_CONTENTION_USE_RAND +#define DISPATCH_CONTENTION_USE_RAND (!TARGET_OS_EMBEDDED) +#endif +#ifndef DISPATCH_CONTENTION_SPINS_MAX +#define DISPATCH_CONTENTION_SPINS_MAX (128 - 1) +#endif +#ifndef DISPATCH_CONTENTION_SPINS_MIN +#define DISPATCH_CONTENTION_SPINS_MIN (32 - 1) +#endif +#ifndef DISPATCH_CONTENTION_USLEEP_START +#define DISPATCH_CONTENTION_USLEEP_START 500 #endif +#ifndef DISPATCH_CONTENTION_USLEEP_MAX +#define DISPATCH_CONTENTION_USLEEP_MAX 100000 +#endif + +DISPATCH_NOINLINE +static bool +_dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) +{ + dispatch_root_queue_context_t qc = dq->do_ctxt; + struct dispatch_object_s *const mediator = (void *)~0ul; + bool pending = false, available = true; + unsigned int spins, sleep_time = DISPATCH_CONTENTION_USLEEP_START; -static struct dispatch_object_s * + do { + // Spin for a short while in case the contention is temporary -- e.g. + // when starting up after dispatch_apply, or when executing a few + // short continuations in a row. +#if DISPATCH_CONTENTION_USE_RAND + // Use randomness to prevent threads from resonating at the same + // frequency and permanently contending. All threads sharing the same + // seed value is safe with the FreeBSD rand_r implementation. 
+ static unsigned int seed; + spins = (rand_r(&seed) & DISPATCH_CONTENTION_SPINS_MAX) | + DISPATCH_CONTENTION_SPINS_MIN; +#else + spins = DISPATCH_CONTENTION_SPINS_MIN + + (DISPATCH_CONTENTION_SPINS_MAX-DISPATCH_CONTENTION_SPINS_MIN)/2; +#endif + while (spins--) { + dispatch_hardware_pause(); + if (fastpath(dq->dq_items_head != mediator)) goto out; + }; + // Since we have serious contention, we need to back off. + if (!pending) { + // Mark this queue as pending to avoid requests for further threads + (void)dispatch_atomic_inc2o(qc, dgq_pending, relaxed); + pending = true; + } + _dispatch_contention_usleep(sleep_time); + if (fastpath(dq->dq_items_head != mediator)) goto out; + sleep_time *= 2; + } while (sleep_time < DISPATCH_CONTENTION_USLEEP_MAX); + + // The ratio of work to libdispatch overhead must be bad. This + // scenario implies that there are too many threads in the pool. + // Create a new pending thread and then exit this thread. + // The kernel will grant a new thread when the load subsides. + _dispatch_debug("contention on global queue: %p", dq); + _dispatch_queue_wakeup_global(dq); + available = false; +out: + if (pending) { + (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + } + return available; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline struct dispatch_object_s * _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) { struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; start: // The mediator value acts both as a "lock" and a signal - head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator); + head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator, relaxed); if (slowpath(head == NULL)) { // The first xchg on the tail will tell the enqueueing thread that it // is safe to blindly write out to the head pointer. A cmpxchg honors // the algorithm. 
- (void)dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL); - _dispatch_debug("no work on global work queue"); + (void)dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL, + relaxed); + _dispatch_root_queue_debug("no work on global queue: %p", dq); return NULL; } if (slowpath(head == mediator)) { // This thread lost the race for ownership of the queue. - // Spin for a short while in case many threads have started draining at - // once as part of a dispatch_apply - unsigned int i = DISPATCH_HEAD_CONTENTION_SPINS; - do { - _dispatch_hardware_pause(); - if (dq->dq_items_head != mediator) goto start; - } while (--i); - // The ratio of work to libdispatch overhead must be bad. This - // scenario implies that there are too many threads in the pool. - // Create a new pending thread and then exit this thread. - // The kernel will grant a new thread when the load subsides. - _dispatch_debug("Contention on queue: %p", dq); - _dispatch_queue_wakeup_global(dq); -#if DISPATCH_PERF_MON - dispatch_atomic_inc(&_dispatch_bad_ratio); -#endif + if (fastpath(_dispatch_queue_concurrent_drain_one_slow(dq))) { + goto start; + } return NULL; } @@ -2323,34 +2695,28 @@ start: next = fastpath(head->do_next); if (slowpath(!next)) { - dq->dq_items_head = NULL; + dispatch_atomic_store2o(dq, dq_items_head, NULL, relaxed); - if (dispatch_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL)) { + if (dispatch_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, relaxed)) { // both head and tail are NULL now goto out; } // There must be a next item now. This thread won't wait long. 
while (!(next = head->do_next)) { - _dispatch_hardware_pause(); + dispatch_hardware_pause(); } } - dq->dq_items_head = next; + dispatch_atomic_store2o(dq, dq_items_head, next, relaxed); _dispatch_queue_wakeup_global(dq); out: return head; } -#pragma mark - -#pragma mark dispatch_worker_thread - static void -_dispatch_worker_thread4(dispatch_queue_t dq) +_dispatch_root_queue_drain(dispatch_queue_t dq) { - struct dispatch_object_s *item; - - #if DISPATCH_DEBUG if (_dispatch_thread_getspecific(dispatch_queue_key)) { DISPATCH_CRASH("Premature thread recycling"); @@ -2359,7 +2725,6 @@ _dispatch_worker_thread4(dispatch_queue_t dq) _dispatch_thread_setspecific(dispatch_queue_key, dq); #if DISPATCH_COCOA_COMPAT - (void)dispatch_atomic_inc(&_dispatch_worker_threads); // ensure that high-level memory management techniques do not leak/crash if (dispatch_begin_thread_4GC) { dispatch_begin_thread_4GC(); @@ -2367,44 +2732,40 @@ _dispatch_worker_thread4(dispatch_queue_t dq) void *pool = _dispatch_autorelease_pool_push(); #endif // DISPATCH_COCOA_COMPAT -#if DISPATCH_PERF_MON - uint64_t start = _dispatch_absolute_time(); -#endif + _dispatch_perfmon_start(); + struct dispatch_object_s *item; while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { _dispatch_continuation_pop(item); } -#if DISPATCH_PERF_MON - _dispatch_queue_merge_stats(start); -#endif + _dispatch_perfmon_end(); #if DISPATCH_COCOA_COMPAT _dispatch_autorelease_pool_pop(pool); if (dispatch_end_thread_4GC) { dispatch_end_thread_4GC(); } - if (!dispatch_atomic_dec(&_dispatch_worker_threads) && - dispatch_no_worker_threads_4GC) { - dispatch_no_worker_threads_4GC(); - } #endif // DISPATCH_COCOA_COMPAT _dispatch_thread_setspecific(dispatch_queue_key, NULL); - - _dispatch_force_cache_cleanup(); - } -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#pragma mark - +#pragma mark dispatch_worker_thread + +#if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread3(void *context) { dispatch_queue_t dq = context; - 
struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + dispatch_root_queue_context_t qc = dq->do_ctxt; + + _dispatch_introspection_thread_add(); + + (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + _dispatch_root_queue_drain(dq); + __asm__(""); // prevent tailcall (for Instrument DTrace probe) - (void)dispatch_atomic_dec2o(qc, dgq_pending); - _dispatch_worker_thread4(dq); } -#endif #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP // 6618342 Contact the team that owns the Instrument DTrace probe before @@ -2416,40 +2777,47 @@ _dispatch_worker_thread2(int priority, int options, dispatch_assert(priority >= 0 && priority < WORKQ_NUM_PRIOQUEUE); dispatch_assert(!(options & ~WORKQ_ADDTHREADS_OPTION_OVERCOMMIT)); dispatch_queue_t dq = _dispatch_wq2root_queues[priority][options]; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; - (void)dispatch_atomic_dec2o(qc, dgq_pending); - _dispatch_worker_thread4(dq); + return _dispatch_worker_thread3(dq); } -#endif +#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol static void * _dispatch_worker_thread(void *context) { dispatch_queue_t dq = context; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + dispatch_root_queue_context_t qc = dq->do_ctxt; + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + + if (pqc && pqc->dpq_thread_configure) { + pqc->dpq_thread_configure(); + } + sigset_t mask; int r; - // workaround tweaks the kernel workqueue does for us r = sigfillset(&mask); (void)dispatch_assume_zero(r); r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL); (void)dispatch_assume_zero(r); + _dispatch_introspection_thread_add(); + + // Non-pthread-root-queue pthreads use a 65 second timeout in case there + // are any timers that run once a minute + const int64_t timeout = (pqc ? 
5ull : 65ull) * NSEC_PER_SEC; do { - _dispatch_worker_thread4(dq); - // we use 65 seconds in case there are any timers that run once a minute + _dispatch_root_queue_drain(dq); } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, - dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0); + dispatch_time(0, timeout)) == 0); - (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size); - if (dq->dq_items_tail) { - _dispatch_queue_wakeup_global(dq); - } + (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size, relaxed); + _dispatch_queue_wakeup_global(dq); + _dispatch_release(dq); return NULL; } @@ -2482,38 +2850,146 @@ _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset) return pthread_sigmask(how, set, oset); } -#endif +#endif // DISPATCH_USE_PTHREAD_POOL #pragma mark - -#pragma mark dispatch_main_queue +#pragma mark dispatch_runloop_queue static bool _dispatch_program_is_probably_callback_driven; #if DISPATCH_COCOA_COMPAT + +dispatch_queue_t +_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) +{ + dispatch_queue_t dq; + size_t dqs; + + if (slowpath(flags)) { + return NULL; + } + dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; + dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs); + _dispatch_queue_init(dq); + dq->do_targetq = _dispatch_get_root_queue(0, true); + dq->dq_label = label ? 
label : "runloop-queue"; // no-copy contract + dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; + dq->dq_running = 1; + dq->dq_is_thread_bound = 1; + _dispatch_runloop_queue_port_init(dq); + _dispatch_queue_set_bound_thread(dq); + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_introspection_queue_create(dq); +} + +void +_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) +{ + _dispatch_object_debug(dq, "%s", __func__); + (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); + unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, release); + _dispatch_queue_clear_bound_thread(dq); + if (suspend_cnt == 0) { + _dispatch_wakeup(dq); + } +} + +void +_dispatch_runloop_queue_dispose(dispatch_queue_t dq) +{ + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_queue_dispose(dq); + _dispatch_runloop_queue_port_dispose(dq); + _dispatch_queue_destroy(dq); +} + +bool +_dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) +{ + if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { + DISPATCH_CLIENT_CRASH("Not a runloop queue"); + } + dispatch_retain(dq); + bool r = _dispatch_runloop_queue_drain_one(dq); + dispatch_release(dq); + return r; +} + +void +_dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) +{ + if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { + DISPATCH_CLIENT_CRASH("Not a runloop queue"); + } + _dispatch_runloop_queue_probe(dq); +} + +mach_port_t +_dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) +{ + if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { + DISPATCH_CLIENT_CRASH("Not a runloop queue"); + } + return (mach_port_t)dq->do_ctxt; +} + static void -_dispatch_main_q_port_init(void *ctxt DISPATCH_UNUSED) +_dispatch_runloop_queue_port_init(void *ctxt) { + dispatch_queue_t dq = (dispatch_queue_t)ctxt; + mach_port_t mp; kern_return_t kr; _dispatch_safe_fork = false; - kr = 
mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &main_q_port); + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &mp); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); - kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, + kr = mach_port_insert_right(mach_task_self(), mp, mp, MACH_MSG_TYPE_MAKE_SEND); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); + if (dq != &_dispatch_main_q) { + struct mach_port_limits limits = { + .mpl_qlimit = 1, + }; + kr = mach_port_set_attributes(mach_task_self(), mp, + MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, + sizeof(limits)); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + } + dq->do_ctxt = (void*)(uintptr_t)mp; _dispatch_program_is_probably_callback_driven = true; } +static void +_dispatch_runloop_queue_port_dispose(dispatch_queue_t dq) +{ + mach_port_t mp = (mach_port_t)dq->do_ctxt; + if (!mp) { + return; + } + dq->do_ctxt = NULL; + kern_return_t kr = mach_port_deallocate(mach_task_self(), mp); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +#pragma mark - +#pragma mark dispatch_main_queue + mach_port_t _dispatch_get_main_queue_port_4CF(void) { - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, - _dispatch_main_q_port_init); - return main_q_port; + dispatch_queue_t dq = &_dispatch_main_q; + dispatch_once_f(&_dispatch_main_q_port_pred, dq, + _dispatch_runloop_queue_port_init); + return (mach_port_t)dq->do_ctxt; } static bool main_q_is_draining; @@ -2546,6 +3022,7 @@ dispatch_main(void) #if HAVE_PTHREAD_MAIN_NP if (pthread_main_np()) { #endif + _dispatch_object_debug(&_dispatch_main_q, "%s", __func__); _dispatch_program_is_probably_callback_driven = true; pthread_exit(NULL); DISPATCH_CRASH("pthread_exit() returned"); @@ -2561,10 +3038,6 @@ _dispatch_sigsuspend(void) { static const 
sigset_t mask; -#if DISPATCH_COCOA_COMPAT - // Do not count the signal handling thread as a worker thread - (void)dispatch_atomic_dec(&_dispatch_worker_threads); -#endif for (;;) { sigsuspend(&mask); } @@ -2583,12 +3056,13 @@ DISPATCH_NOINLINE static void _dispatch_queue_cleanup2(void) { - (void)dispatch_atomic_dec(&_dispatch_main_q.dq_running); - - dispatch_atomic_release_barrier(); - if (dispatch_atomic_sub2o(&_dispatch_main_q, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { - _dispatch_wakeup(&_dispatch_main_q); + dispatch_queue_t dq = &_dispatch_main_q; + (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); + unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, release); + dq->dq_is_thread_bound = 0; + if (suspend_cnt == 0) { + _dispatch_wakeup(dq); } // overload the "probably" variable to mean that dispatch_main() or @@ -2601,23 +3075,9 @@ _dispatch_queue_cleanup2(void) } #if DISPATCH_COCOA_COMPAT - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, - _dispatch_main_q_port_init); - - mach_port_t mp = main_q_port; - kern_return_t kr; - - main_q_port = 0; - - if (mp) { - kr = mach_port_deallocate(mach_task_self(), mp); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, - -1); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - } + dispatch_once_f(&_dispatch_main_q_port_pred, dq, + _dispatch_runloop_queue_port_init); + _dispatch_runloop_queue_port_dispose(dq); #endif } @@ -2630,350 +3090,3 @@ _dispatch_queue_cleanup(void *ctxt) // POSIX defines that destructors are only called if 'ctxt' is non-null DISPATCH_CRASH("Premature thread exit while a dispatch queue is running"); } - -#pragma mark - -#pragma mark dispatch_manager_queue - -static unsigned int _dispatch_select_workaround; -static fd_set _dispatch_rfds; -static fd_set _dispatch_wfds; -static void **_dispatch_rfd_ptrs; -static void **_dispatch_wfd_ptrs; - 
-static int _dispatch_kq; - -static void -_dispatch_get_kq_init(void *context DISPATCH_UNUSED) -{ - static const struct kevent kev = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - }; - - _dispatch_safe_fork = false; - _dispatch_kq = kqueue(); - if (_dispatch_kq == -1) { - DISPATCH_CLIENT_CRASH("kqueue() create failed: " - "probably out of file descriptors"); - } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { - // in case we fall back to select() - FD_SET(_dispatch_kq, &_dispatch_rfds); - } - - (void)dispatch_assume_zero(kevent(_dispatch_kq, &kev, 1, NULL, 0, NULL)); - - _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); -} - -static int -_dispatch_get_kq(void) -{ - static dispatch_once_t pred; - - dispatch_once_f(&pred, NULL, _dispatch_get_kq_init); - - return _dispatch_kq; -} - -long -_dispatch_update_kq(const struct kevent *kev) -{ - int rval; - struct kevent kev_copy = *kev; - // This ensures we don't get a pending kevent back while registering - // a new kevent - kev_copy.flags |= EV_RECEIPT; - - if (_dispatch_select_workaround && (kev_copy.flags & EV_DELETE)) { - // Only executed on manager queue - switch (kev_copy.filter) { - case EVFILT_READ: - if (kev_copy.ident < FD_SETSIZE && - FD_ISSET((int)kev_copy.ident, &_dispatch_rfds)) { - FD_CLR((int)kev_copy.ident, &_dispatch_rfds); - _dispatch_rfd_ptrs[kev_copy.ident] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - return 0; - } - break; - case EVFILT_WRITE: - if (kev_copy.ident < FD_SETSIZE && - FD_ISSET((int)kev_copy.ident, &_dispatch_wfds)) { - FD_CLR((int)kev_copy.ident, &_dispatch_wfds); - _dispatch_wfd_ptrs[kev_copy.ident] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - return 0; - } - break; - default: - break; - } - } - -retry: - rval = kevent(_dispatch_get_kq(), &kev_copy, 1, &kev_copy, 1, NULL); - if (rval == -1) { - // If we fail to register with kevents, for other reasons aside from - // changelist elements. 
- int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - _dispatch_bug_client("Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; - } - //kev_copy.flags |= EV_ERROR; - //kev_copy.data = err; - return err; - } - - // The following select workaround only applies to adding kevents - if ((kev->flags & (EV_DISABLE|EV_DELETE)) || - !(kev->flags & (EV_ADD|EV_ENABLE))) { - return 0; - } - - // Only executed on manager queue - switch (kev_copy.data) { - case 0: - return 0; - case EBADF: - break; - default: - // If an error occurred while registering with kevent, and it was - // because of a kevent changelist processing && the kevent involved - // either doing a read or write, it would indicate we were trying - // to register a /dev/* port; fall back to select - switch (kev_copy.filter) { - case EVFILT_READ: - if (dispatch_assume(kev_copy.ident < FD_SETSIZE)) { - if (!_dispatch_rfd_ptrs) { - _dispatch_rfd_ptrs = calloc(FD_SETSIZE, sizeof(void*)); - } - _dispatch_rfd_ptrs[kev_copy.ident] = kev_copy.udata; - FD_SET((int)kev_copy.ident, &_dispatch_rfds); - (void)dispatch_atomic_inc(&_dispatch_select_workaround); - _dispatch_debug("select workaround used to read fd %d: 0x%lx", - (int)kev_copy.ident, (long)kev_copy.data); - return 0; - } - break; - case EVFILT_WRITE: - if (dispatch_assume(kev_copy.ident < FD_SETSIZE)) { - if (!_dispatch_wfd_ptrs) { - _dispatch_wfd_ptrs = calloc(FD_SETSIZE, sizeof(void*)); - } - _dispatch_wfd_ptrs[kev_copy.ident] = kev_copy.udata; - FD_SET((int)kev_copy.ident, &_dispatch_wfds); - (void)dispatch_atomic_inc(&_dispatch_select_workaround); - _dispatch_debug("select workaround used to write fd %d: 0x%lx", - (int)kev_copy.ident, (long)kev_copy.data); - return 0; - } - break; - default: - // kevent error, _dispatch_source_merge_kevent() will handle it - _dispatch_source_drain_kevent(&kev_copy); - break; - } - break; - } - return kev_copy.data; -} - -bool 
-_dispatch_mgr_wakeup(dispatch_queue_t dq) -{ - static const struct kevent kev = { - .ident = 1, - .filter = EVFILT_USER, - .fflags = NOTE_TRIGGER, - }; - - _dispatch_debug("waking up the _dispatch_mgr_q: %p", dq); - - _dispatch_update_kq(&kev); - - return false; -} - -static void -_dispatch_mgr_thread2(struct kevent *kev, size_t cnt) -{ - size_t i; - - for (i = 0; i < cnt; i++) { - // EVFILT_USER isn't used by sources - if (kev[i].filter == EVFILT_USER) { - // If _dispatch_mgr_thread2() ever is changed to return to the - // caller, then this should become _dispatch_queue_drain() - _dispatch_queue_serial_drain_till_empty(&_dispatch_mgr_q); - } else { - _dispatch_source_drain_kevent(&kev[i]); - } - } -} - -#if DISPATCH_USE_VM_PRESSURE && DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE -// VM Pressure source for malloc -static dispatch_source_t _dispatch_malloc_vm_pressure_source; - -static void -_dispatch_malloc_vm_pressure_handler(void *context DISPATCH_UNUSED) -{ - malloc_zone_pressure_relief(0,0); -} - -static void -_dispatch_malloc_vm_pressure_setup(void) -{ - _dispatch_malloc_vm_pressure_source = dispatch_source_create( - DISPATCH_SOURCE_TYPE_VM, 0, DISPATCH_VM_PRESSURE, - _dispatch_get_root_queue(0, true)); - dispatch_source_set_event_handler_f(_dispatch_malloc_vm_pressure_source, - _dispatch_malloc_vm_pressure_handler); - dispatch_resume(_dispatch_malloc_vm_pressure_source); -} -#else -#define _dispatch_malloc_vm_pressure_setup() -#endif - -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_mgr_invoke(void) -{ - static const struct timespec timeout_immediately = { 0, 0 }; - struct timespec timeout; - const struct timespec *timeoutp; - struct timeval sel_timeout, *sel_timeoutp; - fd_set tmp_rfds, tmp_wfds; - struct kevent kev[1]; - int k_cnt, err, i, r; - - _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_mgr_q); -#if DISPATCH_COCOA_COMPAT - // Do not count the manager thread as a worker thread - (void)dispatch_atomic_dec(&_dispatch_worker_threads); 
-#endif - _dispatch_malloc_vm_pressure_setup(); - - for (;;) { - _dispatch_run_timers(); - - timeoutp = _dispatch_get_next_timer_fire(&timeout); - - if (_dispatch_select_workaround) { - FD_COPY(&_dispatch_rfds, &tmp_rfds); - FD_COPY(&_dispatch_wfds, &tmp_wfds); - if (timeoutp) { - sel_timeout.tv_sec = timeoutp->tv_sec; - sel_timeout.tv_usec = (typeof(sel_timeout.tv_usec)) - (timeoutp->tv_nsec / 1000u); - sel_timeoutp = &sel_timeout; - } else { - sel_timeoutp = NULL; - } - - r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, sel_timeoutp); - if (r == -1) { - err = errno; - if (err != EBADF) { - if (err != EINTR) { - (void)dispatch_assume_zero(err); - } - continue; - } - for (i = 0; i < FD_SETSIZE; i++) { - if (i == _dispatch_kq) { - continue; - } - if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, - &_dispatch_wfds)) { - continue; - } - r = dup(i); - if (r != -1) { - close(r); - } else { - if (FD_ISSET(i, &_dispatch_rfds)) { - FD_CLR(i, &_dispatch_rfds); - _dispatch_rfd_ptrs[i] = 0; - (void)dispatch_atomic_dec( - &_dispatch_select_workaround); - } - if (FD_ISSET(i, &_dispatch_wfds)) { - FD_CLR(i, &_dispatch_wfds); - _dispatch_wfd_ptrs[i] = 0; - (void)dispatch_atomic_dec( - &_dispatch_select_workaround); - } - } - } - continue; - } - - if (r > 0) { - for (i = 0; i < FD_SETSIZE; i++) { - if (i == _dispatch_kq) { - continue; - } - if (FD_ISSET(i, &tmp_rfds)) { - FD_CLR(i, &_dispatch_rfds); // emulate EV_DISABLE - EV_SET(&kev[0], i, EVFILT_READ, - EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, - _dispatch_rfd_ptrs[i]); - _dispatch_rfd_ptrs[i] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - _dispatch_mgr_thread2(kev, 1); - } - if (FD_ISSET(i, &tmp_wfds)) { - FD_CLR(i, &_dispatch_wfds); // emulate EV_DISABLE - EV_SET(&kev[0], i, EVFILT_WRITE, - EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, - _dispatch_wfd_ptrs[i]); - _dispatch_wfd_ptrs[i] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - _dispatch_mgr_thread2(kev, 1); - } - } - } - - timeoutp = 
&timeout_immediately; - } - - k_cnt = kevent(_dispatch_kq, NULL, 0, kev, sizeof(kev) / sizeof(kev[0]), - timeoutp); - err = errno; - - switch (k_cnt) { - case -1: - if (err == EBADF) { - DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); - } - if (err != EINTR) { - (void)dispatch_assume_zero(err); - } - continue; - default: - _dispatch_mgr_thread2(kev, (size_t)k_cnt); - // fall through - case 0: - _dispatch_force_cache_cleanup(); - continue; - } - } -} - -DISPATCH_NORETURN -dispatch_queue_t -_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED) -{ - // never returns, so burn bridges behind us & clear stack 2k ahead - _dispatch_clear_stack(2048); - _dispatch_mgr_invoke(); -} diff --git a/src/queue_internal.h b/src/queue_internal.h index b223cce..4f42d24 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,6 +32,42 @@ #include // for HeaderDoc #endif +#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES) +#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // +#endif + +/* x86 & cortex-a8 have a 64 byte cacheline */ +#define DISPATCH_CACHELINE_SIZE 64u +#define DISPATCH_CONTINUATION_SIZE DISPATCH_CACHELINE_SIZE +#define ROUND_UP_TO_CACHELINE_SIZE(x) \ + (((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \ + ~(DISPATCH_CACHELINE_SIZE - 1u)) +#define ROUND_UP_TO_CONTINUATION_SIZE(x) \ + (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \ + ~(DISPATCH_CONTINUATION_SIZE - 1u)) +#define ROUND_UP_TO_VECTOR_SIZE(x) \ + (((x) + 15u) & ~15u) +#define DISPATCH_CACHELINE_ALIGN \ + __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) + + +#define DISPATCH_QUEUE_CACHELINE_PADDING \ + char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] +#ifdef __LP64__ +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (3*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + + 
DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) +#else +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) +#if !DISPATCH_INTROSPECTION +// No padding, DISPATCH_QUEUE_CACHELINE_PAD == 0 +#undef DISPATCH_QUEUE_CACHELINE_PADDING +#define DISPATCH_QUEUE_CACHELINE_PADDING +#endif +#endif + // If dc_vtable is less than 127, then the object is a continuation. // Otherwise, the object has a private layout and memory management rules. The // layout until after 'do_next' must align with normal objects. @@ -60,13 +96,10 @@ struct dispatch_continuation_s { typedef struct dispatch_continuation_s *dispatch_continuation_t; struct dispatch_apply_s { - size_t da_index; - size_t da_iterations; - void (*da_func)(void *, size_t); - void *da_ctxt; + size_t volatile da_index, da_todo; + size_t da_iterations, da_nested; + dispatch_continuation_t da_dc; _dispatch_thread_semaphore_t da_sema; - dispatch_queue_t da_queue; - size_t da_done; uint32_t da_thr_cnt; }; @@ -77,31 +110,27 @@ struct dispatch_queue_attr_s { DISPATCH_STRUCT_HEADER(queue_attr); }; -#define DISPATCH_QUEUE_MIN_LABEL_SIZE 64 - -#ifdef __LP64__ -#define DISPATCH_QUEUE_CACHELINE_PAD (4*sizeof(void*)) -#else -#define DISPATCH_QUEUE_CACHELINE_PAD (2*sizeof(void*)) -#endif - #define DISPATCH_QUEUE_HEADER \ uint32_t volatile dq_running; \ - uint32_t dq_width; \ - struct dispatch_object_s *volatile dq_items_tail; \ struct dispatch_object_s *volatile dq_items_head; \ + /* LP64 global queue cacheline boundary */ \ + struct dispatch_object_s *volatile dq_items_tail; \ + dispatch_queue_t dq_specific_q; \ + uint32_t dq_width; \ + unsigned int dq_is_thread_bound:1; \ unsigned long dq_serialnum; \ - dispatch_queue_t dq_specific_q; + const char *dq_label; \ + DISPATCH_INTROSPECTION_QUEUE_LIST; DISPATCH_CLASS_DECL(queue); struct dispatch_queue_s { DISPATCH_STRUCT_HEADER(queue); DISPATCH_QUEUE_HEADER; - char 
dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last - char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]; // for static queues only + DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only }; DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue); DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue); @@ -109,19 +138,37 @@ DISPATCH_CLASS_DECL(queue_specific_queue); extern struct dispatch_queue_s _dispatch_mgr_q; +void _dispatch_queue_destroy(dispatch_object_t dou); void _dispatch_queue_dispose(dispatch_queue_t dq); void _dispatch_queue_invoke(dispatch_queue_t dq); void _dispatch_queue_push_list_slow(dispatch_queue_t dq, struct dispatch_object_s *obj, unsigned int n); void _dispatch_queue_push_slow(dispatch_queue_t dq, struct dispatch_object_s *obj); +unsigned long _dispatch_queue_probe(dispatch_queue_t dq); dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); +_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou); void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); -bool _dispatch_queue_probe_root(dispatch_queue_t dq); -bool _dispatch_mgr_wakeup(dispatch_queue_t dq); -DISPATCH_NORETURN -dispatch_queue_t _dispatch_mgr_thread(dispatch_queue_t dq); +unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq); +void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq); +unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq); +void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq); +void _dispatch_runloop_queue_dispose(dispatch_queue_t dq); +void _dispatch_mgr_queue_drain(void); +unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq); +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +void _dispatch_mgr_priority_init(void); +#else +static inline void _dispatch_mgr_priority_init(void) {} +#endif +void _dispatch_after_timer_callback(void *ctxt); +void 
_dispatch_async_redirect_invoke(void *ctxt); +void _dispatch_sync_recurse_invoke(void *ctxt); +void _dispatch_apply_invoke(void *ctxt); +void _dispatch_apply_redirect_invoke(void *ctxt); +void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func); #if DISPATCH_DEBUG void dispatch_debug_queue(dispatch_queue_t dq, const char* str); @@ -149,10 +196,10 @@ enum { DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY, }; -extern unsigned long _dispatch_queue_serial_numbers; +extern unsigned long volatile _dispatch_queue_serial_numbers; extern struct dispatch_queue_s _dispatch_root_queues[]; -#if !__OBJC2__ +#if !(USE_OBJC && __OBJC2__) DISPATCH_ALWAYS_INLINE static inline bool @@ -161,15 +208,14 @@ _dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head, { struct dispatch_object_s *prev; tail->do_next = NULL; - dispatch_atomic_store_barrier(); - prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail); + prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release); if (fastpath(prev)) { // if we crash here with a value less than 0x1000, then we are at a // known bug in client code for example, see _dispatch_queue_dispose // or _dispatch_atfork_child prev->do_next = head; } - return prev; + return (prev != NULL); } DISPATCH_ALWAYS_INLINE @@ -193,11 +239,56 @@ _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) } } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, + bool wakeup) +{ + struct dispatch_object_s *tail = _tail._do; + if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { + _dispatch_queue_push_slow(dq, tail); + } else if (slowpath(wakeup)) { + _dispatch_wakeup(dq); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_class_invoke(dispatch_object_t dou, + dispatch_queue_t (*invoke)(dispatch_object_t, + _dispatch_thread_semaphore_t*)) +{ + dispatch_queue_t dq = dou._dq; + if 
(!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && + fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){ + dispatch_queue_t tq = NULL; + _dispatch_thread_semaphore_t sema = 0; + tq = invoke(dq, &sema); + // We do not need to check the result. + // When the suspend-count lock is dropped, then the check will happen. + (void)dispatch_atomic_dec2o(dq, dq_running, release); + if (sema) { + _dispatch_thread_semaphore_signal(sema); + } else if (tq) { + return _dispatch_queue_push(tq, dq); + } + } + dq->do_next = DISPATCH_OBJECT_LISTLESS; + if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, release)) { + dispatch_atomic_barrier(seq_cst); // + if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) { + _dispatch_wakeup(dq); // verify that the queue is idle + } + } + _dispatch_release(dq); // added when the queue is put on the list +} + DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t _dispatch_queue_get_current(void) { - return _dispatch_thread_getspecific(dispatch_queue_key); + return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST @@ -211,6 +302,7 @@ _dispatch_get_root_queue(long priority, bool overcommit) DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY]; #endif case DISPATCH_QUEUE_PRIORITY_LOW: + case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY]; case DISPATCH_QUEUE_PRIORITY_DEFAULT: @@ -227,6 +319,7 @@ _dispatch_get_root_queue(long priority, bool overcommit) DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY]; #endif case DISPATCH_QUEUE_PRIORITY_LOW: + case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY]; case DISPATCH_QUEUE_PRIORITY_DEFAULT: return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY]; @@ -242,23 +335,67 @@ _dispatch_get_root_queue(long priority, bool overcommit) static inline void 
_dispatch_queue_init(dispatch_queue_t dq) { - dq->do_next = DISPATCH_OBJECT_LISTLESS; - // Default target queue is overcommit! - dq->do_targetq = _dispatch_get_root_queue(0, true); + dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + dq->dq_running = 0; dq->dq_width = 1; - dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1; + dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers, + relaxed); } -dispatch_continuation_t -_dispatch_continuation_alloc_from_heap(void); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_set_bound_thread(dispatch_queue_t dq) +{ + //Tag thread-bound queues with the owning thread + dispatch_assert(dq->dq_is_thread_bound); + dq->do_finalizer = (void*)_dispatch_thread_self(); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_clear_bound_thread(dispatch_queue_t dq) +{ + dispatch_assert(dq->dq_is_thread_bound); + dq->do_finalizer = NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_t +_dispatch_queue_get_bound_thread(dispatch_queue_t dq) +{ + dispatch_assert(dq->dq_is_thread_bound); + return (pthread_t)dq->do_finalizer; +} + +#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT +#if TARGET_OS_EMBEDDED +#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16 +#else +#define DISPATCH_CONTINUATION_CACHE_LIMIT 65536 +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128 +#endif +#endif + +dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); +void _dispatch_continuation_free_to_heap(dispatch_continuation_t c); + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE +extern int _dispatch_continuation_cache_limit; +void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c); +#else +#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT +#define _dispatch_continuation_free_to_cache_limit(c) \ + 
_dispatch_continuation_free_to_heap(c) +#endif DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t _dispatch_continuation_alloc_cacheonly(void) { - dispatch_continuation_t dc; - dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + dispatch_continuation_t dc = (dispatch_continuation_t) + fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); if (dc) { _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); } @@ -269,26 +406,40 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t _dispatch_continuation_alloc(void) { - dispatch_continuation_t dc; - - dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + dispatch_continuation_t dc = + fastpath(_dispatch_continuation_alloc_cacheonly()); if(!dc) { return _dispatch_continuation_alloc_from_heap(); } return dc; } - DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_free(dispatch_continuation_t dc) +static inline dispatch_continuation_t +_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc) { - dispatch_continuation_t prev_dc; - prev_dc = _dispatch_thread_getspecific(dispatch_cache_key); + dispatch_continuation_t prev_dc = (dispatch_continuation_t) + fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + int cnt = prev_dc ? prev_dc->do_ref_cnt + 1 : 1; + // Cap continuation cache + if (slowpath(cnt > _dispatch_continuation_cache_limit)) { + return dc; + } dc->do_next = prev_dc; + dc->do_ref_cnt = cnt; _dispatch_thread_setspecific(dispatch_cache_key, dc); + return NULL; } -#endif // !__OBJC2__ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_free(dispatch_continuation_t dc) +{ + dc = _dispatch_continuation_free_cacheonly(dc); + if (slowpath(dc)) { + _dispatch_continuation_free_to_cache_limit(dc); + } +} +#endif // !(USE_OBJC && __OBJC2__) #endif diff --git a/src/semaphore.c b/src/semaphore.c index c3692c6..20d9ae5 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. 
All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -35,6 +35,52 @@ } while (0) #endif +#if USE_WIN32_SEM +// rdar://problem/8428132 +static DWORD best_resolution = 1; // 1ms + +DWORD +_push_timer_resolution(DWORD ms) +{ + MMRESULT res; + static dispatch_once_t once; + + if (ms > 16) { + // only update timer resolution if smaller than default 15.6ms + // zero means not updated + return 0; + } + + // aim for the best resolution we can accomplish + dispatch_once(&once, ^{ + TIMECAPS tc; + MMRESULT res; + res = timeGetDevCaps(&tc, sizeof(tc)); + if (res == MMSYSERR_NOERROR) { + best_resolution = min(max(tc.wPeriodMin, best_resolution), + tc.wPeriodMax); + } + }); + + res = timeBeginPeriod(best_resolution); + if (res == TIMERR_NOERROR) { + return best_resolution; + } + // zero means not updated + return 0; +} + +// match ms parameter to result from _push_timer_resolution +void +_pop_timer_resolution(DWORD ms) +{ + if (ms) { + timeEndPeriod(ms); + } +} +#endif /* USE_WIN32_SEM */ + + DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); @@ -48,7 +94,7 @@ _dispatch_semaphore_init(long value, dispatch_object_t dou) { dispatch_semaphore_t dsema = dou._dsema; - dsema->do_next = DISPATCH_OBJECT_LISTLESS; + dsema->do_next = (dispatch_semaphore_t)DISPATCH_OBJECT_LISTLESS; dsema->do_targetq = dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); dsema->dsema_value = value; @@ -71,8 +117,10 @@ dispatch_semaphore_create(long value) return NULL; } - dsema = _dispatch_alloc(DISPATCH_VTABLE(semaphore), - sizeof(struct dispatch_semaphore_s)); + dsema = (dispatch_semaphore_t)_dispatch_alloc(DISPATCH_VTABLE(semaphore), + sizeof(struct dispatch_semaphore_s) - + sizeof(dsema->dsema_notify_head) - + sizeof(dsema->dsema_notify_tail)); _dispatch_semaphore_init(value, dsema); return dsema; } @@ -99,14 +147,34 @@ _dispatch_semaphore_create_port(semaphore_t 
*s4) while ((kr = semaphore_create(mach_task_self(), &tmp, SYNC_POLICY_FIFO, 0))) { DISPATCH_VERIFY_MIG(kr); - sleep(1); + _dispatch_temporary_resource_shortage(); } - if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { + if (!dispatch_atomic_cmpxchg(s4, 0, tmp, relaxed)) { kr = semaphore_destroy(mach_task_self(), tmp); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } } +#elif USE_WIN32_SEM +static void +_dispatch_semaphore_create_handle(HANDLE *s4) +{ + HANDLE tmp; + + if (*s4) { + return; + } + + // lazily allocate the semaphore port + + while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { + _dispatch_temporary_resource_shortage(); + } + + if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { + CloseHandle(tmp); + } +} #endif void @@ -125,13 +193,13 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } - if (dsema->dsema_waiter_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } #elif USE_POSIX_SEM int ret = sem_destroy(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + if (dsema->dsema_handle) { + CloseHandle(dsema->dsema_handle); + } #endif } @@ -141,14 +209,14 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) dispatch_semaphore_t dsema = dou._dsema; size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(dsema), dsema); offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM - offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", dsema->dsema_port); #endif - offset += snprintf(&buf[offset], bufsiz - offset, + offset += dsnprintf(&buf[offset], bufsiz - offset, "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); 
return offset; } @@ -164,7 +232,9 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) // dsema after the atomic increment. _dispatch_retain(dsema); - (void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals); +#if USE_MACH_SEM || USE_POSIX_SEM + (void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals, relaxed); +#endif #if USE_MACH_SEM _dispatch_semaphore_create_port(&dsema->dsema_port); @@ -173,6 +243,10 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) #elif USE_POSIX_SEM int ret = sem_post(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); + int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); + dispatch_assume(ret); #endif _dispatch_release(dsema); @@ -182,8 +256,7 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) long dispatch_semaphore_signal(dispatch_semaphore_t dsema) { - dispatch_atomic_release_barrier(); - long value = dispatch_atomic_inc2o(dsema, dsema_value); + long value = dispatch_atomic_inc2o(dsema, dsema_value, release); if (fastpath(value > 0)) { return 0; } @@ -200,22 +273,38 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, { long orig; +#if USE_MACH_SEM + mach_timespec_t _timeout; + kern_return_t kr; +#elif USE_POSIX_SEM + struct timespec _timeout; + int ret; +#elif USE_WIN32_SEM + uint64_t nsec; + DWORD msec; + DWORD resolution; + DWORD wait_result; +#endif + +#if USE_MACH_SEM || USE_POSIX_SEM again: // Mach semaphores appear to sometimes spuriously wake up. Therefore, // we keep a parallel count of the number of times a Mach semaphore is // signaled (6880961). 
- while ((orig = dsema->dsema_sent_ksignals)) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_sent_ksignals, orig, - orig - 1)) { + orig = dsema->dsema_sent_ksignals; + while (orig) { + if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_sent_ksignals, orig, + orig - 1, &orig, relaxed)) { return 0; } } +#endif #if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; - _dispatch_semaphore_create_port(&dsema->dsema_port); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); +#endif // From xnu/osfmk/kern/sync_sema.c: // wait_semaphore->count = -1; /* we don't keep an actual count */ @@ -227,6 +316,7 @@ again: switch (timeout) { default: +#if USE_MACH_SEM do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); @@ -238,29 +328,7 @@ again: DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } - // Fall through and try to undo what the fast path did to - // dsema->dsema_value - case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_value) < 0) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) { - return KERN_OPERATION_TIMED_OUT; - } - } - // Another thread called semaphore_signal(). - // Fall through and drain the wakeup. 
- case DISPATCH_TIME_FOREVER: - do { - kr = semaphore_wait(dsema->dsema_port); - } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - break; - } #elif USE_POSIX_SEM - struct timespec _timeout; - int ret; - - switch (timeout) { - default: do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); @@ -272,34 +340,60 @@ again: DISPATCH_SEMAPHORE_VERIFY_RET(ret); break; } +#elif USE_WIN32_SEM + nsec = _dispatch_timeout(timeout); + msec = (DWORD)(nsec / (uint64_t)1000000); + resolution = _push_timer_resolution(msec); + wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + _pop_timer_resolution(resolution); + if (wait_result != WAIT_TIMEOUT) { + break; + } +#endif // Fall through and try to undo what the fast path did to // dsema->dsema_value case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_value) < 0) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) { + orig = dsema->dsema_value; + while (orig < 0) { + if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, + &orig, relaxed)) { +#if USE_MACH_SEM + return KERN_OPERATION_TIMED_OUT; +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return -1; +#endif } } // Another thread called semaphore_signal(). // Fall through and drain the wakeup. 
case DISPATCH_TIME_FOREVER: +#if USE_MACH_SEM + do { + kr = semaphore_wait(dsema->dsema_port); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM do { ret = sem_wait(&dsema->dsema_sem); } while (ret != 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + WaitForSingleObject(dsema->dsema_handle, INFINITE); +#endif break; } -#endif - +#if USE_MACH_SEM || USE_POSIX_SEM goto again; +#else + return 0; +#endif } long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { - long value = dispatch_atomic_dec2o(dsema, dsema_value); - dispatch_atomic_acquire_barrier(); + long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire); if (fastpath(value >= 0)) { return 0; } @@ -312,8 +406,8 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) dispatch_group_t dispatch_group_create(void) { - dispatch_group_t dg = _dispatch_alloc(DISPATCH_VTABLE(group), - sizeof(struct dispatch_semaphore_s)); + dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc( + DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s)); _dispatch_semaphore_init(LONG_MAX, dg); return dg; } @@ -322,29 +416,32 @@ void dispatch_group_enter(dispatch_group_t dg) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - - (void)dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); + long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire); + if (slowpath(value < 0)) { + DISPATCH_CLIENT_CRASH( + "Too many nested calls to dispatch_group_enter()"); + } } DISPATCH_NOINLINE static long _dispatch_group_wake(dispatch_semaphore_t dsema) { - struct dispatch_sema_notify_s *next, *head, *tail = NULL; + dispatch_continuation_t next, head, tail = NULL, dc; long rval; - head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL); + head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL, relaxed); if (head) { // snapshot before anything is notified/woken - tail = dispatch_atomic_xchg2o(dsema, 
dsema_notify_tail, NULL); + tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL, relaxed); } - rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0); + rval = (long)dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0, relaxed); if (rval) { // wake group waiters #if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); + _dispatch_semaphore_create_port(&dsema->dsema_port); do { - kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port); + kern_return_t kr = semaphore_signal(dsema->dsema_port); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } while (--rval); #elif USE_POSIX_SEM @@ -352,20 +449,31 @@ _dispatch_group_wake(dispatch_semaphore_t dsema) int ret = sem_post(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); } while (--rval); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); + int ret; + ret = ReleaseSemaphore(dsema->dsema_handle, rval, NULL); + dispatch_assume(ret); +#else +#error "No supported semaphore type" #endif } if (head) { // async group notify blocks do { - dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func); - _dispatch_release(head->dsn_queue); - next = fastpath(head->dsn_next); + next = fastpath(head->do_next); if (!next && head != tail) { - while (!(next = fastpath(head->dsn_next))) { - _dispatch_hardware_pause(); + while (!(next = fastpath(head->do_next))) { + dispatch_hardware_pause(); } } - free(head); + dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data; + dc = _dispatch_continuation_free_cacheonly(head); + dispatch_async_f(dsn_queue, head->dc_ctxt, head->dc_func); + _dispatch_release(dsn_queue); + if (slowpath(dc)) { + _dispatch_continuation_free_to_cache_limit(dc); + } } while ((head = next)); _dispatch_release(dsema); } @@ -376,12 +484,11 @@ void dispatch_group_leave(dispatch_group_t dg) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - dispatch_atomic_release_barrier(); - long value = dispatch_atomic_inc2o(dsema, dsema_value); - if 
(slowpath(value == LONG_MIN)) { + long value = dispatch_atomic_inc2o(dsema, dsema_value, release); + if (slowpath(value < 0)) { DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()"); } - if (slowpath(value == dsema->dsema_orig)) { + if (slowpath(value == LONG_MAX)) { (void)_dispatch_group_wake(dsema); } } @@ -392,26 +499,39 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) { long orig; +#if USE_MACH_SEM + mach_timespec_t _timeout; + kern_return_t kr; +#elif USE_POSIX_SEM // KVV + struct timespec _timeout; + int ret; +#elif USE_WIN32_SEM // KVV + uint64_t nsec; + DWORD msec; + DWORD resolution; + DWORD wait_result; +#endif + again: // check before we cause another signal to be sent by incrementing // dsema->dsema_group_waiters - if (dsema->dsema_value == dsema->dsema_orig) { + if (dsema->dsema_value == LONG_MAX) { return _dispatch_group_wake(dsema); } // Mach semaphores appear to sometimes spuriously wake up. Therefore, // we keep a parallel count of the number of times a Mach semaphore is // signaled (6880961). 
- (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters); + (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters, relaxed); // check the values again in case we need to wake any threads - if (dsema->dsema_value == dsema->dsema_orig) { + if (dsema->dsema_value == LONG_MAX) { return _dispatch_group_wake(dsema); } #if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; - - _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); + _dispatch_semaphore_create_port(&dsema->dsema_port); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); +#endif // From xnu/osfmk/kern/sync_sema.c: // wait_semaphore->count = -1; /* we don't keep an actual count */ @@ -423,42 +543,19 @@ again: switch (timeout) { default: +#if USE_MACH_SEM do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port, - _timeout)); + kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); } while (kr == KERN_ABORTED); if (kr != KERN_OPERATION_TIMED_OUT) { DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } - // Fall through and try to undo the earlier change to - // dsema->dsema_group_waiters - case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_group_waiters)) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig, - orig - 1)) { - return KERN_OPERATION_TIMED_OUT; - } - } - // Another thread called semaphore_signal(). - // Fall through and drain the wakeup. 
- case DISPATCH_TIME_FOREVER: - do { - kr = semaphore_wait(dsema->dsema_waiter_port); - } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - break; - } #elif USE_POSIX_SEM - struct timespec _timeout; - int ret; - - switch (timeout) { - default: do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); @@ -470,42 +567,64 @@ again: DISPATCH_SEMAPHORE_VERIFY_RET(ret); break; } +#elif USE_WIN32_SEM + nsec = _dispatch_timeout(timeout); + msec = (DWORD)(nsec / (uint64_t)1000000); + resolution = _push_timer_resolution(msec); + wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + _pop_timer_resolution(resolution); + if (wait_result != WAIT_TIMEOUT) { + break; + } +#endif // Fall through and try to undo the earlier change to // dsema->dsema_group_waiters case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_group_waiters)) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig, - orig - 1)) { + orig = dsema->dsema_group_waiters; + while (orig) { + if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_group_waiters, orig, + orig - 1, &orig, relaxed)) { +#if USE_MACH_SEM + return KERN_OPERATION_TIMED_OUT; +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return -1; +#endif } } // Another thread called semaphore_signal(). // Fall through and drain the wakeup. 
case DISPATCH_TIME_FOREVER: +#if USE_MACH_SEM + do { + kr = semaphore_wait(dsema->dsema_port); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM do { ret = sem_wait(&dsema->dsema_sem); } while (ret == -1 && errno == EINTR); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + WaitForSingleObject(dsema->dsema_handle, INFINITE); +#endif break; } -#endif - goto again; -} + } long dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - if (dsema->dsema_value == dsema->dsema_orig) { + if (dsema->dsema_value == LONG_MAX) { return 0; } if (timeout == 0) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return (-1); #endif @@ -519,25 +638,21 @@ dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *)) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - struct dispatch_sema_notify_s *dsn, *prev; - - // FIXME -- this should be updated to use the continuation cache - while (!(dsn = calloc(1, sizeof(*dsn)))) { - sleep(1); - } - - dsn->dsn_queue = dq; - dsn->dsn_ctxt = ctxt; - dsn->dsn_func = func; + dispatch_continuation_t prev, dsn = _dispatch_continuation_alloc(); + dsn->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + dsn->dc_data = dq; + dsn->dc_ctxt = ctxt; + dsn->dc_func = func; + dsn->do_next = NULL; _dispatch_retain(dq); - dispatch_atomic_store_barrier(); - prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn); + prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn, release); if (fastpath(prev)) { - prev->dsn_next = dsn; + prev->do_next = dsn; } else { _dispatch_retain(dg); - (void)dispatch_atomic_xchg2o(dsema, dsema_notify_head, dsn); - if (dsema->dsema_value == dsema->dsema_orig) { + dispatch_atomic_store2o(dsema, dsema_notify_head, dsn, seq_cst); + dispatch_atomic_barrier(seq_cst); // + if 
(dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) { _dispatch_group_wake(dsema); } } @@ -556,18 +671,19 @@ dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, #pragma mark - #pragma mark _dispatch_thread_semaphore_t -DISPATCH_NOINLINE -static _dispatch_thread_semaphore_t +_dispatch_thread_semaphore_t _dispatch_thread_semaphore_create(void) { _dispatch_safe_fork = false; -#if USE_MACH_SEM +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_create(); +#elif USE_MACH_SEM semaphore_t s4; kern_return_t kr; while (slowpath(kr = semaphore_create(mach_task_self(), &s4, SYNC_POLICY_FIFO, 0))) { DISPATCH_VERIFY_MIG(kr); - sleep(1); + _dispatch_temporary_resource_shortage(); } return s4; #elif USE_POSIX_SEM @@ -575,14 +691,23 @@ _dispatch_thread_semaphore_create(void) int ret = sem_init(&s4, 0, 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); return s4; +#elif USE_WIN32_SEM + HANDLE tmp; + while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { + _dispatch_temporary_resource_shortage(); + } + return (_dispatch_thread_semaphore_t)tmp; +#else +#error "No supported semaphore type" #endif } -DISPATCH_NOINLINE void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) { -#if USE_MACH_SEM +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_dispose(sema); +#elif USE_MACH_SEM semaphore_t s4 = (semaphore_t)sema; kern_return_t kr = semaphore_destroy(mach_task_self(), s4); DISPATCH_SEMAPHORE_VERIFY_KR(kr); @@ -590,13 +715,23 @@ _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) sem_t s4 = (sem_t)sema; int ret = sem_destroy(&s4); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + // XXX: signal the semaphore? 
+ WINBOOL success; + success = CloseHandle((HANDLE)sema); + dispatch_assume(success); +#else +#error "No supported semaphore type" #endif } void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) { -#if USE_MACH_SEM + // assumed to contain a release barrier +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_signal(sema); +#elif USE_MACH_SEM semaphore_t s4 = (semaphore_t)sema; kern_return_t kr = semaphore_signal(s4); DISPATCH_SEMAPHORE_VERIFY_KR(kr); @@ -604,13 +739,22 @@ _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) sem_t s4 = (sem_t)sema; int ret = sem_post(&s4); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + int ret; + ret = ReleaseSemaphore((HANDLE)sema, 1, NULL); + dispatch_assume(ret); +#else +#error "No supported semaphore type" #endif } void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) { -#if USE_MACH_SEM + // assumed to contain an acquire barrier +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_wait(sema); +#elif USE_MACH_SEM semaphore_t s4 = (semaphore_t)sema; kern_return_t kr; do { @@ -624,28 +768,12 @@ _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) ret = sem_wait(&s4); } while (slowpath(ret != 0)); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + DWORD wait_result; + do { + wait_result = WaitForSingleObject((HANDLE)sema, INFINITE); + } while (wait_result != WAIT_OBJECT_0); +#else +#error "No supported semaphore type" #endif } - -_dispatch_thread_semaphore_t -_dispatch_get_thread_semaphore(void) -{ - _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - if (slowpath(!sema)) { - return _dispatch_thread_semaphore_create(); - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return sema; -} - -void -_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema) -{ - _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t) - 
_dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema); - if (slowpath(old_sema)) { - return _dispatch_thread_semaphore_dispose(old_sema); - } -} diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index e27f934..c8174b6 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -29,32 +29,26 @@ struct dispatch_queue_s; -struct dispatch_sema_notify_s { - struct dispatch_sema_notify_s *volatile dsn_next; - struct dispatch_queue_s *dsn_queue; - void *dsn_ctxt; - void (*dsn_func)(void *); -}; - DISPATCH_CLASS_DECL(semaphore); struct dispatch_semaphore_s { DISPATCH_STRUCT_HEADER(semaphore); - long dsema_value; - long dsema_orig; - size_t dsema_sent_ksignals; -#if USE_MACH_SEM && USE_POSIX_SEM -#error "Too many supported semaphore types" -#elif USE_MACH_SEM +#if USE_MACH_SEM semaphore_t dsema_port; - semaphore_t dsema_waiter_port; #elif USE_POSIX_SEM sem_t dsema_sem; +#elif USE_WIN32_SEM + HANDLE dsema_handle; #else #error "No supported semaphore type" #endif - size_t dsema_group_waiters; - struct dispatch_sema_notify_s *dsema_notify_head; - struct dispatch_sema_notify_s *dsema_notify_tail; + long dsema_orig; + long volatile dsema_value; + union { + long volatile dsema_sent_ksignals; + long volatile dsema_group_waiters; + }; + struct dispatch_continuation_s *volatile dsema_notify_head; + struct dispatch_continuation_s *volatile dsema_notify_tail; }; DISPATCH_CLASS_DECL(group); @@ -64,10 +58,35 @@ size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); typedef uintptr_t _dispatch_thread_semaphore_t; -_dispatch_thread_semaphore_t _dispatch_get_thread_semaphore(void); -void _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t); + +_dispatch_thread_semaphore_t 
_dispatch_thread_semaphore_create(void); +void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t); void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t); void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t); -void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t); + +DISPATCH_ALWAYS_INLINE +static inline _dispatch_thread_semaphore_t +_dispatch_get_thread_semaphore(void) +{ + _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + if (slowpath(!sema)) { + return _dispatch_thread_semaphore_create(); + } + _dispatch_thread_setspecific(dispatch_sema4_key, NULL); + return sema; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema) +{ + _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema); + if (slowpath(old_sema)) { + return _dispatch_thread_semaphore_dispose(old_sema); + } +} #endif diff --git a/src/shims.h b/src/shims.h index 73322be..32376ee 100644 --- a/src/shims.h +++ b/src/shims.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -39,6 +39,21 @@ #define FD_COPY(f, t) (void)(*(t) = *(f)) #endif +#if TARGET_OS_WIN32 +#define bzero(ptr,len) memset((ptr), 0, (len)) +#define snprintf _snprintf + +inline size_t strlcpy(char *dst, const char *src, size_t size) { + int res = strlen(dst) + strlen(src) + 1; + if (size > 0) { + size_t n = size - 1; + strncpy(dst, src, n); + dst[n] = 0; + } + return res; +} +#endif // TARGET_OS_WIN32 + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -49,13 +64,17 @@ DISPATCH_NORETURN void __builtin_trap(void); #endif +#if DISPATCH_HW_CONFIG_UP +#define DISPATCH_ATOMIC_UP 1 +#endif + #include "shims/atomic.h" +#include "shims/atomic_sfb.h" #include "shims/tsd.h" #include "shims/hw_config.h" #include "shims/perfmon.h" #include "shims/getprogname.h" -#include "shims/malloc_zone.h" #include "shims/time.h" #ifdef __APPLE__ @@ -65,7 +84,7 @@ void __builtin_trap(void); #define _dispatch_clear_stack(s) do { \ void *a[(s)/sizeof(void*) ? (s)/sizeof(void*) : 1]; \ a[0] = pthread_get_stackaddr_np(pthread_self()); \ - bzero((void*)&a[1], a[0] - (void*)&a[1]); \ + bzero((void*)&a[1], (size_t)(a[0] - (void*)&a[1])); \ } while (0) #else #define _dispatch_clear_stack(s) diff --git a/src/shims/atomic.h b/src/shims/atomic.h index a30c89f..2f44775 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -27,129 +27,346 @@ #ifndef __DISPATCH_SHIMS_ATOMIC__ #define __DISPATCH_SHIMS_ATOMIC__ -/* x86 & cortex-a8 have a 64 byte cacheline */ -#define DISPATCH_CACHELINE_SIZE 64 -#define ROUND_UP_TO_CACHELINE_SIZE(x) \ - (((x) + (DISPATCH_CACHELINE_SIZE - 1)) & ~(DISPATCH_CACHELINE_SIZE - 1)) -#define ROUND_UP_TO_VECTOR_SIZE(x) \ - (((x) + 15) & ~15) -#define DISPATCH_CACHELINE_ALIGN \ - __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) +// generate error during codegen +#define _dispatch_atomic_unimplemented() \ + ({ __asm__(".err unimplemented"); }) -#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) +#pragma mark - +#pragma mark memory_order -#define _dispatch_atomic_barrier() __sync_synchronize() -// see comment in dispatch_once.c -#define dispatch_atomic_maximally_synchronizing_barrier() \ - _dispatch_atomic_barrier() -// assume atomic builtins provide barriers -#define dispatch_atomic_barrier() -#define dispatch_atomic_acquire_barrier() -#define dispatch_atomic_release_barrier() -#define dispatch_atomic_store_barrier() +typedef enum _dispatch_atomic_memory_order +{ + _dispatch_atomic_memory_order_relaxed, + _dispatch_atomic_memory_order_consume, + _dispatch_atomic_memory_order_acquire, + _dispatch_atomic_memory_order_release, + _dispatch_atomic_memory_order_acq_rel, + _dispatch_atomic_memory_order_seq_cst, +} _dispatch_atomic_memory_order; + +#if !DISPATCH_ATOMIC_UP + +#define dispatch_atomic_memory_order_relaxed \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_acquire \ + _dispatch_atomic_memory_order_acquire +#define dispatch_atomic_memory_order_release \ + _dispatch_atomic_memory_order_release +#define dispatch_atomic_memory_order_acq_rel \ + _dispatch_atomic_memory_order_acq_rel +#define dispatch_atomic_memory_order_seq_cst \ + _dispatch_atomic_memory_order_seq_cst + +#else // DISPATCH_ATOMIC_UP + +#define dispatch_atomic_memory_order_relaxed \ + 
_dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_acquire \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_release \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_acq_rel \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_seq_cst \ + _dispatch_atomic_memory_order_relaxed + +#endif // DISPATCH_ATOMIC_UP + +#if __has_extension(c_generic_selections) +#define _dispatch_atomic_basetypeof(p) \ + typeof(*_Generic((p), \ + int*: (int*)(p), \ + volatile int*: (int*)(p), \ + unsigned int*: (unsigned int*)(p), \ + volatile unsigned int*: (unsigned int*)(p), \ + long*: (long*)(p), \ + volatile long*: (long*)(p), \ + unsigned long*: (unsigned long*)(p), \ + volatile unsigned long*: (unsigned long*)(p), \ + long long*: (long long*)(p), \ + volatile long long*: (long long*)(p), \ + unsigned long long*: (unsigned long long*)(p), \ + volatile unsigned long long*: (unsigned long long*)(p), \ + default: (void**)(p))) +#endif + +#if __has_extension(c_atomic) && __has_extension(c_generic_selections) +#pragma mark - +#pragma mark c11 + +#define _dispatch_atomic_c11_atomic(p) \ + _Generic((p), \ + int*: (_Atomic(int)*)(p), \ + volatile int*: (volatile _Atomic(int)*)(p), \ + unsigned int*: (_Atomic(unsigned int)*)(p), \ + volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \ + long*: (_Atomic(long)*)(p), \ + volatile long*: (volatile _Atomic(long)*)(p), \ + unsigned long*: (_Atomic(unsigned long)*)(p), \ + volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \ + long long*: (_Atomic(long long)*)(p), \ + volatile long long*: (volatile _Atomic(long long)*)(p), \ + unsigned long long*: (_Atomic(unsigned long long)*)(p), \ + volatile unsigned long long*: \ + (volatile _Atomic(unsigned long long)*)(p), \ + default: (volatile _Atomic(void*)*)(p)) + +#define _dispatch_atomic_barrier(m) \ + ({ __c11_atomic_thread_fence(dispatch_atomic_memory_order_##m); }) 
+#define dispatch_atomic_load(p, m) \ + ({ _dispatch_atomic_basetypeof(p) _r = \ + __c11_atomic_load(_dispatch_atomic_c11_atomic(p), \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) +#define dispatch_atomic_store(p, v, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v); \ + __c11_atomic_store(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); }) +#define dispatch_atomic_xchg(p, v, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ + __c11_atomic_exchange(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) +#define dispatch_atomic_cmpxchg(p, e, v, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); \ + __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ + &_r, _v, dispatch_atomic_memory_order_##m, \ + dispatch_atomic_memory_order_relaxed); }) +#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ + &_r, _v, dispatch_atomic_memory_order_##m, \ + dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) +#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + __c11_atomic_compare_exchange_weak(_dispatch_atomic_c11_atomic(p), \ + &_r, _v, dispatch_atomic_memory_order_##m, \ + dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) +#define _dispatch_atomic_c11_op(p, v, m, o, op) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ + __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))(_r op _v); }) +#define _dispatch_atomic_c11_op_orig(p, v, m, o, op) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ + __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) + +#define dispatch_atomic_add(p, v, m) \ + 
_dispatch_atomic_c11_op((p), (v), m, add, +) +#define dispatch_atomic_add_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, add, +) +#define dispatch_atomic_sub(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, sub, -) +#define dispatch_atomic_sub_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, sub, -) +#define dispatch_atomic_and(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, and, &) +#define dispatch_atomic_and_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, and, &) +#define dispatch_atomic_or(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, or, |) +#define dispatch_atomic_or_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, or, |) +#define dispatch_atomic_xor(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, xor, ^) +#define dispatch_atomic_xor_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, xor, ^) -#define _dispatch_hardware_pause() __asm__("") -#define _dispatch_debugger() __asm__("trap") +#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) +#pragma mark - +#pragma mark gnu99 -#define dispatch_atomic_cmpxchg(p, e, n) \ - __sync_bool_compare_and_swap((p), (e), (n)) +#define _dispatch_atomic_full_barrier() \ + __sync_synchronize() +#define _dispatch_atomic_barrier(m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_relaxed: \ + break; \ + default: \ + _dispatch_atomic_full_barrier(); break; \ + } }) +// Only emulate store seq_cst -> load seq_cst +#define dispatch_atomic_load(p, m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_relaxed: \ + case _dispatch_atomic_memory_order_seq_cst: \ + break; \ + default: \ + _dispatch_atomic_unimplemented(); break; \ + }; *(p); }) +#define dispatch_atomic_store(p, v, m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_release: \ + _dispatch_atomic_barrier(m); /* fallthrough */ \ + case _dispatch_atomic_memory_order_relaxed: \ + case 
_dispatch_atomic_memory_order_seq_cst: \ + *(p) = (v); break; \ + default:\ + _dispatch_atomic_unimplemented(); break; \ + } switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_seq_cst: \ + _dispatch_atomic_barrier(m); break; \ + default: \ + break; \ + } }) #if __has_builtin(__sync_swap) -#define dispatch_atomic_xchg(p, n) \ - ((typeof(*(p)))__sync_swap((p), (n))) +#define dispatch_atomic_xchg(p, v, m) \ + ((typeof(*(p)))__sync_swap((p), (v))) #else -#define dispatch_atomic_xchg(p, n) \ - ((typeof(*(p)))__sync_lock_test_and_set((p), (n))) +#define dispatch_atomic_xchg(p, v, m) \ + ((typeof(*(p)))__sync_lock_test_and_set((p), (v))) +#endif +#define dispatch_atomic_cmpxchg(p, e, v, m) \ + __sync_bool_compare_and_swap((p), (e), (v)) +#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ + ({ typeof(*(g)) _e = (e), _r = \ + __sync_val_compare_and_swap((p), _e, (v)); \ + bool _b = (_e == _r); *(g) = _r; _b; }) +#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ + dispatch_atomic_cmpxchgv((p), (e), (v), (g), m) + +#define dispatch_atomic_add(p, v, m) \ + __sync_add_and_fetch((p), (v)) +#define dispatch_atomic_add_orig(p, v, m) \ + __sync_fetch_and_add((p), (v)) +#define dispatch_atomic_sub(p, v, m) \ + __sync_sub_and_fetch((p), (v)) +#define dispatch_atomic_sub_orig(p, v, m) \ + __sync_fetch_and_sub((p), (v)) +#define dispatch_atomic_and(p, v, m) \ + __sync_and_and_fetch((p), (v)) +#define dispatch_atomic_and_orig(p, v, m) \ + __sync_fetch_and_and((p), (v)) +#define dispatch_atomic_or(p, v, m) \ + __sync_or_and_fetch((p), (v)) +#define dispatch_atomic_or_orig(p, v, m) \ + __sync_fetch_and_or((p), (v)) +#define dispatch_atomic_xor(p, v, m) \ + __sync_xor_and_fetch((p), (v)) +#define dispatch_atomic_xor_orig(p, v, m) \ + __sync_fetch_and_xor((p), (v)) + +#if defined(__x86_64__) || defined(__i386__) +// GCC emits nothing for __sync_synchronize() on x86_64 & i386 +#undef _dispatch_atomic_full_barrier +#define _dispatch_atomic_full_barrier() \ + ({ 
__asm__ __volatile__( \ + "mfence" \ + : : : "memory"); }) +// xchg is faster than store + mfence +#undef dispatch_atomic_store +#define dispatch_atomic_store(p, v, m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_relaxed: \ + case _dispatch_atomic_memory_order_release: \ + *(p) = (v); break; \ + case _dispatch_atomic_memory_order_seq_cst: \ + (void)dispatch_atomic_xchg((p), (v), m); break; \ + default:\ + _dispatch_atomic_unimplemented(); break; \ + } }) #endif -#define dispatch_atomic_add(p, v) __sync_add_and_fetch((p), (v)) -#define dispatch_atomic_sub(p, v) __sync_sub_and_fetch((p), (v)) -#define dispatch_atomic_or(p, v) __sync_fetch_and_or((p), (v)) -#define dispatch_atomic_and(p, v) __sync_fetch_and_and((p), (v)) - -#define dispatch_atomic_inc(p) dispatch_atomic_add((p), 1) -#define dispatch_atomic_dec(p) dispatch_atomic_sub((p), 1) -// really just a low level abort() -#define _dispatch_hardware_crash() __builtin_trap() - -#define dispatch_atomic_cmpxchg2o(p, f, e, n) \ - dispatch_atomic_cmpxchg(&(p)->f, (e), (n)) -#define dispatch_atomic_xchg2o(p, f, n) \ - dispatch_atomic_xchg(&(p)->f, (n)) -#define dispatch_atomic_add2o(p, f, v) \ - dispatch_atomic_add(&(p)->f, (v)) -#define dispatch_atomic_sub2o(p, f, v) \ - dispatch_atomic_sub(&(p)->f, (v)) -#define dispatch_atomic_or2o(p, f, v) \ - dispatch_atomic_or(&(p)->f, (v)) -#define dispatch_atomic_and2o(p, f, v) \ - dispatch_atomic_and(&(p)->f, (v)) -#define dispatch_atomic_inc2o(p, f) \ - dispatch_atomic_add2o((p), f, 1) -#define dispatch_atomic_dec2o(p, f) \ - dispatch_atomic_sub2o((p), f, 1) #else #error "Please upgrade to GCC 4.2 or newer." 
#endif +#pragma mark - +#pragma mark generic + +#define dispatch_hardware_pause() ({ __asm__(""); }) +// assume atomic builtins provide barriers +#define dispatch_atomic_barrier(m) +// see comment in dispatch_once.c +#define dispatch_atomic_maximally_synchronizing_barrier() \ + _dispatch_atomic_barrier(seq_cst) + +#define dispatch_atomic_load2o(p, f, m) \ + dispatch_atomic_load(&(p)->f, m) +#define dispatch_atomic_store2o(p, f, v, m) \ + dispatch_atomic_store(&(p)->f, (v), m) +#define dispatch_atomic_xchg2o(p, f, v, m) \ + dispatch_atomic_xchg(&(p)->f, (v), m) +#define dispatch_atomic_cmpxchg2o(p, f, e, v, m) \ + dispatch_atomic_cmpxchg(&(p)->f, (e), (v), m) +#define dispatch_atomic_cmpxchgv2o(p, f, e, v, g, m) \ + dispatch_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) +#define dispatch_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ + dispatch_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) +#define dispatch_atomic_add2o(p, f, v, m) \ + dispatch_atomic_add(&(p)->f, (v), m) +#define dispatch_atomic_add_orig2o(p, f, v, m) \ + dispatch_atomic_add_orig(&(p)->f, (v), m) +#define dispatch_atomic_sub2o(p, f, v, m) \ + dispatch_atomic_sub(&(p)->f, (v), m) +#define dispatch_atomic_sub_orig2o(p, f, v, m) \ + dispatch_atomic_sub_orig(&(p)->f, (v), m) +#define dispatch_atomic_and2o(p, f, v, m) \ + dispatch_atomic_and(&(p)->f, (v), m) +#define dispatch_atomic_and_orig2o(p, f, v, m) \ + dispatch_atomic_and_orig(&(p)->f, (v), m) +#define dispatch_atomic_or2o(p, f, v, m) \ + dispatch_atomic_or(&(p)->f, (v), m) +#define dispatch_atomic_or_orig2o(p, f, v, m) \ + dispatch_atomic_or_orig(&(p)->f, (v), m) +#define dispatch_atomic_xor2o(p, f, v, m) \ + dispatch_atomic_xor(&(p)->f, (v), m) +#define dispatch_atomic_xor_orig2o(p, f, v, m) \ + dispatch_atomic_xor_orig(&(p)->f, (v), m) + +#define dispatch_atomic_inc(p, m) \ + dispatch_atomic_add((p), 1, m) +#define dispatch_atomic_inc_orig(p, m) \ + dispatch_atomic_add_orig((p), 1, m) +#define dispatch_atomic_inc2o(p, f, m) \ + dispatch_atomic_add2o(p, f, 
1, m) +#define dispatch_atomic_inc_orig2o(p, f, m) \ + dispatch_atomic_add_orig2o(p, f, 1, m) +#define dispatch_atomic_dec(p, m) \ + dispatch_atomic_sub((p), 1, m) +#define dispatch_atomic_dec_orig(p, m) \ + dispatch_atomic_sub_orig((p), 1, m) +#define dispatch_atomic_dec2o(p, f, m) \ + dispatch_atomic_sub2o(p, f, 1, m) +#define dispatch_atomic_dec_orig2o(p, f, m) \ + dispatch_atomic_sub_orig2o(p, f, 1, m) + +#define dispatch_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \ + dispatch_atomic_cmpxchgv((p), (e), (v), (g), acquire) +#define dispatch_atomic_tsx_xrel_store(p, v) \ + dispatch_atomic_store(p, v, release) +#define dispatch_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \ + dispatch_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g)) +#define dispatch_atomic_tsx_xrel_store2o(p, f, v) \ + dispatch_atomic_tsx_xrel_store(&(p)->f, (v)) + #if defined(__x86_64__) || defined(__i386__) +#pragma mark - +#pragma mark x86 + +#undef dispatch_hardware_pause +#define dispatch_hardware_pause() ({ __asm__("pause"); }) -// GCC emits nothing for __sync_synchronize() on x86_64 & i386 -#undef _dispatch_atomic_barrier -#define _dispatch_atomic_barrier() \ - __asm__ __volatile__( \ - "mfence" \ - : : : "memory") #undef dispatch_atomic_maximally_synchronizing_barrier #ifdef __LP64__ #define dispatch_atomic_maximally_synchronizing_barrier() \ - do { unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \ - ); } while(0) + ({ unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); }) #else #ifdef __llvm__ #define dispatch_atomic_maximally_synchronizing_barrier() \ - do { unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \ - ); } while(0) + ({ unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); }) #else // gcc does not allow 
inline i386 asm to clobber ebx #define dispatch_atomic_maximally_synchronizing_barrier() \ - do { unsigned long _clbr; __asm__ __volatile__( \ - "pushl %%ebx\n\t" \ - "cpuid\n\t" \ - "popl %%ebx" \ - : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory" \ - ); } while(0) + ({ unsigned long _clbr; __asm__ __volatile__( \ + "pushl %%ebx\n\t" \ + "cpuid\n\t" \ + "popl %%ebx" \ + : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); }) #endif #endif -#undef _dispatch_hardware_pause -#define _dispatch_hardware_pause() __asm__("pause") -#undef _dispatch_debugger -#define _dispatch_debugger() __asm__("int3") -#elif defined(__ppc__) || defined(__ppc64__) - -// GCC emits "sync" for __sync_synchronize() on ppc & ppc64 -#undef _dispatch_atomic_barrier -#ifdef __LP64__ -#define _dispatch_atomic_barrier() \ - __asm__ __volatile__( \ - "isync\n\t" \ - "lwsync" - : : : "memory") -#else -#define _dispatch_atomic_barrier() \ - __asm__ __volatile__( \ - "isync\n\t" \ - "eieio" \ - : : : "memory") -#endif -#undef dispatch_atomic_maximally_synchronizing_barrier -#define dispatch_atomic_maximally_synchronizing_barrier() \ - __asm__ __volatile__( \ - "sync" \ - : : : "memory") #endif diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h new file mode 100644 index 0000000..c5e7be3 --- /dev/null +++ b/src/shims/atomic_sfb.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_SHIMS_ATOMIC_SFB__ +#define __DISPATCH_SHIMS_ATOMIC_SFB__ + +#if __clang__ && __clang_major__ < 5 // +#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x)) +#endif + +// Returns UINT_MAX if all the bits in p were already set. +#define dispatch_atomic_set_first_bit(p,m) _dispatch_atomic_set_first_bit(p,m) + +// TODO: rdar://11477843 +DISPATCH_ALWAYS_INLINE +static inline unsigned int +_dispatch_atomic_set_first_bit(volatile uint32_t *p, unsigned int max_index) +{ + unsigned int index; + typeof(*p) b, mask, b_masked; + + for (;;) { + b = *p; + // ffs returns 1 + index, or 0 if none set. + index = (unsigned int)__builtin_ffs((int)~b); + if (slowpath(index == 0)) { + return UINT_MAX; + } + index--; + if (slowpath(index > max_index)) { + return UINT_MAX; + } + mask = ((typeof(b))1) << index; + b_masked = b | mask; + if (__sync_bool_compare_and_swap(p, b, b_masked)) { + return index; + } + } +} + +#if defined(__x86_64__) || defined(__i386__) + +#undef dispatch_atomic_set_first_bit +// TODO: rdar://11477843 uint64_t -> long +DISPATCH_ALWAYS_INLINE +static inline unsigned int +dispatch_atomic_set_first_bit(volatile uint64_t *p, unsigned int max) +{ + typeof(*p) val, bit; + if (max > (sizeof(val) * 8)) { + __asm__ ( + "1: \n\t" + "mov %[_p], %[_val] \n\t" + "not %[_val] \n\t" + "bsf %[_val], %[_bit] \n\t" /* val is 0 => set zf */ + "jz 2f \n\t" + "lock \n\t" + "bts %[_bit], %[_p] \n\t" /* cf = prev bit val */ + "jc 1b \n\t" /* lost race, retry */ + "jmp 3f \n\t" + "2: \n\t" + "mov %[_all_ones], %[_bit]" "\n\t" + "3: \n\t" + : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit) + : 
[_all_ones] "i" ((typeof(bit))UINT_MAX) : "memory", "cc"); + } else { + __asm__ ( + "1: \n\t" + "mov %[_p], %[_val] \n\t" + "not %[_val] \n\t" + "bsf %[_val], %[_bit] \n\t" /* val is 0 => set zf */ + "jz 2f \n\t" + "cmp %[_max], %[_bit] \n\t" + "jg 2f \n\t" + "lock \n\t" + "bts %[_bit], %[_p] \n\t" /* cf = prev bit val */ + "jc 1b \n\t" /* lost race, retry */ + "jmp 3f \n\t" + "2: \n\t" + "mov %[_all_ones], %[_bit]" "\n\t" + "3: \n\t" + : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit) + : [_all_ones] "i" ((typeof(bit))UINT_MAX), + [_max] "g" ((typeof(bit))max) : "memory", "cc"); + } + return (unsigned int)bit; +} + +#endif + + +#endif // __DISPATCH_SHIMS_ATOMIC_SFB__ diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 2d99759..ede0d48 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -37,8 +37,10 @@ #define DISPATCH_SYSCTL_ACTIVE_CPUS "kern.smp.cpus" #endif +#if !TARGET_OS_WIN32 + static inline uint32_t -_dispatch_get_logicalcpu_max() +_dispatch_get_logicalcpu_max(void) { uint32_t val = 1; #if defined(_COMM_PAGE_LOGICAL_CPUS) @@ -60,7 +62,7 @@ _dispatch_get_logicalcpu_max() } static inline uint32_t -_dispatch_get_physicalcpu_max() +_dispatch_get_physicalcpu_max(void) { uint32_t val = 1; #if defined(_COMM_PAGE_PHYSICAL_CPUS) @@ -82,7 +84,7 @@ _dispatch_get_physicalcpu_max() } static inline uint32_t -_dispatch_get_activecpu() +_dispatch_get_activecpu(void) { uint32_t val = 1; #if defined(_COMM_PAGE_ACTIVE_CPUS) @@ -103,4 +105,32 @@ _dispatch_get_activecpu() return val; } +#else // TARGET_OS_WIN32 + +static inline long +_dispatch_count_bits(unsigned long value) +{ + long bits = 0; + while (value) { + bits += (value & 1); + value = value >> 1; + } + return bits; +} + + +static inline uint32_t +_dispatch_get_ncpus(void) +{ + uint32_t val; + DWORD_PTR procmask, sysmask; + if (GetProcessAffinityMask(GetCurrentProcess(), &procmask, &sysmask)) { + val = _dispatch_count_bits(procmask); + } else { + val = 1; + } + return val; +} +#endif // 
TARGET_OS_WIN32 + #endif /* __DISPATCH_SHIMS_HW_CONFIG__ */ diff --git a/src/shims/malloc_zone.h b/src/shims/malloc_zone.h deleted file mode 100644 index 3975b4f..0000000 --- a/src/shims/malloc_zone.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2009 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __DISPATCH_SHIMS_MALLOC_ZONE__ -#define __DISPATCH_SHIMS_MALLOC_ZONE__ - -#include - -#include - -/* - * Implement malloc zones as a simple wrapper around malloc(3) on systems - * that don't support them. 
- */ -#if !HAVE_MALLOC_CREATE_ZONE -typedef void * malloc_zone_t; - -static inline malloc_zone_t * -malloc_create_zone(size_t start_size, unsigned flags) -{ - - return ((void *)(-1)); -} - -static inline void -malloc_destroy_zone(malloc_zone_t *zone) -{ - -} - -static inline malloc_zone_t * -malloc_default_zone(void) -{ - - return ((void *)(-1)); -} - -static inline malloc_zone_t * -malloc_zone_from_ptr(const void *ptr) -{ - - return ((void *)(-1)); -} - -static inline void * -malloc_zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (malloc(size)); -} - -static inline void * -malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) -{ - - return (calloc(num_items, size)); -} - -static inline void * -malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - return (realloc(ptr, size)); -} - -static inline void -malloc_zone_free(malloc_zone_t *zone, void *ptr) -{ - - free(ptr); -} - -static inline void -malloc_set_zone_name(malloc_zone_t *zone, const char *name) -{ - - /* No-op. */ -} -#endif - -#endif /* __DISPATCH_SHIMS_MALLOC_ZONE__ */ diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h index bf5eb28..f739006 100644 --- a/src/shims/perfmon.h +++ b/src/shims/perfmon.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,30 +32,30 @@ #if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \ (defined(__i386__) || defined(__x86_64__)) #ifdef __LP64__ -#define _dispatch_workitem_inc() asm("incq %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_inc() asm("incq %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_workitem_dec() asm("decq %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_dec() asm("decq %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") #else -#define _dispatch_workitem_inc() asm("incl %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_inc() asm("incl %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_workitem_dec() asm("decl %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_dec() asm("decl %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") #endif #else /* !USE_APPLE_TSD_OPTIMIZATIONS */ static inline void -_dispatch_workitem_inc(void) +_dispatch_perfmon_workitem_inc(void) { unsigned long cnt; cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt); } static inline void -_dispatch_workitem_dec(void) +_dispatch_perfmon_workitem_dec(void) { unsigned long cnt; cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); @@ -89,9 +89,17 @@ flsll(uint64_t val) } #endif +#define _dispatch_perfmon_start() \ + uint64_t start = _dispatch_absolute_time() +#define _dispatch_perfmon_end() \ + _dispatch_queue_merge_stats(start) #else -#define _dispatch_workitem_inc() -#define _dispatch_workitem_dec() + +#define _dispatch_perfmon_workitem_inc() +#define _dispatch_perfmon_workitem_dec() +#define _dispatch_perfmon_start() +#define _dispatch_perfmon_end() + #endif // 
DISPATCH_PERF_MON #endif diff --git a/src/shims/time.h b/src/shims/time.h index 9ae9160..b30b989 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -31,18 +31,54 @@ #error "Please #include instead of this file directly." #endif +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_contention_usleep(unsigned int us) +{ +#if HAVE_MACH +#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) + thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DISPATCH_CONTENTION, us); +#else + thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, ((us-1)/1000)+1); +#endif +#else + usleep(us); +#endif +} + +#if TARGET_OS_WIN32 +static inline unsigned int +sleep(unsigned int seconds) +{ + Sleep(seconds * 1000); // milliseconds + return 0; +} +#endif + uint64_t _dispatch_get_nanoseconds(void); #if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME // x86 currently implements mach time in nanoseconds // this is NOT likely to change -#define _dispatch_time_mach2nano(x) ({x;}) -#define _dispatch_time_nano2mach(x) ({x;}) +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_mach2nano(uint64_t machtime) +{ + return machtime; +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_nano2mach(uint64_t nsec) +{ + return nsec; +} #else typedef struct _dispatch_host_time_data_s { + dispatch_once_t pred; long double frac; bool ratio_1_to_1; - dispatch_once_t pred; } _dispatch_host_time_data_s; extern _dispatch_host_time_data_s _dispatch_host_time_data; void _dispatch_get_host_time_init(void *context); @@ -53,39 +89,48 @@ _dispatch_time_mach2nano(uint64_t machtime) _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - 
return machtime * data->frac; + if (!machtime || slowpath(data->ratio_1_to_1)) { + return machtime; + } + if (machtime >= INT64_MAX) { + return INT64_MAX; + } + long double big_tmp = ((long double)machtime * data->frac) + .5; + if (slowpath(big_tmp >= INT64_MAX)) { + return INT64_MAX; + } + return (uint64_t)big_tmp; } -static inline int64_t -_dispatch_time_nano2mach(int64_t nsec) +static inline uint64_t +_dispatch_time_nano2mach(uint64_t nsec) { _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - if (slowpath(_dispatch_host_time_data.ratio_1_to_1)) { + if (!nsec || slowpath(data->ratio_1_to_1)) { return nsec; } - - long double big_tmp = nsec; - - // Divide by tbi.numer/tbi.denom to convert nsec to Mach absolute time - big_tmp /= data->frac; - - // Clamp to a 64bit signed int - if (slowpath(big_tmp > INT64_MAX)) { + if (nsec >= INT64_MAX) { return INT64_MAX; } - if (slowpath(big_tmp < INT64_MIN)) { - return INT64_MIN; + long double big_tmp = ((long double)nsec / data->frac) + .5; + if (slowpath(big_tmp >= INT64_MAX)) { + return INT64_MAX; } - return big_tmp; + return (uint64_t)big_tmp; } #endif static inline uint64_t _dispatch_absolute_time(void) { -#if !HAVE_MACH_ABSOLUTE_TIME +#if HAVE_MACH_ABSOLUTE_TIME + return mach_absolute_time(); +#elif TARGET_OS_WIN32 + LARGE_INTEGER now; + return QueryPerformanceCounter(&now) ? now.QuadPart : 0; +#else struct timespec ts; int ret; @@ -100,9 +145,8 @@ _dispatch_absolute_time(void) /* XXXRW: Some kind of overflow detection needed? */ return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); -#else - return mach_absolute_time(); -#endif +#endif // HAVE_MACH_ABSOLUTE_TIME } -#endif + +#endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index f300d64..2a0ab22 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. 
All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -40,11 +40,19 @@ #if DISPATCH_USE_DIRECT_TSD static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; +#if DISPATCH_USE_OS_SEMAPHORE_CACHE +static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE; +#else static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1; +#endif static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; +#if DISPATCH_INTROSPECTION +static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY5; +#elif DISPATCH_PERF_MON static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5; +#endif DISPATCH_TSD_INLINE static inline void @@ -54,11 +62,20 @@ _dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) } #else extern pthread_key_t dispatch_queue_key; +#if DISPATCH_USE_OS_SEMAPHORE_CACHE +#error "Invalid DISPATCH_USE_OS_SEMAPHORE_CACHE configuration" +#else extern pthread_key_t dispatch_sema4_key; +#endif extern pthread_key_t dispatch_cache_key; extern pthread_key_t dispatch_io_key; extern pthread_key_t dispatch_apply_key; +#if DISPATCH_INTROSPECTION +extern pthread_key_t dispatch_introspection_key; +#elif DISPATCH_PERF_MON extern pthread_key_t dispatch_bcounter_key; +#endif + DISPATCH_TSD_INLINE static inline void @@ -96,8 +113,18 @@ _dispatch_thread_getspecific(pthread_key_t k) } #endif // DISPATCH_USE_TSD_BASE -#define _dispatch_thread_self (uintptr_t)pthread_self - -#undef DISPATCH_TSD_INLINE - +#if TARGET_OS_WIN32 +#define _dispatch_thread_self() ((uintptr_t)GetCurrentThreadId()) +#else +#if DISPATCH_USE_DIRECT_TSD +#define _dispatch_thread_self() ((uintptr_t)_dispatch_thread_getspecific( \ + _PTHREAD_TSD_SLOT_PTHREAD_SELF)) +#else +#define _dispatch_thread_self() ((uintptr_t)pthread_self()) #endif +#endif + +DISPATCH_TSD_INLINE 
DISPATCH_CONST +static inline unsigned int +_dispatch_cpu_number(void) +{ diff --git a/src/source.c b/src/source.c index 2b0a9a2..067c5ba 100644 --- a/src/source.c +++ b/src/source.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -26,22 +26,50 @@ #include static void _dispatch_source_merge_kevent(dispatch_source_t ds, - const struct kevent *ke); -static void _dispatch_kevent_register(dispatch_source_t ds); -static void _dispatch_kevent_unregister(dispatch_source_t ds); + const struct kevent64_s *ke); +static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp); +static void _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg); static bool _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static inline void _dispatch_source_timer_init(void); -static void _dispatch_timer_list_update(dispatch_source_t ds); +static void _dispatch_kevent_drain(struct kevent64_s *ke); +static void _dispatch_kevent_merge(struct kevent64_s *ke); +static void _dispatch_timers_kevent(struct kevent64_s *ke); +static void _dispatch_timers_unregister(dispatch_source_t ds, + dispatch_kevent_t dk); +static void _dispatch_timers_update(dispatch_source_t ds); +static void _dispatch_timer_aggregates_check(void); +static void _dispatch_timer_aggregates_register(dispatch_source_t ds); +static void _dispatch_timer_aggregates_update(dispatch_source_t ds, + unsigned int tidx); +static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds, + unsigned int tidx); static inline unsigned long _dispatch_source_timer_data( dispatch_source_refs_t dr, unsigned long prev); +static long _dispatch_kq_update(const struct kevent64_s *); +static void _dispatch_memorystatus_init(void); #if HAVE_MACH +static void _dispatch_mach_host_calendar_change_register(void); +static void 
_dispatch_mach_recv_msg_buf_init(void); static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static void _dispatch_drain_mach_messages(struct kevent *ke); +static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, + uint32_t new_flags, uint32_t del_flags); +static inline void _dispatch_kevent_mach_portset(struct kevent64_s *ke); +#else +static inline void _dispatch_mach_host_calendar_change_register(void) {} +static inline void _dispatch_mach_recv_msg_buf_init(void) {} #endif +static const char * _evfiltstr(short filt); #if DISPATCH_DEBUG +static void _dispatch_kevent_debug(struct kevent64_s* kev, const char* str); static void _dispatch_kevent_debugger(void *context); +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ + dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) +#else +static inline void +_dispatch_kevent_debug(struct kevent64_s* kev DISPATCH_UNUSED, + const char* str DISPATCH_UNUSED) {} +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif #pragma mark - @@ -53,9 +81,9 @@ dispatch_source_create(dispatch_source_type_t type, unsigned long mask, dispatch_queue_t q) { - const struct kevent *proto_kev = &type->ke; - dispatch_source_t ds = NULL; - dispatch_kevent_t dk = NULL; + const struct kevent64_s *proto_kev = &type->ke; + dispatch_source_t ds; + dispatch_kevent_t dk; // input validation if (type == NULL || (mask & ~type->mask)) { @@ -71,44 +99,49 @@ dispatch_source_create(dispatch_source_type_t type, case EVFILT_FS: #if DISPATCH_USE_VM_PRESSURE case EVFILT_VM: +#endif +#if DISPATCH_USE_MEMORYSTATUS + case EVFILT_MEMORYSTATUS: #endif case DISPATCH_EVFILT_CUSTOM_ADD: case DISPATCH_EVFILT_CUSTOM_OR: - case DISPATCH_EVFILT_TIMER: if (handle) { return NULL; } break; + case DISPATCH_EVFILT_TIMER: + if (!!handle ^ !!type->ke.ident) { + return NULL; + } + break; default: break; } - dk = calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = *proto_kev; - 
dk->dk_kevent.ident = handle; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags |= (uint32_t)mask; - dk->dk_kevent.udata = dk; - TAILQ_INIT(&dk->dk_sources); - ds = _dispatch_alloc(DISPATCH_VTABLE(source), sizeof(struct dispatch_source_s)); // Initialize as a queue first, then override some settings below. _dispatch_queue_init((dispatch_queue_t)ds); - strlcpy(ds->dq_label, "source", sizeof(ds->dq_label)); + ds->dq_label = "source"; - // Dispatch Object - ds->do_ref_cnt++; // the reference the manger queue holds + ds->do_ref_cnt++; // the reference the manager queue holds ds->do_ref_cnt++; // since source is created suspended ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; // The initial target queue is the manager queue, in order to get // the source installed. ds->do_targetq = &_dispatch_mgr_q; - // Dispatch Source - ds->ds_ident_hack = dk->dk_kevent.ident; + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = *proto_kev; + dk->dk_kevent.ident = handle; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags |= (uint32_t)mask; + dk->dk_kevent.udata = (uintptr_t)dk; + TAILQ_INIT(&dk->dk_sources); + ds->ds_dkev = dk; ds->ds_pending_data_mask = dk->dk_kevent.fflags; + ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { ds->ds_is_level = true; ds->ds_needs_rearm = true; @@ -116,38 +149,30 @@ dispatch_source_create(dispatch_source_type_t type, // we cheat and use EV_CLEAR to mean a "flag thingy" ds->ds_is_adder = true; } - // Some sources require special processing if (type->init != NULL) { type->init(ds, type, handle, mask, q); } + dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); + if (fastpath(!ds->ds_refs)) { - ds->ds_refs = calloc(1ul, sizeof(struct dispatch_source_refs_s)); - if (slowpath(!ds->ds_refs)) { - goto out_bad; - } + ds->ds_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_source_refs_s)); } ds->ds_refs->dr_source_wref = 
_dispatch_ptr2wref(ds); - dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); // First item on the queue sets the user-specified target queue dispatch_set_target_queue(ds, q); -#if DISPATCH_DEBUG - dispatch_debug(ds, "%s", __func__); -#endif + _dispatch_object_debug(ds, "%s", __func__); return ds; - -out_bad: - free(ds); - free(dk); - return NULL; } void _dispatch_source_dispose(dispatch_source_t ds) { + _dispatch_object_debug(ds, "%s", __func__); free(ds->ds_refs); - _dispatch_queue_dispose((dispatch_queue_t)ds); + _dispatch_queue_destroy(ds); } void @@ -159,16 +184,14 @@ _dispatch_source_xref_dispose(dispatch_source_t ds) void dispatch_source_cancel(dispatch_source_t ds) { -#if DISPATCH_DEBUG - dispatch_debug(ds, "%s", __func__); -#endif + _dispatch_object_debug(ds, "%s", __func__); // Right after we set the cancel flag, someone else // could potentially invoke the source, do the cancelation, // unregister the source, and deallocate it. We would // need to therefore retain/release before setting the bit _dispatch_retain(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_CANCELED); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_CANCELED, relaxed); _dispatch_wakeup(ds); _dispatch_release(ds); } @@ -189,7 +212,7 @@ dispatch_source_get_mask(dispatch_source_t ds) uintptr_t dispatch_source_get_handle(dispatch_source_t ds) { - return (int)ds->ds_ident_hack; + return (unsigned int)ds->ds_ident_hack; } unsigned long @@ -201,9 +224,9 @@ dispatch_source_get_data(dispatch_source_t ds) void dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) { - struct kevent kev = { + struct kevent64_s kev = { .fflags = (typeof(kev.fflags))val, - .data = val, + .data = (typeof(kev.data))val, }; dispatch_assert( @@ -222,8 +245,6 @@ dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) static void _dispatch_source_set_event_handler2(void *context) { - struct Block_layout *bl = context; - dispatch_source_t ds = 
(dispatch_source_t)_dispatch_queue_get_current(); dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_source_refs_t dr = ds->ds_refs; @@ -231,8 +252,8 @@ _dispatch_source_set_event_handler2(void *context) if (ds->ds_handler_is_block && dr->ds_handler_ctxt) { Block_release(dr->ds_handler_ctxt); } - dr->ds_handler_func = bl ? (void *)bl->invoke : NULL; - dr->ds_handler_ctxt = bl; + dr->ds_handler_func = context ? _dispatch_Block_invoke(context) : NULL; + dr->ds_handler_ctxt = context; ds->ds_handler_is_block = true; } @@ -241,7 +262,7 @@ dispatch_source_set_event_handler(dispatch_source_t ds, dispatch_block_t handler) { handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_event_handler2); } #endif /* __BLOCKS__ */ @@ -267,7 +288,7 @@ void dispatch_source_set_event_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_event_handler_f); } @@ -293,7 +314,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t ds, dispatch_block_t handler) { handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_cancel_handler2); } #endif /* __BLOCKS__ */ @@ -318,7 +339,7 @@ void dispatch_source_set_cancel_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_cancel_handler_f); } @@ -342,7 +363,7 @@ dispatch_source_set_registration_handler(dispatch_source_t ds, dispatch_block_t handler) { handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + 
_dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_registration_handler2); } #endif /* __BLOCKS__ */ @@ -367,7 +388,7 @@ void dispatch_source_set_registration_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_registration_handler_f); } @@ -451,7 +472,7 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) return; } dispatch_source_refs_t dr = ds->ds_refs; - prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0); + prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); if (ds->ds_is_level) { ds->ds_data = ~prev; } else if (ds->ds_is_timer && ds_timer(dr).target && prev) { @@ -464,13 +485,33 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) } } +static void +_dispatch_source_kevent_unregister(dispatch_source_t ds) +{ + _dispatch_object_debug(ds, "%s", __func__); + dispatch_kevent_t dk = ds->ds_dkev; + ds->ds_dkev = NULL; + switch (dk->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + _dispatch_timers_unregister(ds, dk); + break; + default: + TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); + _dispatch_kevent_unregister(dk, (uint32_t)ds->ds_pending_data_mask); + break; + } + + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + ds->ds_needs_rearm = false; // re-arm is pointless and bad now + _dispatch_release(ds); // the retain is done at creation time +} + static void _dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) { switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: - // called on manager queue only - return _dispatch_timer_list_update(ds); + return _dispatch_timers_update(ds); case EVFILT_MACHPORT: if (ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) { new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH @@ -478,13 +519,38 @@ _dispatch_source_kevent_resume(dispatch_source_t ds, 
uint32_t new_flags) break; } if (_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) { - _dispatch_kevent_unregister(ds); + _dispatch_source_kevent_unregister(ds); } } -dispatch_queue_t -_dispatch_source_invoke(dispatch_source_t ds) +static void +_dispatch_source_kevent_register(dispatch_source_t ds) +{ + dispatch_assert_zero(ds->ds_is_installed); + switch (ds->ds_dkev->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + return _dispatch_timers_update(ds); + } + uint32_t flags; + bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, &flags); + TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list); + if (do_resume || ds->ds_needs_rearm) { + _dispatch_source_kevent_resume(ds, flags); + } + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_object_debug(ds, "%s", __func__); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_source_invoke2(dispatch_object_t dou, + _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) { + dispatch_source_t ds = dou._ds; + if (slowpath(_dispatch_queue_drain(ds))) { + DISPATCH_CLIENT_CRASH("Sync onto source"); + } + // This function performs all source actions. Each action is responsible // for verifying that it takes place on the appropriate queue. 
If the // current queue is not the correct queue for this action, the correct queue @@ -500,7 +566,8 @@ _dispatch_source_invoke(dispatch_source_t ds) if (dq != &_dispatch_mgr_q) { return &_dispatch_mgr_q; } - _dispatch_kevent_register(ds); + _dispatch_source_kevent_register(ds); + ds->ds_is_installed = true; if (dr->ds_registration_handler) { return ds->do_targetq; } @@ -529,7 +596,7 @@ _dispatch_source_invoke(dispatch_source_t ds) if (dq != &_dispatch_mgr_q) { return &_dispatch_mgr_q; } - _dispatch_kevent_unregister(ds); + _dispatch_source_kevent_unregister(ds); } if (dr->ds_cancel_handler || ds->ds_handler_is_block || ds->ds_registration_is_block) { @@ -555,13 +622,20 @@ _dispatch_source_invoke(dispatch_source_t ds) return &_dispatch_mgr_q; } _dispatch_source_kevent_resume(ds, 0); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); } return NULL; } -bool +DISPATCH_NOINLINE +void +_dispatch_source_invoke(dispatch_source_t ds) +{ + _dispatch_queue_class_invoke(ds, _dispatch_source_invoke2); +} + +unsigned long _dispatch_source_probe(dispatch_source_t ds) { // This function determines whether the source needs to be invoked. @@ -592,128 +666,69 @@ _dispatch_source_probe(dispatch_source_t ds) // The source needs to be rearmed on the manager queue. return true; } - // Nothing to do. - return false; + return (ds->dq_items_tail != NULL); } -#pragma mark - -#pragma mark dispatch_source_kevent - static void -_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke) +_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent64_s *ke) { - struct kevent fake; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { return; } - - // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie - // . As a workaround, we simulate an exit event for - // any EVFILT_PROC with an invalid pid . 
- if (ke->flags & EV_ERROR) { - if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - fake = *ke; - fake.flags &= ~EV_ERROR; - fake.fflags = NOTE_EXIT; - fake.data = 0; - ke = &fake; -#if DISPATCH_USE_VM_PRESSURE - } else if (ke->filter == EVFILT_VM && ke->data == ENOTSUP) { - // Memory pressure kevent is not supported on all platforms - // - return; -#endif - } else { - // log the unexpected error - (void)dispatch_assume_zero(ke->data); - return; - } - } - if (ds->ds_is_level) { // ke->data is signed and "negative available data" makes no sense // zero bytes happens when EV_EOF is set // 10A268 does not fail this assert with EVFILT_READ and a 10 GB file dispatch_assert(ke->data >= 0l); - ds->ds_pending_data = ~ke->data; + dispatch_atomic_store2o(ds, ds_pending_data, ~(unsigned long)ke->data, + relaxed); } else if (ds->ds_is_adder) { - (void)dispatch_atomic_add2o(ds, ds_pending_data, ke->data); + (void)dispatch_atomic_add2o(ds, ds_pending_data, + (unsigned long)ke->data, relaxed); } else if (ke->fflags & ds->ds_pending_data_mask) { (void)dispatch_atomic_or2o(ds, ds_pending_data, - ke->fflags & ds->ds_pending_data_mask); + ke->fflags & ds->ds_pending_data_mask, relaxed); } - // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery if (ds->ds_needs_rearm) { - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); } _dispatch_wakeup(ds); } -void -_dispatch_source_drain_kevent(struct kevent *ke) -{ - dispatch_kevent_t dk = ke->udata; - dispatch_source_refs_t dri; - -#if DISPATCH_DEBUG - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); -#endif - - dispatch_debug_kevents(ke, 1, __func__); - -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT) { - return _dispatch_drain_mach_messages(ke); - } -#endif - dispatch_assert(dk); - - if (ke->flags & EV_ONESHOT) { - dk->dk_kevent.flags |= EV_ONESHOT; - } - - TAILQ_FOREACH(dri, &dk->dk_sources, 
dr_list) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); - } -} - #pragma mark - #pragma mark dispatch_kevent_t +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD +static void _dispatch_kevent_guard(dispatch_kevent_t dk); +static void _dispatch_kevent_unguard(dispatch_kevent_t dk); +#else +static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; } +static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; } +#endif + static struct dispatch_kevent_s _dispatch_kevent_data_or = { .dk_kevent = { .filter = DISPATCH_EVFILT_CUSTOM_OR, .flags = EV_CLEAR, - .udata = &_dispatch_kevent_data_or, }, .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), }; static struct dispatch_kevent_s _dispatch_kevent_data_add = { .dk_kevent = { .filter = DISPATCH_EVFILT_CUSTOM_ADD, - .udata = &_dispatch_kevent_data_add, }, .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), }; -#if TARGET_OS_EMBEDDED -#define DSL_HASH_SIZE 64u // must be a power of two -#else -#define DSL_HASH_SIZE 256u // must be a power of two -#endif #define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) DISPATCH_CACHELINE_ALIGN static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE]; -static dispatch_once_t __dispatch_kevent_init_pred; - static void -_dispatch_kevent_init(void *context DISPATCH_UNUSED) +_dispatch_kevent_init() { unsigned int i; for (i = 0; i < DSL_HASH_SIZE; i++) { @@ -724,24 +739,28 @@ _dispatch_kevent_init(void *context DISPATCH_UNUSED) &_dispatch_kevent_data_or, dk_list); TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_add, dk_list); - - _dispatch_source_timer_init(); + _dispatch_kevent_data_or.dk_kevent.udata = + (uintptr_t)&_dispatch_kevent_data_or; + _dispatch_kevent_data_add.dk_kevent.udata = + (uintptr_t)&_dispatch_kevent_data_add; } static inline uintptr_t -_dispatch_kevent_hash(uintptr_t ident, short filter) +_dispatch_kevent_hash(uint64_t ident, short filter) { - 
uintptr_t value; + uint64_t value; #if HAVE_MACH - value = (filter == EVFILT_MACHPORT ? MACH_PORT_INDEX(ident) : ident); + value = (filter == EVFILT_MACHPORT || + filter == DISPATCH_EVFILT_MACH_NOTIFICATION ? + MACH_PORT_INDEX(ident) : ident); #else value = ident; #endif - return DSL_HASH(value); + return DSL_HASH((uintptr_t)value); } static dispatch_kevent_t -_dispatch_kevent_find(uintptr_t ident, short filter) +_dispatch_kevent_find(uint64_t ident, short filter) { uintptr_t hash = _dispatch_kevent_hash(ident, filter); dispatch_kevent_t dki; @@ -757,57 +776,43 @@ _dispatch_kevent_find(uintptr_t ident, short filter) static void _dispatch_kevent_insert(dispatch_kevent_t dk) { + _dispatch_kevent_guard(dk); uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, dk->dk_kevent.filter); - TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); } // Find existing kevents, and merge any new flags if necessary -static void -_dispatch_kevent_register(dispatch_source_t ds) +static bool +_dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) { - dispatch_kevent_t dk; - typeof(dk->dk_kevent.fflags) new_flags; + dispatch_kevent_t dk, ds_dkev = *dkp; + uint32_t new_flags; bool do_resume = false; - if (ds->ds_is_installed) { - return; - } - ds->ds_is_installed = true; - - dispatch_once_f(&__dispatch_kevent_init_pred, - NULL, _dispatch_kevent_init); - - dk = _dispatch_kevent_find(ds->ds_dkev->dk_kevent.ident, - ds->ds_dkev->dk_kevent.filter); - + dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident, + ds_dkev->dk_kevent.filter); if (dk) { // If an existing dispatch kevent is found, check to see if new flags // need to be added to the existing kevent - new_flags = ~dk->dk_kevent.fflags & ds->ds_dkev->dk_kevent.fflags; - dk->dk_kevent.fflags |= ds->ds_dkev->dk_kevent.fflags; - free(ds->ds_dkev); - ds->ds_dkev = dk; + new_flags = ~dk->dk_kevent.fflags & ds_dkev->dk_kevent.fflags; + dk->dk_kevent.fflags |= ds_dkev->dk_kevent.fflags; + free(ds_dkev); + *dkp = dk; 
do_resume = new_flags; } else { - dk = ds->ds_dkev; + dk = ds_dkev; _dispatch_kevent_insert(dk); new_flags = dk->dk_kevent.fflags; do_resume = true; } - - TAILQ_INSERT_TAIL(&dk->dk_sources, ds->ds_refs, dr_list); - // Re-register the kevent with the kernel if new flags were added // by the dispatch kevent if (do_resume) { dk->dk_kevent.flags |= EV_ADD; } - if (do_resume || ds->ds_needs_rearm) { - _dispatch_source_kevent_resume(ds, new_flags); - } - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED); + *flgp = new_flags; + return do_resume; } static bool @@ -824,6 +829,8 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, #if HAVE_MACH case EVFILT_MACHPORT: return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); + case DISPATCH_EVFILT_MACH_NOTIFICATION: + return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags); #endif case EVFILT_PROC: if (dk->dk_kevent.flags & EV_ONESHOT) { @@ -831,7 +838,7 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, } // fall through default: - r = _dispatch_update_kq(&dk->dk_kevent); + r = _dispatch_kq_update(&dk->dk_kevent); if (dk->dk_kevent.flags & EV_DISPATCH) { dk->dk_kevent.flags &= ~EV_ADD; } @@ -854,6 +861,9 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) case EVFILT_MACHPORT: _dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags); break; + case DISPATCH_EVFILT_MACH_NOTIFICATION: + _dispatch_kevent_mach_notify_resume(dk, 0, dk->dk_kevent.fflags); + break; #endif case EVFILT_PROC: if (dk->dk_kevent.flags & EV_ONESHOT) { @@ -863,7 +873,8 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) default: if (~dk->dk_kevent.flags & EV_DELETE) { dk->dk_kevent.flags |= EV_DELETE; - _dispatch_update_kq(&dk->dk_kevent); + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); + _dispatch_kq_update(&dk->dk_kevent); } break; } @@ -871,220 +882,247 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) hash = _dispatch_kevent_hash(dk->dk_kevent.ident, dk->dk_kevent.filter); 
TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); + _dispatch_kevent_unguard(dk); free(dk); } static void -_dispatch_kevent_unregister(dispatch_source_t ds) +_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg) { - dispatch_kevent_t dk = ds->ds_dkev; dispatch_source_refs_t dri; uint32_t del_flags, fflags = 0; - ds->ds_dkev = NULL; - - TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); - if (TAILQ_EMPTY(&dk->dk_sources)) { _dispatch_kevent_dispose(dk); } else { TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { dispatch_source_t dsi = _dispatch_source_from_refs(dri); - fflags |= (uint32_t)dsi->ds_pending_data_mask; + uint32_t mask = (uint32_t)dsi->ds_pending_data_mask; + fflags |= mask; } - del_flags = (uint32_t)ds->ds_pending_data_mask & ~fflags; + del_flags = flg & ~fflags; if (del_flags) { dk->dk_kevent.flags |= EV_ADD; dk->dk_kevent.fflags = fflags; _dispatch_kevent_resume(dk, 0, del_flags); } } - - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); - ds->ds_needs_rearm = false; // re-arm is pointless and bad now - _dispatch_release(ds); // the retain is done at creation time -} - -#pragma mark - -#pragma mark dispatch_timer - -DISPATCH_CACHELINE_ALIGN -static struct dispatch_kevent_s _dispatch_kevent_timer[] = { - [DISPATCH_TIMER_INDEX_WALL] = { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_WALL, - .filter = DISPATCH_EVFILT_TIMER, - .udata = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER( - _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL].dk_sources), - }, - [DISPATCH_TIMER_INDEX_MACH] = { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_MACH, - .filter = DISPATCH_EVFILT_TIMER, - .udata = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER( - _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH].dk_sources), - }, - [DISPATCH_TIMER_INDEX_DISARM] = { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_DISARM, - .filter = DISPATCH_EVFILT_TIMER, - .udata 
= &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER( - _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM].dk_sources), - }, -}; -// Don't count disarmed timer list -#define DISPATCH_TIMER_COUNT ((sizeof(_dispatch_kevent_timer) \ - / sizeof(_dispatch_kevent_timer[0])) - 1) - -static inline void -_dispatch_source_timer_init(void) -{ - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_WALL)], - &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_MACH)], - &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_DISARM)], - &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM], dk_list); } -DISPATCH_ALWAYS_INLINE -static inline unsigned int -_dispatch_source_timer_idx(dispatch_source_refs_t dr) +DISPATCH_NOINLINE +static void +_dispatch_kevent_proc_exit(struct kevent64_s *ke) { - return ds_timer(dr).flags & DISPATCH_TIMER_WALL_CLOCK ? - DISPATCH_TIMER_INDEX_WALL : DISPATCH_TIMER_INDEX_MACH; + // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie + // . As a workaround, we simulate an exit event for + // any EVFILT_PROC with an invalid pid . 
+ struct kevent64_s fake; + fake = *ke; + fake.flags &= ~EV_ERROR; + fake.fflags = NOTE_EXIT; + fake.data = 0; + _dispatch_kevent_drain(&fake); } -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_source_timer_now2(unsigned int timer) +DISPATCH_NOINLINE +static void +_dispatch_kevent_error(struct kevent64_s *ke) { - switch (timer) { - case DISPATCH_TIMER_INDEX_MACH: - return _dispatch_absolute_time(); - case DISPATCH_TIMER_INDEX_WALL: - return _dispatch_get_nanoseconds(); - default: - DISPATCH_CRASH("Invalid timer"); + _dispatch_kevent_debug(ke, __func__); + if (ke->data) { + // log the unexpected error + _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + ke->flags & EV_DELETE ? "delete" : + ke->flags & EV_ADD ? "add" : + ke->flags & EV_ENABLE ? "enable" : "monitor", + (int)ke->data); } } -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_source_timer_now(dispatch_source_refs_t dr) +static void +_dispatch_kevent_drain(struct kevent64_s *ke) { - return _dispatch_source_timer_now2(_dispatch_source_timer_idx(dr)); +#if DISPATCH_DEBUG + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); +#endif + if (ke->filter == EVFILT_USER) { + return; + } + if (slowpath(ke->flags & EV_ERROR)) { + if (ke->filter == EVFILT_PROC) { + if (ke->flags & EV_DELETE) { + // Process exited while monitored + return; + } else if (ke->data == ESRCH) { + return _dispatch_kevent_proc_exit(ke); + } +#if DISPATCH_USE_VM_PRESSURE + } else if (ke->filter == EVFILT_VM && ke->data == ENOTSUP) { + // Memory pressure kevent is not supported on all platforms + // + return; +#endif +#if DISPATCH_USE_MEMORYSTATUS + } else if (ke->filter == EVFILT_MEMORYSTATUS && + (ke->data == EINVAL || ke->data == ENOTSUP)) { + // Memory status kevent is not supported on all platforms + return; +#endif + } + return _dispatch_kevent_error(ke); + } + _dispatch_kevent_debug(ke, __func__); + if (ke->filter == EVFILT_TIMER) { + return _dispatch_timers_kevent(ke); 
+ } +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT) { + return _dispatch_kevent_mach_portset(ke); + } +#endif + return _dispatch_kevent_merge(ke); } -// Updates the ordered list of timers based on next fire date for changes to ds. -// Should only be called from the context of _dispatch_mgr_q. +DISPATCH_NOINLINE static void -_dispatch_timer_list_update(dispatch_source_t ds) +_dispatch_kevent_merge(struct kevent64_s *ke) { - dispatch_source_refs_t dr = ds->ds_refs, dri = NULL; + dispatch_kevent_t dk; + dispatch_source_refs_t dri; - dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q); + dk = (void*)ke->udata; + dispatch_assert(dk); - // do not reschedule timers unregistered with _dispatch_kevent_unregister() - if (!ds->ds_dkev) { - return; + if (ke->flags & EV_ONESHOT) { + dk->dk_kevent.flags |= EV_ONESHOT; } - - // Ensure the source is on the global kevent lists before it is removed and - // readded below. - _dispatch_kevent_register(ds); - - TAILQ_REMOVE(&ds->ds_dkev->dk_sources, dr, dr_list); - - // Move timers that are disabled, suspended or have missed intervals to the - // disarmed list, rearm after resume resp. 
source invoke will reenable them - if (!ds_timer(dr).target || DISPATCH_OBJECT_SUSPENDED(ds) || - ds->ds_pending_data) { - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); - ds->ds_dkev = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM]; - TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, (dispatch_source_refs_t)dr, - dr_list); - return; + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); } +} - // change the list if the clock type has changed - ds->ds_dkev = &_dispatch_kevent_timer[_dispatch_source_timer_idx(dr)]; +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD +static void +_dispatch_kevent_guard(dispatch_kevent_t dk) +{ + guardid_t guard; + const unsigned int guard_flags = GUARD_CLOSE; + int r, fd_flags = 0; + switch (dk->dk_kevent.filter) { + case EVFILT_READ: + case EVFILT_WRITE: + case EVFILT_VNODE: + guard = &dk->dk_kevent; + r = change_fdguard_np((int)dk->dk_kevent.ident, NULL, 0, + &guard, guard_flags, &fd_flags); + if (slowpath(r == -1)) { + int err = errno; + if (err != EPERM) { + (void)dispatch_assume_zero(err); + } + return; + } + dk->dk_kevent.ext[0] = guard_flags; + dk->dk_kevent.ext[1] = fd_flags; + break; + } +} - TAILQ_FOREACH(dri, &ds->ds_dkev->dk_sources, dr_list) { - if (ds_timer(dri).target == 0 || - ds_timer(dr).target < ds_timer(dri).target) { - break; +static void +_dispatch_kevent_unguard(dispatch_kevent_t dk) +{ + guardid_t guard; + unsigned int guard_flags; + int r, fd_flags; + switch (dk->dk_kevent.filter) { + case EVFILT_READ: + case EVFILT_WRITE: + case EVFILT_VNODE: + guard_flags = (unsigned int)dk->dk_kevent.ext[0]; + if (!guard_flags) { + return; + } + guard = &dk->dk_kevent; + fd_flags = (int)dk->dk_kevent.ext[1]; + r = change_fdguard_np((int)dk->dk_kevent.ident, &guard, + guard_flags, NULL, 0, &fd_flags); + if (slowpath(r == -1)) { + (void)dispatch_assume_zero(errno); + return; } + dk->dk_kevent.ext[0] = 0; + break; } +} +#endif // 
DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD - if (dri) { - TAILQ_INSERT_BEFORE(dri, dr, dr_list); - } else { - TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, dr, dr_list); +#pragma mark - +#pragma mark dispatch_source_timer + +#if DISPATCH_USE_DTRACE && DISPATCH_USE_DTRACE_INTROSPECTION +static dispatch_source_refs_t + _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]; +#define _dispatch_trace_next_timer_set(x, q) \ + _dispatch_trace_next_timer[(q)] = (x) +#define _dispatch_trace_next_timer_program(d, q) \ + _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d)) +#define _dispatch_trace_next_timer_wake(q) \ + _dispatch_trace_timer_wake(_dispatch_trace_next_timer[(q)]) +#else +#define _dispatch_trace_next_timer_set(x, q) +#define _dispatch_trace_next_timer_program(d, q) +#define _dispatch_trace_next_timer_wake(q) +#endif + +#define _dispatch_source_timer_telemetry_enabled() false + +DISPATCH_NOINLINE +static void +_dispatch_source_timer_telemetry_slow(dispatch_source_t ds, + uintptr_t ident, struct dispatch_timer_source_s *values) +{ + if (_dispatch_trace_timer_configure_enabled()) { + _dispatch_trace_timer_configure(ds, ident, values); } } +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_run_timers2(unsigned int timer) +_dispatch_source_timer_telemetry(dispatch_source_t ds, uintptr_t ident, + struct dispatch_timer_source_s *values) { - dispatch_source_refs_t dr; - dispatch_source_t ds; - uint64_t now, missed; + if (_dispatch_trace_timer_configure_enabled() || + _dispatch_source_timer_telemetry_enabled()) { + _dispatch_source_timer_telemetry_slow(ds, ident, values); + asm(""); // prevent tailcall + } +} - now = _dispatch_source_timer_now2(timer); - while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources))) { - ds = _dispatch_source_from_refs(dr); - // We may find timers on the wrong list due to a pending update from - // dispatch_source_set_timer. Force an update of the list in that case. 
- if (timer != ds->ds_ident_hack) { - _dispatch_timer_list_update(ds); - continue; - } - if (!ds_timer(dr).target) { - // no configured timers on the list - break; - } - if (ds_timer(dr).target > now) { - // Done running timers for now. - break; - } - // Remove timers that are suspended or have missed intervals from the - // list, rearm after resume resp. source invoke will reenable them - if (DISPATCH_OBJECT_SUSPENDED(ds) || ds->ds_pending_data) { - _dispatch_timer_list_update(ds); - continue; - } - // Calculate number of missed intervals. - missed = (now - ds_timer(dr).target) / ds_timer(dr).interval; - if (++missed > INT_MAX) { - missed = INT_MAX; - } - ds_timer(dr).target += missed * ds_timer(dr).interval; - _dispatch_timer_list_update(ds); - ds_timer(dr).last_fire = now; - (void)dispatch_atomic_add2o(ds, ds_pending_data, (int)missed); - _dispatch_wakeup(ds); - } -} +// approx 1 year (60s * 60m * 24h * 365d) +#define FOREVER_NSEC 31536000000000000ull -void -_dispatch_run_timers(void) +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_source_timer_now(uint64_t nows[], unsigned int tidx) { - dispatch_once_f(&__dispatch_kevent_init_pred, - NULL, _dispatch_kevent_init); - - unsigned int i; - for (i = 0; i < DISPATCH_TIMER_COUNT; i++) { - if (!TAILQ_EMPTY(&_dispatch_kevent_timer[i].dk_sources)) { - _dispatch_run_timers2(i); - } + unsigned int tk = DISPATCH_TIMER_KIND(tidx); + if (nows && fastpath(nows[tk])) { + return nows[tk]; + } + uint64_t now; + switch (tk) { + case DISPATCH_TIMER_KIND_MACH: + now = _dispatch_absolute_time(); + break; + case DISPATCH_TIMER_KIND_WALL: + now = _dispatch_get_nanoseconds(); + break; } + if (nows) { + nows[tk] = now; + } + return now; } static inline unsigned long @@ -1092,7 +1130,8 @@ _dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) { // calculate the number of intervals since last fire unsigned long data, missed; - uint64_t now = _dispatch_source_timer_now(dr); + uint64_t now; + now = 
_dispatch_source_timer_now(NULL, _dispatch_source_timer_idx(dr)); missed = (unsigned long)((now - ds_timer(dr).last_fire) / ds_timer(dr).interval); // correct for missed intervals already delivered last time @@ -1101,51 +1140,6 @@ _dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) return data; } -// approx 1 year (60s * 60m * 24h * 365d) -#define FOREVER_NSEC 31536000000000000ull - -struct timespec * -_dispatch_get_next_timer_fire(struct timespec *howsoon) -{ - // - // kevent(2) does not allow large timeouts, so we use a long timeout - // instead (approximately 1 year). - dispatch_source_refs_t dr = NULL; - unsigned int timer; - uint64_t now, delta_tmp, delta = UINT64_MAX; - - for (timer = 0; timer < DISPATCH_TIMER_COUNT; timer++) { - // Timers are kept in order, first one will fire next - dr = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources); - if (!dr || !ds_timer(dr).target) { - // Empty list or disabled timer - continue; - } - now = _dispatch_source_timer_now(dr); - if (ds_timer(dr).target <= now) { - howsoon->tv_sec = 0; - howsoon->tv_nsec = 0; - return howsoon; - } - // the subtraction cannot go negative because the previous "if" - // verified that the target is greater than now. 
- delta_tmp = ds_timer(dr).target - now; - if (!(ds_timer(dr).flags & DISPATCH_TIMER_WALL_CLOCK)) { - delta_tmp = _dispatch_time_mach2nano(delta_tmp); - } - if (delta_tmp < delta) { - delta = delta_tmp; - } - } - if (slowpath(delta > FOREVER_NSEC)) { - return NULL; - } else { - howsoon->tv_sec = (time_t)(delta / NSEC_PER_SEC); - howsoon->tv_nsec = (long)(delta % NSEC_PER_SEC); - } - return howsoon; -} - struct dispatch_set_timer_params { dispatch_source_t ds; uintptr_t ident; @@ -1163,9 +1157,15 @@ _dispatch_source_set_timer3(void *context) // Clear any pending data that might have accumulated on // older timer params ds->ds_pending_data = 0; - _dispatch_timer_list_update(ds); + // Re-arm in case we got disarmed because of pending set_timer suspension + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, release); dispatch_resume(ds); + // Must happen after resume to avoid getting disarmed due to suspension + _dispatch_timers_update(ds); dispatch_release(ds); + if (params->values.flags & DISPATCH_TIMER_WALL_CLOCK) { + _dispatch_mach_host_calendar_change_register(); + } free(params); } @@ -1179,20 +1179,18 @@ _dispatch_source_set_timer2(void *context) _dispatch_source_set_timer3); } -void -dispatch_source_set_timer(dispatch_source_t ds, - dispatch_time_t start, - uint64_t interval, - uint64_t leeway) +DISPATCH_NOINLINE +static struct dispatch_set_timer_params * +_dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, + uint64_t interval, uint64_t leeway) { - if (slowpath(!ds->ds_is_timer)) { - DISPATCH_CLIENT_CRASH("Attempt to set timer on a non-timer source"); - } - struct dispatch_set_timer_params *params; + params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params)); + params->ds = ds; + params->values.flags = ds_timer(ds->ds_refs).flags; - // we use zero internally to mean disabled if (interval == 0) { + // we use zero internally to mean disabled interval = 1; } else if ((int64_t)interval < 0) { // 6866347 - make sure 
nanoseconds won't overflow @@ -1201,202 +1199,1384 @@ dispatch_source_set_timer(dispatch_source_t ds, if ((int64_t)leeway < 0) { leeway = INT64_MAX; } - if (start == DISPATCH_TIME_NOW) { start = _dispatch_absolute_time(); } else if (start == DISPATCH_TIME_FOREVER) { start = INT64_MAX; } - while (!(params = calloc(1ul, sizeof(struct dispatch_set_timer_params)))) { - sleep(1); - } - - params->ds = ds; - params->values.flags = ds_timer(ds->ds_refs).flags; - if ((int64_t)start < 0) { // wall clock - params->ident = DISPATCH_TIMER_INDEX_WALL; - params->values.target = -((int64_t)start); - params->values.interval = interval; - params->values.leeway = leeway; + start = (dispatch_time_t)-((int64_t)start); params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; } else { // absolute clock - params->ident = DISPATCH_TIMER_INDEX_MACH; - params->values.target = start; - params->values.interval = _dispatch_time_nano2mach(interval); - - // rdar://problem/7287561 interval must be at least one in - // in order to avoid later division by zero when calculating - // the missed interval count. (NOTE: the wall clock's - // interval is already "fixed" to be 1 or more) - if (params->values.interval < 1) { - params->values.interval = 1; + interval = _dispatch_time_nano2mach(interval); + if (interval < 1) { + // rdar://problem/7287561 interval must be at least one in + // in order to avoid later division by zero when calculating + // the missed interval count. (NOTE: the wall clock's + // interval is already "fixed" to be 1 or more) + interval = 1; } + leeway = _dispatch_time_nano2mach(leeway); + params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK; + } + params->ident = DISPATCH_TIMER_IDENT(params->values.flags); + params->values.target = start; + params->values.deadline = (start < UINT64_MAX - leeway) ? + start + leeway : UINT64_MAX; + params->values.interval = interval; + params->values.leeway = (interval == INT64_MAX || leeway < interval / 2) ? 
+ leeway : interval / 2; + return params; +} - params->values.leeway = _dispatch_time_nano2mach(leeway); - params->values.flags &= ~DISPATCH_TIMER_WALL_CLOCK; +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, + uint64_t interval, uint64_t leeway, bool source_sync) +{ + if (slowpath(!ds->ds_is_timer) || + slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) { + DISPATCH_CLIENT_CRASH("Attempt to set timer on a non-timer source"); } + + struct dispatch_set_timer_params *params; + params = _dispatch_source_timer_params(ds, start, interval, leeway); + + _dispatch_source_timer_telemetry(ds, params->ident, ¶ms->values); // Suspend the source so that it doesn't fire with pending changes // The use of suspend/resume requires the external retain/release dispatch_retain(ds); - dispatch_barrier_async_f((dispatch_queue_t)ds, params, - _dispatch_source_set_timer2); + if (source_sync) { + return _dispatch_barrier_trysync_f((dispatch_queue_t)ds, params, + _dispatch_source_set_timer2); + } else { + return _dispatch_source_set_timer2(params); + } } -#pragma mark - -#pragma mark dispatch_mach +void +dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, + uint64_t interval, uint64_t leeway) +{ + _dispatch_source_set_timer(ds, start, interval, leeway, true); +} -#if HAVE_MACH +void +_dispatch_source_set_runloop_timer_4CF(dispatch_source_t ds, + dispatch_time_t start, uint64_t interval, uint64_t leeway) +{ + // Don't serialize through the source queue for CF timers + _dispatch_source_set_timer(ds, start, interval, leeway, false); +} -#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG -#define _dispatch_debug_machport(name) \ - dispatch_debug_machport((name), __func__) -#else -#define _dispatch_debug_machport(name) -#endif +void +_dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) +{ + dispatch_source_refs_t dr = ds->ds_refs; + #define NSEC_PER_FRAME (NSEC_PER_SEC/60) + const 
bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION; + if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME : + FOREVER_NSEC/NSEC_PER_MSEC))) { + interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC; + } else { + interval = FOREVER_NSEC; + } + interval = _dispatch_time_nano2mach(interval); + uint64_t target = _dispatch_absolute_time() + interval; + target = (target / interval) * interval; + const uint64_t leeway = animation ? + _dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2; + ds_timer(dr).target = target; + ds_timer(dr).deadline = target + leeway; + ds_timer(dr).interval = interval; + ds_timer(dr).leeway = leeway; + _dispatch_source_timer_telemetry(ds, ds->ds_ident_hack, &ds_timer(dr)); +} -// Flags for all notifications that are registered/unregistered when a -// send-possible notification is requested/delivered -#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ - DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) +#pragma mark - +#pragma mark dispatch_timers + +#define DISPATCH_TIMER_STRUCT(refs) \ + uint64_t target, deadline; \ + TAILQ_HEAD(, refs) dt_sources + +typedef struct dispatch_timer_s { + DISPATCH_TIMER_STRUCT(dispatch_timer_source_refs_s); +} *dispatch_timer_t; + +#define DISPATCH_TIMER_INITIALIZER(tidx) \ + [tidx] = { \ + .target = UINT64_MAX, \ + .deadline = UINT64_MAX, \ + .dt_sources = TAILQ_HEAD_INITIALIZER( \ + _dispatch_timer[tidx].dt_sources), \ + } +#define DISPATCH_TIMER_INIT(kind, qos) \ + DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) + +struct dispatch_timer_s _dispatch_timer[] = { + DISPATCH_TIMER_INIT(WALL, NORMAL), + DISPATCH_TIMER_INIT(WALL, CRITICAL), + DISPATCH_TIMER_INIT(WALL, BACKGROUND), + DISPATCH_TIMER_INIT(MACH, NORMAL), + DISPATCH_TIMER_INIT(MACH, CRITICAL), + DISPATCH_TIMER_INIT(MACH, BACKGROUND), +}; +#define DISPATCH_TIMER_COUNT \ + ((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0]))) + +#define 
DISPATCH_KEVENT_TIMER_UDATA(tidx) \ + (uintptr_t)&_dispatch_kevent_timer[tidx] +#ifdef __LP64__ +#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ + .udata = DISPATCH_KEVENT_TIMER_UDATA(tidx) +#else // __LP64__ +// dynamic initialization in _dispatch_timers_init() +#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ + .udata = 0 +#endif // __LP64__ +#define DISPATCH_KEVENT_TIMER_INITIALIZER(tidx) \ + [tidx] = { \ + .dk_kevent = { \ + .ident = tidx, \ + .filter = DISPATCH_EVFILT_TIMER, \ + DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx), \ + }, \ + .dk_sources = TAILQ_HEAD_INITIALIZER( \ + _dispatch_kevent_timer[tidx].dk_sources), \ + } +#define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \ + DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) + +struct dispatch_kevent_s _dispatch_kevent_timer[] = { + DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL), + DISPATCH_KEVENT_TIMER_INIT(WALL, CRITICAL), + DISPATCH_KEVENT_TIMER_INIT(WALL, BACKGROUND), + DISPATCH_KEVENT_TIMER_INIT(MACH, NORMAL), + DISPATCH_KEVENT_TIMER_INIT(MACH, CRITICAL), + DISPATCH_KEVENT_TIMER_INIT(MACH, BACKGROUND), + DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX_DISARM), +}; +#define DISPATCH_KEVENT_TIMER_COUNT \ + ((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0]))) + +#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8) +#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(qos, note) \ + [qos] = { \ + .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(qos), \ + .filter = EVFILT_TIMER, \ + .flags = EV_ONESHOT, \ + .fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \ + } +#define DISPATCH_KEVENT_TIMEOUT_INIT(qos, note) \ + DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_QOS_##qos, note) -#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) -#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? 
\ - (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) +struct kevent64_s _dispatch_kevent_timeout[] = { + DISPATCH_KEVENT_TIMEOUT_INIT(NORMAL, 0), + DISPATCH_KEVENT_TIMEOUT_INIT(CRITICAL, NOTE_CRITICAL), + DISPATCH_KEVENT_TIMEOUT_INIT(BACKGROUND, NOTE_BACKGROUND), +}; -#define _DISPATCH_MACHPORT_HASH_SIZE 32 -#define _DISPATCH_MACHPORT_HASH(x) \ - _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) +#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ + [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC -static dispatch_source_t _dispatch_mach_notify_source; -static mach_port_t _dispatch_port_set; -static mach_port_t _dispatch_event_port; +static const uint64_t _dispatch_kevent_coalescing_window[] = { + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), +}; -static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags, uint32_t mask, - mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); +#define _dispatch_timers_insert(tidx, dra, dr, dr_list, dta, dt, dt_list) ({ \ + typeof(dr) dri = NULL; typeof(dt) dti; \ + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ + TAILQ_FOREACH(dri, &dra[tidx].dk_sources, dr_list) { \ + if (ds_timer(dr).target < ds_timer(dri).target) { \ + break; \ + } \ + } \ + TAILQ_FOREACH(dti, &dta[tidx].dt_sources, dt_list) { \ + if (ds_timer(dt).deadline < ds_timer(dti).deadline) { \ + break; \ + } \ + } \ + if (dti) { \ + TAILQ_INSERT_BEFORE(dti, dt, dt_list); \ + } else { \ + TAILQ_INSERT_TAIL(&dta[tidx].dt_sources, dt, dt_list); \ + } \ + } \ + if (dri) { \ + TAILQ_INSERT_BEFORE(dri, dr, dr_list); \ + } else { \ + TAILQ_INSERT_TAIL(&dra[tidx].dk_sources, dr, dr_list); \ + } \ + }) + +#define _dispatch_timers_remove(tidx, dk, dra, dr, dr_list, dta, dt, dt_list) \ + ({ \ + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ + TAILQ_REMOVE(&dta[tidx].dt_sources, 
dt, dt_list); \ + } \ + TAILQ_REMOVE(dk ? &(*(dk)).dk_sources : &dra[tidx].dk_sources, dr, \ + dr_list); }) + +#define _dispatch_timers_check(dra, dta) ({ \ + unsigned int qosm = _dispatch_timers_qos_mask; \ + bool update = false; \ + unsigned int tidx; \ + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \ + if (!(qosm & 1 << DISPATCH_TIMER_QOS(tidx))){ \ + continue; \ + } \ + dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \ + TAILQ_FIRST(&dra[tidx].dk_sources); \ + dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t) \ + TAILQ_FIRST(&dta[tidx].dt_sources); \ + uint64_t target = dr ? ds_timer(dr).target : UINT64_MAX; \ + uint64_t deadline = dr ? ds_timer(dt).deadline : UINT64_MAX; \ + if (target != dta[tidx].target) { \ + dta[tidx].target = target; \ + update = true; \ + } \ + if (deadline != dta[tidx].deadline) { \ + dta[tidx].deadline = deadline; \ + update = true; \ + } \ + } \ + update; }) + +static bool _dispatch_timers_reconfigure, _dispatch_timer_expired; +static unsigned int _dispatch_timers_qos_mask; +static bool _dispatch_timers_force_max_leeway; static void -_dispatch_port_set_init(void *context DISPATCH_UNUSED) +_dispatch_timers_init(void) { - struct kevent kev = { - .filter = EVFILT_MACHPORT, - .flags = EV_ADD, - }; - kern_return_t kr; - - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, - &_dispatch_port_set); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - _dispatch_bug_mach_client( - "_dispatch_port_set_init: mach_port_allocate() failed", kr); - DISPATCH_CLIENT_CRASH( - "mach_port_allocate() failed: cannot create port set"); - } - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &_dispatch_event_port); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - _dispatch_bug_mach_client( - "_dispatch_port_set_init: mach_port_allocate() failed", kr); - DISPATCH_CLIENT_CRASH( - "mach_port_allocate() failed: cannot create receive right"); - } - kr = mach_port_move_member(mach_task_self(), 
_dispatch_event_port, - _dispatch_port_set); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - _dispatch_bug_mach_client( - "_dispatch_port_set_init: mach_port_move_member() failed", kr); - DISPATCH_CLIENT_CRASH("mach_port_move_member() failed"); +#ifndef __LP64__ + unsigned int tidx; + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + _dispatch_kevent_timer[tidx].dk_kevent.udata = \ + DISPATCH_KEVENT_TIMER_UDATA(tidx); } +#endif // __LP64__ + _dispatch_timers_force_max_leeway = + getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"); +} - kev.ident = _dispatch_port_set; +static inline void +_dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk) +{ + dispatch_source_refs_t dr = ds->ds_refs; + unsigned int tidx = (unsigned int)dk->dk_kevent.ident; - _dispatch_update_kq(&kev); + if (slowpath(ds_timer_aggregate(ds))) { + _dispatch_timer_aggregates_unregister(ds, tidx); + } + _dispatch_timers_remove(tidx, dk, _dispatch_kevent_timer, dr, dr_list, + _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { + _dispatch_timers_reconfigure = true; + _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + } } -static mach_port_t -_dispatch_get_port_set(void) +// Updates the ordered list of timers based on next fire date for changes to ds. +// Should only be called from the context of _dispatch_mgr_q. +static void +_dispatch_timers_update(dispatch_source_t ds) { - static dispatch_once_t pred; + dispatch_kevent_t dk = ds->ds_dkev; + dispatch_source_refs_t dr = ds->ds_refs; + unsigned int tidx; - dispatch_once_f(&pred, NULL, _dispatch_port_set_init); + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - return _dispatch_port_set; + // Do not reschedule timers unregistered with _dispatch_kevent_unregister() + if (slowpath(!dk)) { + return; + } + // Move timers that are disabled, suspended or have missed intervals to the + // disarmed list, rearm after resume resp. 
source invoke will reenable them + if (!ds_timer(dr).target || DISPATCH_OBJECT_SUSPENDED(ds) || + ds->ds_pending_data) { + tidx = DISPATCH_TIMER_INDEX_DISARM; + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + } else { + tidx = _dispatch_source_timer_idx(dr); + } + if (slowpath(ds_timer_aggregate(ds))) { + _dispatch_timer_aggregates_register(ds); + } + if (slowpath(!ds->ds_is_installed)) { + ds->ds_is_installed = true; + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + } + free(dk); + _dispatch_object_debug(ds, "%s", __func__); + } else { + _dispatch_timers_unregister(ds, dk); + } + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { + _dispatch_timers_reconfigure = true; + _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + } + if (dk != &_dispatch_kevent_timer[tidx]){ + ds->ds_dkev = &_dispatch_kevent_timer[tidx]; + } + _dispatch_timers_insert(tidx, _dispatch_kevent_timer, dr, dr_list, + _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); + if (slowpath(ds_timer_aggregate(ds))) { + _dispatch_timer_aggregates_update(ds, tidx); + } } -static kern_return_t -_dispatch_kevent_machport_enable(dispatch_kevent_t dk) +static inline void +_dispatch_timers_run2(uint64_t nows[], unsigned int tidx) { - mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; - kern_return_t kr; + dispatch_source_refs_t dr; + dispatch_source_t ds; + uint64_t now, missed; - _dispatch_debug_machport(mp); - kr = mach_port_move_member(mach_task_self(), mp, _dispatch_get_port_set()); - if (slowpath(kr)) { - DISPATCH_VERIFY_MIG(kr); - switch (kr) { - case KERN_INVALID_NAME: -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed " - "prematurely", mp); -#endif - break; - case KERN_INVALID_RIGHT: - _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " - "mach_port_move_member() failed ", kr); + now = _dispatch_source_timer_now(nows, tidx); + while ((dr = 
TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) { + ds = _dispatch_source_from_refs(dr); + // We may find timers on the wrong list due to a pending update from + // dispatch_source_set_timer. Force an update of the list in that case. + if (tidx != ds->ds_ident_hack) { + _dispatch_timers_update(ds); + continue; + } + if (!ds_timer(dr).target) { + // No configured timers on the list break; - default: - (void)dispatch_assume_zero(kr); + } + if (ds_timer(dr).target > now) { + // Done running timers for now. break; } + // Remove timers that are suspended or have missed intervals from the + // list, rearm after resume resp. source invoke will reenable them + if (DISPATCH_OBJECT_SUSPENDED(ds) || ds->ds_pending_data) { + _dispatch_timers_update(ds); + continue; + } + // Calculate number of missed intervals. + missed = (now - ds_timer(dr).target) / ds_timer(dr).interval; + if (++missed > INT_MAX) { + missed = INT_MAX; + } + if (ds_timer(dr).interval < INT64_MAX) { + ds_timer(dr).target += missed * ds_timer(dr).interval; + ds_timer(dr).deadline = ds_timer(dr).target + ds_timer(dr).leeway; + } else { + ds_timer(dr).target = UINT64_MAX; + ds_timer(dr).deadline = UINT64_MAX; + } + _dispatch_timers_update(ds); + ds_timer(dr).last_fire = now; + + unsigned long data; + data = dispatch_atomic_add2o(ds, ds_pending_data, + (unsigned long)missed, relaxed); + _dispatch_trace_timer_fire(dr, data, (unsigned long)missed); + _dispatch_wakeup(ds); } - return kr; } +DISPATCH_NOINLINE static void -_dispatch_kevent_machport_disable(dispatch_kevent_t dk) +_dispatch_timers_run(uint64_t nows[]) { - mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; - kern_return_t kr; + unsigned int tidx; + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + if (!TAILQ_EMPTY(&_dispatch_kevent_timer[tidx].dk_sources)) { + _dispatch_timers_run2(nows, tidx); + } + } +} - _dispatch_debug_machport(mp); - kr = mach_port_move_member(mach_task_self(), mp, 0); - if (slowpath(kr)) { - DISPATCH_VERIFY_MIG(kr); - 
switch (kr) { - case KERN_INVALID_RIGHT: - case KERN_INVALID_NAME: -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed " - "prematurely", mp); -#endif - break; - default: - (void)dispatch_assume_zero(kr); +static inline unsigned int +_dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], + uint64_t *delay, uint64_t *leeway, int qos) +{ + unsigned int tidx, ridx = DISPATCH_TIMER_COUNT; + uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX; + + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){ + continue; + } + uint64_t target = timer[tidx].target; + if (target == UINT64_MAX) { + continue; + } + uint64_t deadline = timer[tidx].deadline; + if (qos >= 0) { + // Timer pre-coalescing + uint64_t window = _dispatch_kevent_coalescing_window[qos]; + uint64_t latest = deadline > window ? deadline - window : 0; + dispatch_source_refs_t dri; + TAILQ_FOREACH(dri, &_dispatch_kevent_timer[tidx].dk_sources, + dr_list) { + tmp = ds_timer(dri).target; + if (tmp > latest) break; + target = tmp; + } + } + uint64_t now = _dispatch_source_timer_now(nows, tidx); + if (target <= now) { + delta = 0; break; } + tmp = target - now; + if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < INT64_MAX && tmp < delta) { + ridx = tidx; + delta = tmp; + } + dispatch_assert(target <= deadline); + tmp = deadline - now; + if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < INT64_MAX && tmp < dldelta) { + dldelta = tmp; + } } + *delay = delta; + *leeway = delta && delta < UINT64_MAX ? 
dldelta - delta : UINT64_MAX; + return ridx; } -kern_return_t -_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) +static bool +_dispatch_timers_program2(uint64_t nows[], struct kevent64_s *ke, + unsigned int qos) { - kern_return_t kr_recv = 0, kr_sp = 0; + unsigned int tidx; + bool poll; + uint64_t delay, leeway; + + tidx = _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, + (int)qos); + poll = (delay == 0); + if (poll || delay == UINT64_MAX) { + _dispatch_trace_next_timer_set(NULL, qos); + if (!ke->data) { + return poll; + } + ke->data = 0; + ke->flags |= EV_DELETE; + ke->flags &= ~(EV_ADD|EV_ENABLE); + } else { + _dispatch_trace_next_timer_set( + TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), qos); + _dispatch_trace_next_timer_program(delay, qos); + delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + if (slowpath(_dispatch_timers_force_max_leeway)) { + ke->data = (int64_t)(delay + leeway); + ke->ext[1] = 0; + } else { + ke->data = (int64_t)delay; + ke->ext[1] = leeway; + } + ke->flags |= EV_ADD|EV_ENABLE; + ke->flags &= ~EV_DELETE; + } + _dispatch_kq_update(ke); + return poll; +} - dispatch_assert_zero(new_flags & del_flags); - if (new_flags & DISPATCH_MACH_RECV_MESSAGE) { - kr_recv = _dispatch_kevent_machport_enable(dk); - } else if (del_flags & DISPATCH_MACH_RECV_MESSAGE) { - _dispatch_kevent_machport_disable(dk); +DISPATCH_NOINLINE +static bool +_dispatch_timers_program(uint64_t nows[]) +{ + bool poll = false; + unsigned int qos, qosm = _dispatch_timers_qos_mask; + for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { + if (!(qosm & 1 << qos)){ + continue; + } + poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[qos], + qos); } - if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || + return poll; +} + +DISPATCH_NOINLINE +static bool +_dispatch_timers_configure(void) +{ + _dispatch_timer_aggregates_check(); + // Find out if there is a new target/deadline on the 
timer lists + return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer); +} + +static void +_dispatch_timers_calendar_change(void) +{ + // calendar change may have gone past the wallclock deadline + _dispatch_timer_expired = true; + _dispatch_timers_qos_mask = ~0u; +} + +static void +_dispatch_timers_kevent(struct kevent64_s *ke) +{ + dispatch_assert(ke->data > 0); + dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == + DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); + unsigned int qos = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; + dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); + dispatch_assert(_dispatch_kevent_timeout[qos].data); + _dispatch_kevent_timeout[qos].data = 0; // kevent deleted via EV_ONESHOT + _dispatch_timer_expired = true; + _dispatch_timers_qos_mask |= 1 << qos; + _dispatch_trace_next_timer_wake(qos); +} + +static inline bool +_dispatch_mgr_timers(void) +{ + uint64_t nows[DISPATCH_TIMER_KIND_COUNT] = {}; + bool expired = slowpath(_dispatch_timer_expired); + if (expired) { + _dispatch_timers_run(nows); + } + bool reconfigure = slowpath(_dispatch_timers_reconfigure); + if (reconfigure || expired) { + if (reconfigure) { + reconfigure = _dispatch_timers_configure(); + _dispatch_timers_reconfigure = false; + } + if (reconfigure || expired) { + expired = _dispatch_timer_expired = _dispatch_timers_program(nows); + expired = expired || _dispatch_mgr_q.dq_items_tail; + } + _dispatch_timers_qos_mask = 0; + } + return expired; +} + +#pragma mark - +#pragma mark dispatch_timer_aggregate + +typedef struct { + TAILQ_HEAD(, dispatch_timer_source_aggregate_refs_s) dk_sources; +} dispatch_timer_aggregate_refs_s; + +typedef struct dispatch_timer_aggregate_s { + DISPATCH_STRUCT_HEADER(queue); + DISPATCH_QUEUE_HEADER; + TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list; + dispatch_timer_aggregate_refs_s + dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT]; + struct { + DISPATCH_TIMER_STRUCT(dispatch_timer_source_aggregate_refs_s); + } 
dta_timer[DISPATCH_TIMER_COUNT]; + struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT]; + unsigned int dta_refcount; +} dispatch_timer_aggregate_s; + +typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s; +static dispatch_timer_aggregates_s _dispatch_timer_aggregates = + TAILQ_HEAD_INITIALIZER(_dispatch_timer_aggregates); + +dispatch_timer_aggregate_t +dispatch_timer_aggregate_create(void) +{ + unsigned int tidx; + dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue), + sizeof(struct dispatch_timer_aggregate_s)); + _dispatch_queue_init((dispatch_queue_t)dta); + dta->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, + true); + dta->dq_width = UINT32_MAX; + //FIXME: aggregates need custom vtable + //dta->dq_label = "timer-aggregate"; + for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) { + TAILQ_INIT(&dta->dta_kevent_timer[tidx].dk_sources); + } + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + TAILQ_INIT(&dta->dta_timer[tidx].dt_sources); + dta->dta_timer[tidx].target = UINT64_MAX; + dta->dta_timer[tidx].deadline = UINT64_MAX; + dta->dta_timer_data[tidx].target = UINT64_MAX; + dta->dta_timer_data[tidx].deadline = UINT64_MAX; + } + return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create( + (dispatch_queue_t)dta); +} + +typedef struct dispatch_timer_delay_s { + dispatch_timer_t timer; + uint64_t delay, leeway; +} *dispatch_timer_delay_t; + +static void +_dispatch_timer_aggregate_get_delay(void *ctxt) +{ + dispatch_timer_delay_t dtd = ctxt; + struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {}; + _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway, + -1); +} + +uint64_t +dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta, + uint64_t *leeway_ptr) +{ + struct dispatch_timer_delay_s dtd = { + .timer = dta->dta_timer_data, + }; + dispatch_sync_f((dispatch_queue_t)dta, &dtd, + _dispatch_timer_aggregate_get_delay); + if 
(leeway_ptr) { + *leeway_ptr = dtd.leeway; + } + return dtd.delay; +} + +static void +_dispatch_timer_aggregate_update(void *ctxt) +{ + dispatch_timer_aggregate_t dta = (void*)_dispatch_queue_get_current(); + dispatch_timer_t dtau = ctxt; + unsigned int tidx; + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + dta->dta_timer_data[tidx].target = dtau[tidx].target; + dta->dta_timer_data[tidx].deadline = dtau[tidx].deadline; + } + free(dtau); +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_aggregates_configure(void) +{ + dispatch_timer_aggregate_t dta; + dispatch_timer_t dtau; + TAILQ_FOREACH(dta, &_dispatch_timer_aggregates, dta_list) { + if (!_dispatch_timers_check(dta->dta_kevent_timer, dta->dta_timer)) { + continue; + } + dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau)); + memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer)); + dispatch_barrier_async_f((dispatch_queue_t)dta, dtau, + _dispatch_timer_aggregate_update); + } +} + +static inline void +_dispatch_timer_aggregates_check(void) +{ + if (fastpath(TAILQ_EMPTY(&_dispatch_timer_aggregates))) { + return; + } + _dispatch_timer_aggregates_configure(); +} + +static void +_dispatch_timer_aggregates_register(dispatch_source_t ds) +{ + dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); + if (!dta->dta_refcount++) { + TAILQ_INSERT_TAIL(&_dispatch_timer_aggregates, dta, dta_list); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_aggregates_update(dispatch_source_t ds, unsigned int tidx) +{ + dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); + dispatch_timer_source_aggregate_refs_t dr; + dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs; + _dispatch_timers_insert(tidx, dta->dta_kevent_timer, dr, dra_list, + dta->dta_timer, dr, dta_list); +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx) +{ + dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); + dispatch_timer_source_aggregate_refs_t dr; + dr = 
(dispatch_timer_source_aggregate_refs_t)ds->ds_refs; + _dispatch_timers_remove(tidx, (dispatch_timer_aggregate_refs_s*)NULL, + dta->dta_kevent_timer, dr, dra_list, dta->dta_timer, dr, dta_list); + if (!--dta->dta_refcount) { + TAILQ_REMOVE(&_dispatch_timer_aggregates, dta, dta_list); + } +} + +#pragma mark - +#pragma mark dispatch_select + +static int _dispatch_kq; + +static unsigned int _dispatch_select_workaround; +static fd_set _dispatch_rfds; +static fd_set _dispatch_wfds; +static uint64_t*_dispatch_rfd_ptrs; +static uint64_t*_dispatch_wfd_ptrs; + +DISPATCH_NOINLINE +static bool +_dispatch_select_register(struct kevent64_s *kev) +{ + + // Must execute on manager queue + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + + // If an EINVAL or ENOENT error occurred while adding/enabling a read or + // write kevent, assume it was due to a type of filedescriptor not + // supported by kqueue and fall back to select + switch (kev->filter) { + case EVFILT_READ: + if ((kev->data == EINVAL || kev->data == ENOENT) && + dispatch_assume(kev->ident < FD_SETSIZE)) { + FD_SET((int)kev->ident, &_dispatch_rfds); + if (slowpath(!_dispatch_rfd_ptrs)) { + _dispatch_rfd_ptrs = _dispatch_calloc(FD_SETSIZE, + sizeof(*_dispatch_rfd_ptrs)); + } + if (!_dispatch_rfd_ptrs[kev->ident]) { + _dispatch_rfd_ptrs[kev->ident] = kev->udata; + _dispatch_select_workaround++; + _dispatch_debug("select workaround used to read fd %d: 0x%lx", + (int)kev->ident, (long)kev->data); + } + } + return true; + case EVFILT_WRITE: + if ((kev->data == EINVAL || kev->data == ENOENT) && + dispatch_assume(kev->ident < FD_SETSIZE)) { + FD_SET((int)kev->ident, &_dispatch_wfds); + if (slowpath(!_dispatch_wfd_ptrs)) { + _dispatch_wfd_ptrs = _dispatch_calloc(FD_SETSIZE, + sizeof(*_dispatch_wfd_ptrs)); + } + if (!_dispatch_wfd_ptrs[kev->ident]) { + _dispatch_wfd_ptrs[kev->ident] = kev->udata; + _dispatch_select_workaround++; + _dispatch_debug("select workaround used to write fd %d: 0x%lx", + (int)kev->ident, (long)kev->data); + } + } 
+ return true; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_select_unregister(const struct kevent64_s *kev) +{ + // Must execute on manager queue + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + + switch (kev->filter) { + case EVFILT_READ: + if (_dispatch_rfd_ptrs && kev->ident < FD_SETSIZE && + _dispatch_rfd_ptrs[kev->ident]) { + FD_CLR((int)kev->ident, &_dispatch_rfds); + _dispatch_rfd_ptrs[kev->ident] = 0; + _dispatch_select_workaround--; + return true; + } + break; + case EVFILT_WRITE: + if (_dispatch_wfd_ptrs && kev->ident < FD_SETSIZE && + _dispatch_wfd_ptrs[kev->ident]) { + FD_CLR((int)kev->ident, &_dispatch_wfds); + _dispatch_wfd_ptrs[kev->ident] = 0; + _dispatch_select_workaround--; + return true; + } + break; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mgr_select(bool poll) +{ + static const struct timeval timeout_immediately = { 0, 0 }; + fd_set tmp_rfds, tmp_wfds; + struct kevent64_s kev; + int err, i, r; + bool kevent_avail = false; + + FD_COPY(&_dispatch_rfds, &tmp_rfds); + FD_COPY(&_dispatch_wfds, &tmp_wfds); + + r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, + poll ? 
(struct timeval*)&timeout_immediately : NULL); + if (slowpath(r == -1)) { + err = errno; + if (err != EBADF) { + if (err != EINTR) { + (void)dispatch_assume_zero(err); + } + return false; + } + for (i = 0; i < FD_SETSIZE; i++) { + if (i == _dispatch_kq) { + continue; + } + if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, &_dispatch_wfds)){ + continue; + } + r = dup(i); + if (dispatch_assume(r != -1)) { + close(r); + } else { + if (_dispatch_rfd_ptrs && _dispatch_rfd_ptrs[i]) { + FD_CLR(i, &_dispatch_rfds); + _dispatch_rfd_ptrs[i] = 0; + _dispatch_select_workaround--; + } + if (_dispatch_wfd_ptrs && _dispatch_wfd_ptrs[i]) { + FD_CLR(i, &_dispatch_wfds); + _dispatch_wfd_ptrs[i] = 0; + _dispatch_select_workaround--; + } + } + } + return false; + } + if (r > 0) { + for (i = 0; i < FD_SETSIZE; i++) { + if (FD_ISSET(i, &tmp_rfds)) { + if (i == _dispatch_kq) { + kevent_avail = true; + continue; + } + FD_CLR(i, &_dispatch_rfds); // emulate EV_DISPATCH + EV_SET64(&kev, i, EVFILT_READ, + EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, + _dispatch_rfd_ptrs[i], 0, 0); + _dispatch_kevent_drain(&kev); + } + if (FD_ISSET(i, &tmp_wfds)) { + FD_CLR(i, &_dispatch_wfds); // emulate EV_DISPATCH + EV_SET64(&kev, i, EVFILT_WRITE, + EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, + _dispatch_wfd_ptrs[i], 0, 0); + _dispatch_kevent_drain(&kev); + } + } + } + return kevent_avail; +} + +#pragma mark - +#pragma mark dispatch_kqueue + +static void +_dispatch_kq_init(void *context DISPATCH_UNUSED) +{ + static const struct kevent64_s kev = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + }; + + _dispatch_safe_fork = false; +#if DISPATCH_USE_GUARDED_FD + guardid_t guard = (uintptr_t)&kev; + _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); +#else + _dispatch_kq = kqueue(); +#endif + if (_dispatch_kq == -1) { + DISPATCH_CLIENT_CRASH("kqueue() create failed: " + "probably out of file descriptors"); + } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { + // in case we fall back 
to select() + FD_SET(_dispatch_kq, &_dispatch_rfds); + } + + (void)dispatch_assume_zero(kevent64(_dispatch_kq, &kev, 1, NULL, 0, 0, + NULL)); + _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); +} + +static int +_dispatch_get_kq(void) +{ + static dispatch_once_t pred; + + dispatch_once_f(&pred, NULL, _dispatch_kq_init); + + return _dispatch_kq; +} + +DISPATCH_NOINLINE +static long +_dispatch_kq_update(const struct kevent64_s *kev) +{ + int r; + struct kevent64_s kev_copy; + + if (slowpath(_dispatch_select_workaround) && (kev->flags & EV_DELETE)) { + if (_dispatch_select_unregister(kev)) { + return 0; + } + } + kev_copy = *kev; + // This ensures we don't get a pending kevent back while registering + // a new kevent + kev_copy.flags |= EV_RECEIPT; +retry: + r = dispatch_assume(kevent64(_dispatch_get_kq(), &kev_copy, 1, + &kev_copy, 1, 0, NULL)); + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + return err; + } + switch (kev_copy.data) { + case 0: + return 0; + case EBADF: + case EPERM: + case EINVAL: + case ENOENT: + if ((kev->flags & (EV_ADD|EV_ENABLE)) && !(kev->flags & EV_DELETE)) { + if (_dispatch_select_register(&kev_copy)) { + return 0; + } + } + // fall through + default: + kev_copy.flags |= kev->flags; + _dispatch_kevent_drain(&kev_copy); + break; + } + return (long)kev_copy.data; +} + +#pragma mark - +#pragma mark dispatch_mgr + +static struct kevent64_s *_dispatch_kevent_enable; + +static void inline +_dispatch_mgr_kevent_reenable(struct kevent64_s *ke) +{ + dispatch_assert(!_dispatch_kevent_enable || _dispatch_kevent_enable == ke); + _dispatch_kevent_enable = ke; +} + +unsigned long +_dispatch_mgr_wakeup(dispatch_queue_t dq DISPATCH_UNUSED) +{ + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + return false; + } + + static const struct 
kevent64_s kev = { + .ident = 1, + .filter = EVFILT_USER, + .fflags = NOTE_TRIGGER, + }; + +#if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG + _dispatch_debug("waking up the dispatch manager queue: %p", dq); +#endif + + _dispatch_kq_update(&kev); + + return false; +} + +DISPATCH_NOINLINE +static void +_dispatch_mgr_init(void) +{ + (void)dispatch_atomic_inc2o(&_dispatch_mgr_q, dq_running, relaxed); + _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_mgr_q); + _dispatch_queue_set_bound_thread(&_dispatch_mgr_q); + _dispatch_mgr_priority_init(); + _dispatch_kevent_init(); + _dispatch_timers_init(); + _dispatch_mach_recv_msg_buf_init(); + _dispatch_memorystatus_init(); +} + +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_mgr_invoke(void) +{ + static const struct timespec timeout_immediately = { 0, 0 }; + struct kevent64_s kev; + bool poll; + int r; + + for (;;) { + _dispatch_mgr_queue_drain(); + poll = _dispatch_mgr_timers(); + if (slowpath(_dispatch_select_workaround)) { + poll = _dispatch_mgr_select(poll); + if (!poll) continue; + } + r = kevent64(_dispatch_kq, _dispatch_kevent_enable, + _dispatch_kevent_enable ? 1 : 0, &kev, 1, 0, + poll ? 
&timeout_immediately : NULL); + _dispatch_kevent_enable = NULL; + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + break; + case EBADF: + DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + } else if (r) { + _dispatch_kevent_drain(&kev); + } + } +} + +DISPATCH_NORETURN +void +_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED) +{ + _dispatch_mgr_init(); + // never returns, so burn bridges behind us & clear stack 2k ahead + _dispatch_clear_stack(2048); + _dispatch_mgr_invoke(); +} + +#pragma mark - +#pragma mark dispatch_memorystatus + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE +#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYSTATUS +#define DISPATCH_MEMORYSTATUS_SOURCE_MASK ( \ + DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL | \ + DISPATCH_MEMORYSTATUS_PRESSURE_WARN) +#elif DISPATCH_USE_VM_PRESSURE_SOURCE +#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM +#define DISPATCH_MEMORYSTATUS_SOURCE_MASK DISPATCH_VM_PRESSURE +#endif + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +static dispatch_source_t _dispatch_memorystatus_source; + +static void +_dispatch_memorystatus_handler(void *context DISPATCH_UNUSED) +{ +#if DISPATCH_USE_MEMORYSTATUS_SOURCE + unsigned long memorystatus; + memorystatus = dispatch_source_get_data(_dispatch_memorystatus_source); + if (memorystatus & DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL) { + _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; + return; + } + _dispatch_continuation_cache_limit = + DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN; +#endif + malloc_zone_pressure_relief(0,0); +} + +static void +_dispatch_memorystatus_init(void) +{ + _dispatch_memorystatus_source = dispatch_source_create( + DISPATCH_MEMORYSTATUS_SOURCE_TYPE, 0, + DISPATCH_MEMORYSTATUS_SOURCE_MASK, + _dispatch_get_root_queue(0, true)); + 
dispatch_source_set_event_handler_f(_dispatch_memorystatus_source, + _dispatch_memorystatus_handler); + dispatch_resume(_dispatch_memorystatus_source); +} +#else +static inline void _dispatch_memorystatus_init(void) {} +#endif // DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE + +#pragma mark - +#pragma mark dispatch_mach + +#if HAVE_MACH + +#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG +#define _dispatch_debug_machport(name) \ + dispatch_debug_machport((name), __func__) +#else +#define _dispatch_debug_machport(name) ((void)(name)) +#endif + +// Flags for all notifications that are registered/unregistered when a +// send-possible notification is requested/delivered +#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ + DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) +#define _DISPATCH_MACH_RECV_FLAGS (DISPATCH_MACH_RECV_MESSAGE| \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) +#define _DISPATCH_MACH_RECV_DIRECT_FLAGS ( \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) + +#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) +#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? 
\ + (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) + +#define _DISPATCH_MACHPORT_HASH_SIZE 32 +#define _DISPATCH_MACHPORT_HASH(x) \ + _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) + +#ifndef MACH_RCV_LARGE_IDENTITY +#define MACH_RCV_LARGE_IDENTITY 0x00000008 +#endif +#define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX +#define DISPATCH_MACH_RCV_OPTIONS ( \ + MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ + MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) + +#define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0]) + +static void _dispatch_kevent_machport_drain(struct kevent64_s *ke); +static void _dispatch_kevent_mach_msg_drain(struct kevent64_s *ke); +static void _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr); +static void _dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr); +static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, + dispatch_source_refs_t dr, dispatch_kevent_t dk, + mach_msg_header_t *hdr, mach_msg_size_t siz); +static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, + uint32_t new_flags, uint32_t del_flags, uint32_t mask, + mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); +static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr); +static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, bool disconnected); +static void _dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr, + mach_msg_size_t siz); +static void _dispatch_mach_merge_kevent(dispatch_mach_t dm, + const struct kevent64_s *ke); +static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm); + +static const size_t _dispatch_mach_recv_msg_size = + DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE; +static const size_t dispatch_mach_trailer_size = + sizeof(dispatch_mach_trailer_t); +static const size_t _dispatch_mach_recv_msg_buf_size = mach_vm_round_page( + 
_dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); +static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; +static mach_port_t _dispatch_mach_notify_port; +static struct kevent64_s _dispatch_mach_recv_kevent = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .fflags = DISPATCH_MACH_RCV_OPTIONS, +}; +static dispatch_source_t _dispatch_mach_notify_source; +static const +struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { + .ke = { + .filter = EVFILT_MACHPORT, + .flags = EV_CLEAR, + .fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT, + }, +}; + +static void +_dispatch_mach_recv_msg_buf_init(void) +{ + mach_vm_size_t vm_size = _dispatch_mach_recv_msg_buf_size; + mach_vm_address_t vm_addr = vm_page_size; + kern_return_t kr; + + while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE))) { + if (kr != KERN_NO_SPACE) { + (void)dispatch_assume_zero(kr); + DISPATCH_CLIENT_CRASH("Could not allocate mach msg receive buffer"); + } + _dispatch_temporary_resource_shortage(); + vm_addr = vm_page_size; + } + _dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr; + _dispatch_mach_recv_kevent.ext[1] = _dispatch_mach_recv_msg_buf_size; +} + +static inline void* +_dispatch_get_mach_recv_msg_buf(void) +{ + return (void*)_dispatch_mach_recv_kevent.ext[0]; +} + +static void +_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) +{ + kern_return_t kr; + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, + &_dispatch_mach_recv_portset); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create port set"); + } + dispatch_assert(_dispatch_get_mach_recv_msg_buf()); + dispatch_assert(dispatch_mach_trailer_size == + REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS( + DISPATCH_MACH_RCV_TRAILER))); + _dispatch_mach_recv_kevent.ident = _dispatch_mach_recv_portset; + 
_dispatch_kq_update(&_dispatch_mach_recv_kevent); + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &_dispatch_mach_notify_port); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create receive right"); + } + _dispatch_mach_notify_source = dispatch_source_create( + &_dispatch_source_type_mach_recv_direct, + _dispatch_mach_notify_port, 0, &_dispatch_mgr_q); + _dispatch_mach_notify_source->ds_refs->ds_handler_func = + (void*)_dispatch_mach_notify_source_invoke; + dispatch_assert(_dispatch_mach_notify_source); + dispatch_resume(_dispatch_mach_notify_source); +} + +static mach_port_t +_dispatch_get_mach_recv_portset(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init); + return _dispatch_mach_recv_portset; +} + +static void +_dispatch_mach_portset_init(void *context DISPATCH_UNUSED) +{ + struct kevent64_s kev = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD, + }; + kern_return_t kr; + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, + &_dispatch_mach_portset); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create port set"); + } + kev.ident = _dispatch_mach_portset; + _dispatch_kq_update(&kev); +} + +static mach_port_t +_dispatch_get_mach_portset(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init); + return _dispatch_mach_portset; +} + +static kern_return_t +_dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps) +{ + mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; + kern_return_t kr; + + _dispatch_debug_machport(mp); + kr = mach_port_move_member(mach_task_self(), mp, mps); + if (slowpath(kr)) { + DISPATCH_VERIFY_MIG(kr); + switch (kr) { + case KERN_INVALID_RIGHT: + if (mps) { + _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " + 
"mach_port_move_member() failed ", kr); + break; + } + //fall through + case KERN_INVALID_NAME: +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach receive right 0x%x destroyed " + "prematurely", mp); +#endif + break; + default: + (void)dispatch_assume_zero(kr); + break; + } + } + return mps ? kr : 0; +} + +static void +_dispatch_kevent_mach_recv_reenable(struct kevent64_s *ke DISPATCH_UNUSED) +{ +#if (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) || \ + (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090) + // delete and re-add kevent to workaround + if (ke->ext[1] != _dispatch_mach_recv_kevent.ext[1]) { + struct kevent64_s kev = _dispatch_mach_recv_kevent; + kev.flags = EV_DELETE; + _dispatch_kq_update(&kev); + } +#endif + _dispatch_mgr_kevent_reenable(&_dispatch_mach_recv_kevent); +} + +static kern_return_t +_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags) +{ + kern_return_t kr = 0; + dispatch_assert_zero(new_flags & del_flags); + if ((new_flags & _DISPATCH_MACH_RECV_FLAGS) || + (del_flags & _DISPATCH_MACH_RECV_FLAGS)) { + mach_port_t mps; + if (new_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { + mps = _dispatch_get_mach_recv_portset(); + } else if ((new_flags & DISPATCH_MACH_RECV_MESSAGE) || + ((del_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) && + (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE))) { + mps = _dispatch_get_mach_portset(); + } else { + mps = MACH_PORT_NULL; + } + kr = _dispatch_mach_portset_update(dk, mps); + } + return kr; +} + +static kern_return_t +_dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags) +{ + kern_return_t kr = 0; + dispatch_assert_zero(new_flags & del_flags); + if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || (del_flags & _DISPATCH_MACH_SP_FLAGS)) { // Requesting a (delayed) non-sync send-possible notification // registers for both immediate dead-name notification and delayed-arm @@ -1405,234 
+2585,1430 @@ _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, // the MACH_SEND_NOTIFY to the port times out. // If send-possible is unavailable, fall back to immediate dead-name // registration rdar://problem/2527840&9008724 - kr_sp = _dispatch_mach_notify_update(dk, new_flags, del_flags, + kr = _dispatch_mach_notify_update(dk, new_flags, del_flags, _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE, MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME ? 1 : 0); } + return kr; +} + +static inline void +_dispatch_kevent_mach_portset(struct kevent64_s *ke) +{ + if (ke->ident == _dispatch_mach_recv_portset) { + return _dispatch_kevent_mach_msg_drain(ke); + } else if (ke->ident == _dispatch_mach_portset) { + return _dispatch_kevent_machport_drain(ke); + } else { + return _dispatch_kevent_error(ke); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_machport_drain(struct kevent64_s *ke) +{ + mach_port_t name = (mach_port_name_t)ke->data; + dispatch_kevent_t dk; + struct kevent64_s kev; + + _dispatch_debug_machport(name); + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dispatch_assume(dk)) { + return; + } + _dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH + + EV_SET64(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, + DISPATCH_MACH_RECV_MESSAGE, 0, (uintptr_t)dk, 0, 0); + _dispatch_kevent_debug(&kev, __func__); + _dispatch_kevent_merge(&kev); +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_mach_msg_drain(struct kevent64_s *ke) +{ + mach_msg_header_t *hdr = (mach_msg_header_t*)ke->ext[0]; + mach_msg_size_t siz, msgsiz; + mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; + + _dispatch_kevent_mach_recv_reenable(ke); + if (!dispatch_assume(hdr)) { + DISPATCH_CRASH("EVFILT_MACHPORT with no message"); + } + if (fastpath(!kr)) { + return _dispatch_kevent_mach_msg_recv(hdr); + } else if (kr != MACH_RCV_TOO_LARGE) { + goto out; + } + if (!dispatch_assume(ke->ext[1] <= UINT_MAX - + 
dispatch_mach_trailer_size)) { + DISPATCH_CRASH("EVFILT_MACHPORT with overlarge message"); + } + siz = (mach_msg_size_t)ke->ext[1] + dispatch_mach_trailer_size; + hdr = malloc(siz); + if (ke->data) { + if (!dispatch_assume(hdr)) { + // Kernel will discard message too large to fit + hdr = _dispatch_get_mach_recv_msg_buf(); + siz = _dispatch_mach_recv_msg_buf_size; + } + mach_port_t name = (mach_port_name_t)ke->data; + const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); + kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (fastpath(!kr)) { + return _dispatch_kevent_mach_msg_recv(hdr); + } else if (kr == MACH_RCV_TOO_LARGE) { + _dispatch_log("BUG in libdispatch client: " + "_dispatch_kevent_mach_msg_drain: dropped message too " + "large to fit in memory: id = 0x%x, size = %lld", + hdr->msgh_id, ke->ext[1]); + kr = MACH_MSG_SUCCESS; + } + } else { + // We don't know which port in the portset contains the large message, + // so need to receive all messages pending on the portset to ensure the + // large message is drained. 
+ bool received = false; + for (;;) { + if (!dispatch_assume(hdr)) { + DISPATCH_CLIENT_CRASH("Message too large to fit in memory"); + } + const mach_msg_option_t options = (DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT); + kr = mach_msg(hdr, options, 0, siz, _dispatch_mach_recv_portset, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if ((!kr || kr == MACH_RCV_TOO_LARGE) && !dispatch_assume( + hdr->msgh_size <= UINT_MAX - dispatch_mach_trailer_size)) { + DISPATCH_CRASH("Overlarge message"); + } + if (fastpath(!kr)) { + msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; + if (msgsiz < siz) { + void *shrink = realloc(hdr, msgsiz); + if (shrink) hdr = shrink; + } + _dispatch_kevent_mach_msg_recv(hdr); + hdr = NULL; + received = true; + } else if (kr == MACH_RCV_TOO_LARGE) { + siz = hdr->msgh_size + dispatch_mach_trailer_size; + } else { + if (kr == MACH_RCV_TIMED_OUT && received) { + kr = MACH_MSG_SUCCESS; + } + break; + } + hdr = reallocf(hdr, siz); + } + } + if (hdr != _dispatch_get_mach_recv_msg_buf()) { + free(hdr); + } +out: + if (slowpath(kr)) { + _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: " + "message reception failed", kr); + } +} + +static void +_dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr) +{ + dispatch_source_refs_t dri; + dispatch_kevent_t dk; + mach_port_t name = hdr->msgh_local_port; + mach_msg_size_t siz = hdr->msgh_size + dispatch_mach_trailer_size; + + if (!dispatch_assume(hdr->msgh_size <= UINT_MAX - + dispatch_mach_trailer_size)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received overlarge message"); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + if (!dispatch_assume(name)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with MACH_PORT_NULL port"); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + _dispatch_debug_machport(name); + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dispatch_assume(dk)) { + 
_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with unknown kevent"); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + _dispatch_kevent_debug(&dk->dk_kevent, __func__); + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + dispatch_source_t dsi = _dispatch_source_from_refs(dri); + if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { + return _dispatch_source_merge_mach_msg(dsi, dri, dk, hdr, siz); + } + } + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with no listeners"); + return _dispatch_kevent_mach_msg_destroy(hdr); +} + +static void +_dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr) +{ + if (hdr) { + mach_msg_destroy(hdr); + if (hdr != _dispatch_get_mach_recv_msg_buf()) { + free(hdr); + } + } +} + +static void +_dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, + dispatch_kevent_t dk, mach_msg_header_t *hdr, mach_msg_size_t siz) +{ + if (ds == _dispatch_mach_notify_source) { + _dispatch_mach_notify_source_invoke(hdr); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + if (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) { + _dispatch_mach_reply_kevent_unregister((dispatch_mach_t)ds, + (dispatch_mach_reply_refs_t)dr, false); + } + return _dispatch_mach_msg_recv((dispatch_mach_t)ds, hdr, siz); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) +{ + dispatch_source_refs_t dri, dr_next; + dispatch_kevent_t dk; + struct kevent64_s kev; + bool unreg; + + dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); + if (!dk) { + return; + } + + // Update notification registration state. 
+ dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS; + EV_SET64(&kev, name, DISPATCH_EVFILT_MACH_NOTIFICATION, EV_ADD|EV_ENABLE, + flag, 0, (uintptr_t)dk, 0, 0); + if (final) { + // This can never happen again + unreg = true; + } else { + // Re-register for notification before delivery + unreg = _dispatch_kevent_resume(dk, flag, 0); + } + DISPATCH_MACH_KEVENT_ARMED(dk) = 0; + TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { + dispatch_source_t dsi = _dispatch_source_from_refs(dri); + if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) { + dispatch_mach_t dm = (dispatch_mach_t)dsi; + _dispatch_mach_merge_kevent(dm, &kev); + if (unreg && dm->dm_dkev) { + _dispatch_mach_kevent_unregister(dm); + } + } else { + _dispatch_source_merge_kevent(dsi, &kev); + if (unreg) { + _dispatch_source_kevent_unregister(dsi); + } + } + if (!dr_next || DISPATCH_MACH_KEVENT_ARMED(dk)) { + // current merge is last in list (dk might have been freed) + // or it re-armed the notification + return; + } + } +} + +static kern_return_t +_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, + mach_port_mscount_t notify_sync) +{ + mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; + typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data; + kern_return_t kr, krr = 0; + + // Update notification registration state. 
+ dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask; + dk->dk_kevent.data &= ~(del_flags & mask); + + _dispatch_debug_machport(port); + if ((dk->dk_kevent.data & mask) && !(prev & mask)) { + // initialize _dispatch_mach_notify_port: + (void)_dispatch_get_mach_recv_portset(); + _dispatch_debug("machport[0x%08x]: registering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + krr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, _dispatch_mach_notify_port, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(krr); + + switch(krr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + // Supress errors & clear registration state + dk->dk_kevent.data &= ~mask; + break; + default: + // Else, we dont expect any errors from mach. Log any errors + if (dispatch_assume_zero(krr)) { + // log the error & clear registration state + dk->dk_kevent.data &= ~mask; + } else if (dispatch_assume_zero(previous)) { + // Another subsystem has beat libdispatch to requesting the + // specified Mach notification on this port. We should + // technically cache the previous port and message it when the + // kernel messages our port. Or we can just say screw those + // subsystems and deallocate the previous port. 
+ // They should adopt libdispatch :-P + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + previous = MACH_PORT_NULL; + } + } + } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) { + _dispatch_debug("machport[0x%08x]: unregistering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + kr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, MACH_PORT_NULL, + MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + + switch (kr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + case KERN_INVALID_ARGUMENT: + break; + default: + if (dispatch_assume_zero(kr)) { + // log the error + } + } + } else { + return 0; + } + if (slowpath(previous)) { + // the kernel has not consumed the send-once right yet + (void)dispatch_assume_zero( + _dispatch_send_consume_send_once_right(previous)); + } + return krr; +} + +static void +_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) +{ + (void)_dispatch_get_mach_recv_portset(); + _dispatch_debug("registering for calendar-change notification"); + kern_return_t kr = host_request_notification(mach_host_self(), + HOST_NOTIFY_CALENDAR_CHANGE, _dispatch_mach_notify_port); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +static void +_dispatch_mach_host_calendar_change_register(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update); +} + +static void +_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) +{ + mig_reply_error_t reply; + dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union + __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); + dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size); + boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); + if (!success && reply.RetCode == MIG_BAD_ID && hdr->msgh_id == 950) { + // 
host_notify_reply.defs: host_calendar_changed + _dispatch_debug("calendar-change notification"); + _dispatch_timers_calendar_change(); + _dispatch_mach_host_notify_update(NULL); + success = TRUE; + reply.RetCode = KERN_SUCCESS; + } + if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) { + (void)dispatch_assume_zero(reply.RetCode); + } +} + +kern_return_t +_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " + "deleted prematurely", name); +#endif + + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true); + + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + kern_return_t kr; + + _dispatch_debug("machport[0x%08x]: dead-name notification", name); + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true); + + // the act of receiving a dead name notification allocates a dead-name + // right that must be deallocated + kr = mach_port_deallocate(mach_task_self(), name); + DISPATCH_VERIFY_MIG(kr); + //(void)dispatch_assume_zero(kr); + + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + _dispatch_debug("machport[0x%08x]: send-possible notification", name); + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false); + + return KERN_SUCCESS; +} + +#pragma mark - +#pragma mark dispatch_mach_t + +#define DISPATCH_MACH_NEVER_CONNECTED (UINT32_MAX/2) +#define DISPATCH_MACH_PSEUDO_RECEIVED 0x1 +#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 +#define DISPATCH_MACH_OPTIONS_MASK 0xffff + +static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); +static void 
_dispatch_mach_msg_disconnected(dispatch_mach_t dm, + mach_port_t local_port, mach_port_t remote_port); +static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, + dispatch_object_t dou); +static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( + dispatch_mach_msg_t dmsg); + +static dispatch_mach_t +_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, + dispatch_mach_handler_function_t handler, bool handler_is_block) +{ + dispatch_mach_t dm; + dispatch_mach_refs_t dr; + + dm = _dispatch_alloc(DISPATCH_VTABLE(mach), + sizeof(struct dispatch_mach_s)); + _dispatch_queue_init((dispatch_queue_t)dm); + dm->dq_label = label; + + dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds + dm->do_ref_cnt++; // since channel is created suspended + dm->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; + dm->do_targetq = &_dispatch_mgr_q; + + dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s)); + dr->dr_source_wref = _dispatch_ptr2wref(dm); + dr->dm_handler_func = handler; + dr->dm_handler_ctxt = context; + dm->ds_refs = dr; + dm->ds_handler_is_block = handler_is_block; + + dm->dm_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_mach_send_refs_s)); + dm->dm_refs->dr_source_wref = _dispatch_ptr2wref(dm); + dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; + TAILQ_INIT(&dm->dm_refs->dm_replies); + + // First item on the channel sets the user-specified target queue + dispatch_set_target_queue(dm, q); + _dispatch_object_debug(dm, "%s", __func__); + return dm; +} + +dispatch_mach_t +dispatch_mach_create(const char *label, dispatch_queue_t q, + dispatch_mach_handler_t handler) +{ + dispatch_block_t bb = _dispatch_Block_copy((void*)handler); + return _dispatch_mach_create(label, q, bb, + (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true); +} + +dispatch_mach_t +dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, + dispatch_mach_handler_function_t handler) +{ + 
return _dispatch_mach_create(label, q, context, handler, false); +} + +void +_dispatch_mach_dispose(dispatch_mach_t dm) +{ + _dispatch_object_debug(dm, "%s", __func__); + dispatch_mach_refs_t dr = dm->ds_refs; + if (dm->ds_handler_is_block && dr->dm_handler_ctxt) { + Block_release(dr->dm_handler_ctxt); + } + free(dr); + free(dm->dm_refs); + _dispatch_queue_destroy(dm); +} + +void +dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, + mach_port_t send, dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_kevent_t dk; + + if (MACH_PORT_VALID(receive)) { + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; + dk->dk_kevent.ident = receive; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.udata = (uintptr_t)dk; + TAILQ_INIT(&dk->dk_sources); + dm->ds_dkev = dk; + dm->ds_pending_data_mask = dk->dk_kevent.fflags; + _dispatch_retain(dm); // the reference the manager queue holds + } + dr->dm_send = send; + if (MACH_PORT_VALID(send)) { + if (checkin) { + dispatch_retain(checkin); + dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); + } + dr->dm_checkin = checkin; + } + // monitor message reply ports + dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + if (slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_disconnect_cnt, + DISPATCH_MACH_NEVER_CONNECTED, 0, release))) { + DISPATCH_CLIENT_CRASH("Channel already connected"); + } + _dispatch_object_debug(dm, "%s", __func__); + return dispatch_resume(dm); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, bool disconnected) +{ + dispatch_kevent_t dk = dmr->dm_dkev; + mach_port_t local_port = (mach_port_t)dk->dk_kevent.ident; + TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); + _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE); + 
TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dm_list); + free(dmr); + if (disconnected) { + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply, + void *ctxt) +{ + dispatch_kevent_t dk; + dispatch_mach_reply_refs_t dmr; + + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; + dk->dk_kevent.ident = reply; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dk->dk_kevent.udata = (uintptr_t)dk; + // make reply context visible to leaks rdar://11777199 + dk->dk_kevent.ext[1] = (uintptr_t)ctxt; + TAILQ_INIT(&dk->dk_sources); + + dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s)); + dmr->dr_source_wref = _dispatch_ptr2wref(dm); + dmr->dm_dkev = dk; + + _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", reply, + ctxt); + uint32_t flags; + bool do_resume = _dispatch_kevent_register(&dmr->dm_dkev, &flags); + TAILQ_INSERT_TAIL(&dmr->dm_dkev->dk_sources, (dispatch_source_refs_t)dmr, + dr_list); + TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dm_list); + if (do_resume && _dispatch_kevent_resume(dmr->dm_dkev, flags, 0)) { + _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_kevent_unregister(dispatch_mach_t dm) +{ + dispatch_kevent_t dk = dm->dm_dkev; + dm->dm_dkev = NULL; + TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, + dr_list); + dm->ds_pending_data_mask &= ~(unsigned long) + (DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); + _dispatch_kevent_unregister(dk, + DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_kevent_register(dispatch_mach_t dm, mach_port_t send) +{ + dispatch_kevent_t dk; + + dk = _dispatch_calloc(1ul, sizeof(struct 
dispatch_kevent_s)); + dk->dk_kevent = _dispatch_source_type_mach_send.ke; + dk->dk_kevent.ident = send; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags = DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD; + dk->dk_kevent.udata = (uintptr_t)dk; + TAILQ_INIT(&dk->dk_sources); + + dm->ds_pending_data_mask |= dk->dk_kevent.fflags; + + uint32_t flags; + bool do_resume = _dispatch_kevent_register(&dk, &flags); + TAILQ_INSERT_TAIL(&dk->dk_sources, + (dispatch_source_refs_t)dm->dm_refs, dr_list); + dm->dm_dkev = dk; + if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) { + _dispatch_mach_kevent_unregister(dm); + } +} + +static inline void +_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou) +{ + return _dispatch_queue_push(dm._dq, dou); +} + +static inline void +_dispatch_mach_msg_set_options(dispatch_object_t dou, mach_msg_option_t options) +{ + dou._do->do_suspend_cnt = (unsigned int)options; +} + +static inline mach_msg_option_t +_dispatch_mach_msg_get_options(dispatch_object_t dou) +{ + mach_msg_option_t options = (mach_msg_option_t)dou._do->do_suspend_cnt; + return options; +} + +static inline void +_dispatch_mach_msg_set_reason(dispatch_object_t dou, mach_error_t err, + unsigned long reason) +{ + dispatch_assert_zero(reason & ~(unsigned long)code_emask); + dou._do->do_suspend_cnt = (unsigned int)((err || !reason) ? err : + err_local|err_sub(0x3e0)|(mach_error_t)reason); +} + +static inline unsigned long +_dispatch_mach_msg_get_reason(dispatch_object_t dou, mach_error_t *err_ptr) +{ + mach_error_t err = (mach_error_t)dou._do->do_suspend_cnt; + dou._do->do_suspend_cnt = 0; + if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { + *err_ptr = 0; + return err_get_code(err); + } + *err_ptr = err; + return err ? 
DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT; +} + +static void +_dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr, + mach_msg_size_t siz) +{ + _dispatch_debug_machport(hdr->msgh_remote_port); + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + return _dispatch_kevent_mach_msg_destroy(hdr); + } + dispatch_mach_msg_t dmsg; + dispatch_mach_msg_destructor_t destructor; + destructor = (hdr == _dispatch_get_mach_recv_msg_buf()) ? + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : + DISPATCH_MACH_MSG_DESTRUCTOR_FREE; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); + return _dispatch_mach_push(dm, dmsg); +} + +static inline mach_port_t +_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t remote = hdr->msgh_remote_port; + return remote; +} + +static inline mach_port_t +_dispatch_mach_msg_get_reply_port(dispatch_mach_t dm, dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t reply = MACH_PORT_NULL; + mach_msg_option_t msg_opts = _dispatch_mach_msg_get_options(dou); + if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) { + reply = hdr->msgh_reserved; + hdr->msgh_reserved = 0; + } else if (MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == + MACH_MSG_TYPE_MAKE_SEND_ONCE && + MACH_PORT_VALID(hdr->msgh_local_port) && (!dm->ds_dkev || + dm->ds_dkev->dk_kevent.ident != hdr->msgh_local_port)) { + reply = hdr->msgh_local_port; + } + return reply; +} + +static inline void +_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, + mach_port_t remote_port) +{ + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + 
DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + if (local_port) hdr->msgh_local_port = local_port; + if (remote_port) hdr->msgh_remote_port = remote_port; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); + return _dispatch_mach_push(dm, dmsg); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) +{ + mach_port_t reply = _dispatch_mach_msg_get_reply_port(dm, dou); + _dispatch_mach_msg_set_reason(dou, 0, DISPATCH_MACH_MESSAGE_NOT_SENT); + _dispatch_mach_push(dm, dou); + if (reply) { + _dispatch_mach_msg_disconnected(dm, reply, MACH_PORT_NULL); + } +} + +DISPATCH_NOINLINE +static dispatch_object_t +_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_mach_msg_t dmsg = dou._dmsg; + dr->dm_needs_mgr = 0; + if (slowpath(dr->dm_checkin) && dmsg != dr->dm_checkin) { + // send initial checkin message + if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != + &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + dr->dm_needs_mgr = 1; + goto out; + } + dr->dm_checkin = _dispatch_mach_msg_send(dm, dr->dm_checkin)._dmsg; + if (slowpath(dr->dm_checkin)) { + goto out; + } + } + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + mach_msg_return_t kr = 0; + mach_port_t reply = _dispatch_mach_msg_get_reply_port(dm, dmsg); + mach_msg_option_t opts = 0, msg_opts = _dispatch_mach_msg_get_options(dmsg); + if (!slowpath(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + opts = MACH_SEND_MSG | (msg_opts & DISPATCH_MACH_OPTIONS_MASK); + if (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) != + MACH_MSG_TYPE_MOVE_SEND_ONCE) { + if (dmsg != dr->dm_checkin) { + msg->msgh_remote_port = dr->dm_send; + } + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + if (slowpath(!dm->dm_dkev)) { + _dispatch_mach_kevent_register(dm, msg->msgh_remote_port); + } + if (fastpath(dm->dm_dkev)) { + if 
(DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) { + goto out; + } + opts |= MACH_SEND_NOTIFY; + } + } + opts |= MACH_SEND_TIMEOUT; + } + _dispatch_debug_machport(msg->msgh_remote_port); + if (reply) _dispatch_debug_machport(reply); + kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, + MACH_PORT_NULL); + } + _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, opts 0x%x, " + "msg_opts 0x%x, reply on 0x%08x: %s - 0x%x", msg->msgh_remote_port, + msg->msgh_id, dmsg->do_ctxt, opts, msg_opts, reply, + mach_error_string(kr), kr); + if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { + if (opts & MACH_SEND_NOTIFY) { + _dispatch_debug("machport[0x%08x]: send-possible notification " + "armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident); + DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) = 1; + } else { + // send kevent must be installed on the manager queue + dr->dm_needs_mgr = 1; + } + if (reply) { + _dispatch_mach_msg_set_options(dmsg, msg_opts | + DISPATCH_MACH_PSEUDO_RECEIVED); + msg->msgh_reserved = reply; // Remember the original reply port + } + goto out; + } + if (fastpath(!kr) && reply) { + if (_dispatch_queue_get_current() != &_dispatch_mgr_q) { + // reply receive kevent must be installed on the manager queue + dr->dm_needs_mgr = 1; + _dispatch_mach_msg_set_options(dmsg, msg_opts | + DISPATCH_MACH_REGISTER_FOR_REPLY); + if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) { + msg->msgh_reserved = reply; // Remember the original reply port + } + goto out; + } + _dispatch_mach_reply_kevent_register(dm, reply, dmsg->do_ctxt); + } + if (slowpath(dmsg == dr->dm_checkin) && dm->dm_dkev) { + _dispatch_mach_kevent_unregister(dm); + } + _dispatch_mach_msg_set_reason(dmsg, kr, 0); + _dispatch_mach_push(dm, dmsg); + dmsg = NULL; + if (slowpath(kr) && reply) { + // Send failed, so reply was never connected + _dispatch_mach_msg_disconnected(dm, reply, MACH_PORT_NULL); + } +out: + return (dispatch_object_t)dmsg; +} + +static void 
+_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + struct dispatch_object_s *prev, *dc = dou._do; + dc->do_next = NULL; + + prev = dispatch_atomic_xchg2o(dr, dm_tail, dc, release); + if (fastpath(prev)) { + prev->do_next = dc; + return; + } + dr->dm_head = dc; + _dispatch_wakeup(dm); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_drain(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + while (dr->dm_tail) { + while (!(dc = fastpath(dr->dm_head))) { + dispatch_hardware_pause(); + } + do { + next_dc = fastpath(dc->do_next); + dr->dm_head = next_dc; + if (!next_dc && !dispatch_atomic_cmpxchg2o(dr, dm_tail, dc, NULL, + relaxed)) { + // Enqueue is TIGHTLY controlled, we won't wait long. + while (!(next_dc = fastpath(dc->do_next))) { + dispatch_hardware_pause(); + } + dr->dm_head = next_dc; + } + if (!DISPATCH_OBJ_IS_VTABLE(dc)) { + if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { + // send barrier + // leave send queue locked until barrier has completed + return _dispatch_mach_push(dm, dc); + } +#if DISPATCH_MACH_SEND_SYNC + if (slowpath((long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT)){ + _dispatch_thread_semaphore_signal( + (_dispatch_thread_semaphore_t)dc->do_ctxt); + continue; + } +#endif // DISPATCH_MACH_SEND_SYNC + if (slowpath(!_dispatch_mach_reconnect_invoke(dm, dc))) { + goto out; + } + continue; + } + if (slowpath(dr->dm_disconnect_cnt) || + slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + _dispatch_mach_msg_not_sent(dm, dc); + continue; + } + if (slowpath(dc = _dispatch_mach_msg_send(dm, dc)._do)) { + goto out; + } + } while ((dc = next_dc)); + } +out: + // if this is not a complete drain, we must undo some things + if (slowpath(dc)) { + if (!next_dc && + !dispatch_atomic_cmpxchg2o(dr, dm_tail, NULL, dc, relaxed)) { + // wait for enqueue slow path to finish + while (!(next_dc = 
fastpath(dr->dm_head))) { + dispatch_hardware_pause(); + } + dc->do_next = next_dc; + } + dr->dm_head = dc; + } + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + _dispatch_wakeup(dm); +} + +static inline void +_dispatch_mach_send(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (!fastpath(dr->dm_tail) || !fastpath(dispatch_atomic_cmpxchg2o(dr, + dm_sending, 0, 1, acquire))) { + return; + } + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_mach_send_drain(dm); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_merge_kevent(dispatch_mach_t dm, const struct kevent64_s *ke) +{ + if (!(ke->fflags & dm->ds_pending_data_mask)) { + return; + } + _dispatch_mach_send(dm); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { + DISPATCH_CLIENT_CRASH("Message already enqueued"); + } + dispatch_retain(dmsg); + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + _dispatch_mach_msg_set_options(dmsg, options & ~DISPATCH_MACH_OPTIONS_MASK); + if (slowpath(dr->dm_tail) || slowpath(dr->dm_disconnect_cnt) || + slowpath(dm->ds_atomic_flags & DSF_CANCELED) || + slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, + acquire))) { + return _dispatch_mach_send_push(dm, dmsg); + } + if (slowpath(dmsg = _dispatch_mach_msg_send(dm, dmsg)._dmsg)) { + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + return _dispatch_mach_send_push(dm, dmsg); + } + if (slowpath(dr->dm_tail)) { + return _dispatch_mach_send_drain(dm); + } + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + _dispatch_wakeup(dm); +} + +static void +_dispatch_mach_disconnect(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (dm->dm_dkev) { + _dispatch_mach_kevent_unregister(dm); + } + if (MACH_PORT_VALID(dr->dm_send)) { + _dispatch_mach_msg_disconnected(dm, 
MACH_PORT_NULL, dr->dm_send); + } + dr->dm_send = MACH_PORT_NULL; + if (dr->dm_checkin) { + _dispatch_mach_msg_not_sent(dm, dr->dm_checkin); + dr->dm_checkin = NULL; + } + if (!TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + dispatch_mach_reply_refs_t dmr, tmp; + TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dm_list, tmp){ + _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + } + } +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_cancel(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (!fastpath(dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, acquire))) { + return false; + } + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_mach_disconnect(dm); + if (dm->ds_dkev) { + mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident; + _dispatch_source_kevent_unregister((dispatch_source_t)dm); + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + } + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + return true; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) +{ + if (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + if (slowpath(_dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send/reply kevents must be uninstalled on the manager queue + return false; + } + } + _dispatch_mach_disconnect(dm); + dispatch_mach_send_refs_t dr = dm->dm_refs; + dr->dm_checkin = dou._dc->dc_data; + dr->dm_send = (mach_port_t)dou._dc->dc_other; + _dispatch_continuation_free(dou._dc); + (void)dispatch_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); + _dispatch_object_debug(dm, "%s", __func__); + return true; +} + +DISPATCH_NOINLINE +void +dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, + dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + (void)dispatch_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); + if (MACH_PORT_VALID(send) && checkin) { + dispatch_retain(checkin); + dr->dm_checkin_port = 
_dispatch_mach_msg_get_remote_port(checkin); + } else { + checkin = NULL; + dr->dm_checkin_port = MACH_PORT_NULL; + } + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); + dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; + dc->dc_ctxt = dc; + dc->dc_data = checkin; + dc->dc_other = (void*)(uintptr_t)send; + return _dispatch_mach_send_push(dm, dc); +} + +#if DISPATCH_MACH_SEND_SYNC +DISPATCH_NOINLINE +static void +_dispatch_mach_send_sync_slow(dispatch_mach_t dm) +{ + _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); + struct dispatch_object_s dc = { + .do_vtable = (void *)(DISPATCH_OBJ_SYNC_SLOW_BIT), + .do_ctxt = (void*)sema, + }; + _dispatch_mach_send_push(dm, &dc); + _dispatch_thread_semaphore_wait(sema); + _dispatch_put_thread_semaphore(sema); +} +#endif // DISPATCH_MACH_SEND_SYNC + +DISPATCH_NOINLINE +mach_port_t +dispatch_mach_get_checkin_port(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + return MACH_PORT_DEAD; + } + return dr->dm_checkin_port; +} - return (kr_recv ? 
kr_recv : kr_sp); +DISPATCH_NOINLINE +static void +_dispatch_mach_connect_invoke(dispatch_mach_t dm) +{ + dispatch_mach_refs_t dr = dm->ds_refs; + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_CONNECTED, NULL, 0, dr->dm_handler_func); + dm->dm_connect_handler_called = 1; } +DISPATCH_NOINLINE void -_dispatch_drain_mach_messages(struct kevent *ke) +_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg) { - mach_port_t name = (mach_port_name_t)ke->data; - dispatch_source_refs_t dri; - dispatch_kevent_t dk; - struct kevent kev; + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + dispatch_mach_refs_t dr = dm->ds_refs; + mach_error_t err; + unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); + + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, + dr->dm_handler_func); + _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); + dispatch_release(dmsg); +} - if (!dispatch_assume(name)) { - return; +DISPATCH_NOINLINE +void +_dispatch_mach_barrier_invoke(void *ctxt) +{ + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + dispatch_mach_refs_t dr = dm->ds_refs; + struct dispatch_continuation_s *dc = ctxt; + void *context = dc->dc_data; + dispatch_function_t barrier = dc->dc_other; + bool send_barrier = ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT); + + _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); } - _dispatch_debug_machport(name); - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dispatch_assume(dk)) { - return; + _dispatch_client_callout(context, barrier); + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, 
dr->dm_handler_func); + _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); + if (send_barrier) { + (void)dispatch_atomic_dec2o(dm->dm_refs, dm_sending, release); } - _dispatch_kevent_machport_disable(dk); // emulate EV_DISPATCH - - EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, - DISPATCH_MACH_RECV_MESSAGE, 0, dk); +} - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), &kev); +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t barrier) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = _dispatch_mach_barrier_invoke; + dc->dc_ctxt = dc; + dc->dc_data = context; + dc->dc_other = barrier; + + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (slowpath(dr->dm_tail) || slowpath(!dispatch_atomic_cmpxchg2o(dr, + dm_sending, 0, 1, acquire))) { + return _dispatch_mach_send_push(dm, dc); } + // leave send queue locked until barrier has completed + return _dispatch_mach_push(dm, dc); } -static inline void -_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, uint32_t unreg, - bool final) +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t barrier) { - dispatch_source_refs_t dri; - dispatch_kevent_t dk; - struct kevent kev; - - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dk) { - return; - } + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); + dc->dc_func = _dispatch_mach_barrier_invoke; + dc->dc_ctxt = dc; + dc->dc_data = context; + dc->dc_other = barrier; + return _dispatch_mach_push(dm, dc); +} - // Update notification registration state. 
- dk->dk_kevent.data &= ~unreg; - if (!final) { - // Re-register for notification before delivery - _dispatch_kevent_resume(dk, flag, 0); - } +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_mach_send_barrier_f(dm, _dispatch_Block_copy(barrier), + _dispatch_call_block_and_release); +} - EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE, flag, 0, dk); +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_mach_receive_barrier_f(dm, _dispatch_Block_copy(barrier), + _dispatch_call_block_and_release); +} - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), &kev); - if (final) { - // this can never happen again - // this must happen after the merge - // this may be racy in the future, but we don't provide a 'setter' - // API for the mask yet - _dispatch_source_from_refs(dri)->ds_pending_data_mask &= ~unreg; - } +DISPATCH_NOINLINE +static void +_dispatch_mach_cancel_invoke(dispatch_mach_t dm) +{ + dispatch_mach_refs_t dr = dm->ds_refs; + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); } + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); + dm->dm_cancel_handler_called = 1; + _dispatch_release(dm); // the retain is done at creation time +} - if (final) { - // no more sources have these flags - dk->dk_kevent.fflags &= ~unreg; - } +DISPATCH_NOINLINE +void +dispatch_mach_cancel(dispatch_mach_t dm) +{ + dispatch_source_cancel((dispatch_source_t)dm); } -static kern_return_t -_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, - mach_port_mscount_t notify_sync) +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_mach_invoke2(dispatch_object_t dou, + _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) { 
- mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; - typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data; - kern_return_t kr, krr = 0; + dispatch_mach_t dm = dou._dm; - // Update notification registration state. - dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask; - dk->dk_kevent.data &= ~(del_flags & mask); + // This function performs all mach channel actions. Each action is + // responsible for verifying that it takes place on the appropriate queue. + // If the current queue is not the correct queue for this action, the + // correct queue will be returned and the invoke will be re-driven on that + // queue. - _dispatch_debug_machport(port); - if ((dk->dk_kevent.data & mask) && !(prev & mask)) { - previous = MACH_PORT_NULL; - krr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, _dispatch_event_port, - MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(krr); + // The order of tests here in invoke and in probe should be consistent. - switch(krr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - // Supress errors & clear registration state - dk->dk_kevent.data &= ~mask; - break; - default: - // Else, we dont expect any errors from mach. Log any errors - if (dispatch_assume_zero(krr)) { - // log the error & clear registration state - dk->dk_kevent.data &= ~mask; - } else if (dispatch_assume_zero(previous)) { - // Another subsystem has beat libdispatch to requesting the - // specified Mach notification on this port. We should - // technically cache the previous port and message it when the - // kernel messages our port. Or we can just say screw those - // subsystems and deallocate the previous port. 
- // They should adopt libdispatch :-P - kr = mach_port_deallocate(mach_task_self(), previous); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - previous = MACH_PORT_NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_mach_send_refs_t dr = dm->dm_refs; + + if (slowpath(!dm->ds_is_installed)) { + // The channel needs to be installed on the manager queue. + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; + } + if (dm->ds_dkev) { + _dispatch_source_kevent_register((dispatch_source_t)dm); + } + dm->ds_is_installed = true; + _dispatch_mach_send(dm); + // Apply initial target queue change + _dispatch_queue_drain(dou); + if (dm->dq_items_tail) { + return dm->do_targetq; + } + } else if (dm->dq_items_tail) { + // The channel has pending messages to deliver to the target queue. + if (dq != dm->do_targetq) { + return dm->do_targetq; + } + dispatch_queue_t tq = dm->do_targetq; + if (slowpath(_dispatch_queue_drain(dou))) { + DISPATCH_CLIENT_CRASH("Sync onto mach channel"); + } + if (slowpath(tq != dm->do_targetq)) { + // An item on the channel changed the target queue + return dm->do_targetq; + } + } else if (dr->dm_tail) { + if (slowpath(dr->dm_needs_mgr) || (slowpath(dr->dm_disconnect_cnt) && + (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)))) { + // Send/reply kevents need to be installed or uninstalled + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; } } - } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) { - previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, MACH_PORT_NULL, - MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - - switch (kr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - case KERN_INVALID_ARGUMENT: - break; - default: - if (dispatch_assume_zero(kr)) { - // log the error + if (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || + (dm->ds_atomic_flags & DSF_CANCELED) || 
dr->dm_disconnect_cnt) { + // The channel has pending messages to send. + _dispatch_mach_send(dm); + } + } else if (dm->ds_atomic_flags & DSF_CANCELED){ + // The channel has been cancelled and needs to be uninstalled from the + // manager queue. After uninstallation, the cancellation handler needs + // to be delivered to the target queue. + if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || + !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; + } + if (!_dispatch_mach_cancel(dm)) { + return NULL; } } - } else { - return 0; - } - if (slowpath(previous)) { - // the kernel has not consumed the send-once right yet - (void)dispatch_assume_zero( - _dispatch_send_consume_send_once_right(previous)); + if (!dm->dm_cancel_handler_called) { + if (dq != dm->do_targetq) { + return dm->do_targetq; + } + _dispatch_mach_cancel_invoke(dm); + } } - return krr; -} - -static void -_dispatch_mach_notify_source2(void *context) -{ - dispatch_source_t ds = context; - size_t maxsz = MAX(sizeof(union - __RequestUnion___dispatch_send_libdispatch_internal_protocol_subsystem), - sizeof(union - __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); - - dispatch_mig_server(ds, maxsz, libdispatch_internal_protocol_server); + return NULL; } +DISPATCH_NOINLINE void -_dispatch_mach_notify_source_init(void *context DISPATCH_UNUSED) +_dispatch_mach_invoke(dispatch_mach_t dm) { - _dispatch_get_port_set(); - - _dispatch_mach_notify_source = dispatch_source_create( - DISPATCH_SOURCE_TYPE_MACH_RECV, _dispatch_event_port, 0, - &_dispatch_mgr_q); - dispatch_assert(_dispatch_mach_notify_source); - dispatch_set_context(_dispatch_mach_notify_source, - _dispatch_mach_notify_source); - dispatch_source_set_event_handler_f(_dispatch_mach_notify_source, - _dispatch_mach_notify_source2); - dispatch_resume(_dispatch_mach_notify_source); + _dispatch_queue_class_invoke(dm, _dispatch_mach_invoke2); } -kern_return_t 
-_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) +unsigned long +_dispatch_mach_probe(dispatch_mach_t dm) { -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " - "deleted prematurely", name); -#endif + // This function determines whether the mach channel needs to be invoked. + // The order of tests here in probe and in invoke should be consistent. - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, - _DISPATCH_MACH_SP_FLAGS, true); + dispatch_mach_send_refs_t dr = dm->dm_refs; - return KERN_SUCCESS; + if (slowpath(!dm->ds_is_installed)) { + // The channel needs to be installed on the manager queue. + return true; + } else if (dm->dq_items_tail) { + // The source has pending messages to deliver to the target queue. + return true; + } else if (dr->dm_tail && + (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || + (dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt)) { + // The channel has pending messages to send. + return true; + } else if (dm->ds_atomic_flags & DSF_CANCELED) { + if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || + !TAILQ_EMPTY(&dm->dm_refs->dm_replies) || + !dm->dm_cancel_handler_called) { + // The channel needs to be uninstalled from the manager queue, or + // the cancellation handler needs to be delivered to the target + // queue. + return true; + } + } + // Nothing to do. 
+ return false; } -kern_return_t -_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ - kern_return_t kr; +#pragma mark - +#pragma mark dispatch_mach_msg_t -#if DISPATCH_DEBUG - _dispatch_log("machport[0x%08x]: dead-name notification: %s", - name, __func__); -#endif - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, - _DISPATCH_MACH_SP_FLAGS, true); +dispatch_mach_msg_t +dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, + dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr) +{ + if (slowpath(size < sizeof(mach_msg_header_t)) || + slowpath(destructor && !msg)) { + DISPATCH_CLIENT_CRASH("Empty message"); + } + dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), + sizeof(struct dispatch_mach_msg_s) + + (destructor ? 0 : size - sizeof(dmsg->msg))); + if (destructor) { + dmsg->msg = msg; + } else if (msg) { + memcpy(dmsg->buf, msg, size); + } + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dmsg->do_targetq = _dispatch_get_root_queue(0, false); + dmsg->destructor = destructor; + dmsg->size = size; + if (msg_ptr) { + *msg_ptr = _dispatch_mach_msg_get_msg(dmsg); + } + return dmsg; +} - // the act of receiving a dead name notification allocates a dead-name - // right that must be deallocated - kr = mach_port_deallocate(mach_task_self(), name); - DISPATCH_VERIFY_MIG(kr); - //(void)dispatch_assume_zero(kr); +void +_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg) +{ + switch (dmsg->destructor) { + case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT: + break; + case DISPATCH_MACH_MSG_DESTRUCTOR_FREE: + free(dmsg->msg); + break; + case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: { + mach_vm_size_t vm_size = dmsg->size; + mach_vm_address_t vm_addr = (uintptr_t)dmsg->msg; + (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(), + vm_addr, vm_size)); + break; + }} +} - return KERN_SUCCESS; +static inline mach_msg_header_t* 
+_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg) +{ + return dmsg->destructor ? dmsg->msg : (mach_msg_header_t*)dmsg->buf; } -kern_return_t -_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) +mach_msg_header_t* +dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr) { -#if DISPATCH_DEBUG - _dispatch_log("machport[0x%08x]: send-possible notification: %s", - name, __func__); -#endif - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, - _DISPATCH_MACH_SP_FLAGS, false); + if (size_ptr) { + *size_ptr = dmsg->size; + } + return _dispatch_mach_msg_get_msg(dmsg); +} - return KERN_SUCCESS; +size_t +_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(dmsg), dmsg); + offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, " + "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1); + offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " + "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->buf); + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); + if (hdr->msgh_id) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", + hdr->msgh_id); + } + if (hdr->msgh_size) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ", + hdr->msgh_size); + } + if (hdr->msgh_bits) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u", + MACH_MSGH_BITS_LOCAL(hdr->msgh_bits), + MACH_MSGH_BITS_REMOTE(hdr->msgh_bits)); + if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x", + MACH_MSGH_BITS_OTHER(hdr->msgh_bits)); + } + offset += dsnprintf(&buf[offset], bufsiz - offset, ">, "); + } + if (hdr->msgh_local_port && hdr->msgh_remote_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, " + "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port); + }
else if (hdr->msgh_local_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x", + hdr->msgh_local_port); + } else if (hdr->msgh_remote_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x", + hdr->msgh_remote_port); + } else { + offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports"); + } + offset += dsnprintf(&buf[offset], bufsiz - offset, " } }"); + return offset; } +#pragma mark - +#pragma mark dispatch_mig_server + mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) @@ -1643,15 +4019,16 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; mach_msg_return_t kr = 0; + uint64_t assertion_token = 0; unsigned int cnt = 1000; // do not stall out serial queues - int demux_success; + boolean_t demux_success; bool received = false; size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; // XXX FIXME -- allocate these elsewhere bufRequest = alloca(rcv_size); bufReply = alloca(rcv_size); - bufReply->Head.msgh_size = 0; // make CLANG happy + bufReply->Head.msgh_size = 0; bufRequest->RetCode = 0; #if DISPATCH_DEBUG @@ -1665,7 +4042,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options &= ~MACH_RCV_MSG; if (!(tmp_options & MACH_SEND_MSG)) { - break; + goto out; } } kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size, @@ -1721,11 +4098,19 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, "dispatch_mig_server: mach_msg() failed", kr); break; } - break; + goto out; } if (!(tmp_options & MACH_RCV_MSG)) { - break; + goto out; + } + + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + assertion_token = 0; } received = true; @@ -1733,6 +4118,14 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, bufRequest = bufReply; 
bufReply = bufTemp; +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head, + NULL, &assertion_token); + if (r && slowpath(r != EIO)) { + (void)dispatch_assume_zero(r); + } +#endif + demux_success = callback(&bufRequest->Head, &bufReply->Head); if (!demux_success) { @@ -1762,6 +4155,14 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, } } +out: + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + } + return kr; } @@ -1786,8 +4187,12 @@ _evfiltstr(short filt) #ifdef EVFILT_VM _evfilt2(EVFILT_VM); #endif +#ifdef EVFILT_MEMORYSTATUS + _evfilt2(EVFILT_MEMORYSTATUS); +#endif #if HAVE_MACH _evfilt2(EVFILT_MACHPORT); + _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION); #endif _evfilt2(EVFILT_FS); _evfilt2(EVFILT_USER); @@ -1804,49 +4209,76 @@ static size_t _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_queue_t target = ds->do_targetq; - return snprintf(buf, bufsiz, "target = %s[%p], pending_data = 0x%lx, " - "pending_data_mask = 0x%lx, ", - target ? target->dq_label : "", target, - ds->ds_pending_data, ds->ds_pending_data_mask); + return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, " + "pending_data = 0x%lx, pending_data_mask = 0x%lx, ", + target && target->dq_label ? 
target->dq_label : "", target, + ds->ds_ident_hack, ds->ds_pending_data, ds->ds_pending_data_mask); } static size_t _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_source_refs_t dr = ds->ds_refs; - return snprintf(buf, bufsiz, "timer = { target = 0x%llx, " - "last_fire = 0x%llx, interval = 0x%llx, flags = 0x%llx }, ", - ds_timer(dr).target, ds_timer(dr).last_fire, ds_timer(dr).interval, - ds_timer(dr).flags); + return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx," + " last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", + ds_timer(dr).target, ds_timer(dr).deadline, ds_timer(dr).last_fire, + ds_timer(dr).interval, ds_timer(dr).flags); } size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) { size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(ds), ds); offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); if (ds->ds_is_timer) { offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } - offset += snprintf(&buf[offset], bufsiz - offset, "filter = %s }", + offset += dsnprintf(&buf[offset], bufsiz - offset, "filter = %s }", ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????"); return offset; } +static size_t +_dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz) +{ + dispatch_queue_t target = dm->do_targetq; + return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " + "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " + "sending = %d, disconnected = %d, canceled = %d ", + target && target->dq_label ? 
target->dq_label : "", target, + dm->ds_dkev ?(mach_port_t)dm->ds_dkev->dk_kevent.ident:0, + dm->dm_refs->dm_send, + dm->dm_dkev ?(mach_port_t)dm->dm_dkev->dk_kevent.ident:0, + dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) ? + " (armed)" : "", dm->dm_refs->dm_checkin_port, + dm->dm_refs->dm_checkin ? " (pending)" : "", + dm->dm_refs->dm_sending, dm->dm_refs->dm_disconnect_cnt, + (bool)(dm->ds_atomic_flags & DSF_CANCELED)); +} +size_t +_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dm->dq_label ? dm->dq_label : dx_kind(dm), dm); + offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset); + offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + #if DISPATCH_DEBUG -void -dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str) +static void +_dispatch_kevent_debug(struct kevent64_s* kev, const char* str) { - size_t i; - for (i = 0; i < count; ++i) { - _dispatch_log("kevent[%lu] = { ident = %p, filter = %s, flags = 0x%x, " - "fflags = 0x%x, data = %p, udata = %p }: %s", - i, (void*)kev[i].ident, _evfiltstr(kev[i].filter), kev[i].flags, - kev[i].fflags, (void*)kev[i].data, (void*)kev[i].udata, str); - } + _dispatch_log("kevent[%p] = { ident = 0x%llx, filter = %s, flags = 0x%x, " + "fflags = 0x%x, data = 0x%llx, udata = 0x%llx, ext[0] = 0x%llx, " + "ext[1] = 0x%llx }: %s", kev, kev->ident, _evfiltstr(kev->filter), + kev->flags, kev->fflags, kev->data, kev->udata, kev->ext[0], + kev->ext[1], str); } static void @@ -1901,7 +4333,7 @@ _dispatch_kevent_debugger2(void *context) dk, (unsigned long)dk->dk_kevent.ident, _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data, - dk->dk_kevent.udata); + (void*)dk->dk_kevent.udata); fprintf(debug_stream, "\t\t
<br><ul>\n"); TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { ds = _dispatch_source_from_refs(dr); @@ -1914,7 +4346,7 @@ _dispatch_kevent_debugger2(void *context) dispatch_queue_t dq = ds->do_targetq; fprintf(debug_stream, "\t\t
<br>DQ: %p refcnt 0x%x suspend " "0x%x label: %s\n", dq, dq->do_ref_cnt + 1, - dq->do_suspend_cnt, dq->dq_label); + dq->do_suspend_cnt, dq->dq_label ? dq->dq_label:""); } } fprintf(debug_stream, "\t\t
\n"); @@ -1999,7 +4431,7 @@ _dispatch_kevent_debugger(void *context DISPATCH_UNUSED) goto out_bad; } - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, &_dispatch_mgr_q); if (dispatch_assume(ds)) { _dispatch_log("LIBDISPATCH: debug port: %hu", @@ -2024,6 +4456,7 @@ out_bad: #define MACH_PORT_TYPE_SPREQUEST 0x40000000 #endif +DISPATCH_NOINLINE void dispatch_debug_machport(mach_port_t name, const char* str) { @@ -2048,11 +4481,10 @@ dispatch_debug_machport(mach_port_t name, const char* str) (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, MACH_PORT_RIGHT_DEAD_NAME, &nd)); } - if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND| - MACH_PORT_TYPE_SEND_ONCE)) { + if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) { (void)dispatch_assume_zero(mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs)); - } + } if (type & MACH_PORT_TYPE_RECEIVE) { mach_port_status_t status = { .mps_pset = 0, }; mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT; diff --git a/src/source_internal.h b/src/source_internal.h index c2c706f..1a023cf 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,6 +32,12 @@ #include // for HeaderDoc #endif +#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) +#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) +#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) +#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 4) +#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 4) + // NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t // bit values must not overlap as they share the same kevent fflags ! 
@@ -50,37 +56,58 @@ enum { * @constant DISPATCH_MACH_RECV_MESSAGE * Receive right has pending messages * + * @constant DISPATCH_MACH_RECV_MESSAGE_DIRECT + * Receive messages from receive right directly via kevent64() + * * @constant DISPATCH_MACH_RECV_NO_SENDERS * Receive right has no more senders. TODO */ enum { DISPATCH_MACH_RECV_MESSAGE = 0x2, - DISPATCH_MACH_RECV_NO_SENDERS = 0x10, + DISPATCH_MACH_RECV_MESSAGE_DIRECT = 0x10, + DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20, + DISPATCH_MACH_RECV_NO_SENDERS = 0x40, }; enum { DISPATCH_TIMER_WALL_CLOCK = 0x4, + DISPATCH_TIMER_INTERVAL = 0x8, + DISPATCH_TIMER_WITH_AGGREGATE = 0x10, }; -#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) -#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) -#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) -#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 3) +// low bits are timer QoS class +#define DISPATCH_TIMER_QOS_NORMAL 0u +#define DISPATCH_TIMER_QOS_CRITICAL 1u +#define DISPATCH_TIMER_QOS_BACKGROUND 2u +#define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1) +#define DISPATCH_TIMER_QOS(tidx) ((uintptr_t)(tidx) & 0x3ul) -#define DISPATCH_TIMER_INDEX_WALL 0 -#define DISPATCH_TIMER_INDEX_MACH 1 -#define DISPATCH_TIMER_INDEX_DISARM 2 +#define DISPATCH_TIMER_KIND_WALL 0u +#define DISPATCH_TIMER_KIND_MACH 1u +#define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1) +#define DISPATCH_TIMER_KIND(tidx) (((uintptr_t)(tidx) >> 2) & 0x1ul) + +#define DISPATCH_TIMER_INDEX(kind, qos) (((kind) << 2) | (qos)) +#define DISPATCH_TIMER_INDEX_DISARM \ + DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_COUNT, 0) +#define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1) +#define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \ + DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \ + DISPATCH_TIMER_KIND_WALL : DISPATCH_TIMER_KIND_MACH, \ + f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \ + f & DISPATCH_TIMER_BACKGROUND ? 
DISPATCH_TIMER_QOS_BACKGROUND : \ + DISPATCH_TIMER_QOS_NORMAL); }) struct dispatch_kevent_s { TAILQ_ENTRY(dispatch_kevent_s) dk_list; TAILQ_HEAD(, dispatch_source_refs_s) dk_sources; - struct kevent dk_kevent; + struct kevent64_s dk_kevent; }; typedef struct dispatch_kevent_s *dispatch_kevent_t; struct dispatch_source_type_s { - struct kevent ke; + struct kevent64_s ke; uint64_t mask; void (*init)(dispatch_source_t ds, dispatch_source_type_t type, uintptr_t handle, unsigned long mask, dispatch_queue_t q); @@ -88,77 +115,161 @@ struct dispatch_source_type_s { struct dispatch_timer_source_s { uint64_t target; + uint64_t deadline; uint64_t last_fire; uint64_t interval; uint64_t leeway; - uint64_t flags; // dispatch_timer_flags_t + unsigned long flags; // dispatch_timer_flags_t unsigned long missed; }; // Source state which may contain references to the source object // Separately allocated so that 'leaks' can see sources -struct dispatch_source_refs_s { +typedef struct dispatch_source_refs_s { TAILQ_ENTRY(dispatch_source_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t dispatch_function_t ds_handler_func; void *ds_handler_ctxt; void *ds_cancel_handler; void *ds_registration_handler; -}; +} *dispatch_source_refs_t; -typedef struct dispatch_source_refs_s *dispatch_source_refs_t; - -struct dispatch_timer_source_refs_s { +typedef struct dispatch_timer_source_refs_s { struct dispatch_source_refs_s _ds_refs; struct dispatch_timer_source_s _ds_timer; -}; + TAILQ_ENTRY(dispatch_timer_source_refs_s) dt_list; +} *dispatch_timer_source_refs_t; + +typedef struct dispatch_timer_source_aggregate_refs_s { + struct dispatch_timer_source_refs_s _dsa_refs; + TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dra_list; + TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dta_list; +} *dispatch_timer_source_aggregate_refs_t; #define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr)) #define _dispatch_wref2ptr(ref) ((void*)~(ref)) #define 
_dispatch_source_from_refs(dr) \ ((dispatch_source_t)_dispatch_wref2ptr((dr)->dr_source_wref)) #define ds_timer(dr) \ - (((struct dispatch_timer_source_refs_s *)(dr))->_ds_timer) + (((dispatch_timer_source_refs_t)(dr))->_ds_timer) +#define ds_timer_aggregate(ds) \ + ((dispatch_timer_aggregate_t)((ds)->dq_specific_q)) + +DISPATCH_ALWAYS_INLINE +static inline unsigned int +_dispatch_source_timer_idx(dispatch_source_refs_t dr) +{ + return DISPATCH_TIMER_IDENT(ds_timer(dr).flags); +} // ds_atomic_flags bits #define DSF_CANCELED 1u // cancellation has been requested #define DSF_ARMED 2u // source is armed +#define DISPATCH_SOURCE_HEADER(refs) \ + dispatch_kevent_t ds_dkev; \ + dispatch_##refs##_refs_t ds_refs; \ + unsigned int ds_atomic_flags; \ + unsigned int \ + ds_is_level:1, \ + ds_is_adder:1, \ + ds_is_installed:1, \ + ds_needs_rearm:1, \ + ds_is_timer:1, \ + ds_cancel_is_block:1, \ + ds_handler_is_block:1, \ + ds_registration_is_block:1, \ + dm_connect_handler_called:1, \ + dm_cancel_handler_called:1; \ + unsigned long ds_pending_data_mask; + DISPATCH_CLASS_DECL(source); struct dispatch_source_s { DISPATCH_STRUCT_HEADER(source); DISPATCH_QUEUE_HEADER; - // Instruments always copies DISPATCH_QUEUE_MIN_LABEL_SIZE, which is 64, - // so the remainder of the structure must be big enough + DISPATCH_SOURCE_HEADER(source); + unsigned long ds_ident_hack; + unsigned long ds_data; + unsigned long ds_pending_data; +}; + +// Mach channel state which may contain references to the channel object +// layout must match dispatch_source_refs_s +struct dispatch_mach_refs_s { + TAILQ_ENTRY(dispatch_mach_refs_s) dr_list; + uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t + dispatch_mach_handler_function_t dm_handler_func; + void *dm_handler_ctxt; +}; +typedef struct dispatch_mach_refs_s *dispatch_mach_refs_t; + +struct dispatch_mach_reply_refs_s { + TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list; + uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t + 
dispatch_kevent_t dm_dkev; + TAILQ_ENTRY(dispatch_mach_reply_refs_s) dm_list; +}; +typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; + +struct dispatch_mach_send_refs_s { + TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list; + uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t + dispatch_mach_msg_t dm_checkin; + TAILQ_HEAD(, dispatch_mach_reply_refs_s) dm_replies; + uint32_t volatile dm_disconnect_cnt; + uint32_t volatile dm_sending; + unsigned int dm_needs_mgr:1; + struct dispatch_object_s *volatile dm_tail; + struct dispatch_object_s *volatile dm_head; + mach_port_t dm_send, dm_checkin_port; +}; +typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; + +DISPATCH_CLASS_DECL(mach); +struct dispatch_mach_s { + DISPATCH_STRUCT_HEADER(mach); + DISPATCH_QUEUE_HEADER; + DISPATCH_SOURCE_HEADER(mach); + dispatch_kevent_t dm_dkev; + dispatch_mach_send_refs_t dm_refs; +}; + +DISPATCH_CLASS_DECL(mach_msg); +struct dispatch_mach_msg_s { + DISPATCH_STRUCT_HEADER(mach_msg); + dispatch_mach_msg_destructor_t destructor; + size_t size; union { - char _ds_pad[DISPATCH_QUEUE_MIN_LABEL_SIZE]; - struct { - char dq_label[8]; - dispatch_kevent_t ds_dkev; - dispatch_source_refs_t ds_refs; - unsigned int ds_atomic_flags; - unsigned int - ds_is_level:1, - ds_is_adder:1, - ds_is_installed:1, - ds_needs_rearm:1, - ds_is_timer:1, - ds_cancel_is_block:1, - ds_handler_is_block:1, - ds_registration_is_block:1; - unsigned long ds_data; - unsigned long ds_pending_data; - unsigned long ds_pending_data_mask; - unsigned long ds_ident_hack; - }; + mach_msg_header_t *msg; + char buf[0]; }; }; +#if TARGET_OS_EMBEDDED +#define DSL_HASH_SIZE 64u // must be a power of two +#else +#define DSL_HASH_SIZE 256u // must be a power of two +#endif + void _dispatch_source_xref_dispose(dispatch_source_t ds); -void _dispatch_mach_notify_source_init(void *context); -dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t 
ds); -bool _dispatch_source_probe(dispatch_source_t ds); +void _dispatch_source_invoke(dispatch_source_t ds); +unsigned long _dispatch_source_probe(dispatch_source_t ds); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); +void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); + +void _dispatch_mach_dispose(dispatch_mach_t dm); +void _dispatch_mach_invoke(dispatch_mach_t dm); +unsigned long _dispatch_mach_probe(dispatch_mach_t dm); +size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); + +void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); +void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg); +size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz); + +void _dispatch_mach_barrier_invoke(void *ctxt); + +unsigned long _dispatch_mgr_wakeup(dispatch_queue_t dq); +void _dispatch_mgr_thread(dispatch_queue_t dq); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/time.c b/src/time.c index 4c0285a..a1a8924 100644 --- a/src/time.c +++ b/src/time.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -23,90 +23,112 @@ uint64_t _dispatch_get_nanoseconds(void) { +#if !TARGET_OS_WIN32 struct timeval now; int r = gettimeofday(&now, NULL); dispatch_assert_zero(r); dispatch_assert(sizeof(NSEC_PER_SEC) == 8); dispatch_assert(sizeof(NSEC_PER_USEC) == 8); - return now.tv_sec * NSEC_PER_SEC + now.tv_usec * NSEC_PER_USEC; + return (uint64_t)now.tv_sec * NSEC_PER_SEC + + (uint64_t)now.tv_usec * NSEC_PER_USEC; +#else /* TARGET_OS_WIN32 */ + // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). 
+ FILETIME ft; + ULARGE_INTEGER li; + GetSystemTimeAsFileTime(&ft); + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + return li.QuadPart * 100ull; +#endif /* TARGET_OS_WIN32 */ } -#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) +#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \ + || TARGET_OS_WIN32 DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data; void _dispatch_get_host_time_init(void *context DISPATCH_UNUSED) { +#if !TARGET_OS_WIN32 mach_timebase_info_data_t tbi; (void)dispatch_assume_zero(mach_timebase_info(&tbi)); _dispatch_host_time_data.frac = tbi.numer; _dispatch_host_time_data.frac /= tbi.denom; _dispatch_host_time_data.ratio_1_to_1 = (tbi.numer == tbi.denom); +#else + LARGE_INTEGER freq; + dispatch_assume(QueryPerformanceFrequency(&freq)); + _dispatch_host_time_data.frac = (long double)NSEC_PER_SEC / + (long double)freq.QuadPart; + _dispatch_host_time_data.ratio_1_to_1 = (freq.QuadPart == 1); +#endif /* TARGET_OS_WIN32 */ } #endif dispatch_time_t dispatch_time(dispatch_time_t inval, int64_t delta) { + uint64_t offset; if (inval == DISPATCH_TIME_FOREVER) { return DISPATCH_TIME_FOREVER; } if ((int64_t)inval < 0) { // wall clock if (delta >= 0) { - if ((int64_t)(inval -= delta) >= 0) { + offset = (uint64_t)delta; + if ((int64_t)(inval -= offset) >= 0) { return DISPATCH_TIME_FOREVER; // overflow } return inval; + } else { + offset = (uint64_t)-delta; + if ((int64_t)(inval += offset) >= -1) { + // -1 is special == DISPATCH_TIME_FOREVER == forever + return (dispatch_time_t)-2ll; // underflow + } + return inval; } - if ((int64_t)(inval -= delta) >= -1) { - // -1 is special == DISPATCH_TIME_FOREVER == forever - return -2; // underflow - } - return inval; } // mach clock - delta = _dispatch_time_nano2mach(delta); if (inval == 0) { inval = _dispatch_absolute_time(); } if (delta >= 0) { - if ((int64_t)(inval += delta) <= 0) { + offset = 
_dispatch_time_nano2mach((uint64_t)delta); + if ((int64_t)(inval += offset) <= 0) { return DISPATCH_TIME_FOREVER; // overflow } return inval; + } else { + offset = _dispatch_time_nano2mach((uint64_t)-delta); + if ((int64_t)(inval -= offset) < 1) { + return 1; // underflow + } + return inval; } - if ((int64_t)(inval += delta) < 1) { - return 1; // underflow - } - return inval; } dispatch_time_t dispatch_walltime(const struct timespec *inval, int64_t delta) { int64_t nsec; - if (inval) { - nsec = inval->tv_sec * 1000000000ull + inval->tv_nsec; + nsec = inval->tv_sec * 1000000000ll + inval->tv_nsec; } else { - nsec = _dispatch_get_nanoseconds(); + nsec = (int64_t)_dispatch_get_nanoseconds(); } - nsec += delta; if (nsec <= 1) { // -1 is special == DISPATCH_TIME_FOREVER == forever - return delta >= 0 ? DISPATCH_TIME_FOREVER : (uint64_t)-2ll; + return delta >= 0 ? DISPATCH_TIME_FOREVER : (dispatch_time_t)-2ll; } - - return -nsec; + return (dispatch_time_t)-nsec; } uint64_t _dispatch_timeout(dispatch_time_t when) { - uint64_t now; - + dispatch_time_t now; if (when == DISPATCH_TIME_FOREVER) { return DISPATCH_TIME_FOREVER; } @@ -114,7 +136,7 @@ _dispatch_timeout(dispatch_time_t when) return 0; } if ((int64_t)when < 0) { - when = -(int64_t)when; + when = (dispatch_time_t)-(int64_t)when; now = _dispatch_get_nanoseconds(); return now >= when ? 0 : when - now; } diff --git a/src/trace.h b/src/trace.h index 4969cbe..9a0f152 100644 --- a/src/trace.h +++ b/src/trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -29,37 +29,48 @@ #if DISPATCH_USE_DTRACE && !__OBJC2__ +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + #include "provider.h" +#if DISPATCH_USE_DTRACE_INTROSPECTION + #define _dispatch_trace_callout(_c, _f, _dcc) do { \ if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \ slowpath(DISPATCH_CALLOUT_RETURN_ENABLED())) { \ dispatch_queue_t _dq = _dispatch_queue_get_current(); \ - char *_label = _dq ? _dq->dq_label : ""; \ + const char *_label = _dq && _dq->dq_label ? _dq->dq_label : ""; \ dispatch_function_t _func = (dispatch_function_t)(_f); \ void *_ctxt = (_c); \ DISPATCH_CALLOUT_ENTRY(_dq, _label, _func, _ctxt); \ _dcc; \ DISPATCH_CALLOUT_RETURN(_dq, _label, _func, _ctxt); \ - return; \ + } else { \ + _dcc; \ } \ - return _dcc; \ } while (0) DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_client_callout(void *ctxt, dispatch_function_t f) { - _dispatch_trace_callout(ctxt, f == _dispatch_call_block_and_release && - ctxt ? ((struct Block_basic *)ctxt)->Block_invoke : f, - _dispatch_client_callout(ctxt, f)); + dispatch_function_t func = (f == _dispatch_call_block_and_release && + ctxt ? 
_dispatch_Block_invoke(ctxt) : f); + _dispatch_introspection_callout_entry(ctxt, func); + _dispatch_trace_callout(ctxt, func, _dispatch_client_callout(ctxt, f)); + _dispatch_introspection_callout_return(ctxt, func); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) { - _dispatch_trace_callout(ctxt, f, _dispatch_client_callout2(ctxt, i, f)); + dispatch_function_t func = (dispatch_function_t)f; + _dispatch_introspection_callout_entry(ctxt, func); + _dispatch_trace_callout(ctxt, func, _dispatch_client_callout2(ctxt, i, f)); + _dispatch_introspection_callout_return(ctxt, func); } #ifdef __BLOCKS__ @@ -67,9 +78,10 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_client_callout_block(dispatch_block_t b) { - struct Block_basic *bb = (void*)b; - _dispatch_trace_callout(b, bb->Block_invoke, - _dispatch_client_callout(b, (dispatch_function_t)bb->Block_invoke)); + dispatch_function_t func = _dispatch_Block_invoke(b); + _dispatch_introspection_callout_entry(b, func); + _dispatch_trace_callout(b, func, _dispatch_client_callout(b, func)); + _dispatch_introspection_callout_return(b, func); } #endif @@ -79,7 +91,7 @@ _dispatch_trace_client_callout_block(dispatch_block_t b) #define _dispatch_trace_continuation(_q, _o, _t) do { \ dispatch_queue_t _dq = (_q); \ - char *_label = _dq ? _dq->dq_label : ""; \ + const char *_label = _dq && _dq->dq_label ? 
_dq->dq_label : ""; \ struct dispatch_object_s *_do = (_o); \ char *_kind; \ dispatch_function_t _func; \ @@ -87,8 +99,8 @@ _dispatch_trace_client_callout_block(dispatch_block_t b) if (DISPATCH_OBJ_IS_VTABLE(_do)) { \ _ctxt = _do->do_ctxt; \ _kind = (char*)dx_kind(_do); \ - if (dx_type(_do) == DISPATCH_SOURCE_KEVENT_TYPE && \ - (_dq) != &_dispatch_mgr_q) { \ + if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \ + _DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \ _func = ((dispatch_source_t)_do)->ds_refs->ds_handler_func; \ } else { \ _func = (dispatch_function_t)_dispatch_queue_invoke; \ @@ -101,7 +113,7 @@ _dispatch_trace_client_callout_block(dispatch_block_t b) _func = (dispatch_function_t)dispatch_semaphore_signal; \ } else if (_dc->dc_func == _dispatch_call_block_and_release) { \ _kind = "block"; \ - _func = ((struct Block_basic *)_dc->dc_ctxt)->Block_invoke;\ + _func = _dispatch_Block_invoke(_dc->dc_ctxt); \ } else { \ _kind = "function"; \ _func = _dc->dc_func; \ @@ -121,6 +133,7 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } while (dou != _tail._do && (dou = dou->do_next)); } + _dispatch_introspection_queue_push_list(dq, _head, _tail); _dispatch_queue_push_list(dq, _head, _tail, n); } @@ -132,9 +145,23 @@ _dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } + _dispatch_introspection_queue_push(dq, _tail); _dispatch_queue_push(dq, _tail); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, + bool wakeup) +{ + if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { + struct dispatch_object_s *dou = _tail._do; + _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + } + _dispatch_introspection_queue_push(dq, _tail); + _dispatch_queue_push_wakeup(dq, _tail, wakeup); +} 
+ DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) @@ -144,6 +171,7 @@ _dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) #define _dispatch_queue_push_list _dispatch_trace_queue_push_list #define _dispatch_queue_push _dispatch_trace_queue_push +#define _dispatch_queue_push_wakeup _dispatch_trace_queue_push_wakeup DISPATCH_ALWAYS_INLINE static inline void @@ -153,12 +181,128 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, if (slowpath(DISPATCH_QUEUE_POP_ENABLED())) { _dispatch_trace_continuation(dq, dou._do, DISPATCH_QUEUE_POP); } + _dispatch_introspection_queue_pop(dq, dou); +} + +#endif // DISPATCH_USE_DTRACE_INTROSPECTION + +static inline dispatch_function_t +_dispatch_trace_timer_function(dispatch_source_t ds, dispatch_source_refs_t dr) +{ + dispatch_function_t func = dr->ds_handler_func; + if (func == _dispatch_after_timer_callback) { + dispatch_continuation_t dc = ds->do_ctxt; + func = dc->dc_func != _dispatch_call_block_and_release ? dc->dc_func : + dc->dc_ctxt ? _dispatch_Block_invoke(dc->dc_ctxt) : NULL; + } + return func; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_trace_timer_params_t +_dispatch_trace_timer_params(uintptr_t ident, + struct dispatch_timer_source_s *values, uint64_t deadline, + dispatch_trace_timer_params_t params) +{ + #define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \ + == DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t)) + #define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \ + (v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);}) + #define _dispatch_trace_time2nano(v) ({ uint64_t _t; \ + _t = _dispatch_trace_time2nano3(v); _t >= INT64_MAX ? -1ll : \ + (int64_t)_t; }) + if (deadline) { + params->deadline = (int64_t)deadline; + } else { + uint64_t now = (DISPATCH_TIMER_KIND(ident) == + DISPATCH_TIMER_KIND_MACH ? 
_dispatch_absolute_time() : + _dispatch_get_nanoseconds()); + params->deadline = _dispatch_trace_time2nano2(values->target, + values->target < now ? 0 : values->target - now); + } + params->interval = _dispatch_trace_time2nano(values->interval); + params->leeway = _dispatch_trace_time2nano(values->leeway); + return params; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_trace_timer_configure_enabled(void) +{ + return slowpath(DISPATCH_TIMER_CONFIGURE_ENABLED()); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident, + struct dispatch_timer_source_s *values) +{ + struct dispatch_trace_timer_params_s params; + DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds, + ds->ds_refs), _dispatch_trace_timer_params(ident, values, 0, + ¶ms)); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline) +{ + if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) { + if (deadline && dr) { + dispatch_source_t ds = _dispatch_source_from_refs(dr); + struct dispatch_trace_timer_params_s params; + DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(ds, dr), + _dispatch_trace_timer_params(ds->ds_ident_hack, + &ds_timer(dr), deadline, ¶ms)); + } + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_wake(dispatch_source_refs_t dr) +{ + if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { + if (dr) { + dispatch_source_t ds = _dispatch_source_from_refs(dr); + DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(ds, dr)); + } + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, + unsigned long missed) +{ + if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) { + if (!(data - missed) && dr) { + dispatch_source_t ds = _dispatch_source_from_refs(dr); + DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(ds, dr)); + } + } } + #else -#define 
_dispatch_queue_push_notrace _dispatch_queue_push -#define _dispatch_trace_continuation_pop(dq, dou) (void)(dq) +#define _dispatch_trace_timer_configure_enabled() false +#define _dispatch_trace_timer_configure(ds, ident, values) \ + do { (void)(ds); (void)(ident); (void)(values); } while(0) +#define _dispatch_trace_timer_program(dr, deadline) \ + do { (void)(dr); (void)(deadline); } while(0) +#define _dispatch_trace_timer_wake(dr) \ + do { (void)(dr); } while(0) +#define _dispatch_trace_timer_fire(dr, data, missed) \ + do { (void)(dr); (void)(data); (void)(missed); } while(0) #endif // DISPATCH_USE_DTRACE && !__OBJC2__ +#if !DISPATCH_USE_DTRACE_INTROSPECTION + +#define _dispatch_queue_push_notrace _dispatch_queue_push +#define _dispatch_trace_continuation_pop(dq, dou) \ + do { (void)(dq); (void)(dou); } while(0) + +#endif // !DISPATCH_USE_DTRACE_INTROSPECTION + #endif // __DISPATCH_TRACE__ diff --git a/src/transform.c b/src/transform.c index 3bb1e3e..e6fa401 100644 --- a/src/transform.c +++ b/src/transform.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012 Apple Inc. All rights reserved. + * Copyright (c) 2011-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -37,14 +37,15 @@ enum { _DISPATCH_DATA_FORMAT_UTF16BE = 0x8, _DISPATCH_DATA_FORMAT_UTF_ANY = 0x10, _DISPATCH_DATA_FORMAT_BASE32 = 0x20, - _DISPATCH_DATA_FORMAT_BASE64 = 0x40, + _DISPATCH_DATA_FORMAT_BASE32HEX = 0x40, + _DISPATCH_DATA_FORMAT_BASE64 = 0x80, }; #pragma mark - #pragma mark baseXX tables -static const char base32_encode_table[] = - "ABCDEFGHIJKLMNOPQRSTUVWXYZ23456789"; +static const unsigned char base32_encode_table[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; static const char base32_decode_table[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -57,7 +58,21 @@ static const char base32_decode_table[] = { static const ssize_t base32_decode_table_size = sizeof(base32_decode_table) / sizeof(*base32_decode_table); -static const char base64_encode_table[] = +static const unsigned char base32hex_encode_table[] = + "0123456789ABCDEFGHIJKLMNOPQRSTUV"; + +static const char base32hex_decode_table[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, + 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -2, -1, -1, -1, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31 +}; +static const ssize_t base32hex_decode_table_size = + sizeof(base32hex_encode_table) / sizeof(*base32hex_encode_table); + +static const unsigned char base64_encode_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; static const char base64_decode_table[] = { @@ -104,12 +119,12 @@ static bool _dispatch_transform_buffer_new(dispatch_transform_buffer_s *buffer, size_t required, size_t size) { - size_t remaining = buffer->size - (buffer->ptr.u8 - buffer->start); + size_t remaining = buffer->size - (size_t)(buffer->ptr.u8 - buffer->start); if (required == 0 || remaining < required) { if (buffer->start) { if (buffer->ptr.u8 > 
buffer->start) { dispatch_data_t _new = dispatch_data_create(buffer->start, - buffer->ptr.u8 - buffer->start, NULL, + (size_t)(buffer->ptr.u8 - buffer->start), NULL, DISPATCH_DATA_DESTRUCTOR_FREE); dispatch_data_t _concat = dispatch_data_create_concat( buffer->data, _new); @@ -412,7 +427,8 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) if (range == NULL) { return (bool)false; } - ch = _dispatch_transform_swap_to_host(*(uint64_t*)p, byteOrder); + ch = _dispatch_transform_swap_to_host((uint16_t)*(uint64_t*)p, + byteOrder); dispatch_release(range); skip += 1; } else { @@ -429,7 +445,7 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) if ((ch >= 0xd800) && (ch <= 0xdbff)) { // Surrogate pair - wch = ((ch - 0xd800) << 10); + wch = ((ch - 0xd800u) << 10); if (++i >= max) { // Surrogate byte isn't in this block const void *p; @@ -528,7 +544,8 @@ _dispatch_transform_to_utf16be(dispatch_data_t data) #pragma mark base32 static dispatch_data_t -_dispatch_transform_from_base32(dispatch_data_t data) +_dispatch_transform_from_base32_with_table(dispatch_data_t data, + const char* table, ssize_t table_size) { __block uint64_t x = 0, count = 0, pad = 0; @@ -539,7 +556,7 @@ _dispatch_transform_from_base32(dispatch_data_t data) DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { size_t i, dest_size = (size * 5) / 8; - uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(char)); + uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); uint8_t *ptr = dest; if (dest == NULL) { return (bool)false; @@ -553,21 +570,20 @@ _dispatch_transform_from_base32(dispatch_data_t data) } ssize_t index = bytes[i]; - if (index >= base32_decode_table_size || - base32_decode_table[index] == -1) { + if (index >= table_size || table[index] == -1) { free(dest); return (bool)false; } count++; - char value = base32_decode_table[index]; + char value = table[index]; if (value == -2) { value = 0; pad++; } x <<= 5; - x += value; + x += 
(uint64_t)value; if ((count & 0x7) == 0) { *ptr++ = (x >> 32) & 0xff; @@ -578,7 +594,7 @@ _dispatch_transform_from_base32(dispatch_data_t data) } } - size_t final = (ptr - dest); + size_t final = (size_t)(ptr - dest); switch (pad) { case 1: final -= 1; @@ -614,15 +630,21 @@ _dispatch_transform_from_base32(dispatch_data_t data) } static dispatch_data_t -_dispatch_transform_to_base32(dispatch_data_t data) +_dispatch_transform_to_base32_with_table(dispatch_data_t data, const unsigned char* table) { size_t total = dispatch_data_get_size(data); __block size_t count = 0; - size_t dest_size = ((total + 4) * 8) / 5; - dest_size -= dest_size % 8; + if (total > SIZE_T_MAX-4 || ((total+4)/5 > SIZE_T_MAX/8)) { + /* We can't hold larger than size_t in a dispatch_data_t + * and we want to avoid an integer overflow in the next + * calculation. + */ + return NULL; + } - uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); + size_t dest_size = (total + 4) / 5 * 8; + uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; } @@ -662,26 +684,26 @@ _dispatch_transform_to_base32(dispatch_data_t data) switch (count % 5) { case 0: // a - *ptr++ = base32_encode_table[(curr >> 3) & 0x1f]; + *ptr++ = table[(curr >> 3) & 0x1fu]; break; case 1: // b + c - *ptr++ = base32_encode_table[((last << 2)|(curr >> 6)) & 0x1f]; - *ptr++ = base32_encode_table[(curr >> 1) & 0x1f]; + *ptr++ = table[((last << 2)|(curr >> 6)) & 0x1f]; + *ptr++ = table[(curr >> 1) & 0x1f]; break; case 2: // d - *ptr++ = base32_encode_table[((last << 4)|(curr >> 4)) & 0x1f]; + *ptr++ = table[((last << 4)|(curr >> 4)) & 0x1f]; break; case 3: // e + f - *ptr++ = base32_encode_table[((last << 1)|(curr >> 7)) & 0x1f]; - *ptr++ = base32_encode_table[(curr >> 2) & 0x1f]; + *ptr++ = table[((last << 1)|(curr >> 7)) & 0x1f]; + *ptr++ = table[(curr >> 2) & 0x1f]; break; case 4: // g + h - *ptr++ = base32_encode_table[((last << 3)|(curr >> 5)) & 0x1f]; - *ptr++ = base32_encode_table[curr & 0x1f]; + *ptr++ 
= table[((last << 3)|(curr >> 5)) & 0x1f]; + *ptr++ = table[curr & 0x1f]; break; } } @@ -693,19 +715,19 @@ _dispatch_transform_to_base32(dispatch_data_t data) break; case 1: // b[4:2] - *ptr++ = base32_encode_table[(bytes[size-1] << 2) & 0x1c]; + *ptr++ = table[(bytes[size-1] << 2) & 0x1c]; break; case 2: // d[4] - *ptr++ = base32_encode_table[(bytes[size-1] << 4) & 0x10]; + *ptr++ = table[(bytes[size-1] << 4) & 0x10]; break; case 3: // e[4:1] - *ptr++ = base32_encode_table[(bytes[size-1] << 1) & 0x1e]; + *ptr++ = table[(bytes[size-1] << 1) & 0x1e]; break; case 4: - // g[4:3] - *ptr++ = base32_encode_table[bytes[size-1] & 0x18]; + // g[2:3] + *ptr++ = table[(bytes[size-1] << 3) & 0x18]; break; } switch (count % 5) { @@ -736,6 +758,33 @@ _dispatch_transform_to_base32(dispatch_data_t data) DISPATCH_DATA_DESTRUCTOR_FREE); } +static dispatch_data_t +_dispatch_transform_from_base32(dispatch_data_t data) +{ + return _dispatch_transform_from_base32_with_table(data, base32_decode_table, + base32_decode_table_size); +} + +static dispatch_data_t +_dispatch_transform_to_base32(dispatch_data_t data) +{ + return _dispatch_transform_to_base32_with_table(data, base32_encode_table); +} + +static dispatch_data_t +_dispatch_transform_from_base32hex(dispatch_data_t data) +{ + return _dispatch_transform_from_base32_with_table(data, + base32hex_decode_table, base32hex_decode_table_size); +} + +static dispatch_data_t +_dispatch_transform_to_base32hex(dispatch_data_t data) +{ + return _dispatch_transform_to_base32_with_table(data, + base32hex_encode_table); +} + #pragma mark - #pragma mark base64 @@ -780,7 +829,7 @@ _dispatch_transform_from_base64(dispatch_data_t data) } x <<= 6; - x += value; + x += (uint64_t)value; if ((count & 0x3) == 0) { *ptr++ = (x >> 16) & 0xff; @@ -789,7 +838,7 @@ _dispatch_transform_from_base64(dispatch_data_t data) } } - size_t final = (ptr - dest); + size_t final = (size_t)(ptr - dest); if (pad > 0) { // 2 bytes of pad means only had one char in final group 
final -= pad; @@ -822,10 +871,16 @@ _dispatch_transform_to_base64(dispatch_data_t data) size_t total = dispatch_data_get_size(data); __block size_t count = 0; - size_t dest_size = ((total + 2) * 4) / 3; - dest_size -= dest_size % 4; + if (total > SIZE_T_MAX-2 || ((total+2)/3> SIZE_T_MAX/4)) { + /* We can't hold larger than size_t in a dispatch_data_t + * and we want to avoid an integer overflow in the next + * calculation. + */ + return NULL; + } - uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); + size_t dest_size = (total + 2) / 3 * 4; + uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; } @@ -955,8 +1010,8 @@ dispatch_data_create_with_transform(dispatch_data_t data, const struct dispatch_data_format_type_s _dispatch_data_format_type_none = { .type = _DISPATCH_DATA_FORMAT_NONE, - .input_mask = ~0, - .output_mask = ~0, + .input_mask = ~0u, + .output_mask = ~0u, .decode = NULL, .encode = NULL, }; @@ -964,19 +1019,30 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_none = { const struct dispatch_data_format_type_s _dispatch_data_format_type_base32 = { .type = _DISPATCH_DATA_FORMAT_BASE32, .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .decode = _dispatch_transform_from_base32, .encode = _dispatch_transform_to_base32, }; +const struct dispatch_data_format_type_s _dispatch_data_format_type_base32hex = +{ + .type = _DISPATCH_DATA_FORMAT_BASE32HEX, + .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), + .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE32HEX | 
_DISPATCH_DATA_FORMAT_BASE64), + .decode = _dispatch_transform_from_base32hex, + .encode = _dispatch_transform_to_base32hex, +}; + const struct dispatch_data_format_type_s _dispatch_data_format_type_base64 = { .type = _DISPATCH_DATA_FORMAT_BASE64, .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .decode = _dispatch_transform_from_base64, .encode = _dispatch_transform_to_base64, }; diff --git a/tools/dispatch_timers.d b/tools/dispatch_timers.d new file mode 100755 index 0000000..2821505 --- /dev/null +++ b/tools/dispatch_timers.d @@ -0,0 +1,89 @@ +#!/usr/sbin/dtrace -s + +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * Usage: dispatch_timers.d -p [pid] + * traced process must have been executed with + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection or with + * DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug + */ + +#pragma D option quiet +#pragma D option zdefs + +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + +dispatch$target:libdispatch*.dylib::timer-configure, +dispatch$target:libdispatch*.dylib::timer-program, +dispatch$target:libdispatch*.dylib::timer-wake, +dispatch$target:libdispatch*.dylib::timer-fire /!start/ { + start = walltimestamp; +} + +/* + * Trace dispatch timer configuration and programming: + * Timer configuration indicates that dispatch_source_set_timer() was called. + * Timer programming indicates that the dispatch manager is about to sleep + * for 'deadline' ns (but may wake up earlier if non-timer events occur). + * Time parameters are in nanoseconds, a value of -1 means "forever". + * + * probe timer-configure/-program(dispatch_source_t source, + * dispatch_function_t function, dispatch_trace_timer_params_t params) + */ +dispatch$target:libdispatch*.dylib::timer-configure, +dispatch$target:libdispatch*.dylib::timer-program { + this->p = (dispatch_trace_timer_params_t)copyin(arg2, + sizeof(struct dispatch_trace_timer_params_s)); + printf("%8dus %-15s: 0x%0?p deadline: %11dns interval: %11dns leeway: %11dns", + (walltimestamp-start)/1000, probename, arg0, + this->p ? this->p->deadline : 0, this->p ? this->p->interval : 0, + this->p ? this->p->leeway : 0); + usym(arg1); + printf("\n"); +} +dispatch$target:libdispatch*.dylib::timer-configure { + printf(" / --- Begin ustack"); + ustack(); + printf(" \ --- End ustack\n"); +} + +/* + * Trace dispatch timer wakes and fires: + * Timer wakes indicate that the dispatch manager woke up due to expiry of the + * deadline for the specified timer. 
+ * Timer fires indicate that the dispatch manager scheduled the event + * handler of the specified timer for asynchronous execution (may occur without + * a corresponding timer wake if the manager was awake processing other events + * when the timer deadline expired). + * + * probe timer-wake/-fire(dispatch_source_t source, + * dispatch_function_t function) + */ +dispatch$target:libdispatch*.dylib::timer-wake, +dispatch$target:libdispatch*.dylib::timer-fire { + printf("%8dus %-15s: 0x%0?p%-70s", (walltimestamp-start)/1000, probename, + arg0, ""); + usym(arg1); + printf("\n"); +} diff --git a/tools/dispatch_trace.d b/tools/dispatch_trace.d index 9059e4e..7f53867 100755 --- a/tools/dispatch_trace.d +++ b/tools/dispatch_trace.d @@ -1,7 +1,7 @@ -#!/usr/sbin/dtrace -Z -s +#!/usr/sbin/dtrace -s /* - * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -21,12 +21,14 @@ */ /* - * Usage: dispatch_dtrace.d -p [pid] + * Usage: dispatch_trace.d -p [pid] * traced process must have been executed with + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection or with * DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug */ #pragma D option quiet +#pragma D option zdefs #pragma D option bufsize=16m BEGIN { @@ -35,25 +37,22 @@ BEGIN { "Item", "Kind", "Context", "Symbol"); } -dispatch$target:libdispatch_profile.dylib::queue-push, -dispatch$target:libdispatch_debug.dylib::queue-push, -dispatch$target:libdispatch_profile.dylib::queue-pop, -dispatch$target:libdispatch_debug.dylib::queue-pop, -dispatch$target:libdispatch_profile.dylib::callout-entry, -dispatch$target:libdispatch_debug.dylib::callout-entry, -dispatch$target:libdispatch_profile.dylib::callout-return, -dispatch$target:libdispatch_debug.dylib::callout-return /!start/ { +dispatch$target:libdispatch*.dylib::queue-push, +dispatch$target:libdispatch*.dylib::queue-pop,
+dispatch$target:libdispatch*.dylib::callout-entry, +dispatch$target:libdispatch*.dylib::callout-return /!start/ { start = walltimestamp; } -/* probe queue-push/-pop(dispatch_queue_t queue, const char *label, +/* + * Trace queue push and pop operations: + * + * probe queue-push/-pop(dispatch_queue_t queue, const char *label, * dispatch_object_t item, const char *kind, * dispatch_function_t function, void *context) */ -dispatch$target:libdispatch_profile.dylib::queue-push, -dispatch$target:libdispatch_debug.dylib::queue-push, -dispatch$target:libdispatch_profile.dylib::queue-pop, -dispatch$target:libdispatch_debug.dylib::queue-pop { +dispatch$target:libdispatch*.dylib::queue-push, +dispatch$target:libdispatch*.dylib::queue-pop { printf("%-8d %-3d 0x%08p %-35s%-15s0x%0?p %-43s0x%0?p %-14s0x%0?p", (walltimestamp-start)/1000, cpu, tid, probefunc, probename, arg0, copyinstr(arg1, 42), arg2, copyinstr(arg3, 13), arg5); @@ -61,13 +60,14 @@ dispatch$target:libdispatch_debug.dylib::queue-pop { printf("\n"); } -/* probe callout-entry/-return(dispatch_queue_t queue, const char *label, +/* + * Trace callouts to client functions: + * + * probe callout-entry/-return(dispatch_queue_t queue, const char *label, * dispatch_function_t function, void *context) */ -dispatch$target:libdispatch_profile.dylib::callout-entry, -dispatch$target:libdispatch_debug.dylib::callout-entry, -dispatch$target:libdispatch_profile.dylib::callout-return, -dispatch$target:libdispatch_debug.dylib::callout-return { +dispatch$target:libdispatch*.dylib::callout-entry, +dispatch$target:libdispatch*.dylib::callout-return { printf("%-8d %-3d 0x%08p %-35s%-15s0x%0?p %-43s%-?s %-14s0x%0?p", (walltimestamp-start)/1000, cpu, tid, probefunc, probename, arg0, copyinstr(arg1, 42), "", "", arg3); diff --git a/xcodeconfig/libdispatch-introspection.xcconfig b/xcodeconfig/libdispatch-introspection.xcconfig new file mode 100644 index 0000000..d0f431d --- /dev/null +++ b/xcodeconfig/libdispatch-introspection.xcconfig @@ 
-0,0 +1,26 @@ +// +// Copyright (c) 2012-2013 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +BUILD_VARIANTS = normal +INSTALL_PATH = /usr/lib/system/introspection +INSTALL_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/lib/system/introspection +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1 +CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection +OTHER_LDFLAGS = $(OTHER_LDFLAGS) -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/libdispatch.interposable diff --git a/xcodeconfig/libdispatch-static.xcconfig b/xcodeconfig/libdispatch-static.xcconfig new file mode 100644 index 0000000..632e01c --- /dev/null +++ b/xcodeconfig/libdispatch-static.xcconfig @@ -0,0 +1,25 @@ +// +// Copyright (c) 2012-2013 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_LDFLAGS = +BUILD_VARIANTS = normal +SKIP_INSTALL = YES +EXCLUDED_SOURCE_FILE_NAMES = * +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0 DISPATCH_USE_DTRACE=0 diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index aae0bcc..5877e50 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -1,12 +1,36 @@ +# +# Copyright (c) 2012-2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + _OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable _OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable _OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable _OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable _OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable _OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable _OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable -_OBJC_CLASS_$_OS_dispatch_data __dispatch_data_vtable +_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable +_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable _OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable _OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable _OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable + +__dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap diff --git a/xcodeconfig/libdispatch.interposable b/xcodeconfig/libdispatch.interposable new file mode 100644 index 0000000..f337761 --- /dev/null +++ b/xcodeconfig/libdispatch.interposable @@ -0,0 +1,28 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +# Interposable API hooks in the introspection library + +_dispatch_introspection_hook_queue_create +_dispatch_introspection_hook_queue_destroy +_dispatch_introspection_hook_queue_item_enqueue +_dispatch_introspection_hook_queue_item_dequeue +_dispatch_introspection_hook_queue_callout_begin +_dispatch_introspection_hook_queue_callout_end diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 64787b7..8870ea9 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -1,40 +1,75 @@ -_OBJC_CLASS_$_OS_object -_OBJC_METACLASS_$_OS_object +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +# Must be kept in sync with ObjC TFB checks in object_internal.h + +# dispatch_object_t classes _OBJC_CLASS_$_OS_dispatch_object -_OBJC_METACLASS_$_OS_dispatch_object _OBJC_CLASS_$_OS_dispatch_semaphore __OS_dispatch_semaphore_vtable -_OBJC_METACLASS_$_OS_dispatch_semaphore _OBJC_CLASS_$_OS_dispatch_group __OS_dispatch_group_vtable -_OBJC_METACLASS_$_OS_dispatch_group _OBJC_CLASS_$_OS_dispatch_queue __OS_dispatch_queue_vtable -_OBJC_METACLASS_$_OS_dispatch_queue _OBJC_CLASS_$_OS_dispatch_queue_root __OS_dispatch_queue_root_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_root +_OBJC_CLASS_$_OS_dispatch_queue_runloop +__OS_dispatch_queue_runloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_mgr __OS_dispatch_queue_mgr_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_CLASS_$_OS_dispatch_queue_specific_queue __OS_dispatch_queue_specific_queue_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_specific_queue _OBJC_CLASS_$_OS_dispatch_queue_attr __OS_dispatch_queue_attr_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_attr _OBJC_CLASS_$_OS_dispatch_source __OS_dispatch_source_vtable -_OBJC_METACLASS_$_OS_dispatch_source -_OBJC_CLASS_$_OS_dispatch_data -__OS_dispatch_data_vtable -_OBJC_METACLASS_$_OS_dispatch_data +_OBJC_CLASS_$_OS_dispatch_mach +__OS_dispatch_mach_vtable +_OBJC_CLASS_$_OS_dispatch_mach_msg +__OS_dispatch_mach_msg_vtable _OBJC_CLASS_$_OS_dispatch_io __OS_dispatch_io_vtable -_OBJC_METACLASS_$_OS_dispatch_io _OBJC_CLASS_$_OS_dispatch_operation __OS_dispatch_operation_vtable -_OBJC_METACLASS_$_OS_dispatch_operation _OBJC_CLASS_$_OS_dispatch_disk __OS_dispatch_disk_vtable +# non-dispatch_object_t classes +_OBJC_CLASS_$_OS_object +_OBJC_CLASS_$_OS_dispatch_data +_OBJC_CLASS_$_OS_dispatch_data_empty +# metaclasses +_OBJC_METACLASS_$_OS_dispatch_object +_OBJC_METACLASS_$_OS_dispatch_semaphore +_OBJC_METACLASS_$_OS_dispatch_group +_OBJC_METACLASS_$_OS_dispatch_queue +_OBJC_METACLASS_$_OS_dispatch_queue_root 
+_OBJC_METACLASS_$_OS_dispatch_queue_runloop +_OBJC_METACLASS_$_OS_dispatch_queue_mgr +_OBJC_METACLASS_$_OS_dispatch_queue_specific_queue +_OBJC_METACLASS_$_OS_dispatch_queue_attr +_OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_mach +_OBJC_METACLASS_$_OS_dispatch_mach_msg +_OBJC_METACLASS_$_OS_dispatch_io +_OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk +_OBJC_METACLASS_$_OS_object +_OBJC_METACLASS_$_OS_dispatch_data +_OBJC_METACLASS_$_OS_dispatch_data_empty diff --git a/xcodeconfig/libdispatch.unexport b/xcodeconfig/libdispatch.unexport index 035bd9c..dba78b9 100644 --- a/xcodeconfig/libdispatch.unexport +++ b/xcodeconfig/libdispatch.unexport @@ -1,12 +1,34 @@ +# +# Copyright (c) 2012-2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + __dispatch_semaphore_vtable __dispatch_group_vtable __dispatch_queue_vtable __dispatch_queue_root_vtable +__dispatch_queue_runloop_vtable __dispatch_queue_mgr_vtable __dispatch_queue_specific_queue_vtable __dispatch_queue_attr_vtable __dispatch_source_vtable -__dispatch_data_vtable +__dispatch_mach_vtable +__dispatch_mach_msg_vtable __dispatch_io_vtable __dispatch_operation_vtable __dispatch_disk_vtable diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index e651bfc..4904b9d 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -1,5 +1,5 @@ // -// Copyright (c) 2010-2011 Apple Inc. All rights reserved. +// Copyright (c) 2010-2013 Apple Inc. All rights reserved. // // @APPLE_APACHE_LICENSE_HEADER_START@ // @@ -34,12 +34,14 @@ OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os OS_PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/os OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os OS_PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/os -HEADER_SEARCH_PATHS = $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PROJECT_DIR) +HEADER_SEARCH_PATHS = $(PROJECT_DIR) +LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO BUILD_VARIANTS = normal debug profile ONLY_ACTIVE_ARCH = NO -GCC_VERSION = com.apple.compilers.llvm.clang.1_0 +CLANG_LINK_OBJC_RUNTIME = NO +GCC_C_LANGUAGE_STANDARD = gnu11 GCC_STRICT_ALIASING = YES GCC_SYMBOLS_PRIVATE_EXTERN = YES GCC_ENABLE_OBJC_GC[sdk=macosx*] = supported @@ -50,23 +52,30 @@ GCC_WARN_ABOUT_RETURN_TYPE = YES GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES GCC_WARN_ABOUT_MISSING_NEWLINE = YES GCC_WARN_UNUSED_VARIABLE = YES +GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES +GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES +GCC_WARN_SIGN_COMPARE = YES +GCC_WARN_UNINITIALIZED_AUTOS = YES +CLANG_WARN_EMPTY_BODY = YES 
+CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES +CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES +CLANG_WARN_DOCUMENTATION_COMMENTS = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s -GCC_THUMB_SUPPORT[arch=armv6] = NO GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 -WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -OTHER_CFLAGS = -fdiagnostics-show-option -fverbose-asm +WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-unknown-warning-option +OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer -OTHER_CFLAGS_normal[arch=armv6][sdk=macosx*] = OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) -UMBRELLA_LDFLAGS = -umbrella System -UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = +UMBRELLA_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -Wl,-upward-lsystem_asl +UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = -umbrella System -nodefaultlibs -ldyld_sim -lcompiler_rt_sim -lsystem_sim_c -lsystem_sim_blocks -lunwind_sim -Wl,-upward-lSystem OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto OBJC_LDFLAGS[arch=i386][sdk=macosx*] = 
-OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) +OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m +PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(PLATFORM_LDFLAGS) diff --git a/xcodeconfig/libdispatch_macosx.aliases b/xcodeconfig/libdispatch_macosx.aliases new file mode 100644 index 0000000..a7f61c5 --- /dev/null +++ b/xcodeconfig/libdispatch_macosx.aliases @@ -0,0 +1,21 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure diff --git a/xcodescripts/install-dtrace.sh b/xcodescripts/install-dtrace.sh new file mode 100644 index 0000000..c0eb364 --- /dev/null +++ b/xcodescripts/install-dtrace.sh @@ -0,0 +1,30 @@ +#!/bin/bash -e +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +if [ "${PLATFORM_NAME}" = iphoneos ]; then exit 0; fi + +if [ "${DEPLOYMENT_LOCATION}" != YES ]; then + DSTROOT="${CONFIGURATION_BUILD_DIR}" +fi + +mkdir -p "${DSTROOT}${PUBLIC_HEADERS_FOLDER_PATH}" || true +cp -X "${SCRIPT_INPUT_FILE_1}" \ + "${DSTROOT}${PUBLIC_HEADERS_FOLDER_PATH}/${SCRIPT_OUTPUT_FILE_0##/*/}" diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh index 2ea1f68..d9e28af 100755 --- a/xcodescripts/install-manpages.sh +++ b/xcodescripts/install-manpages.sh @@ -1,6 +1,6 @@ #!/bin/bash -e # -# Copyright (c) 2010-2011 Apple Inc. All rights reserved. +# Copyright (c) 2010-2012 Apple Inc. All rights reserved. # # @APPLE_APACHE_LICENSE_HEADER_START@ # -- 2.45.2