/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */
#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1
+/* Define to 1 if you have the `_pthread_workqueue_init' function. */
+#define HAVE__PTHREAD_WORKQUEUE_INIT 1
+
+/* Define to 1 if you have the <pthread/qos.h> header file. */
+#define HAVE_PTHREAD_QOS_H 1
+
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
#define DISPATCH_WARN_RESULT __attribute__((__warn_unused_result__))
#define DISPATCH_MALLOC __attribute__((__malloc__))
#define DISPATCH_ALWAYS_INLINE __attribute__((__always_inline__))
+#define DISPATCH_UNAVAILABLE __attribute__((__unavailable__))
#else
/*! @parseOnly */
#define DISPATCH_NORETURN
#define DISPATCH_MALLOC
/*! @parseOnly */
#define DISPATCH_ALWAYS_INLINE
+/*! @parseOnly */
+#define DISPATCH_UNAVAILABLE
#endif
-
#if TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) && \
defined(__cplusplus)
#define DISPATCH_EXPORT extern "C" extern __declspec(dllexport)
#define DISPATCH_EXPECT(x, v) (x)
#endif
-#if defined(__has_feature)
-#if __has_feature(objc_fixed_enum)
+#ifndef DISPATCH_RETURNS_RETAINED_BLOCK
+#if defined(__has_attribute)
+#if __has_attribute(ns_returns_retained)
+#define DISPATCH_RETURNS_RETAINED_BLOCK __attribute__((__ns_returns_retained__))
+#else
+#define DISPATCH_RETURNS_RETAINED_BLOCK
+#endif
+#else
+#define DISPATCH_RETURNS_RETAINED_BLOCK
+#endif
+#endif
+
+#if defined(__has_feature) && defined(__has_extension)
+#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums)
#define DISPATCH_ENUM(name, type, ...) \
typedef enum : type { __VA_ARGS__ } name##_t
#else
#define DISPATCH_ENUM(name, type, ...) \
enum { __VA_ARGS__ }; typedef type name##_t
#endif
+#if __has_feature(enumerator_attributes)
+#define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING
+#else
+#define DISPATCH_ENUM_AVAILABLE_STARTING(...)
+#endif
#else
#define DISPATCH_ENUM(name, type, ...) \
enum { __VA_ARGS__ }; typedef type name##_t
+#define DISPATCH_ENUM_AVAILABLE_STARTING(...)
#endif
typedef void (*dispatch_function_t)(void *);
--- /dev/null
+/*
+ * Copyright (c) 2014 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __DISPATCH_BLOCK__
+#define __DISPATCH_BLOCK__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+#ifdef __BLOCKS__
+
+/*!
+ * @group Dispatch block objects
+ */
+
+__BEGIN_DECLS
+
+/*!
+ * @typedef dispatch_block_flags_t
+ * Flags to pass to the dispatch_block_create* functions.
+ *
+ * @const DISPATCH_BLOCK_BARRIER
+ * Flag indicating that a dispatch block object should act as a barrier block
+ * when submitted to a DISPATCH_QUEUE_CONCURRENT queue.
+ * See dispatch_barrier_async() for details.
+ * This flag has no effect when the dispatch block object is invoked directly.
+ *
+ * @const DISPATCH_BLOCK_DETACHED
+ * Flag indicating that a dispatch block object should execute disassociated
+ * from current execution context attributes such as QOS class, os_activity_t
+ * and properties of the current IPC request (if any). If invoked directly, the
+ * block object will remove these attributes from the calling thread for the
+ * duration of the block body (before applying attributes assigned to the block
+ * object, if any). If submitted to a queue, the block object will be executed
+ * with the attributes of the queue (or any attributes specifically assigned to
+ * the block object).
+ *
+ * @const DISPATCH_BLOCK_ASSIGN_CURRENT
+ * Flag indicating that a dispatch block object should be assigned the execution
+ * context attributes that are current at the time the block object is created.
+ * This applies to attributes such as QOS class, os_activity_t and properties of
+ * the current IPC request (if any). If invoked directly, the block object will
+ * apply these attributes to the calling thread for the duration of the block
+ * body. If the block object is submitted to a queue, this flag replaces the
+ * default behavior of associating the submitted block instance with the
+ * execution context attributes that are current at the time of submission.
+ * If a specific QOS class is assigned with DISPATCH_BLOCK_NO_QOS_CLASS or
+ * dispatch_block_create_with_qos_class(), that QOS class takes precedence over
+ * the QOS class assignment indicated by this flag.
+ *
+ * @const DISPATCH_BLOCK_NO_QOS_CLASS
+ * Flag indicating that a dispatch block object should not be assigned a QOS
+ * class. If invoked directly, the block object will be executed with the QOS
+ * class of the calling thread. If the block object is submitted to a queue,
+ * this replaces the default behavior of associating the submitted block
+ * instance with the QOS class current at the time of submission.
+ * This flag is ignored if a specific QOS class is assigned with
+ * dispatch_block_create_with_qos_class().
+ *
+ * @const DISPATCH_BLOCK_INHERIT_QOS_CLASS
+ * Flag indicating that execution of a dispatch block object submitted to a
+ * queue should prefer the QOS class assigned to the queue over the QOS class
+ * assigned to the block (resp. associated with the block at the time of
+ * submission). The latter will only be used if the queue in question does not
+ * have an assigned QOS class, as long as doing so does not result in a QOS
+ * class lower than the QOS class inherited from the queue's target queue.
+ * This flag is the default when a dispatch block object is submitted to a queue
+ * for asynchronous execution and has no effect when the dispatch block object
+ * is invoked directly. It is ignored if DISPATCH_BLOCK_ENFORCE_QOS_CLASS is
+ * also passed.
+ *
+ * @const DISPATCH_BLOCK_ENFORCE_QOS_CLASS
+ * Flag indicating that execution of a dispatch block object submitted to a
+ * queue should prefer the QOS class assigned to the block (resp. associated
+ * with the block at the time of submission) over the QOS class assigned to the
+ * queue, as long as doing so will not result in a lower QOS class.
+ * This flag is the default when a dispatch block object is submitted to a queue
+ * for synchronous execution or when the dispatch block object is invoked
+ * directly.
+ */
+DISPATCH_ENUM(dispatch_block_flags, unsigned long,
+ DISPATCH_BLOCK_BARRIER
+ DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x1,
+ DISPATCH_BLOCK_DETACHED
+ DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x2,
+ DISPATCH_BLOCK_ASSIGN_CURRENT
+ DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x4,
+ DISPATCH_BLOCK_NO_QOS_CLASS
+ DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x8,
+ DISPATCH_BLOCK_INHERIT_QOS_CLASS
+ DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x10,
+ DISPATCH_BLOCK_ENFORCE_QOS_CLASS
+ DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x20,
+);
+
+/*!
+ * @function dispatch_block_create
+ *
+ * @abstract
+ * Create a new dispatch block object on the heap from an existing block and
+ * the given flags.
+ *
+ * @discussion
+ * The provided block is Block_copy'ed to the heap and retained by the newly
+ * created dispatch block object.
+ *
+ * The returned dispatch block object is intended to be submitted to a dispatch
+ * queue with dispatch_async() and related functions, but may also be invoked
+ * directly. Both operations can be performed an arbitrary number of times but
+ * only the first completed execution of a dispatch block object can be waited
+ * on with dispatch_block_wait() or observed with dispatch_block_notify().
+ *
+ * If the returned dispatch block object is submitted to a dispatch queue, the
+ * submitted block instance will be associated with the QOS class current at the
+ * time of submission, unless one of the following flags assigned a specific QOS
+ * class (or no QOS class) at the time of block creation:
+ * - DISPATCH_BLOCK_ASSIGN_CURRENT
+ * - DISPATCH_BLOCK_NO_QOS_CLASS
+ * - DISPATCH_BLOCK_DETACHED
+ * The QOS class the block object will be executed with also depends on the QOS
+ * class assigned to the queue and which of the following flags was specified or
+ * defaulted to:
+ * - DISPATCH_BLOCK_INHERIT_QOS_CLASS (default for asynchronous execution)
+ * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS (default for synchronous execution)
+ * See description of dispatch_block_flags_t for details.
+ *
+ * If the returned dispatch block object is submitted directly to a serial queue
+ * and is configured to execute with a specific QOS class, the system will make
+ * a best effort to apply the necessary QOS overrides to ensure that blocks
+ * submitted earlier to the serial queue are executed at that same QOS class or
+ * higher.
+ *
+ * @param flags
+ * Configuration flags for the block object.
+ * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t
+ * results in NULL being returned.
+ *
+ * @param block
+ * The block to create the dispatch block object from.
+ *
+ * @result
+ * The newly created dispatch block object, or NULL.
+ * When not building with Objective-C ARC, must be released with a -[release]
+ * message or the Block_release() function.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_RETURNS_RETAINED_BLOCK
+DISPATCH_WARN_RESULT DISPATCH_NOTHROW
+dispatch_block_t
+dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block);
+
+/*!
+ * @function dispatch_block_create_with_qos_class
+ *
+ * @abstract
+ * Create a new dispatch block object on the heap from an existing block and
+ * the given flags, and assign it the specified QOS class and relative priority.
+ *
+ * @discussion
+ * The provided block is Block_copy'ed to the heap and retained by the newly
+ * created dispatch block object.
+ *
+ * The returned dispatch block object is intended to be submitted to a dispatch
+ * queue with dispatch_async() and related functions, but may also be invoked
+ * directly. Both operations can be performed an arbitrary number of times but
+ * only the first completed execution of a dispatch block object can be waited
+ * on with dispatch_block_wait() or observed with dispatch_block_notify().
+ *
+ * If invoked directly, the returned dispatch block object will be executed with
+ * the assigned QOS class as long as that does not result in a lower QOS class
+ * than what is current on the calling thread.
+ *
+ * If the returned dispatch block object is submitted to a dispatch queue, the
+ * QOS class it will be executed with depends on the QOS class assigned to the
+ * block, the QOS class assigned to the queue and which of the following flags
+ * was specified or defaulted to:
+ * - DISPATCH_BLOCK_INHERIT_QOS_CLASS: default for asynchronous execution
+ * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS: default for synchronous execution
+ * See description of dispatch_block_flags_t for details.
+ *
+ * If the returned dispatch block object is submitted directly to a serial queue
+ * and is configured to execute with a specific QOS class, the system will make
+ * a best effort to apply the necessary QOS overrides to ensure that blocks
+ * submitted earlier to the serial queue are executed at that same QOS class or
+ * higher.
+ *
+ * @param flags
+ * Configuration flags for the new block object.
+ * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t
+ * results in NULL being returned.
+ *
+ * @param qos_class
+ * A QOS class value:
+ * - QOS_CLASS_USER_INTERACTIVE
+ * - QOS_CLASS_USER_INITIATED
+ * - QOS_CLASS_DEFAULT
+ * - QOS_CLASS_UTILITY
+ * - QOS_CLASS_BACKGROUND
+ * - QOS_CLASS_UNSPECIFIED
+ * Passing QOS_CLASS_UNSPECIFIED is equivalent to specifying the
+ * DISPATCH_BLOCK_NO_QOS_CLASS flag. Passing any other value results in NULL
+ * being returned.
+ *
+ * @param relative_priority
+ * A relative priority within the QOS class. This value is a negative
+ * offset from the maximum supported scheduler priority for the given class.
+ * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY
+ * results in NULL being returned.
+ *
+ * @param block
+ * The block to create the dispatch block object from.
+ *
+ * @result
+ * The newly created dispatch block object, or NULL.
+ * When not building with Objective-C ARC, must be released with a -[release]
+ * message or the Block_release() function.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_RETURNS_RETAINED_BLOCK
+DISPATCH_WARN_RESULT DISPATCH_NOTHROW
+dispatch_block_t
+dispatch_block_create_with_qos_class(dispatch_block_flags_t flags,
+ dispatch_qos_class_t qos_class, int relative_priority,
+ dispatch_block_t block);
+
+/*!
+ * @function dispatch_block_perform
+ *
+ * @abstract
+ * Create, synchronously execute and release a dispatch block object from the
+ * specified block and flags.
+ *
+ * @discussion
+ * Behaves identically to the sequence
+ * <code>
+ * dispatch_block_t b = dispatch_block_create(flags, block);
+ * b();
+ * Block_release(b);
+ * </code>
+ * but may be implemented more efficiently internally by not requiring a copy
+ * to the heap of the specified block or the allocation of a new block object.
+ *
+ * @param flags
+ * Configuration flags for the temporary block object.
+ * The result of passing a value that is not a bitwise OR of flags from
+ * dispatch_block_flags_t is undefined.
+ *
+ * @param block
+ * The block to create the temporary block object from.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
+void
+dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block);
+
+/*!
+ * @function dispatch_block_wait
+ *
+ * @abstract
+ * Wait synchronously until execution of the specified dispatch block object has
+ * completed or until the specified timeout has elapsed.
+ *
+ * @discussion
+ * This function will return immediately if execution of the block object has
+ * already completed.
+ *
+ * It is not possible to wait for multiple executions of the same block object
+ * with this interface; use dispatch_group_wait() for that purpose. A single
+ * dispatch block object may either be waited on once and executed once,
+ * or it may be executed any number of times. The behavior of any other
+ * combination is undefined. Submission to a dispatch queue counts as an
+ * execution, even if cancellation (dispatch_block_cancel) means the block's
+ * code never runs.
+ *
+ * The result of calling this function from multiple threads simultaneously
+ * with the same dispatch block object is undefined, but note that doing so
+ * would violate the rules described in the previous paragraph.
+ *
+ * If this function returns indicating that the specified timeout has elapsed,
+ * then that invocation does not count as the one allowed wait.
+ *
+ * If at the time this function is called, the specified dispatch block object
+ * has been submitted directly to a serial queue, the system will make a best
+ * effort to apply the necessary QOS overrides to ensure that the block and any
+ * blocks submitted earlier to that serial queue are executed at the QOS class
+ * (or higher) of the thread calling dispatch_block_wait().
+ *
+ * @param block
+ * The dispatch block object to wait on.
+ * The result of passing NULL or a block object not returned by one of the
+ * dispatch_block_create* functions is undefined.
+ *
+ * @param timeout
+ * When to timeout (see dispatch_time). As a convenience, there are the
+ * DISPATCH_TIME_NOW and DISPATCH_TIME_FOREVER constants.
+ *
+ * @result
+ * Returns zero on success (the dispatch block object completed within the
+ * specified timeout) or non-zero on error (i.e. timed out).
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+long
+dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout);
+
+/*!
+ * @function dispatch_block_notify
+ *
+ * @abstract
+ * Schedule a notification block to be submitted to a queue when the execution
+ * of a specified dispatch block object has completed.
+ *
+ * @discussion
+ * This function will submit the notification block immediately if execution of
+ * the observed block object has already completed.
+ *
+ * It is not possible to be notified of multiple executions of the same block
+ * object with this interface; use dispatch_group_notify() for that purpose.
+ *
+ * A single dispatch block object may either be observed one or more times
+ * and executed once, or it may be executed any number of times. The behavior
+ * of any other combination is undefined. Submission to a dispatch queue
+ * counts as an execution, even if cancellation (dispatch_block_cancel) means
+ * the block's code never runs.
+ *
+ * If multiple notification blocks are scheduled for a single block object,
+ * there is no defined order in which the notification blocks will be submitted
+ * to their associated queues.
+ *
+ * @param block
+ * The dispatch block object to observe.
+ * The result of passing NULL or a block object not returned by one of the
+ * dispatch_block_create* functions is undefined.
+ *
+ * @param queue
+ * The queue to which the supplied notification block will be submitted when
+ * the observed block completes.
+ *
+ * @param notification_block
+ * The notification block to submit when the observed block object completes.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue,
+ dispatch_block_t notification_block);
+
+/*!
+ * @function dispatch_block_cancel
+ *
+ * @abstract
+ * Asynchronously cancel the specified dispatch block object.
+ *
+ * @discussion
+ * Cancellation causes any future execution of the dispatch block object to
+ * return immediately, but does not affect any execution of the block object
+ * that is already in progress.
+ *
+ * Release of any resources associated with the block object will be delayed
+ * until execution of the block object is next attempted (or any execution
+ * already in progress completes).
+ *
+ * NOTE: care needs to be taken to ensure that a block object that may be
+ * canceled does not capture any resources that require execution of the
+ * block body in order to be released (e.g. memory allocated with
+ * malloc(3) that the block body calls free(3) on). Such resources will
+ * be leaked if the block body is never executed due to cancellation.
+ *
+ * @param block
+ * The dispatch block object to cancel.
+ * The result of passing NULL or a block object not returned by one of the
+ * dispatch_block_create* functions is undefined.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_block_cancel(dispatch_block_t block);
+
+/*!
+ * @function dispatch_block_testcancel
+ *
+ * @abstract
+ * Tests whether the given dispatch block object has been canceled.
+ *
+ * @param block
+ * The dispatch block object to test.
+ * The result of passing NULL or a block object not returned by one of the
+ * dispatch_block_create* functions is undefined.
+ *
+ * @result
+ * Non-zero if canceled and zero if not canceled.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
+DISPATCH_NOTHROW
+long
+dispatch_block_testcancel(dispatch_block_t block);
+
+__END_DECLS
+
+#endif // __BLOCKS__
+
+#endif // __DISPATCH_BLOCK__
#define __OSX_AVAILABLE_STARTING(x, y)
#endif
-#define DISPATCH_API_VERSION 20130520
+#define DISPATCH_API_VERSION 20140804
#ifndef __DISPATCH_BUILDING_DISPATCH__
#include <os/object.h>
#include <dispatch/base.h>
-#include <dispatch/object.h>
#include <dispatch/time.h>
+#include <dispatch/object.h>
#include <dispatch/queue.h>
+#include <dispatch/block.h>
#include <dispatch/source.h>
#include <dispatch/group.h>
#include <dispatch/semaphore.h>
dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue,
dispatch_object_t item);
+/*!
+ * @function dispatch_introspection_hook_queue_item_complete
+ *
+ * @abstract
+ * Interposable hook function called when an item previously dequeued from a
+ * dispatch queue has completed processing.
+ *
+ * @discussion
+ * The object pointer value passed to this function must be treated as a value
+ * only. It is intended solely for matching up with an earlier call to a
+ * dequeue hook function and must NOT be dereferenced.
+ *
+ * @param item
+ * Opaque identifier for completed item. Must NOT be dereferenced.
+ */
+
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_7_1)
+DISPATCH_EXPORT
+void
+dispatch_introspection_hook_queue_item_complete(dispatch_object_t item);
+
/*!
* @function dispatch_introspection_hook_queue_callout_begin
*
* submitted.
* @param handler The handler to enqueue when data is ready to be
* delivered.
- * @param data The data read from the file descriptor.
- * @param error An errno condition for the read operation or
+ * param data The data read from the file descriptor.
+ * param error An errno condition for the read operation or
* zero if the read was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
* @param queue The dispatch queue to which the handler should be
* submitted.
* @param handler The handler to enqueue when the data has been written.
- * @param data The data that could not be written to the I/O
+ * param data The data that could not be written to the I/O
* channel, or NULL.
- * @param error An errno condition for the write operation or
+ * param error An errno condition for the write operation or
* zero if the write was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
* @param queue The dispatch queue to which the handler should be submitted.
* @param cleanup_handler The handler to enqueue when the system
* relinquishes control over the file descriptor.
- * @param error An errno condition if control is relinquished
+ * param error An errno condition if control is relinquished
* because channel creation failed, zero otherwise.
* @result The newly created dispatch I/O channel or NULL if an error
* occurred (invalid type specified).
* submitted.
* @param cleanup_handler The handler to enqueue when the system
* has closed the file at path.
- * @param error An errno condition if control is relinquished
+ * param error An errno condition if control is relinquished
* because channel creation or opening of the
* specified file failed, zero otherwise.
* @result The newly created dispatch I/O channel or NULL if an error
* relinquishes control over the file descriptor
* (resp. closes the file at path) associated with
* the existing channel.
- * @param error An errno condition if control is relinquished
+ * param error An errno condition if control is relinquished
* because channel creation failed, zero otherwise.
* @result The newly created dispatch I/O channel or NULL if an error
* occurred (invalid type specified).
* submitted.
* @param io_handler The I/O handler to enqueue when data is ready to be
* delivered.
- * @param done A flag indicating whether the operation is complete.
- * @param data An object with the data most recently read from the
+ * param done A flag indicating whether the operation is complete.
+ * param data An object with the data most recently read from the
* I/O channel as part of this read operation, or NULL.
- * @param error An errno condition for the read operation or zero if
+ * param error An errno condition for the read operation or zero if
* the read was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
* @param queue The dispatch queue to which the I/O handler should be
* submitted.
* @param io_handler The I/O handler to enqueue when data has been delivered.
- * @param done A flag indicating whether the operation is complete.
- * @param data An object of the data remaining to be
+ * param done A flag indicating whether the operation is complete.
+ * param data An object of the data remaining to be
* written to the I/O channel as part of this write
* operation, or NULL.
- * @param error An errno condition for the write operation or zero
+ * param error An errno condition for the write operation or zero
* if the write was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
#define DISPATCH_RETURNS_RETAINED
#endif
-__BEGIN_DECLS
-
/*!
- * @function dispatch_debug
+ * @typedef dispatch_block_t
*
* @abstract
- * Programmatically log debug information about a dispatch object.
+ * The type of blocks submitted to dispatch queues, which take no arguments
+ * and have no return value.
*
* @discussion
- * Programmatically log debug information about a dispatch object. By default,
- * the log output is sent to syslog at notice level. In the debug version of
- * the library, the log output is sent to a file in /var/tmp.
- * The log output destination can be configured via the LIBDISPATCH_LOG
- * environment variable, valid values are: YES, NO, syslog, stderr, file.
- *
- * This function is deprecated and will be removed in a future release.
- * Objective-C callers may use -debugDescription instead.
- *
- * @param object
- * The object to introspect.
- *
- * @param message
- * The message to log above and beyond the introspection.
+ * When not building with Objective-C ARC, a block object allocated on or
+ * copied to the heap must be released with a -[release] message or the
+ * Block_release() function.
+ *
+ * The declaration of a block literal allocates storage on the stack.
+ * Therefore, this is an invalid construct:
+ * <code>
+ * dispatch_block_t block;
+ * if (x) {
+ * block = ^{ printf("true\n"); };
+ * } else {
+ * block = ^{ printf("false\n"); };
+ * }
+ * block(); // unsafe!!!
+ * </code>
+ *
+ * What is happening behind the scenes:
+ * <code>
+ * if (x) {
+ * struct Block __tmp_1 = ...; // setup details
+ * block = &__tmp_1;
+ * } else {
+ * struct Block __tmp_2 = ...; // setup details
+ * block = &__tmp_2;
+ * }
+ * </code>
+ *
+ * As the example demonstrates, the address of a stack variable is escaping the
+ * scope in which it is allocated. That is a classic C bug.
+ *
+ * Instead, the block literal must be copied to the heap with the Block_copy()
+ * function or by sending it a -[copy] message.
*/
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
-DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
-__attribute__((__format__(printf,2,3)))
-void
-dispatch_debug(dispatch_object_t object, const char *message, ...);
+typedef void (^dispatch_block_t)(void);
-__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
-DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
-__attribute__((__format__(printf,2,0)))
-void
-dispatch_debugv(dispatch_object_t object, const char *message, va_list ap);
+__BEGIN_DECLS
/*!
* @function dispatch_retain
* @abstract
* Set the finalizer function for a dispatch object.
*
- * @param
+ * @param object
* The dispatch object to modify.
* The result of passing NULL in this parameter is undefined.
*
- * @param
+ * @param finalizer
* The finalizer function pointer.
*
* @discussion
DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1
void
dispatch_set_finalizer_f(dispatch_object_t object,
- dispatch_function_t finalizer);
+ dispatch_function_t finalizer);
/*!
* @function dispatch_suspend
void
dispatch_resume(dispatch_object_t object);
+/*!
+ * @function dispatch_wait
+ *
+ * @abstract
+ * Wait synchronously for an object or until the specified timeout has elapsed.
+ *
+ * @discussion
+ * Type-generic macro that maps to dispatch_block_wait, dispatch_group_wait or
+ * dispatch_semaphore_wait, depending on the type of the first argument.
+ * See documentation for these functions for more details.
+ * This function is unavailable for any other object type.
+ *
+ * @param object
+ * The object to wait on.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param timeout
+ * When to timeout (see dispatch_time). As a convenience, there are the
+ * DISPATCH_TIME_NOW and DISPATCH_TIME_FOREVER constants.
+ *
+ * @result
+ * Returns zero on success or non-zero on error (i.e. timed out).
+ */
+DISPATCH_UNAVAILABLE
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+long
+dispatch_wait(void *object, dispatch_time_t timeout);
+#if __has_extension(c_generic_selections)
+#define dispatch_wait(object, timeout) \
+ _Generic((object), \
+ dispatch_block_t:dispatch_block_wait, \
+ dispatch_group_t:dispatch_group_wait, \
+ dispatch_semaphore_t:dispatch_semaphore_wait \
+ )((object),(timeout))
+#endif
+
+/*!
+ * @function dispatch_notify
+ *
+ * @abstract
+ * Schedule a notification block to be submitted to a queue when the execution
+ * of a specified object has completed.
+ *
+ * @discussion
+ * Type-generic macro that maps to dispatch_block_notify or
+ * dispatch_group_notify, depending on the type of the first argument.
+ * See documentation for these functions for more details.
+ * This function is unavailable for any other object type.
+ *
+ * @param object
+ * The object to observe.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param queue
+ * The queue to which the supplied notification block will be submitted when
+ * the observed object completes.
+ *
+ * @param notification_block
+ * The block to submit when the observed object completes.
+ */
+DISPATCH_UNAVAILABLE
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_notify(void *object, dispatch_object_t queue,
+ dispatch_block_t notification_block);
+#if __has_extension(c_generic_selections)
+#define dispatch_notify(object, queue, notification_block) \
+ _Generic((object), \
+ dispatch_block_t:dispatch_block_notify, \
+ dispatch_group_t:dispatch_group_notify \
+ )((object),(queue), (notification_block))
+#endif
+
+/*!
+ * @function dispatch_cancel
+ *
+ * @abstract
+ * Cancel the specified object.
+ *
+ * @discussion
+ * Type-generic macro that maps to dispatch_block_cancel or
+ * dispatch_source_cancel, depending on the type of the first argument.
+ * See documentation for these functions for more details.
+ * This function is unavailable for any other object type.
+ *
+ * @param object
+ * The object to cancel.
+ * The result of passing NULL in this parameter is undefined.
+ */
+DISPATCH_UNAVAILABLE
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_cancel(void *object);
+#if __has_extension(c_generic_selections)
+#define dispatch_cancel(object) \
+ _Generic((object), \
+ dispatch_block_t:dispatch_block_cancel, \
+ dispatch_source_t:dispatch_source_cancel \
+ )((object))
+#endif
+
+/*!
+ * @function dispatch_testcancel
+ *
+ * @abstract
+ * Test whether the specified object has been canceled
+ *
+ * @discussion
+ * Type-generic macro that maps to dispatch_block_testcancel or
+ * dispatch_source_testcancel, depending on the type of the first argument.
+ * See documentation for these functions for more details.
+ * This function is unavailable for any other object type.
+ *
+ * @param object
+ * The object to test.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @result
+ * Non-zero if canceled and zero if not canceled.
+ */
+DISPATCH_UNAVAILABLE
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
+DISPATCH_NOTHROW
+long
+dispatch_testcancel(void *object);
+#if __has_extension(c_generic_selections)
+#define dispatch_testcancel(object) \
+ _Generic((object), \
+ dispatch_block_t:dispatch_block_testcancel, \
+ dispatch_source_t:dispatch_source_testcancel \
+ )((object))
+#endif
+
+/*!
+ * @function dispatch_debug
+ *
+ * @abstract
+ * Programmatically log debug information about a dispatch object.
+ *
+ * @discussion
+ * Programmatically log debug information about a dispatch object. By default,
+ * the log output is sent to syslog at notice level. In the debug version of
+ * the library, the log output is sent to a file in /var/tmp.
+ * The log output destination can be configured via the LIBDISPATCH_LOG
+ * environment variable, valid values are: YES, NO, syslog, stderr, file.
+ *
+ * This function is deprecated and will be removed in a future release.
+ * Objective-C callers may use -debugDescription instead.
+ *
+ * @param object
+ * The object to introspect.
+ *
+ * @param message
+ * The message to log above and beyond the introspection.
+ */
+__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
+__attribute__((__format__(printf,2,3)))
+void
+dispatch_debug(dispatch_object_t object, const char *message, ...);
+
+__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW
+__attribute__((__format__(printf,2,0)))
+void
+dispatch_debugv(dispatch_object_t object, const char *message, va_list ap);
+
__END_DECLS
#endif
/*
- * Copyright (c) 2008-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2008-2014 Apple Inc. All rights reserved.
*
* @APPLE_APACHE_LICENSE_HEADER_START@
*
*/
DISPATCH_DECL(dispatch_queue);
-/*!
- * @typedef dispatch_queue_attr_t
- *
- * @abstract
- * Attribute for dispatch queues.
- */
-DISPATCH_DECL(dispatch_queue_attr);
-
-/*!
- * @typedef dispatch_block_t
- *
- * @abstract
- * The prototype of blocks submitted to dispatch queues, which take no
- * arguments and have no return value.
- *
- * @discussion
- * The declaration of a block allocates storage on the stack. Therefore, this
- * is an invalid construct:
- *
- * dispatch_block_t block;
- *
- * if (x) {
- * block = ^{ printf("true\n"); };
- * } else {
- * block = ^{ printf("false\n"); };
- * }
- * block(); // unsafe!!!
- *
- * What is happening behind the scenes:
- *
- * if (x) {
- * struct Block __tmp_1 = ...; // setup details
- * block = &__tmp_1;
- * } else {
- * struct Block __tmp_2 = ...; // setup details
- * block = &__tmp_2;
- * }
- *
- * As the example demonstrates, the address of a stack variable is escaping the
- * scope in which it is allocated. That is a classic C bug.
- */
-#ifdef __BLOCKS__
-typedef void (^dispatch_block_t)(void);
-#endif
-
__BEGIN_DECLS
/*!
dispatch_queue_t
dispatch_get_current_queue(void);
+__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q;
+
/*!
* @function dispatch_get_main_queue
*
* Returns the main queue. This queue is created automatically on behalf of
* the main thread before main() is called.
*/
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
-DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q;
-#define dispatch_get_main_queue() \
- DISPATCH_GLOBAL_OBJECT(dispatch_queue_t, _dispatch_main_q)
+DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_CONST DISPATCH_NOTHROW
+dispatch_queue_t
+dispatch_get_main_queue(void)
+{
+ return DISPATCH_GLOBAL_OBJECT(dispatch_queue_t, _dispatch_main_q);
+}
/*!
* @typedef dispatch_queue_priority_t
typedef long dispatch_queue_priority_t;
+/*!
+ * @typedef dispatch_qos_class_t
+ * Alias for qos_class_t type.
+ */
+#if __has_include(<sys/qos.h>)
+#include <sys/qos.h>
+typedef qos_class_t dispatch_qos_class_t;
+#else
+typedef unsigned int dispatch_qos_class_t;
+#endif
+
/*!
* @function dispatch_get_global_queue
*
* @abstract
- * Returns a well-known global concurrent queue of a given priority level.
+ * Returns a well-known global concurrent queue of a given quality of service
+ * class.
*
* @discussion
* The well-known global concurrent queues may not be modified. Calls to
* dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., will
* have no effect when used with queues returned by this function.
*
- * @param priority
- * A priority defined in dispatch_queue_priority_t
+ * @param identifier
+ * A quality of service class defined in qos_class_t or a priority defined in
+ * dispatch_queue_priority_t.
+ *
+ * It is recommended to use quality of service class values to identify the
+ * well-known global concurrent queues:
+ * - QOS_CLASS_USER_INTERACTIVE
+ * - QOS_CLASS_USER_INITIATED
+ * - QOS_CLASS_DEFAULT
+ * - QOS_CLASS_UTILITY
+ * - QOS_CLASS_BACKGROUND
+ *
+ * The global concurrent queues may still be identified by their priority,
+ * which maps to the following QOS classes:
+ * - DISPATCH_QUEUE_PRIORITY_HIGH: QOS_CLASS_USER_INITIATED
+ * - DISPATCH_QUEUE_PRIORITY_DEFAULT: QOS_CLASS_DEFAULT
+ * - DISPATCH_QUEUE_PRIORITY_LOW: QOS_CLASS_UTILITY
+ * - DISPATCH_QUEUE_PRIORITY_BACKGROUND: QOS_CLASS_BACKGROUND
*
* @param flags
* Reserved for future use. Passing any value other than zero may result in
* a NULL return value.
*
* @result
- * Returns the requested global queue.
+ * Returns the requested global queue or NULL if the requested global queue
+ * does not exist.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW
dispatch_queue_t
-dispatch_get_global_queue(dispatch_queue_priority_t priority,
- unsigned long flags);
+dispatch_get_global_queue(long identifier, unsigned long flags);
+
+/*!
+ * @typedef dispatch_queue_attr_t
+ *
+ * @abstract
+ * Attribute for dispatch queues.
+ */
+DISPATCH_DECL(dispatch_queue_attr);
/*!
* @const DISPATCH_QUEUE_SERIAL
DISPATCH_EXPORT
struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent;
+/*!
+ * @function dispatch_queue_attr_make_with_qos_class
+ *
+ * @abstract
+ * Returns an attribute value which may be provided to dispatch_queue_create()
+ * in order to assign a QOS class and relative priority to the queue.
+ *
+ * @discussion
+ * When specified in this manner, the QOS class and relative priority take
+ * precedence over those inherited from the dispatch queue's target queue (if
+ * any) as long as that does not result in a lower QOS class and relative priority.
+ *
+ * The global queue priorities map to the following QOS classes:
+ * - DISPATCH_QUEUE_PRIORITY_HIGH: QOS_CLASS_USER_INITIATED
+ * - DISPATCH_QUEUE_PRIORITY_DEFAULT: QOS_CLASS_DEFAULT
+ * - DISPATCH_QUEUE_PRIORITY_LOW: QOS_CLASS_UTILITY
+ * - DISPATCH_QUEUE_PRIORITY_BACKGROUND: QOS_CLASS_BACKGROUND
+ *
+ * Example:
+ * <code>
+ * dispatch_queue_t queue;
+ * dispatch_queue_attr_t attr;
+ * attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL,
+ * QOS_CLASS_UTILITY, 0);
+ * queue = dispatch_queue_create("com.example.myqueue", attr);
+ * </code>
+ *
+ * @param attr
+ * A queue attribute value to be combined with the QOS class, or NULL.
+ *
+ * @param qos_class
+ * A QOS class value:
+ * - QOS_CLASS_USER_INTERACTIVE
+ * - QOS_CLASS_USER_INITIATED
+ * - QOS_CLASS_DEFAULT
+ * - QOS_CLASS_UTILITY
+ * - QOS_CLASS_BACKGROUND
+ * Passing any other value results in NULL being returned.
+ *
+ * @param relative_priority
+ * A relative priority within the QOS class. This value is a negative
+ * offset from the maximum supported scheduler priority for the given class.
+ * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY
+ * results in NULL being returned.
+ *
+ * @return
+ * Returns an attribute value which may be provided to dispatch_queue_create(),
+ * or NULL if an invalid QOS class was requested.
+ * The new value combines the attributes specified by the 'attr' parameter and
+ * the new QOS class and relative priority.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
+dispatch_queue_attr_t
+dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t attr,
+ dispatch_qos_class_t qos_class, int relative_priority);
+
/*!
* @function dispatch_queue_create
*
* hold a reference to that queue. Therefore a queue will not be deallocated
* until all pending blocks have finished.
*
- * The target queue of a newly created dispatch queue is the default priority
- * global concurrent queue.
+ * Passing the result of the dispatch_queue_attr_make_with_qos_class() function
+ * to the attr parameter of this function allows a quality of service class and
+ * relative priority to be specified for the newly created queue.
+ * The quality of service class so specified takes precedence over the quality
+ * of service class of the newly created dispatch queue's target queue (if any)
+ * as long as that does not result in a lower QOS class and relative priority.
+ *
+ * When no quality of service class is specified, the target queue of a newly
+ * created dispatch queue is the default priority global concurrent queue.
*
* @param label
* A string label to attach to the queue.
* This parameter is optional and may be NULL.
*
* @param attr
- * DISPATCH_QUEUE_SERIAL or DISPATCH_QUEUE_CONCURRENT.
+ * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to
+ * the function dispatch_queue_attr_make_with_qos_class().
*
* @result
* The newly created dispatch queue.
const char *
dispatch_queue_get_label(dispatch_queue_t queue);
+/*!
+ * @function dispatch_queue_get_qos_class
+ *
+ * @abstract
+ * Returns the QOS class and relative priority of the given queue.
+ *
+ * @discussion
+ * If the given queue was created with an attribute value returned from
+ * dispatch_queue_attr_make_with_qos_class(), this function returns the QOS
+ * class and relative priority specified at that time; for any other attribute
+ * value it returns a QOS class of QOS_CLASS_UNSPECIFIED and a relative
+ * priority of 0.
+ *
+ * If the given queue is one of the global queues, this function returns its
+ * assigned QOS class value as documented under dispatch_get_global_queue() and
+ * a relative priority of 0; in the case of the main queue it returns the QOS
+ * value provided by qos_class_main() and a relative priority of 0.
+ *
+ * @param queue
+ * The queue to query.
+ *
+ * @param relative_priority_ptr
+ * A pointer to an int variable to be filled with the relative priority offset
+ * within the QOS class, or NULL.
+ *
+ * @return
+ * A QOS class value:
+ * - QOS_CLASS_USER_INTERACTIVE
+ * - QOS_CLASS_USER_INITIATED
+ * - QOS_CLASS_DEFAULT
+ * - QOS_CLASS_UTILITY
+ * - QOS_CLASS_BACKGROUND
+ * - QOS_CLASS_UNSPECIFIED
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+dispatch_qos_class_t
+dispatch_queue_get_qos_class(dispatch_queue_t queue,
+ int *relative_priority_ptr);
+
/*!
* @const DISPATCH_TARGET_QUEUE_DEFAULT
* @discussion Constant to pass to the dispatch_set_target_queue() and
* @discussion
* An object's target queue is responsible for processing the object.
*
- * A dispatch queue's priority is inherited from its target queue. Use the
- * dispatch_get_global_queue() function to obtain suitable target queue
- * of the desired priority.
+ * When no quality of service class and relative priority is specified for a
+ * dispatch queue at the time of creation, a dispatch queue's quality of service
+ * class is inherited from its target queue. The dispatch_get_global_queue()
+ * function may be used to obtain a target queue of a specific quality of
+ * service class, however the use of dispatch_queue_attr_make_with_qos_class()
+ * is recommended instead.
*
* Blocks submitted to a serial queue whose target queue is another serial
* queue will not be invoked concurrently with blocks submitted to the target
*/
#define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \
(&_dispatch_source_type_memorypressure)
-__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0)
DISPATCH_SOURCE_TYPE_DECL(memorypressure);
/*!
DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
void
dispatch_source_set_cancel_handler(dispatch_source_t source,
- dispatch_block_t cancel_handler);
+ dispatch_block_t handler);
#endif /* __BLOCKS__ */
/*!
DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
void
dispatch_source_set_cancel_handler_f(dispatch_source_t source,
- dispatch_function_t cancel_handler);
+ dispatch_function_t handler);
/*!
* @function dispatch_source_cancel
DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
void
dispatch_source_set_registration_handler(dispatch_source_t source,
- dispatch_block_t registration_handler);
+ dispatch_block_t handler);
#endif /* __BLOCKS__ */
/*!
DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
void
dispatch_source_set_registration_handler_f(dispatch_source_t source,
- dispatch_function_t registration_handler);
+ dispatch_function_t handler);
__END_DECLS
2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; };
2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; };
2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; };
+ 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
5A0095A210F274B0000E2A31 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; };
5A27262610F26F1900751FBC /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; };
5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; };
E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; };
E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; };
E43A710615783F7E0012D38D /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
+ E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
+ E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
+ E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+ E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+ E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+ E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+ E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+ E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+ E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; };
+ E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; };
+ E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; };
+ E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; };
+ E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; };
E44EBE3E1251659900645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; };
E44EBE5412517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; };
E44EBE5512517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; };
E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; };
E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; };
+ E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; };
+ E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; };
E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; };
E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; };
E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; };
+ E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; };
+ E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; };
E4B515BD164B2DA300E003AF /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; };
E4B515BE164B2DA300E003AF /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; };
E4B515BF164B2DA300E003AF /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; };
E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; };
E4C1ED701263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; };
+ E4D76A9318E325D200B1F98B /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ E4D76A9418E325D200B1F98B /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; };
E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; };
E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; };
E4EC11AE12514302000DDBD1 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; };
/* Begin PBXFileReference section */
2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = "<group>"; };
2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = "<group>"; };
+ 2BE17C6318EA305E002CA4E8 /* layout_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = layout_private.h; sourceTree = "<group>"; };
5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = "<group>"; };
5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = "<group>"; };
E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = "<group>"; };
E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = "<group>"; };
E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = "<group>"; };
+ E44757D917F4572600B82CA1 /* inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_internal.h; sourceTree = "<group>"; };
E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = "<group>"; };
+ E44A8E6A1805C3E0009FFDB6 /* voucher.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = voucher.c; sourceTree = "<group>"; };
+ E44A8E711805C473009FFDB6 /* voucher_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_private.h; sourceTree = "<group>"; };
+ E44A8E7418066276009FFDB6 /* voucher_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_internal.h; sourceTree = "<group>"; };
E44EBE331251654000645D88 /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = "<group>"; };
E44EBE371251656400645D88 /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = "<group>"; };
E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = "<group>"; };
E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = "<group>"; };
E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = "<group>"; };
E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; };
+ E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = "<group>"; };
E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = "<group>"; };
E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = "<group>"; };
E4A2C9C4176019760000F809 /* atomic_llsc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_llsc.h; sourceTree = "<group>"; };
+ E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = "<group>"; };
+ E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = "<group>"; };
+ E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = "<group>"; };
E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = "<group>"; };
E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-introspection.xcconfig"; sourceTree = "<group>"; };
E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_read.3; sourceTree = "<group>"; };
E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = "<group>"; };
E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = "<group>"; };
+ E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = "<group>"; };
+ E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch_objc.aliases; sourceTree = "<group>"; };
E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = "<group>"; };
E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = "<group>"; };
E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; };
96A8AA860F41E7A400CD570B /* source.c */,
96032E4A0F5CC8C700241C5F /* time.c */,
C9C5F80D143C1771006DC718 /* transform.c */,
+ E44A8E6A1805C3E0009FFDB6 /* voucher.c */,
FC7BED950E8361E600161930 /* protocol.defs */,
E43570B8126E93380097AB9F /* provider.d */,
);
E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */,
E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */,
E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */,
+ E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */,
E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */,
E448727914C6215D00BB45C2 /* libdispatch.order */,
+ E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */,
+ E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */,
E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */,
E421E5FD1716BEA70090DC9B /* libdispatch.interposable */,
);
FC1832A2109923C7003403D5 /* perfmon.h */,
FC1832A3109923C7003403D5 /* time.h */,
FC1832A4109923C7003403D5 /* tsd.h */,
+ E48EC97B1835BADD00EAC4F1 /* yield.h */,
);
path = shims;
sourceTree = "<group>";
isa = PBXGroup;
children = (
72CC942F0ECCD8750031B751 /* base.h */,
+ E4D76A9218E325D200B1F98B /* block.h */,
5AAB45C510D30D0C004407EA /* data.h */,
FC7BED960E8361E600161930 /* dispatch.h */,
FC5C9C1D0EADABE3006E462D /* group.h */,
96BC39BC0F3EBAB100C59689 /* queue_private.h */,
FCEF047F0F5661960067401F /* source_private.h */,
E4ECBAA415253C25002C313C /* mach_private.h */,
+ E44A8E711805C473009FFDB6 /* voucher_private.h */,
+ E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */,
961B99350F3E83980006BC96 /* benchmark.h */,
E4B515D7164B2DFB00E003AF /* introspection_private.h */,
+ 2BE17C6318EA305E002CA4E8 /* layout_private.h */,
);
name = "Private Headers";
path = private;
children = (
2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */,
FC7BED8F0E8361E600161930 /* internal.h */,
+ E44757D917F4572600B82CA1 /* inline_internal.h */,
E4C1ED6E1263E714000D3C8B /* data_internal.h */,
5A0095A110F274B0000E2A31 /* io_internal.h */,
965ECC200F3EAB71004DDD89 /* object_internal.h */,
96929D950F3EA2170041FF5D /* queue_internal.h */,
5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */,
FC0B34780FA2851C0080FFA0 /* source_internal.h */,
+ E44A8E7418066276009FFDB6 /* voucher_internal.h */,
E422A0D412A557B5005E5BDB /* trace.h */,
E44F9DA816543F79001DCD38 /* introspection_internal.h */,
96929D830F3EA1020041FF5D /* shims.h */,
FC7BEDA50E8361E600161930 /* dispatch.h in Headers */,
72CC94300ECCD8750031B751 /* base.h in Headers */,
961B99500F3E85C30006BC96 /* object.h in Headers */,
+ E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */,
FC7BED9A0E8361E600161930 /* queue.h in Headers */,
FC7BED9C0E8361E600161930 /* source.h in Headers */,
+ E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */,
721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */,
FC5C9C1E0EADABE3006E462D /* group.h in Headers */,
96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */,
5AAB45C410D30CC7004407EA /* io.h in Headers */,
+ E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */,
E4630253176162D400E11F4C /* atomic_sfb.h in Headers */,
5AAB45C610D30D0C004407EA /* data.h in Headers */,
96032E4D0F5CC8D100241C5F /* time.h in Headers */,
FC7BEDA20E8361E600161930 /* private.h in Headers */,
+ E4D76A9318E325D200B1F98B /* block.h in Headers */,
E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */,
C913AC0F143BD34800B78976 /* data_private.h in Headers */,
96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */,
FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */,
5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */,
E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */,
+ E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */,
5A0095A210F274B0000E2A31 /* io_internal.h in Headers */,
FC1832A8109923C7003403D5 /* tsd.h in Headers */,
96929D840F3EA1020041FF5D /* atomic.h in Headers */,
96929D850F3EA1020041FF5D /* shims.h in Headers */,
FC1832A7109923C7003403D5 /* time.h in Headers */,
+ E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */,
+ 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */,
FC1832A6109923C7003403D5 /* perfmon.h in Headers */,
FC9C70E8105EC9620074F9CA /* config.h in Headers */,
E422A0D512A557B5005E5BDB /* trace.h in Headers */,
E49F24AB125D57FA0057C971 /* dispatch.h in Headers */,
E49F24AC125D57FA0057C971 /* base.h in Headers */,
E49F24AD125D57FA0057C971 /* object.h in Headers */,
+ E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */,
E49F24AE125D57FA0057C971 /* queue.h in Headers */,
E49F24AF125D57FA0057C971 /* source.h in Headers */,
+ E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */,
E49F24B0125D57FA0057C971 /* semaphore.h in Headers */,
E49F24B1125D57FA0057C971 /* group.h in Headers */,
E49F24B2125D57FA0057C971 /* once.h in Headers */,
E49F24B3125D57FA0057C971 /* io.h in Headers */,
+ E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */,
E4630252176162D300E11F4C /* atomic_sfb.h in Headers */,
E49F24B4125D57FA0057C971 /* data.h in Headers */,
E49F24B5125D57FA0057C971 /* time.h in Headers */,
E49F24B6125D57FA0057C971 /* private.h in Headers */,
+ E4D76A9418E325D200B1F98B /* block.h in Headers */,
E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */,
E49F24B7125D57FA0057C971 /* queue_private.h in Headers */,
E49F24B8125D57FA0057C971 /* source_private.h in Headers */,
E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */,
E4C1ED701263E714000D3C8B /* data_internal.h in Headers */,
E49F24BF125D57FA0057C971 /* io_internal.h in Headers */,
+ E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */,
E49F24C1125D57FA0057C971 /* tsd.h in Headers */,
E49F24C2125D57FA0057C971 /* atomic.h in Headers */,
E49F24C3125D57FA0057C971 /* shims.h in Headers */,
E49F24C4125D57FA0057C971 /* time.h in Headers */,
E49F24C5125D57FA0057C971 /* perfmon.h in Headers */,
+ E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */,
+ 2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */,
E49F24C6125D57FA0057C971 /* config.h in Headers */,
E422A0D612A557B5005E5BDB /* trace.h in Headers */,
E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */,
E4630251176162D200E11F4C /* atomic_sfb.h in Headers */,
E44F9DBE1654405B001DCD38 /* tsd.h in Headers */,
E44F9DB816544053001DCD38 /* atomic.h in Headers */,
+ E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */,
E44F9DB71654404F001DCD38 /* shims.h in Headers */,
E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */,
E44F9DBF165440EF001DCD38 /* config.h in Headers */,
+ E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */,
E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */,
E44F9DB616544043001DCD38 /* trace.h in Headers */,
E44F9DB916544056001DCD38 /* getprogname.h in Headers */,
+ E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */,
E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */,
E44F9DC116544115001DCD38 /* object_private.h in Headers */,
E44F9DC016544115001DCD38 /* object.h in Headers */,
isa = PBXProject;
attributes = {
BuildIndependentTargetsInParallel = YES;
- LastUpgradeCheck = 0500;
+ LastUpgradeCheck = 0600;
};
buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */;
compatibilityVersion = "Xcode 3.2";
"$(SRCROOT)/xcodescripts/install-headers.sh",
"$(SRCROOT)/os/object.h",
"$(SRCROOT)/os/object_private.h",
+ "$(SRCROOT)/private/voucher_private.h",
+ "$(SRCROOT)/private/voucher_activity_private.h",
);
name = "Install Headers";
outputPaths = (
"$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h",
"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h",
+ "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h",
+ "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = "/bin/bash -e";
"$(SRCROOT)/xcodescripts/install-headers.sh",
"$(SRCROOT)/os/object.h",
"$(SRCROOT)/os/object_private.h",
+ "$(SRCROOT)/private/voucher_private.h",
+ "$(SRCROOT)/private/voucher_activity_private.h",
);
name = "Install Headers";
outputPaths = (
"$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h",
"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h",
+ "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h",
+ "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h",
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = "/bin/bash -e";
E4FC3264145F46C9002FBDDB /* object.m in Sources */,
2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */,
E420867016027AE500EEE210 /* data.m in Sources */,
+ E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
E46DBC4314EE10C80001F9F6 /* queue.c in Sources */,
E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */,
E46DBC4514EE10C80001F9F6 /* once.c in Sources */,
+ E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */,
E46DBC4614EE10C80001F9F6 /* apply.c in Sources */,
E46DBC4714EE10C80001F9F6 /* object.c in Sources */,
E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */,
E4FC3265145F46C9002FBDDB /* object.m in Sources */,
2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */,
E420867116027AE500EEE210 /* data.m in Sources */,
+ E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
E4B515C8164B2DA300E003AF /* time.c in Sources */,
E4B515C9164B2DA300E003AF /* data.c in Sources */,
E4B515CA164B2DA300E003AF /* io.c in Sources */,
+ E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */,
E4B515CB164B2DA300E003AF /* transform.c in Sources */,
E4B515CC164B2DA300E003AF /* object.m in Sources */,
E4B515CD164B2DA300E003AF /* allocator.c in Sources */,
E4FC3266145F46C9002FBDDB /* object.m in Sources */,
2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */,
E420867316027AE500EEE210 /* data.m in Sources */,
+ E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
E4FC3267145F46C9002FBDDB /* object.m in Sources */,
2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */,
E420867216027AE500EEE210 /* data.m in Sources */,
+ E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/*
- * Copyright (c) 2011-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2011-2014 Apple Inc. All rights reserved.
*
* @APPLE_APACHE_LICENSE_HEADER_START@
*
#ifdef __APPLE__
#include <Availability.h>
#endif
+#include <os/base.h>
/*!
* @header
#else
#define OS_OBJECT_RETURNS_RETAINED
#endif
+#if __has_attribute(ns_consumed)
+#define OS_OBJECT_CONSUMED __attribute__((__ns_consumed__))
+#else
+#define OS_OBJECT_CONSUMED
+#endif
#else
#define OS_OBJECT_RETURNS_RETAINED
+#define OS_OBJECT_CONSUMED
#endif
#if defined(__has_feature)
#if __has_feature(objc_arc)
#define OS_OBJECT_BRIDGE __bridge
+#define OS_WARN_RESULT_NEEDS_RELEASE
#else
#define OS_OBJECT_BRIDGE
+#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT
#endif
#else
#define OS_OBJECT_BRIDGE
+#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT
#endif
#ifndef OS_OBJECT_USE_OBJC_RETAIN_RELEASE
#if defined(__clang_analyzer__)
/*! @parseOnly */
#define OS_OBJECT_RETURNS_RETAINED
/*! @parseOnly */
+#define OS_OBJECT_CONSUMED
+/*! @parseOnly */
#define OS_OBJECT_BRIDGE
+/*! @parseOnly */
+#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT
#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0
#endif
+#define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object))
+
+__BEGIN_DECLS
+
+/*!
+ * @function os_retain
+ *
+ * @abstract
+ * Increment the reference count of an os_object.
+ *
+ * @discussion
+ * On a platform with the modern Objective-C runtime this is exactly equivalent
+ * to sending the object the -[retain] message.
+ *
+ * @param object
+ * The object to retain.
+ *
+ * @result
+ * The retained object.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_EXPORT
+void*
+os_retain(void *object);
+#if OS_OBJECT_USE_OBJC
+#undef os_retain
+#define os_retain(object) [object retain]
+#endif
+
+/*!
+ * @function os_release
+ *
+ * @abstract
+ * Decrement the reference count of an os_object.
+ *
+ * @discussion
+ * On a platform with the modern Objective-C runtime this is exactly equivalent
+ * to sending the object the -[release] message.
+ *
+ * @param object
+ * The object to release.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_EXPORT
+void
+os_release(void *object);
+#if OS_OBJECT_USE_OBJC
+#undef os_release
+#define os_release(object) [object release]
+#endif
+
+__END_DECLS
+
#endif
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
mach_port_t
-dispatch_data_make_memory_entry(dispatch_data_t dd);
+dispatch_data_make_memory_entry(dispatch_data_t data);
#endif
/*!
dispatch_introspection_hook_callout_queue_item_complete(
dispatch_continuation_t object);
-/*!
- * @function dispatch_introspection_hook_queue_item_complete
- *
- * @abstract
- * Interposable hook function called when an item previously dequeued from a
- * dispatch queue has completed processing.
- *
- * @discussion
- * The object pointer value passed to this function must be treated as a value
- * only. It is intended solely for matching up with an earlier call to a
- * dequeue hook function and must NOT be dereferenced.
- *
- * @param item
- * Opaque dentifier for completed item. Must NOT be dereferenced.
- */
-
-DISPATCH_EXPORT
-void
-dispatch_introspection_hook_queue_item_complete(dispatch_object_t item);
-
__END_DECLS
#endif
* the handler function.
* @param handler The handler to enqueue when data is ready to be
* delivered.
- * @param context Application-defined context parameter.
- * @param data The data read from the file descriptor.
- * @param error An errno condition for the read operation or
+ * param context Application-defined context parameter.
+ * param data The data read from the file descriptor.
+ * param error An errno condition for the read operation or
* zero if the read was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
* @param context The application-defined context parameter to pass to
* the handler function.
* @param handler The handler to enqueue when the data has been written.
- * @param context Application-defined context parameter.
- * @param data The data that could not be written to the I/O
+ * param context Application-defined context parameter.
+ * param data The data that could not be written to the I/O
* channel, or NULL.
- * @param error An errno condition for the write operation or
+ * param error An errno condition for the write operation or
* zero if the write was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
* the cleanup handler function.
* @param cleanup_handler The handler to enqueue when the system
* relinquishes control over the file descriptor.
- * @param context Application-defined context parameter.
- * @param error An errno condition if control is relinquished
+ * param context Application-defined context parameter.
+ * param error An errno condition if control is relinquished
* because channel creation failed, zero otherwise.
* @result The newly created dispatch I/O channel or NULL if an error
* occurred (invalid type specified).
* the cleanup handler function.
* @param cleanup_handler The handler to enqueue when the system
* has closed the file at path.
- * @param context Application-defined context parameter.
- * @param error An errno condition if control is relinquished
+ * param context Application-defined context parameter.
+ * param error An errno condition if control is relinquished
* because channel creation or opening of the
* specified file failed, zero otherwise.
* @result The newly created dispatch I/O channel or NULL if an error
* relinquishes control over the file descriptor
* (resp. closes the file at path) associated with
* the existing channel.
- * @param context Application-defined context parameter.
- * @param error An errno condition if control is relinquished
+ * param context Application-defined context parameter.
+ * param error An errno condition if control is relinquished
* because channel creation failed, zero otherwise.
* @result The newly created dispatch I/O channel or NULL if an error
* occurred (invalid type specified).
* the handler function.
* @param io_handler The I/O handler to enqueue when data is ready to be
* delivered.
- * @param context Application-defined context parameter.
- * @param done A flag indicating whether the operation is complete.
- * @param data An object with the data most recently read from the
+ * param context Application-defined context parameter.
+ * param done A flag indicating whether the operation is complete.
+ * param data An object with the data most recently read from the
* I/O channel as part of this read operation, or NULL.
- * @param error An errno condition for the read operation or zero if
+ * param error An errno condition for the read operation or zero if
* the read was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
* @param context The application-defined context parameter to pass to
* the handler function.
* @param io_handler The I/O handler to enqueue when data has been delivered.
- * @param context Application-defined context parameter.
- * @param done A flag indicating whether the operation is complete.
- * @param data An object of the data remaining to be
+ * param context Application-defined context parameter.
+ * param done A flag indicating whether the operation is complete.
+ * param data An object of the data remaining to be
* written to the I/O channel as part of this write
* operation, or NULL.
- * @param error An errno condition for the write operation or zero
+ * param error An errno condition for the write operation or zero
* if the write was successful.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
--- /dev/null
+/*
+ * Copyright (c) 2014 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __DISPATCH_LAYOUT_PRIVATE__
+#define __DISPATCH_LAYOUT_PRIVATE__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/private.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+__BEGIN_DECLS
+
+#if !TARGET_OS_WIN32
+__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+DISPATCH_EXPORT const struct dispatch_queue_offsets_s {
+ // always add new fields at the end
+ const uint16_t dqo_version;
+ const uint16_t dqo_label;
+ const uint16_t dqo_label_size;
+ const uint16_t dqo_flags;
+ const uint16_t dqo_flags_size;
+ const uint16_t dqo_serialnum;
+ const uint16_t dqo_serialnum_size;
+ const uint16_t dqo_width;
+ const uint16_t dqo_width_size;
+ const uint16_t dqo_running;
+ const uint16_t dqo_running_size;
+ // fields added in dqo_version 5:
+ const uint16_t dqo_suspend_cnt;
+ const uint16_t dqo_suspend_cnt_size;
+ const uint16_t dqo_target_queue;
+ const uint16_t dqo_target_queue_size;
+ const uint16_t dqo_priority;
+ const uint16_t dqo_priority_size;
+} dispatch_queue_offsets;
+#endif
+
+#if DISPATCH_LAYOUT_SPI
+
+/*!
+ * @group Data Structure Layout SPI
+ * SPI intended for CoreSymbolication only
+ */
+
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+DISPATCH_EXPORT const struct dispatch_tsd_indexes_s {
+ // always add new fields at the end
+ const uint16_t dti_version;
+ const uint16_t dti_queue_index;
+ const uint16_t dti_voucher_index;
+ const uint16_t dti_qos_class_index;
+} dispatch_tsd_indexes;
+
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+DISPATCH_EXPORT const struct voucher_offsets_s {
+ // always add new fields at the end
+ const uint16_t vo_version;
+ const uint16_t vo_activity_ids_count;
+ const uint16_t vo_activity_ids_count_size;
+ const uint16_t vo_activity_ids_array;
+ const uint16_t vo_activity_ids_array_entry_size;
+} voucher_offsets;
+
+#endif // DISPATCH_LAYOUT_SPI
+
+__END_DECLS
+
+#endif // __DISPATCH_LAYOUT_PRIVATE__
*/
#define DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE \
- ((PAGE_SIZE > 0x1000 ? 1 : 3) * PAGE_SIZE - \
- sizeof(dispatch_mach_trailer_t))
+ (0x4000 - sizeof(dispatch_mach_trailer_t))
/*!
* @typedef dispatch_mach_msg_t
#include <dispatch/mach_private.h>
#include <dispatch/data_private.h>
#include <dispatch/io_private.h>
+#include <dispatch/layout_private.h>
#undef __DISPATCH_INDIRECT__
#endif /* !__DISPATCH_BUILDING_DISPATCH__ */
// <rdar://problem/9627726> Check that public and private dispatch headers match
-#if DISPATCH_API_VERSION != 20130520 // Keep in sync with <dispatch/dispatch.h>
+#if DISPATCH_API_VERSION != 20140804 // Keep in sync with <dispatch/dispatch.h>
#error "Dispatch header mismatch between /usr/include and /usr/local/include"
#endif
#define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT)
+/*!
+ * @function dispatch_queue_attr_make_with_overcommit
+ *
+ * @discussion
+ * Returns a dispatch queue attribute value with the overcommit flag set to the
+ * specified value.
+ *
+ * @param attr
+ * A queue attribute value to be combined with the overcommit flag, or NULL.
+ *
+ * @param overcommit
+ * Boolean overcommit flag.
+ *
+ * @return
+ * Returns an attribute value which may be provided to dispatch_queue_create().
+ * This new value combines the attributes specified by the 'attr' parameter and
+ * the overcommit flag.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW
+dispatch_queue_attr_t
+dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t attr,
+ bool overcommit);
+
/*!
* @typedef dispatch_queue_priority_t
*
#define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS -2
#define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS -3
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
+__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_6,__MAC_10_10,__IPHONE_4_0,__IPHONE_8_0, \
+ "Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT) instead")
DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
void
-dispatch_queue_set_width(dispatch_queue_t dq, long width); // DEPRECATED
+dispatch_queue_set_width(dispatch_queue_t dq, long width);
/*!
* @function dispatch_queue_create_with_target
* This parameter is optional and may be NULL.
*
* @param attr
- * DISPATCH_QUEUE_SERIAL or DISPATCH_QUEUE_CONCURRENT.
+ * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to
+ * the function dispatch_queue_attr_make_with_qos_class().
*
* @param target
* The target queue for the newly created queue. The target queue is retained.
* This parameter is optional and may be NULL.
*
* @param flags
- * Reserved for future use. Passing any value other than zero may result in
- * a NULL return value.
+ * Pass flags value returned by dispatch_pthread_root_queue_flags_pool_size()
+ * or 0 if unused.
*
* @param attr
* Attributes passed to pthread_create(3) when creating worker pthreads. This
dispatch_queue_t
dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
const pthread_attr_t *attr, dispatch_block_t configure);
+
+/*!
+ * @function dispatch_pthread_root_queue_flags_pool_size
+ *
+ * @abstract
+ * Returns flags argument to pass to dispatch_pthread_root_queue_create() to
+ * specify the maximum size of the pthread pool to use for a pthread root queue.
+ *
+ * @param pool_size
+ * Maximum size of the pthread pool to use for the root queue. The number of
+ * pthreads created for this root queue will never exceed this number but there
+ * is no guarantee that the specified number will be reached.
+ * Pass 0 to specify that a default pool size determined by the system should
+ * be used.
+ *
+ * @result
+ * The flags argument to pass to dispatch_pthread_root_queue_create().
+ */
+DISPATCH_INLINE DISPATCH_ALWAYS_INLINE
+unsigned long
+dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size)
+{
+ #define _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE (0x80000000ul)
+ return (_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE |
+ (unsigned long)pool_size);
+}
+
#endif /* __BLOCKS__ */
/*!
*/
#define DISPATCH_APPLY_CURRENT_ROOT_QUEUE NULL
-#if !TARGET_OS_WIN32
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0)
-DISPATCH_EXPORT const struct dispatch_queue_offsets_s {
- // always add new fields at the end
- const uint16_t dqo_version;
- const uint16_t dqo_label;
- const uint16_t dqo_label_size;
- const uint16_t dqo_flags;
- const uint16_t dqo_flags_size;
- const uint16_t dqo_serialnum;
- const uint16_t dqo_serialnum_size;
- const uint16_t dqo_width;
- const uint16_t dqo_width_size;
- const uint16_t dqo_running;
- const uint16_t dqo_running_size;
-} dispatch_queue_offsets;
-#endif
-
/*!
* @function dispatch_assert_queue
*
* @const DISPATCH_SOURCE_TYPE_VM
* @discussion A dispatch source that monitors virtual memory
* The mask is a mask of desired events from dispatch_source_vm_flags_t.
+ * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead.
*/
#define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm)
-__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3)
+__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3,
+ __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead")
DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm;
/*!
*/
enum {
- DISPATCH_VM_PRESSURE = 0x80000000,
+ DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG(
+ __MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0,
+ "Use DISPATCH_MEMORYSTATUS_PRESSURE_WARN instead") = 0x80000000,
};
/*!
* The system's memory pressure state has changed to warning.
* @constant DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL
* The system's memory pressure state has changed to critical.
+ * @constant DISPATCH_MEMORYSTATUS_LOW_SWAP
+ * The system's memory pressure state has entered the "low swap" condition.
+ * Restricted to the root user.
*/
enum {
- DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL = 0x01,
- DISPATCH_MEMORYSTATUS_PRESSURE_WARN = 0x02,
-#if !TARGET_OS_EMBEDDED
- DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL = 0x04,
-#endif
+ DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x01,
+ DISPATCH_MEMORYSTATUS_PRESSURE_WARN
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x02,
+ DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL
+ __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_8_0) = 0x04,
+ DISPATCH_MEMORYSTATUS_LOW_SWAP
+ __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08,
};
-#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483
-#define DISPATCH_VM_PRESSURE DISPATCH_VNODE_ATTRIB
-#endif
-
__BEGIN_DECLS
/*!
--- /dev/null
+/*
+ * Copyright (c) 2013-2014 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_VOUCHER_ACTIVITY_PRIVATE__
+#define __OS_VOUCHER_ACTIVITY_PRIVATE__
+
+#include <os/base.h>
+#include <os/object.h>
+#if !defined(__DISPATCH_BUILDING_DISPATCH__)
+#include <os/voucher_private.h>
+#endif
+
+#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20140708
+
+#if OS_VOUCHER_WEAK_IMPORT
+#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT
+#else
+#define OS_VOUCHER_EXPORT OS_EXPORT
+#endif
+
+__BEGIN_DECLS
+
+#if OS_VOUCHER_ACTIVITY_SPI
+
+/*!
+ * @group Voucher Activity SPI
+ * SPI intended for libtrace only
+ */
+
+/*!
+ * @typedef voucher_activity_id_t
+ *
+ * @abstract
+ * Opaque activity identifier.
+ *
+ * @discussion
+ * Scalar value type, not reference counted.
+ */
+typedef uint64_t voucher_activity_id_t;
+
+/*!
+ * @enum voucher_activity_tracepoint_type_t
+ *
+ * @abstract
+ * Types of tracepoints.
+ */
+OS_ENUM(voucher_activity_tracepoint_type, uint8_t,
+ voucher_activity_tracepoint_type_release = (1u << 0),
+ voucher_activity_tracepoint_type_debug = (1u << 1),
+ voucher_activity_tracepoint_type_error = (1u << 6) | (1u << 0),
+ voucher_activity_tracepoint_type_fault = (1u << 7) | (1u << 6) | (1u << 0),
+);
+
+/*!
+ * @enum voucher_activity_flag_t
+ *
+ * @abstract
+ * Flags to pass to voucher_activity_start/voucher_activity_start_with_location
+ */
+OS_ENUM(voucher_activity_flag, unsigned long,
+ voucher_activity_flag_default = 0,
+ voucher_activity_flag_force = 0x1,
+);
+
+/*!
+ * @typedef voucher_activity_trace_id_t
+ *
+ * @abstract
+ * Opaque tracepoint identifier.
+ */
+typedef uint64_t voucher_activity_trace_id_t;
+static const uint8_t _voucher_activity_trace_id_type_shift = 40;
+static const uint8_t _voucher_activity_trace_id_code_namespace_shift = 32;
+
+/*!
+ * @function voucher_activity_trace_id
+ *
+ * @abstract
+ * Return tracepoint identifier for specified arguments.
+ *
+ * @param type
+ * Tracepoint type from voucher_activity_tracepoint_type_t.
+ *
+ * @param code_namespace
+ * Namespace of 'code' argument.
+ *
+ * @param code
+ * Tracepoint code.
+ *
+ * @result
+ * Tracepoint identifier.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_INLINE OS_ALWAYS_INLINE
+voucher_activity_trace_id_t
+voucher_activity_trace_id(uint8_t type, uint8_t code_namespace, uint32_t code)
+{
+ return ((voucher_activity_trace_id_t)type <<
+ _voucher_activity_trace_id_type_shift) |
+ ((voucher_activity_trace_id_t)code_namespace <<
+ _voucher_activity_trace_id_code_namespace_shift) |
+ (voucher_activity_trace_id_t)code;
+}
+
+/*!
+ * @function voucher_activity_start
+ *
+ * @abstract
+ * Creates a new activity identifier and marks the current thread as
+ * participating in the activity.
+ *
+ * @discussion
+ * As part of voucher transport, activities are automatically propagated by the
+ * system to other threads and processes (across IPC).
+ *
+ * Activities persist as long as any threads in any process are marked as
+ * participating. There may be many calls to voucher_activity_end()
+ * corresponding to one call to voucher_activity_start().
+ *
+ * @param trace_id
+ * Tracepoint identifier returned by voucher_activity_trace_id(), intended for
+ * identification of the automatic tracepoint generated as part of creating the
+ * new activity.
+ *
+ * @param flags
+ * Pass voucher_activity_flag_force to indicate that existing activities
+ * on the current thread should not be inherited and that a new toplevel
+ * activity should be created.
+ *
+ * @result
+ * A new activity identifier.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
+voucher_activity_id_t
+voucher_activity_start(voucher_activity_trace_id_t trace_id,
+ voucher_activity_flag_t flags);
+
+/*!
+ * @function voucher_activity_start_with_location
+ *
+ * @abstract
+ * Creates a new activity identifier and marks the current thread as
+ * participating in the activity.
+ *
+ * @discussion
+ * As part of voucher transport, activities are automatically propagated by the
+ * system to other threads and processes (across IPC).
+ *
+ * Activities persist as long as any threads in any process are marked as
+ * participating. There may be many calls to voucher_activity_end()
+ * corresponding to one call to voucher_activity_start_with_location().
+ *
+ * @param trace_id
+ * Tracepoint identifier returned by voucher_activity_trace_id(), intended for
+ * identification of the automatic tracepoint generated as part of creating the
+ * new activity.
+ *
+ * @param location
+ * Location identifier for the automatic tracepoint generated as part of
+ * creating the new activity.
+ *
+ * @param flags
+ * Pass voucher_activity_flag_force to indicate that existing activities
+ * on the current thread should not be inherited and that a new toplevel
+ * activity should be created.
+ *
+ * @result
+ * A new activity identifier.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
+voucher_activity_id_t
+voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id,
+ uint64_t location, voucher_activity_flag_t flags);
+
+/*!
+ * @function voucher_activity_end
+ *
+ * @abstract
+ * Unmarks the current thread if it is marked as participating in the activity
+ * with the specified identifier.
+ *
+ * @discussion
+ * Activities persist as long as any threads in any process are marked as
+ * participating. There may be many calls to voucher_activity_end()
+ * corresponding to one call to voucher_activity_start() or
+ * voucher_activity_start_with_location().
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_activity_end(voucher_activity_id_t activity_id);
+
+/*!
+ * @function voucher_get_activities
+ *
+ * @abstract
+ * Returns the list of activity identifiers that the current thread is marked
+ * with.
+ *
+ * @param entries
+ * Pointer to an array of activity identifiers to be filled in.
+ *
+ * @param count
+ * Pointer to the requested number of activity identifiers.
+ * On output will be filled with the number of activities that are available.
+ *
+ * @result
+ * Number of activity identifiers written to 'entries'.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+unsigned int
+voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count);
+
+/*!
+ * @group Voucher Activity Trace SPI
+ * SPI intended for libtrace only
+ */
+
+/*!
+ * @function voucher_activity_get_namespace
+ *
+ * @abstract
+ * Returns the namespace of the current activity.
+ *
+ * @result
+ * The namespace of the current activity (if any).
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+uint8_t
+voucher_activity_get_namespace(void);
+
+/*!
+ * @function voucher_activity_trace
+ *
+ * @abstract
+ * Add a tracepoint to trace buffer of the current activity.
+ *
+ * @param trace_id
+ * Tracepoint identifier returned by voucher_activity_trace_id()
+ *
+ * @param location
+ * Tracepoint location.
+ *
+ * @param buffer
+ * Pointer to packed buffer of tracepoint data.
+ *
+ * @param length
+ * Length of data at 'buffer'.
+ *
+ * @result
+ * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+uint64_t
+voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location,
+ void *buffer, size_t length);
+
+/*!
+ * @function voucher_activity_trace_args
+ *
+ * @abstract
+ * Add a tracepoint to trace buffer of the current activity, recording
+ * specified arguments passed in registers.
+ *
+ * @param trace_id
+ * Tracepoint identifier returned by voucher_activity_trace_id()
+ *
+ * @param location
+ * Tracepoint location.
+ *
+ * @param arg1
+ * Argument to be recorded in tracepoint data.
+ *
+ * @param arg2
+ * Argument to be recorded in tracepoint data.
+ *
+ * @param arg3
+ * Argument to be recorded in tracepoint data.
+ *
+ * @param arg4
+ * Argument to be recorded in tracepoint data.
+ *
+ * @result
+ * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+uint64_t
+voucher_activity_trace_args(voucher_activity_trace_id_t trace_id,
+ uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
+ uintptr_t arg4);
+
+/*!
+ * @group Voucher Activity Mode SPI
+ * SPI intended for libtrace only
+ */
+
+/*!
+ * @enum voucher_activity_mode_t
+ *
+ * @abstract
+ * Voucher activity mode.
+ *
+ * @discussion
+ * Configure at process start by setting the OS_ACTIVITY_MODE environment
+ * variable.
+ */
+OS_ENUM(voucher_activity_mode, unsigned long,
+ voucher_activity_mode_disable = 0,
+ voucher_activity_mode_release = (1u << 0),
+ voucher_activity_mode_debug = (1u << 1),
+ voucher_activity_mode_stream = (1u << 2),
+);
+
+/*!
+ * @function voucher_activity_get_mode
+ *
+ * @abstract
+ * Return current mode of voucher activity subsystem.
+ *
+ * @result
+ * Value from voucher_activity_mode_t enum.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
+voucher_activity_mode_t
+voucher_activity_get_mode(void);
+
+/*!
+ * @function voucher_activity_set_mode_4libtrace(void)
+ *
+ * @abstract
+ * Set the current mode of voucher activity subsystem.
+ *
+ * @param mode
+ * The new mode.
+ *
+ * Note that the new mode will take effect soon, but not immediately.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode);
+
+/*!
+ * @group Voucher Activity Metadata SPI
+ * SPI intended for libtrace only
+ */
+
+/*!
+ * @function voucher_activity_get_metadata_buffer
+ *
+ * @abstract
+ * Return address and length of buffer in the process trace memory area
+ * reserved for libtrace metadata.
+ *
+ * @param length
+ * Pointer to size_t variable, filled with length of metadata buffer.
+ *
+ * @result
+ * Address of metadata buffer.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL
+void*
+voucher_activity_get_metadata_buffer(size_t *length);
+
+#endif // OS_VOUCHER_ACTIVITY_SPI
+
+#if OS_VOUCHER_ACTIVITY_BUFFER_SPI
+
+/*!
+ * @group Voucher Activity Tracepoint SPI
+ * SPI intended for diagnosticd only
+ */
+
+OS_ENUM(_voucher_activity_tracepoint_flag, uint16_t,
+ _voucher_activity_trace_flag_buffer_empty = 0,
+ _voucher_activity_trace_flag_tracepoint = (1u << 0),
+ _voucher_activity_trace_flag_tracepoint_args = (1u << 1),
+ _voucher_activity_trace_flag_wide_first = (1u << 6),
+ _voucher_activity_trace_flag_wide_second = (1u << 6) | (1u << 7),
+ _voucher_activity_trace_flag_start = (1u << 8),
+ _voucher_activity_trace_flag_end = (1u << 8) | (1u << 9),
+ _voucher_activity_trace_flag_libdispatch = (1u << 13),
+ _voucher_activity_trace_flag_activity = (1u << 14),
+ _voucher_activity_trace_flag_buffer_header = (1u << 15),
+);
+
+// for tracepoints with _voucher_activity_trace_flag_libdispatch
+OS_ENUM(_voucher_activity_tracepoint_namespace, uint8_t,
+ _voucher_activity_tracepoint_namespace_ipc = 0x1
+);
+OS_ENUM(_voucher_activity_tracepoint_code, uint32_t,
+ _voucher_activity_tracepoint_namespace_ipc_send = 0x1,
+ _voucher_activity_tracepoint_namespace_ipc_receive = 0x2,
+);
+
+typedef struct _voucher_activity_tracepoint_s {
+	uint16_t vat_flags; // _voucher_activity_tracepoint_flag_t
+ uint8_t vat_type; // voucher_activity_tracepoint_type_t
+ uint8_t vat_namespace; // namespace for tracepoint code
+ uint32_t vat_code; // tracepoint code
+ uint64_t vat_thread; // pthread_t
+ uint64_t vat_timestamp; // absolute time
+ uint64_t vat_location; // tracepoint PC
+ uint64_t vat_data[4]; // trace data
+} *_voucher_activity_tracepoint_t;
+
+/*!
+ * @group Voucher Activity Buffer Internals
+ * SPI intended for diagnosticd only
+ * Layout of structs is subject to change without notice
+ */
+
+#include <sys/queue.h>
+#include <atm/atm_types.h>
+#include <os/lock_private.h>
+
+static const atm_subaid32_t _voucher_default_activity_subid =
+ ATM_SUBAID32_MAX-1;
+
+static const size_t _voucher_activity_buffer_size = 4096;
+static const size_t _voucher_activity_tracepoints_per_buffer =
+ _voucher_activity_buffer_size /
+ sizeof(struct _voucher_activity_tracepoint_s);
+typedef uint8_t _voucher_activity_buffer_t[_voucher_activity_buffer_size];
+
+struct _voucher_activity_self_metadata_s {
+ struct _voucher_activity_metadata_opaque_s *vasm_baseaddr;
+};
+typedef struct _voucher_activity_metadata_opaque_s {
+ _voucher_activity_buffer_t vam_kernel_metadata;
+ _voucher_activity_buffer_t vam_client_metadata;
+ union {
+ struct _voucher_activity_self_metadata_s vam_self_metadata;
+ _voucher_activity_buffer_t vam_self_metadata_opaque;
+ };
+} *_voucher_activity_metadata_opaque_t;
+
+typedef os_lock_handoff_s _voucher_activity_lock_s;
+
+typedef struct _voucher_atm_s {
+ int32_t volatile vatm_refcnt;
+ mach_voucher_t vatm_kvoucher;
+ atm_aid_t vatm_id;
+ atm_mailbox_offset_t vatm_mailbox_offset;
+ TAILQ_ENTRY(_voucher_atm_s) vatm_list;
+#if __LP64__
+ uintptr_t vatm_pad[3];
+ // cacheline
+#endif
+ _voucher_activity_lock_s vatm_activities_lock;
+ TAILQ_HEAD(_voucher_atm_activities_s, _voucher_activity_s) vatm_activities;
+ TAILQ_HEAD(, _voucher_activity_s) vatm_used_activities;
+} *_voucher_atm_t;
+
+// must match layout of _voucher_activity_tracepoint_s
+typedef struct _voucher_activity_buffer_header_s {
+ uint16_t vabh_flags; // _voucher_activity_trace_flag_buffer_header
+ uint8_t vabh_unused[6];
+ uint64_t vabh_thread;
+ uint64_t vabh_timestamp;
+ uint32_t volatile vabh_next_tracepoint_idx;
+ uint32_t vabh_sequence_no;
+ voucher_activity_id_t vabh_activity_id;
+ uint64_t vabh_reserved;
+ TAILQ_ENTRY(_voucher_activity_buffer_header_s) vabh_list;
+} *_voucher_activity_buffer_header_t;
+
+// must match layout of _voucher_activity_buffer_header_s
+typedef struct _voucher_activity_s {
+ // first tracepoint entry
+ // must match layout of _voucher_activity_tracepoint_s
+ uint16_t va_flags; // _voucher_activity_trace_flag_buffer_header |
+ // _voucher_activity_trace_flag_activity |
+ // _voucher_activity_trace_flag_start |
+ // _voucher_activity_trace_flag_wide_first
+ uint8_t va_type;
+ uint8_t va_namespace;
+ uint32_t va_code;
+ uint64_t va_thread;
+ uint64_t va_timestamp;
+ uint32_t volatile vabh_next_tracepoint_idx;
+ uint32_t volatile va_max_sequence_no;
+ voucher_activity_id_t va_id;
+ int32_t volatile va_use_count;
+ uint32_t va_buffer_limit;
+ TAILQ_HEAD(_voucher_activity_buffer_list_s,
+ _voucher_activity_buffer_header_s) va_buffers;
+#if !__LP64__
+ uint64_t va_pad;
+#endif
+
+ // second tracepoint entry
+ // must match layout of _voucher_activity_tracepoint_s
+ uint16_t va_flags2;
+ uint8_t va_unused2[2];
+ int32_t volatile va_refcnt;
+ uint64_t va_location;
+ _voucher_activity_buffer_header_t volatile va_current_buffer;
+ _voucher_atm_t va_atm;
+ _voucher_activity_lock_s va_buffers_lock;
+ uintptr_t va_pad2[2];
+
+#if __LP64__
+ // third tracepoint entry
+ // must match layout of _voucher_activity_tracepoint_s
+ uint16_t va_flags3;
+ uint8_t va_unused3[6];
+ uintptr_t va_pad3;
+#endif
+ TAILQ_ENTRY(_voucher_activity_s) va_list;
+ TAILQ_ENTRY(_voucher_activity_s) va_atm_list;
+ TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list;
+} *_voucher_activity_t;
+
+#endif // OS_VOUCHER_ACTIVITY_BUFFER_SPI
+
+__END_DECLS
+
+#endif // __OS_VOUCHER_ACTIVITY_PRIVATE__
--- /dev/null
+/*
+ * Copyright (c) 2013-2014 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_VOUCHER_PRIVATE__
+#define __OS_VOUCHER_PRIVATE__
+
+#include <os/base.h>
+#include <os/object.h>
+
+#define OS_VOUCHER_SPI_VERSION 20140425
+
+#if OS_VOUCHER_WEAK_IMPORT
+#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT
+#else
+#define OS_VOUCHER_EXPORT OS_EXPORT
+#endif
+
+__BEGIN_DECLS
+
+/*!
+ * @group Voucher Transport SPI
+ * SPI intended for clients that need to transport vouchers.
+ */
+
+/*!
+ * @typedef voucher_t
+ *
+ * @abstract
+ * Vouchers are immutable sets of key/value attributes that can be adopted on a
+ * thread in the current process or sent to another process.
+ *
+ * @discussion
+ * Voucher objects are os_objects (c.f. <os/object.h>). They are memory-managed
+ * with the os_retain()/os_release() functions or -[retain]/-[release] methods.
+ */
+#if OS_OBJECT_USE_OBJC
+OS_OBJECT_DECL(voucher);
+#else
+typedef struct voucher_s *voucher_t;
+#endif
+
+/*!
+ * @function voucher_adopt
+ *
+ * @abstract
+ * Adopt the specified voucher on the current thread and return the voucher
+ * that had been adopted previously.
+ *
+ * @discussion
+ * Adopted vouchers are automatically carried forward by the system to other
+ * threads and processes (across IPC).
+ *
+ * Consumes a reference to the specified voucher.
+ * Returns a reference to the previous voucher.
+ *
+ * @param voucher
+ * The voucher object to adopt on the current thread.
+ *
+ * @result
+ * The previously adopted voucher object.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE
+OS_NOTHROW
+voucher_t
+voucher_adopt(voucher_t voucher OS_OBJECT_CONSUMED);
+
+/*!
+ * @function voucher_copy
+ *
+ * @abstract
+ * Returns a reference to the voucher that had been adopted previously on the
+ * current thread (or carried forward by the system).
+ *
+ * @result
+ * The currently adopted voucher object.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_copy(void);
+
+/*!
+ * @function voucher_copy_without_importance
+ *
+ * @abstract
+ * Returns a reference to a voucher object with all the properties of the
+ * voucher that had been adopted previously on the current thread, but
+ * without the importance properties that are frequently attached to vouchers
+ * carried with IPC requests. Importance properties may elevate the scheduling
+ * of threads that adopt or retain the voucher while they service the request.
+ * See xpc_transaction_begin(3) for further details on importance.
+ *
+ * @result
+ * A copy of the currently adopted voucher object, with importance removed.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_copy_without_importance(void);
+
+/*!
+ * @function voucher_replace_default_voucher
+ *
+ * @abstract
+ * Replace process attributes of default voucher (used for IPC by this process
+ * when no voucher is adopted on the sending thread) with the process attributes
+ * of the voucher adopted on the current thread.
+ *
+ * @discussion
+ * This allows a daemon to indicate from the context of an incoming IPC request
+ * that all future outgoing IPC from the process should be marked as acting
+ * "on behalf of" the sending process of the current IPC request (as long as the
+ * thread sending that outgoing IPC is not itself in the direct context of an
+ * IPC request, i.e. no voucher is adopted).
+ *
+ * If no voucher is adopted on the current thread or the current voucher does
+ * not contain any process attributes, the default voucher is reset to the
+ * default process attributes for the current process.
+ *
+ * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_replace_default_voucher(void);
+
+/*!
+ * @function voucher_decrement_importance_count4CF
+ *
+ * @abstract
+ * Decrement external importance count of the mach voucher in the specified
+ * voucher object.
+ *
+ * @discussion
+ * This is only intended for use by CoreFoundation to explicitly manage the
+ * App Nap state of an application following reception of a de-nap IPC message.
+ *
+ * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_decrement_importance_count4CF(voucher_t voucher);
+
+/*!
+ * @group Dispatch block objects
+ */
+
+#ifndef __DISPATCH_BUILDING_DISPATCH__
+#include <dispatch/dispatch.h>
+#endif /* !__DISPATCH_BUILDING_DISPATCH__ */
+
+/*!
+ * @typedef dispatch_block_flags_t
+ * SPI Flags to pass to the dispatch_block_create* functions.
+ *
+ * @const DISPATCH_BLOCK_NO_VOUCHER
+ * Flag indicating that a dispatch block object should not be assigned a voucher
+ * object. If invoked directly, the block object will be executed with the
+ * voucher adopted on the calling thread. If the block object is submitted to a
+ * queue, this replaces the default behavior of associating the submitted block
+ * instance with the voucher adopted at the time of submission.
+ * This flag is ignored if a specific voucher object is assigned with the
+ * dispatch_block_create_with_voucher* functions, and is equivalent to passing
+ * the NULL voucher to these functions.
+ */
+#define DISPATCH_BLOCK_NO_VOUCHER (0x40)
+
+/*!
+ * @function dispatch_block_create_with_voucher
+ *
+ * @abstract
+ * Create a new dispatch block object on the heap from an existing block and
+ * the given flags, and assign it the specified voucher object.
+ *
+ * @discussion
+ * The provided block is Block_copy'ed to the heap, it and the specified voucher
+ * object are retained by the newly created dispatch block object.
+ *
+ * The returned dispatch block object is intended to be submitted to a dispatch
+ * queue with dispatch_async() and related functions, but may also be invoked
+ * directly. Both operations can be performed an arbitrary number of times but
+ * only the first completed execution of a dispatch block object can be waited
+ * on with dispatch_block_wait() or observed with dispatch_block_notify().
+ *
+ * The returned dispatch block will be executed with the specified voucher
+ * adopted for the duration of the block body. If the NULL voucher is passed,
+ * the block will be executed with the voucher adopted on the calling thread, or
+ * with no voucher if the DISPATCH_BLOCK_DETACHED flag was also provided.
+ *
+ * If the returned dispatch block object is submitted to a dispatch queue, the
+ * submitted block instance will be associated with the QOS class current at the
+ * time of submission, unless one of the following flags assigned a specific QOS
+ * class (or no QOS class) at the time of block creation:
+ * - DISPATCH_BLOCK_ASSIGN_CURRENT
+ * - DISPATCH_BLOCK_NO_QOS_CLASS
+ * - DISPATCH_BLOCK_DETACHED
+ * The QOS class the block object will be executed with also depends on the QOS
+ * class assigned to the queue and which of the following flags was specified or
+ * defaulted to:
+ * - DISPATCH_BLOCK_INHERIT_QOS_CLASS (default for asynchronous execution)
+ * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS (default for synchronous execution)
+ * See description of dispatch_block_flags_t for details.
+ *
+ * If the returned dispatch block object is submitted directly to a serial queue
+ * and is configured to execute with a specific QOS class, the system will make
+ * a best effort to apply the necessary QOS overrides to ensure that blocks
+ * submitted earlier to the serial queue are executed at that same QOS class or
+ * higher.
+ *
+ * @param flags
+ * Configuration flags for the block object.
+ * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t
+ * results in NULL being returned.
+ *
+ * @param voucher
+ * A voucher object or NULL. Passing NULL is equivalent to specifying the
+ * DISPATCH_BLOCK_NO_VOUCHER flag.
+ *
+ * @param block
+ * The block to create the dispatch block object from.
+ *
+ * @result
+ * The newly created dispatch block object, or NULL.
+ * When not building with Objective-C ARC, must be released with a -[release]
+ * message or the Block_release() function.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK
+DISPATCH_WARN_RESULT DISPATCH_NOTHROW
+dispatch_block_t
+dispatch_block_create_with_voucher(dispatch_block_flags_t flags,
+ voucher_t voucher, dispatch_block_t block);
+
+/*!
+ * @function dispatch_block_create_with_voucher_and_qos_class
+ *
+ * @abstract
+ * Create a new dispatch block object on the heap from an existing block and
+ * the given flags, and assign it the specified voucher object, QOS class and
+ * relative priority.
+ *
+ * @discussion
+ * The provided block is Block_copy'ed to the heap, it and the specified voucher
+ * object are retained by the newly created dispatch block object.
+ *
+ * The returned dispatch block object is intended to be submitted to a dispatch
+ * queue with dispatch_async() and related functions, but may also be invoked
+ * directly. Both operations can be performed an arbitrary number of times but
+ * only the first completed execution of a dispatch block object can be waited
+ * on with dispatch_block_wait() or observed with dispatch_block_notify().
+ *
+ * The returned dispatch block will be executed with the specified voucher
+ * adopted for the duration of the block body. If the NULL voucher is passed,
+ * the block will be executed with the voucher adopted on the calling thread, or
+ * with no voucher if the DISPATCH_BLOCK_DETACHED flag was also provided.
+ *
+ * If invoked directly, the returned dispatch block object will be executed with
+ * the assigned QOS class as long as that does not result in a lower QOS class
+ * than what is current on the calling thread.
+ *
+ * If the returned dispatch block object is submitted to a dispatch queue, the
+ * QOS class it will be executed with depends on the QOS class assigned to the
+ * block, the QOS class assigned to the queue and which of the following flags
+ * was specified or defaulted to:
+ * - DISPATCH_BLOCK_INHERIT_QOS_CLASS: default for asynchronous execution
+ * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS: default for synchronous execution
+ * See description of dispatch_block_flags_t for details.
+ *
+ * If the returned dispatch block object is submitted directly to a serial queue
+ * and is configured to execute with a specific QOS class, the system will make
+ * a best effort to apply the necessary QOS overrides to ensure that blocks
+ * submitted earlier to the serial queue are executed at that same QOS class or
+ * higher.
+ *
+ * @param flags
+ * Configuration flags for the block object.
+ * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t
+ * results in NULL being returned.
+ *
+ * @param voucher
+ * A voucher object or NULL. Passing NULL is equivalent to specifying the
+ * DISPATCH_BLOCK_NO_VOUCHER flag.
+ *
+ * @param qos_class
+ * A QOS class value:
+ * - QOS_CLASS_USER_INTERACTIVE
+ * - QOS_CLASS_USER_INITIATED
+ * - QOS_CLASS_DEFAULT
+ * - QOS_CLASS_UTILITY
+ * - QOS_CLASS_BACKGROUND
+ * - QOS_CLASS_UNSPECIFIED
+ * Passing QOS_CLASS_UNSPECIFIED is equivalent to specifying the
+ * DISPATCH_BLOCK_NO_QOS_CLASS flag. Passing any other value results in NULL
+ * being returned.
+ *
+ * @param relative_priority
+ * A relative priority within the QOS class. This value is a negative
+ * offset from the maximum supported scheduler priority for the given class.
+ * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY
+ * results in NULL being returned.
+ *
+ * @param block
+ * The block to create the dispatch block object from.
+ *
+ * @result
+ * The newly created dispatch block object, or NULL.
+ * When not building with Objective-C ARC, must be released with a -[release]
+ * message or the Block_release() function.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK
+DISPATCH_WARN_RESULT DISPATCH_NOTHROW
+dispatch_block_t
+dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags,
+ voucher_t voucher, dispatch_qos_class_t qos_class,
+ int relative_priority, dispatch_block_t block);
+
+/*!
+ * @group Voucher Mach SPI
+ * SPI intended for clients that need to interact with mach messages or mach
+ * voucher ports directly.
+ */
+
+#include <mach/mach.h>
+
+/*!
+ * @function voucher_create_with_mach_msg
+ *
+ * @abstract
+ * Creates a new voucher object from a mach message carrying a mach voucher port
+ *
+ * @discussion
+ * Ownership of the mach voucher port in the message is transferred to the new
+ * voucher object and the message header mach voucher field is cleared.
+ *
+ * @param msg
+ * The mach message to query.
+ *
+ * @result
+ * The newly created voucher object or NULL if the message was not carrying a
+ * mach voucher.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_create_with_mach_msg(mach_msg_header_t *msg);
+
+__END_DECLS
+
+#endif // __OS_VOUCHER_PRIVATE__
+
+#if (OS_VOUCHER_ACTIVITY_SPI || OS_VOUCHER_ACTIVITY_BUFFER_SPI) && \
+ !defined(__DISPATCH_BUILDING_DISPATCH__) && \
+ !defined(__OS_VOUCHER_ACTIVITY_PRIVATE__)
+#include <os/voucher_activity_private.h>
+#endif
// (the base of c's magazine == the base of c's page)
// => c is in first page of magazine
return (((uintptr_t)c & MAGAZINE_MASK) ==
- ((uintptr_t)c & ~(uintptr_t)PAGE_MASK));
+ ((uintptr_t)c & ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK));
#else
(void)c;
return false;
if (fastpath(continuation_is_in_first_page(c))) {
return NULL;
}
- void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)PAGE_MASK);
+ void *page_base = (void *)((uintptr_t)c &
+ ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK);
#if DISPATCH_DEBUG
struct dispatch_magazine_s *m = magazine_for_continuation(c);
if (slowpath(page_base < (void *)&m->conts)) {
// continuation is "uninitialized", so the caller shouldn't
// load from it before storing, so we don't need to guard
// against reordering those loads.
-#if defined(__x86_64__) // TODO rdar://problem/11477843
- dispatch_assert(sizeof(*bitmap) == sizeof(uint64_t));
- return dispatch_atomic_set_first_bit((volatile uint64_t *)bitmap,max_index);
-#else
- dispatch_assert(sizeof(*bitmap) == sizeof(uint32_t));
- return dispatch_atomic_set_first_bit((volatile uint32_t *)bitmap,max_index);
-#endif
+ dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long));
+ return dispatch_atomic_set_first_bit(bitmap,max_index);
}
DISPATCH_ALWAYS_INLINE
const bitmap_t mask = BITMAP_C(1) << index;
bitmap_t b;
- b = *bitmap;
if (exclusively == CLEAR_EXCLUSIVELY) {
- if (slowpath((b & mask) == 0)) {
+ if (slowpath((*bitmap & mask) == 0)) {
DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
}
}
}
#if DISPATCH_DEBUG
// Double-check our math.
- dispatch_assert(aligned_region % PAGE_SIZE == 0);
- dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
+ dispatch_assert(aligned_region % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
+ dispatch_assert(aligned_region % vm_kernel_page_size == 0);
+ dispatch_assert(aligned_region_end % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
+ dispatch_assert(aligned_region_end % vm_kernel_page_size == 0);
dispatch_assert(aligned_region_end > aligned_region);
- dispatch_assert(top_slop_len % PAGE_SIZE == 0);
- dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
+ dispatch_assert(top_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
+ dispatch_assert(bottom_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
dispatch_assert(aligned_region_end + top_slop_len == region_end);
dispatch_assert(region + bottom_slop_len == aligned_region);
dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
// last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]);
// Scribble to expose use-after-free bugs
// madvise (syscall) flushes these stores
- memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, PAGE_SIZE);
+ memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif
- (void)dispatch_assume_zero(madvise(page, PAGE_SIZE, MADV_FREE));
+ (void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
+ MADV_FREE));
unlock:
while (last_locked > 1) {
// self-aligned.
dispatch_assert(offsetof(struct dispatch_magazine_s, conts) %
(CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0);
- dispatch_assert(offsetof(struct dispatch_magazine_s, conts) == PAGE_SIZE);
+ dispatch_assert(offsetof(struct dispatch_magazine_s, conts) ==
+ DISPATCH_ALLOCATOR_PAGE_SIZE);
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
// The continuations in the first page should actually fit within the first
// page.
- dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) < PAGE_SIZE);
+ dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) <
+ DISPATCH_ALLOCATOR_PAGE_SIZE);
dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) %
DISPATCH_CONTINUATION_SIZE == 0);
dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) +
- sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == PAGE_SIZE);
+ sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
+ DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
}
-#else
+#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \
+ || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
static inline void _dispatch_alloc_init(void) {}
#endif
#if DISPATCH_ALLOCATOR
// Configuration here!
-#define NUM_CPU _dispatch_hw_config.cc_max_logical
+#define NUM_CPU dispatch_hw_config(logical_cpus)
#define MAGAZINES_PER_HEAP (NUM_CPU)
// Do you care about compaction or performance?
#define PACK_FIRST_PAGE_WITH_CONTINUATIONS 0
#endif
+#ifndef PAGE_MAX_SIZE
+#define PAGE_MAX_SIZE PAGE_SIZE
+#endif
+#ifndef PAGE_MAX_MASK
+#define PAGE_MAX_MASK PAGE_MASK
+#endif
+#define DISPATCH_ALLOCATOR_PAGE_SIZE PAGE_MAX_SIZE
+#define DISPATCH_ALLOCATOR_PAGE_MASK PAGE_MAX_MASK
+
+
#if TARGET_OS_EMBEDDED
#define PAGES_PER_MAGAZINE 64
#else
#endif
// Use the largest type your platform is comfortable doing atomic ops with.
-#if defined(__x86_64__) // TODO: rdar://11477843
+// TODO: rdar://11477843
typedef unsigned long bitmap_t;
+#if defined(__LP64__)
#define BYTES_PER_BITMAP 8
#else
-typedef uint32_t bitmap_t;
#define BYTES_PER_BITMAP 4
#endif
#define CONTINUATIONS_PER_BITMAP (BYTES_PER_BITMAP * 8)
#define BITMAPS_PER_SUPERMAP (BYTES_PER_SUPERMAP * 8)
-#define BYTES_PER_MAGAZINE (PAGES_PER_MAGAZINE * PAGE_SIZE)
+#define BYTES_PER_MAGAZINE (PAGES_PER_MAGAZINE * DISPATCH_ALLOCATOR_PAGE_SIZE)
#define CONSUMED_BYTES_PER_BITMAP (BYTES_PER_BITMAP + \
(DISPATCH_CONTINUATION_SIZE * CONTINUATIONS_PER_BITMAP))
#define BYTES_PER_HEAP (BYTES_PER_MAGAZINE * MAGAZINES_PER_HEAP)
-#define BYTES_PER_PAGE PAGE_SIZE
+#define BYTES_PER_PAGE DISPATCH_ALLOCATOR_PAGE_SIZE
#define CONTINUATIONS_PER_PAGE (BYTES_PER_PAGE / DISPATCH_CONTINUATION_SIZE)
#define BITMAPS_PER_PAGE (CONTINUATIONS_PER_PAGE / CONTINUATIONS_PER_BITMAP)
(BYTES_LEFT_IN_FIRST_PAGE / CONSUMED_BYTES_PER_BITMAP)
#define REMAINDER_IN_FIRST_PAGE (BYTES_LEFT_IN_FIRST_PAGE - \
(FULL_BITMAPS_IN_FIRST_PAGE * CONSUMED_BYTES_PER_BITMAP) - \
- (FULL_BITMAPS_IN_FIRST_PAGE ? 0 : ROUND_UP_TO_CONTINUATION_SIZE(BYTES_PER_BITMAP)))
+ (FULL_BITMAPS_IN_FIRST_PAGE ? 0 : \
+ ROUND_UP_TO_CONTINUATION_SIZE(BYTES_PER_BITMAP)))
#define REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE \
(REMAINDER_IN_FIRST_PAGE / DISPATCH_CONTINUATION_SIZE)
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_apply_invoke2(void *ctxt)
+_dispatch_apply_invoke2(void *ctxt, bool redirect)
{
dispatch_apply_t da = (dispatch_apply_t)ctxt;
size_t const iter = da->da_iterations;
// da_dc is only safe to access once the 'index lock' has been acquired
dispatch_apply_function_t const func = (void *)da->da_dc->dc_func;
void *const da_ctxt = da->da_dc->dc_ctxt;
+ dispatch_queue_t dq = da->da_dc->dc_data;
_dispatch_perfmon_workitem_dec(); // this unit executes many items
size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key);
_dispatch_thread_setspecific(dispatch_apply_key, (void*)da->da_nested);
+ dispatch_queue_t old_dq;
+ pthread_priority_t old_dp;
+ if (redirect) {
+ old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
+ _dispatch_thread_setspecific(dispatch_queue_key, dq);
+ old_dp = _dispatch_set_defaultpriority(dq->dq_priority);
+ }
+
// Striding is the responsibility of the caller.
do {
_dispatch_client_callout2(da_ctxt, idx, func);
done++;
idx = dispatch_atomic_inc_orig2o(da, da_index, relaxed);
} while (fastpath(idx < iter));
+
+ if (redirect) {
+ _dispatch_reset_defaultpriority(old_dp);
+ _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
+ }
_dispatch_thread_setspecific(dispatch_apply_key, (void*)nested);
// The thread that finished the last workitem wakes up the possibly waiting
void
_dispatch_apply_invoke(void *ctxt)
{
- _dispatch_apply_invoke2(ctxt);
+ _dispatch_apply_invoke2(ctxt, false);
}
DISPATCH_NOINLINE
void
_dispatch_apply_redirect_invoke(void *ctxt)
{
- dispatch_apply_t da = (dispatch_apply_t)ctxt;
- dispatch_queue_t old_dq;
- old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
-
- _dispatch_thread_setspecific(dispatch_queue_key, da->da_dc->dc_data);
- _dispatch_apply_invoke2(ctxt);
- _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
+ _dispatch_apply_invoke2(ctxt, true);
}
static void
next->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
next->dc_func = func;
next->dc_ctxt = da;
+ _dispatch_continuation_voucher_set(next, 0);
+ _dispatch_continuation_priority_set(next, 0, 0);
next->do_next = head;
head = next;
_dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore();
da->da_sema = sema;
- _dispatch_queue_push_list(dq, head, tail, continuation_cnt);
+ _dispatch_queue_push_list(dq, head, tail, head->dc_priority,
+ continuation_cnt);
// Call the first element directly
_dispatch_apply_invoke(da);
_dispatch_perfmon_workitem_inc();
if (slowpath(iterations == 0)) {
return;
}
- uint32_t thr_cnt = _dispatch_hw_config.cc_max_active;
+ uint32_t thr_cnt = dispatch_hw_config(active_cpus);
size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key);
if (!slowpath(nested)) {
nested = iterations;
dispatch_queue_t old_dq;
old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) {
- dq = old_dq ? old_dq : _dispatch_get_root_queue(0, 0);
+ dq = old_dq ? old_dq : _dispatch_get_root_queue(
+ _DISPATCH_QOS_CLASS_DEFAULT, false);
while (slowpath(dq->do_targetq)) {
dq = dq->do_targetq;
}
dispatch_async_f(tq, ctxt, func); \
} \
if (tq) { \
- _dispatch_release(tq); \
+ _os_object_release_internal((_os_object_t)tq); \
}
- (void)dealloc {
- (void)_setTargetQueue:(dispatch_queue_t)queue {
struct dispatch_data_s *dd = (void*)self;
- _dispatch_retain(queue);
+ _os_object_retain_internal((_os_object_t)queue);
dispatch_queue_t prev;
prev = dispatch_atomic_xchg2o(dd, do_targetq, queue, release);
- if (prev) _dispatch_release(prev);
+ if (prev) _os_object_release_internal((_os_object_t)prev);
}
- (NSString *)debugDescription {
pthread_key_t dispatch_cache_key;
pthread_key_t dispatch_io_key;
pthread_key_t dispatch_apply_key;
+pthread_key_t dispatch_defaultpriority_key;
#if DISPATCH_INTROSPECTION
pthread_key_t dispatch_introspection_key;
#elif DISPATCH_PERF_MON
#endif
#endif // !DISPATCH_USE_DIRECT_TSD
-struct _dispatch_hw_config_s _dispatch_hw_config;
+#if VOUCHER_USE_MACH_VOUCHER
+dispatch_once_t _voucher_task_mach_voucher_pred;
+mach_voucher_t _voucher_task_mach_voucher;
+_voucher_activity_t _voucher_activity_default;
+#endif
+voucher_activity_mode_t _voucher_activity_mode;
+int _dispatch_set_qos_class_enabled;
+
+
+DISPATCH_NOINLINE
+voucher_activity_mode_t
+voucher_activity_get_mode(void)
+{
+ return _voucher_activity_mode;
+}
+
+void
+voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode)
+{
+ if (_voucher_activity_disabled()) return;
+ _voucher_activity_mode = mode;
+}
+
+DISPATCH_HW_CONFIG();
bool _dispatch_safe_fork = true, _dispatch_child_of_unsafe_fork;
DISPATCH_NOINLINE
return !_dispatch_safe_fork;
}
-
DISPATCH_NOINLINE
bool
_dispatch_is_fork_of_multithreaded_parent(void)
}
const struct dispatch_queue_offsets_s dispatch_queue_offsets = {
- .dqo_version = 4,
+ .dqo_version = 5,
.dqo_label = offsetof(struct dispatch_queue_s, dq_label),
.dqo_label_size = sizeof(((dispatch_queue_t)NULL)->dq_label),
.dqo_flags = 0,
.dqo_flags_size = 0,
- .dqo_width = offsetof(struct dispatch_queue_s, dq_width),
- .dqo_width_size = sizeof(((dispatch_queue_t)NULL)->dq_width),
.dqo_serialnum = offsetof(struct dispatch_queue_s, dq_serialnum),
.dqo_serialnum_size = sizeof(((dispatch_queue_t)NULL)->dq_serialnum),
+ .dqo_width = offsetof(struct dispatch_queue_s, dq_width),
+ .dqo_width_size = sizeof(((dispatch_queue_t)NULL)->dq_width),
.dqo_running = offsetof(struct dispatch_queue_s, dq_running),
.dqo_running_size = sizeof(((dispatch_queue_t)NULL)->dq_running),
+ .dqo_suspend_cnt = offsetof(struct dispatch_queue_s, do_suspend_cnt),
+ .dqo_suspend_cnt_size = sizeof(((dispatch_queue_t)NULL)->do_suspend_cnt),
+ .dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq),
+ .dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq),
+ .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority),
+ .dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority),
};
+#if VOUCHER_USE_MACH_VOUCHER
+const struct voucher_offsets_s voucher_offsets = {
+ .vo_version = 1,
+ .vo_activity_ids_count = offsetof(struct voucher_s, v_activities),
+ .vo_activity_ids_count_size = sizeof(((voucher_t)NULL)->v_activities),
+ .vo_activity_ids_array = (uint16_t)_voucher_activity_ids((voucher_t)(NULL)),
+ .vo_activity_ids_array_entry_size = sizeof(voucher_activity_id_t),
+};
+#else // VOUCHER_USE_MACH_VOUCHER
+const struct voucher_offsets_s voucher_offsets = {
+ .vo_version = 0,
+};
+#endif // VOUCHER_USE_MACH_VOUCHER
+
+#if DISPATCH_USE_DIRECT_TSD
+const struct dispatch_tsd_indexes_s dispatch_tsd_indexes = {
+ .dti_version = 2,
+ .dti_queue_index = dispatch_queue_key,
+ .dti_voucher_index = dispatch_voucher_key,
+ .dti_qos_class_index = dispatch_priority_key,
+};
+#else // DISPATCH_USE_DIRECT_TSD
+#error Not implemented on this platform
+#endif // DISPATCH_USE_DIRECT_TSD
+
// 6618342 Contact the team that owns the Instrument DTrace probe before
// renaming this symbol
DISPATCH_CACHELINE_ALIGN
.do_vtable = DISPATCH_VTABLE(queue),
#if !DISPATCH_USE_RESOLVERS
.do_targetq = &_dispatch_root_queues[
- DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
+ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],
#endif
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.dq_serialnum = 1,
};
-struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = {
- .do_vtable = DISPATCH_VTABLE(queue_attr),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_next = DISPATCH_OBJECT_LISTLESS,
+#pragma mark -
+#pragma mark dispatch_queue_attr_t
+
+#define DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, overcommit, concurrent) \
+ { \
+ .do_vtable = DISPATCH_VTABLE(queue_attr), \
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \
+ .do_next = DISPATCH_OBJECT_LISTLESS, \
+ .dqa_qos_class = (qos), \
+ .dqa_relative_priority = (qos) ? (prio) : 0, \
+ .dqa_overcommit = (overcommit), \
+ .dqa_concurrent = (concurrent), \
+ }
+
+#define DISPATCH_QUEUE_ATTR_KIND_INIT(qos, prio) \
+ { \
+ [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 1), \
+ [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_SERIAL] = \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 0), \
+ [DQA_INDEX_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 1), \
+ [DQA_INDEX_OVERCOMMIT][DQA_INDEX_SERIAL] = \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 0), \
+ }
+
+#define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \
+ [prio] = DISPATCH_QUEUE_ATTR_KIND_INIT(qos, -(prio))
+
+#define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \
+ { \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \
+ DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \
+ }
+
+#define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \
+ [DQA_INDEX_QOS_CLASS_##qos] = \
+ DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos)
+
+const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
+ [DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2] = {
+ DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UNSPECIFIED),
+ DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(MAINTENANCE),
+ DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(BACKGROUND),
+ DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UTILITY),
+ DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(DEFAULT),
+ DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INITIATED),
+ DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INTERACTIVE),
};
+
#pragma mark -
#pragma mark dispatch_vtables
abort();
}
-#if !DISPATCH_USE_OS_TRACE
+#if !DISPATCH_USE_OS_DEBUG_LOG
#pragma mark -
#pragma mark dispatch_log
_dispatch_log_file(buf, len);
}
+#if DISPATCH_USE_SIMPLE_ASL
+// Log a single preformatted message via the simplified ASL interface.
+static inline void
+_dispatch_syslog(const char *msg)
+{
+	_simple_asl_log(ASL_LEVEL_NOTICE, "com.apple.libsystem.libdispatch", msg);
+}
+
+// Format msg with the caller's va_list and forward the result to
+// _dispatch_syslog(). vasprintf() allocates the buffer, which is freed
+// after logging; on allocation failure the message is silently dropped.
+static inline void
+_dispatch_vsyslog(const char *msg, va_list ap)
+{
+	char *str;
+	vasprintf(&str, msg, ap);
+	if (str) {
+		_dispatch_syslog(str);
+		free(str);
+	}
+}
+#else // DISPATCH_USE_SIMPLE_ASL
+// Plain syslog(3) fallback when the simplified ASL SPI is unavailable.
+static inline void
+_dispatch_syslog(const char *msg)
+{
+	syslog(LOG_NOTICE, "%s", msg);
+}
+
+static inline void
+_dispatch_vsyslog(const char *msg, va_list ap)
+{
+	// Fixed: was `vsyslog(LOG_NOTICE, msg, *ap_ptr)`. `ap_ptr` is not a
+	// parameter of this function (it belongs to _dispatch_logv) and this
+	// branch would fail to compile; the va_list argument here is `ap`.
+	vsyslog(LOG_NOTICE, msg, ap);
+}
+#endif // DISPATCH_USE_SIMPLE_ASL
+
+// Central logging dispatcher: routes a message to the configured log file
+// (when dispatch_logfile is set) or to syslog/ASL otherwise. A NULL
+// ap_ptr means `msg` is already formatted (with `len` valid for the
+// file path); a non-NULL ap_ptr means `msg` is a printf-style format
+// consumed through *ap_ptr. The signature takes a va_list POINTER so a
+// single va_list can be handed down without re-copying it here.
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_logv(const char *msg, size_t len, va_list ap)
+_dispatch_logv(const char *msg, size_t len, va_list *ap_ptr)
{
	dispatch_once_f(&_dispatch_logv_pred, NULL, _dispatch_logv_init);
	if (slowpath(dispatch_log_disabled)) {
		return;
	}
	if (slowpath(dispatch_logfile != -1)) {
-		if (!ap) {
+		if (!ap_ptr) {
			return _dispatch_log_file((char*)msg, len);
		}
-		return _dispatch_logv_file(msg, ap);
+		return _dispatch_logv_file(msg, *ap_ptr);
	}
-	if (!ap) {
-		return syslog(LOG_NOTICE, "%s", msg);
+	if (!ap_ptr) {
+		return _dispatch_syslog(msg);
	}
-	return vsyslog(LOG_NOTICE, msg, ap);
+	return _dispatch_vsyslog(msg, *ap_ptr);
}
DISPATCH_NOINLINE
va_list ap;
va_start(ap, msg);
- _dispatch_logv(msg, 0, ap);
+ _dispatch_logv(msg, 0, &ap);
va_end(ap);
}
-#endif // DISPATCH_USE_OS_TRACE
+#endif // DISPATCH_USE_OS_DEBUG_LOG
#pragma mark -
#pragma mark dispatch_debug
offs = strlcpy(buf, "NULL: ", sizeof(buf));
}
r = vsnprintf(buf + offs, sizeof(buf) - offs, msg, ap);
-#if !DISPATCH_USE_OS_TRACE
+#if !DISPATCH_USE_OS_DEBUG_LOG
size_t len = offs + (r < 0 ? 0 : (size_t)r);
if (len > sizeof(buf) - 1) {
len = sizeof(buf) - 1;
Block_release(b);
}
+#pragma mark -
+#pragma mark _dispatch_block_create no_objc
+
+#if !USE_OBJC
+
+// The compiler hides the name of the function it generates, and changes it if
+// we try to reference it directly, but the linker still sees it.
+extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *)
+ asm("____dispatch_block_create_block_invoke");
+void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE;
+
+// Non-ObjC fallback for creating a dispatch block object: wraps `block`
+// together with its private data (flags, priority) inside a freshly
+// copied outer block. Vouchers are deliberately NOT captured here --
+// voucher capture requires the ObjC runtime (see the (void)voucher note).
+dispatch_block_t
+_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher,
+		pthread_priority_t pri, dispatch_block_t block)
+{
+	dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902
+	(void)voucher; // No voucher capture! (requires ObjC runtime)
+	struct dispatch_block_private_data_s dbpds =
+			DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, pri, copy_block);
+	dispatch_block_t new_block = _dispatch_Block_copy(^{
+		// Capture object references, which retains copy_block.
+		// All retained objects must be captured by the *block*. We
+		// cannot borrow any references, because the block might be
+		// called zero or several times, so Block_release() is the
+		// only place that can release retained objects.
+		(void)copy_block;
+		_dispatch_block_invoke(&dbpds);
+	});
+	// Drop the local reference; new_block's capture keeps copy_block alive.
+	Block_release(copy_block);
+	return new_block;
+}
+
+#endif // !USE_OBJC
+
#endif // __BLOCKS__
#pragma mark -
#pragma mark dispatch_client_callout
// Abort on uncaught exceptions thrown from client callouts rdar://8577499
-#if DISPATCH_USE_CLIENT_CALLOUT && (__arm__ || !USE_OBJC)
+#if DISPATCH_USE_CLIENT_CALLOUT && (__USING_SJLJ_EXCEPTIONS__ || !USE_OBJC)
// On platforms with SjLj exceptions, avoid the SjLj overhead on every callout
// by clearing the unwinder's TSD pointer to the handler stack around callouts
return _os_object_dealloc(obj);
}
+// NULL-safe external retain entry point; returns its argument.
+void*
+os_retain(void *obj)
+{
+	if (fastpath(obj)) {
+		return _os_object_retain(obj);
+	}
+	return obj;
+}
+
+// Undefine the macro form first so this out-of-line, NULL-safe
+// os_release() definition can be emitted.
+#undef os_release
+void
+os_release(void *obj)
+{
+	if (fastpath(obj)) {
+		return _os_object_release(obj);
+	}
+}
+
#pragma mark -
#pragma mark dispatch_autorelease_pool no_objc
},
};
-#if DISPATCH_USE_VM_PRESSURE
+#if DISPATCH_USE_MEMORYSTATUS
+
#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483
static int _dispatch_ios_simulator_memory_warnings_fd = -1;
static void
-_dispatch_ios_simulator_vm_source_init(void *context DISPATCH_UNUSED)
+_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
{
- char *e = getenv("IPHONE_SIMULATOR_MEMORY_WARNINGS");
+ char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
if (!e) return;
_dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
(void)dispatch_assume_zero(errno);
}
}
+#endif
+
static void
-dispatch_source_type_vm_init(dispatch_source_t ds,
+dispatch_source_type_memorystatus_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask,
+ unsigned long mask DISPATCH_UNUSED,
dispatch_queue_t q DISPATCH_UNUSED)
{
+#if TARGET_IPHONE_SIMULATOR
static dispatch_once_t pred;
- dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_vm_source_init);
- ds->ds_dkev->dk_kevent.ident = (uint64_t)(mask & DISPATCH_VM_PRESSURE ?
- _dispatch_ios_simulator_memory_warnings_fd : -1);
+ dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);
+ handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
+ mask = NOTE_ATTRIB;
+ ds->ds_dkev->dk_kevent.filter = EVFILT_VNODE;
+ ds->ds_dkev->dk_kevent.ident = handle;
+ ds->ds_dkev->dk_kevent.flags |= EV_CLEAR;
+ ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
+ ds->ds_ident_hack = handle;
+ ds->ds_pending_data_mask = mask;
+ ds->ds_memorystatus_override = 1;
+#endif
+ ds->ds_is_level = false;
}
-const struct dispatch_source_type_s _dispatch_source_type_vm = {
+#ifndef NOTE_MEMORYSTATUS_LOW_SWAP
+#define NOTE_MEMORYSTATUS_LOW_SWAP 0x8
+#endif
+
+const struct dispatch_source_type_s _dispatch_source_type_memorystatus = {
.ke = {
- .filter = EVFILT_VNODE,
- .flags = EV_CLEAR,
+ .filter = EVFILT_MEMORYSTATUS,
+ .flags = EV_DISPATCH,
},
- .mask = NOTE_ATTRIB,
- .init = dispatch_source_type_vm_init,
+ .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN
+ |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP,
+ .init = dispatch_source_type_memorystatus_init,
};
-#else
+
static void
dispatch_source_type_vm_init(dispatch_source_t ds,
- dispatch_source_type_t type DISPATCH_UNUSED,
- uintptr_t handle DISPATCH_UNUSED,
- unsigned long mask DISPATCH_UNUSED,
- dispatch_queue_t q DISPATCH_UNUSED)
+ dispatch_source_type_t type,
+ uintptr_t handle,
+ unsigned long mask,
+ dispatch_queue_t q)
{
- ds->ds_is_level = false;
+ // Map legacy vm pressure to memorystatus warning rdar://problem/15907505
+ mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+ ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask;
+ ds->ds_pending_data_mask = mask;
+ ds->ds_vmpressure_override = 1;
+ dispatch_source_type_memorystatus_init(ds, type, handle, mask, q);
}
const struct dispatch_source_type_s _dispatch_source_type_vm = {
.ke = {
- .filter = EVFILT_VM,
+ .filter = EVFILT_MEMORYSTATUS,
.flags = EV_DISPATCH,
},
.mask = NOTE_VM_PRESSURE,
.init = dispatch_source_type_vm_init,
};
-#endif
-#endif
-#ifdef DISPATCH_USE_MEMORYSTATUS
+#elif DISPATCH_USE_VM_PRESSURE
+
static void
-dispatch_source_type_memorystatus_init(dispatch_source_t ds,
+dispatch_source_type_vm_init(dispatch_source_t ds,
dispatch_source_type_t type DISPATCH_UNUSED,
uintptr_t handle DISPATCH_UNUSED,
unsigned long mask DISPATCH_UNUSED,
ds->ds_is_level = false;
}
-const struct dispatch_source_type_s _dispatch_source_type_memorystatus = {
+const struct dispatch_source_type_s _dispatch_source_type_vm = {
.ke = {
- .filter = EVFILT_MEMORYSTATUS,
+ .filter = EVFILT_VM,
.flags = EV_DISPATCH,
},
- .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN
-#ifdef NOTE_MEMORYSTATUS_PRESSURE_CRITICAL
- |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL
-#endif
- ,
- .init = dispatch_source_type_memorystatus_init,
+ .mask = NOTE_VM_PRESSURE,
+ .init = dispatch_source_type_vm_init,
};
-#endif
+
+#endif // DISPATCH_USE_VM_PRESSURE
const struct dispatch_source_type_s _dispatch_source_type_proc = {
.ke = {
--- /dev/null
+/*
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_INLINE_INTERNAL__
+#define __DISPATCH_INLINE_INTERNAL__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+#if DISPATCH_USE_CLIENT_CALLOUT
+
+DISPATCH_NOTHROW void
+_dispatch_client_callout(void *ctxt, dispatch_function_t f);
+DISPATCH_NOTHROW void
+_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
+DISPATCH_NOTHROW bool
+_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
+ const void *buffer, size_t size, dispatch_data_applier_function_t f);
+DISPATCH_NOTHROW void
+_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
+ dispatch_mach_msg_t dmsg, mach_error_t error,
+ dispatch_mach_handler_function_t f);
+
+#else // !DISPATCH_USE_CLIENT_CALLOUT
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_client_callout(void *ctxt, dispatch_function_t f)
+{
+ return f(ctxt);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
+{
+ return f(ctxt, i);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
+ const void *buffer, size_t size, dispatch_data_applier_function_t f)
+{
+ return f(ctxt, region, offset, buffer, size);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
+ dispatch_mach_msg_t dmsg, mach_error_t error,
+ dispatch_mach_handler_function_t f)
+{
+ return f(ctxt, reason, dmsg, error);
+}
+
+#endif // !DISPATCH_USE_CLIENT_CALLOUT
+
+#if !(USE_OBJC && __OBJC2__)
+
+#pragma mark -
+#pragma mark _os_object_t & dispatch_object_t
+
+// Inline internal retain: no-op for global (statically allocated) objects
+// whose refcount is the _OS_OBJECT_GLOBAL_REFCNT sentinel; otherwise a
+// relaxed atomic increment of os_obj_ref_cnt. Crashes if the object was
+// already fully released ("resurrection", post-increment count <= 0).
+DISPATCH_ALWAYS_INLINE
+static inline _os_object_t
+_os_object_retain_internal_inline(_os_object_t obj)
+{
+	int ref_cnt = obj->os_obj_ref_cnt;
+	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
+		return obj; // global object
+	}
+	ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed);
+	if (slowpath(ref_cnt <= 0)) {
+		DISPATCH_CRASH("Resurrection of an object");
+	}
+	return obj;
+}
+
+// Inline internal release: no-op for global objects; decrements the
+// internal refcount and disposes the object when it drops below zero.
+// Crashes on over-release (< -1) and, in debug builds, when external
+// (xref) references still exist at disposal time.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_os_object_release_internal_inline(_os_object_t obj)
+{
+	int ref_cnt = obj->os_obj_ref_cnt;
+	if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
+		return; // global object
+	}
+	ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
+	if (fastpath(ref_cnt >= 0)) {
+		return;
+	}
+	if (slowpath(ref_cnt < -1)) {
+		DISPATCH_CRASH("Over-release of an object");
+	}
+#if DISPATCH_DEBUG
+	if (slowpath(obj->os_obj_xref_cnt >= 0)) {
+		DISPATCH_CRASH("Release while external references exist");
+	}
+#endif
+	return _os_object_dispose(obj);
+}
+
+// Internal-refcount retain for any dispatch object.
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_retain(dispatch_object_t dou)
+{
+	(void)_os_object_retain_internal_inline(dou._os_obj);
+}
+
+// Internal-refcount release for any dispatch object; may dispose it.
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_release(dispatch_object_t dou)
+{
+	_os_object_release_internal_inline(dou._os_obj);
+}
+
+#pragma mark -
+#pragma mark dispatch_thread
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_wqthread_override_start(mach_port_t thread,
+ pthread_priority_t priority)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ if (!_dispatch_set_qos_class_enabled) return;
+ (void)_pthread_workqueue_override_start_direct(thread, priority);
+#else
+ (void)thread; (void)priority;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_wqthread_override_reset(void)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ if (!_dispatch_set_qos_class_enabled) return;
+ (void)_pthread_workqueue_override_reset();
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t priority)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ if (!_dispatch_set_qos_class_enabled) return;
+ (void)_pthread_override_qos_class_start_direct(thread, priority);
+#else
+ (void)thread; (void)priority;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_thread_override_end(mach_port_t thread)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ if (!_dispatch_set_qos_class_enabled) return;
+ (void)_pthread_override_qos_class_end_direct(thread);
+#else
+ (void)thread;
+#endif
+}
+
+#pragma mark -
+#pragma mark dispatch_queue_t
+
+static inline bool _dispatch_queue_need_override(dispatch_queue_t dq,
+ pthread_priority_t pp);
+static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq,
+ pthread_priority_t pp);
+static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq,
+ pthread_priority_t pp);
+static inline pthread_priority_t _dispatch_queue_get_override_priority(
+ dispatch_queue_t dq);
+static inline pthread_priority_t _dispatch_queue_reset_override_priority(
+ dispatch_queue_t dq);
+static inline pthread_priority_t _dispatch_get_defaultpriority(void);
+static inline void _dispatch_set_defaultpriority_override(void);
+static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority);
+static inline void _dispatch_set_priority(pthread_priority_t priority);
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_set_thread(dispatch_queue_t dq)
+{
+ // The manager queue uses dispatch_queue_drain but is thread bound
+ if (!dq->dq_is_thread_bound) {
+ dq->dq_thread = _dispatch_thread_port();
+ }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_clear_thread(dispatch_queue_t dq)
+{
+ if (!dq->dq_is_thread_bound) {
+ dq->dq_thread = MACH_PORT_NULL;
+ }
+}
+
+// Atomically append the [head, tail] chain to dq's item list by swapping
+// dq_items_tail. Returns true when the queue was non-empty (the previous
+// tail existed and was linked to head); returns false when the queue was
+// empty, in which case the caller must publish head via the slow path.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head,
+		struct dispatch_object_s *tail)
+{
+	struct dispatch_object_s *prev;
+	tail->do_next = NULL;
+	prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release);
+	if (fastpath(prev)) {
+		// if we crash here with a value less than 0x1000, then we are at a
+		// known bug in client code for example, see _dispatch_queue_dispose
+		// or _dispatch_atfork_child
+		prev->do_next = head;
+	}
+	return (prev != NULL);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
+ dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
+{
+ struct dispatch_object_s *head = _head._do, *tail = _tail._do;
+ bool override = _dispatch_queue_need_override_retain(dq, pp);
+ if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) {
+ _dispatch_queue_push_list_slow(dq, pp, head, n, override);
+ } else if (override) {
+ _dispatch_queue_wakeup_with_qos_and_release(dq, pp);
+ }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail,
+ pthread_priority_t pp)
+{
+ struct dispatch_object_s *tail = _tail._do;
+ bool override = _dispatch_queue_need_override_retain(dq, pp);
+ if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
+ _dispatch_queue_push_slow(dq, pp, tail, override);
+ } else if (override) {
+ _dispatch_queue_wakeup_with_qos_and_release(dq, pp);
+ }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
+ pthread_priority_t pp, bool wakeup)
+{
+ // caller assumed to have a reference on dq
+ struct dispatch_object_s *tail = _tail._do;
+ if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
+ _dispatch_queue_push_slow(dq, pp, tail, false);
+ } else if (_dispatch_queue_need_override(dq, pp)) {
+ _dispatch_queue_wakeup_with_qos(dq, pp);
+ } else if (slowpath(wakeup)) {
+ _dispatch_queue_wakeup(dq);
+ }
+}
+
+// Drain a queue-like object through `invoke` while holding the dq_running
+// "drain lock" (0 -> 1 CAS, skipped when the object is suspended).
+// After invoke returns:
+//   - sema != 0: another thread is waiting to take over draining; signal it.
+//   - tq != NULL: the object must be re-pushed onto target queue tq at the
+//     collected override priority p.
+// Finally the DISPATCH_OBJECT_SUSPEND_LOCK bit is dropped and, when the
+// object ended up idle, a wakeup with qos p is issued instead of a plain
+// release. NOTE(review): the seq_cst orderings pair the suspend_cnt store
+// with the dq_running load (see rdar comment below) -- do not weaken.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_class_invoke(dispatch_object_t dou,
+		dispatch_queue_t (*invoke)(dispatch_object_t,
+		_dispatch_thread_semaphore_t*))
+{
+	pthread_priority_t p = 0;
+	dispatch_queue_t dq = dou._dq;
+	if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
+			fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){
+		_dispatch_queue_set_thread(dq);
+		dispatch_queue_t tq = NULL;
+		_dispatch_thread_semaphore_t sema = 0;
+		tq = invoke(dq, &sema);
+		_dispatch_queue_clear_thread(dq);
+		p = _dispatch_queue_reset_override_priority(dq);
+		if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+			// Ensure that the root queue sees that this thread was overridden.
+			_dispatch_set_defaultpriority_override();
+		}
+		// We do not need to check the result.
+		// When the suspend-count lock is dropped, then the check will happen.
+		(void)dispatch_atomic_dec2o(dq, dq_running, release);
+		if (sema) {
+			_dispatch_thread_semaphore_signal(sema);
+		} else if (tq) {
+			_dispatch_introspection_queue_item_complete(dq);
+			return _dispatch_queue_push(tq, dq, p);
+		}
+	}
+	dq->do_next = DISPATCH_OBJECT_LISTLESS;
+	_dispatch_introspection_queue_item_complete(dq);
+	if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
+			DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) {
+		// seq_cst with atomic store to suspend_cnt <rdar://problem/11915417>
+		if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
+			// verify that the queue is idle
+			return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
+		}
+	}
+	_dispatch_release(dq); // added when the queue is put on the list
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline unsigned long
+_dispatch_queue_class_probe(dispatch_object_t dou)
+{
+ dispatch_queue_t dq = dou._dq;
+ struct dispatch_object_s *tail;
+ // seq_cst with atomic store to suspend_cnt <rdar://problem/14637483>
+ tail = dispatch_atomic_load2o(dq, dq_items_tail, seq_cst);
+ return (unsigned long)slowpath(tail != NULL);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_object_suspended(dispatch_object_t dou)
+{
+ struct dispatch_object_s *obj = dou._do;
+ unsigned int suspend_cnt;
+ // seq_cst with atomic store to tail <rdar://problem/14637483>
+ suspend_cnt = dispatch_atomic_load2o(obj, do_suspend_cnt, seq_cst);
+ return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_queue_t
+_dispatch_queue_get_current(void)
+{
+ return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
+}
+
+// Map a QoS class plus an overcommit flag to the corresponding global
+// root queue in _dispatch_root_queues[]. Returns NULL for
+// _DISPATCH_QOS_CLASS_UNSPECIFIED or any value outside the six known
+// QoS classes -- callers must handle that.
+DISPATCH_ALWAYS_INLINE DISPATCH_CONST
+static inline dispatch_queue_t
+_dispatch_get_root_queue(qos_class_t priority, bool overcommit)
+{
+	if (overcommit) switch (priority) {
+	case _DISPATCH_QOS_CLASS_MAINTENANCE:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT];
+	case _DISPATCH_QOS_CLASS_BACKGROUND:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT];
+	case _DISPATCH_QOS_CLASS_UTILITY:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT];
+	case _DISPATCH_QOS_CLASS_DEFAULT:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
+	case _DISPATCH_QOS_CLASS_USER_INITIATED:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT];
+	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT];
+	} else switch (priority) {
+	case _DISPATCH_QOS_CLASS_MAINTENANCE:
+		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS];
+	case _DISPATCH_QOS_CLASS_BACKGROUND:
+		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS];
+	case _DISPATCH_QOS_CLASS_UTILITY:
+		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS];
+	case _DISPATCH_QOS_CLASS_DEFAULT:
+		return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS];
+	case _DISPATCH_QOS_CLASS_USER_INITIATED:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS];
+	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
+		return &_dispatch_root_queues[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS];
+	}
+	// Unknown/unspecified QoS class.
+	return NULL;
+}
+
+// Note to later developers: ensure that any initialization changes are
+// made for statically allocated queues (i.e. _dispatch_main_q).
+static inline void
+_dispatch_queue_init(dispatch_queue_t dq)
+{
+ dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
+
+ dq->dq_running = 0;
+ dq->dq_width = 1;
+ dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers,
+ relaxed);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_set_bound_thread(dispatch_queue_t dq)
+{
+ //Tag thread-bound queues with the owning thread
+ dispatch_assert(dq->dq_is_thread_bound);
+ dq->dq_thread = _dispatch_thread_port();
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
+{
+ dispatch_assert(dq->dq_is_thread_bound);
+ dq->dq_thread = MACH_PORT_NULL;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline mach_port_t
+_dispatch_queue_get_bound_thread(dispatch_queue_t dq)
+{
+ dispatch_assert(dq->dq_is_thread_bound);
+ return dq->dq_thread;
+}
+
+#pragma mark -
+#pragma mark dispatch_priority
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_get_defaultpriority(void)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific(
+ dispatch_defaultpriority_key);
+ return priority;
+#else
+ return 0;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_reset_defaultpriority(pthread_priority_t priority)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t old_priority = _dispatch_get_defaultpriority();
+ // if an inner-loop or'd in the override flag to the per-thread priority,
+ // it needs to be propogated up the chain
+ priority |= old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG;
+
+ if (slowpath(priority != old_priority)) {
+ _dispatch_thread_setspecific(dispatch_defaultpriority_key,
+ (void*)priority);
+ }
+#else
+ (void)priority;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_set_defaultpriority_override(void)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t old_priority = _dispatch_get_defaultpriority();
+ pthread_priority_t priority = old_priority |
+ _PTHREAD_PRIORITY_OVERRIDE_FLAG;
+
+ if (slowpath(priority != old_priority)) {
+ _dispatch_thread_setspecific(dispatch_defaultpriority_key,
+ (void*)priority);
+ }
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_reset_defaultpriority_override(void)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t old_priority = _dispatch_get_defaultpriority();
+ pthread_priority_t priority = old_priority &
+ ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG);
+
+ if (slowpath(priority != old_priority)) {
+ _dispatch_thread_setspecific(dispatch_defaultpriority_key,
+ (void*)priority);
+ return true;
+ }
+#endif
+ return false;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
+ dispatch_queue_t tq)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
+ const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
+ pthread_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
+ if ((!dqp || (dqp & inherited_flag)) && (tqp & rootqueue_flag)) {
+ dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
+ }
+#else
+ (void)dq; (void)tq;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_set_defaultpriority(pthread_priority_t priority)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t old_priority = _dispatch_get_defaultpriority();
+ if (old_priority) {
+ pthread_priority_t flags, defaultqueue, basepri;
+ flags = (priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
+ defaultqueue = (old_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
+ basepri = (old_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK);
+ priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
+ if (!priority) {
+ flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue;
+ priority = basepri;
+ } else if (priority < basepri && !defaultqueue) { // rdar://16349734
+ priority = basepri;
+ }
+ priority |= flags | (old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG);
+ }
+ if (slowpath(priority != old_priority)) {
+ _dispatch_thread_setspecific(dispatch_defaultpriority_key,
+ (void*)priority);
+ }
+ return old_priority;
+#else
+ (void)priority;
+ return 0;
+#endif
+}
+
+// Decide which priority a drained work item should run at: the item's own
+// `priority` when enforcement applies, otherwise the thread's default
+// priority. Enforcement is requested via DISPATCH_PRIORITY_ENFORCE in
+// `flags` or _PTHREAD_PRIORITY_ENFORCE_FLAG on `priority` itself, and is
+// adjusted by the inherit/defaultqueue relationships of the default
+// priority (see rdar://16349734 for the defaultqueue special cases).
+// Flag bits are stripped from both values before comparison.
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_priority_adopt(pthread_priority_t priority, unsigned long flags)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	pthread_priority_t defaultpri = _dispatch_get_defaultpriority();
+	bool enforce, inherited, defaultqueue;
+	enforce = (flags & DISPATCH_PRIORITY_ENFORCE) ||
+			(priority & _PTHREAD_PRIORITY_ENFORCE_FLAG);
+	inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG);
+	defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG);
+	defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
+	priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
+	if (!priority) {
+		enforce = false;
+	} else if (!enforce) {
+		if (priority < defaultpri) {
+			if (defaultqueue) enforce = true; // rdar://16349734
+		} else if (inherited || defaultqueue) {
+			enforce = true;
+		}
+	} else if (priority < defaultpri && !defaultqueue) { // rdar://16349734
+		enforce = false;
+	}
+	return enforce ? priority : defaultpri;
+#else
+	(void)priority; (void)flags;
+	return 0;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_get_priority(void)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific(
+ dispatch_priority_key);
+ return (priority & ~_PTHREAD_PRIORITY_FLAGS_MASK);
+#else
+ return 0;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_set_priority_and_mach_voucher(pthread_priority_t priority,
+ mach_voucher_t kv)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ _pthread_set_flags_t flags = 0;
+ if (priority && _dispatch_set_qos_class_enabled) {
+ pthread_priority_t old_priority = _dispatch_get_priority();
+ if (priority != old_priority && old_priority) {
+ flags |= _PTHREAD_SET_SELF_QOS_FLAG;
+ }
+ }
+ if (kv != VOUCHER_NO_MACH_VOUCHER) {
+#if VOUCHER_USE_MACH_VOUCHER
+ flags |= _PTHREAD_SET_SELF_VOUCHER_FLAG;
+#endif
+ }
+ if (!flags) return;
+ int r = _pthread_set_properties_self(flags, priority, kv);
+ (void)dispatch_assume_zero(r);
+#elif VOUCHER_USE_MACH_VOUCHER
+#error Invalid build configuration
+#else
+ (void)priority; (void)kv;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority,
+ voucher_t voucher)
+{
+ pthread_priority_t p = (priority != DISPATCH_NO_PRIORITY) ? priority : 0;
+ voucher_t ov = DISPATCH_NO_VOUCHER;
+ mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER;
+ if (voucher != DISPATCH_NO_VOUCHER) {
+ ov = _voucher_get();
+ kv = _voucher_swap_and_get_mach_voucher(ov, voucher);
+ }
+ _dispatch_set_priority_and_mach_voucher(p, kv);
+ return ov;
+}
+
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_dispatch_adopt_priority_and_voucher(pthread_priority_t priority,
+ voucher_t voucher, unsigned long flags)
+{
+ pthread_priority_t p = 0;
+ if (priority != DISPATCH_NO_PRIORITY) {
+ p = _dispatch_priority_adopt(priority, flags);
+ }
+ return _dispatch_set_priority_and_adopt_voucher(p, voucher);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority,
+ voucher_t voucher, unsigned long flags)
+{
+ voucher_t ov;
+ ov = _dispatch_adopt_priority_and_voucher(priority, voucher, flags);
+ if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_set_priority_and_replace_voucher(pthread_priority_t priority,
+ voucher_t voucher)
+{
+ voucher_t ov;
+ ov = _dispatch_set_priority_and_adopt_voucher(priority, voucher);
+ if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_set_priority(pthread_priority_t priority)
+{
+ _dispatch_set_priority_and_mach_voucher(priority, VOUCHER_NO_MACH_VOUCHER);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_priority_normalize(pthread_priority_t pp)
+{
+ dispatch_assert_zero(pp & ~(pthread_priority_t)
+ _PTHREAD_PRIORITY_QOS_CLASS_MASK);
+ unsigned int qosbits = (unsigned int)pp, idx;
+ if (!qosbits) return 0;
+ idx = (unsigned int)(sizeof(qosbits)*8) -
+ (unsigned int)__builtin_clz(qosbits) - 1;
+ return (1 << idx);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_need_override(dispatch_queue_t dq, pthread_priority_t pp)
+{
+ if (!pp || dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE) return false;
+ uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
+ uint32_t o = dq->dq_override;
+ return (o < p);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_need_override_retain(dispatch_queue_t dq, pthread_priority_t pp)
+{
+ bool override = _dispatch_queue_need_override(dq, pp);
+ if (override) _dispatch_retain(dq);
+ return override;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t pp)
+{
+ uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
+ uint32_t o = dq->dq_override;
+ if (o < p) o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed);
+ return (o < p);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_queue_get_override_priority(dispatch_queue_t dq)
+{
+ uint32_t p = (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
+ uint32_t o = dq->dq_override;
+ if (o == p) return o;
+ return _dispatch_priority_normalize(o);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_set_override_priority(dispatch_queue_t dq)
+{
+ uint32_t p = 0;
+ if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
+ p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+ }
+ dispatch_atomic_store2o(dq, dq_override, p, relaxed);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_queue_reset_override_priority(dispatch_queue_t dq)
+{
+ uint32_t p = 0;
+ if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) {
+ p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+ }
+ uint32_t o = dispatch_atomic_xchg2o(dq, dq_override, p, relaxed);
+ if (o == p) return o;
+ return _dispatch_priority_normalize(o);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_priority_propagate(void)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t priority = _dispatch_get_priority();
+ if (priority > _dispatch_user_initiated_priority) {
+ // Cap QOS for propagation at user-initiated <rdar://16681262&16998036>
+ priority = _dispatch_user_initiated_priority;
+ }
+ return priority;
+#else
+ return 0;
+#endif
+}
+
+// including maintenance
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_is_background_thread(void)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t priority;
+ priority = _dispatch_get_priority();
+ return priority && (priority <= _dispatch_background_priority);
+#else
+ return false;
+#endif
+}
+
+#pragma mark -
+#pragma mark dispatch_block_t
+
+#ifdef __BLOCKS__
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_block_has_private_data(const dispatch_block_t block)
+{
+ extern void (*_dispatch_block_special_invoke)(void*);
+ return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_block_private_data_t
+_dispatch_block_get_data(const dispatch_block_t db)
+{
+ if (!_dispatch_block_has_private_data(db)) {
+ return NULL;
+ }
+ // Keep in sync with _dispatch_block_create implementation
+ uint8_t *x = (uint8_t *)db;
+ // x points to base of struct Block_layout
+ x += sizeof(struct Block_layout);
+	// x points to address of captured block
+ x += sizeof(dispatch_block_t);
+#if USE_OBJC
+	// x points to address of captured voucher
+ x += sizeof(voucher_t);
+#endif
+ // x points to base of captured dispatch_block_private_data_s structure
+ dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
+ if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
+ DISPATCH_CRASH("Corruption of dispatch block object");
+ }
+ return dbpd;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_block_get_priority(const dispatch_block_t db)
+{
+ dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
+ return dbpd ? dbpd->dbpd_priority : 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_block_flags_t
+_dispatch_block_get_flags(const dispatch_block_t db)
+{
+ dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
+ return dbpd ? dbpd->dbpd_flags : 0;
+}
+
+#define DISPATCH_BLOCK_HAS(flag, db) \
+ ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0)
+#define DISPATCH_BLOCK_IS(flag, db) \
+ ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0)
+
+#endif
+
+#pragma mark -
+#pragma mark dispatch_continuation_t
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_continuation_t
+_dispatch_continuation_alloc_cacheonly(void)
+{
+ dispatch_continuation_t dc = (dispatch_continuation_t)
+ fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
+ if (dc) {
+ _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
+ }
+ return dc;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_continuation_t
+_dispatch_continuation_alloc(void)
+{
+ dispatch_continuation_t dc =
+ fastpath(_dispatch_continuation_alloc_cacheonly());
+ if(!dc) {
+ return _dispatch_continuation_alloc_from_heap();
+ }
+ return dc;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_continuation_t
+_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
+{
+ dispatch_continuation_t prev_dc = (dispatch_continuation_t)
+ fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
+ int cnt = prev_dc ? prev_dc->dc_cache_cnt + 1 : 1;
+ // Cap continuation cache
+ if (slowpath(cnt > _dispatch_continuation_cache_limit)) {
+ return dc;
+ }
+ dc->do_next = prev_dc;
+ dc->dc_cache_cnt = cnt;
+ _dispatch_thread_setspecific(dispatch_cache_key, dc);
+ return NULL;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_continuation_free(dispatch_continuation_t dc)
+{
+ dc = _dispatch_continuation_free_cacheonly(dc);
+ if (slowpath(dc)) {
+ _dispatch_continuation_free_to_cache_limit(dc);
+ }
+}
+
+#include "trace.h"
+
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_continuation_pop(dispatch_object_t dou)
+{
+ dispatch_continuation_t dc = dou._dc, dc1;
+ dispatch_group_t dg;
+
+ _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou);
+ if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
+ return dx_invoke(dou._do);
+ }
+
+ // Add the item back to the cache before calling the function. This
+ // allows the 'hot' continuation to be used for a quick callback.
+ //
+ // The ccache version is per-thread.
+ // Therefore, the object has not been reused yet.
+ // This generates better assembly.
+ if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
+ _dispatch_continuation_voucher_adopt(dc);
+ dc1 = _dispatch_continuation_free_cacheonly(dc);
+ } else {
+ dc1 = NULL;
+ }
+ if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
+ dg = dc->dc_data;
+ } else {
+ dg = NULL;
+ }
+ _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
+ if (dg) {
+ dispatch_group_leave(dg);
+ _dispatch_release(dg);
+ }
+ _dispatch_introspection_queue_item_complete(dou);
+ if (slowpath(dc1)) {
+ _dispatch_continuation_free_to_cache_limit(dc1);
+ }
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_continuation_priority_set(dispatch_continuation_t dc,
+ pthread_priority_t pp, dispatch_block_flags_t flags)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t prio = 0;
+ if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
+ prio = pp;
+ } else if (!(flags & DISPATCH_BLOCK_NO_QOS_CLASS)) {
+ prio = _dispatch_priority_propagate();
+ }
+ if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) {
+ prio |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
+ }
+ dc->dc_priority = prio;
+#else
+ (void)dc; (void)pp; (void)flags;
+#endif
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_continuation_get_override_priority(dispatch_queue_t dq,
+ dispatch_continuation_t dc)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+ bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG;
+ pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+ bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+ if (!p) {
+ enforce = false;
+ } else if (!enforce && (!dqp || defaultqueue)) {
+ enforce = true;
+ }
+ if (!enforce) {
+ p = dqp;
+ }
+ return p;
+#else
+ (void)dq; (void)dc;
+ return 0;
+#endif
+}
+
+#endif // !(USE_OBJC && __OBJC2__)
+
+#endif /* __DISPATCH_INLINE_INTERNAL__ */
#if !defined(DISPATCH_MACH_SPI) && TARGET_OS_MAC
#define DISPATCH_MACH_SPI 1
#endif
+#if !defined(OS_VOUCHER_CREATION_SPI) && TARGET_OS_MAC
+#define OS_VOUCHER_CREATION_SPI 1
+#endif
+#if !defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC
+#define OS_VOUCHER_ACTIVITY_SPI 1
+#endif
+#if !defined(OS_VOUCHER_ACTIVITY_BUFFER_SPI) && TARGET_OS_MAC && \
+ __has_include(<atm/atm_types.h>)
+#define OS_VOUCHER_ACTIVITY_BUFFER_SPI 1
+#endif
+#if !defined(DISPATCH_LAYOUT_SPI) && TARGET_OS_MAC
+#define DISPATCH_LAYOUT_SPI 1
+#endif
#if !defined(USE_OBJC) && HAVE_OBJC
#define USE_OBJC 1
#include <os/object.h>
-#include <dispatch/object.h>
#include <dispatch/time.h>
+#include <dispatch/object.h>
#include <dispatch/queue.h>
+#include <dispatch/block.h>
#include <dispatch/source.h>
#include <dispatch/group.h>
#include <dispatch/semaphore.h>
#if !TARGET_OS_WIN32
#include "io_private.h"
#endif
+#include "voucher_private.h"
+#include "voucher_activity_private.h"
+#include "layout_private.h"
#include "benchmark.h"
#include "private.h"
#define DISPATCH_USE_DTRACE 1
#endif
-#if ((!TARGET_OS_EMBEDDED && DISPATCH_INTROSPECTION) || DISPATCH_DEBUG || \
+#if DISPATCH_USE_DTRACE && (DISPATCH_INTROSPECTION || DISPATCH_DEBUG || \
DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE_INTROSPECTION)
#define DISPATCH_USE_DTRACE_INTROSPECTION 1
#endif
#include <mach/mach_traps.h>
#include <mach/message.h>
#include <mach/mig_errors.h>
+#include <mach/host_special_ports.h>
#include <mach/host_info.h>
#include <mach/notify.h>
#include <mach/mach_vm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#if !TARGET_OS_WIN32
-#include <syslog.h>
-#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
DISPATCH_NOINLINE DISPATCH_NORETURN
void _dispatch_abort(size_t line, long val);
-#if !defined(DISPATCH_USE_OS_TRACE) && DISPATCH_DEBUG
-#if __has_include(<os/trace.h>)
-#define DISPATCH_USE_OS_TRACE 1
-#include <os/trace.h>
+#if !defined(DISPATCH_USE_OS_DEBUG_LOG) && DISPATCH_DEBUG
+#if __has_include(<os/debug_private.h>)
+#define DISPATCH_USE_OS_DEBUG_LOG 1
+#include <os/debug_private.h>
+#endif
+#endif // DISPATCH_USE_OS_DEBUG_LOG
+
+#if !defined(DISPATCH_USE_SIMPLE_ASL) && !DISPATCH_USE_OS_DEBUG_LOG
+#if __has_include(<_simple.h>)
+#define DISPATCH_USE_SIMPLE_ASL 1
+#include <_simple.h>
#endif
-#endif // DISPATCH_USE_OS_TRACE
+#endif // DISPATCH_USE_SIMPLE_ASL
-#if DISPATCH_USE_OS_TRACE
-#define _dispatch_log(msg, ...) os_trace("libdispatch", msg, ## __VA_ARGS__)
+#if !DISPATCH_USE_SIMPLE_ASL && !DISPATCH_USE_OS_DEBUG_LOG && !TARGET_OS_WIN32
+#include <syslog.h>
+#endif
+
+#if DISPATCH_USE_OS_DEBUG_LOG
+#define _dispatch_log(msg, ...) os_debug_log("libdispatch", msg, ## __VA_ARGS__)
#else
DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2)))
void _dispatch_log(const char *msg, ...);
-#endif // DISPATCH_USE_OS_TRACE
+#endif // DISPATCH_USE_OS_DEBUG_LOG
#define dsnprintf(...) \
({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; })
#define _dispatch_object_debug(object, message, ...)
#endif // DISPATCH_DEBUG
-#if DISPATCH_USE_CLIENT_CALLOUT
-
-DISPATCH_NOTHROW void
-_dispatch_client_callout(void *ctxt, dispatch_function_t f);
-DISPATCH_NOTHROW void
-_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
-DISPATCH_NOTHROW bool
-_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
- const void *buffer, size_t size, dispatch_data_applier_function_t f);
-DISPATCH_NOTHROW void
-_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
- dispatch_mach_msg_t dmsg, mach_error_t error,
- dispatch_mach_handler_function_t f);
-
-#else // !DISPATCH_USE_CLIENT_CALLOUT
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_client_callout(void *ctxt, dispatch_function_t f)
-{
- return f(ctxt);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t))
-{
- return f(ctxt, i);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
- const void *buffer, size_t size, dispatch_data_applier_function_t f)
-{
- return f(ctxt, region, offset, buffer, size);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
- dispatch_mach_msg_t dmsg, mach_error_t error,
- dispatch_mach_handler_function_t f);
-{
- return f(ctxt, reason, dmsg, error);
-}
-
-#endif // !DISPATCH_USE_CLIENT_CALLOUT
-
#ifdef __BLOCKS__
#define _dispatch_Block_invoke(bb) \
((dispatch_function_t)((struct Block_layout *)bb)->invoke)
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_client_callout_block(dispatch_block_t b)
-{
- return _dispatch_client_callout(b, _dispatch_Block_invoke(b));
-}
-
#if __GNUC__
dispatch_block_t _dispatch_Block_copy(dispatch_block_t block);
#define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x))
#else
dispatch_block_t _dispatch_Block_copy(const void *block);
#endif
-
void _dispatch_call_block_and_release(void *block);
#endif /* __BLOCKS__ */
extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork;
-extern struct _dispatch_hw_config_s {
- uint32_t cc_max_active;
- uint32_t cc_max_logical;
- uint32_t cc_max_physical;
-} _dispatch_hw_config;
-
-#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR && \
- IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090)
+#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR)
+// rdar://problem/15492045
#if __has_include(<os/semaphore_private.h>)
#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1
#include <os/semaphore_private.h>
// Older Mac OS X and iOS Simulator fallbacks
#if HAVE_PTHREAD_WORKQUEUES
-#ifndef WORKQ_BG_PRIOQUEUE
-#define WORKQ_BG_PRIOQUEUE 3
-#endif
#ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT
#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001
#endif
-#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070
-#ifndef DISPATCH_NO_BG_PRIORITY
-#define DISPATCH_NO_BG_PRIORITY 1
-#endif
-#endif
#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1080
#ifndef DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1
#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0
#endif
+#if TARGET_IPHONE_SIMULATOR && \
+ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000
+#ifndef DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1
+#endif
+#endif
+#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101000
+#undef HAVE__PTHREAD_WORKQUEUE_INIT
+#define HAVE__PTHREAD_WORKQUEUE_INIT 0
+#endif
#endif // HAVE_PTHREAD_WORKQUEUES
+#if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 \
+ && !defined(HAVE_PTHREAD_WORKQUEUE_QOS)
+#define HAVE_PTHREAD_WORKQUEUE_QOS 1
+#endif
#if HAVE_MACH
#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || (TARGET_IPHONE_SIMULATOR && \
#endif
#endif // HAVE_MACH
-#ifdef EVFILT_VM
-#ifndef DISPATCH_USE_VM_PRESSURE
-#define DISPATCH_USE_VM_PRESSURE 1
-#endif
-#endif // EVFILT_VM
-
#ifdef EVFILT_MEMORYSTATUS
#ifndef DISPATCH_USE_MEMORYSTATUS
#define DISPATCH_USE_MEMORYSTATUS 1
#endif
#endif // EVFILT_MEMORYSTATUS
-#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070
+#if defined(EVFILT_VM) && !DISPATCH_USE_MEMORYSTATUS
+#ifndef DISPATCH_USE_VM_PRESSURE
+#define DISPATCH_USE_VM_PRESSURE 1
+#endif
+#endif // EVFILT_VM
+
+#if TARGET_IPHONE_SIMULATOR
+#undef DISPATCH_USE_MEMORYSTATUS_SOURCE
+#define DISPATCH_USE_MEMORYSTATUS_SOURCE 0
#undef DISPATCH_USE_VM_PRESSURE_SOURCE
#define DISPATCH_USE_VM_PRESSURE_SOURCE 0
#endif // TARGET_IPHONE_SIMULATOR
-#if TARGET_OS_EMBEDDED
-#if !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE
-#define DISPATCH_USE_VM_PRESSURE_SOURCE 1
-#endif
-#else // !TARGET_OS_EMBEDDED
#if !defined(DISPATCH_USE_MEMORYSTATUS_SOURCE) && DISPATCH_USE_MEMORYSTATUS
#define DISPATCH_USE_MEMORYSTATUS_SOURCE 1
#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE
#define DISPATCH_USE_VM_PRESSURE_SOURCE 1
#endif
-#endif // TARGET_OS_EMBEDDED
#if !defined(NOTE_LEEWAY) || (TARGET_IPHONE_SIMULATOR && \
IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090)
#endif
#endif // F_SETNOSIGPIPE
+#if defined(MACH_SEND_NOIMPORTANCE)
+#ifndef DISPATCH_USE_CHECKIN_NOIMPORTANCE
+#define DISPATCH_USE_CHECKIN_NOIMPORTANCE 1 // rdar://problem/16996737
+#endif
+#endif // MACH_SEND_NOIMPORTANCE
+
#if HAVE_LIBPROC_INTERNAL_H
#include <libproc.h>
#endif // HAVE_SYS_GUARDED_H
-#define _dispatch_hardware_crash() __builtin_trap()
+#ifndef MACH_MSGH_BITS_VOUCHER_MASK
+#define MACH_MSGH_BITS_VOUCHER_MASK 0x001f0000
+#define MACH_MSGH_BITS_SET_PORTS(remote, local, voucher) \
+ (((remote) & MACH_MSGH_BITS_REMOTE_MASK) | \
+ (((local) << 8) & MACH_MSGH_BITS_LOCAL_MASK) | \
+ (((voucher) << 16) & MACH_MSGH_BITS_VOUCHER_MASK))
+#define MACH_MSGH_BITS_VOUCHER(bits) \
+ (((bits) & MACH_MSGH_BITS_VOUCHER_MASK) >> 16)
+#define MACH_MSGH_BITS_HAS_VOUCHER(bits) \
+ (MACH_MSGH_BITS_VOUCHER(bits) != MACH_MSGH_BITS_ZERO)
+#define msgh_voucher_port msgh_reserved
+#define mach_voucher_t mach_port_t
+#define MACH_VOUCHER_NULL MACH_PORT_NULL
+#define MACH_SEND_INVALID_VOUCHER 0x10000005
+#endif
+
+#define _dispatch_hardware_crash() \
+ __asm__(""); __builtin_trap() // <rdar://problem/17464981>
-#define _dispatch_set_crash_log_message(x)
+#define _dispatch_set_crash_log_message(msg)
#if HAVE_MACH
// MIG_REPLY_MISMATCH means either:
_dispatch_hardware_crash(); \
} while (0)
+extern int _dispatch_set_qos_class_enabled;
+#define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul)
+#define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul)
+#define DISPATCH_PRIORITY_ENFORCE 0x1
+static inline void _dispatch_adopt_priority_and_replace_voucher(
+ pthread_priority_t priority, voucher_t voucher, unsigned long flags);
+#if HAVE_MACH
+static inline void _dispatch_set_priority_and_mach_voucher(
+ pthread_priority_t priority, mach_voucher_t kv);
+mach_port_t _dispatch_get_mach_host_port(void);
+#endif
+
+
/* #includes dependent on internal.h */
#include "object_internal.h"
#include "semaphore_internal.h"
#include "introspection_internal.h"
#include "queue_internal.h"
#include "source_internal.h"
+#include "voucher_internal.h"
#include "data_internal.h"
#if !TARGET_OS_WIN32
#include "io_internal.h"
#endif
-#include "trace.h"
+#include "inline_internal.h"
#endif /* __DISPATCH_INTERNAL__ */
#if DISPATCH_INTROSPECTION
#include "internal.h"
-#include "introspection.h"
+#include "dispatch/introspection.h"
#include "introspection_private.h"
typedef struct dispatch_introspection_thread_s {
bool apply = false;
long flags = (long)dc->do_vtable;
if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) {
- waiter = dc->dc_data;
+ waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data);
if (flags & DISPATCH_OBJ_BARRIER_BIT) {
dc = dc->dc_ctxt;
dq = dc->dc_data;
{
bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT);
- uint32_t width = dq->dq_width;
- if (width > 1 && width != UINT32_MAX) width /= 2;
+ uint16_t width = dq->dq_width;
+ if (width > 1 && width != DISPATCH_QUEUE_WIDTH_MAX) width /= 2;
dispatch_introspection_queue_s diq = {
.queue = dq,
.target_queue = dq->do_targetq,
_dispatch_introspection_source_get_info(dispatch_source_t ds)
{
dispatch_source_refs_t dr = ds->ds_refs;
- void *ctxt = dr->ds_handler_ctxt;
- dispatch_function_t handler = dr->ds_handler_func;
- bool handler_is_block = ds->ds_handler_is_block;
+ dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
+ void *ctxt = NULL;
+ dispatch_function_t handler = NULL;
+ bool hdlr_is_block = false;
+ if (dc) {
+ ctxt = dc->dc_ctxt;
+ handler = dc->dc_func;
+ hdlr_is_block = ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT);
+ }
bool after = (handler == _dispatch_after_timer_callback);
if (after && !(ds->ds_atomic_flags & DSF_CANCELED)) {
- dispatch_continuation_t dc = ctxt;
+ dc = ctxt;
ctxt = dc->dc_ctxt;
handler = dc->dc_func;
- if (handler == _dispatch_call_block_and_release) {
+ hdlr_is_block = (handler == _dispatch_call_block_and_release);
+ if (hdlr_is_block) {
handler = _dispatch_Block_invoke(ctxt);
- handler_is_block = 1;
}
}
dispatch_introspection_source_s dis = {
.handler = handler,
.suspend_count = ds->do_suspend_cnt / 2,
.enqueued = (ds->do_suspend_cnt & 1),
- .handler_is_block = handler_is_block,
+ .handler_is_block = hdlr_is_block,
.timer = ds->ds_is_timer,
.after = after,
};
dispatch_io_t channel = _dispatch_alloc(DISPATCH_VTABLE(io),
sizeof(struct dispatch_io_s));
channel->do_next = DISPATCH_OBJECT_LISTLESS;
- channel->do_targetq = _dispatch_get_root_queue(0, true);
+ channel->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
+ true);
channel->params.type = type;
channel->params.high = SIZE_MAX;
channel->params.low = dispatch_io_defaults.low_water_chunks *
dispatch_operation_t op =
_dispatch_operation_create(DOP_DIR_READ, channel, 0,
length, dispatch_data_empty,
- _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,
- false), ^(bool done, dispatch_data_t data, int error) {
+ _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false),
+ ^(bool done, dispatch_data_t data, int error) {
if (data) {
data = dispatch_data_create_concat(deliver_data, data);
_dispatch_io_data_release(deliver_data);
dispatch_operation_t op =
_dispatch_operation_create(DOP_DIR_WRITE, channel, 0,
dispatch_data_get_size(data), data,
- _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,
- false), ^(bool done, dispatch_data_t d, int error) {
+ _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false),
+ ^(bool done, dispatch_data_t d, int error) {
if (done) {
if (d) {
_dispatch_io_data_retain(d);
);
}
_dispatch_stream_init(fd_entry, _dispatch_get_root_queue(
- DISPATCH_QUEUE_PRIORITY_DEFAULT, false));
+ _DISPATCH_QOS_CLASS_DEFAULT, false));
}
fd_entry->orig_flags = orig_flags;
fd_entry->orig_nosigpipe = orig_nosigpipe;
_dispatch_disk_init(fd_entry, major(dev));
} else {
_dispatch_stream_init(fd_entry, _dispatch_get_root_queue(
- DISPATCH_QUEUE_PRIORITY_DEFAULT, false));
+ _DISPATCH_QOS_CLASS_DEFAULT, false));
}
fd_entry->fd = -1;
fd_entry->orig_flags = -1;
disk->do_next = DISPATCH_OBJECT_LISTLESS;
disk->do_xref_cnt = -1;
disk->advise_list_depth = pending_reqs_depth;
- disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,
+ disk->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
false);
disk->dev = dev;
TAILQ_INIT(&disk->operations);
_os_object_t
_os_object_retain_internal(_os_object_t obj)
{
- int ref_cnt = obj->os_obj_ref_cnt;
- if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
- return obj; // global object
- }
- ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed);
- if (slowpath(ref_cnt <= 0)) {
- DISPATCH_CRASH("Resurrection of an object");
- }
- return obj;
+ return _os_object_retain_internal_inline(obj);
}
DISPATCH_NOINLINE
void
_os_object_release_internal(_os_object_t obj)
{
- int ref_cnt = obj->os_obj_ref_cnt;
- if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
- return; // global object
- }
- ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
- if (fastpath(ref_cnt >= 0)) {
- return;
- }
- if (slowpath(ref_cnt < -1)) {
- DISPATCH_CRASH("Over-release of an object");
- }
-#if DISPATCH_DEBUG
- if (slowpath(obj->os_obj_xref_cnt >= 0)) {
- DISPATCH_CRASH("Release while external references exist");
- }
-#endif
- return _os_object_dispose(obj);
+ return _os_object_release_internal_inline(obj);
}
DISPATCH_NOINLINE
(void)_os_object_retain(dou._os_obj);
}
-void
-_dispatch_retain(dispatch_object_t dou)
-{
- (void)_os_object_retain_internal(dou._os_obj);
-}
-
void
dispatch_release(dispatch_object_t dou)
{
_os_object_release(dou._os_obj);
}
-void
-_dispatch_release(dispatch_object_t dou)
-{
- _os_object_release_internal(dou._os_obj);
-}
-
static void
_dispatch_dealloc(dispatch_object_t dou)
{
// rdar://8181908 explains why we need to do an internal retain at every
// suspension.
(void)dispatch_atomic_add2o(dou._do, do_suspend_cnt,
- DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
+ DISPATCH_OBJECT_SUSPEND_INTERVAL, acquire);
_dispatch_retain(dou._do);
}
// If the previous value was less than the suspend interval, the object
// has been over-resumed.
unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
- do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
+ do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, release);
if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
// Balancing the retain() done in suspend() for rdar://8181908
return _dispatch_release(dou._do);
/*
- * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2011-2014 Apple Inc. All rights reserved.
*
* @APPLE_APACHE_LICENSE_HEADER_START@
*
_os_object_have_gc = objc_collectingEnabled();
if (slowpath(_os_object_have_gc)) {
_os_object_gc_zone = objc_collectableZone();
+ (void)[OS_OBJECT_CLASS(object) class]; // OS_object class realization
}
}
return obj;
}
-#define _os_objc_gc_retain(obj) \
- if (slowpath(_os_object_have_gc)) { \
- return auto_zone_retain(_os_object_gc_zone, obj); \
+DISPATCH_NOINLINE
+static id
+_os_objc_gc_retain(id obj)
+{
+ if (fastpath(obj)) {
+ auto_zone_retain(_os_object_gc_zone, obj);
+ }
+ return obj;
+}
+
+DISPATCH_NOINLINE
+static void
+_os_objc_gc_release(id obj)
+{
+ if (fastpath(obj)) {
+ (void)auto_zone_release(_os_object_gc_zone, obj);
+ }
+ asm(""); // prevent tailcall
+}
+
+DISPATCH_NOINLINE
+static id
+_os_object_gc_retain(id obj)
+{
+ if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) {
+ return _os_object_retain(obj);
+ } else {
+ return _os_objc_gc_retain(obj);
}
+}
-#define _os_objc_gc_release(obj) \
- if (slowpath(_os_object_have_gc)) { \
- return (void)auto_zone_release(_os_object_gc_zone, obj); \
+DISPATCH_NOINLINE
+static void
+_os_object_gc_release(id obj)
+{
+ if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) {
+ return _os_object_release(obj);
+ } else {
+ return _os_objc_gc_release(obj);
}
+}
#else // __OBJC_GC__
#define _os_object_gc_init()
#define _os_object_make_uncollectable(obj) (obj)
#define _os_object_make_collectable(obj) (obj)
-#define _os_objc_gc_retain(obj)
-#define _os_objc_gc_release(obj)
+#define _os_object_have_gc 0
+#define _os_object_gc_retain(obj) (obj)
+#define _os_object_gc_release(obj)
#endif // __OBJC_GC__
#pragma mark -
return obj;
}
+static void*
+_os_objc_destructInstance(id obj)
+{
+	// no-op if only libSystem is loaded
+ return obj;
+}
+
void
_os_object_init(void)
{
_objc_init();
_os_object_gc_init();
+ if (slowpath(_os_object_have_gc)) return;
+ Block_callbacks_RR callbacks = {
+ sizeof(Block_callbacks_RR),
+ (void (*)(const void *))&objc_retain,
+ (void (*)(const void *))&objc_release,
+ (void (*)(const void *))&_os_objc_destructInstance
+ };
+ _Block_use_RR2(&callbacks);
}
_os_object_t
[obj _dispose];
}
+#undef os_retain
+void*
+os_retain(void *obj)
+{
+ if (slowpath(_os_object_have_gc)) return _os_object_gc_retain(obj);
+ return objc_retain(obj);
+}
+
+#undef os_release
+void
+os_release(void *obj)
+{
+ if (slowpath(_os_object_have_gc)) return _os_object_gc_release(obj);
+ return objc_release(obj);
+}
+
#pragma mark -
#pragma mark _os_object
void
_dispatch_objc_retain(dispatch_object_t dou)
{
- _os_objc_gc_retain(dou);
- return (void)[dou retain];
+ return (void)os_retain(dou);
}
void
_dispatch_objc_release(dispatch_object_t dou)
{
- _os_objc_gc_release(dou);
- return [dou release];
+ return os_release(dou);
}
void
DISPATCH_CLASS_IMPL(operation)
DISPATCH_CLASS_IMPL(disk)
+@implementation OS_OBJECT_CLASS(voucher)
+DISPATCH_OBJC_LOAD()
+
+- (id)init {
+ self = [super init];
+ [self release];
+ self = nil;
+ return self;
+}
+
+- (void)_xref_dispose {
+ return _voucher_xref_dispose(self); // calls _os_object_release_internal()
+}
+
+- (void)_dispose {
+ return _voucher_dispose(self); // calls _os_object_dealloc()
+}
+
+- (NSString *)debugDescription {
+ Class nsstring = objc_lookUpClass("NSString");
+ if (!nsstring) return nil;
+ char buf[2048];
+ _voucher_debug(self, buf, sizeof(buf));
+ return [nsstring stringWithFormat:
+ [nsstring stringWithUTF8String:"<%s: %s>"],
+ class_getName([self class]), buf];
+}
+
+@end
+
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+@implementation OS_OBJECT_CLASS(voucher_recipe)
+DISPATCH_OBJC_LOAD()
+
+- (id)init {
+ self = [super init];
+ [self release];
+ self = nil;
+ return self;
+}
+
+- (void)_dispose {
+
+}
+
+- (NSString *)debugDescription {
+ return nil; // TODO: voucher_recipe debugDescription
+}
+
+@end
+#endif
+
#pragma mark -
#pragma mark dispatch_autorelease_pool
#pragma mark dispatch_client_callout
// Abort on uncaught exceptions thrown from client callouts rdar://8577499
-#if DISPATCH_USE_CLIENT_CALLOUT && !__arm__
+#if DISPATCH_USE_CLIENT_CALLOUT && !__USING_SJLJ_EXCEPTIONS__
// On platforms with zero-cost exceptions, use a compiler-generated catch-all
// exception handler.
#endif // DISPATCH_USE_CLIENT_CALLOUT
+#pragma mark -
+#pragma mark _dispatch_block_create
+
+// The compiler hides the name of the function it generates, and changes it if
+// we try to reference it directly, but the linker still sees it.
+extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *)
+		asm("____dispatch_block_create_block_invoke");
+void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE;
+
+// Wrap `block` in a new heap block carrying the private data
+// (flags, voucher, priority) that _dispatch_block_invoke() consumes.
+dispatch_block_t
+_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher,
+		pthread_priority_t pri, dispatch_block_t block)
+{
+	dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902
+	struct dispatch_block_private_data_s dbpds =
+			DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, pri, copy_block);
+	dispatch_block_t new_block = _dispatch_Block_copy(^{
+		// Capture object references, which retains copy_block and voucher.
+		// All retained objects must be captured by the *block*. We
+		// cannot borrow any references, because the block might be
+		// called zero or several times, so Block_release() is the
+		// only place that can release retained objects.
+		(void)copy_block;
+		(void)voucher;
+		_dispatch_block_invoke(&dbpds);
+	});
+	// Balance the initial copy; the wrapper block now owns copy_block.
+	Block_release(copy_block);
+	return new_block;
+}
+
#endif // USE_OBJC
struct dispatch_queue_s *do_targetq; \
void *do_ctxt; \
void *do_finalizer; \
- unsigned int do_suspend_cnt;
+ unsigned int volatile do_suspend_cnt;
#define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT
// "word and bit" must be a power of two to be safely subtracted
size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf,
size_t bufsiz);
void *_dispatch_alloc(const void *vtable, size_t size);
-void _dispatch_retain(dispatch_object_t dou);
-void _dispatch_release(dispatch_object_t dou);
void _dispatch_xref_dispose(dispatch_object_t dou);
void _dispatch_dispose(dispatch_object_t dou);
#if DISPATCH_COCOA_COMPAT
#undef dispatch_once_f
-struct _dispatch_once_waiter_s {
+typedef struct _dispatch_once_waiter_s {
	volatile struct _dispatch_once_waiter_s *volatile dow_next;
	_dispatch_thread_semaphore_t dow_sema;
+	// Thread running the once-initializer; lets waiters start a QoS
+	// override on it while they block (see dispatch_once_f).
+	mach_port_t dow_thread;
+} *_dispatch_once_waiter_t;
-#define DISPATCH_ONCE_DONE ((struct _dispatch_once_waiter_s *)~0l)
+#define DISPATCH_ONCE_DONE ((_dispatch_once_waiter_t)~0l)
#ifdef __BLOCKS__
void
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
- struct _dispatch_once_waiter_s * volatile *vval =
- (struct _dispatch_once_waiter_s**)val;
- struct _dispatch_once_waiter_s dow = { NULL, 0 };
- struct _dispatch_once_waiter_s *tail, *tmp;
+ _dispatch_once_waiter_t volatile *vval = (_dispatch_once_waiter_t*)val;
+ struct _dispatch_once_waiter_s dow = { NULL, 0, MACH_PORT_NULL };
+ _dispatch_once_waiter_t tail = &dow, next, tmp;
_dispatch_thread_semaphore_t sema;
- if (dispatch_atomic_cmpxchg(vval, NULL, &dow, acquire)) {
+ if (dispatch_atomic_cmpxchg(vval, NULL, tail, acquire)) {
+ dow.dow_thread = _dispatch_thread_port();
_dispatch_client_callout(ctxt, func);
// The next barrier must be long and strong.
dispatch_atomic_maximally_synchronizing_barrier();
// above assumed to contain release barrier
- tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed);
- tail = &dow;
- while (tail != tmp) {
- while (!tmp->dow_next) {
- dispatch_hardware_pause();
- }
- sema = tmp->dow_sema;
- tmp = (struct _dispatch_once_waiter_s*)tmp->dow_next;
+ next = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed);
+ while (next != tail) {
+ _dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next);
+ sema = next->dow_sema;
+ next = tmp;
_dispatch_thread_semaphore_signal(sema);
}
} else {
dow.dow_sema = _dispatch_get_thread_semaphore();
- tmp = *vval;
+ next = *vval;
for (;;) {
- if (tmp == DISPATCH_ONCE_DONE) {
+ if (next == DISPATCH_ONCE_DONE) {
break;
}
- if (dispatch_atomic_cmpxchgvw(vval, tmp, &dow, &tmp, release)) {
- dow.dow_next = tmp;
+ if (dispatch_atomic_cmpxchgvw(vval, next, tail, &next, release)) {
+ dow.dow_thread = next->dow_thread;
+ dow.dow_next = next;
+ if (dow.dow_thread) {
+ pthread_priority_t pp = _dispatch_get_priority();
+ _dispatch_thread_override_start(dow.dow_thread, pp);
+ }
_dispatch_thread_semaphore_wait(dow.dow_sema);
+ if (dow.dow_thread) {
+ _dispatch_thread_override_end(dow.dow_thread);
+ }
break;
}
}
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL
#define DISPATCH_USE_PTHREAD_POOL 1
#endif
-#if HAVE_PTHREAD_WORKQUEUES && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \
+#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) \
+ && !defined(DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK)
+#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1
+#endif
+#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK && \
+ !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \
!defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK)
#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1
#endif
+#if HAVE_PTHREAD_WORKQUEUE_QOS && !DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0
+#endif
#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \
!DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
#define pthread_workqueue_t void*
static void _dispatch_cache_cleanup(void *value);
static void _dispatch_async_f_redirect(dispatch_queue_t dq,
- dispatch_continuation_t dc);
+ dispatch_continuation_t dc, pthread_priority_t pp);
static void _dispatch_queue_cleanup(void *ctxt);
static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq,
unsigned int n);
static inline void _dispatch_queue_wakeup_global(dispatch_queue_t dq);
static inline _dispatch_thread_semaphore_t
_dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq);
+static inline bool _dispatch_queue_prepare_override(dispatch_queue_t dq,
+ dispatch_queue_t tq, pthread_priority_t p);
+static inline void _dispatch_queue_push_override(dispatch_queue_t dq,
+ dispatch_queue_t tq, pthread_priority_t p);
#if HAVE_PTHREAD_WORKQUEUES
-static void _dispatch_worker_thread3(void *context);
+static void _dispatch_worker_thread4(void *context);
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+static void _dispatch_worker_thread3(pthread_priority_t priority);
+#endif
#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
static void _dispatch_worker_thread2(int priority, int options, void *context);
#endif
static void _dispatch_runloop_queue_port_dispose(dispatch_queue_t dq);
#endif
+static void _dispatch_root_queues_init(void *context);
+static dispatch_once_t _dispatch_root_queues_pred;
+
#pragma mark -
#pragma mark dispatch_root_queue
+// Per-root-queue state for the pthread (non-workqueue) thread pool.
+struct dispatch_pthread_root_queue_context_s {
+	pthread_attr_t dpq_thread_attr; // attributes used to spawn pool threads
+	dispatch_block_t dpq_thread_configure; // per name: run to configure new pool threads
+	struct dispatch_semaphore_s dpq_thread_mediator; // mediates pool thread wakeups
+};
+typedef struct dispatch_pthread_root_queue_context_s *
+		dispatch_pthread_root_queue_context_t;
+
#if DISPATCH_ENABLE_THREAD_POOL
-static struct dispatch_semaphore_s _dispatch_thread_mediator[] = {
- [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
- [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
- [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
- [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
- [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
- [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
- [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
- [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {
- .do_vtable = DISPATCH_VTABLE(semaphore),
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
- },
+static struct dispatch_pthread_root_queue_context_s
+ _dispatch_pthread_root_queue_contexts[] = {
+ [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
+ [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {
+ .dpq_thread_mediator = {
+ .do_vtable = DISPATCH_VTABLE(semaphore),
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+ }},
};
#endif
struct {
unsigned int volatile dgq_pending;
#if HAVE_PTHREAD_WORKQUEUES
+ qos_class_t dgq_qos;
int dgq_wq_priority, dgq_wq_options;
#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL
pthread_workqueue_t dgq_kworkqueue;
#endif // HAVE_PTHREAD_WORKQUEUES
#if DISPATCH_USE_PTHREAD_POOL
void *dgq_ctxt;
- dispatch_semaphore_t dgq_thread_mediator;
uint32_t volatile dgq_thread_pool_size;
#endif
};
+// Per-root-queue scheduling parameters: QoS class, legacy workqueue
+// priority/options, and (for the pthread pool) the pool context pointer.
DISPATCH_CACHELINE_ALIGN
static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = {
-	[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{
+#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE,
+		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+		.dgq_wq_options = 0,
+#endif
+#if DISPATCH_ENABLE_THREAD_POOL
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS],
+#endif
+	}}},
+	[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{
+#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE,
+		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
+#endif
+#if DISPATCH_ENABLE_THREAD_POOL
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT],
+#endif
+	}}},
+	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{
+#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND,
+		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+		.dgq_wq_options = 0,
+#endif
+#if DISPATCH_ENABLE_THREAD_POOL
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS],
+#endif
+	}}},
+	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{
+#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND,
+		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
+#endif
+#if DISPATCH_ENABLE_THREAD_POOL
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT],
+#endif
+	}}},
+	[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{
#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_UTILITY,
		.dgq_wq_priority = WORKQ_LOW_PRIOQUEUE,
		.dgq_wq_options = 0,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS],
#endif
	}}},
-	[DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{
#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_UTILITY,
		.dgq_wq_priority = WORKQ_LOW_PRIOQUEUE,
		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT],
#endif
	}}},
-	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{
#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT,
		.dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE,
		.dgq_wq_options = 0,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS],
#endif
	}}},
-	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{
#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT,
		.dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE,
		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],
#endif
	}}},
-	[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{
#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED,
		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
		.dgq_wq_options = 0,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS],
#endif
	}}},
-	[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{
#if HAVE_PTHREAD_WORKQUEUES
+		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED,
		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT],
#endif
	}}},
-	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{
#if HAVE_PTHREAD_WORKQUEUES
-		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE,
+		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
		.dgq_wq_options = 0,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS],
#endif
	}}},
-	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {{{
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{
#if HAVE_PTHREAD_WORKQUEUES
-		.dgq_wq_priority = WORKQ_BG_PRIOQUEUE,
+		.dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE,
+		.dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE,
		.dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT,
#endif
#if DISPATCH_ENABLE_THREAD_POOL
-		.dgq_thread_mediator = &_dispatch_thread_mediator[
-				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY],
+		.dgq_ctxt = &_dispatch_pthread_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT],
#endif
	}}},
};
// dq_running is set to 2 so that barrier operations go through the slow path
+// The global root queues, two per QoS class (plain + overcommit variant),
+// each pointing at its matching _dispatch_root_queue_contexts entry.
DISPATCH_CACHELINE_ALIGN
struct dispatch_queue_s _dispatch_root_queues[] = {
-	[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY],
-		.dq_label = "com.apple.root.low-priority",
+				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS],
+		.dq_label = "com.apple.root.maintenance-qos",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 4,
	},
-	[DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY],
-		.dq_label = "com.apple.root.low-overcommit-priority",
+				DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT],
+		.dq_label = "com.apple.root.maintenance-qos.overcommit",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 5,
	},
-	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY],
-		.dq_label = "com.apple.root.default-priority",
+				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS],
+		.dq_label = "com.apple.root.background-qos",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 6,
	},
-	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
-		.dq_label = "com.apple.root.default-overcommit-priority",
+				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT],
+		.dq_label = "com.apple.root.background-qos.overcommit",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 7,
	},
-	[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY],
-		.dq_label = "com.apple.root.high-priority",
+				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS],
+		.dq_label = "com.apple.root.utility-qos",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 8,
	},
-	[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY],
-		.dq_label = "com.apple.root.high-overcommit-priority",
+				DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT],
+		.dq_label = "com.apple.root.utility-qos.overcommit",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 9,
	},
-	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY],
-		.dq_label = "com.apple.root.background-priority",
+				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS],
+		.dq_label = "com.apple.root.default-qos",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 10,
	},
-	[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {
+	[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {
		.do_vtable = DISPATCH_VTABLE(queue_root),
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[
-				DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY],
-		.dq_label = "com.apple.root.background-overcommit-priority",
+				DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],
+		.dq_label = "com.apple.root.default-qos.overcommit",
		.dq_running = 2,
-		.dq_width = UINT32_MAX,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
		.dq_serialnum = 11,
	},
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {
+		.do_vtable = DISPATCH_VTABLE(queue_root),
+		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
+		.do_ctxt = &_dispatch_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS],
+		.dq_label = "com.apple.root.user-initiated-qos",
+		.dq_running = 2,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+		.dq_serialnum = 12,
+	},
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {
+		.do_vtable = DISPATCH_VTABLE(queue_root),
+		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
+		.do_ctxt = &_dispatch_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT],
+		.dq_label = "com.apple.root.user-initiated-qos.overcommit",
+		.dq_running = 2,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+		.dq_serialnum = 13,
+	},
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {
+		.do_vtable = DISPATCH_VTABLE(queue_root),
+		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
+		.do_ctxt = &_dispatch_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS],
+		.dq_label = "com.apple.root.user-interactive-qos",
+		.dq_running = 2,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+		.dq_serialnum = 14,
+	},
+	[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {
+		.do_vtable = DISPATCH_VTABLE(queue_root),
+		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
+		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
+		.do_ctxt = &_dispatch_root_queue_contexts[
+				DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT],
+		.dq_label = "com.apple.root.user-interactive-qos.overcommit",
+		.dq_running = 2,
+		.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+		.dq_serialnum = 15,
+	},
};
-#if HAVE_PTHREAD_WORKQUEUES
+#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+// Maps legacy workqueue (priority, overcommit-option) pairs to root queues
+// for the pthread_workqueue_setdispatch_np callback path.
static const dispatch_queue_t _dispatch_wq2root_queues[][2] = {
+	[WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[
+			DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS],
+	[WORKQ_BG_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] =
+			&_dispatch_root_queues[
+			DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT],
	[WORKQ_LOW_PRIOQUEUE][0] = &_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY],
+			DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS],
	[WORKQ_LOW_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] =
			&_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY],
+			DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT],
	[WORKQ_DEFAULT_PRIOQUEUE][0] = &_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY],
+			DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS],
	[WORKQ_DEFAULT_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] =
			&_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY],
+			DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT],
	[WORKQ_HIGH_PRIOQUEUE][0] = &_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY],
+			DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS],
	[WORKQ_HIGH_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] =
			&_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY],
-	[WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY],
-	[WORKQ_BG_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] =
-			&_dispatch_root_queues[
-			DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY],
+			DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT],
+};
+#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
+
+#define DISPATCH_PRIORITY_COUNT 5
+
+// Indexes into the legacy-priority translation tables
+// (_dispatch_priority2qos / _dispatch_priority2wq).
+enum {
+	// No DISPATCH_PRIORITY_IDX_MAINTENANCE define because there is no legacy
+	// maintenance priority
+	DISPATCH_PRIORITY_IDX_BACKGROUND = 0,
+	DISPATCH_PRIORITY_IDX_NON_INTERACTIVE,
+	DISPATCH_PRIORITY_IDX_LOW,
+	DISPATCH_PRIORITY_IDX_DEFAULT,
+	DISPATCH_PRIORITY_IDX_HIGH,
+};
+
+// Legacy dispatch priority -> QoS class. Deliberately not const: the
+// entries are overwritten at init from the kernel's own mapping when QoS
+// workqueues are available (see _dispatch_root_queues_init_qos).
+static qos_class_t _dispatch_priority2qos[] = {
+	[DISPATCH_PRIORITY_IDX_BACKGROUND] = _DISPATCH_QOS_CLASS_BACKGROUND,
+	[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = _DISPATCH_QOS_CLASS_UTILITY,
+	[DISPATCH_PRIORITY_IDX_LOW] = _DISPATCH_QOS_CLASS_UTILITY,
+	[DISPATCH_PRIORITY_IDX_DEFAULT] = _DISPATCH_QOS_CLASS_DEFAULT,
+	[DISPATCH_PRIORITY_IDX_HIGH] = _DISPATCH_QOS_CLASS_USER_INITIATED,
};
-#endif // HAVE_PTHREAD_WORKQUEUES
+
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+// Legacy dispatch priority -> workqueue priority; used at init to ask
+// pthread for the kernel's QoS encoding of each legacy level.
+static const int _dispatch_priority2wq[] = {
+	[DISPATCH_PRIORITY_IDX_BACKGROUND] = WORKQ_BG_PRIOQUEUE,
+	[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = WORKQ_NON_INTERACTIVE_PRIOQUEUE,
+	[DISPATCH_PRIORITY_IDX_LOW] = WORKQ_LOW_PRIOQUEUE,
+	[DISPATCH_PRIORITY_IDX_DEFAULT] = WORKQ_DEFAULT_PRIOQUEUE,
+	[DISPATCH_PRIORITY_IDX_HIGH] = WORKQ_HIGH_PRIOQUEUE,
+};
+#endif
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
static struct dispatch_queue_s _dispatch_mgr_root_queue;
if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
return NULL;
}
- return _dispatch_get_root_queue(priority,
- flags & DISPATCH_QUEUE_OVERCOMMIT);
+ dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+ _dispatch_root_queues_init);
+ qos_class_t qos;
+ switch (priority) {
+#if !RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+ case _DISPATCH_QOS_CLASS_MAINTENANCE:
+ if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
+ .dq_priority) {
+ // map maintenance to background on old kernel
+ qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND];
+ } else {
+ qos = (qos_class_t)priority;
+ }
+ break;
+#endif // RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+ case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
+ qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND];
+ break;
+ case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
+ qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE];
+ break;
+ case DISPATCH_QUEUE_PRIORITY_LOW:
+ qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_LOW];
+ break;
+ case DISPATCH_QUEUE_PRIORITY_DEFAULT:
+ qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_DEFAULT];
+ break;
+ case DISPATCH_QUEUE_PRIORITY_HIGH:
+ qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH];
+ break;
+ case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
+#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+ if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS]
+ .dq_priority) {
+ qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH];
+ break;
+ }
+#endif
+ // fallthrough
+ default:
+ qos = (qos_class_t)priority;
+ break;
+ }
+ return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_t
_dispatch_get_current_queue(void)
{
-	return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true);
+	// Fall back to the default-QoS root queue (with overcommit set) when
+	// the calling thread has no current dispatch queue.
+	return _dispatch_queue_get_current() ?:
+			_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
}
dispatch_queue_t
#pragma mark -
#pragma mark dispatch_init
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+// Published by QoS init: whether setting QoS classes is enabled, plus the
+// encoded background / user-initiated base priorities.
+int _dispatch_set_qos_class_enabled;
+pthread_priority_t _dispatch_background_priority;
+pthread_priority_t _dispatch_user_initiated_priority;
+
+// Stamp the root queues (and the main queue) with encoded pthread
+// priorities derived from the kernel-supported QoS feature set.
static void
-_dispatch_hw_config_init(void)
+_dispatch_root_queues_init_qos(int supported)
{
-	_dispatch_hw_config.cc_max_active = _dispatch_get_activecpu();
-	_dispatch_hw_config.cc_max_logical = _dispatch_get_logicalcpu_max();
-	_dispatch_hw_config.cc_max_physical = _dispatch_get_physicalcpu_max();
+	pthread_priority_t p;
+	qos_class_t qos;
+	unsigned int i;
+	// Re-derive the legacy-priority -> QoS table from pthread's own
+	// workqueue encoding so both layers agree.
+	for (i = 0; i < DISPATCH_PRIORITY_COUNT; i++) {
+		p = _pthread_qos_class_encode_workqueue(_dispatch_priority2wq[i], 0);
+		qos = _pthread_qos_class_decode(p, NULL, NULL);
+		dispatch_assert(qos != _DISPATCH_QOS_CLASS_UNSPECIFIED);
+		_dispatch_priority2qos[i] = qos;
+	}
+	for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
+		qos = _dispatch_root_queue_contexts[i].dgq_qos;
+		if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE &&
+				!(supported & WORKQ_FEATURE_MAINTENANCE)) {
+			// Kernel lacks maintenance QoS: leave these queues unstamped.
+			continue;
+		}
+		// Odd indexes are the overcommit variants of each QoS class.
+		unsigned long flags = i & 1 ? _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0;
+		flags |= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
+		if (i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS ||
+				i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT) {
+			flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG;
+		}
+		p = _pthread_qos_class_encode(qos, 0, flags);
+		_dispatch_root_queues[i].dq_priority = p;
+	}
+	// The main queue adopts the main thread's QoS class.
+	p = _pthread_qos_class_encode(qos_class_main(), 0, 0);
+	_dispatch_main_q.dq_priority = p;
+	_dispatch_queue_set_override_priority(&_dispatch_main_q);
+	_dispatch_background_priority = _dispatch_root_queues[
+			DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS].dq_priority &
+			~_PTHREAD_PRIORITY_FLAGS_MASK;
+	_dispatch_user_initiated_priority = _dispatch_root_queues[
+			DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS].dq_priority &
+			~_PTHREAD_PRIORITY_FLAGS_MASK;
+	// Escape hatch: allow disabling QoS adoption via the environment.
+	if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) {
+		_dispatch_set_qos_class_enabled = 1;
+	}
}
+#endif
static inline bool
_dispatch_root_queues_init_workq(void)
disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ"));
#endif
int r;
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ bool disable_qos = false;
+#if DISPATCH_DEBUG
+ disable_qos = slowpath(getenv("LIBDISPATCH_DISABLE_QOS"));
+#endif
+ if (!disable_qos && !disable_wq) {
+ r = _pthread_workqueue_supported();
+ int supported = r;
+ if (r & WORKQ_FEATURE_FINEPRIO) {
+ r = _pthread_workqueue_init(_dispatch_worker_thread3,
+ offsetof(struct dispatch_queue_s, dq_serialnum), 0);
+ result = !r;
+ if (result) _dispatch_root_queues_init_qos(supported);
+ }
+ }
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
- if (!disable_wq) {
+ if (!result && !disable_wq) {
#if PTHREAD_WORKQUEUE_SPI_VERSION >= 20121218
pthread_workqueue_setdispatchoffset_np(
offsetof(struct dispatch_queue_s, dq_serialnum));
dispatch_root_queue_context_t qc;
qc = &_dispatch_root_queue_contexts[i];
#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
- if (!disable_wq
-#if DISPATCH_NO_BG_PRIORITY
- && (qc->dgq_wq_priority != WORKQ_BG_PRIOQUEUE)
-#endif
- ) {
+ if (!disable_wq) {
r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr,
qc->dgq_wq_priority);
(void)dispatch_assume_zero(r);
#if DISPATCH_USE_PTHREAD_POOL
static inline void
_dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc,
- bool overcommit)
+ uint8_t pool_size, bool overcommit)
{
- qc->dgq_thread_pool_size = overcommit ? MAX_PTHREAD_COUNT :
- _dispatch_hw_config.cc_max_active;
+ dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
+ uint32_t thread_pool_size = overcommit ? MAX_PTHREAD_COUNT :
+ dispatch_hw_config(active_cpus);
+ if (slowpath(pool_size) && pool_size < thread_pool_size) {
+ thread_pool_size = pool_size;
+ }
+ qc->dgq_thread_pool_size = thread_pool_size;
+ if (qc->dgq_qos) {
+ (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr));
+ (void)dispatch_assume_zero(pthread_attr_setdetachstate(
+ &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED));
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(
+ &pqc->dpq_thread_attr, qc->dgq_qos, 0));
+#endif
+ }
#if USE_MACH_SEM
// override the default FIFO behavior for the pool semaphores
kern_return_t kr = semaphore_create(mach_task_self(),
- &qc->dgq_thread_mediator->dsema_port, SYNC_POLICY_LIFO, 0);
+ &pqc->dpq_thread_mediator.dsema_port, SYNC_POLICY_LIFO, 0);
DISPATCH_VERIFY_MIG(kr);
(void)dispatch_assume_zero(kr);
- (void)dispatch_assume(qc->dgq_thread_mediator->dsema_port);
+ (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port);
#elif USE_POSIX_SEM
/* XXXRW: POSIX semaphores don't support LIFO? */
- int ret = sem_init(&qc->dgq_thread_mediator->dsema_sem, 0, 0);
+ int ret = sem_init(&pqc->dpq_thread_mediator.dsema_sem), 0, 0);
(void)dispatch_assume_zero(ret);
#endif
}
#endif // DISPATCH_USE_PTHREAD_POOL
+static dispatch_once_t _dispatch_root_queues_pred;
+
static void
_dispatch_root_queues_init(void *context DISPATCH_UNUSED)
{
}
#endif
_dispatch_root_queue_init_pthread_pool(
- &_dispatch_root_queue_contexts[i], overcommit);
+ &_dispatch_root_queue_contexts[i], 0, overcommit);
}
#else
DISPATCH_CRASH("Root queue initialization failed");
#endif // DISPATCH_ENABLE_THREAD_POOL
}
-
}
#define countof(x) (sizeof(x) / sizeof(x[0]))
void
libdispatch_init(void)
{
- dispatch_assert(DISPATCH_QUEUE_PRIORITY_COUNT == 4);
- dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 8);
+ dispatch_assert(DISPATCH_QUEUE_QOS_COUNT == 6);
+ dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 12);
dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW ==
-DISPATCH_QUEUE_PRIORITY_HIGH);
DISPATCH_ROOT_QUEUE_COUNT);
dispatch_assert(countof(_dispatch_root_queue_contexts) ==
DISPATCH_ROOT_QUEUE_COUNT);
-#if HAVE_PTHREAD_WORKQUEUES
+ dispatch_assert(countof(_dispatch_priority2qos) ==
+ DISPATCH_PRIORITY_COUNT);
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ dispatch_assert(countof(_dispatch_priority2wq) ==
+ DISPATCH_PRIORITY_COUNT);
+#endif
+#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
dispatch_assert(sizeof(_dispatch_wq2root_queues) /
sizeof(_dispatch_wq2root_queues[0][0]) ==
- DISPATCH_ROOT_QUEUE_COUNT);
+ WORKQ_NUM_PRIOQUEUE * 2);
#endif
#if DISPATCH_ENABLE_THREAD_POOL
- dispatch_assert(countof(_dispatch_thread_mediator) ==
+ dispatch_assert(countof(_dispatch_pthread_root_queue_contexts) ==
DISPATCH_ROOT_QUEUE_COUNT);
#endif
+ dispatch_assert(offsetof(struct dispatch_continuation_s, do_next) ==
+ offsetof(struct dispatch_object_s, do_next));
dispatch_assert(sizeof(struct dispatch_apply_s) <=
DISPATCH_CONTINUATION_SIZE);
dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE
DISPATCH_CACHELINE_SIZE == 0);
_dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup);
-#if !DISPATCH_USE_OS_SEMAPHORE_CACHE
- _dispatch_thread_key_create(&dispatch_sema4_key,
- (void (*)(void *))_dispatch_thread_semaphore_dispose);
-#endif
+ _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup);
_dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup);
_dispatch_thread_key_create(&dispatch_io_key, NULL);
_dispatch_thread_key_create(&dispatch_apply_key, NULL);
-#if DISPATCH_PERF_MON
+ _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL);
+#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
_dispatch_thread_key_create(&dispatch_bcounter_key, NULL);
#endif
+#if !DISPATCH_USE_OS_SEMAPHORE_CACHE
+ _dispatch_thread_key_create(&dispatch_sema4_key,
+ (void (*)(void *))_dispatch_thread_semaphore_dispose);
+#endif
#if DISPATCH_USE_RESOLVERS // rdar://problem/8541707
_dispatch_main_q.do_targetq = &_dispatch_root_queues[
- DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY];
+ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT];
#endif
_dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q);
_dispatch_hw_config_init();
_dispatch_vtable_init();
_os_object_init();
+ _voucher_init();
_dispatch_introspection_init();
}
+#if HAVE_MACH
+// dispatch_once predicate and cached port for _dispatch_get_mach_host_port();
+// both are reset in the atfork child handler below.
+static dispatch_once_t _dispatch_mach_host_port_pred;
+static mach_port_t _dispatch_mach_host_port;
+
+// dispatch_once callout: resolve and cache the unprivileged host port.
+static void
+_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED)
+{
+	kern_return_t kr;
+	mach_port_t mp, mhp = mach_host_self();
+	kr = host_get_host_port(mhp, &mp);
+	DISPATCH_VERIFY_MIG(kr);
+	if (!kr) {
+		// mach_host_self returned the HOST_PRIV port
+		// swap to the unprivileged port and drop the privileged one
+		kr = mach_port_deallocate(mach_task_self(), mhp);
+		DISPATCH_VERIFY_MIG(kr);
+		(void)dispatch_assume_zero(kr);
+		mhp = mp;
+	} else if (kr != KERN_INVALID_ARGUMENT) {
+		(void)dispatch_assume_zero(kr);
+	}
+	if (!dispatch_assume(mhp)) {
+		DISPATCH_CRASH("Could not get unprivileged host port");
+	}
+	_dispatch_mach_host_port = mhp;
+}
+
+// Return the cached unprivileged host port, resolving it on first use.
+mach_port_t
+_dispatch_get_mach_host_port(void)
+{
+	dispatch_once_f(&_dispatch_mach_host_port_pred, NULL,
+			_dispatch_mach_host_port_init);
+	return _dispatch_mach_host_port;
+}
+#endif
+
DISPATCH_EXPORT DISPATCH_NOTHROW
void
dispatch_atfork_child(void)
void *crash = (void *)0x100;
size_t i;
+#if HAVE_MACH
+ _dispatch_mach_host_port_pred = 0;
+ _dispatch_mach_host_port = MACH_VOUCHER_NULL;
+#endif
+ _voucher_atfork_child();
if (_dispatch_safe_fork) {
return;
}
}
}
+#pragma mark -
+#pragma mark dispatch_queue_attr_t
+
+// Validate a (QoS class, relative priority) pair as accepted by the public
+// dispatch QoS APIs: the class must be one of the known qos_class_t values
+// and the relative priority must lie in [QOS_MIN_RELATIVE_PRIORITY, 0].
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority)
+{
+	qos_class_t qos = (qos_class_t)qos_class;
+	switch (qos) {
+	case _DISPATCH_QOS_CLASS_MAINTENANCE:
+	case _DISPATCH_QOS_CLASS_BACKGROUND:
+	case _DISPATCH_QOS_CLASS_UTILITY:
+	case _DISPATCH_QOS_CLASS_DEFAULT:
+	case _DISPATCH_QOS_CLASS_USER_INITIATED:
+	case _DISPATCH_QOS_CLASS_USER_INTERACTIVE:
+	case _DISPATCH_QOS_CLASS_UNSPECIFIED:
+		break;
+	default:
+		return false;
+	}
+	if (relative_priority > 0 || relative_priority < QOS_MIN_RELATIVE_PRIORITY){
+		return false;
+	}
+	return true;
+}
+
+// Map a _DISPATCH_QOS_CLASS_* value to its index in the static
+// _dispatch_queue_attrs table.
+#define DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(qos) \
+		[_DISPATCH_QOS_CLASS_##qos] = DQA_INDEX_QOS_CLASS_##qos
+
+static const
+_dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = {
+	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UNSPECIFIED),
+	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(MAINTENANCE),
+	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(BACKGROUND),
+	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UTILITY),
+	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(DEFAULT),
+	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INITIATED),
+	DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INTERACTIVE),
+};
+
+#define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \
+ (overcommit ? DQA_INDEX_OVERCOMMIT : DQA_INDEX_NON_OVERCOMMIT)
+
+#define DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent) \
+ (concurrent ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL)
+
+#define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio))
+
+#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)])
+
+static inline dispatch_queue_attr_t
+_dispatch_get_queue_attr(qos_class_t qos, int prio, bool overcommit,
+ bool concurrent)
+{
+ return (dispatch_queue_attr_t)&_dispatch_queue_attrs
+ [DISPATCH_QUEUE_ATTR_QOS2IDX(qos)]
+ [DISPATCH_QUEUE_ATTR_PRIO2IDX(prio)]
+ [DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit)]
+ [DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent)];
+}
+
+// Public API: derive a queue attribute with the requested QoS class and
+// relative priority.  Returns NULL for invalid QoS arguments; crashes when
+// dqa is not a queue attribute object.  A NULL dqa means the default one.
+dispatch_queue_attr_t
+dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa,
+		dispatch_qos_class_t qos_class, int relative_priority)
+{
+	if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL;
+	if (!slowpath(dqa)) {
+		dqa = _dispatch_get_queue_attr(0, 0, false, false);
+	} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
+		DISPATCH_CLIENT_CRASH("Invalid queue attribute");
+	}
+	return _dispatch_get_queue_attr(qos_class, relative_priority,
+			dqa->dqa_overcommit, dqa->dqa_concurrent);
+}
+
+// Public API: derive a queue attribute with the overcommit flag changed,
+// preserving the QoS and concurrency of dqa (a NULL dqa means the default).
+dispatch_queue_attr_t
+dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa,
+		bool overcommit)
+{
+	if (!slowpath(dqa)) {
+		dqa = _dispatch_get_queue_attr(0, 0, false, false);
+	} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
+		DISPATCH_CLIENT_CRASH("Invalid queue attribute");
+	}
+	return _dispatch_get_queue_attr(dqa->dqa_qos_class,
+			dqa->dqa_relative_priority, overcommit, dqa->dqa_concurrent);
+}
+
#pragma mark -
#pragma mark dispatch_queue_t
// 1 - main_q
// 2 - mgr_q
// 3 - mgr_root_q
-// 4,5,6,7,8,9,10,11 - global queues
+// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
// we use 'xadd' on Intel, so the initial value == next assigned
-unsigned long volatile _dispatch_queue_serial_numbers = 12;
+unsigned long volatile _dispatch_queue_serial_numbers = 16;
dispatch_queue_t
-dispatch_queue_create_with_target(const char *label,
- dispatch_queue_attr_t attr, dispatch_queue_t tq)
+dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
+ dispatch_queue_t tq)
{
- dispatch_queue_t dq;
-
- dq = _dispatch_alloc(DISPATCH_VTABLE(queue),
+#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+ // Be sure the root queue priorities are set
+ dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+ _dispatch_root_queues_init);
+#endif
+ bool disallow_tq = (slowpath(dqa) && dqa != DISPATCH_QUEUE_CONCURRENT);
+ if (!slowpath(dqa)) {
+ dqa = _dispatch_get_queue_attr(0, 0, false, false);
+ } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
+ DISPATCH_CLIENT_CRASH("Invalid queue attribute");
+ }
+ dispatch_queue_t dq = _dispatch_alloc(DISPATCH_VTABLE(queue),
sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD);
-
_dispatch_queue_init(dq);
if (label) {
dq->dq_label = strdup(label);
}
+ qos_class_t qos = dqa->dqa_qos_class;
+ bool overcommit = dqa->dqa_overcommit;
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ dq->dq_priority = _pthread_qos_class_encode(qos, dqa->dqa_relative_priority,
+ overcommit);
+#endif
+ if (dqa->dqa_concurrent) {
+ dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX;
+ } else {
+ // Default serial queue target queue is overcommit!
+ overcommit = true;
+ }
+ if (!tq) {
+ if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) {
+ qos = _DISPATCH_QOS_CLASS_DEFAULT;
+ }
+#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+ if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE &&
+ !_dispatch_root_queues[
+ DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) {
+ qos = _DISPATCH_QOS_CLASS_USER_INITIATED;
+ }
+#endif
+ bool maintenance_fallback = false;
+#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+ maintenance_fallback = true;
+#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+ if (maintenance_fallback) {
+ if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE &&
+ !_dispatch_root_queues[
+ DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) {
+ qos = _DISPATCH_QOS_CLASS_BACKGROUND;
+ }
+ }
- if (attr == DISPATCH_QUEUE_CONCURRENT) {
- dq->dq_width = UINT32_MAX;
- if (!tq) {
- tq = _dispatch_get_root_queue(0, false);
+ tq = _dispatch_get_root_queue(qos, overcommit);
+ if (slowpath(!tq)) {
+ DISPATCH_CLIENT_CRASH("Invalid queue attribute");
}
} else {
- if (!tq) {
- // Default target queue is overcommit!
- tq = _dispatch_get_root_queue(0, true);
- }
- if (slowpath(attr)) {
- dispatch_debug_assert(!attr, "Invalid attribute");
+ _dispatch_retain(tq);
+ if (disallow_tq) {
+ // TODO: override target queue's qos/overcommit ?
+ DISPATCH_CLIENT_CRASH("Invalid combination of target queue & "
+ "queue attribute");
}
+ _dispatch_queue_priority_inherit_from_target(dq, tq);
}
+ _dispatch_queue_set_override_priority(dq);
dq->do_targetq = tq;
_dispatch_object_debug(dq, "%s", __func__);
return _dispatch_introspection_queue_create(dq);
return dq->dq_label ? dq->dq_label : "";
}
+// Public API: decode the queue's QoS class and relative priority from its
+// encoded pthread priority; an inherited priority reports as UNSPECIFIED.
+// Without pthread-workqueue QoS support this always reports UNSPECIFIED/0.
+qos_class_t
+dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr)
+{
+	qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED;
+	int relative_priority = 0;
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	pthread_priority_t dqp = dq->dq_priority;
+	if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0;
+	qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL);
+#else
+	(void)dq;
+#endif
+	if (relative_priority_ptr) *relative_priority_ptr = relative_priority;
+	return qos;
+}
+
static void
_dispatch_queue_set_width2(void *ctxt)
{
tmp = (unsigned int)w;
} else switch (w) {
case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS:
- tmp = _dispatch_hw_config.cc_max_physical;
+ tmp = dispatch_hw_config(physical_cpus);
break;
case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS:
- tmp = _dispatch_hw_config.cc_max_active;
+ tmp = dispatch_hw_config(active_cpus);
break;
default:
// fall through
case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS:
- tmp = _dispatch_hw_config.cc_max_logical;
+ tmp = dispatch_hw_config(logical_cpus);
break;
}
+ if (tmp > DISPATCH_QUEUE_WIDTH_MAX / 2) {
+ tmp = DISPATCH_QUEUE_WIDTH_MAX / 2;
+ }
// multiply by two since the running count is inc/dec by two
// (the low bit == barrier)
- dq->dq_width = tmp * 2;
+ dq->dq_width = (typeof(dq->dq_width))(tmp * 2);
_dispatch_object_debug(dq, "%s", __func__);
}
+// Barrier callout for dispatch_set_target_queue(): retargets the current
+// queue to tq.  dq_tqthread serves as ownership hand-off state: contending
+// threads yield to the thread port currently stored there until it is
+// cleared back to MACH_PORT_NULL.
static void
_dispatch_set_target_queue2(void *ctxt)
{
-	dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current();
+	dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(), tq = ctxt;
+	mach_port_t th;
+	while (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, MACH_PORT_NULL,
+			_dispatch_thread_port(), &th, acquire)) {
+		_dispatch_thread_switch(th, DISPATCH_YIELD_THREAD_SWITCH_OPTION,
+				DISPATCH_CONTENTION_USLEEP_START);
+	}
+	_dispatch_queue_priority_inherit_from_target(dq, tq);
	prev_dq = dq->do_targetq;
-	dq->do_targetq = ctxt;
+	dq->do_targetq = tq;
	_dispatch_release(prev_dq);
	_dispatch_object_debug(dq, "%s", __func__);
+	dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, release);
}
void
if (slowpath(!dq)) {
bool is_concurrent_q = (type == _DISPATCH_QUEUE_TYPE &&
slowpath(dou._dq->dq_width > 1));
- dq = _dispatch_get_root_queue(0, !is_concurrent_q);
+ dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
+ !is_concurrent_q);
}
// TODO: put into the vtable
switch(type) {
#pragma mark -
#pragma mark dispatch_pthread_root_queue
-struct dispatch_pthread_root_queue_context_s {
- pthread_attr_t dpq_thread_attr;
- dispatch_block_t dpq_thread_configure;
- struct dispatch_semaphore_s dpq_thread_mediator;
-};
-typedef struct dispatch_pthread_root_queue_context_s *
- dispatch_pthread_root_queue_context_t;
-
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
static struct dispatch_pthread_root_queue_context_s
_dispatch_mgr_root_queue_pthread_context;
.do_ctxt = &_dispatch_mgr_root_queue_context,
.dq_label = "com.apple.root.libdispatch-manager",
.dq_running = 2,
- .dq_width = UINT32_MAX,
+ .dq_width = DISPATCH_QUEUE_WIDTH_MAX,
.dq_serialnum = 3,
};
static struct {
volatile int prio;
+ int default_prio;
int policy;
pthread_t tid;
} _dispatch_mgr_sched;
(void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr,
&_dispatch_mgr_sched.policy));
(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m));
- // high-priority workq threads are at priority 2 above default
- _dispatch_mgr_sched.prio = param.sched_priority + 2;
+ // legacy priority calls allowed when requesting above default priority
+ _dispatch_mgr_sched.default_prio = param.sched_priority;
+ _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio;
}
DISPATCH_NOINLINE
PTHREAD_CREATE_DETACHED));
#if !DISPATCH_DEBUG
(void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024));
+#endif
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ if (_dispatch_set_qos_class_enabled) {
+ qos_class_t qos = qos_class_main();
+ (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0));
+ _dispatch_mgr_q.dq_priority = _pthread_qos_class_encode(qos, 0, 0);
+ _dispatch_queue_set_override_priority(&_dispatch_mgr_q);
+ }
#endif
param.sched_priority = _dispatch_mgr_sched.prio;
- (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m));
+ if (param.sched_priority > _dispatch_mgr_sched.default_prio) {
+ (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m));
+ }
return &_dispatch_mgr_sched.tid;
}
struct sched_param param;
do {
param.sched_priority = _dispatch_mgr_sched.prio;
- (void)dispatch_assume_zero(pthread_setschedparam(
- _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, ¶m));
+ if (param.sched_priority > _dispatch_mgr_sched.default_prio) {
+ (void)dispatch_assume_zero(pthread_setschedparam(
+ _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy,
+ ¶m));
+ }
} while (_dispatch_mgr_sched.prio > param.sched_priority);
}
dispatch_root_queue_context_t qc;
dispatch_pthread_root_queue_context_t pqc;
size_t dqs;
+ uint8_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ?
+ (uint8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0;
- if (slowpath(flags)) {
- return NULL;
- }
dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD;
dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs +
sizeof(struct dispatch_root_queue_context_s) +
dq->do_ctxt = qc;
dq->do_targetq = NULL;
dq->dq_running = 2;
- dq->dq_width = UINT32_MAX;
+ dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX;
pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore);
- qc->dgq_thread_mediator = &pqc->dpq_thread_mediator;
qc->dgq_ctxt = pqc;
#if HAVE_PTHREAD_WORKQUEUES
qc->dgq_kworkqueue = (void*)(~0ul);
#endif
- _dispatch_root_queue_init_pthread_pool(qc, true); // rdar://11352331
+ _dispatch_root_queue_init_pthread_pool(qc, pool_size, true);
if (attr) {
memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t));
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ qos_class_t qos = 0;
+ if (!pthread_attr_get_qos_class_np(&pqc->dpq_thread_attr, &qos, NULL)
+ && qos > _DISPATCH_QOS_CLASS_DEFAULT) {
+ DISPATCH_CLIENT_CRASH("pthread root queues do not support "
+ "explicit QoS attributes");
+ }
+#endif
_dispatch_mgr_priority_raise(&pqc->dpq_thread_attr);
} else {
(void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr));
dispatch_root_queue_context_t qc = dq->do_ctxt;
dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
- _dispatch_semaphore_dispose(qc->dgq_thread_mediator);
+ pthread_attr_destroy(&pqc->dpq_thread_attr);
+ _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator);
if (pqc->dpq_thread_configure) {
Block_release(pqc->dpq_thread_configure);
}
- dq->do_targetq = _dispatch_get_root_queue(0, false);
+ dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
+ false);
#endif
if (dq->dq_label) {
free((void*)dq->dq_label);
TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) {
if (dqs->dqs_destructor) {
dispatch_async_f(_dispatch_get_root_queue(
- DISPATCH_QUEUE_PRIORITY_DEFAULT, false), dqs->dqs_ctxt,
+ _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt,
dqs->dqs_destructor);
}
free(dqs);
sizeof(struct dispatch_queue_specific_queue_s));
_dispatch_queue_init((dispatch_queue_t)dqsq);
dqsq->do_xref_cnt = -1;
- dqsq->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH,
- true);
- dqsq->dq_width = UINT32_MAX;
+ dqsq->do_targetq = _dispatch_get_root_queue(
+ _DISPATCH_QOS_CLASS_USER_INITIATED, true);
+ dqsq->dq_width = DISPATCH_QUEUE_WIDTH_MAX;
dqsq->dq_label = "queue-specific";
TAILQ_INIT(&dqsq->dqsq_contexts);
if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL,
// Destroy previous context for existing key
if (dqs->dqs_destructor) {
dispatch_async_f(_dispatch_get_root_queue(
- DISPATCH_QUEUE_PRIORITY_DEFAULT, false), dqs->dqs_ctxt,
+ _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt,
dqs->dqs_destructor);
}
if (dqsn->dqs_ctxt) {
target->dq_label : "", target, dq->dq_width / 2,
dq->dq_running / 2, dq->dq_running & 1);
if (dq->dq_is_thread_bound) {
- offset += dsnprintf(buf, bufsiz, ", thread = %p ",
+ offset += dsnprintf(buf, bufsiz, ", thread = 0x%x ",
_dispatch_queue_get_bound_thread(dq));
}
return offset;
}
#endif
-#if DISPATCH_PERF_MON
+#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
static OSSpinLock _dispatch_stats_lock;
static struct {
uint64_t time_total;
static void
_dispatch_queue_merge_stats(uint64_t start)
{
- uint64_t avg, delta = _dispatch_absolute_time() - start;
- unsigned long count, bucket;
+ uint64_t delta = _dispatch_absolute_time() - start;
+ unsigned long count;
- count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key);
+ count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key);
_dispatch_thread_setspecific(dispatch_bcounter_key, NULL);
- if (count) {
- avg = delta / count;
- bucket = flsll(avg);
- } else {
- bucket = 0;
- }
+ int bucket = flsl((long)count);
// 64-bit counters on 32-bit require a lock or a queue
OSSpinLockLock(&_dispatch_stats_lock);
dispatch_continuation_t next_dc;
dc = _dispatch_thread_getspecific(dispatch_cache_key);
int cnt;
- if (!dc || (cnt = dc->do_ref_cnt-_dispatch_continuation_cache_limit) <= 0) {
+ if (!dc || (cnt = dc->dc_cache_cnt -
+ _dispatch_continuation_cache_limit) <= 0){
return;
}
do {
(_dispatch_thread_semaphore_t)dc->dc_other);
_dispatch_introspection_queue_item_complete(dou);
} else {
- _dispatch_async_f_redirect(dq, dc);
+ _dispatch_async_f_redirect(dq, dc,
+ _dispatch_queue_get_override_priority(dq));
}
_dispatch_perfmon_workitem_inc();
}
-DISPATCH_ALWAYS_INLINE_NDEBUG
-static inline void
-_dispatch_continuation_pop(dispatch_object_t dou)
-{
- dispatch_continuation_t dc = dou._dc, dc1;
- dispatch_group_t dg;
+#pragma mark -
+#pragma mark dispatch_block_create
- _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou);
- if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
- return dx_invoke(dou._do);
- }
+#if __BLOCKS__
- // Add the item back to the cache before calling the function. This
- // allows the 'hot' continuation to be used for a quick callback.
- //
- // The ccache version is per-thread.
- // Therefore, the object has not been reused yet.
- // This generates better assembly.
- if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
- dc1 = _dispatch_continuation_free_cacheonly(dc);
- } else {
- dc1 = NULL;
+// true iff flags contains only public dispatch_block_flags_t API bits
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_block_flags_valid(dispatch_block_flags_t flags)
+{
+	return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0);
+}
+
+// Canonicalize block flags: NO_VOUCHER/DETACHED additionally set HAS_VOUCHER,
+// and NO_QOS_CLASS/DETACHED additionally set HAS_PRIORITY (presumably so the
+// block's voucher/priority fields are treated as authoritative even when
+// deliberately empty — confirm against _dispatch_block_create).
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_block_flags_t
+_dispatch_block_normalize_flags(dispatch_block_flags_t flags)
+{
+	if (flags & (DISPATCH_BLOCK_NO_VOUCHER|DISPATCH_BLOCK_DETACHED)) {
+		flags |= DISPATCH_BLOCK_HAS_VOUCHER;
+	}
+	if (flags & (DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_DETACHED)) {
+		flags |= DISPATCH_BLOCK_HAS_PRIORITY;
+	}
+	return flags;
+}
+
+// Common factory behind the dispatch_block_create* entry points.  With
+// ASSIGN_CURRENT, snapshots the current voucher and propagated priority for
+// any value the caller did not explicitly provide.
+static inline dispatch_block_t
+_dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags,
+		voucher_t voucher, pthread_priority_t pri, dispatch_block_t block)
+{
+	flags = _dispatch_block_normalize_flags(flags);
+	voucher_t cv = NULL;
+	bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT);
+	if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) {
+		voucher = cv = voucher_copy();
+		flags |= DISPATCH_BLOCK_HAS_VOUCHER;
+	}
+	if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) {
+		pri = _dispatch_priority_propagate();
+		flags |= DISPATCH_BLOCK_HAS_PRIORITY;
+	}
+	dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block);
+	if (cv) _voucher_release(cv); // drop the local copy taken above
+#if DISPATCH_DEBUG
+	dispatch_assert(_dispatch_block_get_data(db));
+#endif
+	return db;
+}
+
+// Public API: create a block object; returns NULL if flags has non-API bits.
+dispatch_block_t
+dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block)
+{
+	if (!_dispatch_block_flags_valid(flags)) return NULL;
+	return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0,
+			block);
+}
+
+// Public API: create a block object carrying an explicit QoS class and
+// relative priority; returns NULL on invalid flags or QoS arguments.
+dispatch_block_t
+dispatch_block_create_with_qos_class(dispatch_block_flags_t flags,
+		dispatch_qos_class_t qos_class, int relative_priority,
+		dispatch_block_t block)
+{
+	if (!_dispatch_block_flags_valid(flags)) return NULL;
+	if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL;
+	flags |= DISPATCH_BLOCK_HAS_PRIORITY;
+	pthread_priority_t pri = 0;
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	pri = _pthread_qos_class_encode(qos_class, relative_priority, 0);
+#endif
+	return _dispatch_block_create_with_voucher_and_priority(flags, NULL,
+			pri, block);
+}
+
+// Public API: create a block object carrying an explicit voucher.
+dispatch_block_t
+dispatch_block_create_with_voucher(dispatch_block_flags_t flags,
+		voucher_t voucher, dispatch_block_t block)
+{
+	if (!_dispatch_block_flags_valid(flags)) return NULL;
+	flags |= DISPATCH_BLOCK_HAS_VOUCHER;
+	return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0,
+			block);
+}
+
+// Public API: create a block object carrying both an explicit voucher and an
+// explicit QoS class/relative priority; returns NULL on invalid arguments.
+dispatch_block_t
+dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags,
+		voucher_t voucher, dispatch_qos_class_t qos_class,
+		int relative_priority, dispatch_block_t block)
+{
+	if (!_dispatch_block_flags_valid(flags)) return NULL;
+	if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL;
+	flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY);
+	pthread_priority_t pri = 0;
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	pri = _pthread_qos_class_encode(qos_class, relative_priority, 0);
+#endif
+	return _dispatch_block_create_with_voucher_and_priority(flags, voucher,
+			pri, block);
+}
+
+// Public API: run the block synchronously with the given flags, using a
+// stack-allocated private data record (no heap block object is created).
+void
+dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block)
+{
+	if (!_dispatch_block_flags_valid(flags)) {
+		DISPATCH_CLIENT_CRASH("Invalid flags passed to "
+				"dispatch_block_perform()");
+	}
+	flags = _dispatch_block_normalize_flags(flags);
+	struct dispatch_block_private_data_s dbpds =
+			DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, 0, block);
+	dbpds.dbpd_atomic_flags |= DBF_PERFORM; // no group_leave at end of invoke
+	return _dispatch_block_invoke(&dbpds);
+}
+
+#define _dbpd_group(dbpd) ((dispatch_group_t)&(dbpd)->dbpd_group)
+
+// Invoke a block object's underlying block with the priority and voucher it
+// carries (when the HAS_* flags say so), honoring cancellation, and leave
+// the block's group afterwards unless this is a dispatch_block_perform()
+// (DBF_PERFORM) invocation.
+void
+_dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd)
+{
+	dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd;
+	dispatch_block_flags_t flags = dbpd->dbpd_flags;
+	unsigned int atomic_flags = dbpd->dbpd_atomic_flags;
+	// A block object may be run many times or waited on, but not both.
+	if (slowpath(atomic_flags & DBF_WAITED)) {
+		DISPATCH_CLIENT_CRASH("A block object may not be both run more "
+				"than once and waited for");
+	}
+	if (atomic_flags & DBF_CANCELED) goto out;
+
+	pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY;
+	unsigned long override = 0;
+	if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
+		op = _dispatch_get_priority();
+		p = dbpd->dbpd_priority;
+		override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
+				!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ?
+				DISPATCH_PRIORITY_ENFORCE : 0;
+	}
+	voucher_t ov, v = DISPATCH_NO_VOUCHER;
+	if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
+		v = dbpd->dbpd_voucher;
+		if (v) _voucher_retain(v);
+	}
+	ov = _dispatch_adopt_priority_and_voucher(p, v, override);
+	dbpd->dbpd_thread = _dispatch_thread_port(); // read by dispatch_block_wait
+	dbpd->dbpd_block();
+	_dispatch_set_priority_and_replace_voucher(op, ov); // restore caller state
+out:
+	if ((atomic_flags & DBF_PERFORM) == 0) {
+		// only the first completed run leaves the group
+		if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) {
+			dispatch_group_leave(_dbpd_group(dbpd));
+		}
+	}
+}
+
+// Trampoline used when a block object is submitted synchronously: same
+// invoke logic as _dispatch_block_invoke, plus releasing the queue reference
+// taken at submission time (see the "balances" comment below).
+static void
+_dispatch_block_sync_invoke(void *block)
+{
+	dispatch_block_t b = block;
+	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b);
+	dispatch_block_flags_t flags = dbpd->dbpd_flags;
+	unsigned int atomic_flags = dbpd->dbpd_atomic_flags;
+	if (slowpath(atomic_flags & DBF_WAITED)) {
+		DISPATCH_CLIENT_CRASH("A block object may not be both run more "
+				"than once and waited for");
+	}
+	if (atomic_flags & DBF_CANCELED) goto out;
+
+	pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY;
+	unsigned long override = 0;
+	if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
+		op = _dispatch_get_priority();
+		p = dbpd->dbpd_priority;
+		override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
+				!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ?
+				DISPATCH_PRIORITY_ENFORCE : 0;
+	}
+	voucher_t ov, v = DISPATCH_NO_VOUCHER;
+	if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
+		v = dbpd->dbpd_voucher;
+		if (v) _voucher_retain(v);
+	}
+	ov = _dispatch_adopt_priority_and_voucher(p, v, override);
+	dbpd->dbpd_block();
+	_dispatch_set_priority_and_replace_voucher(op, ov); // restore caller state
+out:
+	if ((atomic_flags & DBF_PERFORM) == 0) {
+		// only the first completed run leaves the group
+		if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) {
+			dispatch_group_leave(_dbpd_group(dbpd));
+		}
+	}
+
+	dispatch_queue_t dq = _dispatch_queue_get_current();
+	if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) {
+		// balances dispatch_{,barrier_,}sync
+		_dispatch_release(dq);
	}
-	if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
-		dg = dc->dc_data;
+}
+
+// Trampoline used when a block object is submitted asynchronously: invokes
+// the underlying block with the carried priority/voucher, releases the queue
+// reference taken at submission, then releases the block itself.
+static void
+_dispatch_block_async_invoke_and_release(void *block)
+{
+	dispatch_block_t b = block;
+	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b);
+	dispatch_block_flags_t flags = dbpd->dbpd_flags;
+	unsigned int atomic_flags = dbpd->dbpd_atomic_flags;
+	if (slowpath(atomic_flags & DBF_WAITED)) {
+		DISPATCH_CLIENT_CRASH("A block object may not be both run more "
+				"than once and waited for");
+	}
+	if (atomic_flags & DBF_CANCELED) goto out;
+
+	pthread_priority_t p = DISPATCH_NO_PRIORITY;
+	unsigned long override = 0;
+	if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
+		override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ?
+				DISPATCH_PRIORITY_ENFORCE : 0;
+		p = dbpd->dbpd_priority;
+	}
+	voucher_t v = DISPATCH_NO_VOUCHER;
+	if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
+		v = dbpd->dbpd_voucher;
+		if (v) _voucher_retain(v);
+	}
+	// unlike the sync path, the previous priority/voucher is not restored
+	_dispatch_adopt_priority_and_replace_voucher(p, v, override);
+	dbpd->dbpd_block();
+out:
+	if ((atomic_flags & DBF_PERFORM) == 0) {
+		// only the first completed run leaves the group
+		if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) {
+			dispatch_group_leave(_dbpd_group(dbpd));
+		}
+	}
+	dispatch_queue_t dq = _dispatch_queue_get_current();
+	if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) {
+		// balances dispatch_{,barrier_,group_}async
+		_dispatch_release(dq);
+	}
+	Block_release(b);
+}
+
+// Public API: mark a block object as canceled; crashes if db is not a block
+// object created by the dispatch_block_create* APIs.
+void
+dispatch_block_cancel(dispatch_block_t db)
+{
+	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
+	if (!dbpd) {
+		DISPATCH_CLIENT_CRASH("Invalid block object passed to "
+				"dispatch_block_cancel()");
+	}
+	(void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed);
+}
+
+// Public API: nonzero iff the block object has been canceled.
+long
+dispatch_block_testcancel(dispatch_block_t db)
+{
+	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
+	if (!dbpd) {
+		DISPATCH_CLIENT_CRASH("Invalid block object passed to "
+				"dispatch_block_testcancel()");
+	}
+	return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED);
+}
+
+// Public API: wait for a block object to finish (group wait), boosting the
+// queue it is enqueued on or the thread currently running it when known.
+// Returns nonzero on timeout.  A block may be waited on at most once.
+long
+dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout)
+{
+	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
+	if (!dbpd) {
+		DISPATCH_CLIENT_CRASH("Invalid block object passed to "
+				"dispatch_block_wait()");
+	}
+
+	unsigned int flags = dispatch_atomic_or_orig2o(dbpd, dbpd_atomic_flags,
+			DBF_WAITING, relaxed);
+	if (slowpath(flags & (DBF_WAITED | DBF_WAITING))) {
+		DISPATCH_CLIENT_CRASH("A block object may not be waited for "
+				"more than once");
+	}
+
+	// <rdar://problem/17703192> If we know the queue where this block is
+	// enqueued, or the thread that's executing it, then we should boost
+	// it here.
+
+	pthread_priority_t pp = _dispatch_get_priority();
+
+	dispatch_queue_t boost_dq;
+	boost_dq = dispatch_atomic_xchg2o(dbpd, dbpd_queue, NULL, acquire);
+	if (boost_dq) {
+		// release balances dispatch_{,barrier_,group_}async.
+		// Can't put the queue back in the timeout case: the block might
+		// finish after we fell out of group_wait and see our NULL, so
+		// neither of us would ever release. Side effect: After a _wait
+		// that times out, subsequent waits will not boost the qos of the
+		// still-running block.
+		_dispatch_queue_wakeup_with_qos_and_release(boost_dq, pp);
+	}
+
+	mach_port_t boost_th = dbpd->dbpd_thread;
+	if (boost_th) {
+		_dispatch_thread_override_start(boost_th, pp);
+	}
+
+	int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed);
+	if (slowpath(performed > 1 || (boost_th && boost_dq))) {
+		DISPATCH_CLIENT_CRASH("A block object may not be both run more "
+				"than once and waited for");
+	}
+
+	long ret = dispatch_group_wait(_dbpd_group(dbpd), timeout);
+
+	if (boost_th) {
+		_dispatch_thread_override_end(boost_th);
+	}
+
+	if (ret) {
+		// timed out: reverse our changes
+		(void)dispatch_atomic_and2o(dbpd, dbpd_atomic_flags,
+				~DBF_WAITING, relaxed);
	} else {
-		dg = NULL;
+		(void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags,
+				DBF_WAITED, relaxed);
+		// don't need to re-test here: the second call would see
+		// the first call's WAITING
	}
-	_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
-	if (dg) {
-		dispatch_group_leave(dg);
-		_dispatch_release(dg);
+
+	return ret;
+}
+
+// Public API: schedule notification_block on queue once the block object
+// has finished (delegates to dispatch_group_notify on the block's group).
+void
+dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue,
+		dispatch_block_t notification_block)
+{
+	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db);
+	if (!dbpd) {
+		DISPATCH_CLIENT_CRASH("Invalid block object passed to "
+				"dispatch_block_notify()");
	}
-	_dispatch_introspection_queue_item_complete(dou);
-	if (slowpath(dc1)) {
-		_dispatch_continuation_free_to_cache_limit(dc1);
+	int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed);
+	if (slowpath(performed > 1)) {
+		DISPATCH_CLIENT_CRASH("A block object may not be both run more "
+				"than once and observed");
	}
+
+	return dispatch_group_notify(_dbpd_group(dbpd), queue, notification_block);
}
+#endif // __BLOCKS__
+
#pragma mark -
#pragma mark dispatch_barrier_async
DISPATCH_NOINLINE
static void
_dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+ dispatch_function_t func, pthread_priority_t pp,
+ dispatch_block_flags_t flags)
{
+ // Slow path: the thread-local continuation cache was empty, so allocate
+ // from the heap; capture voucher/priority exactly like the fast path.
dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap();
dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
dc->dc_func = func;
dc->dc_ctxt = ctxt;
+ _dispatch_continuation_voucher_set(dc, flags);
+ _dispatch_continuation_priority_set(dc, pp, flags);
+
+ pp = _dispatch_continuation_get_override_priority(dq, dc);
- _dispatch_queue_push(dq, dc);
+ _dispatch_queue_push(dq, dc, pp);
}
-DISPATCH_NOINLINE
-void
-dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+// Shared core for all barrier-async entry points: take a continuation from
+// the thread cache when possible, fall back to the heap slow path otherwise.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_barrier_async_f2(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func, pthread_priority_t pp,
+ dispatch_block_flags_t flags)
{
dispatch_continuation_t dc;
dc = fastpath(_dispatch_continuation_alloc_cacheonly());
if (!dc) {
- return _dispatch_barrier_async_f_slow(dq, ctxt, func);
+ return _dispatch_barrier_async_f_slow(dq, ctxt, func, pp, flags);
}
dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
dc->dc_func = func;
dc->dc_ctxt = ctxt;
+ _dispatch_continuation_voucher_set(dc, flags);
+ _dispatch_continuation_priority_set(dc, pp, flags);
+
+ pp = _dispatch_continuation_get_override_priority(dq, dc);
- _dispatch_queue_push(dq, dc);
+ _dispatch_queue_push(dq, dc, pp);
+}
+
+// Out-of-line internal wrapper so in-library callers share one copy of the
+// inlined fast path (hence DISPATCH_NOINLINE).
+DISPATCH_NOINLINE
+static void
+_dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func, pthread_priority_t pp,
+ dispatch_block_flags_t flags)
+{
+ return _dispatch_barrier_async_f2(dq, ctxt, func, pp, flags);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func)
+{
+ // Public entry point: no caller-supplied priority or block flags.
+ return _dispatch_barrier_async_f2(dq, ctxt, func, 0, 0);
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func)
+{
+ // Internal variant that deliberately propagates neither QoS nor voucher.
+ return _dispatch_barrier_async_f2(dq, ctxt, func, 0,
+ DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_NO_VOUCHER);
+}
#ifdef __BLOCKS__
void
dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void))
{
- dispatch_barrier_async_f(dq, _dispatch_Block_copy(work),
- _dispatch_call_block_and_release);
+ dispatch_function_t func = _dispatch_call_block_and_release;
+ pthread_priority_t pp = 0;
+ dispatch_block_flags_t flags = 0;
+ // Blocks made with dispatch_block_create() carry private data: unpack
+ // their priority/flags and publish dq into dbpd_queue (retaining dq).
+ if (slowpath(_dispatch_block_has_private_data(work))) {
+ func = _dispatch_block_async_invoke_and_release;
+ pp = _dispatch_block_get_priority(work);
+ flags = _dispatch_block_get_flags(work);
+ // balanced in d_block_async_invoke_and_release or d_block_wait
+ if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work),
+ dbpd_queue, NULL, dq, release)) {
+ _dispatch_retain(dq);
+ }
+ }
+ _dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), func, pp, flags);
}
#endif
old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
_dispatch_thread_setspecific(dispatch_queue_key, dq);
+ pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority);
_dispatch_continuation_pop(other_dc);
+ _dispatch_reset_defaultpriority(old_dp);
_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
rq = dq->do_targetq;
while (slowpath(rq->do_targetq) && rq != old_dq) {
if (dispatch_atomic_sub2o(rq, dq_running, 2, relaxed) == 0) {
- _dispatch_wakeup(rq);
+ _dispatch_queue_wakeup(rq);
}
rq = rq->do_targetq;
}
if (dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
_dispatch_release(dq);
}
static inline void
-_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc)
+_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc,
+ pthread_priority_t pp)
{
uint32_t running = 2;
dq = dq->do_targetq;
} while (slowpath(dq->do_targetq));
- _dispatch_queue_push_wakeup(dq, dc, running == 0);
+ _dispatch_queue_push_wakeup(dq, dc, pp, running == 0);
}
DISPATCH_NOINLINE
static void
_dispatch_async_f_redirect(dispatch_queue_t dq,
- dispatch_continuation_t other_dc)
+ dispatch_continuation_t other_dc, pthread_priority_t pp)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
dc->dc_ctxt = dc;
dc->dc_data = dq;
dc->dc_other = other_dc;
+ dc->dc_priority = 0;
+ dc->dc_voucher = NULL;
_dispatch_retain(dq);
dq = dq->do_targetq;
if (slowpath(dq->do_targetq)) {
- return _dispatch_async_f_redirect2(dq, dc);
+ return _dispatch_async_f_redirect2(dq, dc, pp);
}
- _dispatch_queue_push(dq, dc);
+ _dispatch_queue_push(dq, dc, pp);
}
DISPATCH_NOINLINE
static void
-_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc)
+_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc,
+ pthread_priority_t pp)
{
uint32_t running = 2;
break;
}
if (!slowpath(running & 1)) {
- return _dispatch_async_f_redirect(dq, dc);
+ return _dispatch_async_f_redirect(dq, dc, pp);
}
running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed);
// We might get lucky and find that the barrier has ended by now
} while (!(running & 1));
- _dispatch_queue_push_wakeup(dq, dc, running == 0);
+ _dispatch_queue_push_wakeup(dq, dc, pp, running == 0);
}
DISPATCH_NOINLINE
static void
_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+ dispatch_function_t func, pthread_priority_t pp,
+ dispatch_block_flags_t flags)
{
+ // Slow path: continuation cache empty, allocate from the heap.
dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap();
dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
dc->dc_func = func;
dc->dc_ctxt = ctxt;
+ _dispatch_continuation_voucher_set(dc, flags);
+ _dispatch_continuation_priority_set(dc, pp, flags);
+
+ pp = _dispatch_continuation_get_override_priority(dq, dc);
// No fastpath/slowpath hint because we simply don't know
if (dq->do_targetq) {
- return _dispatch_async_f2(dq, dc);
+ return _dispatch_async_f2(dq, dc, pp);
}
- _dispatch_queue_push(dq, dc);
+ _dispatch_queue_push(dq, dc, pp);
}
-DISPATCH_NOINLINE
-void
-dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
+// Core async submission: width-1 queues and DISPATCH_BLOCK_BARRIER requests
+// are funneled to the barrier path; otherwise a plain async continuation is
+// enqueued with voucher and priority captured.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
+ pthread_priority_t pp, dispatch_block_flags_t flags)
{
dispatch_continuation_t dc;
// No fastpath/slowpath hint because we simply don't know
- if (dq->dq_width == 1) {
- return dispatch_barrier_async_f(dq, ctxt, func);
+ if (dq->dq_width == 1 || flags & DISPATCH_BLOCK_BARRIER) {
+ return _dispatch_barrier_async_f(dq, ctxt, func, pp, flags);
}
dc = fastpath(_dispatch_continuation_alloc_cacheonly());
if (!dc) {
- return _dispatch_async_f_slow(dq, ctxt, func);
+ return _dispatch_async_f_slow(dq, ctxt, func, pp, flags);
}
dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
dc->dc_func = func;
dc->dc_ctxt = ctxt;
+ _dispatch_continuation_voucher_set(dc, flags);
+ _dispatch_continuation_priority_set(dc, pp, flags);
+
+ pp = _dispatch_continuation_get_override_priority(dq, dc);
// No fastpath/slowpath hint because we simply don't know
if (dq->do_targetq) {
- return _dispatch_async_f2(dq, dc);
+ return _dispatch_async_f2(dq, dc, pp);
}
- _dispatch_queue_push(dq, dc);
+ _dispatch_queue_push(dq, dc, pp);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
+{
+ // Public entry point: no caller-supplied priority or block flags.
+ return _dispatch_async_f(dq, ctxt, func, 0, 0);
+}
#ifdef __BLOCKS__
void
dispatch_async(dispatch_queue_t dq, void (^work)(void))
{
- dispatch_async_f(dq, _dispatch_Block_copy(work),
- _dispatch_call_block_and_release);
+ dispatch_function_t func = _dispatch_call_block_and_release;
+ dispatch_block_flags_t flags = 0;
+ pthread_priority_t pp = 0;
+ // dispatch_block_create()d blocks carry private data: unpack priority and
+ // flags, and publish dq into dbpd_queue (retaining dq) for later balance.
+ if (slowpath(_dispatch_block_has_private_data(work))) {
+ func = _dispatch_block_async_invoke_and_release;
+ pp = _dispatch_block_get_priority(work);
+ flags = _dispatch_block_get_flags(work);
+ // balanced in d_block_async_invoke_and_release or d_block_wait
+ if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work),
+ dbpd_queue, NULL, dq, release)) {
+ _dispatch_retain(dq);
+ }
+ }
+ _dispatch_async_f(dq, _dispatch_Block_copy(work), func, pp, flags);
}
#endif
#pragma mark -
#pragma mark dispatch_group_async
-DISPATCH_NOINLINE
-void
-dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+// Group-async core: tags the continuation with DISPATCH_OBJ_GROUP_BIT (plus
+// DISPATCH_OBJ_BARRIER_BIT when DISPATCH_BLOCK_BARRIER was requested) and
+// routes it down the async path.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func, pthread_priority_t pp,
+ dispatch_block_flags_t flags)
{
dispatch_continuation_t dc;
dc = _dispatch_continuation_alloc();
- dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT);
+ unsigned long barrier = (flags & DISPATCH_BLOCK_BARRIER) ?
+ DISPATCH_OBJ_BARRIER_BIT : 0;
+ dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT |
+ barrier);
dc->dc_func = func;
dc->dc_ctxt = ctxt;
dc->dc_data = dg;
+ _dispatch_continuation_voucher_set(dc, flags);
+ _dispatch_continuation_priority_set(dc, pp, flags);
+
+ pp = _dispatch_continuation_get_override_priority(dq, dc);
// No fastpath/slowpath hint because we simply don't know
- if (dq->dq_width != 1 && dq->do_targetq) {
- return _dispatch_async_f2(dq, dc);
+ // Barrier continuations must not take the concurrent redirect path.
+ if (dq->dq_width != 1 && !barrier && dq->do_targetq) {
+ return _dispatch_async_f2(dq, dc, pp);
}
- _dispatch_queue_push(dq, dc);
+ _dispatch_queue_push(dq, dc, pp);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func)
+{
+ // Public entry point: no caller-supplied priority or block flags.
+ return _dispatch_group_async_f(dg, dq, ctxt, func, 0, 0);
+}
#ifdef __BLOCKS__
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_block_t db)
{
- dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db),
- _dispatch_call_block_and_release);
+ dispatch_function_t func = _dispatch_call_block_and_release;
+ dispatch_block_flags_t flags = 0;
+ pthread_priority_t pp = 0;
+ // Unpack dispatch_block_create() private data (priority, flags) and
+ // publish dq into dbpd_queue (retaining dq), if present.
+ if (slowpath(_dispatch_block_has_private_data(db))) {
+ func = _dispatch_block_async_invoke_and_release;
+ pp = _dispatch_block_get_priority(db);
+ flags = _dispatch_block_get_flags(db);
+ // balanced in d_block_async_invoke_and_release or d_block_wait
+ if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(db),
+ dbpd_queue, NULL, dq, release)) {
+ _dispatch_retain(dq);
+ }
+ }
+ _dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), func, pp, flags);
}
#endif
#pragma mark -
#pragma mark dispatch_function_invoke
+static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func, pthread_priority_t pp);
+
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_function_invoke(dispatch_queue_t dq, void *ctxt,
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_function_recurse(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+ dispatch_function_t func, pthread_priority_t pp)
{
+ // Re-submit synchronously to the target queue, forwarding the caller's
+ // requested priority down the queue hierarchy.
struct dispatch_continuation_s dc = {
.dc_data = dq,
.dc_func = func,
.dc_ctxt = ctxt,
};
- dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke);
+ _dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke, pp);
}
#pragma mark -
{
_dispatch_thread_semaphore_t sema;
dispatch_continuation_t dc = dou._dc;
+ mach_port_t th;
if (DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable &
(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) !=
_dispatch_trace_continuation_pop(dq, dc);
_dispatch_perfmon_workitem_inc();
+ th = (mach_port_t)dc->dc_data;
dc = dc->dc_ctxt;
dq = dc->dc_data;
sema = (_dispatch_thread_semaphore_t)dc->dc_other;
(void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed);
}
_dispatch_introspection_queue_item_complete(dou);
+ _dispatch_wqthread_override_start(th,
+ _dispatch_queue_get_override_priority(dq));
return sema ? sema : MACH_PORT_DEAD;
}
#if DISPATCH_COCOA_COMPAT
if (slowpath(dq->dq_is_thread_bound)) {
// The queue is bound to a non-dispatch thread (e.g. main thread)
+ _dispatch_continuation_voucher_adopt(dc);
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
dispatch_atomic_store2o(dc, dc_func, NULL, release);
_dispatch_thread_semaphore_signal(sema); // release
DISPATCH_NOINLINE
static void
_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+ dispatch_function_t func, pthread_priority_t pp)
{
if (slowpath(!dq->do_targetq)) {
// the global concurrent queues do not need strict ordering
(void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed);
return _dispatch_sync_f_invoke(dq, ctxt, func);
}
- // It's preferred to execute synchronous blocks on the current thread
- // due to thread-local side effects, garbage collection, etc. However,
- // blocks submitted to the main thread MUST be run on the main thread
-
+ if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG);
_dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore();
struct dispatch_continuation_s dc = {
.dc_data = dq,
#endif
.dc_other = (void*)sema,
};
+#if DISPATCH_COCOA_COMPAT
+ // It's preferred to execute synchronous blocks on the current thread
+ // due to thread-local side effects, garbage collection, etc. However,
+ // blocks submitted to the main thread MUST be run on the main thread
+ if (slowpath(dq->dq_is_thread_bound)) {
+ _dispatch_continuation_voucher_set(&dc, 0);
+ }
+#endif
struct dispatch_continuation_s dbss = {
.do_vtable = (void *)(DISPATCH_OBJ_BARRIER_BIT |
DISPATCH_OBJ_SYNC_SLOW_BIT),
.dc_func = _dispatch_barrier_sync_f_slow_invoke,
.dc_ctxt = &dc,
-#if DISPATCH_INTROSPECTION
- .dc_data = (void*)_dispatch_thread_self(),
-#endif
+ .dc_data = (void*)(uintptr_t)_dispatch_thread_port(),
+ .dc_priority = pp,
};
- _dispatch_queue_push(dq, &dbss);
+ _dispatch_queue_push(dq, &dbss,
+ _dispatch_continuation_get_override_priority(dq, &dbss));
_dispatch_thread_semaphore_wait(sema); // acquire
_dispatch_put_thread_semaphore(sema);
return;
}
#endif
+
+ _dispatch_queue_set_thread(dq);
if (slowpath(dq->do_targetq->do_targetq)) {
- _dispatch_function_recurse(dq, ctxt, func);
+ _dispatch_function_recurse(dq, ctxt, func, pp);
} else {
_dispatch_function_invoke(dq, ctxt, func);
}
+ _dispatch_queue_clear_thread(dq);
+
if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL) &&
dq->dq_running == 2) {
// rdar://problem/8290662 "lock transfer"
}
}
(void)dispatch_atomic_sub2o(dq, do_suspend_cnt,
- DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
- if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, release) == 0)) {
- _dispatch_wakeup(dq);
+ DISPATCH_OBJECT_SUSPEND_INTERVAL, release);
+ if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) {
+ _dispatch_queue_wakeup(dq);
}
}
}
}
if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
}
_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
+ _dispatch_queue_set_thread(dq);
_dispatch_function_invoke(dq, ctxt, func);
+ _dispatch_queue_clear_thread(dq);
if (slowpath(dq->dq_items_tail)) {
return _dispatch_barrier_sync_f2(dq);
}
if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
}
DISPATCH_NOINLINE
static void
_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+ dispatch_function_t func, pthread_priority_t pp)
{
- _dispatch_function_recurse(dq, ctxt, func);
+ // Bracket the recursive invoke with set/clear of the queue's thread field.
+ _dispatch_queue_set_thread(dq);
+ _dispatch_function_recurse(dq, ctxt, func, pp);
+ _dispatch_queue_clear_thread(dq);
if (slowpath(dq->dq_items_tail)) {
return _dispatch_barrier_sync_f2(dq);
}
if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
+ }
+}
+
+// Internal barrier-sync entry point taking an explicit caller priority.
+DISPATCH_NOINLINE
+static void
+_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func, pthread_priority_t pp)
+{
+ // 1) ensure that this thread hasn't enqueued anything ahead of this call
+ // 2) the queue is not suspended
+ if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){
+ return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp);
+ }
+ if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) {
+ // global concurrent queues and queues bound to non-dispatch threads
+ // always fall into the slow case
+ return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp);
+ }
+ if (slowpath(dq->do_targetq->do_targetq)) {
+ return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp);
+ }
+ _dispatch_barrier_sync_f_invoke(dq, ctxt, func);
+}
DISPATCH_NOINLINE
// 1) ensure that this thread hasn't enqueued anything ahead of this call
// 2) the queue is not suspended
if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){
- return _dispatch_barrier_sync_f_slow(dq, ctxt, func);
+ return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0);
}
if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) {
// global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case
- return _dispatch_barrier_sync_f_slow(dq, ctxt, func);
+ return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0);
}
if (slowpath(dq->do_targetq->do_targetq)) {
- return _dispatch_barrier_sync_f_recurse(dq, ctxt, func);
+ return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, 0);
}
_dispatch_barrier_sync_f_invoke(dq, ctxt, func);
}
#ifdef __BLOCKS__
-#if DISPATCH_COCOA_COMPAT
+// Slow path for dispatch_barrier_sync(): handles blocks carrying private
+// data (priority/flags from dispatch_block_create()) and, under Cocoa
+// compat, GC Block_copy for thread-bound queues.
DISPATCH_NOINLINE
static void
_dispatch_barrier_sync_slow(dispatch_queue_t dq, void (^work)(void))
{
- // Blocks submitted to the main queue MUST be run on the main thread,
- // therefore under GC we must Block_copy in order to notify the thread-local
- // garbage collector that the objects are transferring to the main thread
- // rdar://problem/7176237&7181849&7458685
- if (dispatch_begin_thread_4GC) {
- dispatch_block_t block = _dispatch_Block_copy(work);
- return dispatch_barrier_sync_f(dq, block,
- _dispatch_call_block_and_release);
+ bool has_pd = _dispatch_block_has_private_data(work);
+ dispatch_function_t func = _dispatch_Block_invoke(work);
+ pthread_priority_t pp = 0;
+ if (has_pd) {
+ func = _dispatch_block_sync_invoke;
+ pp = _dispatch_block_get_priority(work);
+ dispatch_block_flags_t flags = _dispatch_block_get_flags(work);
+ if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
+ // Never run a sync block below the submitting thread's priority.
+ pthread_priority_t tp = _dispatch_get_priority();
+ if (pp < tp) {
+ pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG;
+ } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) {
+ pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
+ }
+ }
+ // balanced in d_block_sync_invoke or d_block_wait
+ if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work),
+ dbpd_queue, NULL, dq, release)) {
+ _dispatch_retain(dq);
+ }
+#if DISPATCH_COCOA_COMPAT
+ } else if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) {
+ // Blocks submitted to the main queue MUST be run on the main thread,
+ // under GC we must Block_copy in order to notify the thread-local
+ // garbage collector that the objects are transferring to another thread
+ // rdar://problem/7176237&7181849&7458685
+ work = _dispatch_Block_copy(work);
+ func = _dispatch_call_block_and_release;
- }
- dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work));
-}
#endif
+ // The closing brace must come AFTER #endif so the if/else-if chain stays
+ // balanced when DISPATCH_COCOA_COMPAT is disabled (cf. _dispatch_sync_slow).
+ }
+ _dispatch_barrier_sync_f(dq, work, func, pp);
+}
void
dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void))
{
-#if DISPATCH_COCOA_COMPAT
- if (slowpath(dq->dq_is_thread_bound)) {
+ // Thread-bound queues and blocks with private data need the slow path.
+ if (slowpath(dq->dq_is_thread_bound) ||
+ slowpath(_dispatch_block_has_private_data(work))) {
return _dispatch_barrier_sync_slow(dq, work);
}
-#endif
dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work));
}
#endif
_dispatch_barrier_trysync_f_invoke(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
+ _dispatch_queue_set_thread(dq);
_dispatch_function_invoke(dq, ctxt, func);
+ _dispatch_queue_clear_thread(dq);
if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
}
if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))
|| slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1,
acquire))) {
- return dispatch_barrier_async_f(dq, ctxt, func);
+ return _dispatch_barrier_async_detached_f(dq, ctxt, func);
}
_dispatch_barrier_trysync_f_invoke(dq, ctxt, func);
}
DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
- bool wakeup)
+ pthread_priority_t pp, bool wakeup)
{
+ if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG);
_dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore();
- struct dispatch_continuation_s dss = {
+ struct dispatch_continuation_s dc = {
.do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT,
#if DISPATCH_INTROSPECTION
.dc_func = func,
.dc_ctxt = ctxt,
- .dc_data = (void*)_dispatch_thread_self(),
+ .dc_data = (void*)(uintptr_t)_dispatch_thread_port(),
#endif
.dc_other = (void*)sema,
+ .dc_priority = pp,
};
- _dispatch_queue_push_wakeup(dq, &dss, wakeup);
+ _dispatch_queue_push_wakeup(dq, &dc,
+ _dispatch_continuation_get_override_priority(dq, &dc), wakeup);
_dispatch_thread_semaphore_wait(sema);
_dispatch_put_thread_semaphore(sema);
if (slowpath(dq->do_targetq->do_targetq)) {
- _dispatch_function_recurse(dq, ctxt, func);
+ _dispatch_function_recurse(dq, ctxt, func, pp);
} else {
_dispatch_function_invoke(dq, ctxt, func);
}
+
if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
}
{
_dispatch_function_invoke(dq, ctxt, func);
if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
}
DISPATCH_NOINLINE
static void
_dispatch_sync_f_recurse(dispatch_queue_t dq, void *ctxt,
- dispatch_function_t func)
+ dispatch_function_t func, pthread_priority_t pp)
{
+ // Non-barrier sync through a queue hierarchy: recurse to the target
+ // queue, then wake dq if we were the last runner (count drops to 0).
- _dispatch_function_recurse(dq, ctxt, func);
+ _dispatch_function_recurse(dq, ctxt, func, pp);
if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
}
+// Non-barrier sync core: try to join dq_running (+2) when no barrier is
+// pending or queued, falling back to the enqueue-and-wait slow path.
static inline void
-_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
+_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
+ pthread_priority_t pp)
{
// 1) ensure that this thread hasn't enqueued anything ahead of this call
// 2) the queue is not suspended
if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){
- return _dispatch_sync_f_slow(dq, ctxt, func, false);
+ return _dispatch_sync_f_slow(dq, ctxt, func, pp, false);
}
uint32_t running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed);
// re-check suspension after barrier check <rdar://problem/15242126>
- if (slowpath(running & 1) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) {
+ if (slowpath(running & 1) || _dispatch_object_suspended(dq)) {
running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed);
- return _dispatch_sync_f_slow(dq, ctxt, func, running == 0);
+ return _dispatch_sync_f_slow(dq, ctxt, func, pp, running == 0);
}
if (slowpath(dq->do_targetq->do_targetq)) {
- return _dispatch_sync_f_recurse(dq, ctxt, func);
+ return _dispatch_sync_f_recurse(dq, ctxt, func, pp);
}
_dispatch_sync_f_invoke(dq, ctxt, func);
}
+// Internal sync entry point taking an explicit caller priority.
+DISPATCH_NOINLINE
+static void
+_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
+ pthread_priority_t pp)
+{
+ // Width-1 queues are barrier-sync by definition.
+ if (fastpath(dq->dq_width == 1)) {
+ return _dispatch_barrier_sync_f(dq, ctxt, func, pp);
+ }
+ if (slowpath(!dq->do_targetq)) {
+ // the global concurrent queues do not need strict ordering
+ (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed);
+ return _dispatch_sync_f_invoke(dq, ctxt, func);
+ }
+ _dispatch_sync_f2(dq, ctxt, func, pp);
+}
+
DISPATCH_NOINLINE
void
dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
(void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed);
return _dispatch_sync_f_invoke(dq, ctxt, func);
}
- _dispatch_sync_f2(dq, ctxt, func);
+ _dispatch_sync_f2(dq, ctxt, func, 0);
}
#ifdef __BLOCKS__
-#if DISPATCH_COCOA_COMPAT
+// Slow path for dispatch_sync(): blocks with private data, and (Cocoa
+// compat) GC copies for thread-bound queues; barrier-flagged blocks are
+// diverted to the barrier slow path.
DISPATCH_NOINLINE
static void
_dispatch_sync_slow(dispatch_queue_t dq, void (^work)(void))
{
- // Blocks submitted to the main queue MUST be run on the main thread,
- // therefore under GC we must Block_copy in order to notify the thread-local
- // garbage collector that the objects are transferring to the main thread
- // rdar://problem/7176237&7181849&7458685
- if (dispatch_begin_thread_4GC) {
- dispatch_block_t block = _dispatch_Block_copy(work);
- return dispatch_sync_f(dq, block, _dispatch_call_block_and_release);
+ bool has_pd = _dispatch_block_has_private_data(work);
+ if (has_pd && (_dispatch_block_get_flags(work) & DISPATCH_BLOCK_BARRIER)) {
+ return _dispatch_barrier_sync_slow(dq, work);
}
- dispatch_sync_f(dq, work, _dispatch_Block_invoke(work));
-}
+ dispatch_function_t func = _dispatch_Block_invoke(work);
+ pthread_priority_t pp = 0;
+ if (has_pd) {
+ func = _dispatch_block_sync_invoke;
+ pp = _dispatch_block_get_priority(work);
+ dispatch_block_flags_t flags = _dispatch_block_get_flags(work);
+ if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
+ // Never run a sync block below the submitting thread's priority.
+ pthread_priority_t tp = _dispatch_get_priority();
+ if (pp < tp) {
+ pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG;
+ } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) {
+ pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
+ }
+ }
+ // balanced in d_block_sync_invoke or d_block_wait
+ if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work),
+ dbpd_queue, NULL, dq, release)) {
+ _dispatch_retain(dq);
+ }
+#if DISPATCH_COCOA_COMPAT
+ } else if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) {
+ // Blocks submitted to the main queue MUST be run on the main thread,
+ // under GC we must Block_copy in order to notify the thread-local
+ // garbage collector that the objects are transferring to another thread
+ // rdar://problem/7176237&7181849&7458685
+ work = _dispatch_Block_copy(work);
+ func = _dispatch_call_block_and_release;
#endif
+ }
+ if (slowpath(!dq->do_targetq)) {
+ // the global concurrent queues do not need strict ordering
+ (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed);
+ return _dispatch_sync_f_invoke(dq, work, func);
+ }
+ _dispatch_sync_f2(dq, work, func, pp);
+}
void
dispatch_sync(dispatch_queue_t dq, void (^work)(void))
{
-#if DISPATCH_COCOA_COMPAT
- if (slowpath(dq->dq_is_thread_bound)) {
+ // Serial (width-1) queues are barrier-sync by definition.
+ if (fastpath(dq->dq_width == 1)) {
+ return dispatch_barrier_sync(dq, work);
+ }
+ // Thread-bound queues and blocks with private data need the slow path.
+ if (slowpath(dq->dq_is_thread_bound) ||
+ slowpath(_dispatch_block_has_private_data(work)) ) {
return _dispatch_sync_slow(dq, work);
}
-#endif
dispatch_sync_f(dq, work, _dispatch_Block_invoke(work));
}
#endif
ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue);
dispatch_assert(ds);
+ // TODO: don't use a separate continuation & voucher
dispatch_continuation_t dc = _dispatch_continuation_alloc();
- dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
+ dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT);
dc->dc_func = func;
dc->dc_ctxt = ctxt;
dc->dc_data = ds;
#pragma mark -
#pragma mark dispatch_queue_push
-DISPATCH_NOINLINE
-static void
-_dispatch_queue_push_list_slow2(dispatch_queue_t dq,
- struct dispatch_object_s *obj)
+// Publish obj as the new head of an emptied queue and wake it at qos pp;
+// takes an extra reference unless the caller already did (retained), and
+// the wakeup call releases it.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_push_list_slow2(dispatch_queue_t dq, pthread_priority_t pp,
+ struct dispatch_object_s *obj, bool retained)
{
// The queue must be retained before dq_items_head is written in order
// to ensure that the reference is still valid when _dispatch_wakeup is
// dq_items_head and _dispatch_wakeup, the blocks submitted to the
// queue may release the last reference to the queue when invoked by
// _dispatch_queue_drain. <rdar://problem/6932776>
- _dispatch_retain(dq);
+ if (!retained) _dispatch_retain(dq);
dq->dq_items_head = obj;
- _dispatch_wakeup(dq);
- _dispatch_release(dq);
+ return _dispatch_queue_wakeup_with_qos_and_release(dq, pp);
}
DISPATCH_NOINLINE
void
-_dispatch_queue_push_list_slow(dispatch_queue_t dq,
- struct dispatch_object_s *obj, unsigned int n)
+_dispatch_queue_push_list_slow(dispatch_queue_t dq, pthread_priority_t pp,
+ struct dispatch_object_s *obj, unsigned int n, bool retained)
{
if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) {
+ dispatch_assert(!retained);
dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed);
+ // Root queues just need worker threads requested, one per item.
return _dispatch_queue_wakeup_global2(dq, n);
}
- _dispatch_queue_push_list_slow2(dq, obj);
+ _dispatch_queue_push_list_slow2(dq, pp, obj, retained);
}
DISPATCH_NOINLINE
void
-_dispatch_queue_push_slow(dispatch_queue_t dq,
- struct dispatch_object_s *obj)
+_dispatch_queue_push_slow(dispatch_queue_t dq, pthread_priority_t pp,
+ struct dispatch_object_s *obj, bool retained)
{
if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) {
+ dispatch_assert(!retained);
dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed);
+ // Single-item variant: request one worker thread for root queues.
return _dispatch_queue_wakeup_global(dq);
}
- _dispatch_queue_push_list_slow2(dq, obj);
+ _dispatch_queue_push_list_slow2(dq, pp, obj, retained);
}
#pragma mark -
unsigned long
_dispatch_queue_probe(dispatch_queue_t dq)
{
- return (unsigned long)slowpath(dq->dq_items_tail != NULL);
+ // replaces the open-coded dq_items_tail check with the shared helper
+ return _dispatch_queue_class_probe(dq);
}
#if DISPATCH_COCOA_COMPAT
unsigned long
_dispatch_runloop_queue_probe(dispatch_queue_t dq)
{
- if (_dispatch_queue_probe(dq)) {
+ if (_dispatch_queue_class_probe(dq)) {
if (dq->do_xref_cnt == -1) return true; // <rdar://problem/14026816>
return _dispatch_runloop_queue_wakeup(dq);
}
unsigned long
_dispatch_mgr_queue_probe(dispatch_queue_t dq)
{
- if (_dispatch_queue_probe(dq)) {
+ if (_dispatch_queue_class_probe(dq)) {
return _dispatch_mgr_wakeup(dq);
}
return false;
dispatch_queue_t
_dispatch_wakeup(dispatch_object_t dou)
{
- if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
+ // Queues and sources now have a dedicated wakeup path.
+ unsigned long type = dx_metatype(dou._do);
+ if (type == _DISPATCH_QUEUE_TYPE || type == _DISPATCH_SOURCE_TYPE) {
+ return _dispatch_queue_wakeup(dou._dq);
+ }
+ if (_dispatch_object_suspended(dou)) {
return NULL;
}
if (!dx_probe(dou._do)) {
return NULL;
}
+ // Try to take the suspend lock; on failure someone else owns the wakeup.
if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0,
- DISPATCH_OBJECT_SUSPEND_LOCK, release)) {
-#if DISPATCH_COCOA_COMPAT
- if (dou._dq == &_dispatch_main_q) {
- return _dispatch_main_queue_wakeup();
- }
-#endif
+ DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) {
return NULL;
}
_dispatch_retain(dou._do);
dispatch_queue_t tq = dou._do->do_targetq;
- _dispatch_queue_push(tq, dou._do);
+ _dispatch_queue_push(tq, dou._do, 0);
return tq; // libdispatch does not need this, but the Instrument DTrace
// probe does
}
static void
_dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n)
{
- static dispatch_once_t pred;
dispatch_root_queue_context_t qc = dq->do_ctxt;
uint32_t i = n;
int r;
_dispatch_debug_root_queue(dq, __func__);
- dispatch_once_f(&pred, NULL, _dispatch_root_queues_init);
+ dispatch_once_f(&_dispatch_root_queues_pred, NULL,
+ _dispatch_root_queues_init);
#if HAVE_PTHREAD_WORKQUEUES
#if DISPATCH_USE_PTHREAD_POOL
unsigned int gen_cnt;
do {
r = pthread_workqueue_additem_np(qc->dgq_kworkqueue,
- _dispatch_worker_thread3, dq, &wh, &gen_cnt);
+ _dispatch_worker_thread4, dq, &wh, &gen_cnt);
(void)dispatch_assume_zero(r);
} while (--i);
return;
}
#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
- r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority,
- qc->dgq_wq_options, (int)i);
+ if (!dq->dq_priority) {
+ r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority,
+ qc->dgq_wq_options, (int)i);
+ (void)dispatch_assume_zero(r);
+ return;
+ }
+#endif
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ r = _pthread_workqueue_addthreads((int)i, dq->dq_priority);
(void)dispatch_assume_zero(r);
#endif
return;
}
#endif // HAVE_PTHREAD_WORKQUEUES
#if DISPATCH_USE_PTHREAD_POOL
- if (fastpath(qc->dgq_thread_mediator)) {
- while (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
+ dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
+ if (fastpath(pqc->dpq_thread_mediator.do_vtable)) {
+ while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) {
if (!--i) {
return;
}
}
}
- uint32_t j, t_count = qc->dgq_thread_pool_size;
+ uint32_t j, t_count;
+ // seq_cst with atomic store to tail <rdar://problem/16932833>
+ t_count = dispatch_atomic_load2o(qc, dgq_thread_pool_size, seq_cst);
do {
if (!t_count) {
_dispatch_root_queue_debug("pthread pool is full for root queue: "
}
j = i > t_count ? t_count : i;
} while (!dispatch_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count,
- t_count - j, &t_count, relaxed));
+ t_count - j, &t_count, acquire));
- dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
- pthread_attr_t *attr = pqc ? &pqc->dpq_thread_attr : NULL;
+ pthread_attr_t *attr = &pqc->dpq_thread_attr;
pthread_t tid, *pthr = &tid;
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
if (slowpath(dq == &_dispatch_mgr_root_queue)) {
}
_dispatch_temporary_resource_shortage();
}
- if (!attr) {
- r = pthread_detach(*pthr);
- (void)dispatch_assume_zero(r);
- }
} while (--j);
#endif // DISPATCH_USE_PTHREAD_POOL
}
static inline void
_dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n)
{
- if (!dq->dq_items_tail) {
+ if (!_dispatch_queue_class_probe(dq)) {
return;
}
#if HAVE_PTHREAD_WORKQUEUES
{
dispatch_queue_t dq = dou._dq;
dispatch_queue_t otq = dq->do_targetq;
+ dispatch_queue_t cq = _dispatch_queue_get_current();
+
+ if (slowpath(cq != otq)) {
+ return otq;
+ }
+
*sema_ptr = _dispatch_queue_drain(dq);
if (slowpath(otq != dq->do_targetq)) {
_dispatch_queue_head(dispatch_queue_t dq)
{
struct dispatch_object_s *dc;
- while (!(dc = fastpath(dq->dq_items_head))) {
- dispatch_hardware_pause();
- }
+ _dispatch_wait_until(dc = fastpath(dq->dq_items_head));
return dc;
}
dq->dq_items_head = next_dc;
if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL,
relaxed)) {
- // Enqueue is TIGHTLY controlled, we won't wait long.
- while (!(next_dc = fastpath(dc->do_next))) {
- dispatch_hardware_pause();
- }
+ _dispatch_wait_until(next_dc = fastpath(dc->do_next));
dq->dq_items_head = next_dc;
}
return next_dc;
orig_tq = dq->do_targetq;
_dispatch_thread_setspecific(dispatch_queue_key, dq);
+ pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority);
+
+ pthread_priority_t op = _dispatch_queue_get_override_priority(dq);
+ pthread_priority_t dp = _dispatch_get_defaultpriority();
+ dp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+ if (op > dp) {
+ _dispatch_wqthread_override_start(dq->dq_thread, op);
+ }
+
//dispatch_debug_queue(dq, __func__);
while (dq->dq_items_tail) {
}
out:
+ _dispatch_reset_defaultpriority(old_dp);
_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
return sema;
}
.do_vtable = NULL,
};
struct dispatch_object_s *dmarker = (void*)▮
- _dispatch_queue_push_notrace(dq, dmarker);
+ _dispatch_queue_push_notrace(dq, dmarker, 0);
_dispatch_perfmon_start();
dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
_dispatch_thread_setspecific(dispatch_queue_key, dq);
+ pthread_priority_t old_pri = _dispatch_get_priority();
+ pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri);
+ voucher_t voucher = _voucher_copy();
struct dispatch_object_s *dc, *next_dc;
dc = _dispatch_queue_head(dq);
if (next_dc) {
_dispatch_main_queue_wakeup();
}
+ _dispatch_voucher_debug("main queue restore", voucher);
+ _dispatch_set_priority_and_replace_voucher(old_pri, voucher);
+ _dispatch_queue_reset_override_priority(dq);
+ _dispatch_reset_defaultpriority(old_dp);
_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
_dispatch_perfmon_end();
_dispatch_force_cache_cleanup();
_dispatch_perfmon_start();
dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
_dispatch_thread_setspecific(dispatch_queue_key, dq);
+ pthread_priority_t old_pri = _dispatch_get_priority();
+ pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri);
+ voucher_t voucher = _voucher_copy();
struct dispatch_object_s *dc, *next_dc;
dc = _dispatch_queue_head(dq);
_dispatch_continuation_pop(dc);
_dispatch_perfmon_workitem_inc();
+ _dispatch_voucher_debug("runloop queue restore", voucher);
+ _dispatch_set_priority_and_replace_voucher(old_pri, voucher);
+ _dispatch_reset_defaultpriority(old_dp);
_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
_dispatch_perfmon_end();
_dispatch_force_cache_cleanup();
if (slowpath(_dispatch_queue_drain(dq))) {
DISPATCH_CRASH("Sync onto manager queue");
}
+ _dispatch_voucher_debug("mgr queue clear", NULL);
+ _voucher_clear();
+ _dispatch_queue_reset_override_priority(dq);
+ _dispatch_reset_defaultpriority_override();
_dispatch_perfmon_end();
_dispatch_force_cache_cleanup();
}
#pragma mark -
-#pragma mark dispatch_root_queue_drain
+#pragma mark _dispatch_queue_wakeup_with_qos
-#ifndef DISPATCH_CONTENTION_USE_RAND
-#define DISPATCH_CONTENTION_USE_RAND (!TARGET_OS_EMBEDDED)
-#endif
-#ifndef DISPATCH_CONTENTION_SPINS_MAX
-#define DISPATCH_CONTENTION_SPINS_MAX (128 - 1)
+// Slow path of wakeup-with-QoS: reached once the queue is known not to be
+// suspended. Probes the queue for work, records the override priority, and
+// either overrides the thread currently responsible for dq or hands dq off
+// to its target queue. `retained` means the caller already holds a reference
+// that must be consumed on every path that does not push dq onward.
+DISPATCH_NOINLINE
+static dispatch_queue_t
+_dispatch_queue_wakeup_with_qos_slow(dispatch_queue_t dq, pthread_priority_t pp,
+ bool retained)
+{
+ if (!dx_probe(dq) && (dq->dq_is_thread_bound || !dq->dq_thread)) {
+ if (retained) _dispatch_release(dq);
+ return NULL;
+ }
+ // Only the QoS class bits of pp take part in override comparisons.
+ pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
+ bool override = _dispatch_queue_override_priority(dq, pp);
+ // NOTE(review): override is suppressed while dq_running > 1 — presumably a
+ // concurrently-draining queue propagates the override itself; confirm.
+ if (override && dq->dq_running > 1) {
+ override = false;
+ }
+
+ // Try to take the suspend-count lock; failure means do_suspend_cnt was
+ // non-zero, i.e. the queue is suspended or the lock is already held.
+ if (!dispatch_atomic_cmpxchg2o(dq, do_suspend_cnt, 0,
+ DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) {
+#if DISPATCH_COCOA_COMPAT
+ if (dq == &_dispatch_main_q && dq->dq_is_thread_bound) {
+ return _dispatch_main_queue_wakeup();
+ }
#endif
-#ifndef DISPATCH_CONTENTION_SPINS_MIN
-#define DISPATCH_CONTENTION_SPINS_MIN (32 - 1)
+ if (override) {
+ mach_port_t th;
+ // <rdar://problem/17735825> to traverse the tq chain safely we must
+ // lock it to ensure it cannot change, unless the queue is running
+ // and we can just override the thread itself
+ if (dq->dq_thread) {
+ _dispatch_wqthread_override_start(dq->dq_thread, pp);
+ } else if (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread,
+ MACH_PORT_NULL, _dispatch_thread_port(), &th, acquire)) {
+ // already locked, override the owner, trysync will do a queue
+ // wakeup when it returns.
+ _dispatch_wqthread_override_start(th, pp);
+ } else {
+ dispatch_queue_t tq = dq->do_targetq;
+ if (_dispatch_queue_prepare_override(dq, tq, pp)) {
+ _dispatch_queue_push_override(dq, tq, pp);
+ } else {
+ _dispatch_queue_wakeup_with_qos(tq, pp);
+ }
+ dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL,
+ release);
+ }
+ }
+ if (retained) _dispatch_release(dq);
+ return NULL;
+ }
+ // Suspend lock taken: push dq to its target queue (the push consumes a
+ // reference, so take one here unless the caller already provided it).
+ dispatch_queue_t tq = dq->do_targetq;
+ if (!retained) _dispatch_retain(dq);
+ if (override) {
+ override = _dispatch_queue_prepare_override(dq, tq, pp);
+ }
+ _dispatch_queue_push(tq, dq, pp);
+ if (override) {
+ _dispatch_queue_push_override(dq, tq, pp);
+ }
+ return tq; // libdispatch does not need this, but the Instrument DTrace
+ // probe does
+}
+
+// Fast path shared by all wakeup-with-QoS entry points: for a suspended
+// queue just record the override priority and consume the caller's
+// reference (if retained); otherwise defer to the slow path.
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_queue_t
+_dispatch_queue_wakeup_with_qos2(dispatch_queue_t dq, pthread_priority_t pp,
+ bool retained)
+{
+ if (_dispatch_object_suspended(dq)) {
+ _dispatch_queue_override_priority(dq, pp);
+ if (retained) _dispatch_release(dq);
+ return NULL;
+ }
+ return _dispatch_queue_wakeup_with_qos_slow(dq, pp, retained);
+}
+
+// Wakeup variant that consumes the caller's reference on dq
+// (retained == true).
+DISPATCH_NOINLINE
+void
+_dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq,
+ pthread_priority_t pp)
+{
+ (void)_dispatch_queue_wakeup_with_qos2(dq, pp, true);
+}
+
+// Wakeup variant that leaves the caller's reference untouched
+// (retained == false).
+DISPATCH_NOINLINE
+void
+_dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, pthread_priority_t pp)
+{
+ (void)_dispatch_queue_wakeup_with_qos2(dq, pp, false);
+}
+
+// Plain wakeup: reuses the queue's currently recorded override priority
+// instead of taking a new one from the caller.
+DISPATCH_NOINLINE
+dispatch_queue_t
+_dispatch_queue_wakeup(dispatch_queue_t dq)
+{
+ return _dispatch_queue_wakeup_with_qos2(dq,
+ _dispatch_queue_get_override_priority(dq), false);
+}
+
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+// Body of the "stealer" continuation pushed by _dispatch_queue_push_override:
+// if the queue is idle (dq_running 0 -> 1) it is drained right here on the
+// higher-QoS worker thread; otherwise the thread currently running it (if
+// any) is given a QoS override instead.
+static void
+_dispatch_queue_override_invoke(void *ctxt)
+{
+ dispatch_continuation_t dc = (dispatch_continuation_t)ctxt;
+ dispatch_queue_t dq = dc->dc_data;
+ pthread_priority_t p = 0;
+
+ if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
+ fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) {
+ _dispatch_queue_set_thread(dq);
+
+ _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
+ dq->dq_thread, _dispatch_get_defaultpriority());
+
+ // Drain under the priority the continuation was created with, then
+ // restore the previous default priority.
+ pthread_priority_t old_dp = _dispatch_get_defaultpriority();
+ _dispatch_reset_defaultpriority(dc->dc_priority);
+
+ dispatch_queue_t tq = NULL;
+ _dispatch_thread_semaphore_t sema = 0;
+ tq = dispatch_queue_invoke2(dq, &sema);
+
+ _dispatch_queue_clear_thread(dq);
+ _dispatch_reset_defaultpriority(old_dp);
+
+ uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release);
+ if (sema) {
+ _dispatch_thread_semaphore_signal(sema);
+ } else if (!tq && running == 0) {
+ // Fully drained: clear the recorded override and drop the thread's
+ // kernel override if it exceeded dq's own QoS class.
+ p = _dispatch_queue_reset_override_priority(dq);
+ if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+ _dispatch_wqthread_override_reset();
+ }
+ }
+ _dispatch_introspection_queue_item_complete(dq);
+ if (running == 0) {
+ return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
+ }
+ } else {
+ // Queue busy or suspended: apply the override to its current thread.
+ mach_port_t th = dq->dq_thread;
+ if (th) {
+ p = _dispatch_queue_get_override_priority(dq);
+ _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
+ th, p);
+ _dispatch_wqthread_override_start(th, p);
+ }
+ }
+ _dispatch_release(dq); // added when we pushed the override block
+}
#endif
-#ifndef DISPATCH_CONTENTION_USLEEP_START
-#define DISPATCH_CONTENTION_USLEEP_START 500
+
+// Decide whether an override push is possible and useful: the target must be
+// a QoS root queue, and p must exceed the QoS class of both dq and tq. On
+// success a reference on dq is taken; the stealer continuation
+// (_dispatch_queue_override_invoke) releases it.
+static inline bool
+_dispatch_queue_prepare_override(dispatch_queue_t dq, dispatch_queue_t tq,
+ pthread_priority_t p)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ if (dx_type(tq) != DISPATCH_QUEUE_ROOT_TYPE || !tq->dq_priority) {
+ return false;
+ }
+ if (p <= (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+ return false;
+ }
+ if (p <= (tq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+ return false;
+ }
+ _dispatch_retain(dq);
+ return true;
+#else
+ (void)dq; (void)tq; (void)p;
+ return false;
#endif
-#ifndef DISPATCH_CONTENTION_USLEEP_MAX
-#define DISPATCH_CONTENTION_USLEEP_MAX 100000
+}
+
+// Push a stealer continuation for dq onto the root queue that matches the
+// override QoS class p (inheriting tq's overcommit flag). The reference on
+// dq was taken in _dispatch_queue_prepare_override and is released by
+// _dispatch_queue_override_invoke.
+static inline void
+_dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq,
+ pthread_priority_t p)
+{
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ unsigned int qosbit, idx, overcommit;
+ overcommit = (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 1 : 0;
+ // p's QoS class is a one-bit mask; ffs maps it to a root queue index.
+ qosbit = (p & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >>
+ _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;
+ idx = (unsigned int)__builtin_ffs((int)qosbit);
+ if (!idx || idx > DISPATCH_QUEUE_QOS_COUNT) {
+ DISPATCH_CRASH("Corrupted override priority");
+ }
+ dispatch_queue_t rq = &_dispatch_root_queues[((idx-1) << 1) | overcommit];
+
+ dispatch_continuation_t dc = _dispatch_continuation_alloc();
+ dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
+ dc->dc_func = _dispatch_queue_override_invoke;
+ dc->dc_ctxt = dc;
+ dc->dc_priority = tq->dq_priority;
+ dc->dc_voucher = NULL;
+ dc->dc_data = dq;
+ // dq retained by _dispatch_queue_prepare_override
+
+ _dispatch_queue_push(rq, dc, 0);
+#else
+ (void)dq; (void)tq; (void)p;
#endif
+}
+
+#pragma mark -
+#pragma mark dispatch_root_queue_drain
DISPATCH_NOINLINE
static bool
dispatch_root_queue_context_t qc = dq->do_ctxt;
struct dispatch_object_s *const mediator = (void *)~0ul;
bool pending = false, available = true;
- unsigned int spins, sleep_time = DISPATCH_CONTENTION_USLEEP_START;
+ unsigned int sleep_time = DISPATCH_CONTENTION_USLEEP_START;
do {
// Spin for a short while in case the contention is temporary -- e.g.
// when starting up after dispatch_apply, or when executing a few
// short continuations in a row.
-#if DISPATCH_CONTENTION_USE_RAND
- // Use randomness to prevent threads from resonating at the same
- // frequency and permanently contending. All threads sharing the same
- // seed value is safe with the FreeBSD rand_r implementation.
- static unsigned int seed;
- spins = (rand_r(&seed) & DISPATCH_CONTENTION_SPINS_MAX) |
- DISPATCH_CONTENTION_SPINS_MIN;
-#else
- spins = DISPATCH_CONTENTION_SPINS_MIN +
- (DISPATCH_CONTENTION_SPINS_MAX-DISPATCH_CONTENTION_SPINS_MIN)/2;
-#endif
- while (spins--) {
- dispatch_hardware_pause();
- if (fastpath(dq->dq_items_head != mediator)) goto out;
- };
+ if (_dispatch_contention_wait_until(dq->dq_items_head != mediator)) {
+ goto out;
+ }
// Since we have serious contention, we need to back off.
if (!pending) {
// Mark this queue as pending to avoid requests for further threads
// Create a new pending thread and then exit this thread.
// The kernel will grant a new thread when the load subsides.
_dispatch_debug("contention on global queue: %p", dq);
- _dispatch_queue_wakeup_global(dq);
available = false;
out:
if (pending) {
(void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed);
}
+ if (!available) {
+ _dispatch_queue_wakeup_global(dq);
+ }
+ return available;
+}
+
+// Resolve the transient head/tail disagreement a concurrent drainer can
+// observe; returns true when items remain on the queue (tail non-NULL).
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_concurrent_drain_one2(dispatch_queue_t dq)
+{
+ // Wait for queue head and tail to be both non-empty or both empty
+ bool available; // <rdar://problem/15917893>
+ _dispatch_wait_until((dq->dq_items_head != NULL) ==
+ (available = (dq->dq_items_tail != NULL)));
return available;
}
// The first xchg on the tail will tell the enqueueing thread that it
// is safe to blindly write out to the head pointer. A cmpxchg honors
// the algorithm.
- (void)dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL,
- relaxed);
+ if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator,
+ NULL, relaxed))) {
+ goto start;
+ }
+ if (slowpath(dq->dq_items_tail) && // <rdar://problem/14416349>
+ _dispatch_queue_concurrent_drain_one2(dq)) {
+ goto start;
+ }
_dispatch_root_queue_debug("no work on global queue: %p", dq);
return NULL;
}
// both head and tail are NULL now
goto out;
}
-
- // There must be a next item now. This thread won't wait long.
- while (!(next = head->do_next)) {
- dispatch_hardware_pause();
- }
+ // There must be a next item now.
+ _dispatch_wait_until(next = head->do_next);
}
dispatch_atomic_store2o(dq, dq_items_head, next, relaxed);
}
#endif
_dispatch_thread_setspecific(dispatch_queue_key, dq);
+ pthread_priority_t old_pri = _dispatch_get_priority();
+ pthread_priority_t pri = dq->dq_priority ? dq->dq_priority : old_pri;
+ pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri);
#if DISPATCH_COCOA_COMPAT
// ensure that high-level memory management techniques do not leak/crash
_dispatch_perfmon_start();
struct dispatch_object_s *item;
+ bool reset = false;
while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) {
+ if (reset) _dispatch_wqthread_override_reset();
_dispatch_continuation_pop(item);
+ reset = _dispatch_reset_defaultpriority_override();
}
+ _dispatch_voucher_debug("root queue clear", NULL);
+ _dispatch_set_priority_and_replace_voucher(old_pri, NULL);
+ _dispatch_reset_defaultpriority(old_dp);
_dispatch_perfmon_end();
#if DISPATCH_COCOA_COMPAT
#if HAVE_PTHREAD_WORKQUEUES
+// Common workqueue worker body (renamed from _dispatch_worker_thread3, which
+// is now the QoS decoder below): account for the satisfied pending-thread
+// request, then drain the root queue.
static void
-_dispatch_worker_thread3(void *context)
+_dispatch_worker_thread4(void *context)
{
dispatch_queue_t dq = context;
dispatch_root_queue_context_t qc = dq->do_ctxt;
_dispatch_introspection_thread_add();
-
- (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed);
+ int pending = (int)dispatch_atomic_dec2o(qc, dgq_pending, relaxed);
+ dispatch_assert(pending >= 0);
_dispatch_root_queue_drain(dq);
__asm__(""); // prevent tailcall (for Instrument DTrace probe)
+}
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+// QoS workqueue entry point: the kernel only hands us a pthread_priority_t;
+// decode its QoS class and overcommit flag into the matching root queue
+// index, then run the common worker body.
+static void
+_dispatch_worker_thread3(pthread_priority_t priority)
+{
+ // Reset priority TSD to workaround <rdar://problem/17825261>
+ _dispatch_thread_setspecific(dispatch_priority_key,
+ (void*)(uintptr_t)(priority & ~_PTHREAD_PRIORITY_FLAGS_MASK));
+ unsigned int overcommit, qosbit, idx;
+ overcommit = (priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 1 : 0;
+ qosbit = (priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >>
+ _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;
+ if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].
+ dq_priority) {
+ // If kernel doesn't support maintenance, bottom bit is background.
+ // Shift to our idea of where background bit is.
+ qosbit <<= 1;
+ }
+ idx = (unsigned int)__builtin_ffs((int)qosbit);
+ dispatch_assert(idx > 0 && idx < DISPATCH_QUEUE_QOS_COUNT+1);
+ dispatch_queue_t dq = &_dispatch_root_queues[((idx-1) << 1) | overcommit];
+ return _dispatch_worker_thread4(dq);
}
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
// 6618342 Contact the team that owns the Instrument DTrace probe before
dispatch_assert(!(options & ~WORKQ_ADDTHREADS_OPTION_OVERCOMMIT));
dispatch_queue_t dq = _dispatch_wq2root_queues[priority][options];
- return _dispatch_worker_thread3(dq);
+ return _dispatch_worker_thread4(dq);
}
#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP
#endif // HAVE_PTHREAD_WORKQUEUES
dispatch_root_queue_context_t qc = dq->do_ctxt;
dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
- if (pqc && pqc->dpq_thread_configure) {
+ if (pqc->dpq_thread_configure) {
pqc->dpq_thread_configure();
}
(void)dispatch_assume_zero(r);
_dispatch_introspection_thread_add();
- // Non-pthread-root-queue pthreads use a 65 second timeout in case there
- // are any timers that run once a minute <rdar://problem/11744973>
- const int64_t timeout = (pqc ? 5ull : 65ull) * NSEC_PER_SEC;
-
+ const int64_t timeout = 5ull * NSEC_PER_SEC;
do {
_dispatch_root_queue_drain(dq);
- } while (dispatch_semaphore_wait(qc->dgq_thread_mediator,
+ } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator,
dispatch_time(0, timeout)) == 0);
- (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size, relaxed);
+ (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size, release);
_dispatch_queue_wakeup_global(dq);
_dispatch_release(dq);
dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD;
dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs);
_dispatch_queue_init(dq);
- dq->do_targetq = _dispatch_get_root_queue(0, true);
+ dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true);
dq->dq_label = label ? label : "runloop-queue"; // no-copy contract
dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK;
dq->dq_running = 1;
DISPATCH_OBJECT_SUSPEND_LOCK, release);
_dispatch_queue_clear_bound_thread(dq);
if (suspend_cnt == 0) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
}
DISPATCH_OBJECT_SUSPEND_LOCK, release);
dq->dq_is_thread_bound = 0;
if (suspend_cnt == 0) {
- _dispatch_wakeup(dq);
+ _dispatch_queue_wakeup(dq);
}
// overload the "probably" variable to mean that dispatch_main() or
// similar non-POSIX API was called
// this has to run before the DISPATCH_COCOA_COMPAT below
if (_dispatch_program_is_probably_callback_driven) {
- dispatch_async_f(_dispatch_get_root_queue(0, true), NULL,
- _dispatch_sig_thread);
+ _dispatch_barrier_async_detached_f(_dispatch_get_root_queue(
+ _DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread);
sleep(1); // workaround 6778970
}
/* x86 & cortex-a8 have a 64 byte cacheline */
#define DISPATCH_CACHELINE_SIZE 64u
-#define DISPATCH_CONTINUATION_SIZE DISPATCH_CACHELINE_SIZE
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
~(DISPATCH_CACHELINE_SIZE - 1u))
-#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
- (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
- ~(DISPATCH_CONTINUATION_SIZE - 1u))
-#define ROUND_UP_TO_VECTOR_SIZE(x) \
- (((x) + 15u) & ~15u)
#define DISPATCH_CACHELINE_ALIGN \
__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
-#define DISPATCH_QUEUE_CACHELINE_PADDING \
- char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
-#ifdef __LP64__
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
- (3*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
- + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#else
-#define DISPATCH_QUEUE_CACHELINE_PAD (( \
- (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
- + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
-#if !DISPATCH_INTROSPECTION
-// No padding, DISPATCH_QUEUE_CACHELINE_PAD == 0
-#undef DISPATCH_QUEUE_CACHELINE_PADDING
-#define DISPATCH_QUEUE_CACHELINE_PADDING
-#endif
-#endif
-
-// If dc_vtable is less than 127, then the object is a continuation.
-// Otherwise, the object has a private layout and memory management rules. The
-// layout until after 'do_next' must align with normal objects.
-#define DISPATCH_CONTINUATION_HEADER(x) \
- _OS_OBJECT_HEADER( \
- const void *do_vtable, \
- do_ref_cnt, \
- do_xref_cnt); \
- struct dispatch_##x##_s *volatile do_next; \
- dispatch_function_t dc_func; \
- void *dc_ctxt; \
- void *dc_data; \
- void *dc_other;
-
-#define DISPATCH_OBJ_ASYNC_BIT 0x1
-#define DISPATCH_OBJ_BARRIER_BIT 0x2
-#define DISPATCH_OBJ_GROUP_BIT 0x4
-#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8
-// vtables are pointers far away from the low page in memory
-#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul)
-
-struct dispatch_continuation_s {
- DISPATCH_CONTINUATION_HEADER(continuation);
-};
-
-typedef struct dispatch_continuation_s *dispatch_continuation_t;
-
-struct dispatch_apply_s {
- size_t volatile da_index, da_todo;
- size_t da_iterations, da_nested;
- dispatch_continuation_t da_dc;
- _dispatch_thread_semaphore_t da_sema;
- uint32_t da_thr_cnt;
-};
-
-typedef struct dispatch_apply_s *dispatch_apply_t;
-
-DISPATCH_CLASS_DECL(queue_attr);
-struct dispatch_queue_attr_s {
- DISPATCH_STRUCT_HEADER(queue_attr);
-};
+#pragma mark -
+#pragma mark dispatch_queue_t
#define DISPATCH_QUEUE_HEADER \
uint32_t volatile dq_running; \
/* LP64 global queue cacheline boundary */ \
struct dispatch_object_s *volatile dq_items_tail; \
dispatch_queue_t dq_specific_q; \
- uint32_t dq_width; \
- unsigned int dq_is_thread_bound:1; \
+ uint16_t dq_width; \
+ uint16_t dq_is_thread_bound:1; \
+ pthread_priority_t dq_priority; \
+ mach_port_t dq_thread; \
+ mach_port_t volatile dq_tqthread; \
+ uint32_t volatile dq_override; \
unsigned long dq_serialnum; \
const char *dq_label; \
DISPATCH_INTROSPECTION_QUEUE_LIST;
+#define DISPATCH_QUEUE_WIDTH_MAX UINT16_MAX
+
+#define DISPATCH_QUEUE_CACHELINE_PADDING \
+ char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
+#ifdef __LP64__
+#define DISPATCH_QUEUE_CACHELINE_PAD (( \
+ (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
+ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
+#else
+#define DISPATCH_QUEUE_CACHELINE_PAD (( \
+ (13*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
+ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
+#endif
+
DISPATCH_CLASS_DECL(queue);
struct dispatch_queue_s {
DISPATCH_STRUCT_HEADER(queue);
DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue);
DISPATCH_CLASS_DECL(queue_specific_queue);
-extern struct dispatch_queue_s _dispatch_mgr_q;
-
void _dispatch_queue_destroy(dispatch_object_t dou);
void _dispatch_queue_dispose(dispatch_queue_t dq);
void _dispatch_queue_invoke(dispatch_queue_t dq);
void _dispatch_queue_push_list_slow(dispatch_queue_t dq,
- struct dispatch_object_s *obj, unsigned int n);
+ pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n,
+ bool retained);
void _dispatch_queue_push_slow(dispatch_queue_t dq,
- struct dispatch_object_s *obj);
+ pthread_priority_t pp, struct dispatch_object_s *obj, bool retained);
unsigned long _dispatch_queue_probe(dispatch_queue_t dq);
dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou);
+dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq);
+void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq,
+ pthread_priority_t pp);
+void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq,
+ pthread_priority_t pp);
_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou);
void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
dqsq);
void _dispatch_sync_recurse_invoke(void *ctxt);
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
+void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func);
void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
size_t bufsiz);
-#define DISPATCH_QUEUE_PRIORITY_COUNT 4
-#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2)
+#define DISPATCH_QUEUE_QOS_COUNT 6
+#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)
-// overcommit priority index values need bit 1 set
+// must be in lowest to highest qos order (as encoded in pthread_priority_t)
+// overcommit qos index values need bit 1 set
enum {
- DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY = 0,
- DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY,
- DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY,
- DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY,
- DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY,
- DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY,
- DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY,
- DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY,
+ DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
+ DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
+ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
+ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
+ DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
+ DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
+ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
+ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
+ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
+ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
+ DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
+ DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
};
extern unsigned long volatile _dispatch_queue_serial_numbers;
extern struct dispatch_queue_s _dispatch_root_queues[];
+extern struct dispatch_queue_s _dispatch_mgr_q;
-#if !(USE_OBJC && __OBJC2__)
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head,
- struct dispatch_object_s *tail)
-{
- struct dispatch_object_s *prev;
- tail->do_next = NULL;
- prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release);
- if (fastpath(prev)) {
- // if we crash here with a value less than 0x1000, then we are at a
- // known bug in client code for example, see _dispatch_queue_dispose
- // or _dispatch_atfork_child
- prev->do_next = head;
- }
- return (prev != NULL);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
- dispatch_object_t _tail, unsigned int n)
-{
- struct dispatch_object_s *head = _head._do, *tail = _tail._do;
- if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) {
- _dispatch_queue_push_list_slow(dq, head, n);
- }
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail)
-{
- struct dispatch_object_s *tail = _tail._do;
- if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
- _dispatch_queue_push_slow(dq, tail);
- }
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
- bool wakeup)
-{
- struct dispatch_object_s *tail = _tail._do;
- if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) {
- _dispatch_queue_push_slow(dq, tail);
- } else if (slowpath(wakeup)) {
- _dispatch_wakeup(dq);
- }
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_class_invoke(dispatch_object_t dou,
- dispatch_queue_t (*invoke)(dispatch_object_t,
- _dispatch_thread_semaphore_t*))
-{
- dispatch_queue_t dq = dou._dq;
- if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
- fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){
- dispatch_queue_t tq = NULL;
- _dispatch_thread_semaphore_t sema = 0;
- tq = invoke(dq, &sema);
- // We do not need to check the result.
- // When the suspend-count lock is dropped, then the check will happen.
- (void)dispatch_atomic_dec2o(dq, dq_running, release);
- if (sema) {
- _dispatch_thread_semaphore_signal(sema);
- } else if (tq) {
- _dispatch_introspection_queue_item_complete(dq);
- return _dispatch_queue_push(tq, dq);
- }
- }
- dq->do_next = DISPATCH_OBJECT_LISTLESS;
- if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
- DISPATCH_OBJECT_SUSPEND_LOCK, release)) {
- dispatch_atomic_barrier(seq_cst); // <rdar://problem/11915417>
- if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
- _dispatch_wakeup(dq); // verify that the queue is idle
- }
- }
- _dispatch_introspection_queue_item_complete(dq);
- _dispatch_release(dq); // added when the queue is put on the list
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_t
-_dispatch_queue_get_current(void)
-{
- return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
-}
-
-DISPATCH_ALWAYS_INLINE DISPATCH_CONST
-static inline dispatch_queue_t
-_dispatch_get_root_queue(long priority, bool overcommit)
-{
- if (overcommit) switch (priority) {
- case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
-#if !DISPATCH_NO_BG_PRIORITY
- return &_dispatch_root_queues[
- DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY];
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+extern pthread_priority_t _dispatch_background_priority;
+extern pthread_priority_t _dispatch_user_initiated_priority;
+#endif
+
+#pragma mark -
+#pragma mark dispatch_queue_attr_t
+
+DISPATCH_CLASS_DECL(queue_attr);
+struct dispatch_queue_attr_s {
+ DISPATCH_STRUCT_HEADER(queue_attr);
+ qos_class_t dqa_qos_class;
+ int dqa_relative_priority;
+ unsigned int dqa_overcommit:1, dqa_concurrent:1;
+};
+
+enum {
+ DQA_INDEX_NON_OVERCOMMIT = 0,
+ DQA_INDEX_OVERCOMMIT,
+};
+
+enum {
+ DQA_INDEX_CONCURRENT = 0,
+ DQA_INDEX_SERIAL,
+};
+
+#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)
+
+typedef enum {
+ DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0,
+ DQA_INDEX_QOS_CLASS_MAINTENANCE,
+ DQA_INDEX_QOS_CLASS_BACKGROUND,
+ DQA_INDEX_QOS_CLASS_UTILITY,
+ DQA_INDEX_QOS_CLASS_DEFAULT,
+ DQA_INDEX_QOS_CLASS_USER_INITIATED,
+ DQA_INDEX_QOS_CLASS_USER_INTERACTIVE,
+} _dispatch_queue_attr_index_qos_class_t;
+
+extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
+ [DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2];
+
+#pragma mark -
+#pragma mark dispatch_continuation_t
+
+// If dc_vtable is less than 127, then the object is a continuation.
+// Otherwise, the object has a private layout and memory management rules. The
+// layout until after 'do_next' must align with normal objects.
+#if __LP64__
+#define DISPATCH_CONTINUATION_HEADER(x) \
+ const void *do_vtable; \
+ union { \
+ pthread_priority_t dc_priority; \
+ int dc_cache_cnt; \
+ uintptr_t dc_pad; \
+ }; \
+ struct dispatch_##x##_s *volatile do_next; \
+ struct voucher_s *dc_voucher; \
+ dispatch_function_t dc_func; \
+ void *dc_ctxt; \
+ void *dc_data; \
+ void *dc_other;
+#define _DISPATCH_SIZEOF_PTR 8
+#else
+#define DISPATCH_CONTINUATION_HEADER(x) \
+ const void *do_vtable; \
+ union { \
+ pthread_priority_t dc_priority; \
+ int dc_cache_cnt; \
+ uintptr_t dc_pad; \
+ }; \
+ struct voucher_s *dc_voucher; \
+ struct dispatch_##x##_s *volatile do_next; \
+ dispatch_function_t dc_func; \
+ void *dc_ctxt; \
+ void *dc_data; \
+ void *dc_other;
+#define _DISPATCH_SIZEOF_PTR 4
#endif
- case DISPATCH_QUEUE_PRIORITY_LOW:
- case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
- return &_dispatch_root_queues[
- DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY];
- case DISPATCH_QUEUE_PRIORITY_DEFAULT:
- return &_dispatch_root_queues[
- DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY];
- case DISPATCH_QUEUE_PRIORITY_HIGH:
- return &_dispatch_root_queues[
- DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY];
- }
- switch (priority) {
- case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
-#if !DISPATCH_NO_BG_PRIORITY
- return &_dispatch_root_queues[
- DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY];
+// Allocation size of a continuation: 8 pointer-sized slots, rounded up to a
+// cacheline on SMP so concurrently-touched continuations do not share a line.
+#define _DISPATCH_CONTINUATION_PTRS 8
+#if DISPATCH_HW_CONFIG_UP
+// UP devices don't contend on continuations so we don't need to force them to
+// occupy a whole cacheline (which is intended to avoid contention)
+#define DISPATCH_CONTINUATION_SIZE \
+ (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)
+#else
+#define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \
+ (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR))
#endif
- case DISPATCH_QUEUE_PRIORITY_LOW:
- case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE:
- return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY];
- case DISPATCH_QUEUE_PRIORITY_DEFAULT:
- return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY];
- case DISPATCH_QUEUE_PRIORITY_HIGH:
- return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY];
- default:
- return NULL;
- }
-}
-
-// Note to later developers: ensure that any initialization changes are
-// made for statically allocated queues (i.e. _dispatch_main_q).
-static inline void
-_dispatch_queue_init(dispatch_queue_t dq)
-{
- dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS;
-
- dq->dq_running = 0;
- dq->dq_width = 1;
- dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers,
- relaxed);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_set_bound_thread(dispatch_queue_t dq)
-{
- //Tag thread-bound queues with the owning thread
- dispatch_assert(dq->dq_is_thread_bound);
- dq->do_finalizer = (void*)_dispatch_thread_self();
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_clear_bound_thread(dispatch_queue_t dq)
-{
- dispatch_assert(dq->dq_is_thread_bound);
- dq->do_finalizer = NULL;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline pthread_t
-_dispatch_queue_get_bound_thread(dispatch_queue_t dq)
-{
- dispatch_assert(dq->dq_is_thread_bound);
- return (pthread_t)dq->do_finalizer;
-}
+// Round x up to a multiple of DISPATCH_CONTINUATION_SIZE.
+// NOTE(review): the mask form assumes DISPATCH_CONTINUATION_SIZE is a power
+// of two -- confirm for the UP (non-cacheline-rounded) configuration.
+#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
+ (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
+ ~(DISPATCH_CONTINUATION_SIZE - 1u))
+
+// Flag bits stored in a continuation's do_vtable word (see
+// DISPATCH_OBJ_IS_VTABLE below: values <= 0xff are flags, not vtables).
+// NOTE(review): 0x40 is not defined here -- possibly reserved; confirm.
+#define DISPATCH_OBJ_ASYNC_BIT 0x1
+#define DISPATCH_OBJ_BARRIER_BIT 0x2
+#define DISPATCH_OBJ_GROUP_BIT 0x4
+#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8
+#define DISPATCH_OBJ_BLOCK_RELEASE_BIT 0x10
+#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x20
+#define DISPATCH_OBJ_HAS_VOUCHER_BIT 0x80
+// vtables are pointers far away from the low page in memory
+#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 0xfful)
+
+struct dispatch_continuation_s {
+ DISPATCH_CONTINUATION_HEADER(continuation);
+};
+typedef struct dispatch_continuation_s *dispatch_continuation_t;
#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
_dispatch_continuation_free_to_heap(c)
#endif
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_continuation_t
-_dispatch_continuation_alloc_cacheonly(void)
-{
- dispatch_continuation_t dc = (dispatch_continuation_t)
- fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
- if (dc) {
- _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
- }
- return dc;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_continuation_t
-_dispatch_continuation_alloc(void)
-{
- dispatch_continuation_t dc =
- fastpath(_dispatch_continuation_alloc_cacheonly());
- if(!dc) {
- return _dispatch_continuation_alloc_from_heap();
- }
- return dc;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_continuation_t
-_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc)
-{
- dispatch_continuation_t prev_dc = (dispatch_continuation_t)
- fastpath(_dispatch_thread_getspecific(dispatch_cache_key));
- int cnt = prev_dc ? prev_dc->do_ref_cnt + 1 : 1;
- // Cap continuation cache
- if (slowpath(cnt > _dispatch_continuation_cache_limit)) {
- return dc;
- }
- dc->do_next = prev_dc;
- dc->do_ref_cnt = cnt;
- _dispatch_thread_setspecific(dispatch_cache_key, dc);
- return NULL;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_continuation_free(dispatch_continuation_t dc)
-{
- dc = _dispatch_continuation_free_cacheonly(dc);
- if (slowpath(dc)) {
- _dispatch_continuation_free_to_cache_limit(dc);
- }
-}
-#endif // !(USE_OBJC && __OBJC2__)
+#pragma mark -
+#pragma mark dispatch_apply_t
+
+// Shared state for one dispatch_apply() invocation; da_index/da_todo are
+// updated concurrently by the worker threads (hence volatile).
+struct dispatch_apply_s {
+ size_t volatile da_index, da_todo;
+ size_t da_iterations, da_nested;
+ dispatch_continuation_t da_dc;
+ _dispatch_thread_semaphore_t da_sema;
+ uint32_t da_thr_cnt;
+};
+typedef struct dispatch_apply_s *dispatch_apply_t;
+
+#pragma mark -
+#pragma mark dispatch_block_t
+
+#ifdef __BLOCKS__
+
+// Encoding of dispatch_block_flags_t plus internal bits in the high word.
+#define DISPATCH_BLOCK_API_MASK (0x80u - 1)
+#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
+#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
+
+// Per-block private data captured by dispatch_block_create() and friends;
+// identified at runtime by dbpd_magic.
+struct dispatch_block_private_data_s {
+ unsigned long dbpd_magic;
+ dispatch_block_flags_t dbpd_flags;
+ unsigned int volatile dbpd_atomic_flags;
+ int volatile dbpd_performed;
+ pthread_priority_t dbpd_priority;
+ voucher_t dbpd_voucher;
+ dispatch_block_t dbpd_block;
+ struct dispatch_semaphore_s dbpd_group;
+ dispatch_queue_t volatile dbpd_queue;
+ mach_port_t dbpd_thread;
+};
+typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;
+
+// dbpd_atomic_flags bits
+#define DBF_CANCELED 1u // block has been cancelled
+#define DBF_WAITING 2u // dispatch_block_wait has begun
+#define DBF_WAITED 4u // dispatch_block_wait has finished without timeout
+#define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave
+
+#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk
+
+// Static initializer; dbpd_group enters with count 1, left when the block
+// runs (balances dispatch_block_wait / _notify).
+#define DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, prio, block) \
+ { \
+ .dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
+ .dbpd_flags = (flags), \
+ .dbpd_priority = (prio), \
+ .dbpd_voucher = (voucher), \
+ .dbpd_block = (block), \
+ .dbpd_group = DISPATCH_GROUP_INITIALIZER(1), \
+ }
+
+dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
+ voucher_t voucher, pthread_priority_t priority, dispatch_block_t block);
+void _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd);
+
+#endif /* __BLOCKS__ */
#endif
// semaphores are too fundamental to use the dispatch_assume*() macros
#if USE_MACH_SEM
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
- if (slowpath(x)) { \
- DISPATCH_CRASH("flawed group/semaphore logic"); \
+ if (slowpath((x) == KERN_INVALID_NAME)) { \
+ DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_semaphore_t"); \
+ } else if (slowpath(x)) { \
+ DISPATCH_CRASH("mach semaphore API failure"); \
+ } \
+ } while (0)
+#define DISPATCH_GROUP_VERIFY_KR(x) do { \
+ if (slowpath((x) == KERN_INVALID_NAME)) { \
+ DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_group_t"); \
+ } else if (slowpath(x)) { \
+ DISPATCH_CRASH("mach semaphore API failure"); \
} \
} while (0)
#elif USE_POSIX_SEM
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
if (slowpath((x) == -1)) { \
- DISPATCH_CRASH("flawed group/semaphore logic"); \
+ DISPATCH_CRASH("POSIX semaphore API failure"); \
} \
} while (0)
#endif
dispatch_semaphore_t dsema = dou._dsema;
dsema->do_next = (dispatch_semaphore_t)DISPATCH_OBJECT_LISTLESS;
- dsema->do_targetq = dispatch_get_global_queue(
- DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
+ dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
+ false);
dsema->dsema_value = value;
dsema->dsema_orig = value;
#if USE_POSIX_SEM
if (!dispatch_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
kr = semaphore_destroy(mach_task_self(), tmp);
+ DISPATCH_VERIFY_MIG(kr);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
}
}
kern_return_t kr;
if (dsema->dsema_port) {
kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
+ DISPATCH_VERIFY_MIG(kr);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
}
+ dsema->dsema_port = MACH_PORT_DEAD;
#elif USE_POSIX_SEM
int ret = sem_destroy(&dsema->dsema_sem);
DISPATCH_SEMAPHORE_VERIFY_RET(ret);
_dispatch_semaphore_create_port(&dsema->dsema_port);
do {
kern_return_t kr = semaphore_signal(dsema->dsema_port);
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+ DISPATCH_GROUP_VERIFY_KR(kr);
} while (--rval);
#elif USE_POSIX_SEM
do {
do {
next = fastpath(head->do_next);
if (!next && head != tail) {
- while (!(next = fastpath(head->do_next))) {
- dispatch_hardware_pause();
- }
+ _dispatch_wait_until(next = fastpath(head->do_next));
}
dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data;
dc = _dispatch_continuation_free_cacheonly(head);
} while (kr == KERN_ABORTED);
if (kr != KERN_OPERATION_TIMED_OUT) {
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+ DISPATCH_GROUP_VERIFY_KR(kr);
break;
}
#elif USE_POSIX_SEM
do {
kr = semaphore_wait(dsema->dsema_port);
} while (kr == KERN_ABORTED);
- DISPATCH_SEMAPHORE_VERIFY_KR(kr);
+ DISPATCH_GROUP_VERIFY_KR(kr);
#elif USE_POSIX_SEM
do {
ret = sem_wait(&dsema->dsema_sem);
} else {
_dispatch_retain(dg);
dispatch_atomic_store2o(dsema, dsema_notify_head, dsn, seq_cst);
- dispatch_atomic_barrier(seq_cst); // <rdar://problem/11750916>
+ // seq_cst with atomic store to notify_head <rdar://problem/11750916>
if (dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) {
_dispatch_group_wake(dsema);
}
#elif USE_MACH_SEM
semaphore_t s4 = (semaphore_t)sema;
kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
+ DISPATCH_VERIFY_MIG(kr);
DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
sem_t s4 = (sem_t)sema;
DISPATCH_CLASS_DECL(group);
+// Static initializer for a group with 's' outstanding enters: groups count
+// down from LONG_MAX (dsema_orig), so the initial value is LONG_MAX - s.
+// Global refcounts mark the object as statically allocated (never disposed).
+#define DISPATCH_GROUP_INITIALIZER(s) \
+ { \
+ .do_vtable = (const void*)DISPATCH_VTABLE(group), \
+ .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \
+ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \
+ .dsema_value = LONG_MAX - (s), \
+ .dsema_orig = LONG_MAX, \
+ }
+
void _dispatch_semaphore_dispose(dispatch_object_t dou);
size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf,
 size_t bufsiz);
#define __DISPATCH_OS_SHIMS__
#include <pthread.h>
+// QoS shims: map the internal _DISPATCH_QOS_CLASS_* names onto the
+// platform's QOS_CLASS_* values when <pthread/qos.h> is available, and
+// provide standalone fallbacks (including pthread_priority_t) otherwise.
+#if HAVE_PTHREAD_QOS_H && __has_include(<pthread/qos.h>)
+#include <pthread/qos.h>
+#if __has_include(<pthread/qos_private.h>)
+#include <pthread/qos_private.h>
+#define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE
+#define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED
+// BUGFIX: this was "#ifndef QOS_CLASS_LEGACY", which expanded
+// _DISPATCH_QOS_CLASS_DEFAULT to QOS_CLASS_LEGACY precisely when that name
+// is NOT defined (a guaranteed build break at first use). Prefer the legacy
+// name only when the SDK actually provides it.
+#ifdef QOS_CLASS_LEGACY
+#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_LEGACY
+#else
+#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT
+#endif
+#define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY
+#define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND
+#define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED
+#else // pthread/qos_private.h
+typedef unsigned long pthread_priority_t;
+#endif // pthread/qos_private.h
+#if __has_include(<sys/qos_private.h>)
+#include <sys/qos_private.h>
+#define _DISPATCH_QOS_CLASS_MAINTENANCE QOS_CLASS_MAINTENANCE
+#else // sys/qos_private.h
+#define _DISPATCH_QOS_CLASS_MAINTENANCE 0x05
+#endif // sys/qos_private.h
+// pthread_priority_t flag-bit fallbacks for SDKs that predate them.
+#ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG
+#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
+#endif
+#ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG
+#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
+#endif
+#ifndef _PTHREAD_PRIORITY_OVERRIDE_FLAG
+#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
+#endif
+#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG
+#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
+#endif
+#else // HAVE_PTHREAD_QOS_H
+typedef unsigned int qos_class_t;
+typedef unsigned long pthread_priority_t;
+#define QOS_MIN_RELATIVE_PRIORITY (-15)
+#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00
+#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
+#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
+#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
+#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
+#endif // HAVE_PTHREAD_QOS_H
+// Hard-coded values expected to match the OS qos_class_t definitions --
+// used when the headers above did not supply the names.
+#ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE
+enum {
+ _DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21,
+ _DISPATCH_QOS_CLASS_USER_INITIATED = 0x19,
+ _DISPATCH_QOS_CLASS_DEFAULT = 0x15,
+ _DISPATCH_QOS_CLASS_UTILITY = 0x11,
+ _DISPATCH_QOS_CLASS_BACKGROUND = 0x09,
+ _DISPATCH_QOS_CLASS_MAINTENANCE = 0x05,
+ _DISPATCH_QOS_CLASS_UNSPECIFIED = 0x00,
+};
+#endif // _DISPATCH_QOS_CLASS_USER_INTERACTIVE
+// Prefer the private workqueue SPI header when the SDK provides it; fall
+// back to the portable pthread_workqueue interface otherwise.
#if HAVE_PTHREAD_WORKQUEUES
+#if __has_include(<pthread/workqueue_private.h>)
+#include <pthread/workqueue_private.h>
+#else
#include <pthread_workqueue.h>
#endif
+#ifndef WORKQ_FEATURE_MAINTENANCE
+#define WORKQ_FEATURE_MAINTENANCE 0x10
+#endif
+#endif // HAVE_PTHREAD_WORKQUEUES
+
#if HAVE_PTHREAD_NP_H
#include <pthread_np.h>
#endif
}
#endif // TARGET_OS_WIN32
+// No-op stand-ins for QoS-override SPIs that only exist in newer pthread
+// workqueue SPI versions: on older SDKs the calls succeed (return 0) but do
+// nothing, so callers need no version checks of their own.
+#if PTHREAD_WORKQUEUE_SPI_VERSION < 20140716
+static inline int
+_pthread_workqueue_override_start_direct(mach_port_t thread,
+ pthread_priority_t priority)
+{
+ (void)thread; (void)priority;
+ return 0;
+}
+#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140716
+
+#if PTHREAD_WORKQUEUE_SPI_VERSION < 20140707
+static inline int
+_pthread_override_qos_class_start_direct(pthread_t thread,
+ pthread_priority_t priority)
+{
+ (void)thread; (void)priority;
+ return 0;
+}
+
+static inline int
+_pthread_override_qos_class_end_direct(mach_port_t thread)
+{
+ (void)thread;
+ return 0;
+}
+#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140707
+
#if !HAVE_NORETURN_BUILTIN_TRAP
/*
* XXXRW: Work-around for possible clang bug in which __builtin_trap() is not
#include "shims/atomic.h"
#include "shims/atomic_sfb.h"
#include "shims/tsd.h"
+#include "shims/yield.h"
+
#include "shims/hw_config.h"
#include "shims/perfmon.h"
default: \
_dispatch_atomic_full_barrier(); break; \
} })
-// Only emulate store seq_cst -> load seq_cst
+// seq_cst: only emulate explicit store(seq_cst) -> load(seq_cst)
#define dispatch_atomic_load(p, m) \
- ({ switch(dispatch_atomic_memory_order_##m) { \
- case _dispatch_atomic_memory_order_relaxed: \
+ ({ typeof(*(p)) _r = *(p); \
+ switch(dispatch_atomic_memory_order_##m) { \
case _dispatch_atomic_memory_order_seq_cst: \
+ _dispatch_atomic_barrier(m); /* fallthrough */ \
+ case _dispatch_atomic_memory_order_relaxed: \
break; \
default: \
_dispatch_atomic_unimplemented(); break; \
- }; *(p); })
+ } _r; })
#define dispatch_atomic_store(p, v, m) \
({ switch(dispatch_atomic_memory_order_##m) { \
case _dispatch_atomic_memory_order_release: \
+ case _dispatch_atomic_memory_order_seq_cst: \
_dispatch_atomic_barrier(m); /* fallthrough */ \
case _dispatch_atomic_memory_order_relaxed: \
- case _dispatch_atomic_memory_order_seq_cst: \
*(p) = (v); break; \
- default:\
+ default: \
_dispatch_atomic_unimplemented(); break; \
} switch(dispatch_atomic_memory_order_##m) { \
case _dispatch_atomic_memory_order_seq_cst: \
({ __asm__ __volatile__( \
"mfence" \
: : : "memory"); })
+#undef dispatch_atomic_load
+#define dispatch_atomic_load(p, m) \
+ ({ switch(dispatch_atomic_memory_order_##m) { \
+ case _dispatch_atomic_memory_order_seq_cst: \
+ case _dispatch_atomic_memory_order_relaxed: \
+ break; \
+ default: \
+ _dispatch_atomic_unimplemented(); break; \
+ } *(p); })
// xchg is faster than store + mfence
#undef dispatch_atomic_store
#define dispatch_atomic_store(p, v, m) \
#pragma mark -
#pragma mark generic
-#define dispatch_hardware_pause() ({ __asm__(""); })
// assume atomic builtins provide barriers
#define dispatch_atomic_barrier(m)
// see comment in dispatch_once.c
#pragma mark -
#pragma mark x86
-#undef dispatch_hardware_pause
-#define dispatch_hardware_pause() ({ __asm__("pause"); })
-
#undef dispatch_atomic_maximally_synchronizing_barrier
#ifdef __LP64__
#define dispatch_atomic_maximally_synchronizing_barrier() \
// Returns UINT_MAX if all the bits in p were already set.
#define dispatch_atomic_set_first_bit(p,m) _dispatch_atomic_set_first_bit(p,m)
-// TODO: rdar://11477843
DISPATCH_ALWAYS_INLINE
static inline unsigned int
-_dispatch_atomic_set_first_bit(volatile uint32_t *p, unsigned int max_index)
+_dispatch_atomic_set_first_bit(volatile unsigned long *p,
+ unsigned int max_index)
{
unsigned int index;
- typeof(*p) b, mask, b_masked;
+ unsigned long b, mask, b_masked;
for (;;) {
b = *p;
// ffs returns 1 + index, or 0 if none set.
- index = (unsigned int)__builtin_ffs((int)~b);
+ index = (unsigned int)__builtin_ffsl((long)~b);
if (slowpath(index == 0)) {
return UINT_MAX;
}
#if defined(__x86_64__) || defined(__i386__)
#undef dispatch_atomic_set_first_bit
-// TODO: rdar://11477843 uint64_t -> long
DISPATCH_ALWAYS_INLINE
static inline unsigned int
-dispatch_atomic_set_first_bit(volatile uint64_t *p, unsigned int max)
+dispatch_atomic_set_first_bit(volatile unsigned long *p, unsigned int max)
{
- typeof(*p) val, bit;
+ unsigned long val, bit;
if (max > (sizeof(val) * 8)) {
__asm__ (
"1: \n\t"
/*
- * Copyright (c) 2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
*
* @APPLE_APACHE_LICENSE_HEADER_START@
*
#ifndef __DISPATCH_SHIMS_HW_CONFIG__
#define __DISPATCH_SHIMS_HW_CONFIG__
-#if defined(__APPLE__)
-#define DISPATCH_SYSCTL_LOGICAL_CPUS "hw.logicalcpu_max"
-#define DISPATCH_SYSCTL_PHYSICAL_CPUS "hw.physicalcpu_max"
-#define DISPATCH_SYSCTL_ACTIVE_CPUS "hw.activecpu"
-#elif defined(__FreeBSD__)
-#define DISPATCH_SYSCTL_LOGICAL_CPUS "kern.smp.cpus"
-#define DISPATCH_SYSCTL_PHYSICAL_CPUS "kern.smp.cpus"
-#define DISPATCH_SYSCTL_ACTIVE_CPUS "kern.smp.cpus"
+#if !TARGET_OS_WIN32
+
+// Selector for the three CPU-count queries served by _dispatch_hw_get_config.
+typedef enum {
+ _dispatch_hw_config_logical_cpus,
+ _dispatch_hw_config_physical_cpus,
+ _dispatch_hw_config_active_cpus,
+} _dispatch_hw_config_t;
+
+// Use the kernel commpage when all three CPU-count slots are available.
+#if !defined(DISPATCH_HAVE_HW_CONFIG_COMMPAGE) && \
+ defined(_COMM_PAGE_LOGICAL_CPUS) && \
+ defined(_COMM_PAGE_PHYSICAL_CPUS) && defined(_COMM_PAGE_ACTIVE_CPUS)
+#define DISPATCH_HAVE_HW_CONFIG_COMMPAGE 1
#endif
-#if !TARGET_OS_WIN32
+#if DISPATCH_HAVE_HW_CONFIG_COMMPAGE
+// Commpage path: read the 8-bit CPU counts directly; no caching or init
+// needed. The switch covers every _dispatch_hw_config_t value.
+DISPATCH_ALWAYS_INLINE
static inline uint32_t
-_dispatch_get_logicalcpu_max(void)
+_dispatch_hw_get_config(_dispatch_hw_config_t c)
{
- uint32_t val = 1;
-#if defined(_COMM_PAGE_LOGICAL_CPUS)
- uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_LOGICAL_CPUS;
- val = (uint32_t)*u8val;
-#elif defined(DISPATCH_SYSCTL_LOGICAL_CPUS)
- size_t valsz = sizeof(val);
- int ret = sysctlbyname(DISPATCH_SYSCTL_LOGICAL_CPUS,
- &val, &valsz, NULL, 0);
- (void)dispatch_assume_zero(ret);
- (void)dispatch_assume(valsz == sizeof(uint32_t));
-#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
- int ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
- val = ret < 0 ? 1 : ret;
-#else
-#warning "no supported way to query logical CPU count"
-#endif
- return val;
+ uintptr_t p;
+ switch (c) {
+ case _dispatch_hw_config_logical_cpus:
+ p = _COMM_PAGE_LOGICAL_CPUS; break;
+ case _dispatch_hw_config_physical_cpus:
+ p = _COMM_PAGE_PHYSICAL_CPUS; break;
+ case _dispatch_hw_config_active_cpus:
+ p = _COMM_PAGE_ACTIVE_CPUS; break;
+ }
+ return *(uint8_t*)p;
}
+#define dispatch_hw_config(c) \
+ _dispatch_hw_get_config(_dispatch_hw_config_##c)
+
+#define DISPATCH_HW_CONFIG()
+#define _dispatch_hw_config_init()
+
+#else // DISPATCH_HAVE_HW_CONFIG_COMMPAGE
+
+// No commpage: CPU counts are queried once at init (see
+// _dispatch_hw_config_init below) and cached in this global.
+extern struct _dispatch_hw_configs_s {
+ uint32_t logical_cpus;
+ uint32_t physical_cpus;
+ uint32_t active_cpus;
+} _dispatch_hw_config;
+
+#define DISPATCH_HW_CONFIG() struct _dispatch_hw_configs_s _dispatch_hw_config
+#define dispatch_hw_config(c) (_dispatch_hw_config.c)
+
+// sysctl/sysconf path: map the selector to the platform sysctl name; fall
+// back to sysconf(_SC_NPROCESSORS_ONLN) where no sysctl name is known.
+// Returns at least 1.
+DISPATCH_ALWAYS_INLINE
static inline uint32_t
-_dispatch_get_physicalcpu_max(void)
+_dispatch_hw_get_config(_dispatch_hw_config_t c)
{
 uint32_t val = 1;
-#if defined(_COMM_PAGE_PHYSICAL_CPUS)
- uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_PHYSICAL_CPUS;
- val = (uint32_t)*u8val;
-#elif defined(DISPATCH_SYSCTL_PHYSICAL_CPUS)
- size_t valsz = sizeof(val);
- int ret = sysctlbyname(DISPATCH_SYSCTL_LOGICAL_CPUS,
- &val, &valsz, NULL, 0);
- (void)dispatch_assume_zero(ret);
- (void)dispatch_assume(valsz == sizeof(uint32_t));
-#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
- int ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
- val = ret < 0 ? 1 : ret;
-#else
-#warning "no supported way to query physical CPU count"
+ const char *name = NULL;
+ int r;
+#if defined(__APPLE__)
+ switch (c) {
+ case _dispatch_hw_config_logical_cpus:
+ name = "hw.logicalcpu_max"; break;
+ case _dispatch_hw_config_physical_cpus:
+ name = "hw.physicalcpu_max"; break;
+ case _dispatch_hw_config_active_cpus:
+ name = "hw.activecpu"; break;
+ }
+#elif defined(__FreeBSD__)
+ (void)c; name = "kern.smp.cpus";
#endif
+ if (name) {
+ size_t valsz = sizeof(val);
+ r = sysctlbyname(name, &val, &valsz, NULL, 0);
+ (void)dispatch_assume_zero(r);
+ dispatch_assert(valsz == sizeof(uint32_t));
+ } else {
+#if HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
+ r = (int)sysconf(_SC_NPROCESSORS_ONLN);
+ if (r > 0) val = (uint32_t)r;
+#endif
+ }
 return val;
}
-static inline uint32_t
-_dispatch_get_activecpu(void)
+#define dispatch_hw_config_init(c) \
+ _dispatch_hw_get_config(_dispatch_hw_config_##c)
+
+// Populate the cached _dispatch_hw_config global (non-commpage path only).
+static inline void
+_dispatch_hw_config_init(void)
{
- uint32_t val = 1;
-#if defined(_COMM_PAGE_ACTIVE_CPUS)
- uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_ACTIVE_CPUS;
- val = (uint32_t)*u8val;
-#elif defined(DISPATCH_SYSCTL_ACTIVE_CPUS)
- size_t valsz = sizeof(val);
- int ret = sysctlbyname(DISPATCH_SYSCTL_ACTIVE_CPUS,
- &val, &valsz, NULL, 0);
- (void)dispatch_assume_zero(ret);
- (void)dispatch_assume(valsz == sizeof(uint32_t));
-#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
- int ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
- val = ret < 0 ? 1 : ret;
-#else
-#warning "no supported way to query active CPU count"
-#endif
- return val;
+ dispatch_hw_config(logical_cpus) = dispatch_hw_config_init(logical_cpus);
+ dispatch_hw_config(physical_cpus) = dispatch_hw_config_init(physical_cpus);
+ dispatch_hw_config(active_cpus) = dispatch_hw_config_init(active_cpus);
}
+#undef dispatch_hw_config_init
+
+#endif // DISPATCH_HAVE_HW_CONFIG_COMMPAGE
+
#else // TARGET_OS_WIN32
static inline long
return bits;
}
-
static inline uint32_t
_dispatch_get_ncpus(void)
{
#ifndef __DISPATCH_SHIMS_PERFMON__
#define __DISPATCH_SHIMS_PERFMON__
-#if DISPATCH_PERF_MON
+#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
#if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \
(defined(__i386__) || defined(__x86_64__))
}
#endif /* USE_APPLE_TSD_OPTIMIZATIONS */
-// C99 doesn't define flsll() or ffsll()
-#ifdef __LP64__
-#define flsll(x) flsl(x)
-#else
-static inline unsigned int
-flsll(uint64_t val)
-{
- union {
- struct {
-#ifdef __BIG_ENDIAN__
- unsigned int hi, low;
-#else
- unsigned int low, hi;
-#endif
- } words;
- uint64_t word;
- } _bucket = {
- .word = val,
- };
- if (_bucket.words.hi) {
- return fls(_bucket.words.hi) + 32;
- }
- return fls(_bucket.words.low);
-}
-#endif
-
#define _dispatch_perfmon_start() \
uint64_t start = _dispatch_absolute_time()
#define _dispatch_perfmon_end() \
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#endif
-DISPATCH_ALWAYS_INLINE_NDEBUG
-static inline void
-_dispatch_contention_usleep(unsigned int us)
-{
-#if HAVE_MACH
-#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \
- IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090)
- thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DISPATCH_CONTENTION, us);
-#else
- thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, ((us-1)/1000)+1);
-#endif
-#else
- usleep(us);
-#endif
-}
-
#if TARGET_OS_WIN32
static inline unsigned int
sleep(unsigned int seconds)
#if DISPATCH_USE_DIRECT_TSD
+// Direct TSD: each per-thread value lives in a statically reserved pthread
+// TSD slot accessed without pthread_getspecific overhead.
+// NOTE(review): the KEY5+1 / KEY5+2 arithmetic assumes the slots after
+// __PTK_LIBDISPATCH_KEY5 are contiguous and reserved for libdispatch --
+// confirm against the pthread key layout before changing.
static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0;
-#if DISPATCH_USE_OS_SEMAPHORE_CACHE
-static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE;
-#else
-static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1;
-#endif
+static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY1;
static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2;
static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3;
static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4;
+static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5;
#if DISPATCH_INTROSPECTION
-static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY5;
+static const unsigned long dispatch_introspection_key =__PTK_LIBDISPATCH_KEY5+1;
#elif DISPATCH_PERF_MON
-static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5;
+static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5+1;
+#endif
+#if DISPATCH_USE_OS_SEMAPHORE_CACHE
+static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE;
+#else
+static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY5+2;
#endif
+// Fallback slot number when the SDK does not define __TSD_THREAD_QOS_CLASS.
+#ifndef __TSD_THREAD_QOS_CLASS
+#define __TSD_THREAD_QOS_CLASS 4
+#endif
+static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS;
+
DISPATCH_TSD_INLINE
static inline void
_dispatch_thread_key_create(const unsigned long *k, void (*d)(void *))
{
+ // Direct keys are statically reserved; registration is only needed to
+ // install a destructor. Skip key 0 and destructor-less keys.
+ if (!*k || !d) return;
 dispatch_assert_zero(pthread_key_init_np((int)*k, d));
}
#else
extern pthread_key_t dispatch_queue_key;
+extern pthread_key_t dispatch_voucher_key;
#if DISPATCH_USE_OS_SEMAPHORE_CACHE
#error "Invalid DISPATCH_USE_OS_SEMAPHORE_CACHE configuration"
#else
extern pthread_key_t dispatch_cache_key;
extern pthread_key_t dispatch_io_key;
extern pthread_key_t dispatch_apply_key;
+extern pthread_key_t dispatch_defaultpriority_key;
#if DISPATCH_INTROSPECTION
extern pthread_key_t dispatch_introspection_key;
#elif DISPATCH_PERF_MON
#endif
#endif
+// Mach thread port of the calling thread: read straight out of the pthread
+// TSD slot when direct TSD is available, otherwise via pthread_mach_thread_np.
+// On Windows there is no Mach port, so return 0.
+#if TARGET_OS_WIN32
+#define _dispatch_thread_port() ((mach_port_t)0)
+#else
+#if DISPATCH_USE_DIRECT_TSD
+#define _dispatch_thread_port() ((mach_port_t)_dispatch_thread_getspecific(\
+ _PTHREAD_TSD_SLOT_MACH_THREAD_SELF))
+#else
+#define _dispatch_thread_port() (pthread_mach_thread_np(_dispatch_thread_self()))
+#endif
+#endif
+
DISPATCH_TSD_INLINE DISPATCH_CONST
static inline unsigned int
_dispatch_cpu_number(void)
--- /dev/null
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_SHIMS_YIELD__
+#define __DISPATCH_SHIMS_YIELD__
+
+#pragma mark -
+#pragma mark _dispatch_wait_until
+
+// _dispatch_wait_until(c): busy-wait until condition c becomes true.
+#if DISPATCH_HW_CONFIG_UP
+// Uniprocessor: spinning cannot make progress (the writer is not running),
+// so yield on every iteration, depressing priority longer each time.
+#define _dispatch_wait_until(c) do { \
+ int _spins = 0; \
+ while (!(c)) { \
+ _spins++; \
+ _dispatch_preemption_yield(_spins); \
+ } } while (0)
+#elif TARGET_OS_EMBEDDED
+// <rdar://problem/15440575>
+// Embedded SMP: spin with a hardware pause for DISPATCH_WAIT_SPINS
+// iterations, then start yielding to avoid burning power.
+#ifndef DISPATCH_WAIT_SPINS
+#define DISPATCH_WAIT_SPINS 1024
+#endif
+#define _dispatch_wait_until(c) do { \
+ int _spins = -(DISPATCH_WAIT_SPINS); \
+ while (!(c)) { \
+ if (slowpath(_spins++ >= 0)) { \
+ _dispatch_preemption_yield(_spins); \
+ } else { \
+ dispatch_hardware_pause(); \
+ } \
+ } } while (0)
+#else
+// Desktop SMP: pure spin with a hardware pause hint.
+#define _dispatch_wait_until(c) do { \
+ while (!(c)) { \
+ dispatch_hardware_pause(); \
+ } } while (0)
+#endif
+
+#pragma mark -
+#pragma mark _dispatch_contention_wait_until
+
+// _dispatch_contention_wait_until(c): spin a bounded, randomized number of
+// iterations hoping c becomes true; evaluates to whether it did. On UP,
+// spinning is pointless, so report failure immediately.
+#if DISPATCH_HW_CONFIG_UP
+#define _dispatch_contention_wait_until(c) false
+#else
+#ifndef DISPATCH_CONTENTION_SPINS_MAX
+#define DISPATCH_CONTENTION_SPINS_MAX (128 - 1)
+#endif
+#ifndef DISPATCH_CONTENTION_SPINS_MIN
+#define DISPATCH_CONTENTION_SPINS_MIN (32 - 1)
+#endif
+#if TARGET_OS_EMBEDDED
+// Embedded: fixed midpoint spin count (no rand_r cost).
+#define _dispatch_contention_spins() \
+ ((DISPATCH_CONTENTION_SPINS_MIN) + ((DISPATCH_CONTENTION_SPINS_MAX) - \
+ (DISPATCH_CONTENTION_SPINS_MIN)) / 2)
+#else
+// Use randomness to prevent threads from resonating at the same
+// frequency and permanently contending. All threads sharing the same
+// seed value is safe with the FreeBSD rand_r implementation.
+#define _dispatch_contention_spins() ({ \
+ static unsigned int _seed; \
+ ((unsigned int)rand_r(&_seed) & (DISPATCH_CONTENTION_SPINS_MAX)) | \
+ (DISPATCH_CONTENTION_SPINS_MIN); })
+#endif
+#define _dispatch_contention_wait_until(c) ({ \
+ bool _out = false; \
+ unsigned int _spins = _dispatch_contention_spins(); \
+ while (_spins--) { \
+ dispatch_hardware_pause(); \
+ if ((_out = fastpath(c))) break; \
+ }; _out; })
+#endif
+
+#pragma mark -
+#pragma mark dispatch_hardware_pause
+
+// Spin-loop hint to the CPU: x86 "pause", ARM "yield" (plus "wfe" helper).
+// The fallback empty asm only keeps the compiler from collapsing the loop --
+// NOTE(review): it provides no hardware backoff on other architectures.
+#if defined(__x86_64__) || defined(__i386__)
+#define dispatch_hardware_pause() __asm__("pause")
+#elif (defined(__arm__) && defined(_ARM_ARCH_7) && defined(__thumb__)) || \
+ defined(__arm64__)
+#define dispatch_hardware_pause() __asm__("yield")
+#define dispatch_hardware_wfe() __asm__("wfe")
+#else
+#define dispatch_hardware_pause() __asm__("")
+#endif
+
+#pragma mark -
+#pragma mark _dispatch_preemption_yield
+
+// _dispatch_preemption_yield(n): give up the CPU via Mach thread_switch with
+// a priority-depress option for (n) ms-ticks; plain pthread yield elsewhere.
+#if HAVE_MACH
+#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) && !(TARGET_IPHONE_SIMULATOR && \
+ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090)
+#define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS
+#else
+#define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS
+#endif
+#define _dispatch_preemption_yield(n) _dispatch_thread_switch(MACH_PORT_NULL, \
+ DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n))
+#else
+#define _dispatch_preemption_yield(n) pthread_yield_np()
+#endif // HAVE_MACH
+
+#pragma mark -
+#pragma mark _dispatch_contention_usleep
+
+// Back-off sleep bounds (microseconds) for contended waits.
+#ifndef DISPATCH_CONTENTION_USLEEP_START
+#define DISPATCH_CONTENTION_USLEEP_START 500
+#endif
+#ifndef DISPATCH_CONTENTION_USLEEP_MAX
+#define DISPATCH_CONTENTION_USLEEP_MAX 100000
+#endif
+
+// _dispatch_contention_usleep(u): sleep ~u microseconds under contention.
+// On Mach, prefer the dedicated contention switch option; otherwise
+// SWITCH_OPTION_WAIT takes milliseconds, hence the us->ms round-up.
+#if HAVE_MACH
+#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \
+ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090)
+#define _dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \
+ SWITCH_OPTION_DISPATCH_CONTENTION, (u))
+#else
+#define _dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \
+ SWITCH_OPTION_WAIT, (((u)-1)/1000)+1)
+#endif
+#else
+#define _dispatch_contention_usleep(u) usleep((u))
+#endif // HAVE_MACH
+
+#pragma mark -
+#pragma mark _dispatch_thread_switch
+
+// Thin wrapper over the Mach thread_switch trap (only defined with Mach).
+#if HAVE_MACH
+#define _dispatch_thread_switch(thread_name, option, option_time) \
+ thread_switch((thread_name), (option), (option_time))
+
+#endif // HAVE_MACH
return (bool)(ds->ds_atomic_flags & DSF_CANCELED);
}
-
+// Public accessor for the source's event mask. When the source was
+// transparently rewired from the legacy VM-pressure interface
+// (ds_vmpressure_override set), report the mask the client originally asked
+// for; likewise for the simulator's memorystatus substitution.
unsigned long
dispatch_source_get_mask(dispatch_source_t ds)
{
- return ds->ds_pending_data_mask;
+ unsigned long mask = ds->ds_pending_data_mask;
+ if (ds->ds_vmpressure_override) {
+ mask = NOTE_VM_PRESSURE;
+ }
+#if TARGET_IPHONE_SIMULATOR
+ else if (ds->ds_memorystatus_override) {
+ mask = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+ }
+#endif
+ return mask;
}
+// Public accessor for the source's handle (truncated ds_ident_hack). The
+// simulator's memorystatus substitution has no meaningful handle, so report 0
+// there to match what the client originally created.
uintptr_t
dispatch_source_get_handle(dispatch_source_t ds)
{
- return (unsigned int)ds->ds_ident_hack;
+ unsigned int handle = (unsigned int)ds->ds_ident_hack;
+#if TARGET_IPHONE_SIMULATOR
+ if (ds->ds_memorystatus_override) {
+ handle = 0;
+ }
+#endif
+ return handle;
}
+// Public accessor for the source's pending data. Mirrors get_mask above:
+// overridden VM-pressure / memorystatus sources report the legacy constant
+// the client expects instead of the substituted kevent's data.
unsigned long
dispatch_source_get_data(dispatch_source_t ds)
{
- return ds->ds_data;
+ unsigned long data = ds->ds_data;
+ if (ds->ds_vmpressure_override) {
+ data = NOTE_VM_PRESSURE;
+ }
+#if TARGET_IPHONE_SIMULATOR
+ else if (ds->ds_memorystatus_override) {
+ data = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+ }
+#endif
+ return data;
}
void
#pragma mark -
#pragma mark dispatch_source_handler
-#ifdef __BLOCKS__
-// 6618342 Contact the team that owns the Instrument DTrace probe before
-// renaming this symbol
-static void
-_dispatch_source_set_event_handler2(void *context)
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_continuation_t
+_dispatch_source_handler_alloc(dispatch_source_t ds, void *handler, long kind,
+ bool block)
{
- dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
- dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE);
- dispatch_source_refs_t dr = ds->ds_refs;
-
- if (ds->ds_handler_is_block && dr->ds_handler_ctxt) {
- Block_release(dr->ds_handler_ctxt);
+ dispatch_continuation_t dc = _dispatch_continuation_alloc();
+ if (handler) {
+ dc->do_vtable = (void *)((block ? DISPATCH_OBJ_BLOCK_RELEASE_BIT :
+ DISPATCH_OBJ_CTXT_FETCH_BIT) | (kind != DS_EVENT_HANDLER ?
+ DISPATCH_OBJ_ASYNC_BIT : 0l));
+ dc->dc_priority = 0;
+ dc->dc_voucher = NULL;
+ if (block) {
+#ifdef __BLOCKS__
+ if (slowpath(_dispatch_block_has_private_data(handler))) {
+ // sources don't propagate priority by default
+ dispatch_block_flags_t flags = DISPATCH_BLOCK_NO_QOS_CLASS;
+ flags |= _dispatch_block_get_flags(handler);
+ _dispatch_continuation_priority_set(dc,
+ _dispatch_block_get_priority(handler), flags);
+ }
+ if (kind != DS_EVENT_HANDLER) {
+ dc->dc_func = _dispatch_call_block_and_release;
+ } else {
+ dc->dc_func = _dispatch_Block_invoke(handler);
+ }
+ dc->dc_ctxt = _dispatch_Block_copy(handler);
+#endif /* __BLOCKS__ */
+ } else {
+ dc->dc_func = handler;
+ dc->dc_ctxt = ds->do_ctxt;
+ }
+ _dispatch_trace_continuation_push((dispatch_queue_t)ds, dc);
+ } else {
+ dc->dc_func = NULL;
}
- dr->ds_handler_func = context ? _dispatch_Block_invoke(context) : NULL;
- dr->ds_handler_ctxt = context;
- ds->ds_handler_is_block = true;
-}
-
-void
-dispatch_source_set_event_handler(dispatch_source_t ds,
- dispatch_block_t handler)
-{
- handler = _dispatch_Block_copy(handler);
- _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler,
- _dispatch_source_set_event_handler2);
+ dc->dc_data = (void*)kind;
+ return dc;
}
-#endif /* __BLOCKS__ */
-static void
-_dispatch_source_set_event_handler_f(void *context)
+static inline void
+_dispatch_source_handler_replace(dispatch_source_refs_t dr, long kind,
+ dispatch_continuation_t dc_new)
{
- dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
- dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE);
- dispatch_source_refs_t dr = ds->ds_refs;
-
+ dispatch_continuation_t dc = dr->ds_handler[kind];
+ if (dc) {
#ifdef __BLOCKS__
- if (ds->ds_handler_is_block && dr->ds_handler_ctxt) {
- Block_release(dr->ds_handler_ctxt);
+ if ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT) {
+ Block_release(dc->dc_ctxt);
+ }
+#endif /* __BLOCKS__ */
+ if (dc->dc_voucher) {
+ _voucher_release(dc->dc_voucher);
+ dc->dc_voucher = NULL;
+ }
+ _dispatch_continuation_free(dc);
}
-#endif
- dr->ds_handler_func = context;
- dr->ds_handler_ctxt = ds->do_ctxt;
- ds->ds_handler_is_block = false;
+ dr->ds_handler[kind] = dc_new;
}
-void
-dispatch_source_set_event_handler_f(dispatch_source_t ds,
- dispatch_function_t handler)
+static inline void
+_dispatch_source_handler_free(dispatch_source_refs_t dr, long kind)
{
- _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler,
- _dispatch_source_set_event_handler_f);
+ _dispatch_source_handler_replace(dr, kind, NULL);
}
-#ifdef __BLOCKS__
-// 6618342 Contact the team that owns the Instrument DTrace probe before
-// renaming this symbol
static void
-_dispatch_source_set_cancel_handler2(void *context)
+_dispatch_source_set_handler(void *context)
{
dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE);
- dispatch_source_refs_t dr = ds->ds_refs;
-
- if (ds->ds_cancel_is_block && dr->ds_cancel_handler) {
- Block_release(dr->ds_cancel_handler);
+ dispatch_continuation_t dc = context;
+ long kind = (long)dc->dc_data;
+ dc->dc_data = 0;
+ if (!dc->dc_func) {
+ _dispatch_continuation_free(dc);
+ dc = NULL;
+ } else if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) {
+ dc->dc_ctxt = ds->do_ctxt;
+ }
+ _dispatch_source_handler_replace(ds->ds_refs, kind, dc);
+ if (kind == DS_EVENT_HANDLER && dc && dc->dc_priority) {
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK;
+ _dispatch_queue_set_override_priority((dispatch_queue_t)ds);
+#endif
}
- dr->ds_cancel_handler = context;
- ds->ds_cancel_is_block = true;
}
#ifdef __BLOCKS__
void
dispatch_source_set_event_handler(dispatch_source_t ds,
		dispatch_block_t handler)
{
	// Copy the block into a continuation, then install it on the source
	// queue so the swap is serialized with handler invocation.
	dispatch_continuation_t dc;
	dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, true);
	_dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc,
			_dispatch_source_set_handler);
}
#endif /* __BLOCKS__ */
-static void
-_dispatch_source_set_cancel_handler_f(void *context)
+void
+dispatch_source_set_event_handler_f(dispatch_source_t ds,
+ dispatch_function_t handler)
{
- dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
- dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE);
- dispatch_source_refs_t dr = ds->ds_refs;
-
-#ifdef __BLOCKS__
- if (ds->ds_cancel_is_block && dr->ds_cancel_handler) {
- Block_release(dr->ds_cancel_handler);
- }
-#endif
- dr->ds_cancel_handler = context;
- ds->ds_cancel_is_block = false;
+ dispatch_continuation_t dc;
+ dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false);
+ _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc,
+ _dispatch_source_set_handler);
}
void
-dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
- dispatch_function_t handler)
+_dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds,
+ void *ctxt, dispatch_function_t handler)
{
- _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler,
- _dispatch_source_set_cancel_handler_f);
+ dispatch_continuation_t dc;
+ dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false);
+ dc->do_vtable = (void *)((long)dc->do_vtable &~DISPATCH_OBJ_CTXT_FETCH_BIT);
+ dc->dc_other = dc->dc_ctxt;
+ dc->dc_ctxt = ctxt;
+ _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc,
+ _dispatch_source_set_handler);
}
#ifdef __BLOCKS__
void
dispatch_source_set_cancel_handler(dispatch_source_t ds,
		dispatch_block_t handler)
{
	// Copy the cancellation block and install it on the source queue.
	dispatch_continuation_t dc;
	dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true);
	_dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc,
			_dispatch_source_set_handler);
}
#endif /* __BLOCKS__ */
-static void
-_dispatch_source_set_registration_handler_f(void *context)
+void
+dispatch_source_set_cancel_handler_f(dispatch_source_t ds,
+ dispatch_function_t handler)
{
- dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current();
- dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE);
- dispatch_source_refs_t dr = ds->ds_refs;
+ dispatch_continuation_t dc;
+ dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, false);
+ _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc,
+ _dispatch_source_set_handler);
+}
#ifdef __BLOCKS__
void
dispatch_source_set_registration_handler(dispatch_source_t ds,
		dispatch_block_t handler)
{
	// Copy the registration block and install it on the source queue.
	dispatch_continuation_t dc;
	dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, true);
	_dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc,
			_dispatch_source_set_handler);
}
#endif /* __BLOCKS__ */
void
dispatch_source_set_registration_handler_f(dispatch_source_t ds,
		dispatch_function_t handler)
{
	// Function-pointer variant of the registration handler setter.
	dispatch_continuation_t dc;
	dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER,
			false);
	_dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc,
			_dispatch_source_set_handler);
}
#pragma mark -
_dispatch_source_registration_callout(dispatch_source_t ds)
{
	// Deliver the registration handler once. NOTE(review): assumes a
	// registration handler is installed — callers check
	// ds_handler[DS_REGISTN_HANDLER] before invoking this.
	dispatch_source_refs_t dr = ds->ds_refs;
	dispatch_continuation_t dc = dr->ds_handler[DS_REGISTN_HANDLER];
	if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) {
		// no registration callout if source is canceled rdar://problem/8955246
		return _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER);
	}
	// Run the handler at the source's priority.
	pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority);
	if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) {
		dc->dc_ctxt = ds->do_ctxt;
	}
	_dispatch_continuation_pop(dc); // consumes dc
	dr->ds_handler[DS_REGISTN_HANDLER] = NULL;
	_dispatch_reset_defaultpriority(old_dp);
}
static void
_dispatch_source_cancel_callout(dispatch_source_t ds)
{
	// Final teardown of a canceled source: clear pending state, drop the
	// event and registration handlers, and deliver the cancel handler
	// (only if the source was actually canceled).
	dispatch_source_refs_t dr = ds->ds_refs;
	// Grab the cancel handler BEFORE freeing the other slots.
	dispatch_continuation_t cancel_dc = dr->ds_handler[DS_CANCEL_HANDLER];
	ds->ds_pending_data_mask = 0;
	ds->ds_pending_data = 0;
	ds->ds_data = 0;
	_dispatch_source_handler_free(dr, DS_EVENT_HANDLER);
	_dispatch_source_handler_free(dr, DS_REGISTN_HANDLER);
	if (!cancel_dc) {
		return;
	}
	if (!(ds->ds_atomic_flags & DSF_CANCELED)) {
		// Not canceled (e.g. released without cancel): don't invoke,
		// just release the handler.
		return _dispatch_source_handler_free(dr, DS_CANCEL_HANDLER);
	}
	// Invoke the cancel handler at the source's priority.
	pthread_priority_t prev_pri = _dispatch_set_defaultpriority(
			ds->dq_priority);
	if ((long)cancel_dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) {
		cancel_dc->dc_ctxt = ds->do_ctxt;
	}
	_dispatch_continuation_pop(cancel_dc); // consumes cancel_dc
	dr->ds_handler[DS_CANCEL_HANDLER] = NULL;
	_dispatch_reset_defaultpriority(prev_pri);
}
static void
return;
}
dispatch_source_refs_t dr = ds->ds_refs;
+ dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0, relaxed);
if (ds->ds_is_level) {
ds->ds_data = ~prev;
} else {
ds->ds_data = prev;
}
- if (dispatch_assume(prev) && dr->ds_handler_func) {
- _dispatch_client_callout(dr->ds_handler_ctxt, dr->ds_handler_func);
+ if (!dispatch_assume(prev) || !dc) {
+ return;
}
+ pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority);
+ _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dc);
+ voucher_t voucher = dc->dc_voucher ? _voucher_retain(dc->dc_voucher) : NULL;
+ _dispatch_continuation_voucher_adopt(dc); // consumes voucher reference
+ _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
+ _dispatch_introspection_queue_item_complete(dc);
+ if (voucher) dc->dc_voucher = voucher;
+ _dispatch_reset_defaultpriority(old_dp);
}
static void
}
_dispatch_source_kevent_register(ds);
ds->ds_is_installed = true;
- if (dr->ds_registration_handler) {
+ if (dr->ds_handler[DS_REGISTN_HANDLER]) {
return ds->do_targetq;
}
if (slowpath(ds->do_xref_cnt == -1)) {
} else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) {
// Source suspended by an item drained from the source queue.
return NULL;
- } else if (dr->ds_registration_handler) {
+ } else if (dr->ds_handler[DS_REGISTN_HANDLER]) {
// The source has been registered and the registration handler needs
// to be delivered on the target queue.
if (dq != ds->do_targetq) {
}
_dispatch_source_kevent_unregister(ds);
}
- if (dr->ds_cancel_handler || ds->ds_handler_is_block ||
- ds->ds_registration_is_block) {
+ if (dr->ds_handler[DS_EVENT_HANDLER] ||
+ dr->ds_handler[DS_CANCEL_HANDLER] ||
+ dr->ds_handler[DS_REGISTN_HANDLER]) {
if (dq != ds->do_targetq) {
return ds->do_targetq;
}
if (!ds->ds_is_installed) {
// The source needs to be installed on the manager queue.
return true;
- } else if (dr->ds_registration_handler) {
+ } else if (dr->ds_handler[DS_REGISTN_HANDLER]) {
// The registration handler needs to be delivered to the target queue.
return true;
} else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){
// The source needs to be uninstalled from the manager queue, or the
// cancellation handler needs to be delivered to the target queue.
// Note: cancellation assumes installation.
- if (ds->ds_dkev || dr->ds_cancel_handler
-#ifdef __BLOCKS__
- || ds->ds_handler_is_block || ds->ds_registration_is_block
-#endif
- ) {
+ if (ds->ds_dkev || dr->ds_handler[DS_EVENT_HANDLER] ||
+ dr->ds_handler[DS_CANCEL_HANDLER] ||
+ dr->ds_handler[DS_REGISTN_HANDLER]) {
return true;
}
} else if (ds->ds_pending_data) {
// The source needs to be rearmed on the manager queue.
return true;
}
- return (ds->dq_items_tail != NULL);
+ return _dispatch_queue_class_probe(ds);
}
static void
} else if (ke->data == ESRCH) {
return _dispatch_kevent_proc_exit(ke);
}
-#if DISPATCH_USE_VM_PRESSURE
- } else if (ke->filter == EVFILT_VM && ke->data == ENOTSUP) {
- // Memory pressure kevent is not supported on all platforms
- // <rdar://problem/8636227>
- return;
-#endif
-#if DISPATCH_USE_MEMORYSTATUS
- } else if (ke->filter == EVFILT_MEMORYSTATUS &&
- (ke->data == EINVAL || ke->data == ENOTSUP)) {
- // Memory status kevent is not supported on all platforms
- return;
-#endif
}
return _dispatch_kevent_error(ke);
}
#pragma mark -
#pragma mark dispatch_source_timer
-#if DISPATCH_USE_DTRACE && DISPATCH_USE_DTRACE_INTROSPECTION
+#if DISPATCH_USE_DTRACE
static dispatch_source_refs_t
_dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT];
#define _dispatch_trace_next_timer_set(x, q) \
// Called on the source queue
struct dispatch_set_timer_params *params = context;
dispatch_suspend(params->ds);
- dispatch_barrier_async_f(&_dispatch_mgr_q, params,
+ _dispatch_barrier_async_detached_f(&_dispatch_mgr_q, params,
_dispatch_source_set_timer3);
}
DISPATCH_KEVENT_TIMER_UDATA(tidx);
}
#endif // __LP64__
- _dispatch_timers_force_max_leeway =
- getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY");
+ if (slowpath(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
+ _dispatch_timers_force_max_leeway = true;
+ }
}
static inline void
if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
(void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed);
}
- free(dk);
_dispatch_object_debug(ds, "%s", __func__);
+ ds->ds_dkev = NULL;
+ free(dk);
} else {
_dispatch_timers_unregister(ds, dk);
}
dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue),
sizeof(struct dispatch_timer_aggregate_s));
_dispatch_queue_init((dispatch_queue_t)dta);
- dta->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH,
- true);
- dta->dq_width = UINT32_MAX;
+ dta->do_targetq = _dispatch_get_root_queue(
+ _DISPATCH_QOS_CLASS_USER_INITIATED, true);
+ dta->dq_width = DISPATCH_QUEUE_WIDTH_MAX;
//FIXME: aggregates need custom vtable
//dta->dq_label = "timer-aggregate";
for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) {
}
dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau));
memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer));
- dispatch_barrier_async_f((dispatch_queue_t)dta, dtau,
+ _dispatch_barrier_async_detached_f((dispatch_queue_t)dta, dtau,
_dispatch_timer_aggregate_update);
}
}
_dispatch_kq = kqueue();
#endif
if (_dispatch_kq == -1) {
- DISPATCH_CLIENT_CRASH("kqueue() create failed: "
- "probably out of file descriptors");
+ int err = errno;
+ switch (err) {
+ case EMFILE:
+ DISPATCH_CLIENT_CRASH("kqueue() failure: "
+ "process is out of file descriptors");
+ break;
+ case ENFILE:
+ DISPATCH_CLIENT_CRASH("kqueue() failure: "
+ "system is out of file descriptors");
+ break;
+ case ENOMEM:
+ DISPATCH_CLIENT_CRASH("kqueue() failure: "
+ "kernel is out of memory");
+ break;
+ default:
+ (void)dispatch_assume_zero(err);
+ DISPATCH_CRASH("kqueue() failure");
+ break;
+ }
} else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) {
// in case we fall back to select()
FD_SET(_dispatch_kq, &_dispatch_rfds);
(void)dispatch_assume_zero(kevent64(_dispatch_kq, &kev, 1, NULL, 0, 0,
NULL));
- _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q);
+ _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
}
static int
poll = _dispatch_mgr_select(poll);
if (!poll) continue;
}
+ poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q);
r = kevent64(_dispatch_kq, _dispatch_kevent_enable,
_dispatch_kevent_enable ? 1 : 0, &kev, 1, 0,
poll ? &timeout_immediately : NULL);
memorystatus = dispatch_source_get_data(_dispatch_memorystatus_source);
if (memorystatus & DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL) {
_dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
+ _voucher_activity_heap_pressure_normal();
return;
}
_dispatch_continuation_cache_limit =
DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN;
+ _voucher_activity_heap_pressure_warn();
#endif
malloc_zone_pressure_relief(0,0);
}
_dispatch_memorystatus_source = dispatch_source_create(
DISPATCH_MEMORYSTATUS_SOURCE_TYPE, 0,
DISPATCH_MEMORYSTATUS_SOURCE_MASK,
- _dispatch_get_root_queue(0, true));
+ _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true));
dispatch_source_set_event_handler_f(_dispatch_memorystatus_source,
_dispatch_memorystatus_handler);
dispatch_resume(_dispatch_memorystatus_source);
#ifndef MACH_RCV_LARGE_IDENTITY
#define MACH_RCV_LARGE_IDENTITY 0x00000008
#endif
+#ifndef MACH_RCV_VOUCHER
+#define MACH_RCV_VOUCHER 0x00000800
+#endif
#define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX
#define DISPATCH_MACH_RCV_OPTIONS ( \
MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \
MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \
- MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0))
+ MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \
+ MACH_RCV_VOUCHER
#define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0])
static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr);
static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
dispatch_mach_reply_refs_t dmr, bool disconnected);
-static void _dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr,
+static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm);
+static inline void _dispatch_mach_msg_set_options(dispatch_object_t dou,
+ mach_msg_option_t options);
+static void _dispatch_mach_msg_recv(dispatch_mach_t dm,
+ dispatch_mach_reply_refs_t dmr, mach_msg_header_t *hdr,
mach_msg_size_t siz);
static void _dispatch_mach_merge_kevent(dispatch_mach_t dm,
const struct kevent64_s *ke);
-static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm);
+static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
static const size_t _dispatch_mach_recv_msg_size =
DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE;
static const size_t dispatch_mach_trailer_size =
sizeof(dispatch_mach_trailer_t);
-static const size_t _dispatch_mach_recv_msg_buf_size = mach_vm_round_page(
- _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size);
+static mach_msg_size_t _dispatch_mach_recv_msg_buf_size;
static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset;
static mach_port_t _dispatch_mach_notify_port;
static struct kevent64_s _dispatch_mach_recv_kevent = {
static void
_dispatch_mach_recv_msg_buf_init(void)
{
- mach_vm_size_t vm_size = _dispatch_mach_recv_msg_buf_size;
+ mach_vm_size_t vm_size = mach_vm_round_page(
+ _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size);
+ _dispatch_mach_recv_msg_buf_size = (mach_msg_size_t)vm_size;
mach_vm_address_t vm_addr = vm_page_size;
kern_return_t kr;
vm_addr = vm_page_size;
}
_dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr;
- _dispatch_mach_recv_kevent.ext[1] = _dispatch_mach_recv_msg_buf_size;
+ _dispatch_mach_recv_kevent.ext[1] = vm_size;
}
static inline void*
_dispatch_mach_notify_source = dispatch_source_create(
&_dispatch_source_type_mach_recv_direct,
_dispatch_mach_notify_port, 0, &_dispatch_mgr_q);
- _dispatch_mach_notify_source->ds_refs->ds_handler_func =
- (void*)_dispatch_mach_notify_source_invoke;
+ static const struct dispatch_continuation_s dc = {
+ .dc_func = (void*)_dispatch_mach_notify_source_invoke,
+ };
+ _dispatch_mach_notify_source->ds_refs->ds_handler[DS_EVENT_HANDLER] =
+ (dispatch_continuation_t)&dc;
dispatch_assert(_dispatch_mach_notify_source);
dispatch_resume(_dispatch_mach_notify_source);
}
_dispatch_mach_notify_source_invoke(hdr);
return _dispatch_kevent_mach_msg_destroy(hdr);
}
+ dispatch_mach_reply_refs_t dmr = NULL;
if (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) {
- _dispatch_mach_reply_kevent_unregister((dispatch_mach_t)ds,
- (dispatch_mach_reply_refs_t)dr, false);
+ dmr = (dispatch_mach_reply_refs_t)dr;
}
- return _dispatch_mach_msg_recv((dispatch_mach_t)ds, hdr, siz);
+ return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, hdr, siz);
}
DISPATCH_ALWAYS_INLINE
{
(void)_dispatch_get_mach_recv_portset();
_dispatch_debug("registering for calendar-change notification");
- kern_return_t kr = host_request_notification(mach_host_self(),
+ kern_return_t kr = host_request_notification(_dispatch_get_mach_host_port(),
HOST_NOTIFY_CALENDAR_CHANGE, _dispatch_mach_notify_port);
DISPATCH_VERIFY_MIG(kr);
(void)dispatch_assume_zero(kr);
#pragma mark dispatch_mach_t
#define DISPATCH_MACH_NEVER_CONNECTED (UINT32_MAX/2)
-#define DISPATCH_MACH_PSEUDO_RECEIVED 0x1
#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2
#define DISPATCH_MACH_OPTIONS_MASK 0xffff
static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou);
static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm,
mach_port_t local_port, mach_port_t remote_port);
+static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected(
+ dispatch_object_t dou, dispatch_mach_reply_refs_t dmr);
static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm,
dispatch_object_t dou);
static inline mach_msg_header_t* _dispatch_mach_msg_get_msg(
dispatch_mach_msg_t dmsg);
+static void _dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou,
+ pthread_priority_t pp);
static dispatch_mach_t
_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context,
dr->dm_handler_func = handler;
dr->dm_handler_ctxt = context;
dm->ds_refs = dr;
- dm->ds_handler_is_block = handler_is_block;
+ dm->dm_handler_is_block = handler_is_block;
dm->dm_refs = _dispatch_calloc(1ul,
sizeof(struct dispatch_mach_send_refs_s));
{
_dispatch_object_debug(dm, "%s", __func__);
dispatch_mach_refs_t dr = dm->ds_refs;
- if (dm->ds_handler_is_block && dr->dm_handler_ctxt) {
+ if (dm->dm_handler_is_block && dr->dm_handler_ctxt) {
Block_release(dr->dm_handler_ctxt);
}
free(dr);
if (MACH_PORT_VALID(send)) {
if (checkin) {
dispatch_retain(checkin);
+ mach_msg_option_t options = _dispatch_mach_checkin_options();
+ _dispatch_mach_msg_set_options(checkin, options);
dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin);
}
dr->dm_checkin = checkin;
_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm,
dispatch_mach_reply_refs_t dmr, bool disconnected)
{
- dispatch_kevent_t dk = dmr->dm_dkev;
- mach_port_t local_port = (mach_port_t)dk->dk_kevent.ident;
+ dispatch_mach_msg_t dmsgr = NULL;
+ if (disconnected) {
+ dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr);
+ }
+ dispatch_kevent_t dk = dmr->dmr_dkev;
TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list);
_dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE);
- TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dm_list);
+ TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
+ if (dmr->dmr_voucher) _voucher_release(dmr->dmr_voucher);
free(dmr);
- if (disconnected) {
- _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL);
- }
+ if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority);
}
DISPATCH_NOINLINE
static void
_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply,
- void *ctxt)
+ dispatch_mach_msg_t dmsg)
{
dispatch_kevent_t dk;
dispatch_mach_reply_refs_t dmr;
dk->dk_kevent.flags |= EV_ADD|EV_ENABLE;
dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE;
dk->dk_kevent.udata = (uintptr_t)dk;
- // make reply context visible to leaks rdar://11777199
- dk->dk_kevent.ext[1] = (uintptr_t)ctxt;
TAILQ_INIT(&dk->dk_sources);
dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s));
dmr->dr_source_wref = _dispatch_ptr2wref(dm);
- dmr->dm_dkev = dk;
+ dmr->dmr_dkev = dk;
+ if (dmsg->dmsg_voucher) {
+ dmr->dmr_voucher =_voucher_retain(dmsg->dmsg_voucher);
+ }
+ dmr->dmr_priority = dmsg->dmsg_priority;
+ // make reply context visible to leaks rdar://11777199
+ dmr->dmr_ctxt = dmsg->do_ctxt;
_dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", reply,
- ctxt);
+ dmsg->do_ctxt);
uint32_t flags;
- bool do_resume = _dispatch_kevent_register(&dmr->dm_dkev, &flags);
- TAILQ_INSERT_TAIL(&dmr->dm_dkev->dk_sources, (dispatch_source_refs_t)dmr,
+ bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, &flags);
+ TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr,
dr_list);
- TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dm_list);
- if (do_resume && _dispatch_kevent_resume(dmr->dm_dkev, flags, 0)) {
+ TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list);
+ if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) {
_dispatch_mach_reply_kevent_unregister(dm, dmr, true);
}
}
}
static inline void
-_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou)
+_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou,
+ pthread_priority_t pp)
{
- return _dispatch_queue_push(dm._dq, dou);
+ return _dispatch_queue_push(dm._dq, dou, pp);
}
static inline void
}
static void
-_dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr,
- mach_msg_size_t siz)
+_dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr,
+ mach_msg_header_t *hdr, mach_msg_size_t siz)
{
_dispatch_debug_machport(hdr->msgh_remote_port);
_dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x",
return _dispatch_kevent_mach_msg_destroy(hdr);
}
dispatch_mach_msg_t dmsg;
+ voucher_t voucher;
+ pthread_priority_t priority;
+ void *ctxt = NULL;
+ if (dmr) {
+ _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher
+ voucher = dmr->dmr_voucher;
+ dmr->dmr_voucher = NULL; // transfer reference
+ priority = dmr->dmr_priority;
+ ctxt = dmr->dmr_ctxt;
+ _dispatch_mach_reply_kevent_unregister(dm, dmr, false);
+ } else {
+ voucher = voucher_create_with_mach_msg(hdr);
+ priority = _voucher_get_priority(voucher);
+ }
dispatch_mach_msg_destructor_t destructor;
destructor = (hdr == _dispatch_get_mach_recv_msg_buf()) ?
DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT :
DISPATCH_MACH_MSG_DESTRUCTOR_FREE;
dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL);
+ dmsg->dmsg_voucher = voucher;
+ dmsg->dmsg_priority = priority;
+ dmsg->do_ctxt = ctxt;
_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED);
- return _dispatch_mach_push(dm, dmsg);
+ _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg);
+ _dispatch_voucher_ktrace_dmsg_push(dmsg);
+ return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority);
}
static inline mach_port_t
return remote;
}
-static inline mach_port_t
-_dispatch_mach_msg_get_reply_port(dispatch_mach_t dm, dispatch_object_t dou)
-{
- mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg);
- mach_port_t reply = MACH_PORT_NULL;
- mach_msg_option_t msg_opts = _dispatch_mach_msg_get_options(dou);
- if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) {
- reply = hdr->msgh_reserved;
- hdr->msgh_reserved = 0;
- } else if (MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) ==
- MACH_MSG_TYPE_MAKE_SEND_ONCE &&
- MACH_PORT_VALID(hdr->msgh_local_port) && (!dm->ds_dkev ||
- dm->ds_dkev->dk_kevent.ident != hdr->msgh_local_port)) {
- reply = hdr->msgh_local_port;
- }
- return reply;
-}
-
static inline void
_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port,
mach_port_t remote_port)
if (local_port) hdr->msgh_local_port = local_port;
if (remote_port) hdr->msgh_remote_port = remote_port;
_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED);
- return _dispatch_mach_push(dm, dmsg);
+ return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority);
+}
+
+static inline dispatch_mach_msg_t
+_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou,
+ dispatch_mach_reply_refs_t dmr)
+{
+ dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr;
+ if (dmsg && !dmsg->dmsg_reply) return NULL;
+ mach_msg_header_t *hdr;
+ dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t),
+ DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr);
+ if (dmsg) {
+ hdr->msgh_local_port = dmsg->dmsg_reply;
+ if (dmsg->dmsg_voucher) {
+ dmsgr->dmsg_voucher = _voucher_retain(dmsg->dmsg_voucher);
+ }
+ dmsgr->dmsg_priority = dmsg->dmsg_priority;
+ dmsgr->do_ctxt = dmsg->do_ctxt;
+ } else {
+ hdr->msgh_local_port = (mach_port_t)dmr->dmr_dkev->dk_kevent.ident;
+ dmsgr->dmsg_voucher = dmr->dmr_voucher;
+ dmr->dmr_voucher = NULL; // transfer reference
+ dmsgr->dmsg_priority = dmr->dmr_priority;
+ dmsgr->do_ctxt = dmr->dmr_ctxt;
+ }
+ _dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED);
+ return dmsgr;
}
DISPATCH_NOINLINE
static void
_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou)
{
	// Report a message that could not be sent, and — if it expected a
	// reply — also deliver a synthesized disconnected-reply message.
	dispatch_mach_msg_t dmsg = dou._dmsg;
	dispatch_mach_msg_t dmsgr =
			_dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
	_dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_NOT_SENT);
	_dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority);
	if (dmsgr) {
		_dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority);
	}
}
DISPATCH_NOINLINE
_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou)
{
dispatch_mach_send_refs_t dr = dm->dm_refs;
- dispatch_mach_msg_t dmsg = dou._dmsg;
+ dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL;
+ voucher_t voucher = dmsg->dmsg_voucher;
+ mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL;
+ bool clear_voucher = false, kvoucher_move_send = false;
dr->dm_needs_mgr = 0;
if (slowpath(dr->dm_checkin) && dmsg != dr->dm_checkin) {
// send initial checkin message
}
mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
mach_msg_return_t kr = 0;
- mach_port_t reply = _dispatch_mach_msg_get_reply_port(dm, dmsg);
+ mach_port_t reply = dmsg->dmsg_reply;
mach_msg_option_t opts = 0, msg_opts = _dispatch_mach_msg_get_options(dmsg);
if (!slowpath(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) {
- opts = MACH_SEND_MSG | (msg_opts & DISPATCH_MACH_OPTIONS_MASK);
+ opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK);
if (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) !=
MACH_MSG_TYPE_MOVE_SEND_ONCE) {
if (dmsg != dr->dm_checkin) {
}
}
opts |= MACH_SEND_TIMEOUT;
+ if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) {
+ ipc_kvoucher = _voucher_create_mach_voucher_with_priority(
+ voucher, dmsg->dmsg_priority);
+ }
+ _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg);
+ if (ipc_kvoucher) {
+ kvoucher_move_send = true;
+ clear_voucher = _voucher_mach_msg_set_mach_voucher(msg,
+ ipc_kvoucher, kvoucher_move_send);
+ } else {
+ clear_voucher = _voucher_mach_msg_set(msg, voucher);
+ }
}
+ _voucher_activity_trace_msg(voucher, msg, send);
_dispatch_debug_machport(msg->msgh_remote_port);
if (reply) _dispatch_debug_machport(reply);
kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0,
MACH_PORT_NULL);
+ _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, "
+ "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: "
+ "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt,
+ opts, msg_opts, msg->msgh_voucher_port, reply,
+ mach_error_string(kr), kr);
+ if (clear_voucher) {
+ if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) {
+ DISPATCH_CRASH("Voucher port corruption");
+ }
+ mach_voucher_t kv;
+ kv = _voucher_mach_msg_clear(msg, kvoucher_move_send);
+ if (kvoucher_move_send) ipc_kvoucher = kv;
+ }
}
- _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, opts 0x%x, "
- "msg_opts 0x%x, reply on 0x%08x: %s - 0x%x", msg->msgh_remote_port,
- msg->msgh_id, dmsg->do_ctxt, opts, msg_opts, reply,
- mach_error_string(kr), kr);
if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) {
if (opts & MACH_SEND_NOTIFY) {
_dispatch_debug("machport[0x%08x]: send-possible notification "
// send kevent must be installed on the manager queue
dr->dm_needs_mgr = 1;
}
- if (reply) {
- _dispatch_mach_msg_set_options(dmsg, msg_opts |
- DISPATCH_MACH_PSEUDO_RECEIVED);
- msg->msgh_reserved = reply; // Remember the original reply port
+ if (ipc_kvoucher) {
+ _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
+ voucher_t ipc_voucher;
+ ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
+ voucher, dmsg->dmsg_priority, ipc_kvoucher);
+ _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]",
+ ipc_voucher, dmsg, voucher);
+ if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher);
+ dmsg->dmsg_voucher = ipc_voucher;
}
goto out;
+ } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) {
+ _voucher_dealloc_mach_voucher(ipc_kvoucher);
}
- if (fastpath(!kr) && reply) {
+ if (fastpath(!kr) && reply &&
+ !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply)) {
if (_dispatch_queue_get_current() != &_dispatch_mgr_q) {
// reply receive kevent must be installed on the manager queue
dr->dm_needs_mgr = 1;
_dispatch_mach_msg_set_options(dmsg, msg_opts |
DISPATCH_MACH_REGISTER_FOR_REPLY);
- if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) {
- msg->msgh_reserved = reply; // Remember the original reply port
- }
goto out;
}
- _dispatch_mach_reply_kevent_register(dm, reply, dmsg->do_ctxt);
+ _dispatch_mach_reply_kevent_register(dm, reply, dmsg);
}
if (slowpath(dmsg == dr->dm_checkin) && dm->dm_dkev) {
_dispatch_mach_kevent_unregister(dm);
}
- _dispatch_mach_msg_set_reason(dmsg, kr, 0);
- _dispatch_mach_push(dm, dmsg);
- dmsg = NULL;
- if (slowpath(kr) && reply) {
+ if (slowpath(kr)) {
// Send failed, so reply was never connected <rdar://problem/14309159>
- _dispatch_mach_msg_disconnected(dm, reply, MACH_PORT_NULL);
+ dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL);
}
+ _dispatch_mach_msg_set_reason(dmsg, kr, 0);
+ _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority);
+ if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority);
+ dmsg = NULL;
out:
return (dispatch_object_t)dmsg;
}
-static void
-_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou)
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_send_push_wakeup(dispatch_mach_t dm, dispatch_object_t dou,
+ bool wakeup)
{
dispatch_mach_send_refs_t dr = dm->dm_refs;
struct dispatch_object_s *prev, *dc = dou._do;
prev = dispatch_atomic_xchg2o(dr, dm_tail, dc, release);
if (fastpath(prev)) {
prev->do_next = dc;
- return;
+ } else {
+ dr->dm_head = dc;
+ }
+ if (wakeup || !prev) {
+ _dispatch_wakeup(dm);
}
- dr->dm_head = dc;
- _dispatch_wakeup(dm);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou)
+{
+ return _dispatch_mach_send_push_wakeup(dm, dou, false);
}
DISPATCH_NOINLINE
dispatch_mach_send_refs_t dr = dm->dm_refs;
struct dispatch_object_s *dc = NULL, *next_dc = NULL;
while (dr->dm_tail) {
- while (!(dc = fastpath(dr->dm_head))) {
- dispatch_hardware_pause();
- }
+ _dispatch_wait_until(dc = fastpath(dr->dm_head));
do {
next_dc = fastpath(dc->do_next);
dr->dm_head = next_dc;
if (!next_dc && !dispatch_atomic_cmpxchg2o(dr, dm_tail, dc, NULL,
relaxed)) {
- // Enqueue is TIGHTLY controlled, we won't wait long.
- while (!(next_dc = fastpath(dc->do_next))) {
- dispatch_hardware_pause();
- }
+ _dispatch_wait_until(next_dc = fastpath(dc->do_next));
dr->dm_head = next_dc;
}
if (!DISPATCH_OBJ_IS_VTABLE(dc)) {
if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) {
// send barrier
// leave send queue locked until barrier has completed
- return _dispatch_mach_push(dm, dc);
+ return _dispatch_mach_push(dm, dc,
+ ((dispatch_continuation_t)dc)->dc_priority);
}
#if DISPATCH_MACH_SEND_SYNC
if (slowpath((long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT)){
}
continue;
}
+ _dispatch_voucher_ktrace_dmsg_pop((dispatch_mach_msg_t)dc);
if (slowpath(dr->dm_disconnect_cnt) ||
slowpath(dm->ds_atomic_flags & DSF_CANCELED)) {
_dispatch_mach_msg_not_sent(dm, dc);
if (!next_dc &&
!dispatch_atomic_cmpxchg2o(dr, dm_tail, NULL, dc, relaxed)) {
// wait for enqueue slow path to finish
- while (!(next_dc = fastpath(dr->dm_head))) {
- dispatch_hardware_pause();
- }
+ _dispatch_wait_until(next_dc = fastpath(dr->dm_head));
dc->do_next = next_dc;
}
dr->dm_head = dc;
_dispatch_mach_send(dm);
}
+// Mach message options applied to a channel check-in message.
+// When DISPATCH_USE_CHECKIN_NOIMPORTANCE is configured, the check-in is sent
+// without importance donation (see <rdar://problem/16996737>); otherwise no
+// extra options are added.
+static inline mach_msg_option_t
+_dispatch_mach_checkin_options(void)
+{
+ mach_msg_option_t options = 0;
+#if DISPATCH_USE_CHECKIN_NOIMPORTANCE
+ options = MACH_SEND_NOIMPORTANCE; // <rdar://problem/16996737>
+#endif
+ return options;
+}
+
+
+// Extra Mach message options merged into every dispatch_mach_send().
+// Currently always zero; kept as a hook so platform-specific option bits can
+// be added in one place.
+static inline mach_msg_option_t
+_dispatch_mach_send_options(void)
+{
+ mach_msg_option_t options = 0;
+ return options;
+}
+
DISPATCH_NOINLINE
void
dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
}
dispatch_retain(dmsg);
dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+ options |= _dispatch_mach_send_options();
_dispatch_mach_msg_set_options(dmsg, options & ~DISPATCH_MACH_OPTIONS_MASK);
- if (slowpath(dr->dm_tail) || slowpath(dr->dm_disconnect_cnt) ||
+ mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
+ dmsg->dmsg_reply = (MACH_MSGH_BITS_LOCAL(msg->msgh_bits) ==
+ MACH_MSG_TYPE_MAKE_SEND_ONCE &&
+ MACH_PORT_VALID(msg->msgh_local_port) ? msg->msgh_local_port :
+ MACH_PORT_NULL);
+ bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
+ MACH_MSG_TYPE_MOVE_SEND_ONCE);
+ dmsg->dmsg_priority = _dispatch_priority_propagate();
+ dmsg->dmsg_voucher = _voucher_copy();
+ _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);
+ if ((!is_reply && slowpath(dr->dm_tail)) ||
+ slowpath(dr->dm_disconnect_cnt) ||
slowpath(dm->ds_atomic_flags & DSF_CANCELED) ||
slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1,
acquire))) {
+ _dispatch_voucher_ktrace_dmsg_push(dmsg);
return _dispatch_mach_send_push(dm, dmsg);
}
if (slowpath(dmsg = _dispatch_mach_msg_send(dm, dmsg)._dmsg)) {
(void)dispatch_atomic_dec2o(dr, dm_sending, release);
- return _dispatch_mach_send_push(dm, dmsg);
+ _dispatch_voucher_ktrace_dmsg_push(dmsg);
+ return _dispatch_mach_send_push_wakeup(dm, dmsg, true);
}
- if (slowpath(dr->dm_tail)) {
+ if (!is_reply && slowpath(dr->dm_tail)) {
return _dispatch_mach_send_drain(dm);
}
(void)dispatch_atomic_dec2o(dr, dm_sending, release);
}
if (!TAILQ_EMPTY(&dm->dm_refs->dm_replies)) {
dispatch_mach_reply_refs_t dmr, tmp;
- TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dm_list, tmp){
+ TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp){
_dispatch_mach_reply_kevent_unregister(dm, dmr, true);
}
}
(void)dispatch_atomic_inc2o(dr, dm_disconnect_cnt, relaxed);
if (MACH_PORT_VALID(send) && checkin) {
dispatch_retain(checkin);
+ mach_msg_option_t options = _dispatch_mach_checkin_options();
+ _dispatch_mach_msg_set_options(checkin, options);
dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin);
} else {
checkin = NULL;
dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
_dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq);
+ _dispatch_voucher_ktrace_dmsg_pop(dmsg);
+ _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg);
+ _dispatch_adopt_priority_and_replace_voucher(dmsg->dmsg_priority,
+ dmsg->dmsg_voucher, DISPATCH_PRIORITY_ENFORCE);
+ dmsg->dmsg_voucher = NULL;
if (slowpath(!dm->dm_connect_handler_called)) {
_dispatch_mach_connect_invoke(dm);
}
dc->dc_ctxt = dc;
dc->dc_data = context;
dc->dc_other = barrier;
+ _dispatch_continuation_voucher_set(dc, 0);
+ _dispatch_continuation_priority_set(dc, 0, 0);
dispatch_mach_send_refs_t dr = dm->dm_refs;
if (slowpath(dr->dm_tail) || slowpath(!dispatch_atomic_cmpxchg2o(dr,
return _dispatch_mach_send_push(dm, dc);
}
// leave send queue locked until barrier has completed
- return _dispatch_mach_push(dm, dc);
+ return _dispatch_mach_push(dm, dc, dc->dc_priority);
}
DISPATCH_NOINLINE
dc->dc_ctxt = dc;
dc->dc_data = context;
dc->dc_other = barrier;
- return _dispatch_mach_push(dm, dc);
+ _dispatch_continuation_voucher_set(dc, 0);
+ _dispatch_continuation_priority_set(dc, 0, 0);
+
+ return _dispatch_mach_push(dm, dc, dc->dc_priority);
}
DISPATCH_NOINLINE
// An item on the channel changed the target queue
return dm->do_targetq;
}
+ } else if (dr->dm_sending) {
+ // Sending and uninstallation below require the send lock, the channel
+ // will be woken up when the lock is dropped <rdar://15132939&15203957>
+ return NULL;
} else if (dr->dm_tail) {
if (slowpath(dr->dm_needs_mgr) || (slowpath(dr->dm_disconnect_cnt) &&
(dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)))) {
if (slowpath(!dm->ds_is_installed)) {
// The channel needs to be installed on the manager queue.
return true;
- } else if (dm->dq_items_tail) {
+ } else if (_dispatch_queue_class_probe(dm)) {
// The source has pending messages to deliver to the target queue.
return true;
+ } else if (dr->dm_sending) {
+ // Sending and uninstallation below require the send lock, the channel
+ // will be woken up when the lock is dropped <rdar://15132939&15203957>
+ return false;
} else if (dr->dm_tail &&
(!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) ||
(dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt)) {
}
dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg),
sizeof(struct dispatch_mach_msg_s) +
- (destructor ? 0 : size - sizeof(dmsg->msg)));
+ (destructor ? 0 : size - sizeof(dmsg->dmsg_msg)));
if (destructor) {
- dmsg->msg = msg;
+ dmsg->dmsg_msg = msg;
} else if (msg) {
- memcpy(dmsg->buf, msg, size);
+ memcpy(dmsg->dmsg_buf, msg, size);
}
dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
- dmsg->do_targetq = _dispatch_get_root_queue(0, false);
- dmsg->destructor = destructor;
- dmsg->size = size;
+ dmsg->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,
+ false);
+ dmsg->dmsg_destructor = destructor;
+ dmsg->dmsg_size = size;
if (msg_ptr) {
*msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
}
void
_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg)
{
- switch (dmsg->destructor) {
+ if (dmsg->dmsg_voucher) {
+ _voucher_release(dmsg->dmsg_voucher);
+ dmsg->dmsg_voucher = NULL;
+ }
+ switch (dmsg->dmsg_destructor) {
case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
break;
case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
- free(dmsg->msg);
+ free(dmsg->dmsg_msg);
break;
case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
- mach_vm_size_t vm_size = dmsg->size;
- mach_vm_address_t vm_addr = (uintptr_t)dmsg->msg;
+ mach_vm_size_t vm_size = dmsg->dmsg_size;
+ mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
(void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
vm_addr, vm_size));
break;
static inline mach_msg_header_t*
_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
{
- return dmsg->destructor ? dmsg->msg : (mach_msg_header_t*)dmsg->buf;
+ return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
+ (mach_msg_header_t*)dmsg->dmsg_buf;
}
mach_msg_header_t*
dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
{
if (size_ptr) {
- *size_ptr = dmsg->size;
+ *size_ptr = dmsg->dmsg_size;
}
return _dispatch_mach_msg_get_msg(dmsg);
}
offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, "
"refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1);
offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
- "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->buf);
+ "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->dmsg_buf);
mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
if (hdr->msgh_id) {
offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
{
mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT
| MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX)
- | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0);
+ | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER;
mach_msg_options_t tmp_options;
mig_reply_error_t *bufTemp, *bufRequest, *bufReply;
mach_msg_return_t kr = 0;
(void)dispatch_assume_zero(r);
}
#endif
-
+ _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head));
demux_success = callback(&bufRequest->Head, &bufReply->Head);
if (!demux_success) {
{
size_t offset = 0;
offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
- dm->dq_label ? dm->dq_label : dx_kind(dm), dm);
+ dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label :
+ dx_kind(dm), dm);
offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
MACH_PORT_RIGHT_DEAD_NAME, &nd));
}
if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
- (void)dispatch_assume_zero(mach_port_dnrequest_info(mach_task_self(),
- name, &dnrsiz, &dnreqs));
+ kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
+ if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
}
if (type & MACH_PORT_TYPE_RECEIVE) {
mach_port_status_t status = { .mps_pset = 0, };
unsigned long missed;
};
+enum {
+ DS_EVENT_HANDLER = 0,
+ DS_CANCEL_HANDLER,
+ DS_REGISTN_HANDLER,
+};
+
// Source state which may contain references to the source object
// Separately allocated so that 'leaks' can see sources <rdar://problem/9050566>
typedef struct dispatch_source_refs_s {
TAILQ_ENTRY(dispatch_source_refs_s) dr_list;
uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t
- dispatch_function_t ds_handler_func;
- void *ds_handler_ctxt;
- void *ds_cancel_handler;
- void *ds_registration_handler;
+ dispatch_continuation_t ds_handler[3];
} *dispatch_source_refs_t;
typedef struct dispatch_timer_source_refs_s {
ds_is_installed:1, \
ds_needs_rearm:1, \
ds_is_timer:1, \
- ds_cancel_is_block:1, \
- ds_handler_is_block:1, \
- ds_registration_is_block:1, \
+ ds_vmpressure_override:1, \
+ ds_memorystatus_override:1, \
+ dm_handler_is_block:1, \
dm_connect_handler_called:1, \
dm_cancel_handler_called:1; \
unsigned long ds_pending_data_mask;
struct dispatch_mach_reply_refs_s {
TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list;
uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t
- dispatch_kevent_t dm_dkev;
- TAILQ_ENTRY(dispatch_mach_reply_refs_s) dm_list;
+ dispatch_kevent_t dmr_dkev;
+ void *dmr_ctxt;
+ pthread_priority_t dmr_priority;
+ voucher_t dmr_voucher;
+ TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list;
};
typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t;
DISPATCH_CLASS_DECL(mach_msg);
struct dispatch_mach_msg_s {
DISPATCH_STRUCT_HEADER(mach_msg);
- dispatch_mach_msg_destructor_t destructor;
- size_t size;
+ mach_port_t dmsg_reply;
+ pthread_priority_t dmsg_priority;
+ voucher_t dmsg_voucher;
+ dispatch_mach_msg_destructor_t dmsg_destructor;
+ size_t dmsg_size;
union {
- mach_msg_header_t *msg;
- char buf[0];
+ mach_msg_header_t *dmsg_msg;
+ char dmsg_buf[0];
};
};
unsigned long _dispatch_source_probe(dispatch_source_t ds);
size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz);
void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval);
+void _dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds,
+ void *ctxt, dispatch_function_t handler);
void _dispatch_mach_dispose(dispatch_mach_t dm);
void _dispatch_mach_invoke(dispatch_mach_t dm);
#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \
|| TARGET_OS_WIN32
-DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data;
+DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = {
+ .ratio_1_to_1 = true,
+};
void
_dispatch_get_host_time_init(void *context DISPATCH_UNUSED)
#ifndef __DISPATCH_TRACE__
#define __DISPATCH_TRACE__
-#if DISPATCH_USE_DTRACE && !__OBJC2__
+#if !__OBJC2__
+#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
typedef struct dispatch_trace_timer_params_s {
int64_t deadline, interval, leeway;
} *dispatch_trace_timer_params_t;
#include "provider.h"
+#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
#if DISPATCH_USE_DTRACE_INTROSPECTION
-
#define _dispatch_trace_callout(_c, _f, _dcc) do { \
if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \
slowpath(DISPATCH_CALLOUT_RETURN_ENABLED())) { \
_dcc; \
} \
} while (0)
+#elif DISPATCH_INTROSPECTION
+#define _dispatch_trace_callout(_c, _f, _dcc) \
+ do { (void)(_c); (void)(_f); _dcc; } while (0)
+#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
+#if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_client_callout(void *ctxt, dispatch_function_t f)
_dispatch_introspection_callout_return(ctxt, func);
}
-#ifdef __BLOCKS__
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_trace_client_callout_block(dispatch_block_t b)
-{
- dispatch_function_t func = _dispatch_Block_invoke(b);
- _dispatch_introspection_callout_entry(b, func);
- _dispatch_trace_callout(b, func, _dispatch_client_callout(b, func));
- _dispatch_introspection_callout_return(b, func);
-}
-#endif
-
#define _dispatch_client_callout _dispatch_trace_client_callout
#define _dispatch_client_callout2 _dispatch_trace_client_callout2
-#define _dispatch_client_callout_block _dispatch_trace_client_callout_block
+#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
+#if DISPATCH_USE_DTRACE_INTROSPECTION
#define _dispatch_trace_continuation(_q, _o, _t) do { \
dispatch_queue_t _dq = (_q); \
const char *_label = _dq && _dq->dq_label ? _dq->dq_label : ""; \
struct dispatch_object_s *_do = (_o); \
+ dispatch_continuation_t _dc; \
char *_kind; \
dispatch_function_t _func; \
void *_ctxt; \
if (DISPATCH_OBJ_IS_VTABLE(_do)) { \
- _ctxt = _do->do_ctxt; \
_kind = (char*)dx_kind(_do); \
if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \
_DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \
- _func = ((dispatch_source_t)_do)->ds_refs->ds_handler_func; \
+ dispatch_source_t _ds = (dispatch_source_t)_do; \
+ _dc = _ds->ds_refs->ds_handler[DS_EVENT_HANDLER]; \
+ _func = _dc->dc_func; \
+ _ctxt = _dc->dc_ctxt; \
} else { \
_func = (dispatch_function_t)_dispatch_queue_invoke; \
+ _ctxt = _do->do_ctxt; \
} \
} else { \
- struct dispatch_continuation_s *_dc = (void*)(_do); \
+ _dc = (void*)_do; \
_ctxt = _dc->dc_ctxt; \
if ((long)_dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { \
_kind = "semaphore"; \
} \
_t(_dq, _label, _do, _kind, _func, _ctxt); \
} while (0)
-
+#elif DISPATCH_INTROSPECTION
+#define _dispatch_trace_continuation(_q, _o, _t) \
+ do { (void)(_q); (void)(_o); } while(0)
+#define DISPATCH_QUEUE_PUSH_ENABLED() 0
+#define DISPATCH_QUEUE_POP_ENABLED() 0
+#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
+
+#if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head,
- dispatch_object_t _tail, unsigned int n)
+ dispatch_object_t _tail, pthread_priority_t pp, unsigned int n)
{
if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
struct dispatch_object_s *dou = _head._do;
} while (dou != _tail._do && (dou = dou->do_next));
}
_dispatch_introspection_queue_push_list(dq, _head, _tail);
- _dispatch_queue_push_list(dq, _head, _tail, n);
+ _dispatch_queue_push_list(dq, _head, _tail, pp, n);
}
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail)
+_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail, pthread_priority_t pp)
{
if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
struct dispatch_object_s *dou = _tail._do;
_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
}
_dispatch_introspection_queue_push(dq, _tail);
- _dispatch_queue_push(dq, _tail);
+ _dispatch_queue_push(dq, _tail, pp);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_trace_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail,
- bool wakeup)
+ pthread_priority_t pp, bool wakeup)
+{
+ if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
+ struct dispatch_object_s *dou = _tail._do;
+ _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
+ }
+ _dispatch_introspection_queue_push(dq, _tail);
+ _dispatch_queue_push_wakeup(dq, _tail, pp, wakeup);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail)
{
if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) {
struct dispatch_object_s *dou = _tail._do;
_dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH);
}
_dispatch_introspection_queue_push(dq, _tail);
- _dispatch_queue_push_wakeup(dq, _tail, wakeup);
}
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou)
+_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou, pthread_priority_t pp)
{
- _dispatch_queue_push(dq, dou);
+ _dispatch_queue_push(dq, dou, pp);
}
#define _dispatch_queue_push_list _dispatch_trace_queue_push_list
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_trace_continuation_pop(dispatch_queue_t dq,
- dispatch_object_t dou)
+_dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou)
{
if (slowpath(DISPATCH_QUEUE_POP_ENABLED())) {
_dispatch_trace_continuation(dq, dou._do, DISPATCH_QUEUE_POP);
}
_dispatch_introspection_queue_pop(dq, dou);
}
+#else
+#define _dispatch_queue_push_notrace _dispatch_queue_push
+#define _dispatch_trace_continuation_push(dq, dou) \
+ do { (void)(dq); (void)(dou); } while(0)
+#define _dispatch_trace_continuation_pop(dq, dou) \
+ do { (void)(dq); (void)(dou); } while(0)
+#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION
-#endif // DISPATCH_USE_DTRACE_INTROSPECTION
-
+#if DISPATCH_USE_DTRACE
static inline dispatch_function_t
_dispatch_trace_timer_function(dispatch_source_t ds, dispatch_source_refs_t dr)
{
- dispatch_function_t func = dr->ds_handler_func;
+ dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
+ dispatch_function_t func = dc ? dc->dc_func : NULL;
if (func == _dispatch_after_timer_callback &&
- !(ds->ds_atomic_flags & DSF_CANCELED)) {
- dispatch_continuation_t dc = ds->do_ctxt;
+ !(ds->ds_atomic_flags & DSF_CANCELED)) {
+ dc = ds->do_ctxt;
func = dc->dc_func != _dispatch_call_block_and_release ? dc->dc_func :
dc->dc_ctxt ? _dispatch_Block_invoke(dc->dc_ctxt) : NULL;
}
#define _dispatch_trace_timer_fire(dr, data, missed) \
do { (void)(dr); (void)(data); (void)(missed); } while(0)
-#endif // DISPATCH_USE_DTRACE && !__OBJC2__
-
-#if !DISPATCH_USE_DTRACE_INTROSPECTION
-
-#define _dispatch_queue_push_notrace _dispatch_queue_push
-#define _dispatch_trace_continuation_pop(dq, dou) \
- do { (void)(dq); (void)(dou); } while(0)
+#endif // DISPATCH_USE_DTRACE
-#endif // !DISPATCH_USE_DTRACE_INTROSPECTION
+#endif // !__OBJC2__
#endif // __DISPATCH_TRACE__
--- /dev/null
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+
+#if VOUCHER_USE_MACH_VOUCHER
+
+#include <mach/mach_voucher.h>
+
+// <rdar://16363550>
+#ifndef VM_MEMORY_GENEALOGY
+#define VM_MEMORY_GENEALOGY 78
+#endif
+
+#ifndef VOUCHER_ATM_COLLECT_THRESHOLD
+#define VOUCHER_ATM_COLLECT_THRESHOLD 1
+#endif
+#define VATM_COLLECT_THRESHOLD_VALUE(t) (((t) - 1) * 2)
+static volatile long _voucher_atm_collect_level;
+static long _voucher_atm_collect_threshold =
+ VATM_COLLECT_THRESHOLD_VALUE(VOUCHER_ATM_COLLECT_THRESHOLD);
+static unsigned long _voucher_atm_subid_bits;
+
+typedef struct _voucher_atm_s *_voucher_atm_t;
+
+static void _voucher_activity_atfork_child(void);
+static inline mach_voucher_t _voucher_get_atm_mach_voucher(voucher_t voucher);
+static inline mach_voucher_t _voucher_activity_get_atm_mach_voucher(
+ _voucher_activity_t act);
+static inline _voucher_activity_t _voucher_activity_get(voucher_t voucher);
+static _voucher_activity_t _voucher_activity_copy_from_mach_voucher(
+ mach_voucher_t kv, voucher_activity_id_t va_id);
+static inline _voucher_activity_t _voucher_activity_retain(
+ _voucher_activity_t act);
+static inline void _voucher_activity_release(_voucher_activity_t act);
+
+#pragma mark -
+#pragma mark voucher_t
+
+#if USE_OBJC
+OS_OBJECT_OBJC_CLASS_DECL(voucher);
+#define VOUCHER_CLASS OS_OBJECT_OBJC_CLASS(voucher)
+#else
+const _os_object_class_s _voucher_class = {
+ ._os_obj_xref_dispose = (void(*)(_os_object_t))_voucher_xref_dispose,
+ ._os_obj_dispose = (void(*)(_os_object_t))_voucher_dispose,
+};
+#define VOUCHER_CLASS &_voucher_class
+#endif // USE_OBJC
+
+static const voucher_activity_trace_id_t _voucher_activity_trace_id_release =
+ (voucher_activity_trace_id_t)voucher_activity_tracepoint_type_release <<
+ _voucher_activity_trace_id_type_shift;
+static const unsigned int _voucher_max_activities = 16;
+
+// Fill a caller-provided recipe array with the three standard voucher
+// recipes: a copy-all base recipe, a copy recipe for the ATM attribute, and a
+// user-data store slot for the voucher's serialized bits. When bits_size is
+// nonzero, the user-data recipe is sized accordingly and stamped with the v1
+// magic value; when it is zero the bits recipe is left empty.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_recipes_init(mach_voucher_attr_recipe_data_t *recipes,
+ mach_voucher_attr_content_size_t bits_size)
+{
+ static const mach_voucher_attr_recipe_data_t base_recipe = {
+ .key = MACH_VOUCHER_ATTR_KEY_ALL,
+ .command = MACH_VOUCHER_ATTR_COPY,
+ };
+ _voucher_recipes_base(recipes) = base_recipe;
+ static const mach_voucher_attr_recipe_data_t atm_recipe = {
+ .key = MACH_VOUCHER_ATTR_KEY_ATM,
+ .command = MACH_VOUCHER_ATTR_COPY,
+ };
+ _voucher_recipes_atm(recipes) = atm_recipe;
+ static const mach_voucher_attr_recipe_data_t bits_recipe = {
+ .key = MACH_VOUCHER_ATTR_KEY_USER_DATA,
+ .command = MACH_VOUCHER_ATTR_USER_DATA_STORE,
+ };
+ _voucher_recipes_bits(recipes) = bits_recipe;
+ if (!bits_size) return;
+ _voucher_recipes_bits(recipes).content_size = bits_size;
+ *_voucher_recipes_magic(recipes) = _voucher_magic_v1;
+}
+
+// Allocate a voucher_t object sized for the requested number of activities,
+// an optional priority, and optional extra recipe bytes. Activity count is
+// clamped to _voucher_max_activities (16). Recipe/bits storage is only
+// allocated when the voucher actually carries priority, activities, or extra
+// recipes; a plain voucher is just the base object. The returned object comes
+// from _os_object_alloc_realized and is owned by the caller.
+static inline voucher_t
+_voucher_alloc(unsigned int activities, pthread_priority_t priority,
+ mach_voucher_attr_recipe_size_t extra)
+{
+ if (activities > _voucher_max_activities) {
+ activities = _voucher_max_activities;
+ }
+ voucher_t voucher;
+ size_t voucher_size, recipes_size;
+ mach_voucher_attr_content_size_t bits_size;
+ // Recipes are needed only if there is anything to encode in them.
+ recipes_size = (priority||activities||extra) ? _voucher_recipes_size() : 0;
+ bits_size = recipes_size ? _voucher_bits_size(activities) : 0;
+ voucher_size = sizeof(voucher_s) + recipes_size + bits_size + extra;
+ voucher = (voucher_t)_os_object_alloc_realized(VOUCHER_CLASS, voucher_size);
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+ // Extra recipe bytes live at the tail of the allocation.
+ voucher->v_recipe_extra_size = extra;
+ voucher->v_recipe_extra_offset = voucher_size - extra;
+#else
+ dispatch_assert(!extra);
+#endif
+ voucher->v_has_priority = priority ? 1 : 0;
+ voucher->v_activities = activities;
+ if (!recipes_size) return voucher;
+ _voucher_recipes_init(voucher->v_recipes, bits_size);
+ *_voucher_priority(voucher) = (_voucher_priority_t)priority;
+ _dispatch_voucher_debug("alloc", voucher);
+ return voucher;
+}
+
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+// Public constructor: create a voucher from an optional caller-supplied
+// recipe object; the recipe's raw bytes are copied into the voucher's extra
+// recipe space. Only compiled when recipe objects are enabled.
+voucher_t
+voucher_create(voucher_recipe_t recipe)
+{
+ // TODO: capture current activities or current kvoucher ?
+ mach_voucher_attr_recipe_size_t extra = recipe ? recipe->vr_size : 0;
+ voucher_t voucher = _voucher_alloc(0, 0, extra);
+ if (extra) {
+ memcpy(_voucher_extra_recipes(voucher), recipe->vr_data, extra);
+ }
+ return voucher;
+}
+#endif
+
+voucher_t
+voucher_adopt(voucher_t voucher)
+{
+ return _voucher_adopt(voucher);
+}
+
+voucher_t
+voucher_copy(void)
+{
+ return _voucher_copy();
+}
+
+voucher_t
+voucher_copy_without_importance(void)
+{
+ return _voucher_copy_without_importance();
+}
+
+// Drop the voucher adopted by an exiting thread by swapping NULL in its
+// place; presumably installed as the TSD destructor for the voucher key —
+// TODO confirm against the key registration site.
+void
+_voucher_thread_cleanup(void *voucher)
+{
+ _voucher_swap(voucher, NULL);
+}
+
+DISPATCH_CACHELINE_ALIGN
+static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE];
+#define _vouchers(kv) (&_vouchers[VL_HASH((kv))])
+static os_lock_handoff_s _vouchers_lock = OS_LOCK_HANDOFF_INIT;
+#define _vouchers_lock_lock() os_lock_lock(&_vouchers_lock)
+#define _vouchers_lock_unlock() os_lock_unlock(&_vouchers_lock)
+
+// Look up the voucher wrapping kernel voucher `kv` in the global hash table
+// and take an xref on it. Returns NULL when kv is MACH_VOUCHER_NULL or no
+// entry matches (TAILQ_FOREACH leaves v == NULL on loop exhaustion). Runs
+// under the handoff lock so it is serialized against insert/remove.
+static voucher_t
+_voucher_find_and_retain(mach_voucher_t kv)
+{
+ voucher_t v;
+ if (!kv) return NULL;
+ _vouchers_lock_lock();
+ TAILQ_FOREACH(v, _vouchers(kv), v_list) {
+ if (v->v_ipc_kvoucher == kv) {
+ int xref_cnt = dispatch_atomic_inc2o(v, os_obj_xref_cnt, relaxed);
+ _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1);
+ // A pre-increment xref below -1 means the object was already
+ // fully released: crash rather than hand out a dead voucher.
+ if (slowpath(xref_cnt < 0)) {
+ _dispatch_voucher_debug("overrelease", v);
+ DISPATCH_CRASH("Voucher overrelease");
+ }
+ if (xref_cnt == 0) {
+ // resurrection: raced with _voucher_remove
+ (void)dispatch_atomic_inc2o(v, os_obj_ref_cnt, relaxed);
+ }
+ break;
+ }
+ }
+ _vouchers_lock_unlock();
+ return v;
+}
+
+// Register a voucher in the global hash table, keyed by its IPC kernel
+// voucher. Vouchers without a kernel voucher are not hashed. Crashes if the
+// voucher already appears enqueued (double insert / corruption).
+static void
+_voucher_insert(voucher_t v)
+{
+ mach_voucher_t kv = v->v_ipc_kvoucher;
+ if (!kv) return;
+ _vouchers_lock_lock();
+ if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) {
+ _dispatch_voucher_debug("corruption", v);
+ DISPATCH_CRASH("Voucher corruption");
+ }
+ TAILQ_INSERT_TAIL(_vouchers(kv), v, v_list);
+ _vouchers_lock_unlock();
+}
+
+// Unhash a voucher on final release. The seq_cst re-read of the xref count
+// under the lock detects a concurrent _voucher_find_and_retain that
+// resurrected the object, in which case the entry is left in place. The
+// removed entry's tqe_next is poisoned (~0) to catch stale use.
+static void
+_voucher_remove(voucher_t v)
+{
+ mach_voucher_t kv = v->v_ipc_kvoucher;
+ if (!_TAILQ_IS_ENQUEUED(v, v_list)) return;
+ _vouchers_lock_lock();
+ if (slowpath(!kv)) {
+ _dispatch_voucher_debug("corruption", v);
+ DISPATCH_CRASH("Voucher corruption");
+ }
+ // check for resurrection race with _voucher_find_and_retain
+ if (dispatch_atomic_load2o(v, os_obj_xref_cnt, seq_cst) < 0 &&
+ _TAILQ_IS_ENQUEUED(v, v_list)) {
+ TAILQ_REMOVE(_vouchers(kv), v, v_list);
+ _TAILQ_MARK_NOT_ENQUEUED(v, v_list);
+ v->v_list.tqe_next = (void*)~0ull;
+ }
+ _vouchers_lock_unlock();
+}
+
+/*
+ * Drop one kernel reference on a mach voucher port, tracing the port
+ * before it can go away; MIG status is verified and asserted but not fatal.
+ */
+void
+_voucher_dealloc_mach_voucher(mach_voucher_t kv)
+{
+	_dispatch_kvoucher_debug("dealloc", kv);
+	_dispatch_voucher_debug_machport(kv);
+	kern_return_t ret = mach_voucher_deallocate(kv);
+	DISPATCH_VERIFY_MIG(ret);
+	(void)dispatch_assume_zero(ret);
+}
+
+// Thin wrapper over host_create_mach_voucher: casts the recipe array to the
+// MIG raw-recipe types, creates the kernel voucher on the host port, and
+// traces the new port on success. *kvp receives MACH_VOUCHER_NULL on failure.
+static inline kern_return_t
+_voucher_create_mach_voucher(const mach_voucher_attr_recipe_data_t *recipes,
+		size_t recipes_size, mach_voucher_t *kvp)
+{
+	kern_return_t kr;
+	mach_port_t mhp = _dispatch_get_mach_host_port();
+	mach_voucher_t kv = MACH_VOUCHER_NULL;
+	mach_voucher_attr_raw_recipe_array_t kvr;
+	mach_voucher_attr_recipe_size_t kvr_size;
+	kvr = (mach_voucher_attr_raw_recipe_array_t)recipes;
+	kvr_size = (mach_voucher_attr_recipe_size_t)recipes_size;
+	kr = host_create_mach_voucher(mhp, kvr, kvr_size, &kv);
+	DISPATCH_VERIFY_MIG(kr);
+	if (!kr) {
+		_dispatch_kvoucher_debug("create", kv);
+		_dispatch_voucher_debug_machport(kv);
+	}
+	*kvp = kv;
+	return kr;
+}
+
+// Enable bank-attribute support when the SDK provides <bank/bank_types.h>;
+// the task-wide default bank voucher is defined alongside the toggle.
+#if __has_include(<bank/bank_types.h>) && !defined(VOUCHER_USE_ATTR_BANK)
+#include <bank/bank_types.h>
+#define VOUCHER_USE_ATTR_BANK 1
+mach_voucher_t _voucher_default_task_mach_voucher;
+#endif
+
+// Once-handler: create the task's default bank kernel voucher with a
+// BANK_CREATE recipe and publish it as both the immutable default and the
+// current task voucher. Failure to create it is a client-fatal condition.
+void
+_voucher_task_mach_voucher_init(void* ctxt DISPATCH_UNUSED)
+{
+#if VOUCHER_USE_ATTR_BANK
+	kern_return_t kr;
+	mach_voucher_t kv;
+	static const mach_voucher_attr_recipe_data_t task_create_recipe = {
+		.key = MACH_VOUCHER_ATTR_KEY_BANK,
+		.command = MACH_VOUCHER_ATTR_BANK_CREATE,
+	};
+	kr = _voucher_create_mach_voucher(&task_create_recipe,
+			sizeof(task_create_recipe), &kv);
+	if (dispatch_assume_zero(kr)) {
+		DISPATCH_CLIENT_CRASH("Could not create task mach voucher");
+	}
+	_voucher_default_task_mach_voucher = kv;
+	_voucher_task_mach_voucher = kv;
+#endif
+}
+
+// Replace the task-wide default voucher with a bank COPY of the current
+// thread's kernel voucher (falling back to the task default when the thread
+// has none), releasing the previously installed replacement if any.
+void
+voucher_replace_default_voucher(void)
+{
+#if VOUCHER_USE_ATTR_BANK
+	(void)_voucher_get_task_mach_voucher(); // initialize task mach voucher
+	mach_voucher_t kv, tkv = MACH_VOUCHER_NULL;
+	voucher_t v = _voucher_get();
+	if (v && v->v_kvoucher) {
+		kern_return_t kr;
+		// Prefer the IPC kernel voucher when one was materialized.
+		kv = v->v_ipc_kvoucher ? v->v_ipc_kvoucher : v->v_kvoucher;
+		const mach_voucher_attr_recipe_data_t task_copy_recipe = {
+			.key = MACH_VOUCHER_ATTR_KEY_BANK,
+			.command = MACH_VOUCHER_ATTR_COPY,
+			.previous_voucher = kv,
+		};
+		kr = _voucher_create_mach_voucher(&task_copy_recipe,
+				sizeof(task_copy_recipe), &tkv);
+		if (dispatch_assume_zero(kr)) {
+			tkv = MACH_VOUCHER_NULL;
+		}
+	}
+	if (!tkv) tkv = _voucher_default_task_mach_voucher;
+	// Swap in atomically and release the old replacement (never the default).
+	kv = dispatch_atomic_xchg(&_voucher_task_mach_voucher, tkv, relaxed);
+	if (kv && kv != _voucher_default_task_mach_voucher) {
+		_voucher_dealloc_mach_voucher(kv);
+	}
+	_dispatch_voucher_debug("kvoucher[0x%08x] replace default voucher", v, tkv);
+#endif
+}
+
+// Resolve the ATM kernel voucher backing this voucher's activity (if any).
+static inline mach_voucher_t
+_voucher_get_atm_mach_voucher(voucher_t voucher)
+{
+	return _voucher_activity_get_atm_mach_voucher(
+			_voucher_activity_get(voucher));
+}
+
+// Return the kernel voucher representing this voucher over IPC, lazily
+// materializing one from the voucher's recipes when it carries priority,
+// activities or extra recipes; otherwise the base/task kernel voucher is
+// returned directly.
+mach_voucher_t
+_voucher_get_mach_voucher(voucher_t voucher)
+{
+	if (!voucher) return MACH_VOUCHER_NULL;
+	if (voucher->v_ipc_kvoucher) return voucher->v_ipc_kvoucher;
+	mach_voucher_t kvb = voucher->v_kvoucher;
+	if (!kvb) kvb = _voucher_get_task_mach_voucher();
+	if (!voucher->v_has_priority && !voucher->v_activities &&
+			!_voucher_extra_size(voucher)) {
+		// No userspace payload: the base kernel voucher suffices.
+		return kvb;
+	}
+	kern_return_t kr;
+	mach_voucher_t kv, kvo;
+	_voucher_base_recipe(voucher).previous_voucher = kvb;
+	_voucher_atm_recipe(voucher).previous_voucher =
+			_voucher_get_atm_mach_voucher(voucher);
+	kr = _voucher_create_mach_voucher(voucher->v_recipes,
+			_voucher_recipes_size() + _voucher_extra_size(voucher) +
+			_voucher_bits_recipe(voucher).content_size, &kv);
+	if (dispatch_assume_zero(kr) || !kv){
+		return MACH_VOUCHER_NULL;
+	}
+	// Publish the new kernel voucher; a concurrent caller may have won the
+	// race, in which case drop ours and adopt the winner's.
+	if (!dispatch_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL,
+			kv, &kvo, relaxed)) {
+		_voucher_dealloc_mach_voucher(kv);
+		kv = kvo;
+	} else {
+		if (kv == voucher->v_kvoucher) {
+			// if v_kvoucher == v_ipc_kvoucher we keep only one reference
+			_voucher_dealloc_mach_voucher(kv);
+		}
+		_voucher_insert(voucher);
+		_dispatch_voucher_debug("kvoucher[0x%08x] create", voucher, kv);
+	}
+	return kv;
+}
+
+// Build a one-off kernel voucher carrying `priority` on top of `voucher`'s
+// recipes (or fresh default recipes when the voucher has no payload).
+// Returns MACH_VOUCHER_NULL when the priority already matches, signalling
+// the caller to use _voucher_get_mach_voucher instead.
+mach_voucher_t
+_voucher_create_mach_voucher_with_priority(voucher_t voucher,
+		pthread_priority_t priority)
+{
+	if (priority == _voucher_get_priority(voucher)) {
+		return MACH_VOUCHER_NULL; // caller will use _voucher_get_mach_voucher
+	}
+	kern_return_t kr;
+	mach_voucher_t kv, kvb = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL;
+	if (!kvb) kvb = _voucher_get_task_mach_voucher();
+	mach_voucher_attr_recipe_data_t *recipes;
+	size_t recipes_size = _voucher_recipes_size();
+	if (voucher && (voucher->v_has_priority || voucher->v_activities ||
+			_voucher_extra_size(voucher))) {
+		// Clone the voucher's recipes on the stack, then patch in the ATM
+		// voucher since the copy still points at the original's.
+		recipes_size += _voucher_bits_recipe(voucher).content_size +
+				_voucher_extra_size(voucher);
+		recipes = alloca(recipes_size);
+		memcpy(recipes, voucher->v_recipes, recipes_size);
+		_voucher_recipes_atm(recipes).previous_voucher =
+				_voucher_get_atm_mach_voucher(voucher);
+	} else {
+		// No payload to copy: start from freshly initialized recipes.
+		mach_voucher_attr_content_size_t bits_size = _voucher_bits_size(0);
+		recipes_size += bits_size;
+		recipes = alloca(recipes_size);
+		_voucher_recipes_init(recipes, bits_size);
+	}
+	_voucher_recipes_base(recipes).previous_voucher = kvb;
+	*_voucher_recipes_priority(recipes) = (_voucher_priority_t)priority;
+	kr = _voucher_create_mach_voucher(recipes, recipes_size, &kv);
+	if (dispatch_assume_zero(kr) || !kv){
+		return MACH_VOUCHER_NULL;
+	}
+	_dispatch_kvoucher_debug("create with priority from voucher[%p]", kv,
+			voucher);
+	return kv;
+}
+
+// Wrap an incoming kernel voucher `kv` (e.g. received in a message) in a
+// voucher object: redeem it, dedupe against the global hash, then decode the
+// USER_DATA recipe payload (magic, priority, activity ids) into a freshly
+// allocated voucher. Consumes the caller's reference on kv.
+static voucher_t
+_voucher_create_with_mach_voucher(mach_voucher_t kv)
+{
+	if (!kv) return NULL;
+	kern_return_t kr;
+	mach_voucher_t rkv;
+	mach_voucher_attr_recipe_t vr;
+	size_t vr_size;
+	mach_voucher_attr_recipe_size_t kvr_size = 0;
+	const mach_voucher_attr_recipe_data_t redeem_recipe[] = {
+		[0] = {
+			.key = MACH_VOUCHER_ATTR_KEY_ALL,
+			.command = MACH_VOUCHER_ATTR_COPY,
+			.previous_voucher = kv,
+		},
+#if VOUCHER_USE_ATTR_BANK
+		[1] = {
+			.key = MACH_VOUCHER_ATTR_KEY_BANK,
+			.command = MACH_VOUCHER_ATTR_REDEEM,
+		},
+#endif
+	};
+	kr = _voucher_create_mach_voucher(redeem_recipe, sizeof(redeem_recipe),
+			&rkv);
+	if (!dispatch_assume_zero(kr)) {
+		// Redeemed: the original reference is no longer needed.
+		_voucher_dealloc_mach_voucher(kv);
+	} else {
+		// Redemption failed: fall back to using kv directly.
+		_dispatch_voucher_debug_machport(kv);
+		rkv = kv;
+	}
+	voucher_t v = _voucher_find_and_retain(rkv);
+	if (v) {
+		// Already wrapped: drop the extra kernel reference and reuse.
+		_dispatch_voucher_debug("kvoucher[0x%08x] find with 0x%08x", v, rkv,kv);
+		_voucher_dealloc_mach_voucher(rkv);
+		return v;
+	}
+	// Extract the libdispatch USER_DATA payload embedded in the voucher.
+	vr_size = sizeof(*vr) + _voucher_bits_size(_voucher_max_activities);
+	vr = alloca(vr_size);
+	if (rkv) {
+		kvr_size = (mach_voucher_attr_recipe_size_t)vr_size;
+		kr = mach_voucher_extract_attr_recipe(rkv,
+				MACH_VOUCHER_ATTR_KEY_USER_DATA, (void*)vr, &kvr_size);
+		DISPATCH_VERIFY_MIG(kr);
+		if (dispatch_assume_zero(kr)) kvr_size = 0;
+	}
+	mach_voucher_attr_content_size_t content_size = vr->content_size;
+	uint8_t *content = vr->content;
+	bool valid = false, has_priority = false;
+	unsigned int activities = 0;
+	// Payload layout: magic, optional priority, then activity ids.
+	if (kvr_size >= sizeof(*vr) + sizeof(_voucher_magic_t)) {
+		valid = (*(_voucher_magic_t*)content == _voucher_magic_v1);
+		content += sizeof(_voucher_magic_t);
+		content_size -= sizeof(_voucher_magic_t);
+	}
+	if (valid) {
+		has_priority = (content_size >= sizeof(_voucher_priority_t));
+		activities = has_priority ? (content_size - sizeof(_voucher_priority_t))
+				/ sizeof(voucher_activity_id_t) : 0;
+	}
+	pthread_priority_t priority = 0;
+	if (has_priority) {
+		priority = (pthread_priority_t)*(_voucher_priority_t*)content;
+		content += sizeof(_voucher_priority_t);
+		content_size -= sizeof(_voucher_priority_t);
+	}
+	voucher_activity_id_t va_id = 0, va_base_id = 0;
+	_voucher_activity_t act = NULL;
+	if (activities) {
+		va_id = *(voucher_activity_id_t*)content;
+		act = _voucher_activity_copy_from_mach_voucher(rkv, va_id);
+		if (!act && _voucher_activity_default) {
+			activities++;
+			// default to _voucher_activity_default base activity
+			va_base_id = _voucher_activity_default->va_id;
+		} else if (act && act->va_id != va_id) {
+			// Prepend the copied activity's own id as the base.
+			activities++;
+			va_base_id = act->va_id;
+		}
+	}
+	v = _voucher_alloc(activities, priority, 0);
+	v->v_activity = act;
+	voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
+	if (activities && va_base_id) {
+		*activity_ids++ = va_base_id;
+		activities--;
+	}
+	if (activities) {
+		memcpy(activity_ids, content, content_size);
+	}
+	v->v_ipc_kvoucher = v->v_kvoucher = rkv;
+	_voucher_insert(v);
+	_dispatch_voucher_debug("kvoucher[0x%08x] create with 0x%08x", v, rkv, kv);
+	return v;
+}
+
+// Create a voucher carrying `priority` and the given kernel voucher, copying
+// the extra recipes and activities of `ov`. Consumes the caller's reference
+// on kv; returns a retained ov unchanged when the priority already matches.
+voucher_t
+_voucher_create_with_priority_and_mach_voucher(voucher_t ov,
+		pthread_priority_t priority, mach_voucher_t kv)
+{
+	if (priority == _voucher_get_priority(ov)) {
+		if (kv) _voucher_dealloc_mach_voucher(kv);
+		return ov ? _voucher_retain(ov) : NULL;
+	}
+	// Dedupe against an existing wrapper for this kernel voucher.
+	voucher_t v = _voucher_find_and_retain(kv);
+	if (v) {
+		_dispatch_voucher_debug("kvoucher[0x%08x] find", v, kv);
+		_voucher_dealloc_mach_voucher(kv);
+		return v;
+	}
+	unsigned int activities = ov ? ov->v_activities : 0;
+	mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0;
+	v = _voucher_alloc(activities, priority, extra);
+	if (extra) {
+		memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra);
+	}
+	if (activities) {
+		if (ov->v_activity) {
+			v->v_activity = _voucher_activity_retain(ov->v_activity);
+		}
+		memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov),
+				activities * sizeof(voucher_activity_id_t));
+	}
+	if (kv) {
+		v->v_ipc_kvoucher = v->v_kvoucher = kv;
+		_voucher_insert(v);
+		_dispatch_voucher_debug("kvoucher[0x%08x] create with priority from "
+				"voucher[%p]", v, kv, ov);
+		_dispatch_voucher_debug_machport(kv);
+	} else if (ov && ov->v_kvoucher) {
+		// No new kernel voucher: borrow ov's via its base wrapper.
+		voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov;
+		v->v_kvbase = _voucher_retain(kvb);
+		v->v_kvoucher = kvb->v_kvoucher;
+	}
+	return v;
+}
+
+// Create a copy of `ov` with the importance attribute removed from its
+// kernel voucher, copying userspace contents (priority, activities, extra
+// recipes). Returns a retained ov when there is nothing to strip.
+voucher_t
+_voucher_create_without_importance(voucher_t ov)
+{
+	// Nothing to do unless the old voucher has a kernel voucher. If it
+	// doesn't, it can't have any importance, now or in the future.
+	if (!ov) return NULL;
+	// TODO: 17487167: track presence of importance attribute
+	if (!ov->v_kvoucher) return _voucher_retain(ov);
+	kern_return_t kr;
+	mach_voucher_t kv, okv;
+	// Copy kernel voucher, removing importance.
+	okv = ov->v_ipc_kvoucher ? ov->v_ipc_kvoucher : ov->v_kvoucher;
+	const mach_voucher_attr_recipe_data_t importance_remove_recipe[] = {
+		[0] = {
+			.key = MACH_VOUCHER_ATTR_KEY_ALL,
+			.command = MACH_VOUCHER_ATTR_COPY,
+			.previous_voucher = okv,
+		},
+		[1] = {
+			.key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
+			.command = MACH_VOUCHER_ATTR_REMOVE,
+		},
+	};
+	kr = _voucher_create_mach_voucher(importance_remove_recipe,
+			sizeof(importance_remove_recipe), &kv);
+	if (dispatch_assume_zero(kr) || !kv){
+		if (ov->v_ipc_kvoucher) return NULL;
+		kv = MACH_VOUCHER_NULL;
+	}
+	if (kv == okv) {
+		// Kernel returned the same voucher: no importance was present.
+		_voucher_dealloc_mach_voucher(kv);
+		return _voucher_retain(ov);
+	}
+	voucher_t v = _voucher_find_and_retain(kv);
+	if (v && ov->v_ipc_kvoucher) {
+		_dispatch_voucher_debug("kvoucher[0x%08x] find without importance "
+				"from voucher[%p]", v, kv, ov);
+		_voucher_dealloc_mach_voucher(kv);
+		return v;
+	}
+	voucher_t kvbase = v;
+	// Copy userspace contents
+	unsigned int activities = ov->v_activities;
+	pthread_priority_t priority = _voucher_get_priority(ov);
+	mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(ov);
+	v = _voucher_alloc(activities, priority, extra);
+	if (extra) {
+		memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra);
+	}
+	if (activities) {
+		if (ov->v_activity) {
+			v->v_activity = _voucher_activity_retain(ov->v_activity);
+		}
+		memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov),
+				activities * sizeof(voucher_activity_id_t));
+	}
+	v->v_kvoucher = kv;
+	if (ov->v_ipc_kvoucher) {
+		v->v_ipc_kvoucher = kv;
+		_voucher_insert(v);
+	} else if (kvbase) {
+		v->v_kvbase = kvbase;
+		_voucher_dealloc_mach_voucher(kv); // borrow base reference
+	}
+	if (!kvbase) {
+		_dispatch_voucher_debug("kvoucher[0x%08x] create without importance "
+				"from voucher[%p]", v, kv, ov);
+	}
+	return v;
+}
+
+/*
+ * Public entry point: wrap the voucher carried by a received mach message
+ * in a voucher object and trace the receive for activity logging.
+ */
+voucher_t
+voucher_create_with_mach_msg(mach_msg_header_t *msg)
+{
+	mach_voucher_t kv = _voucher_mach_msg_get(msg);
+	voucher_t v = _voucher_create_with_mach_voucher(kv);
+	_voucher_activity_trace_msg(v, msg, receive);
+	return v;
+}
+
+// Fallback definition for SDKs that predate this importance command.
+#ifndef MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL
+#define MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL 2
+#endif
+
+// CoreFoundation SPI (hence the 4CF suffix): drop one external importance
+// count on the voucher's kernel voucher. Underflow is a client-fatal error;
+// KERN_INVALID_ARGUMENT is tolerated (see the rdar TODO).
+void
+voucher_decrement_importance_count4CF(voucher_t v)
+{
+	if (!v || !v->v_kvoucher) return;
+	// TODO: 17487167: track presence of importance attribute
+	kern_return_t kr;
+	mach_voucher_t kv = v->v_ipc_kvoucher ? v->v_ipc_kvoucher : v->v_kvoucher;
+	uint32_t dec = 1;
+	mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&dec;
+	mach_voucher_attr_content_size_t kvc_in_size = sizeof(dec);
+	mach_voucher_attr_content_t kvc_out = NULL;
+	mach_voucher_attr_content_size_t kvc_out_size = 0;
+#if DISPATCH_DEBUG
+	// Debug builds also read back the resulting count for tracing.
+	uint32_t count = UINT32_MAX;
+	kvc_out = (mach_voucher_attr_content_t)&count;
+	kvc_out_size = sizeof(count);
+#endif
+	kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
+			MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL, kvc_in, kvc_in_size,
+			kvc_out, &kvc_out_size);
+	DISPATCH_VERIFY_MIG(kr);
+#if DISPATCH_DEBUG
+	_dispatch_voucher_debug("kvoucher[0x%08x] decrement importance count to %u:"
+			" %s - 0x%x", v, kv, count, mach_error_string(kr), kr);
+#endif
+	if (kr != KERN_INVALID_ARGUMENT &&
+			dispatch_assume_zero(kr) == KERN_FAILURE) {
+		// TODO: 17487167: skip KERN_INVALID_ARGUMENT check
+		DISPATCH_CLIENT_CRASH("Voucher importance count underflow");
+	}
+}
+
+#if VOUCHER_ENABLE_GET_MACH_VOUCHER
+// Optional public accessor for the kernel voucher backing a voucher object.
+mach_voucher_t
+voucher_get_mach_voucher(voucher_t voucher)
+{
+	return _voucher_get_mach_voucher(voucher);
+}
+#endif
+
+// Called when the last external reference is dropped: unpublish from the
+// global hash, then drop the internal reference (which may trigger
+// _voucher_dispose).
+void
+_voucher_xref_dispose(voucher_t voucher)
+{
+	_dispatch_voucher_debug("xref_dispose", voucher);
+	_voucher_remove(voucher);
+	return _os_object_release_internal_inline((_os_object_t)voucher);
+}
+
+// Final teardown when the internal refcount drops: release kernel voucher
+// references (respecting the single-reference case where v_ipc_kvoucher ==
+// v_kvoucher and the borrowed-base case), release base/activity refs,
+// scrub fields, and free the object.
+void
+_voucher_dispose(voucher_t voucher)
+{
+	_dispatch_voucher_debug("dispose", voucher);
+	// Must have been removed from the global hash by now.
+	if (slowpath(_TAILQ_IS_ENQUEUED(voucher, v_list))) {
+		_dispatch_voucher_debug("corruption", voucher);
+		DISPATCH_CRASH("Voucher corruption");
+	}
+	voucher->v_list.tqe_next = DISPATCH_OBJECT_LISTLESS;
+	if (voucher->v_ipc_kvoucher) {
+		if (voucher->v_ipc_kvoucher != voucher->v_kvoucher) {
+			_voucher_dealloc_mach_voucher(voucher->v_ipc_kvoucher);
+		}
+		voucher->v_ipc_kvoucher = MACH_VOUCHER_NULL;
+	}
+	if (voucher->v_kvoucher) {
+		// A kvbase-owned kernel voucher is released via the base object.
+		if (!voucher->v_kvbase) {
+			_voucher_dealloc_mach_voucher(voucher->v_kvoucher);
+		}
+		voucher->v_kvoucher = MACH_VOUCHER_NULL;
+	}
+	if (voucher->v_kvbase) {
+		_voucher_release(voucher->v_kvbase);
+		voucher->v_kvbase = NULL;
+	}
+	if (voucher->v_activity) {
+		_voucher_activity_release(voucher->v_activity);
+		voucher->v_activity = NULL;
+	}
+	voucher->v_has_priority= 0;
+	voucher->v_activities = 0;
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+	voucher->v_recipe_extra_size = 0;
+	voucher->v_recipe_extra_offset = 0;
+#endif
+	return _os_object_dealloc((_os_object_t)voucher);
+}
+
+// Post-fork(2) child-side reset: clear per-thread voucher state and the
+// task mach voucher (kernel vouchers do not survive into the child).
+void
+_voucher_atfork_child(void)
+{
+	_voucher_activity_atfork_child();
+	_dispatch_thread_setspecific(dispatch_voucher_key, NULL);
+	_voucher_task_mach_voucher_pred = 0;
+	_voucher_task_mach_voucher = MACH_VOUCHER_NULL;
+
+	// TODO: voucher/activity inheritance on fork ?
+}
+
+#pragma mark -
+#pragma mark _voucher_init
+
+// Attach the current thread's voucher to an outgoing mach message; returns
+// whether a voucher was set (and hence must be cleared after send). Traces
+// the send only when a voucher was actually attached.
+boolean_t
+voucher_mach_msg_set(mach_msg_header_t *msg)
+{
+	voucher_t v = _voucher_get();
+	bool clear_voucher = _voucher_mach_msg_set(msg, v);
+	if (clear_voucher) _voucher_activity_trace_msg(v, msg, send);
+	return clear_voucher;
+}
+
+// Clear the voucher from a mach message unconditionally (move_send=false);
+// the result of the internal helper is deliberately ignored.
+void
+voucher_mach_msg_clear(mach_msg_header_t *msg)
+{
+	(void)_voucher_mach_msg_clear(msg, false);
+}
+
+// Adopt the voucher from a received mach message on the current thread.
+// Returns the previous voucher as opaque state for voucher_mach_msg_revert,
+// or VOUCHER_MACH_MSG_STATE_UNCHANGED when the message carried no voucher.
+voucher_mach_msg_state_t
+voucher_mach_msg_adopt(mach_msg_header_t *msg)
+{
+	mach_voucher_t kv = _voucher_mach_msg_get(msg);
+	if (!kv) return VOUCHER_MACH_MSG_STATE_UNCHANGED;
+	voucher_t v = _voucher_create_with_mach_voucher(kv);
+	_voucher_activity_trace_msg(v, msg, receive);
+	return (voucher_mach_msg_state_t)_voucher_adopt(v);
+}
+
+/*
+ * Undo a previous voucher_mach_msg_adopt by reinstalling the saved voucher
+ * state; a no-op when the adopt left the thread voucher unchanged.
+ */
+void
+voucher_mach_msg_revert(voucher_mach_msg_state_t state)
+{
+	if (state != VOUCHER_MACH_MSG_STATE_UNCHANGED) {
+		_voucher_replace((voucher_t)state);
+	}
+}
+
+#if DISPATCH_USE_LIBKERNEL_VOUCHER_INIT
+#include <_libkernel_init.h>
+
+// Table of voucher entry points registered with libkernel so that mach_msg
+// can manage vouchers on the process's behalf.
+static const struct _libkernel_voucher_functions _voucher_libkernel_functions =
+{
+	.version = 1,
+	.voucher_mach_msg_set = voucher_mach_msg_set,
+	.voucher_mach_msg_clear = voucher_mach_msg_clear,
+	.voucher_mach_msg_adopt = voucher_mach_msg_adopt,
+	.voucher_mach_msg_revert = voucher_mach_msg_revert,
+};
+
+// Register the table; registration failure is a startup assertion.
+static void
+_voucher_libkernel_init(void)
+{
+	kern_return_t kr = __libkernel_voucher_init(&_voucher_libkernel_functions);
+	dispatch_assert(!kr);
+}
+#else
+#define _voucher_libkernel_init()
+#endif
+
+// Library startup: register with libkernel, initialize the voucher hash,
+// select the activity mode (environment-overridable), and apply the
+// ATM-related environment tunables. Returns early when activities are off.
+void
+_voucher_init(void)
+{
+	_voucher_libkernel_init();
+	char *e, *end;
+	unsigned int i;
+	for (i = 0; i < VL_HASH_SIZE; i++) {
+		TAILQ_INIT(&_vouchers[i]);
+	}
+	// Debug builds default to debug mode; OS_ACTIVITY_MODE overrides.
+	voucher_activity_mode_t mode;
+	mode = DISPATCH_DEBUG ? voucher_activity_mode_debug
+			: voucher_activity_mode_release;
+	e = getenv("OS_ACTIVITY_MODE");
+	if (e) {
+		if (strcmp(e, "release") == 0) {
+			mode = voucher_activity_mode_release;
+		} else if (strcmp(e, "debug") == 0) {
+			mode = voucher_activity_mode_debug;
+		} else if (strcmp(e, "stream") == 0) {
+			mode = voucher_activity_mode_stream;
+		} else if (strcmp(e, "disable") == 0) {
+			mode = voucher_activity_mode_disable;
+		}
+	}
+	_voucher_activity_mode = mode;
+	if (_voucher_activity_disabled()) return;
+
+	// Tunables: ATM subid width and collection threshold (strtoul with
+	// full-string validation; zero or trailing garbage is rejected).
+	e = getenv("LIBDISPATCH_ACTIVITY_ATM_SUBID_BITS");
+	if (e) {
+		unsigned long v = strtoul(e, &end, 0);
+		if (v && !*end) {
+			_voucher_atm_subid_bits = v;
+		}
+	}
+	e = getenv("LIBDISPATCH_ACTIVITY_ATM_COLLECT_THRESHOLD");
+	if (e) {
+		unsigned long v = strtoul(e, &end, 0);
+		if (v && v < LONG_MAX/2 && !*end) {
+			_voucher_atm_collect_threshold =
+					VATM_COLLECT_THRESHOLD_VALUE((long)v);
+		}
+	}
+	// default task activity
+	bool default_task_activity = DISPATCH_DEBUG;
+	e = getenv("LIBDISPATCH_DEFAULT_TASK_ACTIVITY");
+	if (e) default_task_activity = atoi(e);
+	if (default_task_activity) {
+		(void)voucher_activity_start(_voucher_activity_trace_id_release, 0);
+	}
+}
+
+#pragma mark -
+#pragma mark _voucher_activity_lock_s
+
+// Reset an activity lock to its unlocked (handoff) initial state.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_lock_init(_voucher_activity_lock_s *lock) {
+	static const os_lock_handoff_s _unlocked_state = OS_LOCK_HANDOFF_INIT;
+	*lock = _unlocked_state;
+}
+
+// Acquire an activity lock (plain forwarding wrapper around os_lock_lock).
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_lock_lock(_voucher_activity_lock_s *lock) {
+	os_lock_lock(lock);
+}
+
+// Release an activity lock (plain forwarding wrapper around os_lock_unlock).
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_lock_unlock(_voucher_activity_lock_s *lock) {
+	os_lock_unlock(lock);
+}
+
+#pragma mark -
+#pragma mark _voucher_activity_heap
+
+// Compile-time layout checks for the activity heap structures: tracepoint,
+// buffer header, activity object, ATM object, and metadata padding/alignment
+// invariants that the shared-memory layout depends on.
+#if __has_extension(c_static_assert)
+_Static_assert(sizeof(struct _voucher_activity_tracepoint_s) == 64,
+		"Tracepoint too large");
+_Static_assert(sizeof(struct _voucher_activity_buffer_header_s) <=
+		sizeof(struct _voucher_activity_tracepoint_s),
+		"Buffer header too large");
+_Static_assert(offsetof(struct _voucher_activity_s, va_flags2) ==
+		sizeof(struct _voucher_activity_tracepoint_s),
+		"Extended activity object misaligned");
+#if __LP64__
+_Static_assert(sizeof(struct _voucher_activity_s) ==
+		3 * sizeof(struct _voucher_activity_tracepoint_s),
+		"Activity object too large");
+_Static_assert(offsetof(struct _voucher_activity_s, va_flags3) ==
+		2 * sizeof(struct _voucher_activity_tracepoint_s),
+		"Extended activity object misaligned");
+_Static_assert(offsetof(struct _voucher_atm_s, vatm_activities_lock) % 64 == 0,
+		"Bad ATM padding");
+_Static_assert(sizeof(struct _voucher_atm_s) <= 128,
+		"ATM too large");
+#else
+_Static_assert(sizeof(struct _voucher_activity_s) ==
+		2 * sizeof(struct _voucher_activity_tracepoint_s),
+		"Activity object too large");
+_Static_assert(sizeof(struct _voucher_atm_s) <= 64,
+		"ATM too large");
+#endif
+_Static_assert(sizeof(_voucher_activity_buffer_t) ==
+		sizeof(struct {char x[_voucher_activity_buffer_size];}),
+		"Buffer too large");
+_Static_assert(sizeof(struct _voucher_activity_metadata_s) <=
+		sizeof(struct _voucher_activity_metadata_opaque_s),
+		"Metadata too large");
+_Static_assert(sizeof(_voucher_activity_bitmap_t) % 64 == 0,
+		"Bad metadata bitmap size");
+_Static_assert(offsetof(struct _voucher_activity_metadata_s,
+		vam_atm_mbox_bitmap) % 64 == 0,
+		"Bad metadata padding");
+_Static_assert(offsetof(struct _voucher_activity_metadata_s,
+		vam_base_atm_subid) % 64 == 0,
+		"Bad metadata padding");
+_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_base_atm_lock)
+		% 32 == 0,
+		"Bad metadata padding");
+_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_atms) % 64 ==0,
+		"Bad metadata padding");
+_Static_assert(sizeof(_voucher_activity_bitmap_t) * 8 *
+		sizeof(atm_mailbox_offset_t) <=
+		sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata),
+		"Bad kernel metadata bitmap");
+_Static_assert(sizeof(atm_mailbox_offset_t) == 2 * sizeof(atm_subaid32_t),
+		"Bad kernel ATM mailbox sizes");
+#endif
+
+// Number of ATM mailboxes that fit in the kernel metadata area.
+static const size_t _voucher_atm_mailboxes =
+		sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata) /
+		sizeof(atm_mailbox_offset_t);
+
+// Accessor shorthands into the activity/ATM structures and the global
+// activity heap (_voucher_activity_heap).
+#define va_buffers_lock(va) (&(va)->va_buffers_lock)
+#define vatm_activities_lock(vatm) (&(vatm)->vatm_activities_lock)
+#define vatm_activities(vatm) (&(vatm)->vatm_activities)
+#define vatm_used_activities(vatm) (&(vatm)->vatm_used_activities)
+#define vam_base_atm_lock() (&_voucher_activity_heap->vam_base_atm_lock)
+#define vam_nested_atm_lock() (&_voucher_activity_heap->vam_nested_atm_lock)
+#define vam_atms_lock() (&_voucher_activity_heap->vam_atms_lock)
+#define vam_activities_lock() (&_voucher_activity_heap->vam_activities_lock)
+#define vam_atms(hash) (&_voucher_activity_heap->vam_atms[hash])
+#define vam_activities(hash) (&_voucher_activity_heap->vam_activities[hash])
+#define vam_buffer_bitmap() (_voucher_activity_heap->vam_buffer_bitmap)
+#define vam_atm_mbox_bitmap() (_voucher_activity_heap->vam_atm_mbox_bitmap)
+#define vam_pressure_locked_bitmap() \
+		(_voucher_activity_heap->vam_pressure_locked_bitmap)
+#define vam_buffer(i) ((void*)((char*)_voucher_activity_heap + \
+		(i) * _voucher_activity_buffer_size))
+
+// Forward declarations for helpers defined later in this file.
+static _voucher_activity_t _voucher_activity_create_with_atm(
+		_voucher_atm_t vatm, voucher_activity_id_t va_id,
+		voucher_activity_trace_id_t trace_id, uint64_t location,
+		_voucher_activity_buffer_header_t buffer);
+static _voucher_atm_t _voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id);
+static voucher_activity_id_t _voucher_atm_nested_atm_id_make(void);
+
+// Number of *additional* buffers the default activity may allocate (hence
+// the - 1 below). High-profile modes return from inside the switch; the
+// other modes intentionally fall through to the low-profile constants.
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_voucher_default_activity_buffer_limit()
+{
+	switch (_voucher_activity_mode) {
+	case voucher_activity_mode_debug:
+	case voucher_activity_mode_stream:
+		// High-profile modes: Default activity can use 1/32nd of the heap
+		// (twice as much as non-default activities)
+		return MAX(_voucher_activity_buffers_per_heap / 32, 3) - 1;
+	}
+#if TARGET_OS_EMBEDDED
+	// Low-profile modes: Default activity can use a total of 3 buffers.
+	return 2;
+#else
+	// Low-profile modes: Default activity can use a total of 8 buffers.
+	return 7;
+#endif
+}
+
+// Number of *additional* buffers a non-default activity may allocate
+// (hence the - 1). Same switch/fallthrough structure as the default-activity
+// limit above.
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_voucher_activity_buffer_limit()
+{
+	switch (_voucher_activity_mode) {
+	case voucher_activity_mode_debug:
+	case voucher_activity_mode_stream:
+		// High-profile modes: 64 activities, each of which can use 1/64th
+		// of the entire heap.
+		return MAX(_voucher_activity_buffers_per_heap / 64, 2) - 1;
+	}
+#if TARGET_OS_EMBEDDED
+	// Low-profile modes: Each activity can use a total of 2 buffers.
+	return 1;
+#else
+	// Low-profile modes: Each activity can use a total of 4 buffers.
+	return 3;
+#endif
+}
+
+// The two functions above return the number of *additional* buffers activities
+// may allocate, hence the gymnastics with - 1.
+
+// Total number of heap buffers usable in the current activity mode.
+DISPATCH_ALWAYS_INLINE
+static inline uint32_t
+_voucher_heap_buffer_limit()
+{
+	switch (_voucher_activity_mode) {
+	case voucher_activity_mode_debug:
+	case voucher_activity_mode_stream:
+		// High-profile modes: Use it all.
+		return _voucher_activity_buffers_per_heap;
+	}
+#if TARGET_OS_EMBEDDED
+	// Low-profile modes: 3 activities, each of which can use 2 buffers;
+	// plus the default activity, which can use 3; plus 3 buffers of overhead.
+	return 12;
+#else
+	// Low-profile modes: 13 activities, each of which can use 4 buffers;
+	// plus the default activity, which can use 8; plus 3 buffers of overhead.
+	return 64;
+#endif
+}
+
+// Sentinel returned when every candidate bit was already set.
+#define NO_BITS_WERE_UNSET (UINT_MAX)
+
+// Atomically claim the first unset bit in the bitmap at an index strictly
+// below max_index; returns the claimed global bit index, or
+// NO_BITS_WERE_UNSET when all candidate bits are taken.
+DISPATCH_ALWAYS_INLINE
+static inline size_t
+_voucher_activity_bitmap_set_first_unset_bit_upto(
+		_voucher_activity_bitmap_t volatile bitmap,
+		unsigned int max_index)
+{
+	dispatch_assert(max_index != 0);
+	unsigned int index = NO_BITS_WERE_UNSET, max_map, max_bit, i;
+	// Split the limit into (full words, residual bits in the last word).
+	max_map = max_index / _voucher_activity_bits_per_bitmap_base_t;
+	max_map = MIN(max_map, _voucher_activity_bitmaps_per_heap - 1);
+	max_bit = max_index % _voucher_activity_bits_per_bitmap_base_t;
+	for (i = 0; i < max_map; i++) {
+		index = dispatch_atomic_set_first_bit(&bitmap[i], UINT_MAX);
+		if (fastpath(index < NO_BITS_WERE_UNSET)) {
+			return index + i * _voucher_activity_bits_per_bitmap_base_t;
+		}
+	}
+	// Last (possibly partial) word is capped at max_bit.
+	index = dispatch_atomic_set_first_bit(&bitmap[i], max_bit);
+	if (fastpath(index < NO_BITS_WERE_UNSET)) {
+		return index + i * _voucher_activity_bits_per_bitmap_base_t;
+	}
+	return index;
+}
+
+// Claim the first unset bit anywhere in the bitmap (uncapped scan).
+DISPATCH_ALWAYS_INLINE
+static inline size_t
+_voucher_activity_bitmap_set_first_unset_bit(
+		_voucher_activity_bitmap_t volatile bitmap)
+{
+	size_t claimed = _voucher_activity_bitmap_set_first_unset_bit_upto(bitmap,
+			UINT_MAX);
+	return claimed;
+}
+
+
+// Release a previously claimed bit. The bit must currently be set (the
+// caller holds it exclusively); clearing uses release ordering so prior
+// writes to the freed buffer are visible to the next claimant.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_bitmap_clear_bit(
+		_voucher_activity_bitmap_t volatile bitmap, size_t index)
+{
+	size_t i = index / _voucher_activity_bits_per_bitmap_base_t;
+	_voucher_activity_bitmap_base_t mask = ((typeof(mask))1) <<
+			(index % _voucher_activity_bits_per_bitmap_base_t);
+	if (slowpath((bitmap[i] & mask) == 0)) {
+		DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
+	}
+	(void)dispatch_atomic_and(&bitmap[i], ~mask, release);
+}
+
+// The shared activity heap and the once-predicate guarding its creation.
+_voucher_activity_metadata_t _voucher_activity_heap;
+static dispatch_once_t _voucher_activity_heap_pred;
+
+// Once-handler: map the activity heap (VM_INHERIT_NONE, genealogy-tagged),
+// register it with the kernel via TASK_TRACE_MEMORY_INFO, initialize the
+// metadata structures, and create the default ATM/activity. Any unrecoverable
+// failure disables activity mode instead of crashing.
+static void
+_voucher_activity_heap_init(void *ctxt DISPATCH_UNUSED)
+{
+	if (_voucher_activity_disabled()) return;
+	kern_return_t kr;
+	mach_vm_size_t vm_size = _voucher_activity_buffer_size *
+			_voucher_activity_buffers_per_heap;
+	mach_vm_address_t vm_addr = vm_page_size;
+	// Retry on transient KERN_NO_SPACE; any other error disables activities.
+	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
+			0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_GENEALOGY),
+			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
+			VM_INHERIT_NONE))) {
+		if (kr != KERN_NO_SPACE) {
+			(void)dispatch_assume_zero(kr);
+			_voucher_activity_mode = voucher_activity_mode_disable;
+			return;
+		}
+		_dispatch_temporary_resource_shortage();
+		vm_addr = vm_page_size;
+	}
+	_voucher_activity_metadata_t heap;
+	task_trace_memory_info_data_t trace_memory_info = {
+		.user_memory_address = vm_addr,
+		.buffer_size = vm_size,
+		.mailbox_array_size = sizeof(heap->vam_kernel_metadata),
+	};
+	kr = task_set_info(mach_task_self(), TASK_TRACE_MEMORY_INFO,
+			(task_info_t)&trace_memory_info, TASK_TRACE_MEMORY_INFO_COUNT);
+	DISPATCH_VERIFY_MIG(kr);
+	if (kr) {
+		// Kernel rejected the region: unmap and disable activities.
+		if (kr != KERN_NOT_SUPPORTED) (void)dispatch_assume_zero(kr);
+		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
+		(void)dispatch_assume_zero(kr);
+		_voucher_activity_mode = voucher_activity_mode_disable;
+		return;
+	}
+	heap = (void*)vm_addr;
+	heap->vam_self_metadata.vasm_baseaddr = (void*)vm_addr;
+	heap->vam_buffer_bitmap[0] = 0xf; // first four buffers are reserved
+	uint32_t i;
+	for (i = 0; i < _voucher_activity_hash_size; i++) {
+		TAILQ_INIT(&heap->vam_activities[i]);
+		TAILQ_INIT(&heap->vam_atms[i]);
+	}
+	// Cap the ATM subid range if the environment tunable narrowed it.
+	uint32_t subid_max = VATM_SUBID_MAX;
+	if (_voucher_atm_subid_bits &&
+			_voucher_atm_subid_bits < VATM_SUBID_MAXBITS) {
+		subid_max = MIN(VATM_SUBID_BITS2MAX(_voucher_atm_subid_bits),
+				VATM_SUBID_MAX);
+	}
+	heap->vam_base_atm_subid_max = subid_max;
+	_voucher_activity_lock_init(&heap->vam_base_atm_lock);
+	_voucher_activity_lock_init(&heap->vam_nested_atm_lock);
+	_voucher_activity_lock_init(&heap->vam_atms_lock);
+	_voucher_activity_lock_init(&heap->vam_activities_lock);
+	_voucher_activity_heap = heap;
+
+	_voucher_atm_t vatm = _voucher_atm_create(0, 0);
+	dispatch_assert(vatm->vatm_kvoucher);
+	heap->vam_default_activity_atm = vatm;
+	_voucher_activity_buffer_header_t buffer = vam_buffer(3); // reserved index
+	// consumes vatm reference:
+	_voucher_activity_t va = _voucher_activity_create_with_atm(vatm,
+			VATM_ACTID(vatm, _voucher_default_activity_subid), 0, 0, buffer);
+	dispatch_assert(va);
+	va->va_buffer_limit = _voucher_default_activity_buffer_limit();
+	_voucher_activity_default = va;
+	heap->vam_base_atm = _voucher_atm_create(0, 0);
+	heap->vam_nested_atm_id = _voucher_atm_nested_atm_id_make();
+}
+
+// Post-fork child-side reset of activity state; the heap mapping itself is
+// gone in the child because it was created with VM_INHERIT_NONE.
+static void
+_voucher_activity_atfork_child(void)
+{
+	_voucher_activity_heap_pred = 0;
+	_voucher_activity_heap = NULL; // activity heap is VM_INHERIT_NONE
+	_voucher_activity_default = NULL;
+}
+
+// Public accessor for the client metadata area of the activity heap,
+// lazily initializing the heap. Yields NULL/0 when activities are disabled.
+void*
+voucher_activity_get_metadata_buffer(size_t *length)
+{
+	dispatch_once_f(&_voucher_activity_heap_pred, NULL,
+			_voucher_activity_heap_init);
+	if (_voucher_activity_disabled()) {
+		*length = 0;
+		return NULL;
+	}
+	*length = sizeof(_voucher_activity_heap->vam_client_metadata);
+	return _voucher_activity_heap->vam_client_metadata;
+}
+
+// Claim one free buffer from the heap via the buffer bitmap, respecting the
+// mode-dependent buffer limit. Returns NULL when the heap is exhausted.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_buffer_header_t
+_voucher_activity_heap_buffer_alloc(void)
+{
+	_voucher_activity_buffer_header_t buffer = NULL;
+	size_t index;
+	index = _voucher_activity_bitmap_set_first_unset_bit_upto(
+			vam_buffer_bitmap(), _voucher_heap_buffer_limit() - 1);
+	if (index < NO_BITS_WERE_UNSET) {
+		buffer = vam_buffer(index);
+	}
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+	_dispatch_debug("activity heap alloc %zd (%p)", index, buffer);
+#endif
+	return buffer;
+}
+
+// Return a buffer to the heap: mark it empty, derive its index from its
+// offset within the heap, and clear the corresponding bitmap bit.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_heap_buffer_free(_voucher_activity_buffer_header_t buffer)
+{
+	buffer->vabh_flags = _voucher_activity_trace_flag_buffer_empty;
+	size_t index = (size_t)((char*)buffer - (char*)_voucher_activity_heap) /
+			_voucher_activity_buffer_size;
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+	_dispatch_debug("activity heap free %zd (%p)", index, buffer);
+#endif
+	_voucher_activity_bitmap_clear_bit(vam_buffer_bitmap(), index);
+}
+
+// madvise only works at page granularity, so it is only safe when buffers
+// are exactly one page. <rdar://17445544>
+#define _voucher_activity_heap_can_madvise() \
+		(PAGE_SIZE == _voucher_activity_buffer_size) // <rdar://17445544>
+
+// MADV_FREE a run of `len` buffers starting at bit `start` of bitmap word
+// `bitmap_num`. Debug builds first poison the buffer bodies (past the flags
+// word) with a recognizable pattern to catch use-after-free.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_heap_madvise(size_t bitmap_num, unsigned int start,
+		unsigned int len)
+{
+	size_t base = bitmap_num * _voucher_activity_bits_per_bitmap_base_t;
+#if DISPATCH_DEBUG
+#if DISPATCH_VOUCHER_ACTIVITY_DEBUG
+	_dispatch_debug("activity heap madvise %zd (%p) -> %zd (%p)", base + start,
+			vam_buffer(base + start), base + start + len,
+			vam_buffer(base + start + len));
+#endif
+	dispatch_assert(!(len * _voucher_activity_buffer_size % vm_page_size));
+	const uint64_t pattern = 0xFACEFACEFACEFACE;
+	_voucher_activity_buffer_header_t buffer = vam_buffer(base + start);
+	for (unsigned int i = 0; i < len; i++, buffer++) {
+		memset_pattern8((char*)buffer + sizeof(buffer->vabh_flags), &pattern,
+				_voucher_activity_buffer_size - sizeof(buffer->vabh_flags));
+	}
+#endif
+	(void)dispatch_assume_zero(madvise(vam_buffer(base + start),
+			len * _voucher_activity_buffer_size, MADV_FREE));
+}
+
+// madvise every run of contiguous set bits in `bits` (1 == buffer to free).
+// Fast-paths the all-zero and all-ones words, otherwise peels off one
+// contiguous run per iteration: ctz(bits) finds the run start, ctz of the
+// inverted, shifted word finds the run length.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_heap_madvise_contiguous(size_t bitmap_num,
+ _voucher_activity_bitmap_base_t bits)
+{
+ // TODO: x86 has fast ctz; arm has fast clz; haswell has fast ctz
+ dispatch_assert(_voucher_activity_heap_can_madvise());
+ if (bits == 0) {
+ return;
+ } else if (~bits == 0) {
+ _voucher_activity_heap_madvise(bitmap_num, 0,
+ _voucher_activity_bits_per_bitmap_base_t);
+ } else while (bits != 0) {
+ unsigned int start = (typeof(start))__builtin_ctzl(bits), len;
+ typeof(bits) inverse = ~bits >> start;
+ if (inverse) {
+ len = (typeof(len))__builtin_ctzl(inverse);
+ } else {
+ // run extends to the top of the word
+ len = _voucher_activity_bits_per_bitmap_base_t - start;
+ }
+ typeof(bits) mask = ((((typeof(bits))1) << len) - 1) << start;
+ bits &= ~mask;
+ _voucher_activity_heap_madvise(bitmap_num, start, len);
+ }
+}
+
+// Memory-pressure warning handler: atomically claim every currently-free
+// buffer by setting its bit in the allocation bitmap (so the allocator can
+// no longer hand it out), madvise the claimed ranges back to the kernel, and
+// record what was claimed in the pressure-locked bitmap so the buffers can
+// be returned to the pool when pressure subsides.
+void
+_voucher_activity_heap_pressure_warn(void)
+{
+ if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) {
+ return;
+ }
+ volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap;
+ bitmap = vam_buffer_bitmap();
+ pressure_locked_bitmap = vam_pressure_locked_bitmap();
+
+ // number of bitmaps needed to map the current buffer limit =
+ // ceil(buffer limit / bits per bitmap)
+ size_t nbuffers = _voucher_heap_buffer_limit();
+ size_t nbitmaps_quot = nbuffers / _voucher_activity_bits_per_bitmap_base_t;
+ size_t nbitmaps_rem = nbuffers % _voucher_activity_bits_per_bitmap_base_t;
+ size_t nbitmaps = nbitmaps_quot + ((nbitmaps_rem == 0) ? 0 : 1);
+
+ for (size_t i = 0; i < nbitmaps; i++) {
+ _voucher_activity_bitmap_base_t got_bits;
+ got_bits = dispatch_atomic_or_orig(&bitmap[i], ~((typeof(bitmap[i]))0),
+ relaxed);
+ got_bits = ~got_bits; // Now 1 means 'acquired this one, madvise it'
+ _voucher_activity_heap_madvise_contiguous(i, got_bits);
+ pressure_locked_bitmap[i] |= got_bits;
+ }
+}
+
+// Memory pressure returned to normal: give the buffers that were claimed by
+// _voucher_activity_heap_pressure_warn back to the allocator by clearing
+// their bits in the allocation bitmap, and reset the pressure-locked bitmap.
+void
+_voucher_activity_heap_pressure_normal(void)
+{
+ if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) {
+ return;
+ }
+ volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap;
+ bitmap = vam_buffer_bitmap();
+ pressure_locked_bitmap = vam_pressure_locked_bitmap();
+ for (size_t i = 0; i < _voucher_activity_bitmaps_per_heap; i++) {
+ _voucher_activity_bitmap_base_t free_bits = pressure_locked_bitmap[i];
+ pressure_locked_bitmap[i] = 0;
+ if (free_bits != 0) {
+ (void)dispatch_atomic_and(&bitmap[i], ~free_bits, release);
+ }
+ }
+}
+
+// (Re)initialize a buffer as `act`'s trace buffer. A brand-new buffer also
+// gets its header flags and owning activity id stamped; a reused one only
+// resets the timestamp, write index (slot 0 is the header itself) and takes
+// the next sequence number from the activity's monotonic counter.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_buffer_init(_voucher_activity_t act,
+ _voucher_activity_buffer_header_t buffer, bool reuse)
+{
+ if (!reuse) {
+ buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header;
+ buffer->vabh_activity_id = act->va_id;
+ }
+ buffer->vabh_timestamp = _voucher_activity_timestamp();
+ buffer->vabh_next_tracepoint_idx = 1;
+ buffer->vabh_sequence_no = dispatch_atomic_inc2o(act, va_max_sequence_no,
+ relaxed);
+}
+
+// Slow path of buffer allocation: the activity is at its buffer limit, so
+// under the activity's buffer lock try to recycle the oldest buffer from its
+// buffer list instead of taking a fresh one from the heap. Returns the
+// activity's (possibly unchanged) current buffer.
+// NOTE: fixed mis-encoded "&current" (was garbled to the HTML entity
+// sequence "¤t" by a broken text conversion), which did not compile.
+static _voucher_activity_buffer_header_t
+_voucher_activity_buffer_alloc_slow(_voucher_activity_t act,
+ _voucher_activity_buffer_header_t current)
+{
+ _voucher_activity_buffer_header_t buffer;
+ _voucher_activity_lock_lock(va_buffers_lock(act)); // TODO: revisit locking
+ buffer = act->va_current_buffer;
+ if (buffer != current) goto out; // raced: someone already swapped it
+ buffer = TAILQ_FIRST(&act->va_buffers);
+ if (buffer) {
+ _voucher_activity_buffer_init(act, buffer, true);
+ if (buffer != TAILQ_LAST(&act->va_buffers,
+ _voucher_activity_buffer_list_s)) {
+ // move the reused buffer to the tail so reuse is round-robin
+ TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
+ TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
+ }
+ }
+ if (!dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
+ &current, release)) {
+ // lost the publish race: return the recycled buffer to the heap
+ if (buffer) {
+ TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
+ _voucher_activity_heap_buffer_free(buffer);
+ }
+ buffer = current;
+ }
+out:
+ _voucher_activity_lock_unlock(va_buffers_lock(act));
+ _dispatch_voucher_activity_debug("buffer reuse %p", act, buffer);
+ return buffer;
+}
+
+// Allocate and publish a new current buffer for `act`. While under the
+// per-activity buffer limit, take a fresh buffer from the heap; otherwise
+// (or when the heap is exhausted) fall back to recycling via the slow path.
+// NOTE: fixed mis-encoded "&current" (was garbled to the HTML entity
+// sequence "¤t" by a broken text conversion), which did not compile.
+static _voucher_activity_buffer_header_t
+_voucher_activity_buffer_alloc(_voucher_activity_t act,
+ _voucher_activity_buffer_header_t current)
+{
+ _voucher_activity_buffer_header_t buffer = NULL;
+ if (act->va_max_sequence_no < act->va_buffer_limit) {
+ buffer = _voucher_activity_heap_buffer_alloc();
+ }
+ if (!buffer) return _voucher_activity_buffer_alloc_slow(act, current);
+ _voucher_activity_buffer_init(act, buffer, false);
+ if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
+ &current, release)) {
+ _voucher_activity_lock_lock(va_buffers_lock(act));
+ TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
+ _voucher_activity_lock_unlock(va_buffers_lock(act));
+ } else {
+ // lost the publish race: discard the freshly allocated buffer
+ _voucher_activity_heap_buffer_free(buffer);
+ buffer = current;
+ }
+ _dispatch_voucher_activity_debug("buffer alloc %p", act, buffer);
+ return buffer;
+}
+
+#pragma mark -
+#pragma mark _voucher_activity_t
+
+// Insert `_act` into the TAILQ at `head`, keeping the list sorted by
+// ascending va_id: walk until the first element with a larger id and insert
+// before it, or append at the tail if none is larger.
+#define _voucher_activity_ordered_insert(_act, head, field) do { \
+ typeof(_act) _vai; \
+ TAILQ_FOREACH(_vai, (head), field) { \
+ if (_act->va_id < _vai->va_id) break; \
+ } \
+ if (_vai) { \
+ TAILQ_INSERT_BEFORE(_vai, _act, field); \
+ } else { \
+ TAILQ_INSERT_TAIL((head), _act, field); \
+ } } while (0);
+
+static void _voucher_activity_dispose(_voucher_activity_t act);
+static _voucher_activity_t _voucher_atm_activity_mark_used(
+ _voucher_activity_t act);
+static void _voucher_atm_activity_mark_unused(_voucher_activity_t act);
+static _voucher_atm_t _voucher_atm_copy(atm_aid_t atm_id);
+static inline void _voucher_atm_release(_voucher_atm_t vatm);
+static void _voucher_atm_activity_insert(_voucher_atm_t vatm,
+ _voucher_activity_t act);
+static void _voucher_atm_activity_remove(_voucher_activity_t act);
+static atm_aid_t _voucher_mach_voucher_get_atm_id(mach_voucher_t kv);
+
+// Take a use-count reference on an activity. Counts are biased by one: a
+// stored value of 0 means one outstanding reference. Returns true when the
+// activity was resurrected from a fully-released (negative) state, so the
+// caller must re-mark it as used in its ATM.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_activity_copy(_voucher_activity_t act)
+{
+ int use_cnt = dispatch_atomic_inc2o(act, va_use_count, relaxed);
+ _dispatch_voucher_activity_debug("retain -> %d", act, use_cnt + 1);
+ if (slowpath(use_cnt < 0)) {
+ _dispatch_voucher_activity_debug("overrelease", act);
+ DISPATCH_CRASH("Activity overrelease");
+ }
+ return (use_cnt == 0);
+}
+
+// Like _voucher_activity_copy, but resurrection is a fatal error: only valid
+// on an activity the caller already knows to be alive.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_t
+_voucher_activity_retain(_voucher_activity_t act)
+{
+ if (_voucher_activity_copy(act)) {
+ _dispatch_voucher_activity_debug("invalid resurrection", act);
+ DISPATCH_CRASH("Invalid activity resurrection");
+ }
+ return act;
+}
+
+// Drop a use-count reference; when the last one goes away (count drops below
+// the zero bias) the activity is marked unused in its ATM, which may trigger
+// garbage collection. Crashes on overrelease.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_release(_voucher_activity_t act)
+{
+ int use_cnt = dispatch_atomic_dec2o(act, va_use_count, relaxed);
+ _dispatch_voucher_activity_debug("release -> %d", act, use_cnt + 1);
+ if (fastpath(use_cnt >= 0)) {
+ return;
+ }
+ if (slowpath(use_cnt < -1)) {
+ _dispatch_voucher_activity_debug("overrelease", act);
+ DISPATCH_CRASH("Activity overrelease");
+ }
+ return _voucher_atm_activity_mark_unused(act);
+}
+
+// Take an ATM-side (structural) reference on an activity; this keeps the
+// activity object itself alive independently of the use count above.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_t
+_voucher_activity_atm_retain(_voucher_activity_t act)
+{
+ int refcnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed);
+ _dispatch_voucher_activity_debug("atm retain -> %d", act, refcnt + 1);
+ if (slowpath(refcnt <= 0)) {
+ _dispatch_voucher_activity_debug("atm resurrection", act);
+ DISPATCH_CRASH("Activity ATM resurrection");
+ }
+ return act;
+}
+
+// Drop an ATM-side reference; the last release disposes of the activity and
+// frees its buffers. Crashes on overrelease.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_atm_release(_voucher_activity_t act)
+{
+ int refcnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed);
+ _dispatch_voucher_activity_debug("atm release -> %d", act, refcnt + 1);
+ if (fastpath(refcnt >= 0)) {
+ return;
+ }
+ if (slowpath(refcnt < -1)) {
+ _dispatch_voucher_activity_debug("atm overrelease", act);
+ DISPATCH_CRASH("Activity ATM overrelease");
+ }
+ return _voucher_activity_dispose(act);
+}
+
+// Return the activity attached to voucher `v`, falling back to the process
+// default activity when the voucher is NULL or carries none.
+static inline _voucher_activity_t
+_voucher_activity_get(voucher_t v)
+{
+ _voucher_activity_t act;
+ act = v && v->v_activity ? v->v_activity : _voucher_activity_default;
+ return act;
+}
+
+// Linear lookup of an activity by id in its hash bucket; returns NULL when
+// not found (TAILQ_FOREACH leaves the iterator NULL at list end).
+static _voucher_activity_t
+_voucher_activity_find(voucher_activity_id_t va_id, uint32_t hash)
+{
+ // assumes vam_activities_lock held
+ _voucher_activity_t act;
+ TAILQ_FOREACH(act, vam_activities(hash), va_list){
+ if (act->va_id == va_id) break;
+ }
+ return act;
+}
+
+// Look up an activity by id and take a use-count reference on it under the
+// activities lock. If the copy resurrected a fully-released activity, it is
+// re-inserted into its ATM's used list (outside the lock).
+static _voucher_activity_t
+_voucher_activity_copy_from_id(voucher_activity_id_t va_id)
+{
+ bool resurrect = false;
+ uint32_t hash = VACTID_HASH(va_id);
+ _voucher_activity_lock_lock(vam_activities_lock());
+ _voucher_activity_t act = _voucher_activity_find(va_id, hash);
+ if (act) {
+ resurrect = _voucher_activity_copy(act);
+ _dispatch_voucher_activity_debug("copy from id 0x%llx", act, va_id);
+ }
+ _voucher_activity_lock_unlock(vam_activities_lock());
+ if (resurrect) return _voucher_atm_activity_mark_used(act);
+ return act;
+}
+
+// Insert `act_new` into the global activities hash unless an activity with
+// the same id already exists. Returns NULL on successful insertion, or the
+// pre-existing activity (with a fresh use-count reference taken) when the
+// caller lost the race and should discard `act_new`.
+static _voucher_activity_t
+_voucher_activity_try_insert(_voucher_activity_t act_new)
+{
+ bool resurrect = false;
+ voucher_activity_id_t va_id = act_new->va_id;
+ uint32_t hash = VACTID_HASH(va_id);
+ _voucher_activity_lock_lock(vam_activities_lock());
+ _voucher_activity_t act = _voucher_activity_find(va_id, hash);
+ if (act) {
+ resurrect = _voucher_activity_copy(act);
+ _dispatch_voucher_activity_debug("try insert: failed (%p)", act,act_new);
+ } else {
+ if (slowpath(_TAILQ_IS_ENQUEUED(act_new, va_list))) {
+ _dispatch_voucher_activity_debug("corruption", act_new);
+ DISPATCH_CRASH("Activity corruption");
+ }
+ TAILQ_INSERT_TAIL(vam_activities(hash), act_new, va_list);
+ _dispatch_voucher_activity_debug("try insert: succeeded", act_new);
+ }
+ _voucher_activity_lock_unlock(vam_activities_lock());
+ if (resurrect) return _voucher_atm_activity_mark_used(act);
+ return act;
+}
+
+// Remove `act` from the global activities hash, but only when its use count
+// is still negative (fully released) at the time the lock is held — a
+// concurrent resurrection aborts the removal. Returns whether it removed.
+static bool
+_voucher_activity_try_remove(_voucher_activity_t act)
+{
+ bool r;
+ voucher_activity_id_t va_id = act->va_id;
+ uint32_t hash = VACTID_HASH(va_id);
+ _voucher_activity_lock_lock(vam_activities_lock());
+ if (slowpath(!va_id)) {
+ _dispatch_voucher_activity_debug("corruption", act);
+ DISPATCH_CRASH("Activity corruption");
+ }
+ if ((r = (dispatch_atomic_load2o(act, va_use_count, seq_cst) < 0 &&
+ _TAILQ_IS_ENQUEUED(act, va_list)))) {
+ TAILQ_REMOVE(vam_activities(hash), act, va_list);
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_list);
+ // poison the forward link to catch stale iteration after removal
+ act->va_list.tqe_next = (void*)~0ull;
+ }
+ _dispatch_voucher_activity_debug("try remove: %s", act, r ? "succeeded" :
+ "failed");
+ _voucher_activity_lock_unlock(vam_activities_lock());
+ return r;
+}
+
+// Create an activity inside a heap buffer: the activity struct overlays the
+// start of the buffer (which doubles as its first trace buffer), beginning
+// with a start tracepoint. Consumes the caller's reference on `vatm` (either
+// transferring it into va_atm or releasing it on failure/lost race).
+// Returns the new activity, the pre-existing one if another thread inserted
+// the same id first, or NULL if no buffer could be allocated.
+static _voucher_activity_t
+_voucher_activity_create_with_atm(_voucher_atm_t vatm,
+ voucher_activity_id_t va_id, voucher_activity_trace_id_t trace_id,
+ uint64_t location, _voucher_activity_buffer_header_t buffer)
+{
+ if (!buffer) buffer = _voucher_activity_heap_buffer_alloc();
+ if (!buffer) {
+ _dispatch_voucher_atm_debug("no buffer", vatm);
+ _voucher_atm_release(vatm); // consume vatm reference
+ return NULL;
+ }
+ if (!trace_id) trace_id = _voucher_activity_trace_id_release;
+ // the first tracepoint slot records the activity-start event
+ _voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer;
+ _voucher_activity_tracepoint_init_with_id(vat, trace_id, ~1ull);
+ _voucher_activity_t act = (_voucher_activity_t)buffer;
+ act->va_flags = _voucher_activity_trace_flag_buffer_header |
+ _voucher_activity_trace_flag_activity |
+ _voucher_activity_trace_flag_start |
+ _voucher_activity_trace_flag_wide_first;
+ // tracepoint writes start after the slots occupied by the activity struct
+ act->vabh_next_tracepoint_idx = sizeof(*act)/sizeof(*vat);
+ act->va_max_sequence_no = 0;
+ act->va_id = va_id ? va_id : VATM_ACTID(vatm, 0);
+ act->va_use_count = 0;
+ act->va_buffer_limit = _voucher_activity_buffer_limit();
+ TAILQ_INIT(&act->va_buffers);
+ act->va_flags2 = _voucher_activity_trace_flag_activity |
+ _voucher_activity_trace_flag_wide_second;
+#if __LP64__
+ act->va_flags3 = act->va_flags2;
+#endif
+ act->va_refcnt = 0;
+ act->va_location = location;
+ act->va_current_buffer = buffer;
+ act->va_atm = vatm; // transfer vatm reference
+ _voucher_activity_lock_init(va_buffers_lock(act));
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_list);
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list);
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list);
+ _voucher_activity_t actx = _voucher_activity_try_insert(act);
+ if (actx) {
+ // lost the insertion race: dispose ours, use the winner's activity
+ _voucher_activity_dispose(act);
+ act = actx;
+ } else {
+ _voucher_atm_activity_insert(vatm, act);
+ }
+ _dispatch_voucher_activity_debug("create", act);
+ return act;
+}
+
+// Final teardown of an activity: release its ATM reference, return all of
+// its trace buffers to the heap, and finally free the buffer that backs the
+// activity struct itself. The activity must already be off all lists.
+static void
+_voucher_activity_dispose(_voucher_activity_t act)
+{
+ _dispatch_voucher_activity_debug("dispose", act);
+ _voucher_atm_release(act->va_atm);
+ if (slowpath(_TAILQ_IS_ENQUEUED(act, va_list))) {
+ _dispatch_voucher_activity_debug("corruption", act);
+ DISPATCH_CRASH("Activity corruption");
+ }
+ act->va_list.tqe_next = DISPATCH_OBJECT_LISTLESS;
+ dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_list));
+ dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_used_list));
+ _voucher_activity_buffer_header_t buffer, tmp;
+ TAILQ_FOREACH_SAFE(buffer, &act->va_buffers, vabh_list, tmp) {
+ _dispatch_voucher_activity_debug("buffer free %p", act, buffer);
+ TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
+ _voucher_activity_heap_buffer_free(buffer);
+ }
+ // the activity struct overlays its first buffer; free it last
+ buffer = (_voucher_activity_buffer_header_t)act;
+ _voucher_activity_heap_buffer_free(buffer);
+}
+
+// Garbage-collect one activity: if it can be removed from the global hash
+// (fully released), unlink it from its ATM as well.
+static void
+_voucher_activity_collect(_voucher_activity_t act)
+{
+ _dispatch_voucher_activity_debug("collect", act);
+ if (_voucher_activity_try_remove(act)) {
+ _voucher_atm_activity_remove(act);
+ }
+}
+
+// Materialize an activity from a kernel mach voucher: prefer an existing
+// activity matching `va_id`; otherwise extract the ATM id from the voucher,
+// find or create the corresponding ATM, and create a new activity for it.
+// Returns NULL when activities are disabled or resources are unavailable.
+static _voucher_activity_t
+_voucher_activity_copy_from_mach_voucher(mach_voucher_t kv,
+ voucher_activity_id_t va_id)
+{
+ dispatch_once_f(&_voucher_activity_heap_pred, NULL,
+ _voucher_activity_heap_init);
+ if (_voucher_activity_disabled()) return NULL;
+ _voucher_activity_t act = NULL;
+ if (dispatch_assume(va_id)) {
+ if ((act = _voucher_activity_copy_from_id(va_id))) return act;
+ }
+ atm_aid_t atm_id = _voucher_mach_voucher_get_atm_id(kv);
+ if (!dispatch_assume(atm_id)) return NULL;
+ _voucher_activity_buffer_header_t buffer;
+ buffer = _voucher_activity_heap_buffer_alloc();
+ if (!buffer) return NULL;
+ _dispatch_kvoucher_debug("atm copy/create from <%lld>", kv, atm_id);
+ _voucher_atm_t vatm = _voucher_atm_copy(atm_id);
+ if (!vatm) vatm = _voucher_atm_create(kv, atm_id);
+ if (!vatm) {
+ _voucher_activity_heap_buffer_free(buffer);
+ return NULL;
+ }
+ // stale va_id from a different ATM generation cannot be reused
+ if (VACTID_BASEID(va_id) != VATMID2ACTID(atm_id)) va_id = 0;
+ // consumes vatm reference:
+ act = _voucher_activity_create_with_atm(vatm, va_id, 0, 0, buffer);
+ _dispatch_voucher_activity_debug("copy from kvoucher[0x%08x]", act, kv);
+ return act;
+}
+
+#pragma mark -
+#pragma mark _voucher_atm_mailbox
+
+// Allocate a kernel-mailbox slot from the mailbox bitmap; returns the byte
+// offset of the slot, or MAILBOX_OFFSET_UNSET when the bitmap is full.
+DISPATCH_ALWAYS_INLINE
+static inline atm_mailbox_offset_t
+_voucher_atm_mailbox_alloc(void)
+{
+ atm_mailbox_offset_t mailbox_offset = MAILBOX_OFFSET_UNSET;
+ size_t index;
+ index = _voucher_activity_bitmap_set_first_unset_bit(vam_atm_mbox_bitmap());
+ if (index < NO_BITS_WERE_UNSET) {
+ mailbox_offset = index * sizeof(atm_mailbox_offset_t);
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+ _dispatch_debug("mailbox alloc %zd (%lld)", index, mailbox_offset);
+#endif
+ }
+ return mailbox_offset;
+}
+
+// Return a mailbox slot to the pool; a no-op for the UNSET sentinel.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_atm_mailbox_free(atm_mailbox_offset_t mailbox_offset)
+{
+ if (mailbox_offset == MAILBOX_OFFSET_UNSET) return;
+ size_t index = (size_t)mailbox_offset / sizeof(atm_mailbox_offset_t);
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+ _dispatch_debug("mailbox free %zd (%lld)", index, mailbox_offset);
+#endif
+ _voucher_activity_bitmap_clear_bit(vam_atm_mbox_bitmap(), index);
+}
+
+// Write `subaid` into one half of the mailbox slot shared with the kernel:
+// each slot holds two atm_subaid32_t values (min-used, then max-present).
+// Returns true only when the stored value actually changed.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_atm_mailbox_set(atm_mailbox_offset_t mailbox_offset,
+ atm_subaid32_t subaid, bool max_present)
+{
+ if (mailbox_offset == MAILBOX_OFFSET_UNSET) return false;
+ char *mailbox_base = (char*)_voucher_activity_heap->vam_kernel_metadata;
+ atm_subaid32_t *mailbox = (atm_subaid32_t*)(mailbox_base + mailbox_offset);
+ if (max_present) mailbox++; // second atm_subaid32_t in atm_mailbox_offset_t
+ if (*mailbox == subaid) return false;
+ *mailbox = subaid;
+ return true;
+}
+
+#pragma mark -
+#pragma mark _voucher_atm_t
+
+static bool _voucher_atm_try_remove(_voucher_atm_t vatm);
+static void _voucher_atm_dispose(_voucher_atm_t vatm, bool unregister);
+static inline void _voucher_atm_collect_if_needed(bool updated);
+
+// Take a reference on an ATM object. The count is biased by one (stored 0 ==
+// one reference). Crashes on a negative pre-increment value (overrelease).
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_atm_t
+_voucher_atm_retain(_voucher_atm_t vatm)
+{
+ // assumes vam_atms_lock or vam_base_atm_lock held
+ int refcnt = dispatch_atomic_inc2o(vatm, vatm_refcnt, relaxed);
+ _dispatch_voucher_atm_debug("retain -> %d", vatm, refcnt + 1);
+ if (slowpath(refcnt < 0)) {
+ _dispatch_voucher_atm_debug("overrelease", vatm);
+ DISPATCH_CRASH("ATM overrelease");
+ }
+ return vatm;
+}
+
+// Drop a reference on an ATM object; the last release attempts to remove it
+// from the global hash and, when successful, disposes it (unregistering its
+// kernel mailbox). Crashes on overrelease.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_atm_release(_voucher_atm_t vatm)
+{
+ int refcnt = dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed);
+ _dispatch_voucher_atm_debug("release -> %d", vatm, refcnt + 1);
+ if (fastpath(refcnt >= 0)) {
+ return;
+ }
+ if (slowpath(refcnt < -1)) {
+ _dispatch_voucher_atm_debug("overrelease", vatm);
+ DISPATCH_CRASH("ATM overrelease");
+ }
+ if (_voucher_atm_try_remove(vatm)) {
+ _voucher_atm_dispose(vatm, true);
+ }
+}
+
+// Linear lookup of an ATM by id in its hash bucket; returns NULL when not
+// found (TAILQ_FOREACH leaves the iterator NULL at list end).
+static _voucher_atm_t
+_voucher_atm_find(atm_aid_t atm_id, uint32_t hash)
+{
+ // assumes vam_atms_lock held
+ _voucher_atm_t vatm;
+ TAILQ_FOREACH(vatm, vam_atms(hash), vatm_list){
+ if (vatm->vatm_id == atm_id) break;
+ }
+ return vatm;
+}
+
+// Look up an ATM by id and take a reference on it under the ATMs lock.
+static _voucher_atm_t
+_voucher_atm_copy(atm_aid_t atm_id)
+{
+ uint32_t hash = VATMID_HASH(atm_id);
+ _voucher_activity_lock_lock(vam_atms_lock());
+ _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash);
+ if (vatm) {
+ _voucher_atm_retain(vatm);
+ _dispatch_voucher_atm_debug("copy", vatm);
+ }
+ _voucher_activity_lock_unlock(vam_atms_lock());
+ return vatm;
+}
+
+// Insert `vatm_new` into the global ATM hash unless one with the same id
+// already exists. Returns NULL on successful insertion, or the pre-existing
+// ATM (retained) when the caller lost the race and should discard its copy.
+static _voucher_atm_t
+_voucher_atm_try_insert(_voucher_atm_t vatm_new)
+{
+ atm_aid_t atm_id = vatm_new->vatm_id;
+ uint32_t hash = VATMID_HASH(atm_id);
+ _voucher_activity_lock_lock(vam_atms_lock());
+ _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash);
+ if (vatm) {
+ _voucher_atm_retain(vatm);
+ _dispatch_voucher_atm_debug("try insert: failed (%p)", vatm, vatm_new);
+ } else {
+ if (slowpath(_TAILQ_IS_ENQUEUED(vatm_new, vatm_list))) {
+ _dispatch_voucher_atm_debug("corruption", vatm_new);
+ DISPATCH_CRASH("ATM corruption");
+ }
+ TAILQ_INSERT_TAIL(vam_atms(hash), vatm_new, vatm_list);
+ _dispatch_voucher_atm_debug("try insert: succeeded", vatm_new);
+ }
+ _voucher_activity_lock_unlock(vam_atms_lock());
+ return vatm;
+}
+
+// Remove `vatm` from the global ATM hash, but only if its refcount is still
+// negative (fully released) while the lock is held — a concurrent retain
+// aborts the removal. Returns whether the removal happened.
+static bool
+_voucher_atm_try_remove(_voucher_atm_t vatm)
+{
+ bool r;
+ atm_aid_t atm_id = vatm->vatm_id;
+ uint32_t hash = VATMID_HASH(atm_id);
+ _voucher_activity_lock_lock(vam_atms_lock());
+ if (slowpath(!atm_id)) {
+ _dispatch_voucher_atm_debug("corruption", vatm);
+ DISPATCH_CRASH("ATM corruption");
+ }
+ if ((r = (dispatch_atomic_load2o(vatm, vatm_refcnt, seq_cst) < 0 &&
+ _TAILQ_IS_ENQUEUED(vatm, vatm_list)))) {
+ TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list);
+ _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list);
+ // poison the forward link to catch stale iteration after removal
+ vatm->vatm_list.tqe_next = (void*)~0ull;
+ }
+ _dispatch_voucher_atm_debug("try remove: %s", vatm, r ? "succeeded" :
+ "failed");
+ _voucher_activity_lock_unlock(vam_atms_lock());
+ return r;
+}
+
+// Publish the largest allocated subaid to the kernel mailbox. The activity
+// list is kept sorted by va_id, so TAILQ_LAST yields the maximum; an empty
+// list publishes 0. Returns true when the mailbox value changed.
+static bool
+_voucher_atm_update_mailbox(_voucher_atm_t vatm)
+{
+ // Update kernel mailbox with largest allocated subaid for this atm_id
+ // assumes atm_activities_lock held
+ _voucher_activity_t act = TAILQ_LAST(vatm_activities(vatm),
+ _voucher_atm_activities_s);
+ atm_subaid32_t subaid = act ? VACTID_SUBID(act->va_id) : 0;
+ bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, true);
+ if (r) {
+ _dispatch_voucher_atm_debug("update max-present subaid 0x%x", vatm,
+ subaid);
+ }
+ return r;
+}
+
+// Publish the smallest in-use subaid to the kernel mailbox. The used list is
+// sorted ascending, so TAILQ_FIRST yields the minimum; an empty list
+// publishes ATM_SUBAID32_MAX. Returns true when the mailbox value changed.
+static bool
+_voucher_atm_update_used_mailbox(_voucher_atm_t vatm)
+{
+ // Update kernel mailbox with smallest in-use subaid for this atm_id
+ // assumes atm_activities_lock held
+ _voucher_activity_t act = TAILQ_FIRST(vatm_used_activities(vatm));
+ atm_subaid32_t subaid = act ? VACTID_SUBID(act->va_id) : ATM_SUBAID32_MAX;
+ bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, false);
+ if (r) {
+ _dispatch_voucher_atm_debug("update min-used subaid 0x%x", vatm,
+ subaid);
+ }
+ return r;
+}
+
+// Link a newly created activity into its ATM's sorted activities list and
+// sorted in-use list, refreshing both kernel mailboxes. Idempotent: each
+// insertion is skipped when the activity is already on that list.
+static void
+_voucher_atm_activity_insert(_voucher_atm_t vatm, _voucher_activity_t act)
+{
+ _voucher_activity_lock_lock(vatm_activities_lock(vatm));
+ if (!_TAILQ_IS_ENQUEUED(act, va_atm_list)) {
+ _voucher_activity_ordered_insert(act, vatm_activities(vatm),
+ va_atm_list);
+ _voucher_atm_update_mailbox(vatm);
+ }
+ if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
+ _voucher_activity_ordered_insert(act, vatm_used_activities(vatm),
+ va_atm_used_list);
+ _voucher_atm_update_used_mailbox(vatm);
+ }
+ _dispatch_voucher_activity_debug("atm insert", act);
+ _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
+}
+
+// Unlink an activity from its ATM's used and activities lists, refreshing
+// the kernel mailboxes, and drop the activity's creation refcount.
+static void
+_voucher_atm_activity_remove(_voucher_activity_t act)
+{
+ _voucher_atm_t vatm = act->va_atm;
+ _voucher_activity_lock_lock(vatm_activities_lock(vatm));
+ _dispatch_voucher_activity_debug("atm remove", act);
+ if (_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
+ // Fix: the head passed to TAILQ_REMOVE must be the list that the
+ // va_atm_used_list linkage belongs to (the used list, cf. mark_unused
+ // below), not vatm_activities; otherwise removing the last element of
+ // the used list corrupts the activities list's tail pointer.
+ TAILQ_REMOVE(vatm_used_activities(vatm), act, va_atm_used_list);
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list);
+ _voucher_atm_update_used_mailbox(vatm);
+ }
+ if (_TAILQ_IS_ENQUEUED(act, va_atm_list)) {
+ TAILQ_REMOVE(vatm_activities(vatm), act, va_atm_list);
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list);
+ _voucher_atm_update_mailbox(vatm);
+ // Balance initial creation refcnt. Caller must hold additional
+ // reference to ensure this does not release vatm before the unlock,
+ // see _voucher_atm_activity_collect
+ _voucher_activity_atm_release(act);
+ }
+ _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
+}
+
+// Re-insert a resurrected activity into its ATM's sorted in-use list and
+// refresh the min-used kernel mailbox. Idempotent when already enqueued.
+static _voucher_activity_t
+_voucher_atm_activity_mark_used(_voucher_activity_t act)
+{
+ _voucher_atm_t vatm = act->va_atm;
+ _voucher_activity_lock_lock(vatm_activities_lock(vatm));
+ if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
+ _voucher_activity_ordered_insert(act, vatm_used_activities(vatm),
+ va_atm_used_list);
+ _voucher_atm_update_used_mailbox(vatm);
+ _dispatch_voucher_activity_debug("mark used", act);
+ }
+ _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
+ return act;
+}
+
+// Take a fully released activity off its ATM's in-use list, refresh the
+// min-used mailbox, and kick ATM garbage collection if warranted. A vatm
+// reference is taken around the unlock so collection cannot free it early.
+static void
+_voucher_atm_activity_mark_unused(_voucher_activity_t act)
+{
+ bool atm_collect = false, updated = false;
+ _voucher_atm_t vatm = act->va_atm;
+ _voucher_activity_lock_lock(vatm_activities_lock(vatm));
+ if (_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
+ _dispatch_voucher_activity_debug("mark unused", act);
+ TAILQ_REMOVE(&vatm->vatm_used_activities, act, va_atm_used_list);
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list);
+ atm_collect = true;
+ _voucher_atm_retain(vatm);
+ updated = _voucher_atm_update_used_mailbox(vatm);
+ }
+ _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
+ if (atm_collect) {
+ _voucher_atm_release(vatm);
+ _voucher_atm_collect_if_needed(updated);
+ }
+}
+
+// Garbage-collect all of `vatm`'s activities whose subaid is below the
+// kernel-reported minimum in-use subaid and that are not on the used list.
+// One candidate is retained, collected, and released per loop iteration so
+// the lock is never held across _voucher_activity_collect.
+static void
+_voucher_atm_activity_collect(_voucher_atm_t vatm, atm_subaid32_t min_subaid)
+{
+ _dispatch_voucher_atm_debug("collect min subaid 0x%x", vatm, min_subaid);
+ voucher_activity_id_t min_va_id = VATM_ACTID(vatm, min_subaid);
+ _voucher_activity_t act;
+ do {
+ _voucher_activity_lock_lock(vatm_activities_lock(vatm));
+ TAILQ_FOREACH(act, vatm_activities(vatm), va_atm_list) {
+ if (act->va_id >= min_va_id) {
+ // list is sorted by va_id: nothing further is collectable
+ act = NULL;
+ break;
+ }
+ if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
+ _voucher_activity_atm_retain(act);
+ break;
+ }
+ }
+ _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
+ if (act) {
+ _voucher_activity_collect(act);
+ _voucher_activity_atm_release(act);
+ }
+ } while (act);
+}
+
+// Global ATM garbage-collection pass: snapshot up to _voucher_atm_mailboxes
+// registered ATMs (retained, skipping the default-activity ATM), ask the
+// kernel for each one's minimum in-use subaid in a single batched
+// ATM_FIND_MIN_SUB_AID command, then collect each ATM's dead activities.
+DISPATCH_NOINLINE
+static void
+_voucher_atm_collect(void)
+{
+ _voucher_atm_t vatms[_voucher_atm_mailboxes], vatm;
+ atm_aid_t aids[_voucher_atm_mailboxes];
+ mach_atm_subaid_t subaids[_voucher_atm_mailboxes];
+ uint32_t i, a = 0, s;
+
+ _voucher_activity_lock_lock(vam_atms_lock());
+ for (i = 0; i < _voucher_activity_hash_size; i++) {
+ TAILQ_FOREACH(vatm, vam_atms(i), vatm_list){
+ if (vatm == _voucher_activity_heap->vam_default_activity_atm ||
+ vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) continue;
+ _dispatch_voucher_atm_debug("find min subaid", vatm);
+ vatms[a] = _voucher_atm_retain(vatm);
+ aids[a] = vatm->vatm_id;
+ if (++a == _voucher_atm_mailboxes) goto out;
+ }
+ }
+out:
+ _voucher_activity_lock_unlock(vam_atms_lock());
+ if (!a) return;
+ kern_return_t kr;
+ mach_voucher_t kv = vatms[0]->vatm_kvoucher;
+ mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&aids;
+ mach_voucher_attr_content_size_t kvc_in_size = sizeof(atm_aid_t) * a;
+ mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaids;
+ mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t)*a;
+ kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
+ ATM_FIND_MIN_SUB_AID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size);
+ DISPATCH_VERIFY_MIG(kr);
+ (void)dispatch_assume_zero(kr);
+ // kernel may return fewer results than requested
+ s = kvc_out_size / sizeof(mach_atm_subaid_t);
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+ _dispatch_debug("found min subaids (%u out of %u)", s, a);
+#endif
+ for (i = 0; i < a; i++) {
+ if (i < s) _voucher_atm_activity_collect(vatms[i],
+ (atm_subaid32_t)subaids[i]);
+ _voucher_atm_release(vatms[i]);
+ }
+}
+
+// Rate-limit collection: the level counter advances by 2 per mailbox update
+// (even values) and its low bit serves as a "collection in progress" flag.
+// When the level exceeds the threshold, one thread wins the cmpxchg to set
+// the flag, runs a collection pass, and subtracts what it observed.
+static inline void
+_voucher_atm_collect_if_needed(bool updated)
+{
+ long level;
+ if (updated) {
+ level = dispatch_atomic_add(&_voucher_atm_collect_level, 2ul, relaxed);
+ } else {
+ level = _voucher_atm_collect_level;
+ if (!level) return;
+ }
+ if (level & 1 || level <= _voucher_atm_collect_threshold) return;
+ if (!dispatch_atomic_cmpxchg(&_voucher_atm_collect_level, level, level + 1,
+ acquire)) return;
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+ _dispatch_debug("atm collect: reached level %ld", level/2);
+#endif
+ if (slowpath(level < 0)) {
+ DISPATCH_CRASH("ATM collection level corruption");
+ }
+ _voucher_atm_collect();
+ dispatch_atomic_sub(&_voucher_atm_collect_level, level + 1, release);
+}
+
+// Report an ATM fault for the current thread's activity: send the activity's
+// subaid to the kernel via the given ATM voucher attr command. No-op when
+// the activity has no ATM kernel voucher.
+DISPATCH_NOINLINE
+static void
+_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd)
+{
+ _voucher_activity_t act = _voucher_activity_get(_voucher_get());
+ mach_voucher_t kv = _voucher_activity_get_atm_mach_voucher(act);
+ if (!kv) return;
+
+ kern_return_t kr;
+ mach_atm_subaid_t subaid = VACTID_SUBID(act->va_id);
+ mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&subaid;
+ mach_voucher_attr_content_size_t kvc_in_size = sizeof(mach_atm_subaid_t);
+ mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaid;
+ mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t);
+ kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
+ kvc_cmd, kvc_in, kvc_in_size, kvc_out, &kvc_out_size);
+ DISPATCH_VERIFY_MIG(kr);
+ (void)dispatch_assume_zero(kr);
+}
+
+// Extract the ATM id from a kernel voucher's ATM attribute; returns 0 when
+// the voucher carries no ATM content.
+static atm_aid_t
+_voucher_mach_voucher_get_atm_id(mach_voucher_t kv)
+{
+ kern_return_t kr;
+ atm_aid_t atm_id = 0;
+ mach_voucher_attr_content_t kvc = (mach_voucher_attr_content_t)&atm_id;
+ mach_voucher_attr_content_size_t kvc_size = sizeof(atm_id);
+ kr = mach_voucher_extract_attr_content(kv, MACH_VOUCHER_ATTR_KEY_ATM, kvc,
+ &kvc_size);
+ DISPATCH_VERIFY_MIG(kr);
+ (void)dispatch_assume_zero(kr);
+ return atm_id;
+}
+
+// Ask the kernel to create a fresh ATM voucher and return it, storing the
+// newly assigned ATM id through `atm_id_ptr`. Both creation and id
+// extraction failures are fatal (client crash).
+static mach_voucher_t
+_voucher_atm_mach_voucher_create(atm_aid_t *atm_id_ptr)
+{
+ kern_return_t kr;
+ mach_voucher_t kv;
+ static const mach_voucher_attr_recipe_data_t atm_create_recipe = {
+ .key = MACH_VOUCHER_ATTR_KEY_ATM,
+ .command = MACH_VOUCHER_ATTR_ATM_CREATE,
+ };
+ kr = _voucher_create_mach_voucher(&atm_create_recipe,
+ sizeof(atm_create_recipe), &kv);
+ if (dispatch_assume_zero(kr)) {
+ DISPATCH_CLIENT_CRASH("Could not create ATM mach voucher");
+ }
+ atm_aid_t atm_id = _voucher_mach_voucher_get_atm_id(kv);
+ if (!dispatch_assume(atm_id)) {
+ DISPATCH_CLIENT_CRASH("Could not extract ATM ID");
+ }
+ _dispatch_kvoucher_debug("atm create <%lld>", kv, atm_id);
+ *atm_id_ptr = atm_id;
+ return kv;
+}
+
+// Register the ATM's mailbox offset with the kernel by building a REGISTER
+// recipe layered on voucher `kv`; the kernel returns a voucher that becomes
+// (or must match) vatm's cached kernel voucher.
+static void
+_voucher_atm_mailbox_mach_voucher_register(_voucher_atm_t vatm,
+ mach_voucher_t kv)
+{
+ _dispatch_voucher_atm_debug("mailbox register %lld with kvoucher[0x%08x]",
+ vatm, vatm->vatm_mailbox_offset, kv);
+ kern_return_t kr;
+ mach_voucher_t akv;
+ atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset;
+ mach_voucher_attr_recipe_t vr;
+ size_t vr_size;
+ static const mach_voucher_attr_recipe_data_t atm_register_recipe = {
+ .key = MACH_VOUCHER_ATTR_KEY_ATM,
+ .command = MACH_VOUCHER_ATTR_ATM_REGISTER,
+ .content_size = sizeof(offset),
+ };
+ // recipe carries the mailbox offset as inline variable-length content
+ vr_size = sizeof(atm_register_recipe) + atm_register_recipe.content_size;
+ vr = alloca(vr_size);
+ *vr = atm_register_recipe;
+ vr->previous_voucher = kv;
+ memcpy(&vr->content, &offset, sizeof(offset));
+ kr = _voucher_create_mach_voucher(vr, vr_size, &akv);
+ if (dispatch_assume_zero(kr)) {
+ DISPATCH_CLIENT_CRASH("Could not register ATM ID");
+ }
+ if (!vatm->vatm_kvoucher) {
+ vatm->vatm_kvoucher = akv;
+ } else {
+#if !RDAR_17510224
+ if (akv != vatm->vatm_kvoucher) {
+ DISPATCH_CRASH("Unexpected mach voucher returned by ATM ID "
+ "registration");
+ }
+ _voucher_dealloc_mach_voucher(akv);
+#else
+ DISPATCH_CRASH("Registered invalid ATM object");
+#endif
+ }
+ _dispatch_voucher_atm_debug("mailbox registered %lld", vatm,
+ vatm->vatm_mailbox_offset);
+}
+
+// Register vatm's mailbox with the kernel using its existing kernel voucher.
+// Two code paths exist depending on the RDAR_17510224 workaround: either via
+// a voucher-recipe registration or via a direct attr command.
+static void
+_voucher_atm_mailbox_register(_voucher_atm_t vatm)
+{
+ mach_voucher_t kv = vatm->vatm_kvoucher;
+ if (!kv) return;
+#if !RDAR_17510224
+ _voucher_atm_mailbox_mach_voucher_register(vatm, kv);
+#else // RDAR_17510224
+ _dispatch_voucher_atm_debug("mailbox register %lld", vatm,
+ vatm->vatm_mailbox_offset);
+ kern_return_t kr;
+ atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset;
+ mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset;
+ mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset);
+ mach_voucher_attr_content_t kvc_out = NULL;
+ mach_voucher_attr_content_size_t kvc_out_size = 0;
+ kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
+ ATM_ACTION_REGISTER, kvc_in, kvc_in_size, kvc_out,
+ &kvc_out_size);
+ DISPATCH_VERIFY_MIG(kr);
+ if (dispatch_assume_zero(kr)) {
+ DISPATCH_CLIENT_CRASH("Could not register ATM ID");
+ }
+ _dispatch_voucher_atm_debug("mailbox registered %lld", vatm,
+ vatm->vatm_mailbox_offset);
+#endif // RDAR_17510224
+}
+
+// Unregister vatm's mailbox from the kernel. Returns true when the mailbox
+// slot may be freed; KERN_INVALID_VALUE is tolerated (already unregistered),
+// any other error is fatal.
+static bool
+_voucher_atm_mailbox_unregister(_voucher_atm_t vatm)
+{
+ if (vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) return false;
+ _dispatch_voucher_atm_debug("mailbox unregister %lld", vatm,
+ vatm->vatm_mailbox_offset);
+ mach_voucher_t kv = vatm->vatm_kvoucher;
+ dispatch_assert(kv);
+ kern_return_t kr;
+ atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset;
+ mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset;
+ mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset);
+ mach_voucher_attr_content_t kvc_out = NULL;
+ mach_voucher_attr_content_size_t kvc_out_size = 0;
+ kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
+ ATM_ACTION_UNREGISTER, kvc_in, kvc_in_size, kvc_out, &kvc_out_size);
+ DISPATCH_VERIFY_MIG(kr);
+ if (kr && kr != KERN_INVALID_VALUE) {
+ (void)dispatch_assume_zero(kr);
+ DISPATCH_CLIENT_CRASH("Could not unregister ATM ID");
+ }
+ _dispatch_voucher_atm_debug("mailbox unregistered %lld", vatm,
+ vatm->vatm_mailbox_offset);
+ return true;
+}
+
+// Allocate and initialize a _voucher_atm_s for kernel voucher `kv` and
+// ATM ID `atm_id` (creating a fresh kernel voucher when kv is NULL),
+// publish it via _voucher_atm_try_insert, and register its mailbox.
+// If an equivalent ATM won the insert race, the new one is disposed
+// and the already-inserted one is returned instead.
+static _voucher_atm_t
+_voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id)
+{
+	atm_mailbox_offset_t mailbox_offset = _voucher_atm_mailbox_alloc();
+	// A caller-supplied kernel voucher requires a mailbox slot.
+	if (kv && mailbox_offset == MAILBOX_OFFSET_UNSET) return NULL;
+	_voucher_atm_t vatm = _dispatch_calloc(1ul, sizeof(struct _voucher_atm_s));
+	if (!kv) {
+		kv = _voucher_atm_mach_voucher_create(&atm_id);
+		// Without a mailbox slot the fresh kvoucher is dropped; the
+		// ATM then only carries the generated atm_id.
+		if (mailbox_offset == MAILBOX_OFFSET_UNSET) {
+			_voucher_dealloc_mach_voucher(kv);
+		} else {
+			vatm->vatm_kvoucher = kv;
+		}
+		kv = MACH_VOUCHER_NULL;
+	}
+	vatm->vatm_id = atm_id;
+	vatm->vatm_mailbox_offset = mailbox_offset;
+	_voucher_activity_lock_init(vatm_activities_lock(vatm));
+	TAILQ_INIT(&vatm->vatm_activities);
+	TAILQ_INIT(&vatm->vatm_used_activities);
+	// Initialize both mailbox values for this offset (subid 0 and the
+	// ATM_SUBAID32_MAX sentinel) — NOTE(review): exact slot semantics
+	// live in _voucher_atm_mailbox_set; confirm there.
+	_voucher_atm_mailbox_set(mailbox_offset, 0, true);
+	_voucher_atm_mailbox_set(mailbox_offset, ATM_SUBAID32_MAX, false);
+	_voucher_atm_t vatmx = _voucher_atm_try_insert(vatm);
+	if (vatmx) {
+		// Lost the insert race: discard ours, adopt the winner.
+		_voucher_atm_dispose(vatm, false);
+		vatm = vatmx;
+	} else if (kv) {
+		_voucher_atm_mailbox_mach_voucher_register(vatm, kv);
+	} else {
+		_voucher_atm_mailbox_register(vatm);
+	}
+	_dispatch_voucher_atm_debug("create with kvoucher[0x%08x]", vatm, kv);
+	return vatm;
+}
+
+// Tear down an ATM that has no live activities. When `unregister` is
+// set, the mailbox is unregistered from the kernel voucher first; the
+// kernel voucher reference is released, the mailbox slot freed, and
+// the allocation itself freed.
+static void
+_voucher_atm_dispose(_voucher_atm_t vatm, bool unregister)
+{
+	_dispatch_voucher_atm_debug("dispose", vatm);
+	dispatch_assert(TAILQ_EMPTY(&vatm->vatm_activities));
+	dispatch_assert(TAILQ_EMPTY(&vatm->vatm_used_activities));
+	// Disposing an ATM still linked into the global list is corruption.
+	if (slowpath(_TAILQ_IS_ENQUEUED(vatm, vatm_list))) {
+		_dispatch_voucher_atm_debug("corruption", vatm);
+		DISPATCH_CRASH("ATM corruption");
+	}
+	// Poison the list linkage to catch later use-after-dispose.
+	vatm->vatm_list.tqe_next = DISPATCH_OBJECT_LISTLESS;
+	bool free_mailbox = (vatm->vatm_mailbox_offset != MAILBOX_OFFSET_UNSET);
+	if (vatm->vatm_kvoucher) {
+		if (unregister) free_mailbox = _voucher_atm_mailbox_unregister(vatm);
+		_voucher_dealloc_mach_voucher(vatm->vatm_kvoucher);
+		vatm->vatm_kvoucher = MACH_VOUCHER_NULL;
+	}
+	if (free_mailbox) {
+		_voucher_atm_mailbox_free(vatm->vatm_mailbox_offset);
+		vatm->vatm_mailbox_offset = MAILBOX_OFFSET_UNSET;
+	}
+	free(vatm);
+}
+
+// Return the kernel voucher of the ATM backing `act`, or
+// MACH_VOUCHER_NULL when the activity (or its ATM) is absent.
+// Does not take a reference on the returned port.
+static inline mach_voucher_t
+_voucher_activity_get_atm_mach_voucher(_voucher_activity_t act)
+{
+	mach_voucher_t kv;
+	kv = act && act->va_atm ? act->va_atm->vatm_kvoucher : MACH_VOUCHER_NULL;
+	return kv;
+}
+
+// Take a reference on the current base ATM and mint a fresh activity
+// ID (ATM id + next subid) into *va_id_ptr. When the subid space of
+// the current base ATM is exhausted, a replacement ATM is created —
+// the base-ATM lock is dropped around that allocation, so the state
+// is re-read afterwards and retried if another thread replaced the
+// base ATM in the meantime.
+DISPATCH_NOINLINE
+static _voucher_atm_t
+_voucher_atm_base_copy_and_activity_id_make(voucher_activity_id_t *va_id_ptr)
+{
+	_voucher_atm_subid_t subid;
+	_voucher_atm_t vatm, vatm_old = NULL, vatm_new = NULL;
+	// subid_max == 1: no subid sharing — every call gets its own ATM.
+	if (_voucher_activity_heap->vam_base_atm_subid_max == 1) {
+		vatm = _voucher_atm_create(0, 0);
+		subid = 1;
+		goto out;
+	}
+	_voucher_activity_lock_lock(vam_base_atm_lock());
+	vatm = _voucher_activity_heap->vam_base_atm;
+retry:
+	_voucher_atm_retain(vatm);
+	subid = _voucher_activity_heap->vam_base_atm_subid;
+	if (subid++ >= _voucher_activity_heap->vam_base_atm_subid_max) {
+		// Subid space exhausted: create a replacement ATM outside the
+		// lock (allocation + kernel call), then revalidate.
+		_voucher_activity_lock_unlock(vam_base_atm_lock());
+		if (!vatm_new) vatm_new = _voucher_atm_create(0, 0);
+		_voucher_activity_lock_lock(vam_base_atm_lock());
+		_voucher_atm_release(vatm);
+		vatm_old = vatm;
+		vatm = _voucher_activity_heap->vam_base_atm;
+		if (vatm != vatm_old) {
+			// Someone else replaced the base ATM while unlocked.
+			vatm_old = NULL;
+			goto retry;
+		}
+		_voucher_activity_heap->vam_base_atm = vatm = vatm_new;
+		_voucher_activity_heap->vam_base_atm_subid = subid = 1;
+		vatm_new = NULL;
+		_voucher_atm_retain(vatm);
+		_dispatch_voucher_atm_debug("base replace", vatm);
+	} else {
+		_voucher_activity_heap->vam_base_atm_subid = subid;
+		_dispatch_voucher_atm_debug("base copy", vatm);
+	}
+	_voucher_activity_lock_unlock(vam_base_atm_lock());
+	// Drop references deferred until after the lock is released.
+	if (vatm_old) _voucher_atm_release(vatm_old);
+	if (vatm_new) _voucher_atm_release(vatm_new);
+out:
+	*va_id_ptr = VATM_ACTID(vatm, subid);
+	return vatm;
+}
+
+// Mint a fresh nested-ATM base ID: create a throwaway kernel voucher
+// purely to obtain a unique atm_aid, release it immediately, and
+// convert the aid to an activity-ID prefix.
+static voucher_activity_id_t
+_voucher_atm_nested_atm_id_make(void)
+{
+	atm_aid_t atm_id;
+	mach_voucher_t kv = _voucher_atm_mach_voucher_create(&atm_id);
+	_voucher_dealloc_mach_voucher(kv); // just need the unique ID
+	return VATMID2ACTID(atm_id);
+}
+
+// Mint a nested activity ID: base nested-ATM ID + next subid. When
+// the subid space is exhausted, a new nested-ATM ID is generated with
+// the lock dropped, then revalidated (retry if another thread already
+// replaced it).
+static voucher_activity_id_t
+_voucher_atm_nested_activity_id_make(void)
+{
+	voucher_activity_id_t va_id, va_id_old, va_id_new;
+	_voucher_atm_subid_t subid;
+	_voucher_activity_lock_lock(vam_nested_atm_lock());
+	va_id = _voucher_activity_heap->vam_nested_atm_id;
+retry:
+	subid = _voucher_activity_heap->vam_nested_atm_subid;
+	if (subid++ >= VATM_SUBID_MAX) {
+		// Drop the lock around the kernel round-trip.
+		_voucher_activity_lock_unlock(vam_nested_atm_lock());
+		va_id_new = _voucher_atm_nested_atm_id_make();
+		va_id_old = va_id;
+		_voucher_activity_lock_lock(vam_nested_atm_lock());
+		va_id = _voucher_activity_heap->vam_nested_atm_id;
+		if (va_id != va_id_old) goto retry;
+		_voucher_activity_heap->vam_nested_atm_id = va_id = va_id_new;
+		subid = 1;
+	}
+	_voucher_activity_heap->vam_nested_atm_subid = subid;
+	_voucher_activity_lock_unlock(vam_nested_atm_lock());
+	return va_id + subid;
+}
+
+#pragma mark -
+#pragma mark voucher_activity_id_t
+
+// Public SPI: begin a new activity for trace_id/location, producing
+// its activity ID. Builds a new voucher carrying the enlarged
+// activity-ID array (inheriting priority, kvoucher base, extra
+// recipes and existing IDs from the current thread voucher), adopts
+// it on the thread, and emits a start tracepoint.
+voucher_activity_id_t
+voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id,
+		uint64_t location, voucher_activity_flag_t flags)
+{
+	dispatch_once_f(&_voucher_activity_heap_pred, NULL,
+			_voucher_activity_heap_init);
+	if (!_voucher_activity_trace_id_enabled(trace_id)) return 0;
+	voucher_activity_id_t va_id = 0, va_base_id = 0;
+	_voucher_atm_t vatm = NULL;
+	_voucher_activity_t act = NULL;
+	_voucher_activity_tracepoint_t vat = NULL;
+	unsigned int activities = 1, oactivities = 0;
+	voucher_t ov = _voucher_get();
+	if (!(flags & voucher_activity_flag_force) && ov && ov->v_activities) {
+		oactivities = ov->v_activities;
+		activities += oactivities;
+		// Over the cap: only mint a nested ID, don't grow the voucher.
+		if (activities > _voucher_max_activities) {
+			va_id = _voucher_atm_nested_activity_id_make();
+			goto out;
+		}
+	}
+	if (activities == 1) {
+		// First activity on this voucher: take a base-ATM copy.
+		vatm = _voucher_atm_base_copy_and_activity_id_make(&va_id);
+		if (vatm->vatm_kvoucher) {
+			// consumes vatm reference:
+			act = _voucher_activity_create_with_atm(vatm, va_id, trace_id,
+					location, NULL);
+			vat = (_voucher_activity_tracepoint_t)act;
+		} else {
+			_voucher_atm_release(vatm);
+		}
+		if (!act) {
+			activities++;
+			// default to _voucher_activity_default base activity
+			va_base_id = _voucher_activity_default->va_id;
+		}
+	}
+	pthread_priority_t priority = _voucher_get_priority(ov);
+	mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0;
+	voucher_t v = _voucher_alloc(activities, priority, extra);
+	if (extra) {
+		memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra);
+	}
+	// Borrow the old voucher's kernel voucher via its base.
+	if (ov && ov->v_kvoucher) {
+		voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov;
+		v->v_kvbase = _voucher_retain(kvb);
+		v->v_kvoucher = kvb->v_kvoucher;
+	}
+	voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
+	if (oactivities) {
+		memcpy(activity_ids, _voucher_activity_ids(ov),
+				oactivities * sizeof(voucher_activity_id_t));
+	}
+	if (!va_id) {
+		// No base activity was created above: use a nested ID and keep
+		// referencing the previous voucher's activity object.
+		va_id = _voucher_atm_nested_activity_id_make();
+		if (ov && ov->v_activity) {
+			act = _voucher_activity_retain(ov->v_activity);
+		}
+	}
+	if (va_base_id) activity_ids[0] = va_base_id;
+	activity_ids[activities-1] = va_id;
+	v->v_activity = act;
+	_voucher_swap(ov, v);
+	if (vat) return va_id; // new _voucher_activity_s contains trace info
+out:
+	vat = _voucher_activity_trace_with_id(trace_id);
+	if (vat) {
+		vat->vat_flags |= _voucher_activity_trace_flag_activity |
+				_voucher_activity_trace_flag_start;
+		vat->vat_data[0] = va_id;
+	}
+	return va_id;
+}
+
+// Convenience wrapper: start an activity without a caller location.
+voucher_activity_id_t
+voucher_activity_start(voucher_activity_trace_id_t trace_id,
+		voucher_activity_flag_t flags)
+{
+	return voucher_activity_start_with_location(trace_id, 0, flags);
+}
+
+// Public SPI: end activity va_id. Emits an end tracepoint, then — if
+// the ID is present in the current thread voucher's activity array —
+// builds a replacement voucher with that ID removed (preserving
+// priority, kvoucher base and extra recipes) and swaps it in.
+void
+voucher_activity_end(voucher_activity_id_t va_id)
+{
+	if (!va_id) return;
+	_voucher_activity_tracepoint_t vat;
+	vat = _voucher_activity_trace_with_id(_voucher_activity_trace_id_release);
+	if (vat) {
+		vat->vat_flags |= _voucher_activity_trace_flag_activity |
+				_voucher_activity_trace_flag_end;
+		vat->vat_data[0] = va_id;
+	}
+	voucher_t v = _voucher_get();
+	if (!v) return;
+	// Find va_id from the end of the activity-ID array.
+	unsigned int activities = v->v_activities, act_idx = activities;
+	voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
+	while (act_idx) {
+		if (activity_ids[act_idx-1] == va_id) break;
+		act_idx--;
+	}
+	if (!act_idx) return; // activity_id not found
+	pthread_priority_t priority = _voucher_get_priority(v);
+	mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(v);
+	voucher_t nv = NULL;
+	if (act_idx > 1 || activities == 1) --activities;
+	// Only allocate a replacement when something remains to carry over.
+	if (priority || activities || extra || v->v_kvoucher) {
+		nv = _voucher_alloc(activities, priority, extra);
+		if (extra) {
+			memcpy(_voucher_extra_recipes(nv), _voucher_extra_recipes(v),extra);
+		}
+	}
+	if (v->v_kvoucher) {
+		voucher_t kvb = v->v_kvbase ? v->v_kvbase : v;
+		nv->v_kvbase = _voucher_retain(kvb);
+		nv->v_kvoucher = kvb->v_kvoucher;
+	}
+	bool atm_collect = !activities;
+	if (activities) {
+		voucher_activity_id_t *new_activity_ids = _voucher_activity_ids(nv);
+		if (act_idx == 1 && _voucher_activity_default) {
+			atm_collect = true;
+			// default to _voucher_activity_default base activity
+			new_activity_ids[0] = _voucher_activity_default->va_id;
+			memcpy(&new_activity_ids[1], &activity_ids[1],
+					(activities - 1) * sizeof(voucher_activity_id_t));
+		} else {
+			if (v->v_activity) {
+				nv->v_activity = _voucher_activity_retain(v->v_activity);
+			}
+			// Copy IDs before and after the removed slot.
+			memcpy(new_activity_ids, activity_ids,
+					--act_idx * sizeof(voucher_activity_id_t));
+			if (act_idx < activities) {
+				memcpy(&new_activity_ids[act_idx], &activity_ids[act_idx+1],
+						(activities - act_idx) * sizeof(voucher_activity_id_t));
+			}
+		}
+	}
+	_voucher_swap(v, nv);
+}
+
+// Public SPI: copy up to *count activity IDs of the current thread
+// voucher into `entries`. *count is updated to the total number of
+// activities; the return value is the number actually copied.
+unsigned int
+voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count)
+{
+	voucher_t v = _voucher_get();
+	if (!v || !count) return 0;
+	unsigned int activities = v->v_activities;
+	if (*count < activities) activities = *count;
+	*count = v->v_activities;
+	voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
+	if (activities && entries) {
+		memcpy(entries, activity_ids, activities *
+				sizeof(voucher_activity_id_t));
+	}
+	return activities;
+}
+
+// Public SPI: namespace of the current voucher's activity, 0 if none.
+uint8_t
+voucher_activity_get_namespace(void)
+{
+	voucher_t v = _voucher_get();
+	if (!v || !v->v_activity) return 0;
+	return v->v_activity->va_namespace;
+}
+
+// Slow path for reserving `slots` tracepoints: resolves the activity
+// (current voucher's, else the lazily-initialized default), then
+// allocates fresh buffers until a reservation succeeds. Returns NULL
+// when activities are disabled or buffer allocation fails.
+DISPATCH_NOINLINE
+_voucher_activity_tracepoint_t
+_voucher_activity_tracepoint_get_slow(unsigned int slots)
+{
+	_voucher_activity_t act;
+	_voucher_activity_buffer_header_t vab;
+	_voucher_activity_tracepoint_t vat = NULL;
+	voucher_t v = _voucher_get();
+	if (v && v->v_activity) {
+		act = v->v_activity;
+	} else {
+		dispatch_once_f(&_voucher_activity_heap_pred, NULL,
+				_voucher_activity_heap_init);
+		if (_voucher_activity_disabled()) return NULL;
+		act = _voucher_activity_default;
+	}
+	vab = act->va_current_buffer;
+	if (vab && vab->vabh_next_tracepoint_idx <=
+			_voucher_activity_tracepoints_per_buffer) {
+		goto retry; // another slowpath raced us
+	}
+	do {
+		vab = _voucher_activity_buffer_alloc(act, vab);
+		if (!vab) break;
+retry:
+		vat = _voucher_activity_buffer_tracepoint_get(vab, slots);
+	} while (!vat);
+	return vat;
+}
+
+// For error-subtype trace IDs, ask the ATM layer to collect; fault
+// subtypes escalate to ATM_ACTION_LOGFAIL. No-op for ordinary IDs.
+static inline void
+_voucher_activity_trace_fault(voucher_activity_trace_id_t trace_id)
+{
+	if (!slowpath(_voucher_activity_trace_id_is_subtype(trace_id, error))) {
+		return;
+	}
+	mach_voucher_attr_command_t atm_cmd = ATM_ACTION_COLLECT;
+	if (_voucher_activity_trace_id_is_subtype(trace_id, fault)) {
+		atm_cmd = ATM_ACTION_LOGFAIL;
+	}
+	return _voucher_atm_fault(atm_cmd);
+}
+
+// Public SPI: record a tracepoint carrying `length` bytes of payload.
+// Payloads larger than one tracepoint's vat_data spill into a second
+// ("wide") tracepoint slot; anything beyond two slots is truncated.
+// Returns the tracepoint timestamp, or 0 if disabled/unavailable.
+uint64_t
+voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location,
+		void *buffer, size_t length)
+{
+	if (!_voucher_activity_trace_id_enabled(trace_id)) return 0;
+	_voucher_activity_tracepoint_t vat;
+	const unsigned int slots = length <= sizeof(vat->vat_data) ? 1 : 2;
+	vat = _voucher_activity_tracepoint_get(slots);
+	if (!vat) vat = _voucher_activity_tracepoint_get_slow(slots);
+	if (!vat) return 0;
+	uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat,
+			trace_id, location);
+	void *tbuf = vat->vat_data;
+	size_t tlen = sizeof(vat->vat_data);
+	if (length < tlen) {
+		memcpy(tbuf, buffer, length);
+	} else {
+		memcpy(tbuf, buffer, tlen);
+	}
+	if (length > tlen) {
+		// Continue the payload in the adjacent tracepoint slot.
+		vat->vat_flags |= _voucher_activity_trace_flag_wide_first;
+		buffer += tlen;
+		length -= tlen;
+		(++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint |
+				_voucher_activity_trace_flag_wide_second;
+		vat->vat_type = 0; vat->vat_namespace = 0;
+		// Second slot reuses everything from vat_code onward as raw
+		// payload space.
+		tbuf = (void*)vat + offsetof(typeof(*vat), vat_code);
+		tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code);
+		if (length < tlen) {
+			memcpy(tbuf, buffer, length);
+		} else {
+			memcpy(tbuf, buffer, tlen);
+		}
+	}
+	_voucher_activity_trace_fault(trace_id);
+	return timestamp;
+}
+
+// Public SPI: record a tracepoint with four integer arguments in
+// vat_data. Returns the tracepoint timestamp, or 0 if disabled.
+uint64_t
+voucher_activity_trace_args(voucher_activity_trace_id_t trace_id,
+		uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
+		uintptr_t arg4)
+{
+	if (!_voucher_activity_trace_id_enabled(trace_id)) return 0;
+	_voucher_activity_tracepoint_t vat;
+	vat = _voucher_activity_tracepoint_get(1);
+	if (!vat) vat = _voucher_activity_tracepoint_get_slow(1);
+	if (!vat) return 0;
+	uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat,
+			trace_id, location);
+	vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args;
+	vat->vat_data[0] = arg1;
+	vat->vat_data[1] = arg2;
+	vat->vat_data[2] = arg3;
+	vat->vat_data[3] = arg4;
+	_voucher_activity_trace_fault(trace_id);
+	return timestamp;
+}
+
+#pragma mark -
+#pragma mark _voucher_debug
+
+// Format a human-readable description of voucher `v` into buf
+// (refcounts, kvouchers, priority, activity IDs, activity/ATM state).
+// Returns the number of characters appended.
+size_t
+_voucher_debug(voucher_t v, char* buf, size_t bufsiz)
+{
+	size_t offset = 0;
+	#define bufprintf(...) \
+			offset += dsnprintf(&buf[offset], bufsiz - offset, ##__VA_ARGS__)
+	// +1: external refcounts are biased by one in the header fields.
+	bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x, ", v,
+			v->os_obj_xref_cnt + 1, v->os_obj_ref_cnt + 1);
+
+	if (v->v_kvbase) {
+		bufprintf("base voucher %p, ", v->v_kvbase);
+	}
+	if (v->v_kvoucher) {
+		bufprintf("kvoucher%s 0x%x, ", v->v_kvoucher == v->v_ipc_kvoucher ?
+				" & ipc kvoucher" : "", v->v_kvoucher);
+	}
+	if (v->v_ipc_kvoucher && v->v_ipc_kvoucher != v->v_kvoucher) {
+		bufprintf("ipc kvoucher 0x%x, ", v->v_ipc_kvoucher);
+	}
+	if (v->v_has_priority) {
+		bufprintf("QOS 0x%x, ", *_voucher_priority(v));
+	}
+	if (v->v_activities) {
+		voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
+		bufprintf("activity IDs = { ");
+		unsigned int i;
+		for (i = 0; i < v->v_activities; i++) {
+			bufprintf("0x%llx, ", *activity_ids++);
+		}
+		bufprintf("}, ");
+	}
+	if (v->v_activity) {
+		_voucher_activity_t va = v->v_activity;
+		_voucher_atm_t vatm = va->va_atm;
+		bufprintf("activity[%p] = { ID 0x%llx, use %d, atm[%p] = { "
+				"AID 0x%llx, ref %d, kvoucher 0x%x } }, ", va, va->va_id,
+				va->va_use_count + 1, va->va_atm, vatm->vatm_id,
+				vatm->vatm_refcnt + 1, vatm->vatm_kvoucher);
+	}
+	bufprintf("}");
+	return offset;
+}
+
+#else // VOUCHER_USE_MACH_VOUCHER
+
+#pragma mark -
+#pragma mark Simulator / vouchers disabled
+
+// Stub implementations compiled when mach vouchers are unavailable
+// (e.g. simulator): every entry point is a no-op that returns the
+// neutral value for its type, keeping the public symbol set intact.
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+voucher_t
+voucher_create(voucher_recipe_t recipe)
+{
+	(void)recipe;
+	return NULL;
+}
+#endif
+
+voucher_t
+voucher_adopt(voucher_t voucher)
+{
+	return voucher;
+}
+
+voucher_t
+voucher_copy(void)
+{
+	return NULL;
+}
+
+voucher_t
+voucher_copy_without_importance(void)
+{
+	return NULL;
+}
+
+void
+voucher_replace_default_voucher(void)
+{
+}
+
+void
+voucher_decrement_importance_count4CF(voucher_t v)
+{
+	(void)v;
+}
+
+void
+_voucher_thread_cleanup(void *voucher)
+{
+	(void)voucher;
+}
+
+void
+_voucher_dealloc_mach_voucher(mach_voucher_t kv)
+{
+	(void)kv;
+}
+
+mach_voucher_t
+_voucher_create_mach_voucher_with_priority(voucher_t voucher,
+		pthread_priority_t priority)
+{
+	(void)voucher; (void)priority;
+	return MACH_VOUCHER_NULL;
+}
+
+voucher_t
+_voucher_create_with_priority_and_mach_voucher(voucher_t voucher,
+		pthread_priority_t priority, mach_voucher_t kv)
+{
+	(void)voucher; (void)priority; (void)kv;
+	return NULL;
+}
+
+voucher_t
+voucher_create_with_mach_msg(mach_msg_header_t *msg)
+{
+	(void)msg;
+	return NULL;
+}
+
+#if VOUCHER_ENABLE_GET_MACH_VOUCHER
+mach_voucher_t
+voucher_get_mach_voucher(voucher_t voucher)
+{
+	(void)voucher;
+	return 0;
+}
+#endif
+
+void
+_voucher_xref_dispose(voucher_t voucher)
+{
+	(void)voucher;
+}
+
+void
+_voucher_dispose(voucher_t voucher)
+{
+	(void)voucher;
+}
+
+void
+_voucher_atfork_child(void)
+{
+}
+
+void
+_voucher_init(void)
+{
+}
+
+void*
+voucher_activity_get_metadata_buffer(size_t *length)
+{
+	*length = 0;
+	return NULL;
+}
+
+void
+_voucher_activity_heap_pressure_normal(void)
+{
+}
+
+void
+_voucher_activity_heap_pressure_warn(void)
+{
+}
+
+voucher_activity_id_t
+voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id,
+		uint64_t location, voucher_activity_flag_t flags)
+{
+	(void)trace_id; (void)location; (void)flags;
+	return 0;
+}
+
+voucher_activity_id_t
+voucher_activity_start(voucher_activity_trace_id_t trace_id,
+		voucher_activity_flag_t flags)
+{
+	(void)trace_id; (void)flags;
+	return 0;
+}
+
+void
+voucher_activity_end(voucher_activity_id_t activity_id)
+{
+	(void)activity_id;
+}
+
+unsigned int
+voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count)
+{
+	(void)entries; (void)count;
+	return 0;
+}
+
+uint8_t
+voucher_activity_get_namespace(void)
+{
+	return 0;
+}
+
+uint64_t
+voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location,
+		void *buffer, size_t length)
+{
+	(void)trace_id; (void)location; (void)buffer; (void)length;
+	return 0;
+}
+
+uint64_t
+voucher_activity_trace_args(voucher_activity_trace_id_t trace_id,
+		uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
+		uintptr_t arg4)
+{
+	(void)trace_id; (void)location;
+	(void)arg1; (void)arg2; (void)arg3; (void)arg4;
+	return 0;
+}
+
+size_t
+_voucher_debug(voucher_t v, char* buf, size_t bufsiz)
+{
+	(void)v; (void)buf; (void)bufsiz;
+	return 0;
+}
+
+#endif // VOUCHER_USE_MACH_VOUCHER
--- /dev/null
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_VOUCHER_INTERNAL__
+#define __DISPATCH_VOUCHER_INTERNAL__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+#pragma mark -
+#pragma mark voucher_recipe_t (disabled)
+
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+/*!
+ * @group Voucher Creation SPI
+ * SPI intended for clients that need to create vouchers.
+ */
+
+#if OS_OBJECT_USE_OBJC
+OS_OBJECT_DECL(voucher_recipe);
+#else
+typedef struct voucher_recipe_s *voucher_recipe_t;
+#endif
+
+/*!
+ * @function voucher_create
+ *
+ * @abstract
+ * Creates a new voucher object from a recipe.
+ *
+ * @discussion
+ * Error handling TBD
+ *
+ * @result
+ * The newly created voucher object.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_create(voucher_recipe_t recipe);
+#endif // VOUCHER_ENABLE_RECIPE_OBJECTS
+
+#if VOUCHER_ENABLE_GET_MACH_VOUCHER
+/*!
+ * @function voucher_get_mach_voucher
+ *
+ * @abstract
+ * Returns the mach voucher port underlying the specified voucher object.
+ *
+ * @discussion
+ * The caller must either maintain a reference on the voucher object while the
+ * returned mach voucher port is in use to ensure it stays valid for the
+ * duration, or it must retain the mach voucher port with mach_port_mod_refs().
+ *
+ * @param voucher
+ * The voucher object to query.
+ *
+ * @result
+ * A mach voucher port.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
+mach_voucher_t
+voucher_get_mach_voucher(voucher_t voucher);
+#endif // VOUCHER_ENABLE_GET_MACH_VOUCHER
+
+#pragma mark -
+#pragma mark voucher_t
+
+// Configuration: mach vouchers are disabled on old simulator hosts,
+// otherwise enabled whenever <mach/mach_voucher.h> is available.
+#if TARGET_IPHONE_SIMULATOR && \
+		IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000
+#undef VOUCHER_USE_MACH_VOUCHER
+#define VOUCHER_USE_MACH_VOUCHER 0
+#endif
+#ifndef VOUCHER_USE_MACH_VOUCHER
+#if __has_include(<mach/mach_voucher.h>)
+#define VOUCHER_USE_MACH_VOUCHER 1
+#endif
+#endif
+
+#if VOUCHER_USE_MACH_VOUCHER
+#undef DISPATCH_USE_IMPORTANCE_ASSERTION
+#define DISPATCH_USE_IMPORTANCE_ASSERTION 0
+#else
+#undef MACH_RCV_VOUCHER
+#define MACH_RCV_VOUCHER 0
+#endif // VOUCHER_USE_MACH_VOUCHER
+
+// Internal voucher entry points implemented in voucher.c.
+void _voucher_init(void);
+void _voucher_atfork_child(void);
+void _voucher_activity_heap_pressure_warn(void);
+void _voucher_activity_heap_pressure_normal(void);
+void _voucher_xref_dispose(voucher_t voucher);
+void _voucher_dispose(voucher_t voucher);
+size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz);
+void _voucher_thread_cleanup(void *voucher);
+mach_voucher_t _voucher_get_mach_voucher(voucher_t voucher);
+voucher_t _voucher_create_without_importance(voucher_t voucher);
+mach_voucher_t _voucher_create_mach_voucher_with_priority(voucher_t voucher,
+		pthread_priority_t priority);
+voucher_t _voucher_create_with_priority_and_mach_voucher(voucher_t voucher,
+		pthread_priority_t priority, mach_voucher_t kv);
+void _voucher_dealloc_mach_voucher(mach_voucher_t kv);
+
+#if OS_OBJECT_USE_OBJC
+_OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher, object)
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+_OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher_recipe, object)
+#endif
+#endif
+
+// TAILQ helpers: a cleared tqe_prev marks "not on any list".
+#define _TAILQ_IS_ENQUEUED(elm, field) \
+		((elm)->field.tqe_prev != NULL)
+#define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \
+		do { (elm)->field.tqe_prev = NULL; } while (0)
+
+// Sentinel distinguishing "no kvoucher change" from MACH_VOUCHER_NULL.
+#define VOUCHER_NO_MACH_VOUCHER MACH_PORT_DEAD
+
+#if VOUCHER_USE_MACH_VOUCHER
+
+#if DISPATCH_DEBUG
+#define DISPATCH_VOUCHER_DEBUG 1
+#define DISPATCH_VOUCHER_ACTIVITY_DEBUG 1
+#endif
+
+// Userspace voucher object: refcounted os_object carrying the kernel
+// voucher port(s), an optional borrowed base voucher, the current
+// activity, and a trailing variable-length array of recipes.
+typedef struct voucher_s {
+	_OS_OBJECT_HEADER(
+	void *os_obj_isa,
+	os_obj_ref_cnt,
+	os_obj_xref_cnt);
+	TAILQ_ENTRY(voucher_s) v_list;
+	mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference
+	voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference
+	_voucher_activity_t v_activity;
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+	size_t v_recipe_extra_offset;
+	mach_voucher_attr_recipe_size_t v_recipe_extra_size;
+#endif
+	unsigned int v_has_priority:1;
+	unsigned int v_activities;  // count of IDs in the bits recipe
+	mach_voucher_attr_recipe_data_t v_recipes[]; // flexible array member
+} voucher_s;
+
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+// Refcounted wrapper around a raw mach voucher recipe buffer.
+typedef struct voucher_recipe_s {
+	_OS_OBJECT_HEADER(
+	const _os_object_class_s *os_obj_isa,
+	os_obj_ref_cnt,
+	os_obj_xref_cnt);
+	size_t vr_allocation_size;
+	mach_voucher_attr_recipe_size_t volatile vr_size;
+	mach_voucher_attr_recipe_t vr_data;
+} voucher_recipe_s;
+#endif
+
+// Fixed recipe layout: [0]=base, [1]=ATM, [2]=libdispatch bits.
+#define _voucher_recipes_base(r) (r[0])
+#define _voucher_recipes_atm(r) (r[1])
+#define _voucher_recipes_bits(r) (r[2])
+#define _voucher_base_recipe(v) (_voucher_recipes_base((v)->v_recipes))
+#define _voucher_atm_recipe(v) (_voucher_recipes_atm((v)->v_recipes))
+#define _voucher_bits_recipe(v) (_voucher_recipes_bits((v)->v_recipes))
+#define _voucher_recipes_size() (3 * sizeof(mach_voucher_attr_recipe_data_t))
+
+#if TARGET_OS_EMBEDDED
+#define VL_HASH_SIZE  64u // must be a power of two
+#else
+#define VL_HASH_SIZE 256u // must be a power of two
+#endif
+#define VL_HASH(kv) (MACH_PORT_INDEX(kv) & (VL_HASH_SIZE - 1))
+
+// Bits-recipe content layout: magic, then priority, then activity IDs.
+typedef uint32_t _voucher_magic_t;
+const _voucher_magic_t _voucher_magic_v1 = 0x0190cefa; // little-endian FACE9001
+#define _voucher_recipes_magic(r) ((_voucher_magic_t*) \
+		(_voucher_recipes_bits(r).content))
+#define _voucher_magic(v) _voucher_recipes_magic((v)->v_recipes)
+typedef uint32_t _voucher_priority_t;
+#define _voucher_recipes_priority(r) ((_voucher_priority_t*) \
+		(_voucher_recipes_bits(r).content + sizeof(_voucher_magic_t)))
+#define _voucher_priority(v) _voucher_recipes_priority((v)->v_recipes)
+#define _voucher_activity_ids(v) ((voucher_activity_id_t*) \
+		(_voucher_bits_recipe(v).content + sizeof(_voucher_magic_t) + \
+		sizeof(_voucher_priority_t)))
+#define _voucher_bits_size(activities) \
+		(sizeof(_voucher_magic_t) + sizeof(_voucher_priority_t) + \
+		(activities) * sizeof(voucher_activity_id_t))
+
+#if VOUCHER_ENABLE_RECIPE_OBJECTS
+#define _voucher_extra_size(v) ((v)->v_recipe_extra_size)
+#define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset)
+#else
+#define _voucher_extra_size(v) 0
+#define _voucher_extra_recipes(v) NULL
+#endif
+
+// Debug logging helpers — compiled out in release builds.
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_DEBUG
+#define _dispatch_voucher_debug(msg, v, ...) \
+		_dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__)
+#define _dispatch_kvoucher_debug(msg, kv, ...) \
+		_dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__)
+#define _dispatch_voucher_debug_machport(name) \
+		dispatch_debug_machport((name), __func__)
+#else
+#define _dispatch_voucher_debug(msg, v, ...)
+#define _dispatch_kvoucher_debug(msg, kv, ...)
+#define _dispatch_voucher_debug_machport(name) ((void)(name))
+#endif
+
+#if !(USE_OBJC && __OBJC2__)
+
+// Take an external reference on `voucher`; crashes on resurrection
+// (incrementing a refcount that already hit zero).
+DISPATCH_ALWAYS_INLINE
+static inline voucher_t
+_voucher_retain(voucher_t voucher)
+{
+#if !DISPATCH_VOUCHER_OBJC_DEBUG
+	int xref_cnt = dispatch_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed);
+	_dispatch_voucher_debug("retain  -> %d", voucher, xref_cnt + 1);
+	if (slowpath(xref_cnt <= 0)) {
+		_dispatch_voucher_debug("resurrection", voucher);
+		DISPATCH_CRASH("Voucher resurrection");
+	}
+#else
+	os_retain(voucher);
+	_dispatch_voucher_debug("retain  -> %d", voucher,
+			voucher->os_obj_xref_cnt + 1);
+#endif // DISPATCH_DEBUG
+	return voucher;
+}
+
+// Drop an external reference; disposes the object when the count goes
+// negative, crashes on over-release.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_release(voucher_t voucher)
+{
+#if !DISPATCH_VOUCHER_OBJC_DEBUG
+	int xref_cnt = dispatch_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed);
+	_dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1);
+	if (fastpath(xref_cnt >= 0)) {
+		return;
+	}
+	if (slowpath(xref_cnt < -1)) {
+		_dispatch_voucher_debug("overrelease", voucher);
+		DISPATCH_CRASH("Voucher overrelease");
+	}
+	return _os_object_xref_dispose((_os_object_t)voucher);
+#else
+	_dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt);
+	return os_release(voucher);
+#endif // DISPATCH_DEBUG
+}
+
+// Current thread's voucher from TSD (no reference taken).
+DISPATCH_ALWAYS_INLINE
+static inline voucher_t
+_voucher_get(void)
+{
+	return _dispatch_thread_getspecific(dispatch_voucher_key);
+}
+
+// Current thread's voucher with a reference taken (NULL if none).
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_voucher_copy(void)
+{
+	voucher_t voucher = _voucher_get();
+	if (voucher) _voucher_retain(voucher);
+	return voucher;
+}
+
+// Copy of the current thread's voucher with importance stripped.
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_voucher_copy_without_importance(void)
+{
+	voucher_t voucher = _voucher_get();
+	if (voucher) voucher = _voucher_create_without_importance(voucher);
+	return voucher;
+}
+
+// Push kernel voucher `kv` to the thread unless it is the
+// VOUCHER_NO_MACH_VOUCHER "no change" sentinel.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_mach_voucher_set(mach_voucher_t kv)
+{
+	if (kv == VOUCHER_NO_MACH_VOUCHER) return;
+	_dispatch_set_priority_and_mach_voucher(0, kv);
+}
+
+// Install `voucher` as the thread's TSD voucher and return the kernel
+// voucher that must be pushed — VOUCHER_NO_MACH_VOUCHER when the
+// underlying kvoucher is unchanged.
+DISPATCH_ALWAYS_INLINE
+static inline mach_voucher_t
+_voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher)
+{
+	if (ov == voucher) return VOUCHER_NO_MACH_VOUCHER;
+	_dispatch_voucher_debug("swap from voucher[%p]", voucher, ov);
+	_dispatch_thread_setspecific(dispatch_voucher_key, voucher);
+	mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL;
+	mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL;
+	return (kv != okv) ? kv : VOUCHER_NO_MACH_VOUCHER;
+}
+
+// Replace the thread voucher, push the kernel voucher if it changed,
+// and release the previous voucher's reference.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_swap(voucher_t ov, voucher_t voucher)
+{
+	_voucher_mach_voucher_set(_voucher_swap_and_get_mach_voucher(ov, voucher));
+	if (ov) _voucher_release(ov);
+}
+
+// Like _voucher_swap but hands the previous voucher (and its
+// reference) back to the caller instead of releasing it.
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_voucher_adopt(voucher_t voucher)
+{
+	voucher_t ov = _voucher_get();
+	_voucher_mach_voucher_set(_voucher_swap_and_get_mach_voucher(ov, voucher));
+	return ov;
+}
+
+// Replace the thread voucher, releasing the old one.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_replace(voucher_t voucher)
+{
+	voucher_t ov = _voucher_get();
+	_voucher_swap(ov, voucher);
+}
+
+// Clear the thread voucher entirely.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_clear(void)
+{
+	_voucher_replace(NULL);
+}
+
+// Pthread priority stored in the voucher's bits recipe, 0 if unset.
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_voucher_get_priority(voucher_t voucher)
+{
+	return voucher && voucher->v_has_priority ?
+			(pthread_priority_t)*_voucher_priority(voucher) : 0;
+}
+
+void _voucher_task_mach_voucher_init(void* ctxt);
+extern dispatch_once_t _voucher_task_mach_voucher_pred;
+extern mach_voucher_t _voucher_task_mach_voucher;
+
+// Lazily-initialized per-task kernel voucher used when a message is
+// sent without a thread voucher.
+DISPATCH_ALWAYS_INLINE
+static inline mach_voucher_t
+_voucher_get_task_mach_voucher(void)
+{
+	dispatch_once_f(&_voucher_task_mach_voucher_pred, NULL,
+			_voucher_task_mach_voucher_init);
+	return _voucher_task_mach_voucher;
+}
+
+// Attach kernel voucher `kv` to an outgoing mach message as a
+// move-send or copy-send right. Returns false if the message already
+// carries a voucher or kv is null.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_mach_msg_set_mach_voucher(mach_msg_header_t *msg, mach_voucher_t kv,
+		bool move_send)
+{
+	if (MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return false;
+	if (!kv) return false;
+	msg->msgh_voucher_port = kv;
+	msg->msgh_bits |= MACH_MSGH_BITS_SET_PORTS(0, 0, move_send ?
+			MACH_MSG_TYPE_MOVE_SEND : MACH_MSG_TYPE_COPY_SEND);
+	_dispatch_kvoucher_debug("msg[%p] set %s", kv, msg, move_send ?
+			"move-send" : "copy-send");
+	_dispatch_voucher_debug_machport(kv);
+	return true;
+}
+
+// Attach the given voucher's kvoucher (or the task voucher when
+// `voucher` is NULL) to an outgoing message as a copy-send right.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher)
+{
+	if (MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return false;
+	mach_voucher_t kv;
+	if (voucher) {
+		kv = _voucher_get_mach_voucher(voucher);
+	} else {
+		kv = _voucher_get_task_mach_voucher();
+	}
+	return _voucher_mach_msg_set_mach_voucher(msg, kv, false);
+}
+
+// Detach and return the kernel voucher from a received message,
+// clearing the voucher bits. Caller owns the returned reference.
+DISPATCH_ALWAYS_INLINE
+static inline mach_voucher_t
+_voucher_mach_msg_get(mach_msg_header_t *msg)
+{
+	if (!MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return MACH_VOUCHER_NULL;
+	mach_voucher_t kv = msg->msgh_voucher_port;
+	msg->msgh_voucher_port = MACH_VOUCHER_NULL;
+	msg->msgh_bits &= (mach_msg_bits_t)~MACH_MSGH_BITS_VOUCHER_MASK;
+	return kv;
+}
+
+// Strip the voucher from a message; for a MOVE_SEND disposition either
+// hand the right back to the caller (move_send) or release it.
+DISPATCH_ALWAYS_INLINE
+static inline mach_voucher_t
+_voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send)
+{
+	mach_msg_bits_t kvbits = MACH_MSGH_BITS_VOUCHER(msg->msgh_bits);
+	mach_voucher_t kv = msg->msgh_voucher_port, kvm = MACH_VOUCHER_NULL;
+	if ((kvbits == MACH_MSG_TYPE_COPY_SEND ||
+			kvbits == MACH_MSG_TYPE_MOVE_SEND) && kv) {
+		_dispatch_kvoucher_debug("msg[%p] clear %s", kv, msg, move_send ?
+				"move-send" : "copy-send");
+		_dispatch_voucher_debug_machport(kv);
+		if (kvbits == MACH_MSG_TYPE_MOVE_SEND) {
+			// <rdar://problem/15694142> return/drop received or pseudo-received
+			// voucher reference (e.g. due to send failure).
+			if (move_send) {
+				kvm = kv;
+			} else {
+				_voucher_dealloc_mach_voucher(kv);
+			}
+		}
+		msg->msgh_voucher_port = MACH_VOUCHER_NULL;
+		msg->msgh_bits &= (mach_msg_bits_t)~MACH_MSGH_BITS_VOUCHER_MASK;
+	}
+	return kvm;
+}
+
+#pragma mark -
+#pragma mark dispatch_continuation_t + voucher_t
+
+#if DISPATCH_USE_KDEBUG_TRACE
+// Emit a kdebug tracepoint associating a kernel voucher with a container
+// object (a continuation or a mach message). No-op for a null voucher.
+// On LP64 the container pointer is split across two 32-bit trace args.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_voucher_ktrace(int code, natural_t voucher, void *container)
+{
+ if (!voucher) return;
+ __kdebug_trace(APPSDBG_CODE(DBG_MACH_CHUD, (0xfac >> 2)) | DBG_FUNC_NONE,
+ code, (int)voucher, (int)(uintptr_t)container,
+#ifdef __LP64__
+ (int)((uintptr_t)container >> 32)
+#else
+ 0
+#endif
+ );
+}
+// Trace codes: 0x1/0x2 = continuation push/pop, 0x3/0x4 = mach msg push/pop.
+#define _dispatch_voucher_ktrace_dc_push(dc) \
+ _dispatch_voucher_ktrace(0x1, (dc)->dc_voucher ? \
+ (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc))
+#define _dispatch_voucher_ktrace_dc_pop(dc) \
+ _dispatch_voucher_ktrace(0x2, (dc)->dc_voucher ? \
+ (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc))
+#define _dispatch_voucher_ktrace_dmsg_push(dmsg) \
+ _dispatch_voucher_ktrace(0x3, (dmsg)->dmsg_voucher ? \
+ (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg))
+#define _dispatch_voucher_ktrace_dmsg_pop(dmsg) \
+ _dispatch_voucher_ktrace(0x4, (dmsg)->dmsg_voucher ? \
+ (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg))
+#else
+// Tracing disabled: all ktrace helpers compile away to nothing.
+#define _dispatch_voucher_ktrace_dc_push(dc)
+#define _dispatch_voucher_ktrace_dc_pop(dc)
+#define _dispatch_voucher_ktrace_dmsg_push(dmsg)
+#define _dispatch_voucher_ktrace_dmsg_pop(dmsg)
+#endif // DISPATCH_USE_KDEBUG_TRACE
+
+// Attach voucher state to a continuation at enqueue time.
+// - DISPATCH_BLOCK_HAS_VOUCHER: mark the continuation via
+//   DISPATCH_OBJ_HAS_VOUCHER_BIT in the vtable bits; no voucher copy is
+//   stored here (the voucher travels with the block — TODO confirm against
+//   the block-create path).
+// - DISPATCH_BLOCK_NO_VOUCHER: store no voucher at all.
+// - otherwise: capture a reference to the current thread's voucher.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_continuation_voucher_set(dispatch_continuation_t dc,
+ dispatch_block_flags_t flags)
+{
+ unsigned long bits = (unsigned long)dc->do_vtable;
+ voucher_t v = NULL;
+
+ if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
+ bits |= DISPATCH_OBJ_HAS_VOUCHER_BIT;
+ } else if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) {
+ v = _voucher_copy();
+ }
+ dc->do_vtable = (void*)bits;
+ dc->dc_voucher = v;
+ _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc);
+ _dispatch_voucher_ktrace_dc_push(dc);
+}
+
+// Adopt the continuation's voucher (and priority) on the executing thread
+// at invoke time. When DISPATCH_OBJ_HAS_VOUCHER_BIT is set the voucher is
+// not stored in dc_voucher, so DISPATCH_NO_VOUCHER is passed through
+// unchanged; otherwise ownership of dc_voucher transfers to the adopt call
+// and the field is cleared.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc)
+{
+ unsigned long bits = (unsigned long)dc->do_vtable;
+ voucher_t v = DISPATCH_NO_VOUCHER;
+ if (!(bits & DISPATCH_OBJ_HAS_VOUCHER_BIT)) {
+ _dispatch_voucher_ktrace_dc_pop(dc);
+ _dispatch_voucher_debug("continuation[%p] adopt", dc->dc_voucher, dc);
+ v = dc->dc_voucher;
+ dc->dc_voucher = NULL;
+ }
+ _dispatch_adopt_priority_and_replace_voucher(dc->dc_priority, v, 0);
+}
+
+#pragma mark -
+#pragma mark _voucher_activity_heap
+
+typedef uint32_t _voucher_atm_subid_t;
+// 64-bucket hash tables for activities and ATMs.
+static const size_t _voucher_activity_hash_bits = 6;
+static const size_t _voucher_activity_hash_size =
+ 1 << _voucher_activity_hash_bits;
+// Multiplicative hashing with constant 2654435761 (2^32/phi), keeping the
+// top hash_bits of the product.
+#define VACTID_HASH(x) ((((uint32_t)((x) >> 32) + (uint32_t)(x)) * \
+ 2654435761u) >> (32-_voucher_activity_hash_bits))
+#define VATMID_HASH(x) \
+ (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits))
+// Activity IDs pack an ATM id in the high 32 bits and a sub-id in the low
+// 32 bits; the macros below split/combine the two halves.
+#define VATMID2ACTID(x) ((uint64_t)(x) << 32)
+#define VACTID_BASEID(x) ((uint64_t)(x) & (((uint64_t)UINT32_MAX) << 32))
+#define VACTID_SUBID(x) ((uint32_t)(x))
+#define VATM_ACTID(vatm, subid) (VATMID2ACTID((vatm)->vatm_id) + (subid))
+#define VATM_SUBID_BITS2MAX(bits) ((1u << (bits)) - 1)
+#define VATM_SUBID_MAXBITS (32)
+#define VATM_SUBID_MAX (ATM_SUBAID32_MAX)
+#define MAILBOX_OFFSET_UNSET UINT64_MAX
+
+// Heap-wide buffer accounting: one bit per buffer, packed into words of
+// _voucher_activity_bitmap_base_t.
+static const size_t _voucher_activity_buffers_per_heap = 512;
+typedef unsigned long _voucher_activity_bitmap_base_t;
+static const size_t _voucher_activity_bits_per_bitmap_base_t =
+ 8 * sizeof(_voucher_activity_bitmap_base_t);
+static const size_t _voucher_activity_bitmaps_per_heap =
+ _voucher_activity_buffers_per_heap /
+ _voucher_activity_bits_per_bitmap_base_t;
+typedef _voucher_activity_bitmap_base_t
+ _voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap];
+
+// Process-wide activity heap metadata. The vam_pad* arrays and the
+// "// cacheline" markers below deliberately place the bitmaps, the lock
+// cluster and the hash tables on separate cachelines — do not reorder or
+// repack these fields.
+typedef struct _voucher_activity_metadata_s {
+ _voucher_activity_buffer_t vam_kernel_metadata;
+ _voucher_activity_buffer_t vam_client_metadata;
+ struct _voucher_activity_self_metadata_s vam_self_metadata;
+#if __LP64__
+ uintptr_t vam_pad0[7];
+#else
+ uintptr_t vam_pad0[15];
+#endif
+ // cacheline
+ _voucher_activity_bitmap_t volatile vam_atm_mbox_bitmap;
+ _voucher_activity_bitmap_t volatile vam_buffer_bitmap;
+ _voucher_activity_bitmap_t volatile vam_pressure_locked_bitmap;
+ // cacheline
+ _voucher_atm_subid_t vam_base_atm_subid;
+ _voucher_atm_subid_t vam_base_atm_subid_max;
+ _voucher_atm_subid_t vam_nested_atm_subid;
+ _voucher_atm_t vam_default_activity_atm;
+ _voucher_atm_t volatile vam_base_atm;
+ voucher_activity_id_t volatile vam_nested_atm_id;
+#if __LP64__
+ uintptr_t vam_pad2[3];
+#else
+ uintptr_t vam_pad2[1];
+#endif
+ _voucher_activity_lock_s vam_base_atm_lock;
+ _voucher_activity_lock_s vam_nested_atm_lock;
+ _voucher_activity_lock_s vam_atms_lock;
+ _voucher_activity_lock_s vam_activities_lock;
+ // cacheline
+ TAILQ_HEAD(, _voucher_atm_s) vam_atms[_voucher_activity_hash_size];
+ TAILQ_HEAD(, _voucher_activity_s)
+ vam_activities[_voucher_activity_hash_size];
+} *_voucher_activity_metadata_t;
+
+#pragma mark -
+#pragma mark _voucher_activity_t
+
+// Slow path: allocate/advance a buffer when the fast per-buffer slot
+// reservation fails (defined out of line).
+_voucher_activity_tracepoint_t _voucher_activity_tracepoint_get_slow(
+ unsigned int slots);
+extern _voucher_activity_t _voucher_activity_default;
+extern voucher_activity_mode_t _voucher_activity_mode;
+
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+#define _dispatch_voucher_activity_debug(msg, act, ...) \
+ _dispatch_debug("activity[%p] <0x%x>: atm[%p] <%lld>: " msg, (act), \
+ (act) ? VACTID_SUBID((act)->va_id) : 0, (act) ? (act)->va_atm : NULL, \
+ (act) && (act)->va_atm ? (act)->va_atm->vatm_id : 0, ##__VA_ARGS__)
+#define _dispatch_voucher_atm_debug(msg, atm, ...) \
+ _dispatch_debug("atm[%p] <%lld> kvoucher[0x%08x]: " msg, (atm), \
+ (atm) ? (atm)->vatm_id : 0, (atm) ? (atm)->vatm_kvoucher : 0, \
+ ##__VA_ARGS__)
+#else
+#define _dispatch_voucher_activity_debug(msg, act, ...)
+#define _dispatch_voucher_atm_debug(msg, atm, ...)
+#endif
+
+// Timestamp source for tracepoints: mach_approximate_time() where
+// available; falls back to mach_absolute_time() when building for an
+// iOS simulator hosted on OS X older than 10.10.
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_voucher_activity_timestamp(void)
+{
+#if TARGET_IPHONE_SIMULATOR && \
+ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000
+ return mach_absolute_time();
+#else
+ return mach_approximate_time();
+#endif
+}
+
+// Current thread's unique 64-bit id, via pthread_threadid_np(NULL, ...).
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_voucher_activity_thread_id(void)
+{
+ uint64_t thread_id;
+ pthread_threadid_np(NULL, &thread_id); // TODO: 15923074: use TSD thread_id
+ return thread_id;
+}
+
+// Lock-free reservation of `slots` consecutive tracepoint slots in a
+// buffer: atomically bumps vabh_next_tracepoint_idx and, if the new index
+// still fits in the buffer, returns a pointer to the first reserved slot
+// (pointer arithmetic is in tracepoint-sized units after the cast).
+// Returns NULL when the buffer is exhausted; callers then take the slow
+// path to get a fresh buffer.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_tracepoint_t
+_voucher_activity_buffer_tracepoint_get(_voucher_activity_buffer_header_t vab,
+ unsigned int slots)
+{
+ uint32_t idx = dispatch_atomic_add2o(vab, vabh_next_tracepoint_idx,
+ slots, relaxed);
+ if (idx <= _voucher_activity_tracepoints_per_buffer) {
+ return (_voucher_activity_tracepoint_t)vab + (idx - slots);
+ }
+ return NULL;
+}
+
+// Reserve tracepoint slots in the given activity's current buffer;
+// NULL if there is no activity, no current buffer, or the buffer is full.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_tracepoint_t
+_voucher_activity_tracepoint_get_from_activity(_voucher_activity_t va,
+ unsigned int slots)
+{
+ _voucher_activity_buffer_header_t vab = va ? va->va_current_buffer : NULL;
+ return vab ? _voucher_activity_buffer_tracepoint_get(vab, slots) : NULL;
+}
+
+// Reserve tracepoint slots for the calling thread: uses the activity of
+// the thread's current voucher, falling back to the default activity.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_tracepoint_t
+_voucher_activity_tracepoint_get(unsigned int slots)
+{
+ _voucher_activity_t va;
+ voucher_t v = _voucher_get();
+ va = v && v->v_activity ? v->v_activity : _voucher_activity_default;
+ return _voucher_activity_tracepoint_get_from_activity(va, slots);
+}
+
+// Fill in a reserved tracepoint slot. A zero location defaults to the
+// caller's return address. Returns the timestamp stored in the slot.
+// NOTE: the field stores below are one comma expression ending at
+// vat_location — keep the commas if editing.
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_voucher_activity_tracepoint_init(_voucher_activity_tracepoint_t vat,
+ uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location)
+{
+ if (!location) location = (uint64_t)__builtin_return_address(0);
+ uint64_t timestamp = _voucher_activity_timestamp();
+ vat->vat_flags = _voucher_activity_trace_flag_tracepoint,
+ vat->vat_type = type,
+ vat->vat_namespace = code_namespace,
+ vat->vat_code = code,
+ vat->vat_timestamp = timestamp,
+ vat->vat_thread = _voucher_activity_thread_id(),
+ vat->vat_location = location;
+ return timestamp;
+}
+
+// As above, but unpacking type / namespace / code from a packed 64-bit
+// trace id (type in the high byte, namespace below it, code in the low
+// 32 bits).
+DISPATCH_ALWAYS_INLINE
+static inline uint64_t
+_voucher_activity_tracepoint_init_with_id(_voucher_activity_tracepoint_t vat,
+ voucher_activity_trace_id_t trace_id, uint64_t location)
+{
+ uint8_t type = (uint8_t)(trace_id >> _voucher_activity_trace_id_type_shift);
+ uint8_t cns = (uint8_t)(trace_id >>
+ _voucher_activity_trace_id_code_namespace_shift);
+ uint32_t code = (uint32_t)trace_id;
+ return _voucher_activity_tracepoint_init(vat, type, cns, code, location);
+}
+
+// True when all type bits of `type` are set in the trace id's type field.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_activity_trace_id_is_subtype(voucher_activity_trace_id_t trace_id,
+ uint8_t type)
+{
+ voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 0, 0);
+ return (trace_id & type_id) == type_id;
+}
+// Macro deliberately shadows the function above so call sites can pass a
+// bare suffix (e.g. `release`, `debug`) that gets pasted onto
+// voucher_activity_tracepoint_type_.
+#define _voucher_activity_trace_id_is_subtype(trace_id, name) \
+ _voucher_activity_trace_id_is_subtype(trace_id, \
+ voucher_activity_tracepoint_type_ ## name)
+
+// Whether tracepoints with this id should be recorded under the current
+// activity mode: release mode records only release-type points; stream
+// and debug modes record debug- and release-type points. Any other mode
+// (including disabled) records nothing.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_activity_trace_id_enabled(voucher_activity_trace_id_t trace_id)
+{
+ switch (_voucher_activity_mode) {
+ case voucher_activity_mode_release:
+ return _voucher_activity_trace_id_is_subtype(trace_id, release);
+ case voucher_activity_mode_stream:
+ case voucher_activity_mode_debug:
+ return _voucher_activity_trace_id_is_subtype(trace_id, debug) ||
+ _voucher_activity_trace_id_is_subtype(trace_id, release);
+ }
+ return false;
+}
+
+// Convenience wrapper: enablement check from a bare tracepoint type.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_activity_trace_type_enabled(uint8_t type)
+{
+ voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 0, 0);
+ return _voucher_activity_trace_id_enabled(type_id);
+}
+
+// Fast global kill switch for the activity tracing machinery.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_activity_disabled(void)
+{
+ return slowpath(_voucher_activity_mode == voucher_activity_mode_disable);
+}
+
+// Record a tracepoint with four payload words. Fast-path only: returns
+// NULL (drops the trace) if the type is not enabled or no slot is
+// available in the current buffer.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_tracepoint_t
+_voucher_activity_trace_args_inline(uint8_t type, uint8_t code_namespace,
+ uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
+ uintptr_t arg4)
+{
+ if (!_voucher_activity_trace_type_enabled(type)) return NULL;
+ _voucher_activity_tracepoint_t vat;
+ vat = _voucher_activity_tracepoint_get(1);
+ if (!vat) return NULL;
+ _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0);
+ vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args;
+ vat->vat_data[0] = arg1;
+ vat->vat_data[1] = arg2;
+ vat->vat_data[2] = arg3;
+ vat->vat_data[3] = arg4;
+ return vat;
+}
+
+// Record a tracepoint from a packed trace id; fast path only (drops the
+// trace when the current buffer has no free slot).
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_tracepoint_t
+_voucher_activity_trace_with_id_inline(voucher_activity_trace_id_t trace_id)
+{
+ _voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1);
+ if (!vat) return NULL;
+ _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0);
+ return vat;
+}
+
+// As above, but falls back to the slow path to obtain a fresh buffer
+// before giving up.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_tracepoint_t
+_voucher_activity_trace_with_id(voucher_activity_trace_id_t trace_id)
+{
+ _voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1);
+ if (!vat) vat = _voucher_activity_tracepoint_get_slow(1);
+ if (!vat) return NULL;
+ _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0);
+ return vat;
+}
+
+// Record an IPC tracepoint carrying a copy of the Mach message header in
+// the tracepoint payload. Skipped for vouchers without an explicit
+// activity (the default activity is never used for IPC), when
+// release-type tracing is off, or when no slot is available.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_activity_trace_msg(voucher_t v, mach_msg_header_t *msg, uint32_t code)
+{
+ if (!v || !v->v_activity) return; // Don't use default activity for IPC
+ const uint8_t type = voucher_activity_tracepoint_type_release;
+ const uint8_t code_namespace = _voucher_activity_tracepoint_namespace_ipc;
+ if (!_voucher_activity_trace_type_enabled(type)) return;
+ _voucher_activity_tracepoint_t vat;
+ vat = _voucher_activity_tracepoint_get_from_activity(v->v_activity, 1);
+ if (!vat) return; // TODO: slowpath ?
+ _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0);
+ vat->vat_flags |= _voucher_activity_trace_flag_libdispatch;
+#if __has_extension(c_static_assert)
+ // Guard the memcpy below: the header must fit in the payload words.
+ _Static_assert(sizeof(mach_msg_header_t) <= sizeof(vat->vat_data),
+ "mach_msg_header_t too large");
+#endif
+ memcpy(vat->vat_data, msg, sizeof(mach_msg_header_t));
+}
+// Macro shadows the function so call sites pass a bare ipc code suffix
+// that gets pasted onto _voucher_activity_tracepoint_namespace_ipc_.
+#define _voucher_activity_trace_msg(v, msg, type) \
+ _voucher_activity_trace_msg(v, msg, \
+ _voucher_activity_tracepoint_namespace_ipc_ ## type)
+
+#endif // !(USE_OBJC && __OBJC2__)
+
+#else // VOUCHER_USE_MACH_VOUCHER
+
+#pragma mark -
+#pragma mark Simulator / vouchers disabled
+
+#define _dispatch_voucher_debug(msg, v, ...)
+#define _dispatch_kvoucher_debug(msg, kv, ...)
+
+// Build without Mach voucher support (VOUCHER_USE_MACH_VOUCHER == 0):
+// every voucher operation below is an inert stub preserving the API shape.
+
+// Retain is the identity: there is no refcount to bump.
+DISPATCH_ALWAYS_INLINE
+static inline voucher_t
+_voucher_retain(voucher_t voucher)
+{
+ return voucher;
+}
+
+// Release is a no-op.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_release(voucher_t voucher)
+{
+ (void)voucher;
+}
+
+// No thread ever carries a voucher.
+DISPATCH_ALWAYS_INLINE
+static inline voucher_t
+_voucher_get(void)
+{
+ return NULL;
+}
+
+// Copying the (nonexistent) current voucher yields NULL.
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_voucher_copy(void)
+{
+ return NULL;
+}
+
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_voucher_copy_without_importance(void)
+{
+ return NULL;
+}
+
+// No kernel voucher exists to swap in.
+DISPATCH_ALWAYS_INLINE
+static inline mach_voucher_t
+_voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher)
+{
+ (void)ov; (void)voucher;
+ return MACH_VOUCHER_NULL;
+}
+
+// Adopt returns its argument unchanged (nothing is installed).
+DISPATCH_ALWAYS_INLINE
+static inline voucher_t
+_voucher_adopt(voucher_t voucher)
+{
+ return voucher;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_replace(voucher_t voucher)
+{
+ (void)voucher;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_voucher_clear(void)
+{
+}
+
+// No voucher carries a priority; 0 means "no priority".
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_voucher_get_priority(voucher_t voucher)
+{
+ (void)voucher;
+ return 0;
+}
+
+// Stub for builds without Mach voucher support: never attaches a kernel
+// voucher to the message header; always reports that nothing was set.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_mach_msg_set_mach_voucher(mach_msg_header_t *msg, mach_voucher_t kv,
+ bool move_send)
+{
+ (void)msg; (void)kv; (void)move_send;
+ return false;
+}
+
+// Stub for builds without Mach voucher support: no voucher is ever
+// attached; always reports that nothing was set.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher)
+{
+ (void)msg; (void)voucher;
+ return false;
+}
+
+// Stub for builds without Mach voucher support: a received message never
+// carries a voucher.
+DISPATCH_ALWAYS_INLINE
+static inline mach_voucher_t
+_voucher_mach_msg_get(mach_msg_header_t *msg)
+{
+ (void)msg;
+ // Use MACH_VOUCHER_NULL (not bare 0) for consistency with the other
+ // disabled stubs returning mach_voucher_t.
+ return MACH_VOUCHER_NULL;
+}
+
+// Stub: nothing to clear; no reference is ever transferred.
+DISPATCH_ALWAYS_INLINE
+static inline mach_voucher_t
+_voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send)
+{
+ (void)msg; (void)move_send;
+ return MACH_VOUCHER_NULL;
+}
+
+// Tracing helpers compile away when vouchers are disabled.
+#define _dispatch_voucher_ktrace_dmsg_push(dmsg)
+#define _dispatch_voucher_ktrace_dmsg_pop(dmsg)
+
+// Continuations carry no voucher state in this configuration.
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_continuation_voucher_set(dispatch_continuation_t dc,
+ dispatch_block_flags_t flags)
+{
+ (void)dc; (void)flags;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc)
+{
+ (void)dc;
+}
+
+#define _voucher_activity_trace_msg(v, msg, type)
+
+// Activity tracing is always off without voucher support.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_activity_disabled(void)
+{
+ return true;
+}
+
+#endif // VOUCHER_USE_MACH_VOUCHER
+
+#endif /* __DISPATCH_VOUCHER_INTERNAL__ */
//
BUILD_VARIANTS = normal
-INSTALL_PATH = /usr/lib/system/introspection
-INSTALL_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/lib/system/introspection
+INSTALL_PATH_ACTUAL = /usr/lib/system/introspection
+
GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1
CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection
OTHER_LDFLAGS = $(OTHER_LDFLAGS) -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/libdispatch.interposable
--- /dev/null
+#
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# @APPLE_APACHE_LICENSE_HEADER_START@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# @APPLE_APACHE_LICENSE_HEADER_END@
+#
+
#
-# Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+# Copyright (c) 2013-2014 Apple Inc. All rights reserved.
#
# @APPLE_APACHE_LICENSE_HEADER_START@
#
# @APPLE_APACHE_LICENSE_HEADER_END@
#
-_OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable
-_OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable
-_OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable
-_OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable
-_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable
-_OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable
-_OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable
-_OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable
-_OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable
-_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable
-_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable
-_OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable
-_OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable
-_OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable
-
__dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap
+__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure
+__dispatch_queue_attrs __dispatch_queue_attr_concurrent
__OS_dispatch_operation_vtable
_OBJC_CLASS_$_OS_dispatch_disk
__OS_dispatch_disk_vtable
-# non-dispatch_object_t classes
+# os_object_t classes
_OBJC_CLASS_$_OS_object
+_OBJC_CLASS_$_OS_voucher
+#_OBJC_CLASS_$_OS_voucher_recipe
+# non-os_object_t classes
_OBJC_CLASS_$_OS_dispatch_data
_OBJC_CLASS_$_OS_dispatch_data_empty
# metaclasses
_OBJC_METACLASS_$_OS_dispatch_operation
_OBJC_METACLASS_$_OS_dispatch_disk
_OBJC_METACLASS_$_OS_object
+_OBJC_METACLASS_$_OS_voucher
+_OBJC_METACLASS_$_OS_voucher_recipe
_OBJC_METACLASS_$_OS_dispatch_data
_OBJC_METACLASS_$_OS_dispatch_data_empty
//
#include "<DEVELOPER_DIR>/Makefiles/CoreOS/Xcode/BSD.xcconfig"
-SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator
+#include "<DEVELOPER_DIR>/AppleInternal/XcodeConfig/SimulatorSupport.xcconfig"
+
+// Set INSTALL_PATH[sdk=macosx*] when SimulatorSupport.xcconfig is unavailable
+INSTALL_PATH[sdk=macosx*] = $(INSTALL_PATH_ACTUAL)
+
+SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator iphoneosnano iphonesimulatornano
ARCHS[sdk=iphonesimulator*] = $(NATIVE_ARCH_32_BIT) // Override BSD.xcconfig ARCHS <rdar://problem/9303721>
PRODUCT_NAME = libdispatch
EXECUTABLE_PREFIX =
-LD_DYLIB_INSTALL_NAME = /usr/lib/system/$(EXECUTABLE_NAME)
-INSTALL_PATH = /usr/lib/system
-INSTALL_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/lib/system
-PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch
-PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/dispatch
-PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch
-PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/dispatch
-OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os
-OS_PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/os
-OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os
-OS_PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/os
-HEADER_SEARCH_PATHS = $(PROJECT_DIR)
+INSTALL_PATH_ACTUAL = /usr/lib/system
+PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/dispatch
+PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/dispatch
+OS_PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/os
+OS_PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/os
+HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/os
LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system
INSTALLHDRS_SCRIPT_PHASE = YES
ALWAYS_SEARCH_USER_PATHS = NO
+USE_HEADERMAP = NO
BUILD_VARIANTS = normal debug profile
ONLY_ACTIVE_ARCH = NO
CLANG_LINK_OBJC_RUNTIME = NO
OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1
GENERATE_PROFILING_CODE = NO
DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION)
-UMBRELLA_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -Wl,-upward-lsystem_asl
+UMBRELLA_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind
UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = -umbrella System -nodefaultlibs -ldyld_sim -lcompiler_rt_sim -lsystem_sim_c -lsystem_sim_blocks -lunwind_sim -Wl,-upward-lSystem
-OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport
+OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_objc.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport
OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto
OBJC_LDFLAGS[arch=i386][sdk=macosx*] =
OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m
+ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases
PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases
-OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(PLATFORM_LDFLAGS)
+OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS)
+OTHER_MIGFLAGS = -novouchers
--- /dev/null
+#
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# @APPLE_APACHE_LICENSE_HEADER_START@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# @APPLE_APACHE_LICENSE_HEADER_END@
+#
+
#
# @APPLE_APACHE_LICENSE_HEADER_END@
#
-
-__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure
--- /dev/null
+#
+# Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+#
+# @APPLE_APACHE_LICENSE_HEADER_START@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# @APPLE_APACHE_LICENSE_HEADER_END@
+#
+
+_OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable
+_OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable
+_OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable
+_OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable
+_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable
+_OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable
+_OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable
+_OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable
+_OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable
+_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable
+_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable
+_OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable
+_OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable
+_OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable
mkdir -p "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" || true
cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
export MIGCOM="$(xcrun -find migcom)"
export PATH="${PLATFORM_DEVELOPER_BIN_DIR}:${DEVELOPER_BIN_DIR}:${PATH}"
for a in ${ARCHS}; do
- xcrun mig -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \
+ xcrun mig ${OTHER_MIGFLAGS} -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \
-sheader "${SCRIPT_OUTPUT_FILE_1}" -user /dev/null \
-server /dev/null "${SCRIPT_INPUT_FILE_0}"
done