#define __OSX_AVAILABLE_STARTING(x, y)
#endif
-#define DISPATCH_API_VERSION 20140804
+#define DISPATCH_API_VERSION 20141121
#ifndef __DISPATCH_BUILDING_DISPATCH__
void *isa = *(void* volatile*)(OS_OBJECT_BRIDGE void*)object;
(void)isa;
}
-#elif defined(__cplusplus)
+#elif defined(__cplusplus) && !defined(__DISPATCH_BUILDING_DISPATCH__)
/*
* Dispatch objects are NOT C++ objects. Nevertheless, we can at least keep C++
* aware of type compatibility.
#define DISPATCH_RETURNS_RETAINED
#endif
+#ifdef __BLOCKS__
/*!
* @typedef dispatch_block_t
*
* function or by sending it a -[copy] message.
*/
typedef void (^dispatch_block_t)(void);
+#endif // __BLOCKS__
__BEGIN_DECLS
void
dispatch_resume(dispatch_object_t object);
+#ifdef __BLOCKS__
/*!
* @function dispatch_wait
*
dispatch_source_t:dispatch_source_testcancel \
)((object))
#endif
+#endif // __BLOCKS__
/*!
* @function dispatch_debug
* is NULL.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
-DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
void
dispatch_queue_set_specific(dispatch_queue_t queue, const void *key,
void *context, dispatch_function_t destructor);
* The context for the specified key or NULL if no context was found.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
-DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT
DISPATCH_NOTHROW
void *
dispatch_queue_get_specific(dispatch_queue_t queue, const void *key);
* The context for the specified key or NULL if no context was found.
*/
__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0)
-DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT
-DISPATCH_NOTHROW
+DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
void *
dispatch_get_specific(const void *key);
*/
DISPATCH_DECL(dispatch_source);
+__BEGIN_DECLS
+
/*!
* @typedef dispatch_source_type_t
*
typedef unsigned long dispatch_source_timer_flags_t;
-__BEGIN_DECLS
/*!
* @function dispatch_source_create
name = libdispatch_Sim;
productName = libdispatch_Sim;
};
+ 4552540A19B1389700B88766 /* libdispatch_tests */ = {
+ isa = PBXAggregateTarget;
+ buildConfigurationList = 4552540B19B1389700B88766 /* Build configuration list for PBXAggregateTarget "libdispatch_tests" */;
+ buildPhases = (
+ );
+ dependencies = (
+ 4552540F19B138B700B88766 /* PBXTargetDependency */,
+ );
+ name = libdispatch_tests;
+ productName = libdispatch_tests;
+ };
C927F35A10FD7F0600C5AB8B /* libdispatch_tools */ = {
isa = PBXAggregateTarget;
buildConfigurationList = C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */;
E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; };
E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; };
E43A710615783F7E0012D38D /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ E43A72501AF85BBC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
+ E43A72841AF85BCB00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
+ E43A72851AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
+ E43A72861AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
+ E43A72871AF85BCD00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
+ E43A72881AF85BE900BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
+ 455253A819B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = DF80F67E10B5C71600FAB5AE;
+ remoteInfo = dispatch_test;
+ };
+ 455253AA19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01C78108E68D400FAA873;
+ remoteInfo = dispatch_apply;
+ };
+ 455253AC19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4EB36CD1088F0B000C33AD4;
+ remoteInfo = dispatch_api;
+ };
+ 455253AE19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CA7108E6C5000FAA873;
+ remoteInfo = dispatch_c99;
+ };
+ 455253B019B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4C72A26115C3F65009F3CE1;
+ remoteInfo = dispatch_cf_main;
+ };
+ 455253B219B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CB9108E6C7200FAA873;
+ remoteInfo = dispatch_deadname;
+ };
+ 455253B419B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CC3108E6CC300FAA873;
+ remoteInfo = dispatch_debug;
+ };
+ 455253B619B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CCC108E6CD400FAA873;
+ remoteInfo = dispatch_group;
+ };
+ 455253B819B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CD5108E6CE300FAA873;
+ remoteInfo = dispatch_overcommit;
+ };
+ 455253BA19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CDE108E6CF300FAA873;
+ remoteInfo = dispatch_pingpong;
+ };
+ 455253BC19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CE7108E6D0500FAA873;
+ remoteInfo = dispatch_plusplus;
+ };
+ 455253BE19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CF0108E6D2900FAA873;
+ remoteInfo = dispatch_priority;
+ };
+ 455253C019B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CF9108E6D3800FAA873;
+ remoteInfo = dispatch_priority2;
+ };
+ 455253C219B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E46D768811D0365F00615518;
+ remoteInfo = dispatch_concur;
+ };
+ 455253C419B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4324AAC12250F0800A3CAD5;
+ remoteInfo = dispatch_context_for_key;
+ };
+ 455253C619B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D02108E6D5600FAA873;
+ remoteInfo = dispatch_proc;
+ };
+ 455253C819B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D0B108E6D6000FAA873;
+ remoteInfo = dispatch_queue_finalizer;
+ };
+ 455253CA19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D14108E6D7300FAA873;
+ remoteInfo = dispatch_read;
+ };
+ 455253CC19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D1D108E6D8B00FAA873;
+ remoteInfo = dispatch_read2;
+ };
+ 455253CE19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D26108E6D9A00FAA873;
+ remoteInfo = dispatch_after;
+ };
+ 455253D019B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D2F108E6DA700FAA873;
+ remoteInfo = dispatch_timer;
+ };
+ 455253D219B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4CE9BC31151AB2A00D710C0;
+ remoteInfo = dispatch_timer_short;
+ };
+ 455253D419B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = 5A2BA66D11D0369E0081FF89;
+ remoteInfo = dispatch_timer_timeout;
+ };
+ 455253D619B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D38108E6DB200FAA873;
+ remoteInfo = dispatch_suspend_timer;
+ };
+ 455253D819B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D41108E6DBF00FAA873;
+ remoteInfo = dispatch_sema;
+ };
+ 455253DA19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D53108E6DDC00FAA873;
+ remoteInfo = dispatch_timer_bit31;
+ };
+ 455253DC19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D5C108E6E0400FAA873;
+ remoteInfo = dispatch_timer_bit63;
+ };
+ 455253DE19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D74108E6E4B00FAA873;
+ remoteInfo = dispatch_timer_set_time;
+ };
+ 455253E019B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D7D108E6E6600FAA873;
+ remoteInfo = dispatch_drift;
+ };
+ 455253E219B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D86108E6E7200FAA873;
+ remoteInfo = dispatch_starfish;
+ };
+ 455253E419B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D8F108E6E7E00FAA873;
+ remoteInfo = dispatch_cascade;
+ };
+ 455253E619B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01D98108E6E9500FAA873;
+ remoteInfo = dispatch_readsync;
+ };
+ 455253E819B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4E24A0710E0020B00C3C692;
+ remoteInfo = dispatch_sync_on_main;
+ };
+ 455253EA19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4E24A1810E0021C00C3C692;
+ remoteInfo = dispatch_sync_gc;
+ };
+ 455253EC19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4E24C3210E01DF800C3C692;
+ remoteInfo = dispatch_apply_gc;
+ };
+ 455253EE19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = 5AAB464A10D330C5004407EA;
+ remoteInfo = dispatch_data;
+ };
+ 455253F019B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = 5A11B20E10DB124C000FAD7A;
+ remoteInfo = dispatch_io;
+ };
+ 455253F219B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = 5AA78BAB114821D0009A233B;
+ remoteInfo = dispatch_io_net;
+ };
+ 455253F419B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = 5AF00EF51135FA1300CA14CE;
+ remoteInfo = dispatch_vm;
+ };
+ 455253F619B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4E33EB6121C9C9400F4B71C;
+ remoteInfo = dispatch_vnode;
+ };
+ 455253F819B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = C9E804AF1963EC5F00C2B970;
+ remoteInfo = dispatch_qos;
+ };
+ 455253FA19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = C9B1FF84113F458A00843414;
+ remoteInfo = dispatch_select;
+ };
+ 455253FC19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = C985912B143D584100718FE3;
+ remoteInfo = dispatch_transform;
+ };
+ 455253FE19B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01DA1108E6EE000FAA873;
+ remoteInfo = nsoperation;
+ };
+ 4552540019B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D01CB0108E6C6300FAA873;
+ remoteInfo = cffd;
+ };
+ 4552540219B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E4D020B4108F73E000FAA873;
+ remoteInfo = bench;
+ };
+ 4552540419B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E454823616C1D8E50042EC2D;
+ remoteInfo = jsgc_bench;
+ };
+ 4552540619B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E454824516C1F0EF0042EC2D;
+ remoteInfo = async_bench;
+ };
+ 4552540819B1384900B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 2;
+ remoteGlobalIDString = E454824F16C1F0FE0042EC2D;
+ remoteInfo = apply_bench;
+ };
+ 4552540E19B138B700B88766 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ proxyType = 1;
+ remoteGlobalIDString = E4D01DC5108E708E00FAA873;
+ remoteInfo = all;
+ };
C927F36610FD7F1000C5AB8B /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */;
2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = "<group>"; };
2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = "<group>"; };
2BE17C6318EA305E002CA4E8 /* layout_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = layout_private.h; sourceTree = "<group>"; };
+ 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = libdispatchtest.xcodeproj; path = tests/libdispatchtest.xcodeproj; sourceTree = "<group>"; };
5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = "<group>"; };
5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = "<group>"; };
E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = "<group>"; };
E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = "<group>"; };
E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = "<group>"; };
+ E43A724F1AF85BBC00BAA921 /* block.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block.cpp; sourceTree = "<group>"; };
E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = "<group>"; };
E44757D917F4572600B82CA1 /* inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_internal.h; sourceTree = "<group>"; };
E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = "<group>"; };
08FB7795FE84155DC02AAC07 /* Source */,
C6A0FF2B0290797F04C91782 /* Documentation */,
1AB674ADFE9D54B511CA2CBB /* Products */,
+ C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */,
+ 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */,
);
indentWidth = 4;
name = libdispatch;
2BBF5A62154B64F5002B20F9 /* allocator.c */,
9676A0E00F3E755D00713ADB /* apply.c */,
965CD6340F3E806200D4E28D /* benchmark.c */,
+ E43A724F1AF85BBC00BAA921 /* block.cpp */,
5AAB45BF10D30B79004407EA /* data.c */,
E420866F16027AE500EEE210 /* data.m */,
E44EBE3B1251659900645D88 /* init.c */,
name = Products;
sourceTree = "<group>";
};
+ 4552536F19B1384900B88766 /* Products */ = {
+ isa = PBXGroup;
+ children = (
+ 455253A919B1384900B88766 /* libdispatch_test.a */,
+ 455253AB19B1384900B88766 /* dispatch_apply */,
+ 455253AD19B1384900B88766 /* dispatch_api */,
+ 455253AF19B1384900B88766 /* dispatch_c99 */,
+ 455253B119B1384900B88766 /* dispatch_cf_main */,
+ 455253B319B1384900B88766 /* dispatch_deadname */,
+ 455253B519B1384900B88766 /* dispatch_debug */,
+ 455253B719B1384900B88766 /* dispatch_group */,
+ 455253B919B1384900B88766 /* dispatch_overcommit */,
+ 455253BB19B1384900B88766 /* dispatch_pingpong */,
+ 455253BD19B1384900B88766 /* dispatch_plusplus */,
+ 455253BF19B1384900B88766 /* dispatch_priority */,
+ 455253C119B1384900B88766 /* dispatch_priority2 */,
+ 455253C319B1384900B88766 /* dispatch_concur */,
+ 455253C519B1384900B88766 /* dispatch_context_for_key */,
+ 455253C719B1384900B88766 /* dispatch_proc */,
+ 455253C919B1384900B88766 /* dispatch_queue_finalizer */,
+ 455253CB19B1384900B88766 /* dispatch_read */,
+ 455253CD19B1384900B88766 /* dispatch_read2 */,
+ 455253CF19B1384900B88766 /* dispatch_after */,
+ 455253D119B1384900B88766 /* dispatch_timer */,
+ 455253D319B1384900B88766 /* dispatch_timer_short */,
+ 455253D519B1384900B88766 /* dispatch_timer_timeout */,
+ 455253D719B1384900B88766 /* dispatch_suspend_timer */,
+ 455253D919B1384900B88766 /* dispatch_sema */,
+ 455253DB19B1384900B88766 /* dispatch_timer_bit31 */,
+ 455253DD19B1384900B88766 /* dispatch_timer_bit63 */,
+ 455253DF19B1384900B88766 /* dispatch_timer_set_time */,
+ 455253E119B1384900B88766 /* dispatch_drift */,
+ 455253E319B1384900B88766 /* dispatch_starfish */,
+ 455253E519B1384900B88766 /* dispatch_cascade */,
+ 455253E719B1384900B88766 /* dispatch_readsync */,
+ 455253E919B1384900B88766 /* dispatch_sync_on_main */,
+ 455253EB19B1384900B88766 /* dispatch_sync_gc */,
+ 455253ED19B1384900B88766 /* dispatch_apply_gc */,
+ 455253EF19B1384900B88766 /* dispatch_data */,
+ 455253F119B1384900B88766 /* dispatch_io */,
+ 455253F319B1384900B88766 /* dispatch_io_net */,
+ 455253F519B1384900B88766 /* dispatch_vm */,
+ 455253F719B1384900B88766 /* dispatch_vnode */,
+ 455253F919B1384900B88766 /* dispatch_qos */,
+ 455253FB19B1384900B88766 /* dispatch_select */,
+ 455253FD19B1384900B88766 /* dispatch_transform */,
+ 455253FF19B1384900B88766 /* nsoperation */,
+ 4552540119B1384900B88766 /* cffd */,
+ 4552540319B1384900B88766 /* bench */,
+ 4552540519B1384900B88766 /* jsgc_bench */,
+ 4552540719B1384900B88766 /* async_bench */,
+ 4552540919B1384900B88766 /* apply_bench */,
+ );
+ name = Products;
+ sourceTree = "<group>";
+ };
C6A0FF2B0290797F04C91782 /* Documentation */ = {
isa = PBXGroup;
children = (
E40041E4125E71150022B135 /* xcodeconfig */,
E49F259C125D664F0057C971 /* xcodescripts */,
E47D6BCA125F10F70070D91C /* resolver */,
- C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */,
);
name = "Build Support";
sourceTree = "<group>";
isa = PBXProject;
attributes = {
BuildIndependentTargetsInParallel = YES;
- LastUpgradeCheck = 0600;
+ LastUpgradeCheck = 0700;
};
buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */;
compatibilityVersion = "Xcode 3.2";
ProductGroup = C927F36010FD7F1000C5AB8B /* Products */;
ProjectRef = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */;
},
+ {
+ ProductGroup = 4552536F19B1384900B88766 /* Products */;
+ ProjectRef = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */;
+ },
);
projectRoot = "";
targets = (
E46DBC1A14EE10C80001F9F6 /* libdispatch static */,
3F3C9326128E637B0042B1F7 /* libdispatch_Sim */,
C927F35A10FD7F0600C5AB8B /* libdispatch_tools */,
+ 4552540A19B1389700B88766 /* libdispatch_tests */,
);
};
/* End PBXProject section */
/* Begin PBXReferenceProxy section */
+ 455253A919B1384900B88766 /* libdispatch_test.a */ = {
+ isa = PBXReferenceProxy;
+ fileType = archive.ar;
+ path = libdispatch_test.a;
+ remoteRef = 455253A819B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253AB19B1384900B88766 /* dispatch_apply */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_apply;
+ remoteRef = 455253AA19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253AD19B1384900B88766 /* dispatch_api */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_api;
+ remoteRef = 455253AC19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253AF19B1384900B88766 /* dispatch_c99 */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_c99;
+ remoteRef = 455253AE19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253B119B1384900B88766 /* dispatch_cf_main */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_cf_main;
+ remoteRef = 455253B019B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253B319B1384900B88766 /* dispatch_deadname */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_deadname;
+ remoteRef = 455253B219B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253B519B1384900B88766 /* dispatch_debug */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_debug;
+ remoteRef = 455253B419B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253B719B1384900B88766 /* dispatch_group */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_group;
+ remoteRef = 455253B619B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253B919B1384900B88766 /* dispatch_overcommit */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_overcommit;
+ remoteRef = 455253B819B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253BB19B1384900B88766 /* dispatch_pingpong */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_pingpong;
+ remoteRef = 455253BA19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253BD19B1384900B88766 /* dispatch_plusplus */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_plusplus;
+ remoteRef = 455253BC19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253BF19B1384900B88766 /* dispatch_priority */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_priority;
+ remoteRef = 455253BE19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253C119B1384900B88766 /* dispatch_priority2 */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_priority2;
+ remoteRef = 455253C019B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253C319B1384900B88766 /* dispatch_concur */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_concur;
+ remoteRef = 455253C219B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253C519B1384900B88766 /* dispatch_context_for_key */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_context_for_key;
+ remoteRef = 455253C419B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253C719B1384900B88766 /* dispatch_proc */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_proc;
+ remoteRef = 455253C619B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253C919B1384900B88766 /* dispatch_queue_finalizer */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_queue_finalizer;
+ remoteRef = 455253C819B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253CB19B1384900B88766 /* dispatch_read */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_read;
+ remoteRef = 455253CA19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253CD19B1384900B88766 /* dispatch_read2 */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_read2;
+ remoteRef = 455253CC19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253CF19B1384900B88766 /* dispatch_after */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_after;
+ remoteRef = 455253CE19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253D119B1384900B88766 /* dispatch_timer */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_timer;
+ remoteRef = 455253D019B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253D319B1384900B88766 /* dispatch_timer_short */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_timer_short;
+ remoteRef = 455253D219B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253D519B1384900B88766 /* dispatch_timer_timeout */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_timer_timeout;
+ remoteRef = 455253D419B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253D719B1384900B88766 /* dispatch_suspend_timer */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_suspend_timer;
+ remoteRef = 455253D619B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253D919B1384900B88766 /* dispatch_sema */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_sema;
+ remoteRef = 455253D819B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253DB19B1384900B88766 /* dispatch_timer_bit31 */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_timer_bit31;
+ remoteRef = 455253DA19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253DD19B1384900B88766 /* dispatch_timer_bit63 */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_timer_bit63;
+ remoteRef = 455253DC19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253DF19B1384900B88766 /* dispatch_timer_set_time */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_timer_set_time;
+ remoteRef = 455253DE19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253E119B1384900B88766 /* dispatch_drift */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_drift;
+ remoteRef = 455253E019B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253E319B1384900B88766 /* dispatch_starfish */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_starfish;
+ remoteRef = 455253E219B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253E519B1384900B88766 /* dispatch_cascade */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_cascade;
+ remoteRef = 455253E419B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253E719B1384900B88766 /* dispatch_readsync */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_readsync;
+ remoteRef = 455253E619B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253E919B1384900B88766 /* dispatch_sync_on_main */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_sync_on_main;
+ remoteRef = 455253E819B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253EB19B1384900B88766 /* dispatch_sync_gc */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_sync_gc;
+ remoteRef = 455253EA19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253ED19B1384900B88766 /* dispatch_apply_gc */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_apply_gc;
+ remoteRef = 455253EC19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253EF19B1384900B88766 /* dispatch_data */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_data;
+ remoteRef = 455253EE19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253F119B1384900B88766 /* dispatch_io */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_io;
+ remoteRef = 455253F019B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253F319B1384900B88766 /* dispatch_io_net */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_io_net;
+ remoteRef = 455253F219B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253F519B1384900B88766 /* dispatch_vm */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_vm;
+ remoteRef = 455253F419B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253F719B1384900B88766 /* dispatch_vnode */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_vnode;
+ remoteRef = 455253F619B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253F919B1384900B88766 /* dispatch_qos */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_qos;
+ remoteRef = 455253F819B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253FB19B1384900B88766 /* dispatch_select */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_select;
+ remoteRef = 455253FA19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253FD19B1384900B88766 /* dispatch_transform */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = dispatch_transform;
+ remoteRef = 455253FC19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 455253FF19B1384900B88766 /* nsoperation */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = nsoperation;
+ remoteRef = 455253FE19B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 4552540119B1384900B88766 /* cffd */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = cffd;
+ remoteRef = 4552540019B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 4552540319B1384900B88766 /* bench */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = bench;
+ remoteRef = 4552540219B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 4552540519B1384900B88766 /* jsgc_bench */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = jsgc_bench;
+ remoteRef = 4552540419B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 4552540719B1384900B88766 /* async_bench */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = async_bench;
+ remoteRef = 4552540619B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+ 4552540919B1384900B88766 /* apply_bench */ = {
+ isa = PBXReferenceProxy;
+ fileType = "compiled.mach-o.executable";
+ path = apply_bench;
+ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
C927F36710FD7F1000C5AB8B /* ddt */ = {
isa = PBXReferenceProxy;
fileType = "compiled.mach-o.executable";
96032E4B0F5CC8C700241C5F /* time.c in Sources */,
5AAB45C010D30B79004407EA /* data.c in Sources */,
5A27262610F26F1900751FBC /* io.c in Sources */,
+ E43A72501AF85BBC00BAA921 /* block.cpp in Sources */,
C9C5F80E143C1771006DC718 /* transform.c in Sources */,
E4FC3264145F46C9002FBDDB /* object.m in Sources */,
2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */,
E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */,
E46DBC4214EE10C80001F9F6 /* init.c in Sources */,
E46DBC4314EE10C80001F9F6 /* queue.c in Sources */,
+ E43A72881AF85BE900BAA921 /* block.cpp in Sources */,
E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */,
E46DBC4514EE10C80001F9F6 /* once.c in Sources */,
E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */,
E49F24D2125D57FA0057C971 /* time.c in Sources */,
E49F24D3125D57FA0057C971 /* data.c in Sources */,
E49F24D4125D57FA0057C971 /* io.c in Sources */,
+ E43A72841AF85BCB00BAA921 /* block.cpp in Sources */,
C93D6165143E190E00EB9023 /* transform.c in Sources */,
E4FC3265145F46C9002FBDDB /* object.m in Sources */,
2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */,
E4B515C1164B2DA300E003AF /* queue.c in Sources */,
E4B515C2164B2DA300E003AF /* semaphore.c in Sources */,
E4B515C3164B2DA300E003AF /* once.c in Sources */,
+ E43A72871AF85BCD00BAA921 /* block.cpp in Sources */,
E4B515C4164B2DA300E003AF /* apply.c in Sources */,
E4B515C5164B2DA300E003AF /* object.c in Sources */,
E4B515C6164B2DA300E003AF /* benchmark.c in Sources */,
E4EC11B512514302000DDBD1 /* time.c in Sources */,
E4EC11B712514302000DDBD1 /* data.c in Sources */,
E4EC11B812514302000DDBD1 /* io.c in Sources */,
+ E43A72861AF85BCC00BAA921 /* block.cpp in Sources */,
C93D6166143E190F00EB9023 /* transform.c in Sources */,
E4FC3266145F46C9002FBDDB /* object.m in Sources */,
2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */,
E4EC122112514715000DDBD1 /* time.c in Sources */,
E4EC122312514715000DDBD1 /* data.c in Sources */,
E4EC122412514715000DDBD1 /* io.c in Sources */,
+ E43A72851AF85BCC00BAA921 /* block.cpp in Sources */,
C93D6167143E190F00EB9023 /* transform.c in Sources */,
E4FC3267145F46C9002FBDDB /* object.m in Sources */,
2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */,
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
+ 4552540F19B138B700B88766 /* PBXTargetDependency */ = {
+ isa = PBXTargetDependency;
+ name = all;
+ targetProxy = 4552540E19B138B700B88766 /* PBXContainerItemProxy */;
+ };
C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
name = ddt;
};
name = Debug;
};
+ 4552540C19B1389700B88766 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ };
+ name = Release;
+ };
+ 4552540D19B1389700B88766 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ };
+ name = Debug;
+ };
C927F35B10FD7F0600C5AB8B /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
+ 4552540B19B1389700B88766 /* Build configuration list for PBXAggregateTarget "libdispatch_tests" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 4552540C19B1389700B88766 /* Release */,
+ 4552540D19B1389700B88766 /* Debug */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */ = {
isa = XCConfigurationList;
buildConfigurations = (
.Fn dispatch_source_get_data
indicates which of the events in the
.Fa mask
-were observed.
+were observed. Note that because this source type will request notifications on the provided port, it should not be mixed with the use of
+.Fn mach_port_request_notification
+on the same port.
.Pp
.Vt DISPATCH_SOURCE_TYPE_MACH_RECV
.Pp
#if OS_OBJECT_USE_OBJC
#import <objc/NSObject.h>
#define OS_OBJECT_CLASS(name) OS_##name
-#define OS_OBJECT_DECL(name, ...) \
+#define OS_OBJECT_DECL_IMPL(name, ...) \
@protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \
@end \
typedef NSObject<OS_OBJECT_CLASS(name)> *name##_t
+#define OS_OBJECT_DECL(name, ...) \
+ OS_OBJECT_DECL_IMPL(name, <NSObject> __VA_ARGS__)
#define OS_OBJECT_DECL_SUBCLASS(name, super) \
- OS_OBJECT_DECL(name, <OS_OBJECT_CLASS(super)>)
+ OS_OBJECT_DECL_IMPL(name, <OS_OBJECT_CLASS(super)>)
#if defined(__has_attribute)
#if __has_attribute(ns_returns_retained)
#define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__))
_os_object_t
_os_object_retain(_os_object_t object);
+__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
+_os_object_t
+_os_object_retain_with_resurrect(_os_object_t obj);
+
__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0)
OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
void
#endif /* !__DISPATCH_BUILDING_DISPATCH__ */
// <rdar://problem/9627726> Check that public and private dispatch headers match
-#if DISPATCH_API_VERSION != 20140804 // Keep in sync with <dispatch/dispatch.h>
+#if DISPATCH_API_VERSION != 20141121 // Keep in sync with <dispatch/dispatch.h>
#error "Dispatch header mismatch between /usr/include and /usr/local/include"
#endif
* This priority level is intended for user-initiated application activity that
* is long-running and CPU or IO intensive and that the user is actively waiting
* on, but that should not interfere with interactive use of the application.
+ *
+ * This global queue priority level is mapped to QOS_CLASS_UTILITY.
*/
#define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN
* is no guarantee that the specified number will be reached.
* Pass 0 to specify that a default pool size determined by the system should
* be used.
+ * NOTE: passing pool_size == 1 does NOT make the pthread root queue equivalent
+ * to a serial queue.
*
* @result
* The flags argument to pass to dispatch_pthread_root_queue_create().
#define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q)
#endif
+/*!
+ * @function dispatch_async_enforce_qos_class_f
+ *
+ * @abstract
+ * Submits a function for asynchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * See dispatch_async() for details. The QOS will be enforced as if
+ * this was called:
+ * <code>
+ * dispatch_async(queue, dispatch_block_create(DISPATCH_BLOCK_ENFORCE_QOS_CLASS, ^{
+ * work(context);
+ * }));
+ * </code>
+ *
+ * @param queue
+ * The target dispatch queue to which the function is submitted.
+ * The system will hold a reference on the target queue until the function
+ * has returned.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the target queue. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_async_f().
+ * The result of passing NULL in this parameter is undefined.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
+void
+dispatch_async_enforce_qos_class_f(dispatch_queue_t queue,
+ void *context,
+ dispatch_function_t work);
+
+
__END_DECLS
#endif
#include <dispatch/base.h> // for HeaderDoc
#endif
+__BEGIN_DECLS
+
/*!
* @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE
* @discussion A dispatch timer source that is part of a timer aggregate.
__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock;
+__END_DECLS
+
/*!
* @enum dispatch_source_sock_flags_t
*
#include <os/voucher_private.h>
#endif
-#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20140708
+#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20150318
#if OS_VOUCHER_WEAK_IMPORT
#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT
OS_ENUM(voucher_activity_flag, unsigned long,
voucher_activity_flag_default = 0,
voucher_activity_flag_force = 0x1,
+ voucher_activity_flag_debug = 0x2,
+ voucher_activity_flag_persist = 0x4,
+ voucher_activity_flag_stream = 0x8,
);
/*!
voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location,
void *buffer, size_t length);
+/*!
+ * @function voucher_activity_trace_strings
+ *
+ * @abstract
+ * Add a tracepoint with strings data to trace buffer of the current activity.
+ *
+ * @param trace_id
+ * Tracepoint identifier returned by voucher_activity_trace_id()
+ *
+ * @param location
+ * Tracepoint location.
+ *
+ * @param buffer
+ * Pointer to packed buffer of tracepoint data.
+ *
+ * @param length
+ * Length of data at 'buffer'.
+ *
+ * @param strings
+ * NULL-terminated array of strings data.
+ *
+ * @param string_lengths
+ * Array of string lengths (required to have the same number of elements as the
+ * 'strings' array): string_lengths[i] is the maximum number of characters to
+ * copy from strings[i], excluding the NUL-terminator (may be smaller than the
+ * length of the string present in strings[i]).
+ *
+ * @param total_strings_size
+ * Total size of all strings data to be copied from strings array (including
+ * all NUL-terminators).
+ *
+ * @result
+ * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+uint64_t
+voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id,
+ uint64_t location, void *buffer, size_t length, const char *strings[],
+ size_t string_lengths[], size_t total_strings_size);
+
/*!
* @function voucher_activity_trace_args
*
voucher_activity_get_mode(void);
/*!
- * @function voucher_activity_set_mode_4libtrace(void)
+ * @function voucher_activity_set_mode_4libtrace
*
* @abstract
* Set the current mode of voucher activity subsystem.
_voucher_activity_trace_flag_buffer_empty = 0,
_voucher_activity_trace_flag_tracepoint = (1u << 0),
_voucher_activity_trace_flag_tracepoint_args = (1u << 1),
+ _voucher_activity_trace_flag_tracepoint_strings = (1u << 2),
_voucher_activity_trace_flag_wide_first = (1u << 6),
_voucher_activity_trace_flag_wide_second = (1u << 6) | (1u << 7),
_voucher_activity_trace_flag_start = (1u << 8),
uint64_t vat_thread; // pthread_t
uint64_t vat_timestamp; // absolute time
uint64_t vat_location; // tracepoint PC
- uint64_t vat_data[4]; // trace data
+ union {
+ uint64_t vat_data[4]; // trace data
+ struct {
+ uint16_t vats_offset; // offset to string data (from buffer end)
+ uint8_t vats_data[30]; // trace data
+ } vat_stroff; // iff _vat_flag_tracepoint_strings present
+ };
} *_voucher_activity_tracepoint_t;
/*!
#include <atm/atm_types.h>
#include <os/lock_private.h>
-static const atm_subaid32_t _voucher_default_activity_subid =
- ATM_SUBAID32_MAX-1;
-
static const size_t _voucher_activity_buffer_size = 4096;
static const size_t _voucher_activity_tracepoints_per_buffer =
_voucher_activity_buffer_size /
sizeof(struct _voucher_activity_tracepoint_s);
+static const size_t _voucher_activity_buffer_header_size =
+ sizeof(struct _voucher_activity_tracepoint_s);
+static const size_t _voucher_activity_strings_header_size = 0; // TODO
+
typedef uint8_t _voucher_activity_buffer_t[_voucher_activity_buffer_size];
+static const size_t _voucher_activity_buffers_per_heap = 512;
+typedef unsigned long _voucher_activity_bitmap_base_t;
+static const size_t _voucher_activity_bits_per_bitmap_base_t =
+ 8 * sizeof(_voucher_activity_bitmap_base_t);
+static const size_t _voucher_activity_bitmaps_per_heap =
+ _voucher_activity_buffers_per_heap /
+ _voucher_activity_bits_per_bitmap_base_t;
+typedef _voucher_activity_bitmap_base_t
+ _voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap]
+ __attribute__((__aligned__(64)));
+
struct _voucher_activity_self_metadata_s {
struct _voucher_activity_metadata_opaque_s *vasm_baseaddr;
+ _voucher_activity_bitmap_t volatile vam_buffer_bitmap;
};
+
typedef struct _voucher_activity_metadata_opaque_s {
- _voucher_activity_buffer_t vam_kernel_metadata;
_voucher_activity_buffer_t vam_client_metadata;
union {
struct _voucher_activity_self_metadata_s vam_self_metadata;
typedef os_lock_handoff_s _voucher_activity_lock_s;
-typedef struct _voucher_atm_s {
- int32_t volatile vatm_refcnt;
- mach_voucher_t vatm_kvoucher;
- atm_aid_t vatm_id;
- atm_mailbox_offset_t vatm_mailbox_offset;
- TAILQ_ENTRY(_voucher_atm_s) vatm_list;
-#if __LP64__
- uintptr_t vatm_pad[3];
- // cacheline
-#endif
- _voucher_activity_lock_s vatm_activities_lock;
- TAILQ_HEAD(_voucher_atm_activities_s, _voucher_activity_s) vatm_activities;
- TAILQ_HEAD(, _voucher_activity_s) vatm_used_activities;
-} *_voucher_atm_t;
+OS_ENUM(_voucher_activity_buffer_atomic_flags, uint8_t,
+ _voucher_activity_buffer_full = (1u << 0),
+ _voucher_activity_buffer_pushing = (1u << 1),
+);
+
+typedef union {
+ uint64_t vabp_atomic_pos;
+ struct {
+ uint16_t vabp_refcnt;
+ uint8_t vabp_flags;
+ uint8_t vabp_unused;
+ uint16_t vabp_next_tracepoint_idx;
+ uint16_t vabp_string_offset; // offset from the _end_ of the buffer
+ } vabp_pos;
+} _voucher_activity_buffer_position_u;
// must match layout of _voucher_activity_tracepoint_s
typedef struct _voucher_activity_buffer_header_s {
uint16_t vabh_flags; // _voucher_activity_trace_flag_buffer_header
- uint8_t vabh_unused[6];
- uint64_t vabh_thread;
- uint64_t vabh_timestamp;
- uint32_t volatile vabh_next_tracepoint_idx;
- uint32_t vabh_sequence_no;
+ uint8_t vat_type;
+ uint8_t vat_namespace;
+ uint32_t vat_code;
+ uint64_t vat_thread;
+ uint64_t vat_timestamp;
+ uint64_t vat_location;
voucher_activity_id_t vabh_activity_id;
- uint64_t vabh_reserved;
+ _voucher_activity_buffer_position_u volatile vabh_pos;
TAILQ_ENTRY(_voucher_activity_buffer_header_s) vabh_list;
} *_voucher_activity_buffer_header_t;
-// must match layout of _voucher_activity_buffer_header_s
-typedef struct _voucher_activity_s {
- // first tracepoint entry
- // must match layout of _voucher_activity_tracepoint_s
- uint16_t va_flags; // _voucher_activity_trace_flag_buffer_header |
- // _voucher_activity_trace_flag_activity |
- // _voucher_activity_trace_flag_start |
- // _voucher_activity_trace_flag_wide_first
- uint8_t va_type;
- uint8_t va_namespace;
- uint32_t va_code;
- uint64_t va_thread;
- uint64_t va_timestamp;
- uint32_t volatile vabh_next_tracepoint_idx;
- uint32_t volatile va_max_sequence_no;
- voucher_activity_id_t va_id;
- int32_t volatile va_use_count;
- uint32_t va_buffer_limit;
- TAILQ_HEAD(_voucher_activity_buffer_list_s,
- _voucher_activity_buffer_header_s) va_buffers;
-#if !__LP64__
- uint64_t va_pad;
-#endif
+/*!
+ * @enum _voucher_activity_buffer_hook_reason
+ *
+ * @constant _voucher_activity_buffer_hook_reason_full
+ * Specified activity buffer is full.
+ * Will be reported reused or freed later.
+ *
+ * @constant _voucher_activity_buffer_hook_reason_reuse
+ * Specified activity buffer is about to be reused.
+ * Was previously reported as full.
+ *
+ * @constant _voucher_activity_buffer_hook_reason_free
+ * Specified activity buffer is about to be freed.
+ * May have been previously reported as full or may be only partially filled.
+ */
+typedef enum _voucher_activity_buffer_hook_reason {
+ _voucher_activity_buffer_hook_reason_full = 0x1,
+ _voucher_activity_buffer_hook_reason_reuse = 0x2,
+ _voucher_activity_buffer_hook_reason_free = 0x4,
+} _voucher_activity_buffer_hook_reason;
- // second tracepoint entry
- // must match layout of _voucher_activity_tracepoint_s
- uint16_t va_flags2;
- uint8_t va_unused2[2];
- int32_t volatile va_refcnt;
- uint64_t va_location;
- _voucher_activity_buffer_header_t volatile va_current_buffer;
- _voucher_atm_t va_atm;
- _voucher_activity_lock_s va_buffers_lock;
- uintptr_t va_pad2[2];
-
-#if __LP64__
- // third tracepoint entry
- // must match layout of _voucher_activity_tracepoint_s
- uint16_t va_flags3;
- uint8_t va_unused3[6];
- uintptr_t va_pad3;
-#endif
- TAILQ_ENTRY(_voucher_activity_s) va_list;
- TAILQ_ENTRY(_voucher_activity_s) va_atm_list;
- TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list;
-} *_voucher_activity_t;
+/*!
+ * @typedef _voucher_activity_buffer_hook_t
+ *
+ * @abstract
+ * A function pointer called when an activity buffer is full or being freed.
+ * NOTE: callbacks occur under an activity-wide handoff lock and work done
+ * inside the callback function must not block or otherwise cause that lock to
+ * be held for an extended period of time.
+ *
+ * @param reason
+ * Reason for callback.
+ *
+ * @param buffer
+ * Pointer to activity buffer.
+ */
+typedef void (*_voucher_activity_buffer_hook_t)(
+ _voucher_activity_buffer_hook_reason reason,
+ _voucher_activity_buffer_header_t buffer);
+
+/*!
+ * @function voucher_activity_buffer_hook_install_4libtrace
+ *
+ * @abstract
+ * Install activity buffer hook callback function.
+ * Must be called from the libtrace initializer, and at most once.
+ *
+ * @param hook
+ * Hook function to install.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_activity_buffer_hook_install_4libtrace(
+ _voucher_activity_buffer_hook_t hook);
#endif // OS_VOUCHER_ACTIVITY_BUFFER_SPI
#include <os/base.h>
#include <os/object.h>
-#define OS_VOUCHER_SPI_VERSION 20140425
+#define OS_VOUCHER_SPI_VERSION 20141203
#if OS_VOUCHER_WEAK_IMPORT
#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT
voucher_decrement_importance_count4CF(voucher_t voucher);
/*!
- * @group Dispatch block objects
+ * @group Voucher dispatch block SPI
*/
#ifndef __DISPATCH_BUILDING_DISPATCH__
voucher_t voucher, dispatch_qos_class_t qos_class,
int relative_priority, dispatch_block_t block);
+/*!
+ * @group Voucher dispatch queue SPI
+ */
+
+/*!
+ * @function dispatch_queue_create_with_accounting_override_voucher
+ *
+ * @abstract
+ * Creates a new dispatch queue with an accounting override voucher created
+ * from the specified voucher.
+ *
+ * @discussion
+ * See dispatch_queue_create() headerdoc for generic details on queue creation.
+ *
+ * The resource accounting attributes of the specified voucher are extracted
+ * and used to create an accounting override voucher for the new queue.
+ *
+ * Every block executed on the returned queue will initially have this override
+ * voucher adopted, any voucher automatically associated with or explicitly
+ * assigned to the block will NOT be used and released immediately before block
+ * execution starts.
+ *
+ * The accounting override voucher will be automatically propagated to any
+ * asynchronous work generated from the queue following standard voucher
+ * propagation rules.
+ *
+ * NOTE: this SPI should only be used in special circumstances when a subsystem
+ * has complete control over all workitems submitted to a queue (e.g. no client
+ * block is ever submitted to the queue) and if and only if such queues have a
+ * one-to-one mapping with resource accounting identities.
+ *
+ * CAUTION: use of this SPI represents a potential voucher propagation hole. It
+ * is the responsibility of the caller to ensure that any callbacks into client
+ * code from the queue have the correct client voucher applied (rather than the
+ * automatically propagated accounting override voucher), e.g. by use of the
+ * dispatch_block_create() API to capture client state at the time the callback
+ * is registered.
+ *
+ * @param label
+ * A string label to attach to the queue.
+ * This parameter is optional and may be NULL.
+ *
+ * @param attr
+ * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to
+ * the function dispatch_queue_attr_make_with_qos_class().
+ *
+ * @param voucher
+ * A voucher whose resource accounting attributes are used to create the
+ * accounting override voucher attached to the queue.
+ *
+ * @result
+ * The newly created dispatch queue.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW
+dispatch_queue_t
+dispatch_queue_create_with_accounting_override_voucher(const char *label,
+ dispatch_queue_attr_t attr, voucher_t voucher);
+
/*!
* @group Voucher Mach SPI
* SPI intended for clients that need to interact with mach messages or mach
sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
+ // Make sure our alignment will be correct: that is, that we are correctly
+ // aligning to both.
+ dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
+ ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
+ dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
+ ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
}
#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \
|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
#define HEAP_MASK (~(uintptr_t)(BYTES_PER_HEAP - 1))
#define MAGAZINE_MASK (~(uintptr_t)(BYTES_PER_MAGAZINE - 1))
+// this will round up such that first_bitmap_in_same_page() can mask the address
+// of a bitmap_t in the maps to obtain the first bitmap for that same page
+#define ROUND_UP_TO_BITMAP_ALIGNMENT(x) \
+ (((x) + ((BITMAPS_PER_PAGE * BYTES_PER_BITMAP) - 1u)) & \
+ ~((BITMAPS_PER_PAGE * BYTES_PER_BITMAP) - 1u))
+// Since these are both powers of two, we end up with not only the max alignment,
+// but happily the least common multiple, which will be the greater of the two.
+#define ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT(x)))
+#define PADDING_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(x) (ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(x) - (x))
+
#define PADDING_TO_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(x) - (x))
#if defined(__LP64__)
// header is expected to end on supermap's required alignment
#define HEADER_TO_SUPERMAPS_PADDING 0
-#define SUPERMAPS_TO_MAPS_PADDING (PADDING_TO_CONTINUATION_SIZE( \
+// we want to align the maps to a continuation size, but we must also have proper padding
+// so that we can perform first_bitmap_in_same_page()
+#define SUPERMAPS_TO_MAPS_PADDING (PADDING_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE( \
SIZEOF_SUPERMAPS + HEADER_TO_SUPERMAPS_PADDING + SIZEOF_HEADER))
+
#define MAPS_TO_FPMAPS_PADDING (PADDING_TO_CONTINUATION_SIZE(SIZEOF_MAPS))
#define BYTES_LEFT_IN_FIRST_PAGE (BYTES_PER_PAGE - \
--- /dev/null
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifdef __BLOCKS__
+
+#if __cplusplus < 201103L
+#error Must build with C++11 or later
+#endif
+
+#if __has_feature(cxx_exceptions)
+#error Must build without C++ exceptions
+#endif
+
+extern "C" {
+#include "internal.h"
+}
+
+#if DISPATCH_DEBUG && DISPATCH_BLOCK_PRIVATE_DATA_DEBUG
+#define _dispatch_block_private_data_debug(msg, ...) \
+ _dispatch_debug("block_private[%p]: " msg, (this), ##__VA_ARGS__)
+#else
+#define _dispatch_block_private_data_debug(msg, ...)
+#endif
+
+#pragma mark -
+#pragma mark _dispatch_block_create
+
+// rdar://20766742 C++ helpers to enable block capture of vouchers and groups
+
+struct dispatch_block_private_data_s {
+ DISPATCH_BLOCK_PRIVATE_DATA_HEADER();
+ static void* operator new(size_t) = delete;
+ static void* operator new [] (size_t) = delete;
+ explicit inline DISPATCH_ALWAYS_INLINE dispatch_block_private_data_s(
+ dispatch_block_flags_t flags, voucher_t voucher,
+ pthread_priority_t priority, dispatch_block_t block) noexcept :
+ dbpd_magic(), dbpd_flags(flags), dbpd_atomic_flags(),
+ dbpd_performed(), dbpd_priority(priority), dbpd_voucher(voucher),
+ dbpd_block(block), dbpd_group(), dbpd_queue(), dbpd_thread()
+ {
+ // stack structure constructor, no releases on destruction
+ _dispatch_block_private_data_debug("create, block: %p", dbpd_block);
+ }
+ inline DISPATCH_ALWAYS_INLINE dispatch_block_private_data_s(
+ dispatch_block_private_data_s const &o) noexcept :
+ dbpd_magic(DISPATCH_BLOCK_PRIVATE_DATA_MAGIC),
+ dbpd_flags(o.dbpd_flags), dbpd_atomic_flags(), dbpd_performed(),
+ dbpd_priority(o.dbpd_priority), dbpd_voucher(o.dbpd_voucher),
+ dbpd_block(), dbpd_group(), dbpd_queue(), dbpd_thread()
+ {
+ // copy constructor, create copy with retained references
+ if (dbpd_voucher) voucher_retain(dbpd_voucher);
+ if (o.dbpd_block) dbpd_block = _dispatch_Block_copy(o.dbpd_block);
+ _dispatch_block_private_data_debug("copy from %p, block: %p from %p",
+ &o, dbpd_block, o.dbpd_block);
+ if (!o.dbpd_magic) return; // No group in initial copy of stack object
+ dbpd_group = _dispatch_group_create_and_enter();
+ }
+ inline DISPATCH_ALWAYS_INLINE ~dispatch_block_private_data_s() noexcept
+ {
+ _dispatch_block_private_data_debug("destroy%s, block: %p",
+ dbpd_magic ? "" : " (stack)", dbpd_block);
+ if (dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) return;
+ if (dbpd_group) {
+ if (!dbpd_performed) dispatch_group_leave(dbpd_group);
+ ((void (*)(dispatch_group_t))dispatch_release)(dbpd_group);
+ }
+ if (dbpd_block) Block_release(dbpd_block);
+ if (dbpd_voucher) voucher_release(dbpd_voucher);
+ }
+};
+
+dispatch_block_t
+_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher,
+ pthread_priority_t pri, dispatch_block_t block)
+{
+ struct dispatch_block_private_data_s dbpds(flags, voucher, pri, block);
+ return _dispatch_Block_copy(^{
+ // Capture stack object: invokes copy constructor (17094902)
+ (void)dbpds;
+ _dispatch_block_invoke(&dbpds);
+ });
+}
+
+extern "C" {
+// The compiler hides the name of the function it generates, and changes it if
+// we try to reference it directly, but the linker still sees it.
+extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *)
+ asm("____dispatch_block_create_block_invoke");
+void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE;
+}
+
+#endif // __BLOCKS__
#include "internal.h"
-// Dispatch data objects are dispatch objects with standard retain/release
-// memory management. A dispatch data object either points to a number of other
-// dispatch data objects or is a leaf data object. A leaf data object contains
-// a pointer to represented memory. A composite data object specifies the total
-// size of data it represents and list of constituent records.
-//
-// A leaf data object always points to a full represented buffer, a composite
-// dispatch data object is needed to represent a subrange of a memory region.
+/*
+ * Dispatch data objects are dispatch objects with standard retain/release
+ * memory management. A dispatch data object either points to a number of other
+ * dispatch data objects or is a leaf data object.
+ * A composite data object specifies the total size of data it represents
+ * and list of constituent records.
+ *
+ *******************************************************************************
+ *
+ * CURRENT IMPLEMENTATION DETAILS
+ *
+ * There are actually 3 kinds of composite objects
+ * - trivial subranges
+ * - unflattened composite data objects
+ * - flattened composite data objects
+ *
+ * LEAVES (num_records == 0, destructor != nil)
+ *
+ * Those objects have a pointer to represented memory in `buf`.
+ *
+ * UNFLATTENED (num_records > 1, buf == nil, destructor == nil)
+ *
+ * This is the generic case of a composite object.
+ *
+ * FLATTENED (num_records > 1, buf != nil, destructor == nil)
+ *
+ * Those objects are non trivial composite objects whose `buf` pointer
+ * is a contiguous representation (copied) of the memory it represents.
+ *
+ * Such objects are created when an unflattened composite dispatch data
+ * object is used as an NSData and its -bytes method is called.
+ * The underlying implementation is _dispatch_data_get_flattened_bytes
+ *
+ * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil)
+ *
+ * Those objects point to a single leaf, never to flattened objects.
+ *
+ *******************************************************************************
+ *
+ * Non trivial invariants:
+ *
+ * It is forbidden to point into a composite data object and ignore entire
+ * records from it. (for example by having `from` longer than the first
+ * record length).
+ *
+ * dispatch_data_t's are either leaves, or composite objects pointing to
+ * leaves. Depth is never greater than 1.
+ *
+ *******************************************************************************
+ *
+ * There are 4 dispatch_data_t constructors that may create non-leaf objects
+ * and ensure proper invariants.
+ *
+ * dispatch_data_copy_region()
+ * This function first sees through trivial subranges, and may in turn
+ * generate new trivial subranges.
+ *
+ * dispatch_data_create_map()
+ * This function either returns existing data objects, or a leaf.
+ *
+ * dispatch_data_create_subrange()
+ * This function treats flattened objects like unflattened ones,
+ * and recurses into trivial subranges, it can create trivial subranges.
+ *
+ * dispatch_data_create_concat()
+ * This function unwraps the top-level composite objects, trivial or not,
+ * and else concatenates the two arguments range lists, hence always creating
+ * unflattened objects, unless one of the arguments was empty.
+ *
+ *******************************************************************************
+ */
#if USE_OBJC
#define _dispatch_data_retain(x) _dispatch_objc_retain(x)
{
dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS,
sizeof(struct dispatch_data_s) + extra +
- (n ? n * sizeof(range_record) - sizeof(data->buf) : 0));
+ n * sizeof(range_record));
data->num_records = n;
#if !USE_OBJC
data->do_targetq = dispatch_get_global_queue(
data->buf = buffer;
data->size = size;
data->destructor = destructor;
-#if DISPATCH_DATA_USE_LEAF_MEMBER
- data->leaf = true;
- data->num_records = 1;
-#endif
if (queue) {
_dispatch_retain(queue);
data->do_targetq = queue;
void
_dispatch_data_dispose(dispatch_data_t dd)
{
- dispatch_block_t destructor = dd->destructor;
- if (destructor == NULL) {
+ if (_dispatch_data_leaf(dd)) {
+ _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
+ dd->destructor);
+ } else {
size_t i;
for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
_dispatch_data_release(dd->records[i].data_object);
}
- } else {
- _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq,
- destructor);
+ free((void *)dd->buf);
}
}
offset += dsnprintf(&buf[offset], bufsiz - offset,
"composite, size = %zd, num_records = %zd ", dd->size,
_dispatch_data_num_records(dd));
+ if (dd->buf) {
+ offset += dsnprintf(&buf[offset], bufsiz - offset,
+ ", flatbuf = %p ", dd->buf);
+ }
size_t i;
for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
range_record r = dd->records[i];
_dispatch_data_retain(dd1);
return dd1;
}
+
data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) +
_dispatch_data_num_records(dd2), 0);
data->size = dd1->size + dd2->size;
_dispatch_data_retain(dd);
return dd;
}
+ /*
+ * we must only optimize leaves and not flattened objects
+ * because lots of users want to keep the end of a buffer and release
+ * as much memory as they can from the beginning of it
+ *
+ * Using the flatbuf here would be very wrong with respect to that goal
+ */
if (_dispatch_data_leaf(dd)) {
data = _dispatch_data_alloc(1, 0);
data->size = length;
_dispatch_data_retain(dd);
return data;
}
- // Subrange of a composite dispatch data object: find the record containing
- // the specified offset
- data = dispatch_data_empty;
- size_t i = 0, bytes_left = length;
- while (i < _dispatch_data_num_records(dd) &&
- offset >= dd->records[i].length) {
+
+ // Subrange of a composite dispatch data object
+ const size_t dd_num_records = _dispatch_data_num_records(dd);
+ bool to_the_end = (offset + length == dd->size);
+ size_t i = 0;
+
+ // find the record containing the specified offset
+ while (i < dd_num_records && offset >= dd->records[i].length) {
offset -= dd->records[i++].length;
}
- while (i < _dispatch_data_num_records(dd)) {
- size_t record_len = dd->records[i].length - offset;
- if (record_len > bytes_left) {
- record_len = bytes_left;
- }
- dispatch_data_t subrange = dispatch_data_create_subrange(
- dd->records[i].data_object, dd->records[i].from + offset,
- record_len);
- dispatch_data_t concat = dispatch_data_create_concat(data, subrange);
- _dispatch_data_release(data);
- _dispatch_data_release(subrange);
- data = concat;
- bytes_left -= record_len;
- if (!bytes_left) {
- return data;
+
+ // Crashing here indicates memory corruption of passed in data object
+ if (slowpath(i >= dd_num_records)) {
+ DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
+ return NULL;
+ }
+
+ // if everything is from a single dispatch data object, avoid boxing it
+ if (offset + length <= dd->records[i].length) {
+ return dispatch_data_create_subrange(dd->records[i].data_object,
+ dd->records[i].from + offset, length);
+ }
+
+ // find the record containing the end of the current range
+ // and optimize the case when you just remove bytes at the origin
+ size_t count, last_length;
+
+ if (to_the_end) {
+ count = dd_num_records - i;
+ } else {
+ last_length = length - (dd->records[i].length - offset);
+ count = 1;
+
+ while (i + count < dd_num_records) {
+ size_t record_length = dd->records[i + count++].length;
+
+ if (last_length <= record_length) {
+ break;
+ }
+ last_length -= record_length;
+
+ // Crashing here indicates memory corruption of passed in data object
+ if (slowpath(i + count >= dd_num_records)) {
+ DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
+ return NULL;
+ }
}
- offset = 0;
- i++;
}
- // Crashing here indicates memory corruption of passed in data object
- DISPATCH_CRASH("dispatch_data_create_subrange out of bounds");
- return NULL;
+
+ data = _dispatch_data_alloc(count, 0);
+ data->size = length;
+ memcpy(data->records, dd->records + i, count * sizeof(range_record));
+
+ if (offset) {
+ data->records[0].from += offset;
+ data->records[0].length -= offset;
+ }
+ if (!to_the_end) {
+ data->records[count - 1].length = last_length;
+ }
+
+ for (i = 0; i < count; i++) {
+ _dispatch_data_retain(data->records[i].data_object);
+ }
+ return data;
+}
+
+/*
+ * Allocate a contiguous buffer of dd->size bytes and copy the represented
+ * content of the (composite) data object into it. Returns NULL if malloc
+ * fails. The caller owns the returned buffer.
+ */
+static void*
+_dispatch_data_flatten(dispatch_data_t dd)
+{
+	void *buffer = malloc(dd->size);
+
+	// Composite data object, copy the represented buffers
+	if (buffer) {
+		dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
+				size_t off, const void* buf, size_t len) {
+			memcpy(buffer + off, buf, len);
+			return (bool)true;
+		});
+	}
+
+	return buffer;
+}
// When mapping a leaf object or a subrange of a leaf object, return a direct
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr,
size_t *size_ptr)
{
- dispatch_data_t data = dd;
+ dispatch_data_t data = NULL;
const void *buffer = NULL;
- size_t size = dd->size, offset = 0;
+ size_t size = dd->size;
+
if (!size) {
data = dispatch_data_empty;
goto out;
}
- if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 &&
- _dispatch_data_leaf(dd->records[0].data_object)) {
- offset = dd->records[0].from;
- dd = dd->records[0].data_object;
- }
- if (_dispatch_data_leaf(dd)) {
- _dispatch_data_retain(data);
- buffer = dd->buf + offset;
+
+ buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
+ if (buffer) {
+ _dispatch_data_retain(dd);
+ data = dd;
goto out;
}
- // Composite data object, copy the represented buffers
- buffer = malloc(size);
- if (!buffer) {
- data = NULL;
+
+ buffer = _dispatch_data_flatten(dd);
+ if (fastpath(buffer)) {
+ data = dispatch_data_create(buffer, size, NULL,
+ DISPATCH_DATA_DESTRUCTOR_FREE);
+ } else {
size = 0;
- goto out;
}
- dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED,
- size_t off, const void* buf, size_t len) {
- memcpy((void*)buffer + off, buf, len);
- return (bool)true;
- });
- data = dispatch_data_create(buffer, size, NULL,
- DISPATCH_DATA_DESTRUCTOR_FREE);
+
out:
if (buffer_ptr) {
*buffer_ptr = buffer;
return data;
}
+/*
+ * Return a pointer to a contiguous representation of dd's bytes, lazily
+ * creating and caching a flattened buffer in dd->buf on first use (the
+ * FLATTENED case described at the top of this file). Returns NULL for
+ * empty data or when the flatten allocation fails.
+ */
+const void *
+_dispatch_data_get_flattened_bytes(dispatch_data_t dd)
+{
+	const void *buffer;
+	size_t offset = 0;
+
+	if (slowpath(!dd->size)) {
+		return NULL;
+	}
+
+	// see through trivial subranges / leaves and cached flatbufs first
+	buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset);
+	if (buffer) {
+		return buffer;
+	}
+
+	void *flatbuf = _dispatch_data_flatten(dd);
+	if (fastpath(flatbuf)) {
+		// we need a release so that readers see the content of the buffer
+		if (slowpath(!dispatch_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf,
+				&buffer, release))) {
+			// lost the publish race: another thread installed a flatbuf,
+			// use theirs (now in `buffer`) and drop ours
+			free(flatbuf);
+		} else {
+			buffer = flatbuf;
+		}
+	} else {
+		return NULL;
+	}
+
+	return buffer + offset;
+}
+
+#if DISPATCH_USE_CLIENT_CALLOUT
+DISPATCH_NOINLINE
+#else
+DISPATCH_ALWAYS_INLINE
+#endif
+/*
+ * Trampoline that invokes the client-supplied applier function. Kept out of
+ * line when DISPATCH_USE_CLIENT_CALLOUT is set — presumably so client code
+ * runs in its own stack frame like the other client callouts; confirm
+ * against the DISPATCH_USE_CLIENT_CALLOUT machinery.
+ */
+static bool
+_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region, size_t offset,
+		const void *buffer, size_t size, dispatch_data_applier_function_t f)
+{
+	return f(ctxt, region, offset, buffer, size);
+}
+
+
static bool
_dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from,
size_t size, void *ctxt, dispatch_data_applier_function_t applier)
{
bool result = true;
- dispatch_data_t data = dd;
const void *buffer;
- dispatch_assert(dd->size);
- if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 &&
- _dispatch_data_leaf(dd->records[0].data_object)) {
- from = dd->records[0].from;
- dd = dd->records[0].data_object;
- }
- if (_dispatch_data_leaf(dd)) {
- buffer = dd->buf + from;
- return _dispatch_client_callout3(ctxt, data, offset, buffer, size,
- applier);
+
+ buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL);
+ if (buffer) {
+ return _dispatch_data_apply_client_callout(ctxt, dd,
+ offset, buffer + from, size, applier);
}
+
size_t i;
for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) {
result = _dispatch_data_apply(dd->records[i].data_object,
(dispatch_data_applier_function_t)_dispatch_Block_invoke(applier));
}
+/*
+ * Recursive worker for dispatch_data_copy_region(): within the subrange
+ * [from, from + size) of `dd`, find the region containing byte `location`,
+ * accumulate that region's start offset into *offset_ptr, and return a
+ * retained object for it — either a leaf, or an object composed of a
+ * single leaf record.
+ */
+static dispatch_data_t
+_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size,
+		size_t location, size_t *offset_ptr)
+{
+	dispatch_data_t reusable_dd = NULL;
+	size_t offset = 0;
+
+	// remember the original object when the range covers it entirely,
+	// before _dispatch_data_map_direct() may rewrite dd/from
+	if (from == 0 && size == dd->size) {
+		reusable_dd = dd;
+	}
+
+	if (_dispatch_data_map_direct(dd, from, &dd, &from)) {
+		if (reusable_dd) {
+			_dispatch_data_retain(reusable_dd);
+			return reusable_dd;
+		}
+
+		_dispatch_data_retain(dd);
+		if (from == 0 && size == dd->size) {
+			return dd;
+		}
+
+		// box the partial leaf in a single-record (trivial subrange) object
+		dispatch_data_t data = _dispatch_data_alloc(1, 0);
+		data->size = size;
+		data->records[0].from = from;
+		data->records[0].length = size;
+		data->records[0].data_object = dd;
+		return data;
+	}
+
+	size_t i;
+	for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
+		size_t length = dd->records[i].length;
+
+		if (from >= length) {
+			// record is entirely before the range, skip it
+			from -= length;
+			continue;
+		}
+
+		length -= from;
+		if (location >= offset + length) {
+			// target byte is past this record, keep scanning
+			offset += length;
+			from = 0;
+			continue;
+		}
+
+		// descend into the record that contains `location`
+		from += dd->records[i].from;
+		dd = dd->records[i].data_object;
+		*offset_ptr += offset;
+		location -= offset;
+		return _dispatch_data_copy_region(dd, from, length, location, offset_ptr);
+	}
+
+	// Crashing here indicates memory corruption of passed in data object
+	DISPATCH_CRASH("dispatch_data_copy_region out of bounds");
+	return NULL; // unreachable; consistent with the other out-of-bounds paths
+}
+
// Returs either a leaf object or an object composed of a single leaf object
dispatch_data_t
dispatch_data_copy_region(dispatch_data_t dd, size_t location,
size_t *offset_ptr)
{
if (location >= dd->size) {
- *offset_ptr = 0;
+ *offset_ptr = dd->size;
return dispatch_data_empty;
}
- dispatch_data_t data;
- size_t size = dd->size, offset = 0, from = 0;
- while (true) {
- if (_dispatch_data_leaf(dd)) {
- _dispatch_data_retain(dd);
- *offset_ptr = offset;
- if (size == dd->size) {
- return dd;
- } else {
- // Create a new object for the requested subrange of the leaf
- data = _dispatch_data_alloc(1, 0);
- data->size = size;
- data->records[0].from = from;
- data->records[0].length = size;
- data->records[0].data_object = dd;
- return data;
- }
- } else {
- // Find record at the specified location
- size_t i, pos;
- for (i = 0; i < _dispatch_data_num_records(dd); ++i) {
- pos = offset + dd->records[i].length;
- if (location < pos) {
- size = dd->records[i].length;
- from = dd->records[i].from;
- data = dd->records[i].data_object;
- if (_dispatch_data_num_records(dd) == 1 &&
- _dispatch_data_leaf(data)) {
- // Return objects composed of a single leaf node
- *offset_ptr = offset;
- _dispatch_data_retain(dd);
- return dd;
- } else {
- // Drill down into other objects
- dd = data;
- break;
- }
- } else {
- offset = pos;
- }
- }
- }
- }
+ *offset_ptr = 0;
+ return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr);
}
#if HAVE_MACH
#include <Foundation/NSString.h>
@interface DISPATCH_CLASS(data) ()
+@property (readonly) NSUInteger length;
+@property (readonly) const void *bytes NS_RETURNS_INNER_POINTER;
+
- (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy
freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm;
- (BOOL)_bytesAreVM;
+- (BOOL)_isCompact;
@end
@interface DISPATCH_CLASS(data_empty) : DISPATCH_CLASS(data)
class_getName([self class]), buf];
}
+// NSData-style accessor: total size in bytes of this dispatch data object.
+- (NSUInteger)length {
+	struct dispatch_data_s *dd = (void*)self;
+	return dd->size;
+}
+
+// NSData-style accessor: contiguous bytes of the object; may flatten a
+// composite object and cache the buffer (see
+// _dispatch_data_get_flattened_bytes).
+- (const void *)bytes {
+	struct dispatch_data_s *dd = (void*)self;
+	return _dispatch_data_get_flattened_bytes(dd);
+}
+
+// YES when -bytes can be served without flattening: the object is empty or
+// its bytes are already directly mappable.
+- (BOOL)_isCompact {
+	struct dispatch_data_s *dd = (void*)self;
+	return !dd->size || _dispatch_data_map_direct(dd, 0, NULL, NULL) != NULL;
+}
+
@end
@implementation DISPATCH_CLASS(data_empty)
#include <dispatch/base.h> // for HeaderDoc
#endif
-#if defined(__LP64__) && !defined(DISPATCH_DATA_USE_LEAF_MEMBER) && !USE_OBJC
-// explicit leaf member is free on 64bit due to padding
-#define DISPATCH_DATA_USE_LEAF_MEMBER 1
-#endif
-
typedef struct range_record_s {
dispatch_data_t data_object;
size_t from;
#else // USE_OBJC
DISPATCH_STRUCT_HEADER(data);
#endif // USE_OBJC
-#if DISPATCH_DATA_USE_LEAF_MEMBER
- bool leaf;
-#endif
+ const void *buf;
dispatch_block_t destructor;
size_t size, num_records;
- union {
- const void* buf;
- range_record records[0];
- };
+ range_record records[0];
};
-#if DISPATCH_DATA_USE_LEAF_MEMBER
-#define _dispatch_data_leaf(d) ((d)->leaf)
-#define _dispatch_data_num_records(d) ((d)->num_records)
-#else
-#define _dispatch_data_leaf(d) ((d)->num_records ? 0 : ((d)->size ? 1 : 0))
-#define _dispatch_data_num_records(d) \
- (_dispatch_data_leaf(d) ? 1 : (d)->num_records)
-#endif // DISPATCH_DATA_USE_LEAF_MEMBER
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_data_leaf(struct dispatch_data_s *dd)
+{
+	// a leaf has no constituent records; it represents memory via dd->buf
+	return dd->num_records == 0;
+}
+
+/*
+ * Number of records required to represent this dispatch data object if it
+ * is not a leaf. Callers either want exactly that value, or must
+ * special-case leaves beforehand (a leaf's actual embedded record count
+ * is 0, but it is treated as a single record here).
+ */
+DISPATCH_ALWAYS_INLINE
+static inline size_t
+_dispatch_data_num_records(struct dispatch_data_s *dd)
+{
+	// a leaf (num_records == 0) counts as one record
+	return dd->num_records ?: 1;
+}
typedef dispatch_data_t (*dispatch_transform_t)(dispatch_data_t data);
dispatch_block_t destructor);
void _dispatch_data_dispose(dispatch_data_t data);
size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz);
-const dispatch_block_t _dispatch_data_destructor_inline;
-#define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline)
+const void*
+_dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd);
+#if !defined(__cplusplus)
#if !__OBJC2__
+const dispatch_block_t _dispatch_data_destructor_inline;
+#define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline)
+#endif // !__OBJC2__
+/*
+ * the out parameters are about seeing "through" trivial subranges
+ * so for something like this: dd = { subrange [ dd1, offset1 ] },
+ * this will return { dd1, offset + offset1 }
+ *
+ * If the dispatch object isn't a trivial subrange, it returns { dd, offset }
+ */
+DISPATCH_ALWAYS_INLINE
static inline const void*
-_dispatch_data_map_direct(dispatch_data_t dd)
+_dispatch_data_map_direct(struct dispatch_data_s *dd, size_t offset,
+ struct dispatch_data_s **dd_out, size_t *from_out)
{
- size_t offset = 0;
- if (slowpath(!dd->size)) {
- return NULL;
- }
+ const void *buffer = NULL;
+
+ dispatch_assert(dd->size);
if (slowpath(!_dispatch_data_leaf(dd)) &&
- _dispatch_data_num_records(dd) == 1 &&
- _dispatch_data_leaf(dd->records[0].data_object)) {
- offset = dd->records[0].from;
- dd = dd->records[0].data_object;
+ _dispatch_data_num_records(dd) == 1) {
+ offset += dd->records[0].from;
+ dd = (struct dispatch_data_s *)dd->records[0].data_object;
}
- return fastpath(_dispatch_data_leaf(dd)) ? (dd->buf + offset) : NULL;
+
+ if (fastpath(_dispatch_data_leaf(dd))) {
+ buffer = dd->buf + offset;
+ } else {
+ buffer = dispatch_atomic_load((void **)&dd->buf, relaxed);
+ if (buffer) {
+ buffer += offset;
+ }
+ }
+ if (dd_out) *dd_out = dd;
+ if (from_out) *from_out = offset;
+ return buffer;
}
-#endif // !__OBJC2__
+#endif // !defined(__cplusplus)
#endif // __DISPATCH_DATA_INTERNAL__
#if VOUCHER_USE_MACH_VOUCHER
dispatch_once_t _voucher_task_mach_voucher_pred;
mach_voucher_t _voucher_task_mach_voucher;
+_voucher_atm_t _voucher_task_atm;
_voucher_activity_t _voucher_activity_default;
#endif
voucher_activity_mode_t _voucher_activity_mode;
.dq_running = 1,
.dq_width = 1,
.dq_is_thread_bound = 1,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 1,
};
.do_next = DISPATCH_OBJECT_LISTLESS, \
.dqa_qos_class = (qos), \
.dqa_relative_priority = (qos) ? (prio) : 0, \
- .dqa_overcommit = (overcommit), \
+ .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \
.dqa_concurrent = (concurrent), \
}
#define DISPATCH_QUEUE_ATTR_KIND_INIT(qos, prio) \
{ \
[DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \
- DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 1), \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 1), \
[DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_SERIAL] = \
- DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 0), \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 0), \
[DQA_INDEX_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \
- DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 1), \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 1), \
[DQA_INDEX_OVERCOMMIT][DQA_INDEX_SERIAL] = \
- DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 0), \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 0), \
+ [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 1),\
+ [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_SERIAL] = \
+ DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 0),\
}
#define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \
[DQA_INDEX_QOS_CLASS_##qos] = \
DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos)
+// DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased
+// to array member [0][0][0][0] and their properties must match!
const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
- [DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2] = {
+ [DISPATCH_QUEUE_ATTR_PRIO_COUNT]
+ [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT]
+ [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] = {
DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UNSPECIFIED),
DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(MAINTENANCE),
DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(BACKGROUND),
static int dispatch_logfile = -1;
static bool dispatch_log_disabled;
+#if DISPATCH_DEBUG
+static uint64_t dispatch_log_basetime;
+#endif
static dispatch_once_t _dispatch_logv_pred;
static void
if (dispatch_logfile != -1) {
struct timeval tv;
gettimeofday(&tv, NULL);
+#if DISPATCH_DEBUG
+ dispatch_log_basetime = mach_absolute_time();
+#endif
dprintf(dispatch_logfile, "=== log file opened for %s[%u] at "
"%ld.%06u ===\n", getprogname() ?: "", getpid(),
tv.tv_sec, tv.tv_usec);
_dispatch_logv_file(const char *msg, va_list ap)
{
char buf[2048];
- int r = vsnprintf(buf, sizeof(buf), msg, ap);
+ size_t bufsiz = sizeof(buf), offset = 0;
+ int r;
+
+#if DISPATCH_DEBUG
+ offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t",
+ mach_absolute_time() - dispatch_log_basetime);
+#endif
+ r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap);
if (r < 0) return;
- size_t len = (size_t)r;
- if (len > sizeof(buf) - 1) {
- len = sizeof(buf) - 1;
+ offset += (size_t)r;
+ if (offset > bufsiz - 1) {
+ offset = bufsiz - 1;
}
- _dispatch_log_file(buf, len);
+ _dispatch_log_file(buf, offset);
}
#if DISPATCH_USE_SIMPLE_ASL
_dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap)
{
char buf[2048];
+ size_t bufsiz = sizeof(buf), offset = 0;
int r;
- size_t offs;
+#if DISPATCH_DEBUG && !DISPATCH_USE_OS_DEBUG_LOG
+ offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t\t%p\t",
+ mach_absolute_time() - dispatch_log_basetime,
+ (void *)_dispatch_thread_self());
+#endif
if (dou._do) {
- offs = _dispatch_object_debug2(dou, buf, sizeof(buf));
- dispatch_assert(offs + 2 < sizeof(buf));
- buf[offs++] = ':';
- buf[offs++] = ' ';
- buf[offs] = '\0';
+ offset += _dispatch_object_debug2(dou, &buf[offset], bufsiz - offset);
+ dispatch_assert(offset + 2 < bufsiz);
+ buf[offset++] = ':';
+ buf[offset++] = ' ';
+ buf[offset] = '\0';
} else {
- offs = strlcpy(buf, "NULL: ", sizeof(buf));
+ offset += strlcpy(&buf[offset], "NULL: ", bufsiz - offset);
}
- r = vsnprintf(buf + offs, sizeof(buf) - offs, msg, ap);
+ r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap);
#if !DISPATCH_USE_OS_DEBUG_LOG
- size_t len = offs + (r < 0 ? 0 : (size_t)r);
- if (len > sizeof(buf) - 1) {
- len = sizeof(buf) - 1;
+ size_t len = offset + (r < 0 ? 0 : (size_t)r);
+ if (len > bufsiz - 1) {
+ len = bufsiz - 1;
}
_dispatch_logv(buf, len, NULL);
#else
Block_release(b);
}
-#pragma mark -
-#pragma mark _dispatch_block_create no_objc
-
-#if !USE_OBJC
-
-// The compiler hides the name of the function it generates, and changes it if
-// we try to reference it directly, but the linker still sees it.
-extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *)
- asm("____dispatch_block_create_block_invoke");
-void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE;
-
-dispatch_block_t
-_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher,
- pthread_priority_t pri, dispatch_block_t block)
-{
- dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902
- (void)voucher; // No voucher capture! (requires ObjC runtime)
- struct dispatch_block_private_data_s dbpds =
- DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, pri, copy_block);
- dispatch_block_t new_block = _dispatch_Block_copy(^{
- // Capture object references, which retains copy_block.
- // All retained objects must be captured by the *block*. We
- // cannot borrow any references, because the block might be
- // called zero or several times, so Block_release() is the
- // only place that can release retained objects.
- (void)copy_block;
- _dispatch_block_invoke(&dbpds);
- });
- Block_release(copy_block);
- return new_block;
-}
-
-#endif // !USE_OBJC
-
#endif // __BLOCKS__
#pragma mark -
_dispatch_set_unwind_tsd(u);
}
-#undef _dispatch_client_callout3
-bool
-_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
- const void *buffer, size_t size, dispatch_data_applier_function_t f)
-{
- _dispatch_get_tsd_base();
- void *u = _dispatch_get_unwind_tsd();
- if (fastpath(!u)) return f(ctxt, region, offset, buffer, size);
- _dispatch_set_unwind_tsd(NULL);
- bool res = f(ctxt, region, offset, buffer, size);
- _dispatch_free_unwind_tsd();
- _dispatch_set_unwind_tsd(u);
- return res;
-}
-
#undef _dispatch_client_callout4
void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
void
_os_object_xref_dispose(_os_object_t obj)
{
+ _os_object_xrefcnt_dispose_barrier(obj);
if (fastpath(obj->os_obj_isa->_os_obj_xref_dispose)) {
return obj->os_obj_isa->_os_obj_xref_dispose(obj);
}
void
_os_object_dispose(_os_object_t obj)
{
+ _os_object_refcnt_dispose_barrier(obj);
if (fastpath(obj->os_obj_isa->_os_obj_dispose)) {
return obj->os_obj_isa->_os_obj_dispose(obj);
}
.init = dispatch_source_type_interval_init,
};
+#if !DISPATCH_USE_SELECT_FALLBACK || DISPATCH_DYNAMIC_SELECT_FALLBACK
+static void
+dispatch_source_type_readwrite_init(dispatch_source_t ds,
+ dispatch_source_type_t type DISPATCH_UNUSED,
+ uintptr_t handle DISPATCH_UNUSED,
+ unsigned long mask DISPATCH_UNUSED,
+ dispatch_queue_t q DISPATCH_UNUSED)
+{
+ ds->ds_dkev->dk_kevent.flags |= EV_UDATA_SPECIFIC;
+ ds->ds_is_direct_kevent = true;
+ // bypass kernel check for device kqueue support rdar://19004921
+ ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT;
+ ds->ds_dkev->dk_kevent.data = 1;
+}
+#else
+#define dispatch_source_type_readwrite_init NULL
+#endif
+
const struct dispatch_source_type_s _dispatch_source_type_read = {
.ke = {
.filter = EVFILT_READ,
.flags = EV_DISPATCH,
},
+ .init = dispatch_source_type_readwrite_init,
};
const struct dispatch_source_type_s _dispatch_source_type_write = {
.filter = EVFILT_WRITE,
.flags = EV_DISPATCH,
},
+ .init = dispatch_source_type_readwrite_init,
};
#if DISPATCH_USE_MEMORYSTATUS
const struct dispatch_source_type_s _dispatch_source_type_memorystatus = {
.ke = {
.filter = EVFILT_MEMORYSTATUS,
- .flags = EV_DISPATCH,
+ .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
},
.mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN
|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP,
const struct dispatch_source_type_s _dispatch_source_type_vm = {
.ke = {
.filter = EVFILT_MEMORYSTATUS,
- .flags = EV_DISPATCH,
+ .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
},
.mask = NOTE_VM_PRESSURE,
.init = dispatch_source_type_vm_init,
const struct dispatch_source_type_s _dispatch_source_type_vm = {
.ke = {
.filter = EVFILT_VM,
- .flags = EV_DISPATCH,
+ .flags = EV_DISPATCH|EV_UDATA_SPECIFIC,
},
.mask = NOTE_VM_PRESSURE,
.init = dispatch_source_type_vm_init,
#endif // DISPATCH_USE_VM_PRESSURE
+/*
+ * Proc sources always monitor NOTE_EXIT in addition to the caller's
+ * requested mask (rdar://16655831).
+ */
+static void
+dispatch_source_type_proc_init(dispatch_source_t ds,
+		dispatch_source_type_t type DISPATCH_UNUSED,
+		uintptr_t handle DISPATCH_UNUSED,
+		unsigned long mask DISPATCH_UNUSED,
+		dispatch_queue_t q DISPATCH_UNUSED)
+{
+	ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831
+}
+
const struct dispatch_source_type_s _dispatch_source_type_proc = {
.ke = {
.filter = EVFILT_PROC,
- .flags = EV_CLEAR,
+ .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
},
.mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC
#if HAVE_DECL_NOTE_SIGNAL
|NOTE_REAP
#endif
,
+ .init = dispatch_source_type_proc_init,
};
const struct dispatch_source_type_s _dispatch_source_type_signal = {
.ke = {
.filter = EVFILT_SIGNAL,
+ .flags = EV_UDATA_SPECIFIC,
},
};
const struct dispatch_source_type_s _dispatch_source_type_vnode = {
.ke = {
.filter = EVFILT_VNODE,
- .flags = EV_CLEAR,
+ .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
},
.mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|
NOTE_RENAME|NOTE_REVOKE
const struct dispatch_source_type_s _dispatch_source_type_vfs = {
.ke = {
.filter = EVFILT_FS,
- .flags = EV_CLEAR,
+ .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
},
.mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|
VQ_ASSIST|VQ_NOTRESPLOCK
#ifdef EVFILT_SOCK
.ke = {
.filter = EVFILT_SOCK,
- .flags = EV_CLEAR,
+ .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
},
.mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED |
NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND |
#endif // EVFILT_SOCK
};
+#if DISPATCH_USE_EV_UDATA_SPECIFIC
+/*
+ * Custom data (add/or) sources are purely userspace constructs: they are
+ * never registered with the kernel, so they never need rearming.
+ */
+static void
+dispatch_source_type_data_init(dispatch_source_t ds,
+		dispatch_source_type_t type DISPATCH_UNUSED,
+		uintptr_t handle DISPATCH_UNUSED,
+		unsigned long mask DISPATCH_UNUSED,
+		dispatch_queue_t q DISPATCH_UNUSED)
+{
+	ds->ds_needs_rearm = false; // not registered with kevent
+}
+#else
+#define dispatch_source_type_data_init NULL
+#endif
+
const struct dispatch_source_type_s _dispatch_source_type_data_add = {
.ke = {
.filter = DISPATCH_EVFILT_CUSTOM_ADD,
+ .flags = EV_UDATA_SPECIFIC,
},
+ .init = dispatch_source_type_data_init,
};
const struct dispatch_source_type_s _dispatch_source_type_data_or = {
.ke = {
.filter = DISPATCH_EVFILT_CUSTOM_OR,
- .flags = EV_CLEAR,
+ .flags = EV_CLEAR|EV_UDATA_SPECIFIC,
.fflags = ~0u,
},
+ .init = dispatch_source_type_data_init,
};
#if HAVE_MACH
_dispatch_client_callout(void *ctxt, dispatch_function_t f);
DISPATCH_NOTHROW void
_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t));
-DISPATCH_NOTHROW bool
-_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
- const void *buffer, size_t size, dispatch_data_applier_function_t f);
DISPATCH_NOTHROW void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
dispatch_mach_msg_t dmsg, mach_error_t error,
return f(ctxt, i);
}
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
- const void *buffer, size_t size, dispatch_data_applier_function_t f)
-{
- return f(ctxt, region, offset, buffer, size);
-}
-
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
#endif // !DISPATCH_USE_CLIENT_CALLOUT
-#if !(USE_OBJC && __OBJC2__)
+#if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
#pragma mark -
#pragma mark _os_object_t & dispatch_object_t
static inline _os_object_t
_os_object_retain_internal_inline(_os_object_t obj)
{
- int ref_cnt = obj->os_obj_ref_cnt;
- if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
- return obj; // global object
- }
- ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed);
+ int ref_cnt = _os_object_refcnt_inc(obj);
if (slowpath(ref_cnt <= 0)) {
DISPATCH_CRASH("Resurrection of an object");
}
static inline void
_os_object_release_internal_inline(_os_object_t obj)
{
- int ref_cnt = obj->os_obj_ref_cnt;
- if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
- return; // global object
- }
- ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed);
+ int ref_cnt = _os_object_refcnt_dec(obj);
if (fastpath(ref_cnt >= 0)) {
return;
}
DISPATCH_CRASH("Release while external references exist");
}
#endif
+ // _os_object_refcnt_dispose_barrier() is in _os_object_dispose()
return _os_object_dispose(obj);
}
static inline pthread_priority_t _dispatch_get_defaultpriority(void);
static inline void _dispatch_set_defaultpriority_override(void);
static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority);
+static inline pthread_priority_t _dispatch_get_priority(void);
static inline void _dispatch_set_priority(pthread_priority_t priority);
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_queue_t
+_dispatch_queue_get_current(void)
+{
+ return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
+}
+
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_set_thread(dispatch_queue_t dq)
}
}
+struct _dispatch_identity_s {
+ pthread_priority_t old_pri;
+ pthread_priority_t old_pp;
+ dispatch_queue_t old_dq;
+};
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di,
+ dispatch_queue_t assumed_rq)
+{
+ di->old_dq = _dispatch_queue_get_current();
+ di->old_pri = _dispatch_get_priority();
+ di->old_pp = _dispatch_get_defaultpriority();
+
+ dispatch_assert(dx_type(di->old_dq) == DISPATCH_QUEUE_ROOT_TYPE);
+ dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_ROOT_TYPE);
+
+ _dispatch_wqthread_override_start(_dispatch_thread_port(), di->old_pri);
+ _dispatch_set_priority(assumed_rq->dq_priority);
+ _dispatch_reset_defaultpriority(assumed_rq->dq_priority);
+ _dispatch_thread_setspecific(dispatch_queue_key, assumed_rq);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di)
+{
+ _dispatch_thread_setspecific(dispatch_queue_key, di->old_dq);
+ _dispatch_set_priority(di->old_pri);
+ _dispatch_reset_defaultpriority(di->old_pp);
+ // Ensure that the root queue sees that this thread was overridden.
+ _dispatch_set_defaultpriority_override();
+}
+
+typedef dispatch_queue_t
+_dispatch_queue_class_invoke_handler_t(dispatch_object_t,
+ _dispatch_thread_semaphore_t*);
+
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_queue_class_invoke(dispatch_object_t dou,
- dispatch_queue_t (*invoke)(dispatch_object_t,
- _dispatch_thread_semaphore_t*))
+ dispatch_continuation_t dc, dispatch_invoke_flags_t flags,
+ _dispatch_queue_class_invoke_handler_t invoke)
{
pthread_priority_t p = 0;
dispatch_queue_t dq = dou._dq;
+ bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING);
+ bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING);
+
if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){
_dispatch_queue_set_thread(dq);
+
dispatch_queue_t tq = NULL;
_dispatch_thread_semaphore_t sema = 0;
+ struct _dispatch_identity_s di;
+
+ if (overriding) {
+ _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
+ dq->dq_thread, _dispatch_get_defaultpriority());
+ _dispatch_root_queue_identity_assume(&di, dc->dc_other);
+ }
+
tq = invoke(dq, &sema);
_dispatch_queue_clear_thread(dq);
- p = _dispatch_queue_reset_override_priority(dq);
- if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
- // Ensure that the root queue sees that this thread was overridden.
- _dispatch_set_defaultpriority_override();
+
+ if (!owning && !sema && tq && tq != dq->do_targetq) {
+ /*
+ * When (tq && tq != dq->do_targetq) this is a source or mach
+ * channel asking to get to their manager queue.
+ *
+ * Since stealers cannot call _dispatch_queue_push_queue and
+	 * retarget those, they need to destroy the override so that
+	 * when waking those sources or mach channels on their target queue
+	 * we don't risk a stealer taking them over and not being able to
+ * retarget again, effectively live-locking them.
+ *
+ * Also, we're in the `overriding` case so the thread will be marked
+ * dirty by _dispatch_root_queue_identity_restore anyway
+ * so forgetting about p is fine.
+ */
+ (void)_dispatch_queue_reset_override_priority(dq);
+ p = 0;
+ } else if (sema || tq || DISPATCH_OBJECT_SUSPENDED(dq)) {
+ p = _dispatch_queue_get_override_priority(dq);
+ } else {
+ p = _dispatch_queue_reset_override_priority(dq);
+ }
+ if (overriding) {
+ _dispatch_root_queue_identity_restore(&di);
+ } else {
+ if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+ // Ensure that the root queue sees that this thread was overridden.
+ _dispatch_set_defaultpriority_override();
+ }
}
- // We do not need to check the result.
- // When the suspend-count lock is dropped, then the check will happen.
- (void)dispatch_atomic_dec2o(dq, dq_running, release);
+
+ uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release);
if (sema) {
_dispatch_thread_semaphore_signal(sema);
- } else if (tq) {
+ } else if (owning && tq) {
+ _dispatch_introspection_queue_item_complete(dq);
+ return _dispatch_queue_push_queue(tq, dq, p);
+ }
+ if (!owning && running == 0) {
_dispatch_introspection_queue_item_complete(dq);
- return _dispatch_queue_push(tq, dq, p);
+ return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
+ }
+ } else if (overriding) {
+ mach_port_t th = dq->dq_thread;
+ if (th) {
+ p = _dispatch_queue_get_override_priority(dq);
+ _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
+ th, p);
+ _dispatch_wqthread_override_start(th, p);
}
}
- dq->do_next = DISPATCH_OBJECT_LISTLESS;
+
_dispatch_introspection_queue_item_complete(dq);
- if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
- DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) {
- // seq_cst with atomic store to suspend_cnt <rdar://problem/11915417>
- if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
- // verify that the queue is idle
- return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
+ if (owning) {
+ dq->do_next = DISPATCH_OBJECT_LISTLESS;
+ if (!dispatch_atomic_sub2o(dq, do_suspend_cnt,
+ DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) {
+ // seq_cst with atomic store to suspend_cnt <rdar://problem/11915417>
+ if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) {
+ // verify that the queue is idle
+ return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
+ }
}
}
_dispatch_release(dq); // added when the queue is put on the list
return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL);
}
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_t
-_dispatch_queue_get_current(void)
-{
- return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key);
-}
-
DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_t
_dispatch_get_root_queue(qos_class_t priority, bool overcommit)
dq->dq_running = 0;
dq->dq_width = 1;
+ dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers,
relaxed);
}
return dq->dq_thread;
}
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_pthread_root_queue_observer_hooks_t
+_dispatch_get_pthread_root_queue_observer_hooks(void)
+{
+ return _dispatch_thread_getspecific(
+ dispatch_pthread_root_queue_observer_hooks_key);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_set_pthread_root_queue_observer_hooks(
+ dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
+{
+ _dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key,
+ observer_hooks);
+}
+
#pragma mark -
#pragma mark dispatch_priority
const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG;
const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG;
pthread_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority;
- if ((!dqp || (dqp & inherited_flag)) && (tqp & rootqueue_flag)) {
+ if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) &&
+ (tqp & rootqueue_flag)) {
dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag;
}
#else
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline voucher_t
_dispatch_adopt_priority_and_voucher(pthread_priority_t priority,
- voucher_t voucher, unsigned long flags)
+ voucher_t v, unsigned long flags)
{
pthread_priority_t p = 0;
if (priority != DISPATCH_NO_PRIORITY) {
p = _dispatch_priority_adopt(priority, flags);
}
- return _dispatch_set_priority_and_adopt_voucher(p, voucher);
+ if (!(flags & DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE)) {
+ dispatch_queue_t dq = _dispatch_queue_get_current();
+ if (dq && dq->dq_override_voucher != DISPATCH_NO_VOUCHER) {
+ if (v != DISPATCH_NO_VOUCHER && v) _voucher_release(v);
+ v = dq->dq_override_voucher;
+ if (v) _voucher_retain(v);
+ }
+ }
+ return _dispatch_set_priority_and_adopt_voucher(p, v);
+}
+
+DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
+static inline voucher_t
+_dispatch_adopt_queue_override_voucher(dispatch_queue_t dq)
+{
+ voucher_t v = dq->dq_override_voucher;
+ if (v == DISPATCH_NO_VOUCHER) return DISPATCH_NO_VOUCHER;
+ if (v) _voucher_retain(v);
+ return _dispatch_set_priority_and_adopt_voucher(DISPATCH_NO_PRIORITY, v);
}
DISPATCH_ALWAYS_INLINE
DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_set_priority_and_replace_voucher(pthread_priority_t priority,
+_dispatch_reset_priority_and_voucher(pthread_priority_t priority,
voucher_t voucher)
{
voucher_t ov;
if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov);
}
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_reset_voucher(voucher_t voucher)
+{
+ return _dispatch_reset_priority_and_voucher(DISPATCH_NO_PRIORITY, voucher);
+}
+
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_priority(pthread_priority_t priority)
DISPATCH_ALWAYS_INLINE
static inline bool
-_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t pp)
+_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t *pp,
+ bool *was_overridden)
{
- uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
uint32_t o = dq->dq_override;
- if (o < p) o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed);
+ uint32_t p = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK);
+ if (o < p) {
+ o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed);
+ if (was_overridden) {
+ o = (uint32_t)_dispatch_priority_normalize(o);
+ }
+ *pp = _dispatch_priority_normalize(o | p);
+ } else {
+ o = (uint32_t)_dispatch_priority_normalize(o);
+ *pp = o;
+ }
+ if (was_overridden) {
+ *was_overridden =
+ (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) < o;
+ }
return (o < p);
}
uint8_t *x = (uint8_t *)db;
// x points to base of struct Block_layout
x += sizeof(struct Block_layout);
- // x points to addresss of captured block
- x += sizeof(dispatch_block_t);
-#if USE_OBJC
- // x points to addresss of captured voucher
- x += sizeof(voucher_t);
-#endif
- // x points to base of captured dispatch_block_private_data_s structure
+ // x points to base of captured dispatch_block_private_data_s object
dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x;
if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) {
DISPATCH_CRASH("Corruption of dispatch block object");
#include "trace.h"
-DISPATCH_ALWAYS_INLINE_NDEBUG
+DISPATCH_ALWAYS_INLINE
static inline void
-_dispatch_continuation_pop(dispatch_object_t dou)
+_dispatch_continuation_invoke(dispatch_object_t dou, dispatch_queue_t dq)
{
dispatch_continuation_t dc = dou._dc, dc1;
dispatch_group_t dg;
- _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou);
+ _dispatch_trace_continuation_pop(dq, dou);
if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
- return dx_invoke(dou._do);
+ return dx_invoke(dou._do, NULL, DISPATCH_INVOKE_NONE);
}
// Add the item back to the cache before calling the function. This
}
}
+DISPATCH_ALWAYS_INLINE_NDEBUG
+static inline void
+_dispatch_continuation_pop(dispatch_object_t dou)
+{
+ dispatch_queue_t dq = _dispatch_queue_get_current();
+ dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
+ _dispatch_get_pthread_root_queue_observer_hooks();
+ if (observer_hooks) observer_hooks->queue_will_execute(dq);
+ _dispatch_continuation_invoke(dou, dq);
+ if (observer_hooks) observer_hooks->queue_did_execute(dq);
+}
+
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_priority_set(dispatch_continuation_t dc,
#endif
}
-#endif // !(USE_OBJC && __OBJC2__)
+#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
#endif /* __DISPATCH_INLINE_INTERNAL__ */
#define slowpath(x) (x)
#endif // __GNUC__
+#if DISPATCH_DEBUG
+// sys/queue.h debugging
+#undef TRASHIT
+#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
+#endif // DISPATCH_DEBUG
+#define _TAILQ_TRASH_ENTRY(elm, field) do { \
+ TRASHIT((elm)->field.tqe_next); \
+ TRASHIT((elm)->field.tqe_prev); \
+ } while (0)
+#define _TAILQ_TRASH_HEAD(head) do { \
+ TRASHIT((head)->tqh_first); \
+ TRASHIT((head)->tqh_last); \
+ } while (0)
+
DISPATCH_NOINLINE
void _dispatch_bug(size_t line, long val);
#define dsnprintf(...) \
({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; })
+#if __GNUC__
+#define dispatch_static_assert(e) ({ \
+ char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \
+ })
+#else
+#define dispatch_static_assert(e)
+#endif
+
/*
* For reporting bugs within libdispatch when using the "_debug" version of the
* library.
#if __GNUC__
#define dispatch_assert(e) do { \
if (__builtin_constant_p(e)) { \
- char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \
+ dispatch_static_assert(e); \
} else { \
typeof(e) _e = fastpath(e); /* always eval 'e' */ \
if (DISPATCH_DEBUG && !_e) { \
*/
#define dispatch_assert_zero(e) do { \
if (__builtin_constant_p(e)) { \
- char __compile_time_assert__[(bool)(e) ? -1 : 1] DISPATCH_UNUSED; \
+ dispatch_static_assert(e); \
} else { \
typeof(e) _e = slowpath(e); /* always eval 'e' */ \
if (DISPATCH_DEBUG && _e) { \
typeof(e) _e = fastpath(e); /* always eval 'e' */ \
if (!_e) { \
if (__builtin_constant_p(e)) { \
- char __compile_time_assert__[(bool)(e) ? 1 : -1]; \
- (void)__compile_time_assert__; \
+ dispatch_static_assert(e); \
} \
_dispatch_bug(__LINE__, (long)_e); \
} \
typeof(e) _e = slowpath(e); /* always eval 'e' */ \
if (_e) { \
if (__builtin_constant_p(e)) { \
- char __compile_time_assert__[(bool)(e) ? -1 : 1]; \
- (void)__compile_time_assert__; \
+ dispatch_static_assert(e); \
} \
_dispatch_bug(__LINE__, (long)_e); \
} \
#if __GNUC__
#define dispatch_debug_assert(e, msg, args...) do { \
if (__builtin_constant_p(e)) { \
- char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \
+ dispatch_static_assert(e); \
} else { \
typeof(e) _e = fastpath(e); /* always eval 'e' */ \
if (DISPATCH_DEBUG && !_e) { \
#endif
#endif // HAVE_DECL_NOTE_REAP
+#if !defined(EV_UDATA_SPECIFIC) || (TARGET_IPHONE_SIMULATOR && \
+ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \
+ (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100)
+#undef DISPATCH_USE_EV_UDATA_SPECIFIC
+#define DISPATCH_USE_EV_UDATA_SPECIFIC 0
+#elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC)
+#define DISPATCH_USE_EV_UDATA_SPECIFIC 1
+#endif // EV_UDATA_SPECIFIC
+
+#if !DISPATCH_USE_EV_UDATA_SPECIFIC
+#undef EV_UDATA_SPECIFIC
+#define EV_UDATA_SPECIFIC 0
+#undef DISPATCH_DYNAMIC_SELECT_FALLBACK
+#define DISPATCH_DYNAMIC_SELECT_FALLBACK 0
+#undef DISPATCH_USE_SELECT_FALLBACK
+#define DISPATCH_USE_SELECT_FALLBACK 1
+#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
+
+#if !defined(EV_SET_QOS) || (TARGET_IPHONE_SIMULATOR && \
+ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \
+ (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100)
+#undef DISPATCH_USE_KEVENT_QOS
+#define DISPATCH_USE_KEVENT_QOS 0
+#elif !defined(DISPATCH_USE_KEVENT_QOS)
+#define DISPATCH_USE_KEVENT_QOS 1
+#endif // EV_SET_QOS
+
+#if DISPATCH_USE_KEVENT_QOS
+typedef struct kevent_qos_s _dispatch_kevent_qos_s;
+#else // DISPATCH_USE_KEVENT_QOS
+#ifndef KEVENT_FLAG_IMMEDIATE
+#define KEVENT_FLAG_NONE 0x00
+#define KEVENT_FLAG_IMMEDIATE 0x01
+#define KEVENT_FLAG_ERROR_EVENTS 0x02
+#endif // KEVENT_FLAG_IMMEDIATE
+typedef struct kevent64_s _dispatch_kevent_qos_s;
+#define kevent_qos(_kq, _changelist, _nchanges, _eventlist, _nevents, \
+ _data_out, _data_available, _flags) \
+ ({ unsigned int _f = (_flags); _dispatch_kevent_qos_s _kev_copy; \
+ const _dispatch_kevent_qos_s *_cl = (_changelist); \
+ int _n = (_nchanges); const struct timespec _timeout_immediately = {}; \
+ dispatch_static_assert(!(_data_out) && !(_data_available)); \
+ if (_f & KEVENT_FLAG_ERROR_EVENTS) { \
+ dispatch_static_assert(_n == 1); \
+ _kev_copy = *_cl; _kev_copy.flags |= EV_RECEIPT; } \
+ kevent64((_kq), _f & KEVENT_FLAG_ERROR_EVENTS ? &_kev_copy : _cl, _n, \
+ (_eventlist), (_nevents), 0, \
+ _f & KEVENT_FLAG_IMMEDIATE ? &_timeout_immediately : NULL); })
+#endif // DISPATCH_USE_KEVENT_QOS
+
#if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE)
#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070
#undef DISPATCH_USE_SETNOSIGPIPE
#ifndef DISPATCH_USE_CHECKIN_NOIMPORTANCE
#define DISPATCH_USE_CHECKIN_NOIMPORTANCE 1 // rdar://problem/16996737
#endif
+#ifndef DISPATCH_USE_NOIMPORTANCE_QOS
+#define DISPATCH_USE_NOIMPORTANCE_QOS 1 // rdar://problem/21414476
+#endif
#endif // MACH_SEND_NOIMPORTANCE
__asm__(""); __builtin_trap() // <rdar://problem/17464981>
#define _dispatch_set_crash_log_message(msg)
+#define _dispatch_set_crash_log_message_dynamic(msg)
#if HAVE_MACH
// MIG_REPLY_MISMATCH means either:
#define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul)
#define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul)
#define DISPATCH_PRIORITY_ENFORCE 0x1
+#define DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE 0x2
static inline void _dispatch_adopt_priority_and_replace_voucher(
pthread_priority_t priority, voucher_t voucher, unsigned long flags);
#if HAVE_MACH
type != DISPATCH_QUEUE_SPECIFIC_TYPE) {
diqi.type = dispatch_introspection_queue_item_type_queue;
diqi.queue = dispatch_introspection_queue_get_info(dou._dq);
- } else if (metatype == _DISPATCH_SOURCE_TYPE) {
+ } else if (metatype == _DISPATCH_SOURCE_TYPE &&
+ type != DISPATCH_MACH_CHANNEL_TYPE) {
diqi.type = dispatch_introspection_queue_item_type_source;
diqi.source = _dispatch_introspection_source_get_info(dou._ds);
} else {
void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f);
void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f);
-#if !__OBJC2__
+#if !__OBJC2__ && !defined(__cplusplus)
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_introspection_queue_item_dequeue(dq, dou);
};
-#endif
+#endif // !__OBJC2__ && !defined(__cplusplus)
-#else
+#else // DISPATCH_INTROSPECTION
#define DISPATCH_INTROSPECTION_QUEUE_LIST
#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE 0
#pragma mark -
#pragma mark dispatch_io_hashtables
-#if TARGET_OS_EMBEDDED
-#define DIO_HASH_SIZE 64u // must be a power of two
-#else
-#define DIO_HASH_SIZE 256u // must be a power of two
-#endif
-#define DIO_HASH(x) ((uintptr_t)(x) & (DIO_HASH_SIZE - 1))
-
// Global hashtable of dev_t -> disk_s mappings
DISPATCH_CACHELINE_ALIGN
static TAILQ_HEAD(, dispatch_disk_s) _dispatch_io_devs[DIO_HASH_SIZE];
};
static struct dispatch_io_defaults_s {
- size_t chunk_pages, low_water_chunks, max_pending_io_reqs;
+ size_t chunk_size, low_water_chunks, max_pending_io_reqs;
bool initial_delivery;
} dispatch_io_defaults = {
- .chunk_pages = DIO_MAX_CHUNK_PAGES,
+ .chunk_size = DIO_MAX_CHUNK_SIZE,
.low_water_chunks = DIO_DEFAULT_LOW_WATER_CHUNKS,
.max_pending_io_reqs = DIO_MAX_PENDING_IO_REQS,
};
{
switch (param) {
case DISPATCH_IOCNTL_CHUNK_PAGES:
- _dispatch_iocntl_set_default(chunk_pages, value);
+ _dispatch_iocntl_set_default(chunk_size, value * PAGE_SIZE);
break;
case DISPATCH_IOCNTL_LOW_WATER_CHUNKS:
_dispatch_iocntl_set_default(low_water_chunks, value);
channel->params.type = type;
channel->params.high = SIZE_MAX;
channel->params.low = dispatch_io_defaults.low_water_chunks *
- dispatch_io_defaults.chunk_pages * PAGE_SIZE;
+ dispatch_io_defaults.chunk_size;
channel->queue = dispatch_queue_create("com.apple.libdispatch-io.channelq",
NULL);
return channel;
void (^cleanup_handler)(int error))
{
if ((type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) ||
- !(path && *path == '/')) {
+ !(*path == '/')) {
return NULL;
}
size_t pathlen = strlen(path);
_dispatch_disk_perform(void *ctxt)
{
dispatch_disk_t disk = ctxt;
- size_t chunk_size = dispatch_io_defaults.chunk_pages * PAGE_SIZE;
+ size_t chunk_size = dispatch_io_defaults.chunk_size;
_dispatch_fd_debug("disk perform", -1);
dispatch_operation_t op;
size_t i = disk->advise_idx, j = disk->free_idx;
_dispatch_object_debug(op, "%s", __func__);
if (!op->buf) {
size_t max_buf_siz = op->params.high;
- size_t chunk_siz = dispatch_io_defaults.chunk_pages * PAGE_SIZE;
+ size_t chunk_siz = dispatch_io_defaults.chunk_size;
if (op->direction == DOP_DIR_READ) {
// If necessary, create a buffer for the ongoing operation, large
- // enough to fit chunk_pages but at most high-water
+ // enough to fit chunk_size but at most high-water
size_t data_siz = dispatch_data_get_size(op->data);
if (data_siz) {
dispatch_assert(data_siz < max_buf_siz);
#define _DISPATCH_IO_LABEL_SIZE 16
#if TARGET_OS_EMBEDDED // rdar://problem/9032036
-#define DIO_MAX_CHUNK_PAGES 128u // 512kB chunk size
+#define DIO_MAX_CHUNK_SIZE (512u * 1024)
+#define DIO_HASH_SIZE 64u // must be a power of two
#else
-#define DIO_MAX_CHUNK_PAGES 256u // 1024kB chunk size
+#define DIO_MAX_CHUNK_SIZE (1024u * 1024)
+#define DIO_HASH_SIZE 256u // must be a power of two
#endif
+#define DIO_HASH(x) ((uintptr_t)(x) & (DIO_HASH_SIZE - 1))
+
#define DIO_DEFAULT_LOW_WATER_CHUNKS 1u // default low-water mark
#define DIO_MAX_PENDING_IO_REQS 6u // Pending I/O read advises
DISPATCH_CLASS_DECL(disk);
struct dispatch_disk_s {
DISPATCH_STRUCT_HEADER(disk);
- dev_t dev;
TAILQ_HEAD(dispatch_disk_operations_s, dispatch_operation_s) operations;
dispatch_operation_t cur_rq;
dispatch_queue_t pick_queue;
size_t free_idx;
size_t req_idx;
size_t advise_idx;
+ dev_t dev;
bool io_active;
- int err;
TAILQ_ENTRY(dispatch_disk_s) disk_list;
size_t advise_list_depth;
dispatch_operation_t advise_list[];
_os_object_t
_os_object_retain(_os_object_t obj)
{
- int xref_cnt = obj->os_obj_xref_cnt;
- if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
- return obj; // global object
- }
- xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed);
+ int xref_cnt = _os_object_xrefcnt_inc(obj);
if (slowpath(xref_cnt <= 0)) {
_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
}
return obj;
}
+DISPATCH_NOINLINE
+_os_object_t
+_os_object_retain_with_resurrect(_os_object_t obj)
+{
+ int xref_cnt = _os_object_xrefcnt_inc(obj);
+ if (slowpath(xref_cnt < 0)) {
+ _OS_OBJECT_CLIENT_CRASH("Resurrection of an overreleased object");
+ }
+ if (slowpath(xref_cnt == 0)) {
+ _os_object_retain_internal(obj);
+ }
+ return obj;
+}
+
DISPATCH_NOINLINE
void
_os_object_release(_os_object_t obj)
{
- int xref_cnt = obj->os_obj_xref_cnt;
- if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
- return; // global object
- }
- xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed);
+ int xref_cnt = _os_object_xrefcnt_dec(obj);
if (fastpath(xref_cnt >= 0)) {
return;
}
return obj;
}
+#if DISPATCH_COCOA_COMPAT
+static bool _os_object_debug_missing_pools;
+#endif
+
void
_os_object_init(void)
{
(void (*)(const void *))&_os_objc_destructInstance
};
_Block_use_RR2(&callbacks);
+#if DISPATCH_COCOA_COMPAT
+ const char *v = getenv("OBJC_DEBUG_MISSING_POOLS");
+ _os_object_debug_missing_pools = v && !strcmp(v, "YES");
+#endif
}
_os_object_t
void
_os_object_xref_dispose(_os_object_t obj)
{
+ struct _os_object_s *o = (struct _os_object_s *)obj;
+ _os_object_xrefcnt_dispose_barrier(o);
[obj _xref_dispose];
}
void
_os_object_dispose(_os_object_t obj)
{
+ struct _os_object_s *o = (struct _os_object_s *)obj;
+ _os_object_refcnt_dispose_barrier(o);
[obj _dispose];
}
void *
_dispatch_autorelease_pool_push(void) {
- return objc_autoreleasePoolPush();
+ if (!slowpath(_os_object_debug_missing_pools)) {
+ return objc_autoreleasePoolPush();
+ }
+ return NULL;
}
void
_dispatch_autorelease_pool_pop(void *context) {
- return objc_autoreleasePoolPop(context);
+ if (!slowpath(_os_object_debug_missing_pools)) {
+ return objc_autoreleasePoolPop(context);
+ }
}
#endif // DISPATCH_COCOA_COMPAT
}
}
-#undef _dispatch_client_callout3
-bool
-_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset,
- const void *buffer, size_t size, dispatch_data_applier_function_t f)
-{
- @try {
- return f(ctxt, region, offset, buffer, size);
- }
- @catch (...) {
- objc_terminate();
- }
-}
-
#undef _dispatch_client_callout4
void
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason,
#endif // DISPATCH_USE_CLIENT_CALLOUT
-#pragma mark -
-#pragma mark _dispatch_block_create
-
-// The compiler hides the name of the function it generates, and changes it if
-// we try to reference it directly, but the linker still sees it.
-extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *)
- asm("____dispatch_block_create_block_invoke");
-void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE;
-
-dispatch_block_t
-_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher,
- pthread_priority_t pri, dispatch_block_t block)
-{
- dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902
- struct dispatch_block_private_data_s dbpds =
- DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, pri, copy_block);
- dispatch_block_t new_block = _dispatch_Block_copy(^{
- // Capture object references, which retains copy_block and voucher.
- // All retained objects must be captured by the *block*. We
- // cannot borrow any references, because the block might be
- // called zero or several times, so Block_release() is the
- // only place that can release retained objects.
- (void)copy_block;
- (void)voucher;
- _dispatch_block_invoke(&dbpds);
- });
- Block_release(copy_block);
- return new_block;
-}
-
#endif // USE_OBJC
#define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super)
#endif // OS_OBJECT_USE_OBJC
+DISPATCH_ENUM(dispatch_invoke_flags, unsigned long,
+ DISPATCH_INVOKE_NONE = 0x00,
+ /* This invoke is a stealer, meaning that it doesn't own the
+ * enqueue lock, and is not allowed to requeue elsewhere
+ */
+ DISPATCH_INVOKE_STEALING = 0x01,
+ /* The `dc` argument is a dispatch continuation wrapper
+ * created by _dispatch_queue_push_override
+ */
+ DISPATCH_INVOKE_OVERRIDING = 0x02,
+);
+
#if USE_OBJC
#define DISPATCH_CLASS(name) OS_OBJECT_CLASS(dispatch_##name)
// ObjC classes and dispatch vtables are co-located via linker order and alias
unsigned long const do_type; \
const char *const do_kind; \
size_t (*const do_debug)(struct dispatch_##x##_s *, char *, size_t); \
- void (*const do_invoke)(struct dispatch_##x##_s *); \
+ void (*const do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \
+ dispatch_invoke_flags_t); \
unsigned long (*const do_probe)(struct dispatch_##x##_s *); \
void (*const do_dispose)(struct dispatch_##x##_s *);
#else
unsigned long do_type; \
const char *do_kind; \
size_t (*do_debug)(struct dispatch_##x##_s *, char *, size_t); \
- void (*do_invoke)(struct dispatch_##x##_s *); \
+ void (*do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \
+ dispatch_invoke_flags_t); \
unsigned long (*do_probe)(struct dispatch_##x##_s *); \
void (*do_dispose)(struct dispatch_##x##_s *);
#endif
#define dx_kind(x) (x)->do_vtable->do_kind
#define dx_debug(x, y, z) (x)->do_vtable->do_debug((x), (y), (z))
#define dx_dispose(x) (x)->do_vtable->do_dispose(x)
-#define dx_invoke(x) (x)->do_vtable->do_invoke(x)
+#define dx_invoke(x, y, z) (x)->do_vtable->do_invoke(x, y, z)
#define dx_probe(x) (x)->do_vtable->do_probe(x)
#define DISPATCH_STRUCT_HEADER(x) \
#pragma mark -
#pragma mark _os_object_s
+/*
+ * Low level _os_atomic_refcnt_* actions
+ *
+ * _os_atomic_refcnt_inc2o(o, f):
+ * performs a refcount increment and returns the new refcount value
+ *
+ * _os_atomic_refcnt_dec2o(o, f):
+ * performs a refcount decrement and returns the new refcount value
+ *
+ * _os_atomic_refcnt_dispose_barrier2o(o, f):
+ * a barrier to perform prior to tearing down an object when the refcount
+ * reached -1.
+ */
+#define _os_atomic_refcnt_perform2o(o, f, op, m) ({ \
+ typeof(o) _o = (o); \
+ int _ref_cnt = _o->f; \
+ if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \
+ _ref_cnt = dispatch_atomic_##op##2o(_o, f, m); \
+ } \
+ _ref_cnt; \
+ })
+
+#define _os_atomic_refcnt_inc2o(o, m) \
+ _os_atomic_refcnt_perform2o(o, m, inc, relaxed)
+
+#define _os_atomic_refcnt_dec2o(o, m) \
+ _os_atomic_refcnt_perform2o(o, m, dec, release)
+
+#define _os_atomic_refcnt_dispose_barrier2o(o, m) \
+ (void)dispatch_atomic_load2o(o, m, acquire)
+
+
+/*
+ * Higher level _os_object_{x,}refcnt_* actions
+ *
+ * _os_atomic_{x,}refcnt_inc(o):
+ * increment the external (resp. internal) refcount and
+ * returns the new refcount value
+ *
+ * _os_atomic_{x,}refcnt_dec(o):
+ * decrement the external (resp. internal) refcount and
+ * returns the new refcount value
+ *
+ * _os_atomic_{x,}refcnt_dispose_barrier(o):
+ * performs the pre-teardown barrier for the external
+ * (resp. internal) refcount
+ *
+ */
+#define _os_object_xrefcnt_inc(o) \
+ _os_atomic_refcnt_inc2o(o, os_obj_xref_cnt)
+
+#define _os_object_xrefcnt_dec(o) \
+ _os_atomic_refcnt_dec2o(o, os_obj_xref_cnt)
+
+#define _os_object_xrefcnt_dispose_barrier(o) \
+ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_xref_cnt)
+
+#define _os_object_refcnt_inc(o) \
+ _os_atomic_refcnt_inc2o(o, os_obj_ref_cnt)
+
+#define _os_object_refcnt_dec(o) \
+ _os_atomic_refcnt_dec2o(o, os_obj_ref_cnt)
+
+#define _os_object_refcnt_dispose_barrier(o) \
+ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt)
+
typedef struct _os_object_class_s {
_OS_OBJECT_CLASS_HEADER();
} _os_object_class_s;
static inline bool _dispatch_queue_prepare_override(dispatch_queue_t dq,
dispatch_queue_t tq, pthread_priority_t p);
static inline void _dispatch_queue_push_override(dispatch_queue_t dq,
- dispatch_queue_t tq, pthread_priority_t p);
+ dispatch_queue_t tq, pthread_priority_t p, bool owning);
#if HAVE_PTHREAD_WORKQUEUES
static void _dispatch_worker_thread4(void *context);
#if HAVE_PTHREAD_WORKQUEUE_QOS
pthread_attr_t dpq_thread_attr;
dispatch_block_t dpq_thread_configure;
struct dispatch_semaphore_s dpq_thread_mediator;
+ dispatch_pthread_root_queue_observer_hooks_s dpq_observer_hooks;
};
typedef struct dispatch_pthread_root_queue_context_s *
dispatch_pthread_root_queue_context_t;
.dq_label = "com.apple.root.maintenance-qos",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 4,
},
[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {
.dq_label = "com.apple.root.maintenance-qos.overcommit",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 5,
},
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {
.dq_label = "com.apple.root.background-qos",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 6,
},
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {
.dq_label = "com.apple.root.background-qos.overcommit",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 7,
},
[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {
.dq_label = "com.apple.root.utility-qos",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 8,
},
[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {
.dq_label = "com.apple.root.utility-qos.overcommit",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 9,
},
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {
.dq_label = "com.apple.root.default-qos",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 10,
},
[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {
.dq_label = "com.apple.root.default-qos.overcommit",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 11,
},
[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {
.dq_label = "com.apple.root.user-initiated-qos",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 12,
},
[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {
.dq_label = "com.apple.root.user-initiated-qos.overcommit",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 13,
},
[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {
.dq_label = "com.apple.root.user-interactive-qos",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 14,
},
[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {
.dq_label = "com.apple.root.user-interactive-qos.overcommit",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 15,
},
};
.dq_label = "com.apple.libdispatch-manager",
.dq_width = 1,
.dq_is_thread_bound = 1,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 2,
};
_dispatch_root_queues_init);
qos_class_t qos;
switch (priority) {
-#if !RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
case _DISPATCH_QOS_CLASS_MAINTENANCE:
if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]
.dq_priority) {
qos = (qos_class_t)priority;
}
break;
-#endif // RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
+#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK
case DISPATCH_QUEUE_PRIORITY_BACKGROUND:
qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND];
break;
expected ? "Expected" : "Unexpected", dq, dq->dq_label ?
dq->dq_label : "");
_dispatch_log("%s", msg);
- _dispatch_set_crash_log_message(msg);
+ _dispatch_set_crash_log_message_dynamic(msg);
_dispatch_hardware_crash();
free(msg);
}
void
dispatch_assert_queue(dispatch_queue_t dq)
{
- if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) {
+ if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) {
DISPATCH_CLIENT_CRASH("invalid queue passed to "
"dispatch_assert_queue()");
}
void
dispatch_assert_queue_not(dispatch_queue_t dq)
{
- if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) {
+ if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) {
DISPATCH_CLIENT_CRASH("invalid queue passed to "
"dispatch_assert_queue_not()");
}
_dispatch_thread_key_create(&dispatch_io_key, NULL);
_dispatch_thread_key_create(&dispatch_apply_key, NULL);
_dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL);
+ _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key,
+ NULL);
#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION
_dispatch_thread_key_create(&dispatch_bcounter_key, NULL);
#endif
};
#define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \
- (overcommit ? DQA_INDEX_OVERCOMMIT : DQA_INDEX_NON_OVERCOMMIT)
+ ((overcommit) == _dispatch_queue_attr_overcommit_disabled ? \
+ DQA_INDEX_NON_OVERCOMMIT : \
+ ((overcommit) == _dispatch_queue_attr_overcommit_enabled ? \
+ DQA_INDEX_OVERCOMMIT : DQA_INDEX_UNSPECIFIED_OVERCOMMIT))
#define DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent) \
- (concurrent ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL)
+ ((concurrent) ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL)
#define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio))
#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)])
static inline dispatch_queue_attr_t
-_dispatch_get_queue_attr(qos_class_t qos, int prio, bool overcommit,
- bool concurrent)
+_dispatch_get_queue_attr(qos_class_t qos, int prio,
+ _dispatch_queue_attr_overcommit_t overcommit, bool concurrent)
{
return (dispatch_queue_attr_t)&_dispatch_queue_attrs
[DISPATCH_QUEUE_ATTR_QOS2IDX(qos)]
{
if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL;
if (!slowpath(dqa)) {
- dqa = _dispatch_get_queue_attr(0, 0, false, false);
+ dqa = _dispatch_get_queue_attr(0, 0,
+ _dispatch_queue_attr_overcommit_unspecified, false);
} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
DISPATCH_CLIENT_CRASH("Invalid queue attribute");
}
bool overcommit)
{
if (!slowpath(dqa)) {
- dqa = _dispatch_get_queue_attr(0, 0, false, false);
+ dqa = _dispatch_get_queue_attr(0, 0,
+ _dispatch_queue_attr_overcommit_unspecified, false);
} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
DISPATCH_CLIENT_CRASH("Invalid queue attribute");
}
return _dispatch_get_queue_attr(dqa->dqa_qos_class,
- dqa->dqa_relative_priority, overcommit, dqa->dqa_concurrent);
+ dqa->dqa_relative_priority, overcommit ?
+ _dispatch_queue_attr_overcommit_enabled :
+ _dispatch_queue_attr_overcommit_disabled, dqa->dqa_concurrent);
}
#pragma mark -
#endif
bool disallow_tq = (slowpath(dqa) && dqa != DISPATCH_QUEUE_CONCURRENT);
if (!slowpath(dqa)) {
- dqa = _dispatch_get_queue_attr(0, 0, false, false);
+ dqa = _dispatch_get_queue_attr(0, 0,
+ _dispatch_queue_attr_overcommit_unspecified, false);
} else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) {
DISPATCH_CLIENT_CRASH("Invalid queue attribute");
}
dq->dq_label = strdup(label);
}
qos_class_t qos = dqa->dqa_qos_class;
- bool overcommit = dqa->dqa_overcommit;
+ _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit;
+ if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
+ // Serial queues default to overcommit!
+ overcommit = dqa->dqa_concurrent ?
+ _dispatch_queue_attr_overcommit_disabled :
+ _dispatch_queue_attr_overcommit_enabled;
+ }
#if HAVE_PTHREAD_WORKQUEUE_QOS
dq->dq_priority = _pthread_qos_class_encode(qos, dqa->dqa_relative_priority,
- overcommit);
+ overcommit == _dispatch_queue_attr_overcommit_enabled ?
+ _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0);
#endif
if (dqa->dqa_concurrent) {
dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX;
- } else {
- // Default serial queue target queue is overcommit!
- overcommit = true;
}
if (!tq) {
if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) {
}
}
- tq = _dispatch_get_root_queue(qos, overcommit);
+ tq = _dispatch_get_root_queue(qos, overcommit ==
+ _dispatch_queue_attr_overcommit_enabled);
if (slowpath(!tq)) {
DISPATCH_CLIENT_CRASH("Invalid queue attribute");
}
DISPATCH_TARGET_QUEUE_DEFAULT);
}
+/*
+ * Create a queue (targeting the default root queue, like
+ * dispatch_queue_create) and install an accounting voucher derived from
+ * `voucher` as the queue's dq_override_voucher, which is adopted around
+ * every work item invoked on this queue.
+ * NOTE(review): the exact accounting-only semantics are defined by
+ * _voucher_create_accounting_voucher — confirm there.
+ */
+dispatch_queue_t
+dispatch_queue_create_with_accounting_override_voucher(const char *label,
+ dispatch_queue_attr_t attr, voucher_t voucher)
+{
+ dispatch_queue_t dq = dispatch_queue_create_with_target(label, attr,
+ DISPATCH_TARGET_QUEUE_DEFAULT);
+ dq->dq_override_voucher = _voucher_create_accounting_voucher(voucher);
+ return dq;
+}
+
void
_dispatch_queue_destroy(dispatch_object_t dou)
{
if (dqsq) {
_dispatch_release(dqsq);
}
+ if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) {
+ if (dq->dq_override_voucher) _voucher_release(dq->dq_override_voucher);
+ dq->dq_override_voucher = DISPATCH_NO_VOUCHER;
+ }
}
// 6618342 Contact the team that owns the Instrument DTrace probe before
_dispatch_set_target_queue2(void *ctxt)
{
dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(), tq = ctxt;
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ // see _dispatch_queue_wakeup_with_qos_slow
+ mach_msg_timeout_t timeout = 1;
mach_port_t th;
while (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, MACH_PORT_NULL,
_dispatch_thread_port(), &th, acquire)) {
_dispatch_thread_switch(th, DISPATCH_YIELD_THREAD_SWITCH_OPTION,
- DISPATCH_CONTENTION_USLEEP_START);
+ timeout++);
}
+#endif
_dispatch_queue_priority_inherit_from_target(dq, tq);
prev_dq = dq->do_targetq;
dq->do_targetq = tq;
.dgq_ctxt = &_dispatch_mgr_root_queue_pthread_context,
.dgq_thread_pool_size = 1,
}}};
+
static struct dispatch_queue_s _dispatch_mgr_root_queue = {
.do_vtable = DISPATCH_VTABLE(queue_root),
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
.dq_label = "com.apple.root.libdispatch-manager",
.dq_running = 2,
.dq_width = DISPATCH_QUEUE_WIDTH_MAX,
+ .dq_override_voucher = DISPATCH_NO_VOUCHER,
.dq_serialnum = 3,
};
+
static struct {
volatile int prio;
+ volatile qos_class_t qos;
int default_prio;
int policy;
pthread_t tid;
} _dispatch_mgr_sched;
+
static dispatch_once_t _dispatch_mgr_sched_pred;
+// TODO: switch to "event-reflector thread" property <rdar://problem/18126138>
+
+// Must be kept in sync with list of qos classes in sys/qos.h
+static const int _dispatch_mgr_sched_qos2prio[] = {
+ [_DISPATCH_QOS_CLASS_MAINTENANCE] = 4,
+ [_DISPATCH_QOS_CLASS_BACKGROUND] = 4,
+ [_DISPATCH_QOS_CLASS_UTILITY] = 20,
+ [_DISPATCH_QOS_CLASS_DEFAULT] = 31,
+ [_DISPATCH_QOS_CLASS_USER_INITIATED] = 37,
+ [_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47,
+};
+
static void
_dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED)
{
(void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr,
&_dispatch_mgr_sched.policy));
(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m));
- // legacy priority calls allowed when requesting above default priority
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ qos_class_t qos = qos_class_main();
+ if (qos == _DISPATCH_QOS_CLASS_DEFAULT) {
+ qos = _DISPATCH_QOS_CLASS_USER_INITIATED; // rdar://problem/17279292
+ }
+ if (qos) {
+ _dispatch_mgr_sched.qos = qos;
+ param.sched_priority = _dispatch_mgr_sched_qos2prio[qos];
+ }
+#endif
_dispatch_mgr_sched.default_prio = param.sched_priority;
_dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio;
}
(void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024));
#endif
#if HAVE_PTHREAD_WORKQUEUE_QOS
- if (_dispatch_set_qos_class_enabled) {
- qos_class_t qos = qos_class_main();
- (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0));
+ qos_class_t qos = _dispatch_mgr_sched.qos;
+ if (qos) {
+ if (_dispatch_set_qos_class_enabled) {
+ (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr,
+ qos, 0));
+ }
_dispatch_mgr_q.dq_priority = _pthread_qos_class_encode(qos, 0, 0);
_dispatch_queue_set_override_priority(&_dispatch_mgr_q);
}
pthread_attr_t *attr;
attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr;
(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m));
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ qos_class_t qos = 0;
+ (void)pthread_attr_get_qos_class_np(attr, &qos, NULL);
+ if (_dispatch_mgr_sched.qos > qos && _dispatch_set_qos_class_enabled) {
+ (void)pthread_set_qos_class_self_np(_dispatch_mgr_sched.qos, 0);
+ int p = _dispatch_mgr_sched_qos2prio[_dispatch_mgr_sched.qos];
+ if (p > param.sched_priority) {
+ param.sched_priority = p;
+ }
+ }
+#endif
if (slowpath(_dispatch_mgr_sched.prio > param.sched_priority)) {
return _dispatch_mgr_priority_apply();
}
dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init);
struct sched_param param;
(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m));
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+ qos_class_t qos = 0;
+ (void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL);
+ if (qos) {
+ param.sched_priority = _dispatch_mgr_sched_qos2prio[qos];
+ qos_class_t q = _dispatch_mgr_sched.qos;
+ do if (q >= qos) {
+ break;
+ } while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched,
+ qos, q, qos, &q, relaxed)));
+ }
+#endif
int p = _dispatch_mgr_sched.prio;
do if (p >= param.sched_priority) {
return;
}
}
-dispatch_queue_t
-dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
- const pthread_attr_t *attr, dispatch_block_t configure)
+static dispatch_queue_t
+_dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
+ const pthread_attr_t *attr, dispatch_block_t configure,
+ dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
{
dispatch_queue_t dq;
dispatch_root_queue_context_t qc;
if (attr) {
memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t));
-#if HAVE_PTHREAD_WORKQUEUE_QOS
- qos_class_t qos = 0;
- if (!pthread_attr_get_qos_class_np(&pqc->dpq_thread_attr, &qos, NULL)
- && qos > _DISPATCH_QOS_CLASS_DEFAULT) {
- DISPATCH_CLIENT_CRASH("pthread root queues do not support "
- "explicit QoS attributes");
- }
-#endif
_dispatch_mgr_priority_raise(&pqc->dpq_thread_attr);
} else {
(void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr));
if (configure) {
pqc->dpq_thread_configure = _dispatch_Block_copy(configure);
}
+ if (observer_hooks) {
+ pqc->dpq_observer_hooks = *observer_hooks;
+ }
_dispatch_object_debug(dq, "%s", __func__);
return _dispatch_introspection_queue_create(dq);
}
-#endif
+
+/*
+ * Public entry point: forwards to the internal creator with no observer
+ * hooks (the hooks parameter exists for internal/introspection clients).
+ */
+dispatch_queue_t
+dispatch_pthread_root_queue_create(const char *label, unsigned long flags,
+ const pthread_attr_t *attr, dispatch_block_t configure)
+{
+ return _dispatch_pthread_root_queue_create(label, flags, attr, configure,
+ NULL);
+}
+
+
+#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void
_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq)
return ctxt;
}
+
#pragma mark -
#pragma mark dispatch_queue_debug
{
size_t offset = 0;
dispatch_queue_t target = dq->do_targetq;
- offset += dsnprintf(buf, bufsiz, "target = %s[%p], width = 0x%x, "
- "running = 0x%x, barrier = %d ", target && target->dq_label ?
- target->dq_label : "", target, dq->dq_width / 2,
- dq->dq_running / 2, dq->dq_running & 1);
+ offset += dsnprintf(&buf[offset], bufsiz - offset, "target = %s[%p], "
+ "width = 0x%x, running = 0x%x, barrier = %d ",
+ target && target->dq_label ? target->dq_label : "", target,
+ dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1);
if (dq->dq_is_thread_bound) {
- offset += dsnprintf(buf, bufsiz, ", thread = 0x%x ",
+ offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ",
_dispatch_queue_get_bound_thread(dq));
}
return offset;
if (!DISPATCH_OBJ_IS_VTABLE(dc) &&
(long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) {
_dispatch_trace_continuation_pop(dq, dou);
+ _dispatch_wqthread_override_start((mach_port_t)dc->dc_data,
+ _dispatch_queue_get_override_priority(dq));
_dispatch_thread_semaphore_signal(
(_dispatch_thread_semaphore_t)dc->dc_other);
_dispatch_introspection_queue_item_complete(dou);
}
flags = _dispatch_block_normalize_flags(flags);
struct dispatch_block_private_data_s dbpds =
- DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, 0, block);
- dbpds.dbpd_atomic_flags |= DBF_PERFORM; // no group_leave at end of invoke
+ DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block);
return _dispatch_block_invoke(&dbpds);
}
-#define _dbpd_group(dbpd) ((dispatch_group_t)&(dbpd)->dbpd_group)
+#define _dbpd_group(dbpd) ((dbpd)->dbpd_group)
void
_dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd)
if (atomic_flags & DBF_CANCELED) goto out;
pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY;
- unsigned long override = 0;
+ unsigned long override = DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE;
if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
op = _dispatch_get_priority();
p = dbpd->dbpd_priority;
- override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
+ override |= (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ?
DISPATCH_PRIORITY_ENFORCE : 0;
}
}
ov = _dispatch_adopt_priority_and_voucher(p, v, override);
dbpd->dbpd_thread = _dispatch_thread_port();
- dbpd->dbpd_block();
- _dispatch_set_priority_and_replace_voucher(op, ov);
+ _dispatch_client_callout(dbpd->dbpd_block,
+ _dispatch_Block_invoke(dbpd->dbpd_block));
+ _dispatch_reset_priority_and_voucher(op, ov);
out:
if ((atomic_flags & DBF_PERFORM) == 0) {
if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) {
}
ov = _dispatch_adopt_priority_and_voucher(p, v, override);
dbpd->dbpd_block();
- _dispatch_set_priority_and_replace_voucher(op, ov);
+ _dispatch_reset_priority_and_voucher(op, ov);
out:
if ((atomic_flags & DBF_PERFORM) == 0) {
if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) {
return _dispatch_async_f(dq, ctxt, func, 0, 0);
}
+/*
+ * Variant of dispatch_async_f that submits the work item with
+ * DISPATCH_BLOCK_ENFORCE_QOS_CLASS, i.e. the submitter's QoS class is
+ * enforced for the item rather than inherited from the queue
+ * (presumably mirroring dispatch_block_create's flag — see block flag
+ * handling in _dispatch_block_invoke).
+ */
+DISPATCH_NOINLINE
+void
+dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func)
+{
+ return _dispatch_async_f(dq, ctxt, func, 0,
+ DISPATCH_BLOCK_ENFORCE_QOS_CLASS);
+}
+
#ifdef __BLOCKS__
void
dispatch_async(dispatch_queue_t dq, void (^work)(void))
static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, pthread_priority_t pp);
+/*
+ * Out-of-line slow path of _dispatch_function_invoke, taken when the queue
+ * carries an override voucher (dq_override_voucher != DISPATCH_NO_VOUCHER).
+ * Swaps the thread's current-queue TSD to `dq`, adopts the queue's
+ * override voucher around the client callout, then restores both the
+ * voucher and the previous current queue.
+ */
+DISPATCH_NOINLINE
+static void
+_dispatch_function_invoke_slow(dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func)
+{
+ dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
+ _dispatch_thread_setspecific(dispatch_queue_key, dq);
+ voucher_t ov = _dispatch_adopt_queue_override_voucher(dq);
+ _dispatch_client_callout(ctxt, func);
+ _dispatch_perfmon_workitem_inc();
+ _dispatch_reset_voucher(ov);
+ _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
+}
+
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_function_invoke(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func)
{
+ if (slowpath(dq->dq_override_voucher != DISPATCH_NO_VOUCHER)) {
+ return _dispatch_function_invoke_slow(dq, ctxt, func);
+ }
dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
_dispatch_thread_setspecific(dispatch_queue_key, dq);
_dispatch_client_callout(ctxt, func);
_dispatch_thread_semaphore_wait(sema); // acquire
_dispatch_put_thread_semaphore(sema);
+ pthread_priority_t p = _dispatch_queue_get_override_priority(dq);
+ if (p > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+ // Ensure that the root queue sees that this thread was overridden.
+ _dispatch_set_defaultpriority_override();
+ }
+
#if DISPATCH_COCOA_COMPAT
// Queue bound to a non-dispatch thread
if (dc.dc_func == NULL) {
_dispatch_thread_semaphore_wait(sema);
_dispatch_put_thread_semaphore(sema);
+ pthread_priority_t p = _dispatch_queue_get_override_priority(dq);
+ if (p > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+ // Ensure that the root queue sees that this thread was overridden.
+ _dispatch_set_defaultpriority_override();
+ }
+
if (slowpath(dq->do_targetq->do_targetq)) {
_dispatch_function_recurse(dq, ctxt, func, pp);
} else {
// renaming this symbol
DISPATCH_NOINLINE
void
-_dispatch_queue_invoke(dispatch_queue_t dq)
+_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags)
{
- _dispatch_queue_class_invoke(dq, dispatch_queue_invoke2);
+ // Signature extended to carry the originating continuation (dou) and
+ // invoke flags down into the generic queue-class drain;
+ // dispatch_queue_invoke2 supplies the queue-specific behavior.
+ _dispatch_queue_class_invoke(dq, dou._dc, flags, dispatch_queue_invoke2);
}
#pragma mark -
out:
if (next_dc) {
_dispatch_main_queue_wakeup();
+ } else {
+ pthread_priority_t p = _dispatch_queue_reset_override_priority(dq);
+
+ if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
+ _dispatch_thread_override_end(dq->dq_thread);
+ }
}
_dispatch_voucher_debug("main queue restore", voucher);
- _dispatch_set_priority_and_replace_voucher(old_pri, voucher);
- _dispatch_queue_reset_override_priority(dq);
+ _dispatch_reset_priority_and_voucher(old_pri, voucher);
_dispatch_reset_defaultpriority(old_dp);
_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
_dispatch_perfmon_end();
_dispatch_perfmon_workitem_inc();
_dispatch_voucher_debug("runloop queue restore", voucher);
- _dispatch_set_priority_and_replace_voucher(old_pri, voucher);
+ _dispatch_reset_priority_and_voucher(old_pri, voucher);
_dispatch_reset_defaultpriority(old_dp);
_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
_dispatch_perfmon_end();
if (retained) _dispatch_release(dq);
return NULL;
}
- pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
- bool override = _dispatch_queue_override_priority(dq, pp);
- if (override && dq->dq_running > 1) {
- override = false;
- }
-
if (!dispatch_atomic_cmpxchg2o(dq, do_suspend_cnt, 0,
DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) {
+ bool was_overridden, override;
+
+ override = _dispatch_queue_override_priority(dq, &pp, &was_overridden);
+ if (override && dq->dq_running > 1) {
+ override = false;
+ }
+
#if DISPATCH_COCOA_COMPAT
if (dq == &_dispatch_main_q && dq->dq_is_thread_bound) {
+ if (override) {
+ _dispatch_thread_override_start(dq->dq_thread, pp);
+ if (was_overridden) {
+ _dispatch_thread_override_end(dq->dq_thread);
+ }
+ }
return _dispatch_main_queue_wakeup();
}
#endif
if (override) {
+#if HAVE_PTHREAD_WORKQUEUE_QOS
mach_port_t th;
// <rdar://problem/17735825> to traverse the tq chain safely we must
// lock it to ensure it cannot change, unless the queue is running
} else if (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread,
MACH_PORT_NULL, _dispatch_thread_port(), &th, acquire)) {
// already locked, override the owner, trysync will do a queue
- // wakeup when it returns.
+ // wakeup when it returns, see _dispatch_set_target_queue2
_dispatch_wqthread_override_start(th, pp);
} else {
dispatch_queue_t tq = dq->do_targetq;
if (_dispatch_queue_prepare_override(dq, tq, pp)) {
- _dispatch_queue_push_override(dq, tq, pp);
+ _dispatch_queue_push_override(dq, tq, pp, false);
} else {
_dispatch_queue_wakeup_with_qos(tq, pp);
}
dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL,
release);
}
+#endif
}
if (retained) _dispatch_release(dq);
return NULL;
}
+
dispatch_queue_t tq = dq->do_targetq;
if (!retained) _dispatch_retain(dq);
- if (override) {
- override = _dispatch_queue_prepare_override(dq, tq, pp);
- }
- _dispatch_queue_push(tq, dq, pp);
- if (override) {
- _dispatch_queue_push_override(dq, tq, pp);
- }
+ _dispatch_queue_push_queue(tq, dq, pp);
return tq; // libdispatch does not need this, but the Instrument DTrace
// probe does
}
bool retained)
{
if (_dispatch_object_suspended(dq)) {
- _dispatch_queue_override_priority(dq, pp);
+ _dispatch_queue_override_priority(dq, &pp, NULL);
if (retained) _dispatch_release(dq);
return NULL;
}
(void)_dispatch_queue_wakeup_with_qos2(dq, pp, false);
}
+/*
+ * Wake up `dq` at its currently recorded override priority, consuming one
+ * reference on the queue (the `true` retained argument).
+ */
+DISPATCH_NOINLINE
+void
+_dispatch_queue_wakeup_and_release(dispatch_queue_t dq)
+{
+ (void)_dispatch_queue_wakeup_with_qos2(dq,
+ _dispatch_queue_get_override_priority(dq), true);
+}
+
DISPATCH_NOINLINE
dispatch_queue_t
_dispatch_queue_wakeup(dispatch_queue_t dq)
}
#if HAVE_PTHREAD_WORKQUEUE_QOS
+DISPATCH_NOINLINE
static void
-_dispatch_queue_override_invoke(void *ctxt)
+_dispatch_queue_override_invoke_stealing(void *ctxt)
{
dispatch_continuation_t dc = (dispatch_continuation_t)ctxt;
dispatch_queue_t dq = dc->dc_data;
- pthread_priority_t p = 0;
- if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) &&
- fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) {
- _dispatch_queue_set_thread(dq);
-
- _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx",
- dq->dq_thread, _dispatch_get_defaultpriority());
-
- pthread_priority_t old_dp = _dispatch_get_defaultpriority();
- _dispatch_reset_defaultpriority(dc->dc_priority);
-
- dispatch_queue_t tq = NULL;
- _dispatch_thread_semaphore_t sema = 0;
- tq = dispatch_queue_invoke2(dq, &sema);
+ dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING | DISPATCH_INVOKE_STEALING);
+}
- _dispatch_queue_clear_thread(dq);
- _dispatch_reset_defaultpriority(old_dp);
+DISPATCH_NOINLINE
+static void
+_dispatch_queue_override_invoke_owning(void *ctxt)
+{
+ dispatch_continuation_t dc = (dispatch_continuation_t)ctxt;
+ dispatch_queue_t dq = dc->dc_data;
- uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release);
- if (sema) {
- _dispatch_thread_semaphore_signal(sema);
- } else if (!tq && running == 0) {
- p = _dispatch_queue_reset_override_priority(dq);
- if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
- _dispatch_wqthread_override_reset();
- }
- }
- _dispatch_introspection_queue_item_complete(dq);
- if (running == 0) {
- return _dispatch_queue_wakeup_with_qos_and_release(dq, p);
- }
- } else {
- mach_port_t th = dq->dq_thread;
- if (th) {
- p = _dispatch_queue_get_override_priority(dq);
- _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx",
- th, p);
- _dispatch_wqthread_override_start(th, p);
- }
- }
- _dispatch_release(dq); // added when we pushed the override block
+ // balance the fake continuation push in _dispatch_queue_push_override
+ _dispatch_trace_continuation_pop(dc->dc_other, dc->dc_data);
+ dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING);
}
#endif
if (p <= (tq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) {
return false;
}
- _dispatch_retain(dq);
return true;
#else
(void)dq; (void)tq; (void)p;
static inline void
_dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq,
- pthread_priority_t p)
+ pthread_priority_t p, bool owning)
{
#if HAVE_PTHREAD_WORKQUEUE_QOS
unsigned int qosbit, idx, overcommit;
dispatch_continuation_t dc = _dispatch_continuation_alloc();
dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
- dc->dc_func = _dispatch_queue_override_invoke;
+ if (owning) {
+ // fake that we queued `dq` on `tq` for introspection purposes
+ _dispatch_trace_continuation_push(tq, dq);
+ dc->dc_func = _dispatch_queue_override_invoke_owning;
+ } else {
+ dc->dc_func = _dispatch_queue_override_invoke_stealing;
+ _dispatch_retain(dq);
+ }
dc->dc_ctxt = dc;
- dc->dc_priority = tq->dq_priority;
+ dc->dc_priority = 0;
+ dc->dc_other = tq;
dc->dc_voucher = NULL;
dc->dc_data = dq;
- // dq retained by _dispatch_queue_prepare_override
_dispatch_queue_push(rq, dc, 0);
#else
#endif
}
+/*
+ * Push queue `dq` onto its target queue `tq` at priority `pp`.
+ * First folds `pp` into dq's override priority, then either enqueues an
+ * owning override continuation (when _dispatch_queue_prepare_override
+ * decides an override is needed) or pushes `dq` onto `tq` directly.
+ */
+void
+_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq,
+ pthread_priority_t pp)
+{
+ _dispatch_queue_override_priority(dq, &pp, NULL);
+ if (_dispatch_queue_prepare_override(dq, tq, pp)) {
+ _dispatch_queue_push_override(dq, tq, pp, true);
+ } else {
+ _dispatch_queue_push(tq, dq, pp);
+ }
+}
+
#pragma mark -
#pragma mark dispatch_root_queue_drain
reset = _dispatch_reset_defaultpriority_override();
}
_dispatch_voucher_debug("root queue clear", NULL);
- _dispatch_set_priority_and_replace_voucher(old_pri, NULL);
+ _dispatch_reset_priority_and_voucher(old_pri, NULL);
_dispatch_reset_defaultpriority(old_dp);
_dispatch_perfmon_end();
dispatch_root_queue_context_t qc = dq->do_ctxt;
dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt;
+ if (pqc->dpq_observer_hooks.queue_will_execute) {
+ _dispatch_set_pthread_root_queue_observer_hooks(
+ &pqc->dpq_observer_hooks);
+ }
if (pqc->dpq_thread_configure) {
pqc->dpq_thread_configure();
}
{
dispatch_queue_t dq = &_dispatch_main_q;
(void)dispatch_atomic_dec2o(dq, dq_running, relaxed);
- unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt,
+ (void)dispatch_atomic_sub2o(dq, do_suspend_cnt,
DISPATCH_OBJECT_SUSPEND_LOCK, release);
+ _dispatch_queue_clear_bound_thread(dq);
dq->dq_is_thread_bound = 0;
- if (suspend_cnt == 0) {
- _dispatch_queue_wakeup(dq);
- }
+ // no need to drop the override, the thread will die anyway
+ _dispatch_queue_wakeup_with_qos(dq,
+ _dispatch_queue_reset_override_priority(dq));
// overload the "probably" variable to mean that dispatch_main() or
// similar non-POSIX API was called
dispatch_queue_t dq_specific_q; \
uint16_t dq_width; \
uint16_t dq_is_thread_bound:1; \
+ uint32_t volatile dq_override; \
pthread_priority_t dq_priority; \
mach_port_t dq_thread; \
mach_port_t volatile dq_tqthread; \
- uint32_t volatile dq_override; \
+ voucher_t dq_override_voucher; \
unsigned long dq_serialnum; \
const char *dq_label; \
DISPATCH_INTROSPECTION_QUEUE_LIST;
+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#else
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
- (13*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
+ (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \
+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#endif
DISPATCH_CLASS_DECL(queue);
+#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
struct dispatch_queue_s {
DISPATCH_STRUCT_HEADER(queue);
DISPATCH_QUEUE_HEADER;
DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only
};
+#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
void _dispatch_queue_destroy(dispatch_object_t dou);
void _dispatch_queue_dispose(dispatch_queue_t dq);
-void _dispatch_queue_invoke(dispatch_queue_t dq);
+void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags);
void _dispatch_queue_push_list_slow(dispatch_queue_t dq,
pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n,
bool retained);
unsigned long _dispatch_queue_probe(dispatch_queue_t dq);
dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou);
dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq);
+void _dispatch_queue_wakeup_and_release(dispatch_queue_t dq);
void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq,
pthread_priority_t pp);
void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq,
pthread_priority_t pp);
+void _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq,
+ pthread_priority_t pp);
_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou);
void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
dqsq);
#pragma mark -
#pragma mark dispatch_queue_attr_t
+// Tri-state overcommit attribute. "unspecified" (the zero default) lets
+// queue creation pick the historical default: serial queues become
+// overcommit, concurrent queues do not (see dispatch_queue_create_with_target).
+typedef enum {
+ _dispatch_queue_attr_overcommit_unspecified = 0,
+ _dispatch_queue_attr_overcommit_enabled,
+ _dispatch_queue_attr_overcommit_disabled,
+} _dispatch_queue_attr_overcommit_t;
+
DISPATCH_CLASS_DECL(queue_attr);
struct dispatch_queue_attr_s {
	DISPATCH_STRUCT_HEADER(queue_attr);
	qos_class_t dqa_qos_class;
	int dqa_relative_priority;
-	unsigned int dqa_overcommit:1, dqa_concurrent:1;
+	// dqa_overcommit widened to 2 bits to hold the 3-valued
+	// _dispatch_queue_attr_overcommit_t (unspecified/enabled/disabled)
+	unsigned int dqa_overcommit:2, dqa_concurrent:1;
};
enum {
DQA_INDEX_NON_OVERCOMMIT = 0,
DQA_INDEX_OVERCOMMIT,
+ DQA_INDEX_UNSPECIFIED_OVERCOMMIT,
};
+#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3
+
enum {
DQA_INDEX_CONCURRENT = 0,
DQA_INDEX_SERIAL,
};
-#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)
+#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2
typedef enum {
DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0,
DQA_INDEX_QOS_CLASS_USER_INTERACTIVE,
} _dispatch_queue_attr_index_qos_class_t;
+#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)
+
extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
- [DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2];
+ [DISPATCH_QUEUE_ATTR_PRIO_COUNT]
+ [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT]
+ [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT];
#pragma mark -
#pragma mark dispatch_continuation_t
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16
#else
-#define DISPATCH_CONTINUATION_CACHE_LIMIT 65536
+#define DISPATCH_CONTINUATION_CACHE_LIMIT 1024
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128
#endif
#endif
#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
-struct dispatch_block_private_data_s {
- unsigned long dbpd_magic;
- dispatch_block_flags_t dbpd_flags;
- unsigned int volatile dbpd_atomic_flags;
- int volatile dbpd_performed;
- pthread_priority_t dbpd_priority;
- voucher_t dbpd_voucher;
- dispatch_block_t dbpd_block;
- struct dispatch_semaphore_s dbpd_group;
- dispatch_queue_t volatile dbpd_queue;
+#define DISPATCH_BLOCK_PRIVATE_DATA_HEADER() \
+ unsigned long dbpd_magic; \
+ dispatch_block_flags_t dbpd_flags; \
+ unsigned int volatile dbpd_atomic_flags; \
+ int volatile dbpd_performed; \
+ pthread_priority_t dbpd_priority; \
+ voucher_t dbpd_voucher; \
+ dispatch_block_t dbpd_block; \
+ dispatch_group_t dbpd_group; \
+ dispatch_queue_t volatile dbpd_queue; \
mach_port_t dbpd_thread;
+
+#if !defined(__cplusplus)
+struct dispatch_block_private_data_s {
+ DISPATCH_BLOCK_PRIVATE_DATA_HEADER();
};
+#endif
typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;
// dbpd_atomic_flags bits
#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk
-#define DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, prio, block) \
+// struct for synchronous perform: no group_leave at end of invoke
+#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block) \
{ \
.dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
.dbpd_flags = (flags), \
- .dbpd_priority = (prio), \
- .dbpd_voucher = (voucher), \
+ .dbpd_atomic_flags = DBF_PERFORM, \
.dbpd_block = (block), \
- .dbpd_group = DISPATCH_GROUP_INITIALIZER(1), \
}
dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
#pragma mark -
#pragma mark dispatch_group_t
-dispatch_group_t
-dispatch_group_create(void)
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_group_t
+_dispatch_group_create_with_count(long count)
{
dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc(
DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s));
- _dispatch_semaphore_init(LONG_MAX, dg);
+ _dispatch_semaphore_init(LONG_MAX - count, dg);
return dg;
}
+dispatch_group_t
+dispatch_group_create(void)
+{
+ return _dispatch_group_create_with_count(0);
+}
+
+dispatch_group_t
+_dispatch_group_create_and_enter(void)
+{
+ return _dispatch_group_create_with_count(1);
+}
+
void
dispatch_group_enter(dispatch_group_t dg)
{
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
- long orig;
+ long orig, value;
#if USE_MACH_SEM
mach_timespec_t _timeout;
again:
// check before we cause another signal to be sent by incrementing
// dsema->dsema_group_waiters
- if (dsema->dsema_value == LONG_MAX) {
+ value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565
+ if (value == LONG_MAX) {
return _dispatch_group_wake(dsema);
}
// Mach semaphores appear to sometimes spuriously wake up. Therefore,
// signaled (6880961).
(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters, relaxed);
// check the values again in case we need to wake any threads
- if (dsema->dsema_value == LONG_MAX) {
+ value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565
+ if (value == LONG_MAX) {
return _dispatch_group_wake(dsema);
}
DISPATCH_CLASS_DECL(group);
-#define DISPATCH_GROUP_INITIALIZER(s) \
- { \
- .do_vtable = (const void*)DISPATCH_VTABLE(group), \
- .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \
- .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \
- .dsema_value = LONG_MAX - (s), \
- .dsema_orig = LONG_MAX, \
- }
-
+dispatch_group_t _dispatch_group_create_and_enter(void);
void _dispatch_semaphore_dispose(dispatch_object_t dou);
size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf,
size_t bufsiz);
static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4;
static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5;
#if DISPATCH_INTROSPECTION
-static const unsigned long dispatch_introspection_key =__PTK_LIBDISPATCH_KEY5+1;
+static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6;
#elif DISPATCH_PERF_MON
-static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5+1;
+static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6;
#endif
#if DISPATCH_USE_OS_SEMAPHORE_CACHE
static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE;
#else
-static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY5+2;
+static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY7;
#endif
+static const unsigned long dispatch_pthread_root_queue_observer_hooks_key =
+ __PTK_LIBDISPATCH_KEY8;
#ifndef __TSD_THREAD_QOS_CLASS
#define __TSD_THREAD_QOS_CLASS 4
#elif DISPATCH_PERF_MON
extern pthread_key_t dispatch_bcounter_key;
#endif
-
+extern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key;
DISPATCH_TSD_INLINE
static inline void
#endif
#include <sys/mount.h>
+#define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1
+#define DKEV_DISPOSE_IGNORE_ENOENT 0x2
+
static void _dispatch_source_merge_kevent(dispatch_source_t ds,
- const struct kevent64_s *ke);
+ const _dispatch_kevent_qos_s *ke);
static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp);
-static void _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg);
-static bool _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags,
+static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg,
+ int options);
+static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags,
uint32_t del_flags);
-static void _dispatch_kevent_drain(struct kevent64_s *ke);
-static void _dispatch_kevent_merge(struct kevent64_s *ke);
-static void _dispatch_timers_kevent(struct kevent64_s *ke);
+static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke);
+static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke);
+static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke);
static void _dispatch_timers_unregister(dispatch_source_t ds,
dispatch_kevent_t dk);
static void _dispatch_timers_update(dispatch_source_t ds);
unsigned int tidx);
static inline unsigned long _dispatch_source_timer_data(
dispatch_source_refs_t dr, unsigned long prev);
-static long _dispatch_kq_update(const struct kevent64_s *);
+static long _dispatch_kq_update(const _dispatch_kevent_qos_s *);
static void _dispatch_memorystatus_init(void);
#if HAVE_MACH
static void _dispatch_mach_host_calendar_change_register(void);
uint32_t new_flags, uint32_t del_flags);
static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk,
uint32_t new_flags, uint32_t del_flags);
-static inline void _dispatch_kevent_mach_portset(struct kevent64_s *ke);
+static inline void _dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke);
#else
static inline void _dispatch_mach_host_calendar_change_register(void) {}
static inline void _dispatch_mach_recv_msg_buf_init(void) {}
#endif
static const char * _evfiltstr(short filt);
#if DISPATCH_DEBUG
-static void _dispatch_kevent_debug(struct kevent64_s* kev, const char* str);
+static void _dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev,
+ const char* str);
static void _dispatch_kevent_debugger(void *context);
#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q)
#else
static inline void
-_dispatch_kevent_debug(struct kevent64_s* kev DISPATCH_UNUSED,
+_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev DISPATCH_UNUSED,
const char* str DISPATCH_UNUSED) {}
#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
#endif
+#ifndef DISPATCH_MGR_QUEUE_DEBUG
+#define DISPATCH_MGR_QUEUE_DEBUG 0
+#endif
+#if DISPATCH_MGR_QUEUE_DEBUG
+#define _dispatch_kevent_mgr_debug _dispatch_kevent_debug
+#else
+static inline void
+_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED,
+ const char* str DISPATCH_UNUSED) {}
+#endif
#pragma mark -
#pragma mark dispatch_source_t
dispatch_source_create(dispatch_source_type_t type,
uintptr_t handle,
unsigned long mask,
- dispatch_queue_t q)
+ dispatch_queue_t dq)
{
- const struct kevent64_s *proto_kev = &type->ke;
+ const _dispatch_kevent_qos_s *proto_kev = &type->ke;
dispatch_source_t ds;
dispatch_kevent_t dk;
ds->do_ref_cnt++; // the reference the manager queue holds
ds->do_ref_cnt++; // since source is created suspended
ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL;
- // The initial target queue is the manager queue, in order to get
- // the source installed. <rdar://problem/8928171>
- ds->do_targetq = &_dispatch_mgr_q;
dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s));
dk->dk_kevent = *proto_kev;
// we cheat and use EV_CLEAR to mean a "flag thingy"
ds->ds_is_adder = true;
}
+ if (EV_UDATA_SPECIFIC & proto_kev->flags) {
+ dispatch_assert(!(EV_ONESHOT & proto_kev->flags));
+ dk->dk_kevent.flags |= EV_DISPATCH;
+ ds->ds_is_direct_kevent = true;
+ ds->ds_needs_rearm = true;
+ }
// Some sources require special processing
if (type->init != NULL) {
- type->init(ds, type, handle, mask, q);
+ type->init(ds, type, handle, mask, dq);
}
dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder));
}
ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds);
- // First item on the queue sets the user-specified target queue
- dispatch_set_target_queue(ds, q);
+ if (!ds->ds_is_direct_kevent) {
+ // The initial target queue is the manager queue, in order to get
+ // the source installed. <rdar://problem/8928171>
+ ds->do_targetq = &_dispatch_mgr_q;
+ // First item on the queue sets the user-specified target queue
+ dispatch_set_target_queue(ds, dq);
+ } else {
+ if (slowpath(!dq)) {
+ dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true);
+ } else {
+ _dispatch_retain(dq);
+ }
+ ds->do_targetq = dq;
+ _dispatch_queue_priority_inherit_from_target((dispatch_queue_t)ds, dq);
+ _dispatch_queue_set_override_priority(dq);
+ }
_dispatch_object_debug(ds, "%s", __func__);
return ds;
}
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_queue_t
+_dispatch_source_get_kevent_queue(dispatch_source_t ds)
+{
+ if (ds->ds_is_direct_kevent) {
+ return ds->do_targetq;
+ }
+ return &_dispatch_mgr_q;
+}
+
void
_dispatch_source_dispose(dispatch_source_t ds)
{
void
dispatch_source_merge_data(dispatch_source_t ds, unsigned long val)
{
- struct kevent64_s kev = {
+ _dispatch_kevent_qos_s kev = {
.fflags = (typeof(kev.fflags))val,
.data = (typeof(kev.data))val,
};
return;
}
pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority);
- _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dc);
voucher_t voucher = dc->dc_voucher ? _voucher_retain(dc->dc_voucher) : NULL;
_dispatch_continuation_voucher_adopt(dc); // consumes voucher reference
- _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
- _dispatch_introspection_queue_item_complete(dc);
+ _dispatch_continuation_pop(dc);
if (voucher) dc->dc_voucher = voucher;
_dispatch_reset_defaultpriority(old_dp);
}
_dispatch_source_kevent_unregister(dispatch_source_t ds)
{
_dispatch_object_debug(ds, "%s", __func__);
+ uint32_t flags = (uint32_t)ds->ds_pending_data_mask;
dispatch_kevent_t dk = ds->ds_dkev;
- ds->ds_dkev = NULL;
- switch (dk->dk_kevent.filter) {
- case DISPATCH_EVFILT_TIMER:
+ if (ds->ds_atomic_flags & DSF_DELETED) {
+ dk->dk_kevent.flags |= EV_DELETE; // already deleted
+ dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE);
+ }
+ if (dk->dk_kevent.filter == DISPATCH_EVFILT_TIMER) {
+ ds->ds_dkev = NULL;
_dispatch_timers_unregister(ds, dk);
- break;
- default:
+ } else if (!ds->ds_is_direct_kevent) {
+ ds->ds_dkev = NULL;
TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list);
- _dispatch_kevent_unregister(dk, (uint32_t)ds->ds_pending_data_mask);
- break;
+ _dispatch_kevent_unregister(dk, flags, 0);
+ } else {
+ int dkev_dispose_options = 0;
+ if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) {
+ dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE;
+ }
+ if (ds->ds_needs_mgr) {
+ dkev_dispose_options |= DKEV_DISPOSE_IGNORE_ENOENT;
+ ds->ds_needs_mgr = false;
+ }
+ long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options);
+ if (r == EINPROGRESS) {
+ _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]",
+ ds, dk);
+ ds->ds_pending_delete = true;
+ return; // deferred unregistration
+ } else if (r == ENOENT) {
+ _dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]",
+ ds, dk);
+ ds->ds_needs_mgr = true;
+ return; // potential concurrent EV_DELETE delivery rdar://22047283
+ }
+ ds->ds_dkev = NULL;
+ _TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list);
}
-
(void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, ds->ds_dkev);
ds->ds_needs_rearm = false; // re-arm is pointless and bad now
_dispatch_release(ds); // the retain is done at creation time
}
{
switch (ds->ds_dkev->dk_kevent.filter) {
case DISPATCH_EVFILT_TIMER:
- return _dispatch_timers_update(ds);
+ _dispatch_timers_update(ds);
+ (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds,
+ ds->ds_dkev);
+ return;
case EVFILT_MACHPORT:
if (ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) {
new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH
}
break;
}
- if (_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) {
+ if ((ds->ds_atomic_flags & DSF_DELETED) ||
+ _dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) {
_dispatch_source_kevent_unregister(ds);
}
}
dispatch_assert_zero(ds->ds_is_installed);
switch (ds->ds_dkev->dk_kevent.filter) {
case DISPATCH_EVFILT_TIMER:
- return _dispatch_timers_update(ds);
+ _dispatch_timers_update(ds);
+ (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev);
+ return;
}
uint32_t flags;
bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, &flags);
TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list);
+ (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev);
if (do_resume || ds->ds_needs_rearm) {
_dispatch_source_kevent_resume(ds, flags);
}
- (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed);
_dispatch_object_debug(ds, "%s", __func__);
}
_dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED)
{
dispatch_source_t ds = dou._ds;
- if (slowpath(_dispatch_queue_drain(ds))) {
- DISPATCH_CLIENT_CRASH("Sync onto source");
+ if (_dispatch_queue_class_probe(ds)) {
+ if (slowpath(_dispatch_queue_drain(ds))) {
+ DISPATCH_CLIENT_CRASH("Sync onto source");
+ }
}
// This function performs all source actions. Each action is responsible
// The order of tests here in invoke and in probe should be consistent.
dispatch_queue_t dq = _dispatch_queue_get_current();
+ dispatch_queue_t dkq = _dispatch_source_get_kevent_queue(ds);
dispatch_source_refs_t dr = ds->ds_refs;
if (!ds->ds_is_installed) {
- // The source needs to be installed on the manager queue.
- if (dq != &_dispatch_mgr_q) {
- return &_dispatch_mgr_q;
+ // The source needs to be installed on the kevent queue.
+ if (dq != dkq) {
+ return dkq;
}
_dispatch_source_kevent_register(ds);
ds->ds_is_installed = true;
return ds->do_targetq;
}
if (slowpath(ds->do_xref_cnt == -1)) {
- return &_dispatch_mgr_q; // rdar://problem/9558246
+ return dkq; // rdar://problem/9558246
}
} else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) {
// Source suspended by an item drained from the source queue.
// clears ds_registration_handler
_dispatch_source_registration_callout(ds);
if (slowpath(ds->do_xref_cnt == -1)) {
- return &_dispatch_mgr_q; // rdar://problem/9558246
+ return dkq; // rdar://problem/9558246
+ }
+ } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete ||
+ (ds->ds_atomic_flags & DSF_ONESHOT))) {
+ // Pending source kevent unregistration has been completed
+ if (ds->ds_needs_mgr) {
+ dkq = &_dispatch_mgr_q;
}
- } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){
+ if (dq != dkq) {
+ return dkq;
+ }
+ ds->ds_pending_delete = false;
+ if (ds->ds_atomic_flags & DSF_ONESHOT) {
+ (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ONESHOT,
+ relaxed);
+ }
+ if (ds->ds_dkev) {
+ _dispatch_source_kevent_unregister(ds);
+ if (ds->ds_needs_mgr) {
+ return &_dispatch_mgr_q;
+ }
+ }
+ if (dr->ds_handler[DS_EVENT_HANDLER] ||
+ dr->ds_handler[DS_CANCEL_HANDLER] ||
+ dr->ds_handler[DS_REGISTN_HANDLER]) {
+ return ds->do_targetq;
+ }
+ } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1))
+ && !ds->ds_pending_delete) {
// The source has been cancelled and needs to be uninstalled from the
- // manager queue. After uninstallation, the cancellation handler needs
+ // kevent queue. After uninstallation, the cancellation handler needs
// to be delivered to the target queue.
if (ds->ds_dkev) {
- if (dq != &_dispatch_mgr_q) {
- return &_dispatch_mgr_q;
+ if (ds->ds_needs_mgr) {
+ dkq = &_dispatch_mgr_q;
+ }
+ if (dq != dkq) {
+ return dkq;
}
_dispatch_source_kevent_unregister(ds);
+ if (ds->ds_needs_mgr) {
+ return &_dispatch_mgr_q;
+ }
+ if (ds->ds_pending_delete) {
+ // deferred unregistration
+ if (ds->ds_needs_rearm) {
+ return dkq;
+ }
+ return NULL;
+ }
}
if (dr->ds_handler[DS_EVENT_HANDLER] ||
dr->ds_handler[DS_CANCEL_HANDLER] ||
}
}
_dispatch_source_cancel_callout(ds);
- } else if (ds->ds_pending_data) {
+ } else if (ds->ds_pending_data && !ds->ds_pending_delete) {
// The source has pending data to deliver via the event handler callback
- // on the target queue. Some sources need to be rearmed on the manager
+ // on the target queue. Some sources need to be rearmed on the kevent
// queue after event delivery.
if (dq != ds->do_targetq) {
return ds->do_targetq;
}
_dispatch_source_latch_and_call(ds);
if (ds->ds_needs_rearm) {
- return &_dispatch_mgr_q;
+ return dkq;
}
} else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) {
- // The source needs to be rearmed on the manager queue.
- if (dq != &_dispatch_mgr_q) {
- return &_dispatch_mgr_q;
+ // The source needs to be rearmed on the kevent queue.
+ if (dq != dkq) {
+ return dkq;
}
- _dispatch_source_kevent_resume(ds, 0);
(void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds,
+ ds->ds_dkev);
+ _dispatch_source_kevent_resume(ds, 0);
}
return NULL;
DISPATCH_NOINLINE
void
-_dispatch_source_invoke(dispatch_source_t ds)
+_dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags)
{
- _dispatch_queue_class_invoke(ds, _dispatch_source_invoke2);
+ _dispatch_queue_class_invoke(ds, dou._dc, flags, _dispatch_source_invoke2);
}
unsigned long
dispatch_source_refs_t dr = ds->ds_refs;
if (!ds->ds_is_installed) {
- // The source needs to be installed on the manager queue.
+ // The source needs to be installed on the kevent queue.
return true;
} else if (dr->ds_handler[DS_REGISTN_HANDLER]) {
// The registration handler needs to be delivered to the target queue.
return true;
- } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){
- // The source needs to be uninstalled from the manager queue, or the
+ } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete ||
+ (ds->ds_atomic_flags & DSF_ONESHOT))) {
+ // Pending source kevent unregistration has been completed
+ return true;
+ } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1))
+ && !ds->ds_pending_delete) {
+ // The source needs to be uninstalled from the kevent queue, or the
// cancellation handler needs to be delivered to the target queue.
// Note: cancellation assumes installation.
if (ds->ds_dkev || dr->ds_handler[DS_EVENT_HANDLER] ||
dr->ds_handler[DS_REGISTN_HANDLER]) {
return true;
}
- } else if (ds->ds_pending_data) {
+ } else if (ds->ds_pending_data && !ds->ds_pending_delete) {
// The source has pending data to deliver to the target queue.
return true;
} else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) {
- // The source needs to be rearmed on the manager queue.
+ // The source needs to be rearmed on the kevent queue.
return true;
}
return _dispatch_queue_class_probe(ds);
}
static void
-_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent64_s *ke)
+_dispatch_source_merge_kevent(dispatch_source_t ds,
+ const _dispatch_kevent_qos_s *ke)
{
+ _dispatch_object_debug(ds, "%s", __func__);
+ bool retained = false;
+ if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) &&
+ !(ke->flags & EV_DELETE)) {
+ _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]",
+ ds, (void*)ke->udata);
+ (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ONESHOT, relaxed);
+ } else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) {
+ _dispatch_debug("kevent-source[%p]: delete kevent[%p]",
+ ds, (void*)ke->udata);
+ retained = true;
+ _dispatch_retain(ds);
+ (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_DELETED, relaxed);
+ if (ke->flags & EV_DELETE) goto done;
+ }
if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) {
- return;
+ goto done; // rdar://20204025
}
if (ds->ds_is_level) {
// ke->data is signed and "negative available data" makes no sense
(void)dispatch_atomic_or2o(ds, ds_pending_data,
ke->fflags & ds->ds_pending_data_mask, relaxed);
}
+done:
// EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery
if (ds->ds_needs_rearm) {
+ if (!retained) {
+ retained = true;
+ _dispatch_retain(ds); // rdar://20382435
+ }
(void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ",
+ ds, (void*)ke->udata);
+ }
+ if (retained) {
+ _dispatch_queue_wakeup_and_release((dispatch_queue_t)ds);
+ } else {
+ _dispatch_queue_wakeup((dispatch_queue_t)ds);
}
-
- _dispatch_wakeup(ds);
}
#pragma mark -
static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; }
#endif
+#if !DISPATCH_USE_EV_UDATA_SPECIFIC
static struct dispatch_kevent_s _dispatch_kevent_data_or = {
.dk_kevent = {
.filter = DISPATCH_EVFILT_CUSTOM_OR,
},
.dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources),
};
+#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1))
TAILQ_INIT(&_dispatch_sources[i]);
}
+#if !DISPATCH_USE_EV_UDATA_SPECIFIC
TAILQ_INSERT_TAIL(&_dispatch_sources[0],
&_dispatch_kevent_data_or, dk_list);
TAILQ_INSERT_TAIL(&_dispatch_sources[0],
(uintptr_t)&_dispatch_kevent_data_or;
_dispatch_kevent_data_add.dk_kevent.udata =
(uintptr_t)&_dispatch_kevent_data_add;
+#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC
}
static inline uintptr_t
static void
_dispatch_kevent_insert(dispatch_kevent_t dk)
{
+ if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return;
_dispatch_kevent_guard(dk);
uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
dk->dk_kevent.filter);
static bool
_dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp)
{
- dispatch_kevent_t dk, ds_dkev = *dkp;
+ dispatch_kevent_t dk = NULL, ds_dkev = *dkp;
uint32_t new_flags;
bool do_resume = false;
- dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident,
- ds_dkev->dk_kevent.filter);
+ if (!(ds_dkev->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
+ dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident,
+ ds_dkev->dk_kevent.filter);
+ }
if (dk) {
// If an existing dispatch kevent is found, check to see if new flags
// need to be added to the existing kevent
return do_resume;
}
-static bool
+static long
_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags,
uint32_t del_flags)
{
case DISPATCH_EVFILT_MACH_NOTIFICATION:
return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags);
#endif
- case EVFILT_PROC:
- if (dk->dk_kevent.flags & EV_ONESHOT) {
+ default:
+ if (dk->dk_kevent.flags & EV_DELETE) {
return 0;
}
- // fall through
- default:
r = _dispatch_kq_update(&dk->dk_kevent);
- if (dk->dk_kevent.flags & EV_DISPATCH) {
+ if (r && (dk->dk_kevent.flags & EV_ADD) &&
+ (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
+ dk->dk_kevent.flags |= EV_DELETE;
+ dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE);
+ } else if (dk->dk_kevent.flags & EV_DISPATCH) {
dk->dk_kevent.flags &= ~EV_ADD;
}
return r;
}
}
-static void
-_dispatch_kevent_dispose(dispatch_kevent_t dk)
+static long
+_dispatch_kevent_dispose(dispatch_kevent_t dk, int options)
{
- uintptr_t hash;
-
+ long r = 0;
switch (dk->dk_kevent.filter) {
case DISPATCH_EVFILT_TIMER:
case DISPATCH_EVFILT_CUSTOM_ADD:
case DISPATCH_EVFILT_CUSTOM_OR:
- // these sources live on statically allocated lists
- return;
+ if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) {
+ free(dk);
+ } else {
+ // these sources live on statically allocated lists
+ }
+ return r;
#if HAVE_MACH
case EVFILT_MACHPORT:
_dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags);
_dispatch_kevent_mach_notify_resume(dk, 0, dk->dk_kevent.fflags);
break;
#endif
- case EVFILT_PROC:
- if (dk->dk_kevent.flags & EV_ONESHOT) {
- break; // implicitly deleted
- }
- // fall through
default:
if (~dk->dk_kevent.flags & EV_DELETE) {
dk->dk_kevent.flags |= EV_DELETE;
dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE);
- _dispatch_kq_update(&dk->dk_kevent);
+ if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
+ dk->dk_kevent.flags |= EV_ENABLE;
+ }
+ r = _dispatch_kq_update(&dk->dk_kevent);
+ if (r == ENOENT && (options & DKEV_DISPOSE_IGNORE_ENOENT)) {
+ r = 0;
+ }
+ if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) {
+ dk->dk_kevent.flags &= ~EV_ENABLE;
+ }
}
break;
}
-
- hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
- dk->dk_kevent.filter);
- TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list);
- _dispatch_kevent_unguard(dk);
- free(dk);
+ if ((r == EINPROGRESS || r == ENOENT) &&
+ (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
+	// deferred EV_DELETE or concurrent EV_DELETE delivery
+ dk->dk_kevent.flags &= ~EV_DELETE;
+ dk->dk_kevent.flags |= EV_ENABLE;
+ } else {
+ if ((dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
+#if DISPATCH_DEBUG
+ // zero/trash dr linkage
+ dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources);
+ TAILQ_REMOVE(&dk->dk_sources, dr, dr_list);
+#endif
+ } else {
+ uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident,
+ dk->dk_kevent.filter);
+ TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list);
+ }
+ _dispatch_kevent_unguard(dk);
+ free(dk);
+ }
+ return r;
}
-static void
-_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg)
+static long
+_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, int options)
{
dispatch_source_refs_t dri;
uint32_t del_flags, fflags = 0;
+ long r = 0;
- if (TAILQ_EMPTY(&dk->dk_sources)) {
- _dispatch_kevent_dispose(dk);
+ if (TAILQ_EMPTY(&dk->dk_sources) ||
+ (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) {
+ r = _dispatch_kevent_dispose(dk, options);
} else {
TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) {
dispatch_source_t dsi = _dispatch_source_from_refs(dri);
del_flags = flg & ~fflags;
if (del_flags) {
dk->dk_kevent.flags |= EV_ADD;
- dk->dk_kevent.fflags = fflags;
- _dispatch_kevent_resume(dk, 0, del_flags);
+ dk->dk_kevent.fflags &= ~del_flags;
+ r = _dispatch_kevent_resume(dk, 0, del_flags);
}
}
+ return r;
}
DISPATCH_NOINLINE
static void
-_dispatch_kevent_proc_exit(struct kevent64_s *ke)
+_dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke)
{
// EVFILT_PROC may fail with ESRCH when the process exists but is a zombie
// <rdar://problem/5067725>. As a workaround, we simulate an exit event for
// any EVFILT_PROC with an invalid pid <rdar://problem/6626350>.
- struct kevent64_s fake;
+ _dispatch_kevent_qos_s fake;
fake = *ke;
fake.flags &= ~EV_ERROR;
fake.fflags = NOTE_EXIT;
DISPATCH_NOINLINE
static void
-_dispatch_kevent_error(struct kevent64_s *ke)
+_dispatch_kevent_error(_dispatch_kevent_qos_s *ke)
{
_dispatch_kevent_debug(ke, __func__);
if (ke->data) {
}
static void
-_dispatch_kevent_drain(struct kevent64_s *ke)
+_dispatch_kevent_drain(_dispatch_kevent_qos_s *ke)
{
#if DISPATCH_DEBUG
static dispatch_once_t pred;
dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger);
#endif
if (ke->filter == EVFILT_USER) {
+ _dispatch_kevent_mgr_debug(ke, __func__);
return;
}
if (slowpath(ke->flags & EV_ERROR)) {
- if (ke->filter == EVFILT_PROC) {
+ if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
+ ke->data = 0; // don't return error from caller
if (ke->flags & EV_DELETE) {
- // Process exited while monitored
+ _dispatch_debug("kevent[0x%llx]: ignoring ESRCH from "
+ "EVFILT_PROC EV_DELETE", ke->udata);
return;
- } else if (ke->data == ESRCH) {
- return _dispatch_kevent_proc_exit(ke);
}
+ _dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: "
+ "generating fake NOTE_EXIT", ke->udata);
+ return _dispatch_kevent_proc_exit(ke);
}
return _dispatch_kevent_error(ke);
}
- _dispatch_kevent_debug(ke, __func__);
if (ke->filter == EVFILT_TIMER) {
return _dispatch_timers_kevent(ke);
}
DISPATCH_NOINLINE
static void
-_dispatch_kevent_merge(struct kevent64_s *ke)
+_dispatch_kevent_merge(_dispatch_kevent_qos_s *ke)
{
+ _dispatch_kevent_debug(ke, __func__);
dispatch_kevent_t dk;
- dispatch_source_refs_t dri;
+ dispatch_source_refs_t dri, dr_next;
dk = (void*)ke->udata;
dispatch_assert(dk);
- if (ke->flags & EV_ONESHOT) {
- dk->dk_kevent.flags |= EV_ONESHOT;
- }
- TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) {
+ TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) {
_dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke);
}
}
ds->ds_pending_data = 0;
// Re-arm in case we got disarmed because of pending set_timer suspension
(void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, release);
+ _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev);
dispatch_resume(ds);
// Must happen after resume to avoid getting disarmed due to suspension
_dispatch_timers_update(ds);
#define DISPATCH_KEVENT_TIMEOUT_INIT(qos, note) \
DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_QOS_##qos, note)
-struct kevent64_s _dispatch_kevent_timeout[] = {
+_dispatch_kevent_qos_s _dispatch_kevent_timeout[] = {
DISPATCH_KEVENT_TIMEOUT_INIT(NORMAL, 0),
DISPATCH_KEVENT_TIMEOUT_INIT(CRITICAL, NOTE_CRITICAL),
DISPATCH_KEVENT_TIMEOUT_INIT(BACKGROUND, NOTE_BACKGROUND),
ds->ds_pending_data) {
tidx = DISPATCH_TIMER_INDEX_DISARM;
(void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds,
+ ds->ds_dkev);
} else {
tidx = _dispatch_source_timer_idx(dr);
}
ds->ds_is_installed = true;
if (tidx != DISPATCH_TIMER_INDEX_DISARM) {
(void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed);
+ _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds,
+ ds->ds_dkev);
}
_dispatch_object_debug(ds, "%s", __func__);
ds->ds_dkev = NULL;
}
static bool
-_dispatch_timers_program2(uint64_t nows[], struct kevent64_s *ke,
+_dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke,
unsigned int qos)
{
unsigned int tidx;
}
static void
-_dispatch_timers_kevent(struct kevent64_s *ke)
+_dispatch_timers_kevent(_dispatch_kevent_qos_s *ke)
{
+ _dispatch_kevent_debug(ke, __func__);
dispatch_assert(ke->data > 0);
dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
static int _dispatch_kq;
+#if DISPATCH_USE_SELECT_FALLBACK
+
static unsigned int _dispatch_select_workaround;
static fd_set _dispatch_rfds;
static fd_set _dispatch_wfds;
DISPATCH_NOINLINE
static bool
-_dispatch_select_register(struct kevent64_s *kev)
+_dispatch_select_register(const _dispatch_kevent_qos_s *kev)
{
-
// Must execute on manager queue
DISPATCH_ASSERT_ON_MANAGER_QUEUE();
_dispatch_debug("select workaround used to read fd %d: 0x%lx",
(int)kev->ident, (long)kev->data);
}
+ return true;
}
- return true;
+ break;
case EVFILT_WRITE:
if ((kev->data == EINVAL || kev->data == ENOENT) &&
dispatch_assume(kev->ident < FD_SETSIZE)) {
_dispatch_debug("select workaround used to write fd %d: 0x%lx",
(int)kev->ident, (long)kev->data);
}
+ return true;
}
- return true;
+ break;
}
return false;
}
DISPATCH_NOINLINE
static bool
-_dispatch_select_unregister(const struct kevent64_s *kev)
+_dispatch_select_unregister(const _dispatch_kevent_qos_s *kev)
{
// Must execute on manager queue
DISPATCH_ASSERT_ON_MANAGER_QUEUE();
{
static const struct timeval timeout_immediately = { 0, 0 };
fd_set tmp_rfds, tmp_wfds;
- struct kevent64_s kev;
int err, i, r;
bool kevent_avail = false;
continue;
}
FD_CLR(i, &_dispatch_rfds); // emulate EV_DISPATCH
- EV_SET64(&kev, i, EVFILT_READ,
- EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1,
- _dispatch_rfd_ptrs[i], 0, 0);
+ _dispatch_kevent_qos_s kev = {
+ .ident = (uint64_t)i,
+ .filter = EVFILT_READ,
+ .flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
+ .data = 1,
+ .udata = _dispatch_rfd_ptrs[i],
+ };
_dispatch_kevent_drain(&kev);
}
if (FD_ISSET(i, &tmp_wfds)) {
FD_CLR(i, &_dispatch_wfds); // emulate EV_DISPATCH
- EV_SET64(&kev, i, EVFILT_WRITE,
- EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1,
- _dispatch_wfd_ptrs[i], 0, 0);
+ _dispatch_kevent_qos_s kev = {
+ .ident = (uint64_t)i,
+ .filter = EVFILT_WRITE,
+ .flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
+ .data = 1,
+ .udata = _dispatch_wfd_ptrs[i],
+ };
_dispatch_kevent_drain(&kev);
}
}
return kevent_avail;
}
+#endif // DISPATCH_USE_SELECT_FALLBACK
+
#pragma mark -
#pragma mark dispatch_kqueue
static void
_dispatch_kq_init(void *context DISPATCH_UNUSED)
{
- static const struct kevent64_s kev = {
+ static const _dispatch_kevent_qos_s kev = {
.ident = 1,
.filter = EVFILT_USER,
.flags = EV_ADD|EV_CLEAR,
DISPATCH_CRASH("kqueue() failure");
break;
}
- } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) {
+ }
+#if DISPATCH_USE_SELECT_FALLBACK
+ else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) {
// in case we fall back to select()
FD_SET(_dispatch_kq, &_dispatch_rfds);
}
+#endif // DISPATCH_USE_SELECT_FALLBACK
- (void)dispatch_assume_zero(kevent64(_dispatch_kq, &kev, 1, NULL, 0, 0,
- NULL));
+ (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL,
+ NULL, 0));
_dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
}
DISPATCH_NOINLINE
static long
-_dispatch_kq_update(const struct kevent64_s *kev)
+_dispatch_kq_update(const _dispatch_kevent_qos_s *kev)
{
int r;
- struct kevent64_s kev_copy;
+ _dispatch_kevent_qos_s kev_error;
+#if DISPATCH_USE_SELECT_FALLBACK
if (slowpath(_dispatch_select_workaround) && (kev->flags & EV_DELETE)) {
if (_dispatch_select_unregister(kev)) {
return 0;
}
}
- kev_copy = *kev;
- // This ensures we don't get a pending kevent back while registering
- // a new kevent
- kev_copy.flags |= EV_RECEIPT;
+#endif // DISPATCH_USE_SELECT_FALLBACK
+ if (kev->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
+ _dispatch_kevent_debug(kev, __func__);
+ }
retry:
- r = dispatch_assume(kevent64(_dispatch_get_kq(), &kev_copy, 1,
- &kev_copy, 1, 0, NULL));
+ r = kevent_qos(_dispatch_get_kq(), kev, 1, &kev_error,
+ 1, NULL, NULL, KEVENT_FLAG_ERROR_EVENTS);
if (slowpath(r == -1)) {
int err = errno;
switch (err) {
}
return err;
}
- switch (kev_copy.data) {
- case 0:
+ if (r == 0) {
return 0;
- case EBADF:
- case EPERM:
- case EINVAL:
+ }
+ if (kev_error.flags & EV_ERROR && kev_error.data) {
+ _dispatch_kevent_debug(&kev_error, __func__);
+ }
+ r = (int)kev_error.data;
+ switch (r) {
+ case 0:
+ _dispatch_kevent_mgr_debug(&kev_error, __func__);
+ break;
+ case EINPROGRESS:
+ // deferred EV_DELETE
+ break;
case ENOENT:
+ if ((kev->flags & EV_DELETE) && (kev->flags & EV_UDATA_SPECIFIC)) {
+ // potential concurrent EV_DELETE delivery
+ break;
+ }
+ // fall through
+ case EINVAL:
if ((kev->flags & (EV_ADD|EV_ENABLE)) && !(kev->flags & EV_DELETE)) {
- if (_dispatch_select_register(&kev_copy)) {
- return 0;
+#if DISPATCH_USE_SELECT_FALLBACK
+ if (_dispatch_select_register(&kev_error)) {
+ r = 0;
+ break;
+ }
+#elif DISPATCH_DEBUG
+ if (kev->filter == EVFILT_READ || kev->filter == EVFILT_WRITE) {
+ DISPATCH_CRASH("Unsupported fd for EVFILT_READ or EVFILT_WRITE "
+ "kevent");
}
+#endif // DISPATCH_USE_SELECT_FALLBACK
}
// fall through
+ case EBADF:
+ case EPERM:
default:
- kev_copy.flags |= kev->flags;
- _dispatch_kevent_drain(&kev_copy);
+ kev_error.flags |= kev->flags;
+ _dispatch_kevent_drain(&kev_error);
+ r = (int)kev_error.data;
break;
}
- return (long)kev_copy.data;
+ return r;
}
#pragma mark -
#pragma mark dispatch_mgr
-static struct kevent64_s *_dispatch_kevent_enable;
+static _dispatch_kevent_qos_s *_dispatch_kevent_enable;
static void inline
-_dispatch_mgr_kevent_reenable(struct kevent64_s *ke)
+_dispatch_mgr_kevent_reenable(_dispatch_kevent_qos_s *ke)
{
dispatch_assert(!_dispatch_kevent_enable || _dispatch_kevent_enable == ke);
_dispatch_kevent_enable = ke;
return false;
}
- static const struct kevent64_s kev = {
+ static const _dispatch_kevent_qos_s kev = {
.ident = 1,
.filter = EVFILT_USER,
.fflags = NOTE_TRIGGER,
static void
_dispatch_mgr_invoke(void)
{
- static const struct timespec timeout_immediately = { 0, 0 };
- struct kevent64_s kev;
+ _dispatch_kevent_qos_s kev;
bool poll;
int r;
for (;;) {
_dispatch_mgr_queue_drain();
poll = _dispatch_mgr_timers();
+#if DISPATCH_USE_SELECT_FALLBACK
if (slowpath(_dispatch_select_workaround)) {
poll = _dispatch_mgr_select(poll);
if (!poll) continue;
}
+#endif // DISPATCH_USE_SELECT_FALLBACK
poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q);
- r = kevent64(_dispatch_kq, _dispatch_kevent_enable,
- _dispatch_kevent_enable ? 1 : 0, &kev, 1, 0,
- poll ? &timeout_immediately : NULL);
+ r = kevent_qos(_dispatch_kq, _dispatch_kevent_enable,
+ _dispatch_kevent_enable ? 1 : 0, &kev, 1, NULL, NULL,
+ poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE);
_dispatch_kevent_enable = NULL;
if (slowpath(r == -1)) {
int err = errno;
DISPATCH_NORETURN
void
-_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED)
+_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED,
+ dispatch_object_t dou DISPATCH_UNUSED,
+ dispatch_invoke_flags_t flags DISPATCH_UNUSED)
{
_dispatch_mgr_init();
// never returns, so burn bridges behind us & clear stack 2k ahead
#define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0])
-static void _dispatch_kevent_machport_drain(struct kevent64_s *ke);
-static void _dispatch_kevent_mach_msg_drain(struct kevent64_s *ke);
+static void _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke);
+static void _dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke);
static void _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr);
static void _dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr);
static void _dispatch_source_merge_mach_msg(dispatch_source_t ds,
dispatch_mach_reply_refs_t dmr, mach_msg_header_t *hdr,
mach_msg_size_t siz);
static void _dispatch_mach_merge_kevent(dispatch_mach_t dm,
- const struct kevent64_s *ke);
+ const _dispatch_kevent_qos_s *ke);
static inline mach_msg_option_t _dispatch_mach_checkin_options(void);
static const size_t _dispatch_mach_recv_msg_size =
static mach_msg_size_t _dispatch_mach_recv_msg_buf_size;
static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset;
static mach_port_t _dispatch_mach_notify_port;
-static struct kevent64_s _dispatch_mach_recv_kevent = {
+static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = {
.filter = EVFILT_MACHPORT,
.flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
.fflags = DISPATCH_MACH_RCV_OPTIONS,
static void
_dispatch_mach_portset_init(void *context DISPATCH_UNUSED)
{
- struct kevent64_s kev = {
+ _dispatch_kevent_qos_s kev = {
.filter = EVFILT_MACHPORT,
.flags = EV_ADD,
};
}
static void
-_dispatch_kevent_mach_recv_reenable(struct kevent64_s *ke DISPATCH_UNUSED)
+_dispatch_kevent_mach_recv_reenable(_dispatch_kevent_qos_s *ke DISPATCH_UNUSED)
{
#if (TARGET_IPHONE_SIMULATOR && \
IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) || \
(!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090)
// delete and re-add kevent to workaround <rdar://problem/13924256>
if (ke->ext[1] != _dispatch_mach_recv_kevent.ext[1]) {
- struct kevent64_s kev = _dispatch_mach_recv_kevent;
+ _dispatch_kevent_qos_s kev = _dispatch_mach_recv_kevent;
kev.flags = EV_DELETE;
_dispatch_kq_update(&kev);
}
}
static inline void
-_dispatch_kevent_mach_portset(struct kevent64_s *ke)
+_dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke)
{
if (ke->ident == _dispatch_mach_recv_portset) {
return _dispatch_kevent_mach_msg_drain(ke);
DISPATCH_NOINLINE
static void
-_dispatch_kevent_machport_drain(struct kevent64_s *ke)
+_dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke)
{
+ _dispatch_kevent_debug(ke, __func__);
mach_port_t name = (mach_port_name_t)ke->data;
dispatch_kevent_t dk;
- struct kevent64_s kev;
_dispatch_debug_machport(name);
dk = _dispatch_kevent_find(name, EVFILT_MACHPORT);
}
_dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH
- EV_SET64(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH,
- DISPATCH_MACH_RECV_MESSAGE, 0, (uintptr_t)dk, 0, 0);
+ _dispatch_kevent_qos_s kev = {
+ .ident = name,
+ .filter = EVFILT_MACHPORT,
+ .flags = EV_ADD|EV_ENABLE|EV_DISPATCH,
+ .fflags = DISPATCH_MACH_RECV_MESSAGE,
+ .udata = (uintptr_t)dk,
+ };
_dispatch_kevent_debug(&kev, __func__);
_dispatch_kevent_merge(&kev);
}
DISPATCH_NOINLINE
static void
-_dispatch_kevent_mach_msg_drain(struct kevent64_s *ke)
+_dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke)
{
+ _dispatch_kevent_debug(ke, __func__);
mach_msg_header_t *hdr = (mach_msg_header_t*)ke->ext[0];
mach_msg_size_t siz, msgsiz;
mach_msg_return_t kr = (mach_msg_return_t)ke->fflags;
return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, hdr, siz);
}
-DISPATCH_ALWAYS_INLINE
-static inline void
+DISPATCH_NOINLINE
+static void
_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final)
{
dispatch_source_refs_t dri, dr_next;
dispatch_kevent_t dk;
- struct kevent64_s kev;
bool unreg;
dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION);
// Update notification registration state.
dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS;
- EV_SET64(&kev, name, DISPATCH_EVFILT_MACH_NOTIFICATION, EV_ADD|EV_ENABLE,
- flag, 0, (uintptr_t)dk, 0, 0);
+ _dispatch_kevent_qos_s kev = {
+ .ident = name,
+ .filter = DISPATCH_EVFILT_MACH_NOTIFICATION,
+ .flags = EV_ADD|EV_ENABLE,
+ .fflags = flag,
+ .udata = (uintptr_t)dk,
+ };
if (final) {
// This can never happen again
unreg = true;
}
dispatch_kevent_t dk = dmr->dmr_dkev;
TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list);
- _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE);
+ _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE, 0);
TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list);
if (dmr->dmr_voucher) _voucher_release(dmr->dmr_voucher);
free(dmr);
dm->ds_pending_data_mask &= ~(unsigned long)
(DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD);
_dispatch_kevent_unregister(dk,
- DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD);
+ DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD, 0);
}
DISPATCH_NOINLINE
_dispatch_mach_send_drain(dm);
}
-DISPATCH_NOINLINE
static void
-_dispatch_mach_merge_kevent(dispatch_mach_t dm, const struct kevent64_s *ke)
+_dispatch_mach_merge_kevent(dispatch_mach_t dm,
+ const _dispatch_kevent_qos_s *ke)
{
if (!(ke->fflags & dm->ds_pending_data_mask)) {
return;
return options;
}
+// Returns the pthread priority to propagate with an outgoing mach message
+// send. When the build supports no-importance QoS and the caller passed
+// MACH_SEND_NOIMPORTANCE, no priority is propagated (returns 0) —
+// presumably because such sends opt out of importance donation (NOTE:
+// inferred from the option name; confirm against mach_msg documentation).
+DISPATCH_ALWAYS_INLINE
+static inline pthread_priority_t
+_dispatch_mach_priority_propagate(mach_msg_option_t options)
+{
+#if DISPATCH_USE_NOIMPORTANCE_QOS
+	if (options & MACH_SEND_NOIMPORTANCE) return 0;
+#else
+	(void)options; // options only consulted when NOIMPORTANCE QoS is built
+#endif
+	return _dispatch_priority_propagate();
+}
+
DISPATCH_NOINLINE
void
dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg,
}
dispatch_retain(dmsg);
dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK);
+ pthread_priority_t priority = _dispatch_mach_priority_propagate(options);
options |= _dispatch_mach_send_options();
_dispatch_mach_msg_set_options(dmsg, options & ~DISPATCH_MACH_OPTIONS_MASK);
mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg);
MACH_PORT_NULL);
bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) ==
MACH_MSG_TYPE_MOVE_SEND_ONCE);
- dmsg->dmsg_priority = _dispatch_priority_propagate();
+ dmsg->dmsg_priority = priority;
dmsg->dmsg_voucher = _voucher_copy();
_dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg);
if ((!is_reply && slowpath(dr->dm_tail)) ||
DISPATCH_NOINLINE
void
-_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg)
+_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg,
+ dispatch_object_t dou DISPATCH_UNUSED,
+ dispatch_invoke_flags_t flags DISPATCH_UNUSED)
{
dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current();
dispatch_mach_refs_t dr = dm->ds_refs;
DISPATCH_NOINLINE
void
-_dispatch_mach_invoke(dispatch_mach_t dm)
+_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags)
{
- _dispatch_queue_class_invoke(dm, _dispatch_mach_invoke2);
+ _dispatch_queue_class_invoke(dm, dou._dc, flags, _dispatch_mach_invoke2);
}
unsigned long
_evfilt2(EVFILT_PROC);
_evfilt2(EVFILT_SIGNAL);
_evfilt2(EVFILT_TIMER);
-#ifdef EVFILT_VM
- _evfilt2(EVFILT_VM);
-#endif
-#ifdef EVFILT_MEMORYSTATUS
- _evfilt2(EVFILT_MEMORYSTATUS);
-#endif
#if HAVE_MACH
_evfilt2(EVFILT_MACHPORT);
_evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
#endif
_evfilt2(EVFILT_FS);
_evfilt2(EVFILT_USER);
+#ifdef EVFILT_VM
+ _evfilt2(EVFILT_VM);
+#endif
+#ifdef EVFILT_SOCK
+ _evfilt2(EVFILT_SOCK);
+#endif
+#ifdef EVFILT_MEMORYSTATUS
+ _evfilt2(EVFILT_MEMORYSTATUS);
+#endif
_evfilt2(DISPATCH_EVFILT_TIMER);
_evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
}
}
+#if DISPATCH_DEBUG
+// Decodes ONE flag from *flagsp per call: clears the matched bit(s) and
+// returns the flag's name with a trailing "|" separator (the caller strips
+// the final separator). Any bits left unrecognized are cleared in one shot
+// and reported as "EV_UNKNOWN ".
+static const char *
+_evflagstr2(uint16_t *flagsp)
+{
+#define _evflag2(f) \
+	if ((*flagsp & (f)) == (f) && (f)) { \
+		*flagsp &= ~(f); \
+		return #f "|"; \
+	}
+	// The `&& (f)` guard above keeps a zero-valued flag macro from
+	// matching every call and looping forever in the caller.
+	_evflag2(EV_ADD);
+	_evflag2(EV_DELETE);
+	_evflag2(EV_ENABLE);
+	_evflag2(EV_DISABLE);
+	_evflag2(EV_ONESHOT);
+	_evflag2(EV_CLEAR);
+	_evflag2(EV_RECEIPT);
+	_evflag2(EV_DISPATCH);
+	_evflag2(EV_UDATA_SPECIFIC);
+	_evflag2(EV_POLL);
+	_evflag2(EV_OOBAND);
+	_evflag2(EV_ERROR);
+	_evflag2(EV_EOF);
+	*flagsp = 0;
+	return "EV_UNKNOWN ";
+}
+
+DISPATCH_NOINLINE
+static const char *
+_evflagstr(uint16_t flags, char *str, size_t strsize)
+{
+	// Render a '|'-separated list of kevent flag names into str.
+	// _evflagstr2() consumes one flag per call until none remain.
+	uint16_t remaining = flags;
+	str[0] = 0;
+	while (remaining) {
+		strlcat(str, _evflagstr2(&remaining), strsize);
+	}
+	size_t len = strlen(str);
+	if (len) {
+		str[len - 1] = 0; // drop the trailing separator
+	}
+	return str;
+}
+#endif
+
+// Formats the common dispatch source attributes (target queue, ident,
+// pending-data mask/value and the DSF_* state flags) into buf for debug
+// output; returns the character count produced by dsnprintf.
static size_t
_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz)
{
	dispatch_queue_t target = ds->do_targetq;
	return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, "
-			"pending_data = 0x%lx, pending_data_mask = 0x%lx, ",
+			"mask = 0x%lx, pending_data = 0x%lx, registered = %d, "
+			"armed = %d, deleted = %d%s%s, canceled = %d, needs_mgr = %d, ",
			target && target->dq_label ? target->dq_label : "", target,
-			ds->ds_ident_hack, ds->ds_pending_data, ds->ds_pending_data_mask);
+			ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data,
+			ds->ds_is_installed, (bool)(ds->ds_atomic_flags & DSF_ARMED),
+			(bool)(ds->ds_atomic_flags & DSF_DELETED), ds->ds_pending_delete ?
+			" (pending)" : "", (ds->ds_atomic_flags & DSF_ONESHOT) ?
+			" (oneshot)" : "", (bool)(ds->ds_atomic_flags & DSF_CANCELED),
+			ds->ds_needs_mgr);
}
static size_t
if (ds->ds_is_timer) {
offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset);
}
- offset += dsnprintf(&buf[offset], bufsiz - offset, "filter = %s }",
- ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????");
+ offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, "
+ "filter = %s }", ds->ds_dkev, ds->ds_is_direct_kevent ? " (direct)"
+ : "", ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) :
+ "????");
return offset;
}
}
#if DISPATCH_DEBUG
+// Logs a human-readable dump of a kevent structure (ident, filter name,
+// symbolically-decoded flags plus raw value, fflags, data, udata, ext[])
+// tagged with the caller-supplied context string.
+DISPATCH_NOINLINE
static void
-_dispatch_kevent_debug(struct kevent64_s* kev, const char* str)
-{
-	_dispatch_log("kevent[%p] = { ident = 0x%llx, filter = %s, flags = 0x%x, "
-			"fflags = 0x%x, data = 0x%llx, udata = 0x%llx, ext[0] = 0x%llx, "
-			"ext[1] = 0x%llx }: %s", kev, kev->ident, _evfiltstr(kev->filter),
-			kev->flags, kev->fflags, kev->data, kev->udata, kev->ext[0],
-			kev->ext[1], str);
+_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev, const char* str)
+{
+	// 256 bytes comfortably holds every flag name concatenated.
+	char flagstr[256];
+	_dispatch_debug("kevent[%p] = { ident = 0x%llx, filter = %s, "
+			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
+			"ext[0] = 0x%llx, ext[1] = 0x%llx }: %s", kev, kev->ident,
+			_evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
+			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
+			kev->ext[0], kev->ext[1], str);
}
static void
+// One registered kernel event: dk_kevent holds the kevent template/state,
+// dk_sources the dispatch sources multiplexed onto this event, and dk_list
+// links the entry into the kevent hash table.
struct dispatch_kevent_s {
	TAILQ_ENTRY(dispatch_kevent_s) dk_list;
	TAILQ_HEAD(, dispatch_source_refs_s) dk_sources;
-	struct kevent64_s dk_kevent;
+	_dispatch_kevent_qos_s dk_kevent;
};
typedef struct dispatch_kevent_s *dispatch_kevent_t;
struct dispatch_source_type_s {
- struct kevent64_s ke;
+ _dispatch_kevent_qos_s ke;
uint64_t mask;
void (*init)(dispatch_source_t ds, dispatch_source_type_t type,
uintptr_t handle, unsigned long mask, dispatch_queue_t q);
// ds_atomic_flags bits
#define DSF_CANCELED 1u // cancellation has been requested
#define DSF_ARMED 2u // source is armed
+#define DSF_DELETED 4u // source received EV_DELETE event
+#define DSF_ONESHOT 8u // source received EV_ONESHOT event
#define DISPATCH_SOURCE_HEADER(refs) \
dispatch_kevent_t ds_dkev; \
ds_is_level:1, \
ds_is_adder:1, \
ds_is_installed:1, \
+ ds_is_direct_kevent:1, \
ds_needs_rearm:1, \
+ ds_pending_delete:1, \
+ ds_needs_mgr:1, \
ds_is_timer:1, \
ds_vmpressure_override:1, \
ds_memorystatus_override:1, \
void _dispatch_source_xref_dispose(dispatch_source_t ds);
void _dispatch_source_dispose(dispatch_source_t ds);
-void _dispatch_source_invoke(dispatch_source_t ds);
+void _dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags);
unsigned long _dispatch_source_probe(dispatch_source_t ds);
size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz);
void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval);
void *ctxt, dispatch_function_t handler);
void _dispatch_mach_dispose(dispatch_mach_t dm);
-void _dispatch_mach_invoke(dispatch_mach_t dm);
+void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags);
unsigned long _dispatch_mach_probe(dispatch_mach_t dm);
size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz);
void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg);
-void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg);
+void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags);
size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz);
void _dispatch_mach_barrier_invoke(void *ctxt);
unsigned long _dispatch_mgr_wakeup(dispatch_queue_t dq);
-void _dispatch_mgr_thread(dispatch_queue_t dq);
+void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_object_t dou,
+ dispatch_invoke_flags_t flags);
#endif /* __DISPATCH_SOURCE_INTERNAL__ */
#ifndef __DISPATCH_TRACE__
#define __DISPATCH_TRACE__
-#if !__OBJC2__
+#if !__OBJC2__ && !defined(__cplusplus)
#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION
typedef struct dispatch_trace_timer_params_s {
_DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \
dispatch_source_t _ds = (dispatch_source_t)_do; \
_dc = _ds->ds_refs->ds_handler[DS_EVENT_HANDLER]; \
- _func = _dc->dc_func; \
- _ctxt = _dc->dc_ctxt; \
+ _func = _dc ? _dc->dc_func : NULL; \
+ _ctxt = _dc ? _dc->dc_ctxt : NULL; \
} else { \
_func = (dispatch_function_t)_dispatch_queue_invoke; \
_ctxt = _do->do_ctxt; \
#endif // DISPATCH_USE_DTRACE
-#endif // !__OBJC2__
+#endif // !__OBJC2__ && !defined(__cplusplus)
#endif // __DISPATCH_TRACE__
#define VOUCHER_ATM_COLLECT_THRESHOLD 1
#endif
#define VATM_COLLECT_THRESHOLD_VALUE(t) (((t) - 1) * 2)
-static volatile long _voucher_atm_collect_level;
-static long _voucher_atm_collect_threshold =
- VATM_COLLECT_THRESHOLD_VALUE(VOUCHER_ATM_COLLECT_THRESHOLD);
-static unsigned long _voucher_atm_subid_bits;
+static uint64_t volatile _voucher_atm_generation;
typedef struct _voucher_atm_s *_voucher_atm_t;
static void _voucher_activity_atfork_child(void);
-static inline mach_voucher_t _voucher_get_atm_mach_voucher(voucher_t voucher);
-static inline mach_voucher_t _voucher_activity_get_atm_mach_voucher(
- _voucher_activity_t act);
-static inline _voucher_activity_t _voucher_activity_get(voucher_t voucher);
static _voucher_activity_t _voucher_activity_copy_from_mach_voucher(
mach_voucher_t kv, voucher_activity_id_t va_id);
static inline _voucher_activity_t _voucher_activity_retain(
_voucher_activity_t act);
static inline void _voucher_activity_release(_voucher_activity_t act);
+static void _voucher_activity_remove(_voucher_activity_t act);
+static inline _voucher_atm_t _voucher_atm_retain(_voucher_atm_t vatm);
+static inline void _voucher_atm_release(_voucher_atm_t vatm);
#pragma mark -
#pragma mark voucher_t
return _voucher_copy_without_importance();
}
+// Public API: take a reference on a voucher; returns the voucher so calls
+// can be chained.
+voucher_t
+voucher_retain(voucher_t voucher)
+{
+	return _voucher_retain(voucher);
+}
+
+// Public API: drop a reference on a voucher.
+void
+voucher_release(voucher_t voucher)
+{
+	// No `return` of the (void) call expression here: a return statement
+	// with an expression in a function returning void violates C11
+	// 6.8.6.4 (it only compiles as a compiler extension).
+	_voucher_release(voucher);
+}
+
void
_voucher_thread_cleanup(void *voucher)
{
DISPATCH_CACHELINE_ALIGN
static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE];
-#define _vouchers(kv) (&_vouchers[VL_HASH((kv))])
+#define _vouchers_head(kv) (&_vouchers[VL_HASH((kv))])
static os_lock_handoff_s _vouchers_lock = OS_LOCK_HANDOFF_INIT;
#define _vouchers_lock_lock() os_lock_lock(&_vouchers_lock)
#define _vouchers_lock_unlock() os_lock_unlock(&_vouchers_lock)
voucher_t v;
if (!kv) return NULL;
_vouchers_lock_lock();
- TAILQ_FOREACH(v, _vouchers(kv), v_list) {
+ TAILQ_FOREACH(v, _vouchers_head(kv), v_list) {
if (v->v_ipc_kvoucher == kv) {
int xref_cnt = dispatch_atomic_inc2o(v, os_obj_xref_cnt, relaxed);
_dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1);
_dispatch_voucher_debug("corruption", v);
DISPATCH_CRASH("Voucher corruption");
}
- TAILQ_INSERT_TAIL(_vouchers(kv), v, v_list);
+ TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list);
_vouchers_lock_unlock();
}
// check for resurrection race with _voucher_find_and_retain
if (dispatch_atomic_load2o(v, os_obj_xref_cnt, seq_cst) < 0 &&
_TAILQ_IS_ENQUEUED(v, v_list)) {
- TAILQ_REMOVE(_vouchers(kv), v, v_list);
+ TAILQ_REMOVE(_vouchers_head(kv), v, v_list);
_TAILQ_MARK_NOT_ENQUEUED(v, v_list);
v->v_list.tqe_next = (void*)~0ull;
}
#endif
}
+// Return the ATM backing `voucher`, falling back to the process-wide task
+// ATM when the voucher is NULL or carries no ATM of its own.
+static inline _voucher_atm_t
+_voucher_get_atm(voucher_t voucher)
+{
+	if (voucher && voucher->v_atm) {
+		return voucher->v_atm;
+	}
+	return _voucher_task_atm;
+}
+
+// Return the mach voucher of the ATM associated with `voucher` (or of the
+// task ATM fallback), or MACH_VOUCHER_NULL when no ATM is available.
static inline mach_voucher_t
_voucher_get_atm_mach_voucher(voucher_t voucher)
{
-	_voucher_activity_t act = _voucher_activity_get(voucher);
-	return _voucher_activity_get_atm_mach_voucher(act);
+	_voucher_atm_t vatm = _voucher_get_atm(voucher);
+	mach_voucher_t kv = vatm ? vatm->vatm_kvoucher : MACH_VOUCHER_NULL;
+	return kv;
}
mach_voucher_t
}
voucher_activity_id_t va_id = 0, va_base_id = 0;
_voucher_activity_t act = NULL;
+ _voucher_atm_t vatm = NULL;
if (activities) {
va_id = *(voucher_activity_id_t*)content;
act = _voucher_activity_copy_from_mach_voucher(rkv, va_id);
activities++;
va_base_id = act->va_id;
}
+ if (act) {
+ vatm = _voucher_atm_retain(act->va_atm);
+ }
}
v = _voucher_alloc(activities, priority, 0);
+ v->v_atm = vatm;
v->v_activity = act;
voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
if (activities && va_base_id) {
if (activities) {
if (ov->v_activity) {
v->v_activity = _voucher_activity_retain(ov->v_activity);
+ v->v_atm = _voucher_atm_retain(ov->v_atm);
}
memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov),
activities * sizeof(voucher_activity_id_t));
if (activities) {
if (ov->v_activity) {
v->v_activity = _voucher_activity_retain(ov->v_activity);
+ v->v_atm = _voucher_atm_retain(ov->v_atm);
}
memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov),
activities * sizeof(voucher_activity_id_t));
return v;
}
+// Create (or find an existing wrapper for) a voucher carrying only the
+// accounting (bank) attribute of `ov`, stripping all other attributes.
+// Returns NULL when `ov` has no kernel voucher or no accounting copy
+// could be created. The returned voucher is retained for the caller.
+voucher_t
+_voucher_create_accounting_voucher(voucher_t ov)
+{
+	// Nothing to do unless the old voucher has a kernel voucher. If it
+	// doesn't, it can't have any accounting attributes.
+	if (!ov || !ov->v_kvoucher) return NULL;
+	kern_return_t kr = KERN_SUCCESS;
+	mach_voucher_t okv, kv = MACH_VOUCHER_NULL;
+	okv = ov->v_ipc_kvoucher ? ov->v_ipc_kvoucher : ov->v_kvoucher;
+#if VOUCHER_USE_ATTR_BANK
+	// Ask the kernel for a new voucher copying only the bank (accounting)
+	// attribute from the old kernel voucher.
+	const mach_voucher_attr_recipe_data_t accounting_copy_recipe = {
+		.key = MACH_VOUCHER_ATTR_KEY_BANK,
+		.command = MACH_VOUCHER_ATTR_COPY,
+		.previous_voucher = okv,
+	};
+	kr = _voucher_create_mach_voucher(&accounting_copy_recipe,
+			sizeof(accounting_copy_recipe), &kv);
+#endif
+	if (dispatch_assume_zero(kr) || !kv){
+		return NULL;
+	}
+	// Reuse an already-wrapped voucher for this kernel voucher if present;
+	// in that case drop the extra kernel reference we just obtained.
+	voucher_t v = _voucher_find_and_retain(kv);
+	if (v) {
+		_dispatch_voucher_debug("kvoucher[0x%08x] find accounting voucher "
+				"from voucher[%p]", v, kv, ov);
+		_voucher_dealloc_mach_voucher(kv);
+		return v;
+	}
+	v = _voucher_alloc(0, 0, 0);
+	v->v_ipc_kvoucher = v->v_kvoucher = kv;
+	if (kv == okv) {
+		// Copy degenerated to the original kernel voucher: keep the old
+		// voucher alive as the base that owns the kernel reference.
+		v->v_kvbase = _voucher_retain(ov);
+		_voucher_dealloc_mach_voucher(kv); // borrow base reference
+	}
+	_voucher_insert(v);
+	_dispatch_voucher_debug("kvoucher[0x%08x] create accounting voucher "
+			"from voucher[%p]", v, kv, ov);
+	return v;
+}
+
voucher_t
voucher_create_with_mach_msg(mach_msg_header_t *msg)
{
_voucher_activity_release(voucher->v_activity);
voucher->v_activity = NULL;
}
- voucher->v_has_priority= 0;
+ if (voucher->v_atm) {
+ _voucher_atm_release(voucher->v_atm);
+ voucher->v_atm = NULL;
+ }
+ voucher->v_has_priority = 0;
voucher->v_activities = 0;
#if VOUCHER_ENABLE_RECIPE_OBJECTS
voucher->v_recipe_extra_size = 0;
_voucher_init(void)
{
_voucher_libkernel_init();
- char *e, *end;
+ char *e;
unsigned int i;
for (i = 0; i < VL_HASH_SIZE; i++) {
TAILQ_INIT(&_vouchers[i]);
_voucher_activity_mode = mode;
if (_voucher_activity_disabled()) return;
- e = getenv("LIBDISPATCH_ACTIVITY_ATM_SUBID_BITS");
- if (e) {
- unsigned long v = strtoul(e, &end, 0);
- if (v && !*end) {
- _voucher_atm_subid_bits = v;
- }
- }
- e = getenv("LIBDISPATCH_ACTIVITY_ATM_COLLECT_THRESHOLD");
- if (e) {
- unsigned long v = strtoul(e, &end, 0);
- if (v && v < LONG_MAX/2 && !*end) {
- _voucher_atm_collect_threshold =
- VATM_COLLECT_THRESHOLD_VALUE((long)v);
- }
- }
// default task activity
bool default_task_activity = DISPATCH_DEBUG;
e = getenv("LIBDISPATCH_DEFAULT_TASK_ACTIVITY");
_Static_assert(sizeof(struct _voucher_activity_buffer_header_s) <=
sizeof(struct _voucher_activity_tracepoint_s),
"Buffer header too large");
-_Static_assert(offsetof(struct _voucher_activity_s, va_flags2) ==
- sizeof(struct _voucher_activity_tracepoint_s),
- "Extended activity object misaligned");
#if __LP64__
-_Static_assert(sizeof(struct _voucher_activity_s) ==
- 3 * sizeof(struct _voucher_activity_tracepoint_s),
- "Activity object too large");
-_Static_assert(offsetof(struct _voucher_activity_s, va_flags3) ==
- 2 * sizeof(struct _voucher_activity_tracepoint_s),
- "Extended activity object misaligned");
-_Static_assert(offsetof(struct _voucher_atm_s, vatm_activities_lock) % 64 == 0,
- "Bad ATM padding");
+_Static_assert(offsetof(struct _voucher_activity_s, va_buffers_lock) % 64 == 0,
+ "Bad activity padding");
_Static_assert(sizeof(struct _voucher_atm_s) <= 128,
"ATM too large");
#else
-_Static_assert(sizeof(struct _voucher_activity_s) ==
- 2 * sizeof(struct _voucher_activity_tracepoint_s),
- "Activity object too large");
_Static_assert(sizeof(struct _voucher_atm_s) <= 64,
"ATM too large");
#endif
"Metadata too large");
_Static_assert(sizeof(_voucher_activity_bitmap_t) % 64 == 0,
"Bad metadata bitmap size");
-_Static_assert(offsetof(struct _voucher_activity_metadata_s,
- vam_atm_mbox_bitmap) % 64 == 0,
- "Bad metadata padding");
-_Static_assert(offsetof(struct _voucher_activity_metadata_s,
- vam_base_atm_subid) % 64 == 0,
- "Bad metadata padding");
-_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_base_atm_lock)
- % 32 == 0,
- "Bad metadata padding");
-_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_atms) % 64 ==0,
- "Bad metadata padding");
-_Static_assert(sizeof(_voucher_activity_bitmap_t) * 8 *
- sizeof(atm_mailbox_offset_t) <=
- sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata),
- "Bad kernel metadata bitmap");
-_Static_assert(sizeof(atm_mailbox_offset_t) == 2 * sizeof(atm_subaid32_t),
- "Bad kernel ATM mailbox sizes");
#endif
-static const size_t _voucher_atm_mailboxes =
- sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata) /
- sizeof(atm_mailbox_offset_t);
-
#define va_buffers_lock(va) (&(va)->va_buffers_lock)
-#define vatm_activities_lock(vatm) (&(vatm)->vatm_activities_lock)
#define vatm_activities(vatm) (&(vatm)->vatm_activities)
-#define vatm_used_activities(vatm) (&(vatm)->vatm_used_activities)
-#define vam_base_atm_lock() (&_voucher_activity_heap->vam_base_atm_lock)
-#define vam_nested_atm_lock() (&_voucher_activity_heap->vam_nested_atm_lock)
#define vam_atms_lock() (&_voucher_activity_heap->vam_atms_lock)
#define vam_activities_lock() (&_voucher_activity_heap->vam_activities_lock)
#define vam_atms(hash) (&_voucher_activity_heap->vam_atms[hash])
#define vam_activities(hash) (&_voucher_activity_heap->vam_activities[hash])
#define vam_buffer_bitmap() (_voucher_activity_heap->vam_buffer_bitmap)
-#define vam_atm_mbox_bitmap() (_voucher_activity_heap->vam_atm_mbox_bitmap)
#define vam_pressure_locked_bitmap() \
(_voucher_activity_heap->vam_pressure_locked_bitmap)
#define vam_buffer(i) ((void*)((char*)_voucher_activity_heap + \
voucher_activity_trace_id_t trace_id, uint64_t location,
_voucher_activity_buffer_header_t buffer);
static _voucher_atm_t _voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id);
-static voucher_activity_id_t _voucher_atm_nested_atm_id_make(void);
+static void _voucher_activity_firehose_wait(_voucher_activity_t act,
+ _voucher_activity_buffer_header_t buffer);
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_voucher_default_activity_buffer_limit()
{
+#if 0 // FIXME: tune buffer chain sizes
switch (_voucher_activity_mode) {
case voucher_activity_mode_debug:
case voucher_activity_mode_stream:
// (twice as much as non-default activities)
return MAX(_voucher_activity_buffers_per_heap / 32, 3) - 1;
}
-#if TARGET_OS_EMBEDDED
- // Low-profile modes: Default activity can use a total of 3 buffers.
- return 2;
-#else
- // Low-profile modes: Default activity can use a total of 8 buffers.
- return 7;
#endif
+ // Low-profile modes: Default activity can use a total of 4 buffers.
+ return 3;
}
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_voucher_activity_buffer_limit()
{
+#if 0 // FIXME: tune buffer chain sizes
switch (_voucher_activity_mode) {
case voucher_activity_mode_debug:
case voucher_activity_mode_stream:
// of the entire heap.
return MAX(_voucher_activity_buffers_per_heap / 64, 2) - 1;
}
-#if TARGET_OS_EMBEDDED
+#endif
// Low-profile modes: Each activity can use a total of 2 buffers.
return 1;
-#else
- // Low-profile modes: Each activity can use a total of 4 buffers.
- return 3;
-#endif
}
// The two functions above return the number of *additional* buffers activities
return index;
}
-DISPATCH_ALWAYS_INLINE
+DISPATCH_ALWAYS_INLINE DISPATCH_UNUSED
static inline size_t
_voucher_activity_bitmap_set_first_unset_bit(
_voucher_activity_bitmap_t volatile bitmap)
return _voucher_activity_bitmap_set_first_unset_bit_upto(bitmap, UINT_MAX);
}
-
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_bitmap_clear_bit(
task_trace_memory_info_data_t trace_memory_info = {
.user_memory_address = vm_addr,
.buffer_size = vm_size,
- .mailbox_array_size = sizeof(heap->vam_kernel_metadata),
};
kr = task_set_info(mach_task_self(), TASK_TRACE_MEMORY_INFO,
(task_info_t)&trace_memory_info, TASK_TRACE_MEMORY_INFO_COUNT);
return;
}
heap = (void*)vm_addr;
- heap->vam_self_metadata.vasm_baseaddr = (void*)vm_addr;
- heap->vam_buffer_bitmap[0] = 0xf; // first four buffers are reserved
+ heap->vasm_baseaddr = (void*)vm_addr;
+ heap->vam_buffer_bitmap[0] = 0x7; // first three buffers are reserved
uint32_t i;
for (i = 0; i < _voucher_activity_hash_size; i++) {
TAILQ_INIT(&heap->vam_activities[i]);
TAILQ_INIT(&heap->vam_atms[i]);
}
- uint32_t subid_max = VATM_SUBID_MAX;
- if (_voucher_atm_subid_bits &&
- _voucher_atm_subid_bits < VATM_SUBID_MAXBITS) {
- subid_max = MIN(VATM_SUBID_BITS2MAX(_voucher_atm_subid_bits),
- VATM_SUBID_MAX);
- }
- heap->vam_base_atm_subid_max = subid_max;
- _voucher_activity_lock_init(&heap->vam_base_atm_lock);
- _voucher_activity_lock_init(&heap->vam_nested_atm_lock);
_voucher_activity_lock_init(&heap->vam_atms_lock);
_voucher_activity_lock_init(&heap->vam_activities_lock);
_voucher_activity_heap = heap;
_voucher_atm_t vatm = _voucher_atm_create(0, 0);
dispatch_assert(vatm->vatm_kvoucher);
- heap->vam_default_activity_atm = vatm;
- _voucher_activity_buffer_header_t buffer = vam_buffer(3); // reserved index
+ _voucher_atm_retain(vatm);
+
+ _voucher_activity_buffer_header_t buffer = vam_buffer(2); // reserved index
// consumes vatm reference:
- _voucher_activity_t va = _voucher_activity_create_with_atm(vatm,
- VATM_ACTID(vatm, _voucher_default_activity_subid), 0, 0, buffer);
+ _voucher_activity_t va = _voucher_activity_create_with_atm(vatm, 0, 0, 0,
+ buffer);
dispatch_assert(va);
va->va_buffer_limit = _voucher_default_activity_buffer_limit();
_voucher_activity_default = va;
- heap->vam_base_atm = _voucher_atm_create(0, 0);
- heap->vam_nested_atm_id = _voucher_atm_nested_atm_id_make();
+ _voucher_task_atm = vatm;
}
static void
return _voucher_activity_heap->vam_client_metadata;
}
+static _voucher_activity_buffer_hook_t _voucher_activity_buffer_hook;
+
+void
+voucher_activity_buffer_hook_install_4libtrace(
+ _voucher_activity_buffer_hook_t hook)
+{
+ if (dispatch_atomic_cmpxchg(&_voucher_activity_buffer_hook, NULL,
+ (void*)hook, release)) return;
+ DISPATCH_CLIENT_CRASH("_voucher_activity_buffer_hook_install_4libtrace " \
+ "called more than once");
+}
+
+#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
+#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer) \
+ _dispatch_debug("activity buffer %s (%p)", #reason, buffer)
+#else
+#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer)
+#endif
+
+#define VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(reason, buffer) \
+ if (buffer) { VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer); \
+ if (slowpath(_voucher_activity_buffer_hook)) { \
+ _voucher_activity_buffer_hook( \
+ _voucher_activity_buffer_hook_reason_##reason, (buffer)); \
+ } }
+
DISPATCH_ALWAYS_INLINE
static inline _voucher_activity_buffer_header_t
_voucher_activity_heap_buffer_alloc(void)
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_buffer_init(_voucher_activity_t act,
- _voucher_activity_buffer_header_t buffer, bool reuse)
+ _voucher_activity_buffer_header_t buffer, bool initial)
{
- if (!reuse) {
- buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header;
- buffer->vabh_activity_id = act->va_id;
- }
- buffer->vabh_timestamp = _voucher_activity_timestamp();
- buffer->vabh_next_tracepoint_idx = 1;
- buffer->vabh_sequence_no = dispatch_atomic_inc2o(act, va_max_sequence_no,
- relaxed);
+ _voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer;
+ _voucher_activity_tracepoint_init_with_id(vat, act->va_trace_id,
+ act->va_location, !initial);
+ buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header |
+ _voucher_activity_trace_flag_activity |
+ (initial ? _voucher_activity_trace_flag_start : 0);
+ buffer->vabh_activity_id = act->va_id;
+ buffer->vabh_pos.vabp_atomic_pos = 0;
+ buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx = 1;
}
static _voucher_activity_buffer_header_t
_voucher_activity_buffer_header_t buffer;
_voucher_activity_lock_lock(va_buffers_lock(act)); // TODO: revisit locking
buffer = act->va_current_buffer;
- if (buffer != current) goto out;
+ if (buffer != current) {
+ _voucher_activity_lock_unlock(va_buffers_lock(act));
+ return buffer;
+ }
buffer = TAILQ_FIRST(&act->va_buffers);
- if (buffer) {
- _voucher_activity_buffer_init(act, buffer, true);
- if (buffer != TAILQ_LAST(&act->va_buffers,
+ if (buffer != TAILQ_LAST(&act->va_buffers,
_voucher_activity_buffer_list_s)) {
- TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
- TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
- }
+ TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
+ TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
}
- if (!dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
+ _voucher_activity_lock_unlock(va_buffers_lock(act));
+ if (_voucher_activity_buffer_is_full(buffer)) {
+ _voucher_activity_firehose_wait(act, buffer);
+ }
+ if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
¤t, release)) {
- if (buffer) {
- TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
- _voucher_activity_heap_buffer_free(buffer);
+ if (_voucher_activity_buffer_mark_full(current)) {
+ _voucher_activity_firehose_push(act, current);
}
+ _dispatch_voucher_activity_debug("buffer reuse %p", act, buffer);
+ } else {
buffer = current;
}
-out:
- _voucher_activity_lock_unlock(va_buffers_lock(act));
- _dispatch_voucher_activity_debug("buffer reuse %p", act, buffer);
return buffer;
}
_voucher_activity_buffer_header_t current)
{
_voucher_activity_buffer_header_t buffer = NULL;
- if (act->va_max_sequence_no < act->va_buffer_limit) {
+ if (act->va_buffer_count < act->va_buffer_limit) {
buffer = _voucher_activity_heap_buffer_alloc();
+ if (buffer && dispatch_atomic_inc2o(act, va_buffer_count, relaxed) >
+ act->va_buffer_limit) {
+ dispatch_atomic_dec2o(act, va_buffer_count, relaxed);
+ _voucher_activity_heap_buffer_free(buffer);
+ buffer = NULL;
+ }
}
if (!buffer) return _voucher_activity_buffer_alloc_slow(act, current);
_voucher_activity_buffer_init(act, buffer, false);
_voucher_activity_lock_lock(va_buffers_lock(act));
TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
_voucher_activity_lock_unlock(va_buffers_lock(act));
+ if (_voucher_activity_buffer_mark_full(current)) {
+ _voucher_activity_firehose_push(act, current);
+ }
+ _dispatch_voucher_activity_debug("buffer alloc %p", act, buffer);
} else {
+ dispatch_atomic_dec2o(act, va_buffer_count, relaxed);
_voucher_activity_heap_buffer_free(buffer);
buffer = current;
}
- _dispatch_voucher_activity_debug("buffer alloc %p", act, buffer);
return buffer;
}
} } while (0);
static void _voucher_activity_dispose(_voucher_activity_t act);
-static _voucher_activity_t _voucher_atm_activity_mark_used(
- _voucher_activity_t act);
-static void _voucher_atm_activity_mark_unused(_voucher_activity_t act);
static _voucher_atm_t _voucher_atm_copy(atm_aid_t atm_id);
static inline void _voucher_atm_release(_voucher_atm_t vatm);
-static void _voucher_atm_activity_insert(_voucher_atm_t vatm,
- _voucher_activity_t act);
-static void _voucher_atm_activity_remove(_voucher_activity_t act);
static atm_aid_t _voucher_mach_voucher_get_atm_id(mach_voucher_t kv);
DISPATCH_ALWAYS_INLINE
static inline bool
-_voucher_activity_copy(_voucher_activity_t act)
+_voucher_activity_try_retain(_voucher_activity_t act)
{
- int use_cnt = dispatch_atomic_inc2o(act, va_use_count, relaxed);
+ // not using _os_object_refcnt* because we don't need barriers:
+ // activities are immutable and are in a hash table with a lock
+ int use_cnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed);
_dispatch_voucher_activity_debug("retain -> %d", act, use_cnt + 1);
if (slowpath(use_cnt < 0)) {
_dispatch_voucher_activity_debug("overrelease", act);
DISPATCH_CRASH("Activity overrelease");
}
- return (use_cnt == 0);
+ return use_cnt > 0;
}
DISPATCH_ALWAYS_INLINE
static inline _voucher_activity_t
_voucher_activity_retain(_voucher_activity_t act)
{
- if (_voucher_activity_copy(act)) {
- _dispatch_voucher_activity_debug("invalid resurrection", act);
- DISPATCH_CRASH("Invalid activity resurrection");
+ if (slowpath(!_voucher_activity_try_retain(act))) {
+ _dispatch_voucher_activity_debug("resurrection", act);
+ DISPATCH_CRASH("Activity resurrection");
}
return act;
}
static inline void
_voucher_activity_release(_voucher_activity_t act)
{
- int use_cnt = dispatch_atomic_dec2o(act, va_use_count, relaxed);
+ // not using _os_object_refcnt* because we don't need barriers:
+ // activities are immutable and are in a hash table with a lock
+ int use_cnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed);
_dispatch_voucher_activity_debug("release -> %d", act, use_cnt + 1);
if (fastpath(use_cnt >= 0)) {
return;
_dispatch_voucher_activity_debug("overrelease", act);
DISPATCH_CRASH("Activity overrelease");
}
- return _voucher_atm_activity_mark_unused(act);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline _voucher_activity_t
-_voucher_activity_atm_retain(_voucher_activity_t act)
-{
- int refcnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed);
- _dispatch_voucher_activity_debug("atm retain -> %d", act, refcnt + 1);
- if (slowpath(refcnt <= 0)) {
- _dispatch_voucher_activity_debug("atm resurrection", act);
- DISPATCH_CRASH("Activity ATM resurrection");
- }
- return act;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_voucher_activity_atm_release(_voucher_activity_t act)
-{
- int refcnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed);
- _dispatch_voucher_activity_debug("atm release -> %d", act, refcnt + 1);
- if (fastpath(refcnt >= 0)) {
- return;
- }
- if (slowpath(refcnt < -1)) {
- _dispatch_voucher_activity_debug("atm overrelease", act);
- DISPATCH_CRASH("Activity ATM overrelease");
- }
- return _voucher_activity_dispose(act);
-}
-
-static inline _voucher_activity_t
-_voucher_activity_get(voucher_t v)
-{
- _voucher_activity_t act;
- act = v && v->v_activity ? v->v_activity : _voucher_activity_default;
- return act;
+ _voucher_activity_remove(act);
+ _voucher_activity_dispose(act);
}
static _voucher_activity_t
-_voucher_activity_find(voucher_activity_id_t va_id, uint32_t hash)
+_voucher_activity_find_and_retain(voucher_activity_id_t va_id, uint32_t hash)
{
+ // not using _os_object_refcnt* because we don't need barriers:
+ // activities are immutable and are in a hash table with a lock
+ //
// assumes vam_activities_lock held
_voucher_activity_t act;
- TAILQ_FOREACH(act, vam_activities(hash), va_list){
- if (act->va_id == va_id) break;
+ TAILQ_FOREACH(act, vam_activities(hash), va_list) {
+ if (act->va_id == va_id) {
+ if (fastpath(_voucher_activity_try_retain(act))) {
+ return act;
+ }
+
+ // <rdar://problem/20468375> disallow resurrection
+ dispatch_atomic_dec2o(act, va_refcnt, relaxed);
+ _dispatch_voucher_activity_debug("undo resurrection", act);
+ }
}
- return act;
+ return NULL;
}
static _voucher_activity_t
_voucher_activity_copy_from_id(voucher_activity_id_t va_id)
{
- bool resurrect = false;
uint32_t hash = VACTID_HASH(va_id);
_voucher_activity_lock_lock(vam_activities_lock());
- _voucher_activity_t act = _voucher_activity_find(va_id, hash);
+ _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash);
if (act) {
- resurrect = _voucher_activity_copy(act);
_dispatch_voucher_activity_debug("copy from id 0x%llx", act, va_id);
}
_voucher_activity_lock_unlock(vam_activities_lock());
- if (resurrect) return _voucher_atm_activity_mark_used(act);
return act;
}
static _voucher_activity_t
_voucher_activity_try_insert(_voucher_activity_t act_new)
{
- bool resurrect = false;
voucher_activity_id_t va_id = act_new->va_id;
uint32_t hash = VACTID_HASH(va_id);
_voucher_activity_lock_lock(vam_activities_lock());
- _voucher_activity_t act = _voucher_activity_find(va_id, hash);
+ _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash);
if (act) {
- resurrect = _voucher_activity_copy(act);
_dispatch_voucher_activity_debug("try insert: failed (%p)", act,act_new);
} else {
if (slowpath(_TAILQ_IS_ENQUEUED(act_new, va_list))) {
_dispatch_voucher_activity_debug("try insert: succeeded", act_new);
}
_voucher_activity_lock_unlock(vam_activities_lock());
- if (resurrect) return _voucher_atm_activity_mark_used(act);
return act;
}
-static bool
-_voucher_activity_try_remove(_voucher_activity_t act)
+static void
+_voucher_activity_remove(_voucher_activity_t act)
{
- bool r;
voucher_activity_id_t va_id = act->va_id;
uint32_t hash = VACTID_HASH(va_id);
+
_voucher_activity_lock_lock(vam_activities_lock());
- if (slowpath(!va_id)) {
+ if (slowpath(!va_id || !_TAILQ_IS_ENQUEUED(act, va_list))) {
_dispatch_voucher_activity_debug("corruption", act);
DISPATCH_CRASH("Activity corruption");
}
- if ((r = (dispatch_atomic_load2o(act, va_use_count, seq_cst) < 0 &&
- _TAILQ_IS_ENQUEUED(act, va_list)))) {
- TAILQ_REMOVE(vam_activities(hash), act, va_list);
- _TAILQ_MARK_NOT_ENQUEUED(act, va_list);
- act->va_list.tqe_next = (void*)~0ull;
- }
- _dispatch_voucher_activity_debug("try remove: %s", act, r ? "succeeded" :
- "failed");
+ TAILQ_REMOVE(vam_activities(hash), act, va_list);
+ _TAILQ_MARK_NOT_ENQUEUED(act, va_list);
+ act->va_list.tqe_next = (void*)~0ull;
+ _dispatch_voucher_activity_debug("remove", act);
_voucher_activity_lock_unlock(vam_activities_lock());
- return r;
}
static _voucher_activity_t
_voucher_atm_release(vatm); // consume vatm reference
return NULL;
}
- if (!trace_id) trace_id = _voucher_activity_trace_id_release;
- _voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer;
- _voucher_activity_tracepoint_init_with_id(vat, trace_id, ~1ull);
- _voucher_activity_t act = (_voucher_activity_t)buffer;
- act->va_flags = _voucher_activity_trace_flag_buffer_header |
- _voucher_activity_trace_flag_activity |
- _voucher_activity_trace_flag_start |
- _voucher_activity_trace_flag_wide_first;
- act->vabh_next_tracepoint_idx = sizeof(*act)/sizeof(*vat);
- act->va_max_sequence_no = 0;
- act->va_id = va_id ? va_id : VATM_ACTID(vatm, 0);
- act->va_use_count = 0;
+ _voucher_activity_t act = _dispatch_calloc(1ul,
+ sizeof(struct _voucher_activity_s));
+ act->va_id = va_id;
+ act->va_trace_id = trace_id ? trace_id : _voucher_activity_trace_id_release;
+ act->va_location = location;
act->va_buffer_limit = _voucher_activity_buffer_limit();
TAILQ_INIT(&act->va_buffers);
- act->va_flags2 = _voucher_activity_trace_flag_activity |
- _voucher_activity_trace_flag_wide_second;
-#if __LP64__
- act->va_flags3 = act->va_flags2;
-#endif
- act->va_refcnt = 0;
- act->va_location = location;
act->va_current_buffer = buffer;
act->va_atm = vatm; // transfer vatm reference
_voucher_activity_lock_init(va_buffers_lock(act));
+ if (dispatch_assume_zero(pthread_mutex_init(&act->va_mutex, NULL)) ||
+ dispatch_assume_zero(pthread_cond_init(&act->va_cond, NULL))) {
+ DISPATCH_CLIENT_CRASH("Could not initialize activity");
+ }
_TAILQ_MARK_NOT_ENQUEUED(act, va_list);
_TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list);
_TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list);
+
+ _voucher_activity_buffer_init(act, buffer, true);
+ TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
_voucher_activity_t actx = _voucher_activity_try_insert(act);
if (actx) {
_voucher_activity_dispose(act);
act = actx;
- } else {
- _voucher_atm_activity_insert(vatm, act);
}
_dispatch_voucher_activity_debug("create", act);
return act;
dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_used_list));
_voucher_activity_buffer_header_t buffer, tmp;
TAILQ_FOREACH_SAFE(buffer, &act->va_buffers, vabh_list, tmp) {
- _dispatch_voucher_activity_debug("buffer free %p", act, buffer);
+ if (buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx > 1) {
+ dispatch_assert(_voucher_activity_buffer_mark_full(buffer));
+ _voucher_activity_firehose_push(act, buffer);
+ }
TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
+ _dispatch_voucher_activity_debug("buffer free %p", act, buffer);
_voucher_activity_heap_buffer_free(buffer);
}
- buffer = (_voucher_activity_buffer_header_t)act;
- _voucher_activity_heap_buffer_free(buffer);
+ (void)dispatch_assume_zero(pthread_mutex_destroy(&act->va_mutex));
+ (void)dispatch_assume_zero(pthread_cond_destroy(&act->va_cond));
+ free(act);
}
+DISPATCH_NOINLINE
+void
+_voucher_activity_firehose_push(_voucher_activity_t act,
+ _voucher_activity_buffer_header_t buffer)
+{
+ if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) {
+ DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock");
+ }
+ _dispatch_voucher_activity_debug("firehose push %p", act, buffer);
+ // TODO: call firehose_push
+ VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(full, buffer);
+ _voucher_activity_buffer_init(act, buffer, false);
+ if (dispatch_assume_zero(pthread_cond_broadcast(&act->va_cond))) {
+ DISPATCH_CLIENT_CRASH("Activity corruption: cond_broadcast");
+ }
+ if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) {
+ DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock");
+ }
+}
+
+DISPATCH_NOINLINE
static void
-_voucher_activity_collect(_voucher_activity_t act)
+_voucher_activity_firehose_wait(_voucher_activity_t act,
+ _voucher_activity_buffer_header_t buffer)
{
- _dispatch_voucher_activity_debug("collect", act);
- if (_voucher_activity_try_remove(act)) {
- _voucher_atm_activity_remove(act);
+ if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) {
+ DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock");
+ }
+ while (_voucher_activity_buffer_is_full(buffer)) {
+ _dispatch_voucher_activity_debug("firehose wait %p", act, buffer);
+ if (dispatch_assume_zero(pthread_cond_wait(&act->va_cond,
+ &act->va_mutex))){
+ DISPATCH_CLIENT_CRASH("Activity corruption: cond_wait");
+ }
+ }
+ if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) {
+ DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock");
}
}
_voucher_activity_heap_buffer_free(buffer);
return NULL;
}
- if (VACTID_BASEID(va_id) != VATMID2ACTID(atm_id)) va_id = 0;
// consumes vatm reference:
act = _voucher_activity_create_with_atm(vatm, va_id, 0, 0, buffer);
_dispatch_voucher_activity_debug("copy from kvoucher[0x%08x]", act, kv);
return act;
}
-#pragma mark -
-#pragma mark _voucher_atm_mailbox
-
-DISPATCH_ALWAYS_INLINE
-static inline atm_mailbox_offset_t
-_voucher_atm_mailbox_alloc(void)
-{
- atm_mailbox_offset_t mailbox_offset = MAILBOX_OFFSET_UNSET;
- size_t index;
- index = _voucher_activity_bitmap_set_first_unset_bit(vam_atm_mbox_bitmap());
- if (index < NO_BITS_WERE_UNSET) {
- mailbox_offset = index * sizeof(atm_mailbox_offset_t);
-#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
- _dispatch_debug("mailbox alloc %zd (%lld)", index, mailbox_offset);
-#endif
- }
- return mailbox_offset;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_voucher_atm_mailbox_free(atm_mailbox_offset_t mailbox_offset)
-{
- if (mailbox_offset == MAILBOX_OFFSET_UNSET) return;
- size_t index = (size_t)mailbox_offset / sizeof(atm_mailbox_offset_t);
-#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
- _dispatch_debug("mailbox free %zd (%lld)", index, mailbox_offset);
-#endif
- _voucher_activity_bitmap_clear_bit(vam_atm_mbox_bitmap(), index);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_voucher_atm_mailbox_set(atm_mailbox_offset_t mailbox_offset,
- atm_subaid32_t subaid, bool max_present)
-{
- if (mailbox_offset == MAILBOX_OFFSET_UNSET) return false;
- char *mailbox_base = (char*)_voucher_activity_heap->vam_kernel_metadata;
- atm_subaid32_t *mailbox = (atm_subaid32_t*)(mailbox_base + mailbox_offset);
- if (max_present) mailbox++; // second atm_subaid32_t in atm_mailbox_offset_t
- if (*mailbox == subaid) return false;
- *mailbox = subaid;
- return true;
-}
-
#pragma mark -
#pragma mark _voucher_atm_t
-static bool _voucher_atm_try_remove(_voucher_atm_t vatm);
+static void _voucher_atm_remove(_voucher_atm_t vatm);
static void _voucher_atm_dispose(_voucher_atm_t vatm, bool unregister);
-static inline void _voucher_atm_collect_if_needed(bool updated);
DISPATCH_ALWAYS_INLINE
-static inline _voucher_atm_t
-_voucher_atm_retain(_voucher_atm_t vatm)
+static inline bool
+_voucher_atm_try_retain(_voucher_atm_t vatm)
{
- // assumes vam_atms_lock or vam_base_atm_lock held
+ // not using _os_object_refcnt* because we don't need barriers:
+ // vouchers atm are immutable and are in a hash table with a lock
+ //
+ // assumes vam_atms_lock held
int refcnt = dispatch_atomic_inc2o(vatm, vatm_refcnt, relaxed);
_dispatch_voucher_atm_debug("retain -> %d", vatm, refcnt + 1);
if (slowpath(refcnt < 0)) {
_dispatch_voucher_atm_debug("overrelease", vatm);
DISPATCH_CRASH("ATM overrelease");
}
+ return refcnt > 0;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_atm_t
+_voucher_atm_retain(_voucher_atm_t vatm)
+{
+ if (slowpath(!_voucher_atm_try_retain(vatm))) {
+ _dispatch_voucher_atm_debug("resurrection", vatm);
+ DISPATCH_CRASH("ATM resurrection");
+ }
return vatm;
}
static inline void
_voucher_atm_release(_voucher_atm_t vatm)
{
+ // not using _os_object_refcnt* because we don't need barriers:
+	// vouchers atm are immutable and are in a hash table with a lock
int refcnt = dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed);
_dispatch_voucher_atm_debug("release -> %d", vatm, refcnt + 1);
if (fastpath(refcnt >= 0)) {
_dispatch_voucher_atm_debug("overrelease", vatm);
DISPATCH_CRASH("ATM overrelease");
}
- if (_voucher_atm_try_remove(vatm)) {
- _voucher_atm_dispose(vatm, true);
- }
+ _voucher_atm_remove(vatm);
+ _voucher_atm_dispose(vatm, true);
}
static _voucher_atm_t
-_voucher_atm_find(atm_aid_t atm_id, uint32_t hash)
+_voucher_atm_find_and_retain(atm_aid_t atm_id, uint32_t hash)
{
+ // not using _os_object_refcnt* because we don't need barriers:
+	// vouchers atm are immutable and are in a hash table with a lock
+ //
// assumes vam_atms_lock held
_voucher_atm_t vatm;
TAILQ_FOREACH(vatm, vam_atms(hash), vatm_list){
- if (vatm->vatm_id == atm_id) break;
+ if (vatm->vatm_id == atm_id) {
+ if (fastpath(_voucher_atm_try_retain(vatm))) {
+ return vatm;
+ }
+
+ // <rdar://problem/20468375> disallow resurrection
+ dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed);
+ _dispatch_voucher_atm_debug("undo resurrection", vatm);
+ }
}
- return vatm;
+ return NULL;
}
static _voucher_atm_t
{
uint32_t hash = VATMID_HASH(atm_id);
_voucher_activity_lock_lock(vam_atms_lock());
- _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash);
+ _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash);
if (vatm) {
- _voucher_atm_retain(vatm);
_dispatch_voucher_atm_debug("copy", vatm);
}
_voucher_activity_lock_unlock(vam_atms_lock());
atm_aid_t atm_id = vatm_new->vatm_id;
uint32_t hash = VATMID_HASH(atm_id);
_voucher_activity_lock_lock(vam_atms_lock());
- _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash);
+ _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash);
if (vatm) {
- _voucher_atm_retain(vatm);
_dispatch_voucher_atm_debug("try insert: failed (%p)", vatm, vatm_new);
} else {
if (slowpath(_TAILQ_IS_ENQUEUED(vatm_new, vatm_list))) {
return vatm;
}
-static bool
-_voucher_atm_try_remove(_voucher_atm_t vatm)
+static void
+_voucher_atm_remove(_voucher_atm_t vatm)
{
- bool r;
atm_aid_t atm_id = vatm->vatm_id;
uint32_t hash = VATMID_HASH(atm_id);
+
_voucher_activity_lock_lock(vam_atms_lock());
- if (slowpath(!atm_id)) {
+ if (slowpath(!atm_id || !_TAILQ_IS_ENQUEUED(vatm, vatm_list))) {
_dispatch_voucher_atm_debug("corruption", vatm);
DISPATCH_CRASH("ATM corruption");
}
- if ((r = (dispatch_atomic_load2o(vatm, vatm_refcnt, seq_cst) < 0 &&
- _TAILQ_IS_ENQUEUED(vatm, vatm_list)))) {
- TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list);
- _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list);
- vatm->vatm_list.tqe_next = (void*)~0ull;
- }
- _dispatch_voucher_atm_debug("try remove: %s", vatm, r ? "succeeded" :
- "failed");
+ TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list);
+ _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list);
+ vatm->vatm_list.tqe_next = (void*)~0ull;
+ _dispatch_voucher_atm_debug("remove", vatm);
_voucher_activity_lock_unlock(vam_atms_lock());
- return r;
-}
-
-static bool
-_voucher_atm_update_mailbox(_voucher_atm_t vatm)
-{
- // Update kernel mailbox with largest allocated subaid for this atm_id
- // assumes atm_activities_lock held
- _voucher_activity_t act = TAILQ_LAST(vatm_activities(vatm),
- _voucher_atm_activities_s);
- atm_subaid32_t subaid = act ? VACTID_SUBID(act->va_id) : 0;
- bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, true);
- if (r) {
- _dispatch_voucher_atm_debug("update max-present subaid 0x%x", vatm,
- subaid);
- }
- return r;
-}
-
-static bool
-_voucher_atm_update_used_mailbox(_voucher_atm_t vatm)
-{
- // Update kernel mailbox with smallest in-use subaid for this atm_id
- // assumes atm_activities_lock held
- _voucher_activity_t act = TAILQ_FIRST(vatm_used_activities(vatm));
- atm_subaid32_t subaid = act ? VACTID_SUBID(act->va_id) : ATM_SUBAID32_MAX;
- bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, false);
- if (r) {
- _dispatch_voucher_atm_debug("update min-used subaid 0x%x", vatm,
- subaid);
- }
- return r;
-}
-
-static void
-_voucher_atm_activity_insert(_voucher_atm_t vatm, _voucher_activity_t act)
-{
- _voucher_activity_lock_lock(vatm_activities_lock(vatm));
- if (!_TAILQ_IS_ENQUEUED(act, va_atm_list)) {
- _voucher_activity_ordered_insert(act, vatm_activities(vatm),
- va_atm_list);
- _voucher_atm_update_mailbox(vatm);
- }
- if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
- _voucher_activity_ordered_insert(act, vatm_used_activities(vatm),
- va_atm_used_list);
- _voucher_atm_update_used_mailbox(vatm);
- }
- _dispatch_voucher_activity_debug("atm insert", act);
- _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
-}
-
-static void
-_voucher_atm_activity_remove(_voucher_activity_t act)
-{
- _voucher_atm_t vatm = act->va_atm;
- _voucher_activity_lock_lock(vatm_activities_lock(vatm));
- _dispatch_voucher_activity_debug("atm remove", act);
- if (_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
- TAILQ_REMOVE(vatm_activities(vatm), act, va_atm_used_list);
- _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list);
- _voucher_atm_update_used_mailbox(vatm);
- }
- if (_TAILQ_IS_ENQUEUED(act, va_atm_list)) {
- TAILQ_REMOVE(vatm_activities(vatm), act, va_atm_list);
- _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list);
- _voucher_atm_update_mailbox(vatm);
- // Balance initial creation refcnt. Caller must hold additional
- // reference to ensure this does not release vatm before the unlock,
- // see _voucher_atm_activity_collect
- _voucher_activity_atm_release(act);
- }
- _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
-}
-
-static _voucher_activity_t
-_voucher_atm_activity_mark_used(_voucher_activity_t act)
-{
- _voucher_atm_t vatm = act->va_atm;
- _voucher_activity_lock_lock(vatm_activities_lock(vatm));
- if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
- _voucher_activity_ordered_insert(act, vatm_used_activities(vatm),
- va_atm_used_list);
- _voucher_atm_update_used_mailbox(vatm);
- _dispatch_voucher_activity_debug("mark used", act);
- }
- _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
- return act;
-}
-
-static void
-_voucher_atm_activity_mark_unused(_voucher_activity_t act)
-{
- bool atm_collect = false, updated = false;
- _voucher_atm_t vatm = act->va_atm;
- _voucher_activity_lock_lock(vatm_activities_lock(vatm));
- if (_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
- _dispatch_voucher_activity_debug("mark unused", act);
- TAILQ_REMOVE(&vatm->vatm_used_activities, act, va_atm_used_list);
- _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list);
- atm_collect = true;
- _voucher_atm_retain(vatm);
- updated = _voucher_atm_update_used_mailbox(vatm);
- }
- _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
- if (atm_collect) {
- _voucher_atm_release(vatm);
- _voucher_atm_collect_if_needed(updated);
- }
-}
-
-static void
-_voucher_atm_activity_collect(_voucher_atm_t vatm, atm_subaid32_t min_subaid)
-{
- _dispatch_voucher_atm_debug("collect min subaid 0x%x", vatm, min_subaid);
- voucher_activity_id_t min_va_id = VATM_ACTID(vatm, min_subaid);
- _voucher_activity_t act;
- do {
- _voucher_activity_lock_lock(vatm_activities_lock(vatm));
- TAILQ_FOREACH(act, vatm_activities(vatm), va_atm_list) {
- if (act->va_id >= min_va_id) {
- act = NULL;
- break;
- }
- if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) {
- _voucher_activity_atm_retain(act);
- break;
- }
- }
- _voucher_activity_lock_unlock(vatm_activities_lock(vatm));
- if (act) {
- _voucher_activity_collect(act);
- _voucher_activity_atm_release(act);
- }
- } while (act);
}
DISPATCH_NOINLINE
static void
-_voucher_atm_collect(void)
+_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd)
{
- _voucher_atm_t vatms[_voucher_atm_mailboxes], vatm;
- atm_aid_t aids[_voucher_atm_mailboxes];
- mach_atm_subaid_t subaids[_voucher_atm_mailboxes];
- uint32_t i, a = 0, s;
+ mach_voucher_t kv = _voucher_get_atm_mach_voucher(_voucher_get());
+ if (!kv) return;
- _voucher_activity_lock_lock(vam_atms_lock());
- for (i = 0; i < _voucher_activity_hash_size; i++) {
- TAILQ_FOREACH(vatm, vam_atms(i), vatm_list){
- if (vatm == _voucher_activity_heap->vam_default_activity_atm ||
- vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) continue;
- _dispatch_voucher_atm_debug("find min subaid", vatm);
- vatms[a] = _voucher_atm_retain(vatm);
- aids[a] = vatm->vatm_id;
- if (++a == _voucher_atm_mailboxes) goto out;
+ mach_atm_subaid_t subaid = 0;
+ voucher_t v = _voucher_get();
+ if (v) {
+ unsigned int activities = v->v_activities;
+ voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
+ if (activities) {
+ subaid = activity_ids[0];
}
}
-out:
- _voucher_activity_lock_unlock(vam_atms_lock());
- if (!a) return;
- kern_return_t kr;
- mach_voucher_t kv = vatms[0]->vatm_kvoucher;
- mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&aids;
- mach_voucher_attr_content_size_t kvc_in_size = sizeof(atm_aid_t) * a;
- mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaids;
- mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t)*a;
- kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
- ATM_FIND_MIN_SUB_AID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size);
- DISPATCH_VERIFY_MIG(kr);
- (void)dispatch_assume_zero(kr);
- s = kvc_out_size / sizeof(mach_atm_subaid_t);
-#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
- _dispatch_debug("found min subaids (%u out of %u)", s, a);
-#endif
- for (i = 0; i < a; i++) {
- if (i < s) _voucher_atm_activity_collect(vatms[i],
- (atm_subaid32_t)subaids[i]);
- _voucher_atm_release(vatms[i]);
- }
-}
-
-static inline void
-_voucher_atm_collect_if_needed(bool updated)
-{
- long level;
- if (updated) {
- level = dispatch_atomic_add(&_voucher_atm_collect_level, 2ul, relaxed);
- } else {
- level = _voucher_atm_collect_level;
- if (!level) return;
- }
- if (level & 1 || level <= _voucher_atm_collect_threshold) return;
- if (!dispatch_atomic_cmpxchg(&_voucher_atm_collect_level, level, level + 1,
- acquire)) return;
-#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
- _dispatch_debug("atm collect: reached level %ld", level/2);
-#endif
- if (slowpath(level < 0)) {
- DISPATCH_CRASH("ATM collection level corruption");
- }
- _voucher_atm_collect();
- dispatch_atomic_sub(&_voucher_atm_collect_level, level + 1, release);
-}
-
-DISPATCH_NOINLINE
-static void
-_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd)
-{
- _voucher_activity_t act = _voucher_activity_get(_voucher_get());
- mach_voucher_t kv = _voucher_activity_get_atm_mach_voucher(act);
- if (!kv) return;
kern_return_t kr;
- mach_atm_subaid_t subaid = VACTID_SUBID(act->va_id);
mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&subaid;
mach_voucher_attr_content_size_t kvc_in_size = sizeof(mach_atm_subaid_t);
mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaid;
return kv;
}
-static void
-_voucher_atm_mailbox_mach_voucher_register(_voucher_atm_t vatm,
- mach_voucher_t kv)
+static mach_voucher_t
+_voucher_atm_mach_voucher_copy(mach_voucher_t akv)
{
- _dispatch_voucher_atm_debug("mailbox register %lld with kvoucher[0x%08x]",
- vatm, vatm->vatm_mailbox_offset, kv);
kern_return_t kr;
- mach_voucher_t akv;
- atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset;
- mach_voucher_attr_recipe_t vr;
- size_t vr_size;
- static const mach_voucher_attr_recipe_data_t atm_register_recipe = {
+ mach_voucher_t kv;
+ const mach_voucher_attr_recipe_data_t atm_copy_recipe = {
.key = MACH_VOUCHER_ATTR_KEY_ATM,
- .command = MACH_VOUCHER_ATTR_ATM_REGISTER,
- .content_size = sizeof(offset),
+ .command = MACH_VOUCHER_ATTR_COPY,
+ .previous_voucher = akv,
};
- vr_size = sizeof(atm_register_recipe) + atm_register_recipe.content_size;
- vr = alloca(vr_size);
- *vr = atm_register_recipe;
- vr->previous_voucher = kv;
- memcpy(&vr->content, &offset, sizeof(offset));
- kr = _voucher_create_mach_voucher(vr, vr_size, &akv);
+ kr = _voucher_create_mach_voucher(&atm_copy_recipe,
+ sizeof(atm_copy_recipe), &kv);
if (dispatch_assume_zero(kr)) {
- DISPATCH_CLIENT_CRASH("Could not register ATM ID");
+ DISPATCH_CLIENT_CRASH("Could not copy ATM mach voucher");
}
- if (!vatm->vatm_kvoucher) {
- vatm->vatm_kvoucher = akv;
- } else {
-#if !RDAR_17510224
- if (akv != vatm->vatm_kvoucher) {
- DISPATCH_CRASH("Unexpected mach voucher returned by ATM ID "
- "registration");
- }
- _voucher_dealloc_mach_voucher(akv);
-#else
- DISPATCH_CRASH("Registered invalid ATM object");
-#endif
- }
- _dispatch_voucher_atm_debug("mailbox registered %lld", vatm,
- vatm->vatm_mailbox_offset);
+ _dispatch_kvoucher_debug("copy atm voucher from [0x%08x]", kv, akv);
+ return kv;
}
static void
-_voucher_atm_mailbox_register(_voucher_atm_t vatm)
+_voucher_atm_register(_voucher_atm_t vatm)
{
mach_voucher_t kv = vatm->vatm_kvoucher;
if (!kv) return;
-#if !RDAR_17510224
- _voucher_atm_mailbox_mach_voucher_register(vatm, kv);
-#else // RDAR_17510224
- _dispatch_voucher_atm_debug("mailbox register %lld", vatm,
- vatm->vatm_mailbox_offset);
kern_return_t kr;
- atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset;
- mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset;
- mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset);
+ atm_guard_t gen =
+ dispatch_atomic_inc(&_voucher_atm_generation, relaxed);
+ _dispatch_voucher_atm_debug("atm register %lld", vatm, gen);
+ mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen;
+ mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen);
mach_voucher_attr_content_t kvc_out = NULL;
mach_voucher_attr_content_size_t kvc_out_size = 0;
kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
ATM_ACTION_REGISTER, kvc_in, kvc_in_size, kvc_out,
&kvc_out_size);
DISPATCH_VERIFY_MIG(kr);
- if (dispatch_assume_zero(kr)) {
+ if (kr) {
DISPATCH_CLIENT_CRASH("Could not register ATM ID");
}
- _dispatch_voucher_atm_debug("mailbox registered %lld", vatm,
- vatm->vatm_mailbox_offset);
-#endif // RDAR_17510224
+ vatm->vatm_generation = gen;
+ _dispatch_voucher_atm_debug("atm registered %lld", vatm,
+ vatm->vatm_generation);
}
-static bool
-_voucher_atm_mailbox_unregister(_voucher_atm_t vatm)
+static void
+_voucher_atm_unregister(_voucher_atm_t vatm)
{
- if (vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) return false;
- _dispatch_voucher_atm_debug("mailbox unregister %lld", vatm,
- vatm->vatm_mailbox_offset);
+ _dispatch_voucher_atm_debug("atm unregister %lld", vatm,
+ vatm->vatm_generation);
mach_voucher_t kv = vatm->vatm_kvoucher;
dispatch_assert(kv);
kern_return_t kr;
- atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset;
- mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset;
- mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset);
+ atm_guard_t gen = vatm->vatm_generation;
+ mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen;
+ mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen);
mach_voucher_attr_content_t kvc_out = NULL;
mach_voucher_attr_content_size_t kvc_out_size = 0;
kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
DISPATCH_VERIFY_MIG(kr);
if (kr && kr != KERN_INVALID_VALUE) {
(void)dispatch_assume_zero(kr);
- DISPATCH_CLIENT_CRASH("Could not unregister ATM ID");
}
- _dispatch_voucher_atm_debug("mailbox unregistered %lld", vatm,
- vatm->vatm_mailbox_offset);
- return true;
+ _dispatch_voucher_atm_debug("atm unregistered %lld", vatm,
+ vatm->vatm_generation);
}
static _voucher_atm_t
_voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id)
{
- atm_mailbox_offset_t mailbox_offset = _voucher_atm_mailbox_alloc();
- if (kv && mailbox_offset == MAILBOX_OFFSET_UNSET) return NULL;
_voucher_atm_t vatm = _dispatch_calloc(1ul, sizeof(struct _voucher_atm_s));
- if (!kv) {
- kv = _voucher_atm_mach_voucher_create(&atm_id);
- if (mailbox_offset == MAILBOX_OFFSET_UNSET) {
- _voucher_dealloc_mach_voucher(kv);
- } else {
- vatm->vatm_kvoucher = kv;
- }
- kv = MACH_VOUCHER_NULL;
- }
+ kv = kv ? _voucher_atm_mach_voucher_copy(kv) :
+ _voucher_atm_mach_voucher_create(&atm_id);
+ vatm->vatm_kvoucher = kv;
vatm->vatm_id = atm_id;
- vatm->vatm_mailbox_offset = mailbox_offset;
- _voucher_activity_lock_init(vatm_activities_lock(vatm));
- TAILQ_INIT(&vatm->vatm_activities);
- TAILQ_INIT(&vatm->vatm_used_activities);
- _voucher_atm_mailbox_set(mailbox_offset, 0, true);
- _voucher_atm_mailbox_set(mailbox_offset, ATM_SUBAID32_MAX, false);
_voucher_atm_t vatmx = _voucher_atm_try_insert(vatm);
if (vatmx) {
_voucher_atm_dispose(vatm, false);
vatm = vatmx;
- } else if (kv) {
- _voucher_atm_mailbox_mach_voucher_register(vatm, kv);
} else {
- _voucher_atm_mailbox_register(vatm);
+ _voucher_atm_register(vatm);
}
_dispatch_voucher_atm_debug("create with kvoucher[0x%08x]", vatm, kv);
return vatm;
_voucher_atm_dispose(_voucher_atm_t vatm, bool unregister)
{
_dispatch_voucher_atm_debug("dispose", vatm);
- dispatch_assert(TAILQ_EMPTY(&vatm->vatm_activities));
- dispatch_assert(TAILQ_EMPTY(&vatm->vatm_used_activities));
if (slowpath(_TAILQ_IS_ENQUEUED(vatm, vatm_list))) {
_dispatch_voucher_atm_debug("corruption", vatm);
DISPATCH_CRASH("ATM corruption");
}
vatm->vatm_list.tqe_next = DISPATCH_OBJECT_LISTLESS;
- bool free_mailbox = (vatm->vatm_mailbox_offset != MAILBOX_OFFSET_UNSET);
if (vatm->vatm_kvoucher) {
- if (unregister) free_mailbox = _voucher_atm_mailbox_unregister(vatm);
+ if (unregister) _voucher_atm_unregister(vatm);
_voucher_dealloc_mach_voucher(vatm->vatm_kvoucher);
vatm->vatm_kvoucher = MACH_VOUCHER_NULL;
}
- if (free_mailbox) {
- _voucher_atm_mailbox_free(vatm->vatm_mailbox_offset);
- vatm->vatm_mailbox_offset = MAILBOX_OFFSET_UNSET;
- }
free(vatm);
}
-static inline mach_voucher_t
-_voucher_activity_get_atm_mach_voucher(_voucher_activity_t act)
-{
- mach_voucher_t kv;
- kv = act && act->va_atm ? act->va_atm->vatm_kvoucher : MACH_VOUCHER_NULL;
- return kv;
-}
-
DISPATCH_NOINLINE
-static _voucher_atm_t
-_voucher_atm_base_copy_and_activity_id_make(voucher_activity_id_t *va_id_ptr)
-{
- _voucher_atm_subid_t subid;
- _voucher_atm_t vatm, vatm_old = NULL, vatm_new = NULL;
- if (_voucher_activity_heap->vam_base_atm_subid_max == 1) {
- vatm = _voucher_atm_create(0, 0);
- subid = 1;
- goto out;
- }
- _voucher_activity_lock_lock(vam_base_atm_lock());
- vatm = _voucher_activity_heap->vam_base_atm;
-retry:
- _voucher_atm_retain(vatm);
- subid = _voucher_activity_heap->vam_base_atm_subid;
- if (subid++ >= _voucher_activity_heap->vam_base_atm_subid_max) {
- _voucher_activity_lock_unlock(vam_base_atm_lock());
- if (!vatm_new) vatm_new = _voucher_atm_create(0, 0);
- _voucher_activity_lock_lock(vam_base_atm_lock());
- _voucher_atm_release(vatm);
- vatm_old = vatm;
- vatm = _voucher_activity_heap->vam_base_atm;
- if (vatm != vatm_old) {
- vatm_old = NULL;
- goto retry;
- }
- _voucher_activity_heap->vam_base_atm = vatm = vatm_new;
- _voucher_activity_heap->vam_base_atm_subid = subid = 1;
- vatm_new = NULL;
- _voucher_atm_retain(vatm);
- _dispatch_voucher_atm_debug("base replace", vatm);
- } else {
- _voucher_activity_heap->vam_base_atm_subid = subid;
- _dispatch_voucher_atm_debug("base copy", vatm);
- }
- _voucher_activity_lock_unlock(vam_base_atm_lock());
- if (vatm_old) _voucher_atm_release(vatm_old);
- if (vatm_new) _voucher_atm_release(vatm_new);
-out:
- *va_id_ptr = VATM_ACTID(vatm, subid);
- return vatm;
-}
-
static voucher_activity_id_t
-_voucher_atm_nested_atm_id_make(void)
+_voucher_atm_subid_make(_voucher_atm_t vatm, voucher_activity_flag_t flags)
{
- atm_aid_t atm_id;
- mach_voucher_t kv = _voucher_atm_mach_voucher_create(&atm_id);
- _voucher_dealloc_mach_voucher(kv); // just need the unique ID
- return VATMID2ACTID(atm_id);
-}
-
-static voucher_activity_id_t
-_voucher_atm_nested_activity_id_make(void)
-{
- voucher_activity_id_t va_id, va_id_old, va_id_new;
- _voucher_atm_subid_t subid;
- _voucher_activity_lock_lock(vam_nested_atm_lock());
- va_id = _voucher_activity_heap->vam_nested_atm_id;
-retry:
- subid = _voucher_activity_heap->vam_nested_atm_subid;
- if (subid++ >= VATM_SUBID_MAX) {
- _voucher_activity_lock_unlock(vam_nested_atm_lock());
- va_id_new = _voucher_atm_nested_atm_id_make();
- va_id_old = va_id;
- _voucher_activity_lock_lock(vam_nested_atm_lock());
- va_id = _voucher_activity_heap->vam_nested_atm_id;
- if (va_id != va_id_old) goto retry;
- _voucher_activity_heap->vam_nested_atm_id = va_id = va_id_new;
- subid = 1;
- }
- _voucher_activity_heap->vam_nested_atm_subid = subid;
- _voucher_activity_lock_unlock(vam_nested_atm_lock());
- return va_id + subid;
+ mach_voucher_t kv = vatm->vatm_kvoucher;
+ _dispatch_voucher_atm_debug("create subid from atm", vatm);
+ kern_return_t kr;
+ mach_atm_subaid_t naid;
+ mach_voucher_attr_content_t kvc_in = NULL;
+ mach_voucher_attr_content_size_t kvc_in_size = 0;
+ mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&naid;
+ mach_voucher_attr_content_size_t kvc_out_size = sizeof(naid);
+ kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM,
+ ATM_ACTION_GETSUBAID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size);
+ DISPATCH_VERIFY_MIG(kr);
+ if (dispatch_assume_zero(kr)) {
+ DISPATCH_CLIENT_CRASH("Could not get next ATM ID");
+ }
+ _dispatch_voucher_atm_debug("created subid from atm %lld", vatm, naid);
+ return VATMID2ACTID(naid, flags);
}
#pragma mark -
#pragma mark voucher_activity_id_t
+static const size_t _voucher_activity_maxsize =
+ _voucher_activity_buffer_size - _voucher_activity_buffer_header_size -
+ _voucher_activity_strings_header_size;
+
voucher_activity_id_t
voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id,
uint64_t location, voucher_activity_flag_t flags)
dispatch_once_f(&_voucher_activity_heap_pred, NULL,
_voucher_activity_heap_init);
if (!_voucher_activity_trace_id_enabled(trace_id)) return 0;
- voucher_activity_id_t va_id = 0, va_base_id = 0;
+ voucher_activity_id_t va_id = 0;
_voucher_atm_t vatm = NULL;
_voucher_activity_t act = NULL;
_voucher_activity_tracepoint_t vat = NULL;
unsigned int activities = 1, oactivities = 0;
voucher_t ov = _voucher_get();
+ vatm = _voucher_get_atm(ov);
if (!(flags & voucher_activity_flag_force) && ov && ov->v_activities) {
oactivities = ov->v_activities;
activities += oactivities;
if (activities > _voucher_max_activities) {
- va_id = _voucher_atm_nested_activity_id_make();
+ va_id = _voucher_atm_subid_make(vatm, flags);
goto out;
}
}
- if (activities == 1) {
- vatm = _voucher_atm_base_copy_and_activity_id_make(&va_id);
- if (vatm->vatm_kvoucher) {
- // consumes vatm reference:
- act = _voucher_activity_create_with_atm(vatm, va_id, trace_id,
- location, NULL);
- vat = (_voucher_activity_tracepoint_t)act;
- } else {
- _voucher_atm_release(vatm);
- }
- if (!act) {
- activities++;
- // default to _voucher_activity_default base activity
- va_base_id = _voucher_activity_default->va_id;
- }
- }
+ _voucher_atm_retain(vatm);
+ // required for v->v_atm = vatm below
+ _voucher_atm_retain(vatm);
+ va_id = _voucher_atm_subid_make(vatm, flags);
+ // consumes vatm reference:
+ act = _voucher_activity_create_with_atm(vatm, va_id, trace_id, location,
+ NULL);
+ vat = (_voucher_activity_tracepoint_t)act;
pthread_priority_t priority = _voucher_get_priority(ov);
mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0;
voucher_t v = _voucher_alloc(activities, priority, extra);
memcpy(activity_ids, _voucher_activity_ids(ov),
oactivities * sizeof(voucher_activity_id_t));
}
- if (!va_id) {
- va_id = _voucher_atm_nested_activity_id_make();
- if (ov && ov->v_activity) {
- act = _voucher_activity_retain(ov->v_activity);
- }
- }
- if (va_base_id) activity_ids[0] = va_base_id;
activity_ids[activities-1] = va_id;
+ v->v_atm = vatm;
v->v_activity = act;
_voucher_swap(ov, v);
- if (vat) return va_id; // new _voucher_activity_s contains trace info
+ return va_id; // new activity buffer contains trace info
out:
- vat = _voucher_activity_trace_with_id(trace_id);
- if (vat) {
- vat->vat_flags |= _voucher_activity_trace_flag_activity |
- _voucher_activity_trace_flag_start;
- vat->vat_data[0] = va_id;
- }
+ _voucher_activity_trace_activity_event(trace_id, va_id, start);
return va_id;
}
voucher_activity_end(voucher_activity_id_t va_id)
{
if (!va_id) return;
- _voucher_activity_tracepoint_t vat;
- vat = _voucher_activity_trace_with_id(_voucher_activity_trace_id_release);
- if (vat) {
- vat->vat_flags |= _voucher_activity_trace_flag_activity |
- _voucher_activity_trace_flag_end;
- vat->vat_data[0] = va_id;
- }
+ _voucher_activity_trace_activity_event(_voucher_activity_trace_id_release,
+ va_id, end);
voucher_t v = _voucher_get();
if (!v) return;
unsigned int activities = v->v_activities, act_idx = activities;
} else {
if (v->v_activity) {
nv->v_activity = _voucher_activity_retain(v->v_activity);
+ nv->v_atm = _voucher_atm_retain(v->v_atm);
}
memcpy(new_activity_ids, activity_ids,
--act_idx * sizeof(voucher_activity_id_t));
{
voucher_t v = _voucher_get();
if (!v || !v->v_activity) return 0;
- return v->v_activity->va_namespace;
+ voucher_activity_trace_id_t trace_id = v->v_activity->va_trace_id;
+ uint8_t cns = (uint8_t)(trace_id >>
+ _voucher_activity_trace_id_code_namespace_shift);
+ return cns;
}
DISPATCH_NOINLINE
_voucher_activity_tracepoint_t
-_voucher_activity_tracepoint_get_slow(unsigned int slots)
+_voucher_activity_buffer_tracepoint_acquire_slow(_voucher_activity_t *vap,
+ _voucher_activity_buffer_header_t *vabp, unsigned int slots,
+ size_t strsize, uint16_t *stroffsetp)
{
_voucher_activity_t act;
_voucher_activity_buffer_header_t vab;
act = _voucher_activity_default;
}
vab = act->va_current_buffer;
- if (vab && vab->vabh_next_tracepoint_idx <=
- _voucher_activity_tracepoints_per_buffer) {
+ if (act == *vap && vab != *vabp) {
goto retry; // another slowpath raced us
}
do {
vab = _voucher_activity_buffer_alloc(act, vab);
if (!vab) break;
retry:
- vat = _voucher_activity_buffer_tracepoint_get(vab, slots);
+ vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, strsize,
+ stroffsetp);
} while (!vat);
+ *vap = act;
+ *vabp = vab;
return vat;
}
void *buffer, size_t length)
{
if (!_voucher_activity_trace_id_enabled(trace_id)) return 0;
+ _voucher_activity_t act;
+ _voucher_activity_buffer_header_t vab;
_voucher_activity_tracepoint_t vat;
const unsigned int slots = length <= sizeof(vat->vat_data) ? 1 : 2;
- vat = _voucher_activity_tracepoint_get(slots);
- if (!vat) vat = _voucher_activity_tracepoint_get_slow(slots);
+ act = _voucher_activity_get();
+ vab = _voucher_activity_buffer_get_from_activity(act);
+ vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, 0, NULL);
+ if (!vat) {
+ vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab,
+ slots, 0, NULL);
+ }
if (!vat) return 0;
uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat,
- trace_id, location);
+ trace_id, location, true);
void *tbuf = vat->vat_data;
size_t tlen = sizeof(vat->vat_data);
if (length < tlen) {
}
}
_voucher_activity_trace_fault(trace_id);
+ if (_voucher_activity_buffer_tracepoint_release(vab)) {
+ _voucher_activity_firehose_push(act, vab);
+ }
+ return timestamp;
+}
+
+uint64_t
+voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id,
+ uint64_t location, void *buffer, size_t length, const char *strings[],
+ size_t string_lengths[], size_t strings_size)
+{
+ if (!_voucher_activity_trace_id_enabled(trace_id)) return 0;
+ _voucher_activity_t act;
+ _voucher_activity_buffer_header_t vab;
+ _voucher_activity_tracepoint_t vat;
+ uint16_t offset;
+ const unsigned int slots = length <= sizeof(vat->vat_data) ? 1 : 2;
+ strings_size = MIN(strings_size, _voucher_activity_maxsize -
+ slots * sizeof(struct _voucher_activity_tracepoint_s));
+ act = _voucher_activity_get();
+ vab = _voucher_activity_buffer_get_from_activity(act);
+ vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, strings_size,
+ &offset);
+ if (!vat) {
+ vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab,
+ slots, strings_size, &offset);
+ }
+ if (!vat) return 0;
+ uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat,
+ trace_id, location, false);
+ vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_strings;
+ vat->vat_stroff.vats_offset = offset;
+ void *tbuf = vat->vat_stroff.vats_data;
+ size_t tlen = sizeof(vat->vat_stroff.vats_data);
+ if (length < tlen) {
+ memcpy(tbuf, buffer, length);
+ } else {
+ memcpy(tbuf, buffer, tlen);
+ }
+ if (length > tlen) {
+ vat->vat_flags |= _voucher_activity_trace_flag_wide_first;
+ buffer += tlen;
+ length -= tlen;
+ (++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint |
+ _voucher_activity_trace_flag_wide_second;
+ vat->vat_type = 0; vat->vat_namespace = 0;
+ tbuf = (void*)vat + offsetof(typeof(*vat), vat_code);
+ tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code);
+ if (length < tlen) {
+ memcpy(tbuf, buffer, length);
+ } else {
+ memcpy(tbuf, buffer, tlen);
+ }
+ }
+ const uint16_t offsetend = offset - (uint16_t)strings_size;
+ char *b = (char*)vab + _voucher_activity_buffer_size;
+ int i = 0;
+ while (offset > offsetend && strings[i]) {
+ size_t maxsize = MIN(string_lengths[i] + 1, offset - offsetend);
+ size_t len = strlcpy(b - offset, strings[i++], maxsize);
+ offset -= MIN(len + 1, maxsize);
+ }
+ _voucher_activity_trace_fault(trace_id);
+ if (_voucher_activity_buffer_tracepoint_release(vab)) {
+ _voucher_activity_firehose_push(act, vab);
+ }
return timestamp;
}
uintptr_t arg4)
{
if (!_voucher_activity_trace_id_enabled(trace_id)) return 0;
+ _voucher_activity_t act;
+ _voucher_activity_buffer_header_t vab;
_voucher_activity_tracepoint_t vat;
- vat = _voucher_activity_tracepoint_get(1);
- if (!vat) vat = _voucher_activity_tracepoint_get_slow(1);
+ act = _voucher_activity_get();
+ vab = _voucher_activity_buffer_get_from_activity(act);
+ vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL);
+ if (!vat) {
+ vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, 1,
+ 0, NULL);
+ }
if (!vat) return 0;
uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat,
- trace_id, location);
+ trace_id, location, true);
vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args;
vat->vat_data[0] = arg1;
vat->vat_data[1] = arg2;
vat->vat_data[2] = arg3;
vat->vat_data[3] = arg4;
_voucher_activity_trace_fault(trace_id);
+ if (_voucher_activity_buffer_tracepoint_release(vab)) {
+ _voucher_activity_firehose_push(act, vab);
+ }
return timestamp;
}
if (v->v_activity) {
_voucher_activity_t va = v->v_activity;
_voucher_atm_t vatm = va->va_atm;
- bufprintf("activity[%p] = { ID 0x%llx, use %d, atm[%p] = { "
+ bufprintf("activity[%p] = { ID 0x%llx, ref %d, atm[%p] = { "
"AID 0x%llx, ref %d, kvoucher 0x%x } }, ", va, va->va_id,
- va->va_use_count + 1, va->va_atm, vatm->vatm_id,
+ va->va_refcnt + 1, va->va_atm, vatm->vatm_id,
vatm->vatm_refcnt + 1, vatm->vatm_kvoucher);
}
bufprintf("}");
return NULL;
}
+// Stub for builds without mach voucher support: vouchers carry no state
+// here, so retain is an identity operation.
+voucher_t
+voucher_retain(voucher_t voucher)
+{
+	return voucher;
+}
+
+// Stub counterpart to voucher_retain(): releasing is a no-op when mach
+// voucher support is compiled out.
+void
+voucher_release(voucher_t voucher)
+{
+	(void)voucher;
+}
+
void
voucher_replace_default_voucher(void)
{
return NULL;
}
+// Stub: accounting vouchers are unavailable in this configuration;
+// always returns NULL.
+voucher_t
+_voucher_create_accounting_voucher(voucher_t voucher)
+{
+	(void)voucher;
+	return NULL;
+}
+
voucher_t
voucher_create_with_mach_msg(mach_msg_header_t *msg)
{
return NULL;
}
+// Stub: activity-buffer hooks from libtrace are ignored when voucher
+// activity support is compiled out.
+void
+voucher_activity_buffer_hook_install_4libtrace(
+		_voucher_activity_buffer_hook_t hook)
+{
+	(void)hook;
+}
+
void
_voucher_activity_heap_pressure_normal(void)
{
return 0;
}
+// Stub: activity tracing disabled in this configuration; records nothing
+// and returns 0 (no timestamp).
+uint64_t
+voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id,
+		uint64_t location, void *buffer, size_t length, const char *strings[],
+		size_t string_lengths[], size_t strings_size)
+{
+	(void)trace_id; (void)location; (void)buffer; (void)length; (void)strings;
+	(void)string_lengths; (void)strings_size;
+	return 0;
+}
+
uint64_t
voucher_activity_trace_args(voucher_activity_trace_id_t trace_id,
uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
#pragma mark voucher_t
#if TARGET_IPHONE_SIMULATOR && \
- IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000
+ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100
#undef VOUCHER_USE_MACH_VOUCHER
#define VOUCHER_USE_MACH_VOUCHER 0
#endif
void _voucher_thread_cleanup(void *voucher);
mach_voucher_t _voucher_get_mach_voucher(voucher_t voucher);
voucher_t _voucher_create_without_importance(voucher_t voucher);
+voucher_t _voucher_create_accounting_voucher(voucher_t voucher);
mach_voucher_t _voucher_create_mach_voucher_with_priority(voucher_t voucher,
pthread_priority_t priority);
voucher_t _voucher_create_with_priority_and_mach_voucher(voucher_t voucher,
#endif
#endif
+voucher_t voucher_retain(voucher_t voucher);
+void voucher_release(voucher_t voucher);
+
#define _TAILQ_IS_ENQUEUED(elm, field) \
((elm)->field.tqe_prev != NULL)
#define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \
TAILQ_ENTRY(voucher_s) v_list;
mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference
voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference
- _voucher_activity_t v_activity;
+ struct _voucher_atm_s *v_atm;
+ struct _voucher_activity_s *v_activity;
#if VOUCHER_ENABLE_RECIPE_OBJECTS
size_t v_recipe_extra_offset;
mach_voucher_attr_recipe_size_t v_recipe_extra_size;
#define _dispatch_voucher_debug_machport(name) ((void)(name))
#endif
-#if !(USE_OBJC && __OBJC2__)
+#if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
DISPATCH_ALWAYS_INLINE
static inline voucher_t
_voucher_retain(voucher_t voucher)
{
#if !DISPATCH_VOUCHER_OBJC_DEBUG
+ // not using _os_object_refcnt* because we don't need barriers:
+ // vouchers are immutable and are in a hash table with a lock
int xref_cnt = dispatch_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed);
_dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1);
if (slowpath(xref_cnt <= 0)) {
_voucher_release(voucher_t voucher)
{
#if !DISPATCH_VOUCHER_OBJC_DEBUG
+ // not using _os_object_refcnt* because we don't need barriers:
+ // vouchers are immutable and are in a hash table with a lock
int xref_cnt = dispatch_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed);
_dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1);
if (fastpath(xref_cnt >= 0)) {
static const size_t _voucher_activity_hash_bits = 6;
static const size_t _voucher_activity_hash_size =
1 << _voucher_activity_hash_bits;
-#define VACTID_HASH(x) ((((uint32_t)((x) >> 32) + (uint32_t)(x)) * \
- 2654435761u) >> (32-_voucher_activity_hash_bits))
+#define VACTID_HASH(x) \
+ (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits))
#define VATMID_HASH(x) \
(((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits))
-#define VATMID2ACTID(x) ((uint64_t)(x) << 32)
-#define VACTID_BASEID(x) ((uint64_t)(x) & (((uint64_t)UINT32_MAX) << 32))
-#define VACTID_SUBID(x) ((uint32_t)(x))
-#define VATM_ACTID(vatm, subid) (VATMID2ACTID((vatm)->vatm_id) + (subid))
-#define VATM_SUBID_BITS2MAX(bits) ((1u << (bits)) - 1)
-#define VATM_SUBID_MAXBITS (32)
-#define VATM_SUBID_MAX (ATM_SUBAID32_MAX)
-#define MAILBOX_OFFSET_UNSET UINT64_MAX
-
-static const size_t _voucher_activity_buffers_per_heap = 512;
-typedef unsigned long _voucher_activity_bitmap_base_t;
-static const size_t _voucher_activity_bits_per_bitmap_base_t =
- 8 * sizeof(_voucher_activity_bitmap_base_t);
-static const size_t _voucher_activity_bitmaps_per_heap =
- _voucher_activity_buffers_per_heap /
- _voucher_activity_bits_per_bitmap_base_t;
-typedef _voucher_activity_bitmap_base_t
- _voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap];
+#define VATMID2ACTID(x, flags) \
+ (((voucher_activity_id_t)(x) & 0xffffffffffffff) | \
+ (((voucher_activity_id_t)(flags) & 0xfe) << 55))
typedef struct _voucher_activity_metadata_s {
- _voucher_activity_buffer_t vam_kernel_metadata;
_voucher_activity_buffer_t vam_client_metadata;
- struct _voucher_activity_self_metadata_s vam_self_metadata;
-#if __LP64__
- uintptr_t vam_pad0[7];
-#else
- uintptr_t vam_pad0[15];
-#endif
- // cacheline
- _voucher_activity_bitmap_t volatile vam_atm_mbox_bitmap;
+ struct _voucher_activity_metadata_opaque_s *vasm_baseaddr;
_voucher_activity_bitmap_t volatile vam_buffer_bitmap;
_voucher_activity_bitmap_t volatile vam_pressure_locked_bitmap;
- // cacheline
- _voucher_atm_subid_t vam_base_atm_subid;
- _voucher_atm_subid_t vam_base_atm_subid_max;
- _voucher_atm_subid_t vam_nested_atm_subid;
- _voucher_atm_t vam_default_activity_atm;
- _voucher_atm_t volatile vam_base_atm;
- voucher_activity_id_t volatile vam_nested_atm_id;
-#if __LP64__
- uintptr_t vam_pad2[3];
-#else
- uintptr_t vam_pad2[1];
-#endif
- _voucher_activity_lock_s vam_base_atm_lock;
- _voucher_activity_lock_s vam_nested_atm_lock;
_voucher_activity_lock_s vam_atms_lock;
_voucher_activity_lock_s vam_activities_lock;
- // cacheline
TAILQ_HEAD(, _voucher_atm_s) vam_atms[_voucher_activity_hash_size];
TAILQ_HEAD(, _voucher_activity_s)
vam_activities[_voucher_activity_hash_size];
} *_voucher_activity_metadata_t;
+#pragma mark -
+#pragma mark _voucher_atm_t
+
+// One ATM (activity trace manager) object; hashed by VATMID_HASH into the
+// vam_atms table and shared by reference count.
+typedef struct _voucher_atm_s {
+	int32_t volatile vatm_refcnt;	// ref count; debug output prints refcnt + 1
+	mach_voucher_t vatm_kvoucher;	// kernel voucher carrying the ATM attribute
+	atm_aid_t vatm_id;	// ATM ID this object represents
+	atm_guard_t vatm_generation;	// guard value passed to ATM_ACTION_REGISTER
+	TAILQ_ENTRY(_voucher_atm_s) vatm_list;	// linkage in vam_atms hash bucket
+#if __LP64__
+	uintptr_t vatm_pad[3];
+	// cacheline
+#endif
+} *_voucher_atm_t;
+
+extern _voucher_atm_t _voucher_task_atm;
+
#pragma mark -
#pragma mark _voucher_activity_t
-_voucher_activity_tracepoint_t _voucher_activity_tracepoint_get_slow(
- unsigned int slots);
+// One activity: a stream of tracepoint buffers tied to an ATM.
+typedef struct _voucher_activity_s {
+	voucher_activity_id_t va_id;	// activity ID (see _voucher_atm_subid_make)
+	voucher_activity_trace_id_t va_trace_id;	// trace id; namespace extracted in voucher_get_activity_namespace
+	uint64_t va_location;	// creation location — presumably a code address; confirm at creation site
+	int32_t volatile va_refcnt;	// ref count; debug output prints refcnt + 1
+	uint32_t volatile va_buffer_count;	// NOTE(review): looks like count of allocated buffers — confirm in alloc path
+	uint32_t va_buffer_limit;	// NOTE(review): presumably max buffers allowed — confirm
+	_voucher_activity_buffer_header_t volatile va_current_buffer;	// buffer writers currently append to
+	_voucher_atm_t va_atm;	// owning ATM (retained)
+#if __LP64__
+	uint64_t va_unused;
+#endif
+	// cacheline
+	_voucher_activity_lock_s va_buffers_lock;	// presumably guards va_buffers — confirm
+	TAILQ_HEAD(_voucher_activity_buffer_list_s,
+			_voucher_activity_buffer_header_s) va_buffers;	// buffers owned by this activity
+	TAILQ_ENTRY(_voucher_activity_s) va_list;	// linkage in vam_activities hash bucket
+	TAILQ_ENTRY(_voucher_activity_s) va_atm_list;	// per-ATM linkage — confirm against ATM list heads
+	TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list;	// per-ATM "used" linkage — confirm
+	pthread_mutex_t va_mutex;	// NOTE(review): likely for blocking waits with va_cond — confirm
+	pthread_cond_t va_cond;
+} *_voucher_activity_t;
+
+_voucher_activity_tracepoint_t _voucher_activity_buffer_tracepoint_acquire_slow(
+ _voucher_activity_t *vap, _voucher_activity_buffer_header_t *vabp,
+ unsigned int slots, size_t strsize, uint16_t *stroffsetp);
+void _voucher_activity_firehose_push(_voucher_activity_t act,
+ _voucher_activity_buffer_header_t buffer);
extern _voucher_activity_t _voucher_activity_default;
extern voucher_activity_mode_t _voucher_activity_mode;
#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
#define _dispatch_voucher_activity_debug(msg, act, ...) \
- _dispatch_debug("activity[%p] <0x%x>: atm[%p] <%lld>: " msg, (act), \
- (act) ? VACTID_SUBID((act)->va_id) : 0, (act) ? (act)->va_atm : NULL, \
+ _dispatch_debug("activity[%p] <0x%llx>: atm[%p] <%lld>: " msg, (act), \
+ (act) ? (act)->va_id : 0, (act) ? (act)->va_atm : NULL, \
(act) && (act)->va_atm ? (act)->va_atm->vatm_id : 0, ##__VA_ARGS__)
#define _dispatch_voucher_atm_debug(msg, atm, ...) \
_dispatch_debug("atm[%p] <%lld> kvoucher[0x%08x]: " msg, (atm), \
DISPATCH_ALWAYS_INLINE
static inline uint64_t
-_voucher_activity_timestamp(void)
+_voucher_activity_timestamp(bool approx)
{
#if TARGET_IPHONE_SIMULATOR && \
IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000
+ (void)approx;
return mach_absolute_time();
#else
- return mach_approximate_time();
+ return approx ? mach_approximate_time() : mach_absolute_time();
#endif
}
return thread_id;
}
+#define _voucher_activity_buffer_pos2length(pos) \
+ ({ _voucher_activity_buffer_position_u _pos = (pos); \
+ _pos.vabp_pos.vabp_next_tracepoint_idx * \
+ sizeof(struct _voucher_activity_tracepoint_s) + \
+ _pos.vabp_pos.vabp_string_offset; })
+
+// Reserve `slots` tracepoint slots plus `strsize` bytes of string space in
+// buffer `vab` with a lock-free cmpxchg loop on the packed position word.
+// Returns NULL if vab is NULL, the reservation would not fit in
+// _voucher_activity_buffer_size, or the buffer already has flags set
+// (full/pushing).  On success the writer refcount is incremented — pair
+// every successful acquire with _voucher_activity_buffer_tracepoint_release().
+// If the reservation exactly fills the buffer, the "full" flag is set here.
DISPATCH_ALWAYS_INLINE
static inline _voucher_activity_tracepoint_t
-_voucher_activity_buffer_tracepoint_get(_voucher_activity_buffer_header_t vab,
-		unsigned int slots)
+_voucher_activity_buffer_tracepoint_acquire(
+		_voucher_activity_buffer_header_t vab, unsigned int slots,
+		size_t strsize, uint16_t *stroffsetp)
{
-	uint32_t idx = dispatch_atomic_add2o(vab, vabh_next_tracepoint_idx,
-			slots, relaxed);
-	if (idx <= _voucher_activity_tracepoints_per_buffer) {
-		return (_voucher_activity_tracepoint_t)vab + (idx - slots);
-	}
-	return NULL;
+	if (!vab) return NULL;
+	_voucher_activity_buffer_position_u pos_orig, pos;
+	pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos;
+	do {
+		pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos;
+		pos.vabp_pos.vabp_next_tracepoint_idx += slots;
+		pos.vabp_pos.vabp_string_offset += strsize;
+		size_t len = _voucher_activity_buffer_pos2length(pos);
+		if (len > _voucher_activity_buffer_size || pos.vabp_pos.vabp_flags) {
+			return NULL;
+		}
+		if (len == _voucher_activity_buffer_size) {
+			pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full;
+		}
+		// track this writer until the matching release
+		pos.vabp_pos.vabp_refcnt++;
+	} while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos,
+			pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos,
+			&pos_orig.vabp_atomic_pos, relaxed));
+	// report the (post-reservation) string offset for string writers
+	if (stroffsetp) *stroffsetp = pos.vabp_pos.vabp_string_offset;
+	return (_voucher_activity_tracepoint_t)vab +
+			pos_orig.vabp_pos.vabp_next_tracepoint_idx;
}
+// Drop a writer reference taken by _voucher_activity_buffer_tracepoint_
+// acquire().  If this was the last writer and the buffer is marked "full",
+// also set the "pushing" flag.  Returns true when the caller should push
+// the buffer (callers invoke _voucher_activity_firehose_push on true).
DISPATCH_ALWAYS_INLINE
-static inline _voucher_activity_tracepoint_t
-_voucher_activity_tracepoint_get_from_activity(_voucher_activity_t va,
-		unsigned int slots)
+static inline bool
+_voucher_activity_buffer_tracepoint_release(
+		_voucher_activity_buffer_header_t vab)
{
-	_voucher_activity_buffer_header_t vab = va ? va->va_current_buffer : NULL;
-	return vab ? _voucher_activity_buffer_tracepoint_get(vab, slots) : NULL;
+	_voucher_activity_buffer_position_u pos_orig, pos;
+	pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos;
+	do {
+		pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos;
+		pos.vabp_pos.vabp_refcnt--;
+		if (!pos.vabp_pos.vabp_refcnt &&
+				(pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full)) {
+			pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing;
+		}
+	} while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos,
+			pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos,
+			&pos_orig.vabp_atomic_pos, relaxed));
+	return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing);
}
DISPATCH_ALWAYS_INLINE
-static inline _voucher_activity_tracepoint_t
-_voucher_activity_tracepoint_get(unsigned int slots)
+// Atomically mark a buffer "full".  Returns false if it was already full.
+// If no writers are outstanding at that moment, also sets "pushing" and
+// returns true, meaning the caller is responsible for pushing the buffer;
+// otherwise the last writer's release will trigger the push.
+static inline bool
+_voucher_activity_buffer_mark_full(_voucher_activity_buffer_header_t vab)
+{
+	_voucher_activity_buffer_position_u pos_orig, pos;
+	pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos;
+	do {
+		pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos;
+		if (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full) {
+			return false;
+		}
+		pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full;
+		if (!pos.vabp_pos.vabp_refcnt) {
+			pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing;
+		}
+	} while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos,
+			pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos,
+			&pos_orig.vabp_atomic_pos, relaxed));
+	return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing);
+}
+
+// True if ANY position flag is set (full and/or pushing) — named for the
+// common "full" case; a pushing-only buffer also reads as full here.
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_voucher_activity_buffer_is_full(_voucher_activity_buffer_header_t vab)
+{
+	_voucher_activity_buffer_position_u pos;
+	pos.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos;
+	return (pos.vabp_pos.vabp_flags);
+}
+
+// Current write buffer of an activity, or NULL when va is NULL.
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_buffer_header_t
+_voucher_activity_buffer_get_from_activity(_voucher_activity_t va)
+{
+	return va ? va->va_current_buffer : NULL;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline _voucher_activity_t
+_voucher_activity_get(void)
{
_voucher_activity_t va;
voucher_t v = _voucher_get();
va = v && v->v_activity ? v->v_activity : _voucher_activity_default;
- return _voucher_activity_tracepoint_get_from_activity(va, slots);
+ return va;
}
DISPATCH_ALWAYS_INLINE
static inline uint64_t
_voucher_activity_tracepoint_init(_voucher_activity_tracepoint_t vat,
- uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location)
+ uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location,
+ bool approx)
{
if (!location) location = (uint64_t)__builtin_return_address(0);
- uint64_t timestamp = _voucher_activity_timestamp();
+ uint64_t timestamp = _voucher_activity_timestamp(approx);
vat->vat_flags = _voucher_activity_trace_flag_tracepoint,
vat->vat_type = type,
vat->vat_namespace = code_namespace,
DISPATCH_ALWAYS_INLINE
static inline uint64_t
_voucher_activity_tracepoint_init_with_id(_voucher_activity_tracepoint_t vat,
- voucher_activity_trace_id_t trace_id, uint64_t location)
+ voucher_activity_trace_id_t trace_id, uint64_t location, bool approx)
{
uint8_t type = (uint8_t)(trace_id >> _voucher_activity_trace_id_type_shift);
uint8_t cns = (uint8_t)(trace_id >>
_voucher_activity_trace_id_code_namespace_shift);
uint32_t code = (uint32_t)trace_id;
- return _voucher_activity_tracepoint_init(vat, type, cns, code, location);
+ return _voucher_activity_tracepoint_init(vat, type, cns, code, location,
+ approx);
}
DISPATCH_ALWAYS_INLINE
}
DISPATCH_ALWAYS_INLINE
-static inline _voucher_activity_tracepoint_t
+static inline void
_voucher_activity_trace_args_inline(uint8_t type, uint8_t code_namespace,
uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
uintptr_t arg4)
{
- if (!_voucher_activity_trace_type_enabled(type)) return NULL;
+ if (!_voucher_activity_trace_type_enabled(type)) return;
+ _voucher_activity_t act;
+ _voucher_activity_buffer_header_t vab;
_voucher_activity_tracepoint_t vat;
- vat = _voucher_activity_tracepoint_get(1);
- if (!vat) return NULL;
- _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0);
+ act = _voucher_activity_get();
+ vab = _voucher_activity_buffer_get_from_activity(act);
+ vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL);
+ if (!vat) return;
+ _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true);
vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args;
vat->vat_data[0] = arg1;
vat->vat_data[1] = arg2;
vat->vat_data[2] = arg3;
vat->vat_data[3] = arg4;
- return vat;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline _voucher_activity_tracepoint_t
-_voucher_activity_trace_with_id_inline(voucher_activity_trace_id_t trace_id)
-{
- _voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1);
- if (!vat) return NULL;
- _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0);
- return vat;
+ if (_voucher_activity_buffer_tracepoint_release(vab)) {
+ _voucher_activity_firehose_push(act, vab);
+ }
}
DISPATCH_ALWAYS_INLINE
-static inline _voucher_activity_tracepoint_t
-_voucher_activity_trace_with_id(voucher_activity_trace_id_t trace_id)
+static inline void
+_voucher_activity_trace_activity_event(voucher_activity_trace_id_t trace_id,
+ voucher_activity_id_t va_id, _voucher_activity_tracepoint_flag_t flags)
{
- _voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1);
- if (!vat) vat = _voucher_activity_tracepoint_get_slow(1);
- if (!vat) return NULL;
- _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0);
- return vat;
+ _voucher_activity_t act;
+ _voucher_activity_buffer_header_t vab;
+ _voucher_activity_tracepoint_t vat;
+ act = _voucher_activity_get();
+ vab = _voucher_activity_buffer_get_from_activity(act);
+ vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL);
+ if (!vat) return;
+ _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0, false);
+ vat->vat_flags |= _voucher_activity_trace_flag_activity | flags;
+ vat->vat_data[0] = va_id;
+ if (_voucher_activity_buffer_tracepoint_release(vab)) {
+ _voucher_activity_firehose_push(act, vab);
+ }
}
+#define _voucher_activity_trace_activity_event(trace_id, va_id, type) \
+ _voucher_activity_trace_activity_event(trace_id, va_id, \
+ _voucher_activity_trace_flag_ ## type)
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_trace_msg(voucher_t v, mach_msg_header_t *msg, uint32_t code)
{
if (!v || !v->v_activity) return; // Don't use default activity for IPC
- const uint8_t type = voucher_activity_tracepoint_type_release;
+ const uint8_t type = voucher_activity_tracepoint_type_debug;
const uint8_t code_namespace = _voucher_activity_tracepoint_namespace_ipc;
if (!_voucher_activity_trace_type_enabled(type)) return;
+ _voucher_activity_buffer_header_t vab;
_voucher_activity_tracepoint_t vat;
- vat = _voucher_activity_tracepoint_get_from_activity(v->v_activity, 1);
+ vab = _voucher_activity_buffer_get_from_activity(v->v_activity);
+ vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL);
if (!vat) return; // TODO: slowpath ?
- _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0);
+ _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true);
vat->vat_flags |= _voucher_activity_trace_flag_libdispatch;
#if __has_extension(c_static_assert)
_Static_assert(sizeof(mach_msg_header_t) <= sizeof(vat->vat_data),
"mach_msg_header_t too large");
#endif
memcpy(vat->vat_data, msg, sizeof(mach_msg_header_t));
+ if (_voucher_activity_buffer_tracepoint_release(vab)) {
+ _voucher_activity_firehose_push(v->v_activity, vab);
+ }
}
#define _voucher_activity_trace_msg(v, msg, type) \
_voucher_activity_trace_msg(v, msg, \
_voucher_activity_tracepoint_namespace_ipc_ ## type)
-#endif // !(USE_OBJC && __OBJC2__)
+#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus)
#else // VOUCHER_USE_MACH_VOUCHER
//
BUILD_VARIANTS = normal
-INSTALL_PATH_ACTUAL = /usr/lib/system/introspection
+INSTALL_PATH = /usr/lib/system/introspection
GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1
CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection
// @APPLE_APACHE_LICENSE_HEADER_END@
//
-SUPPORTED_PLATFORMS = iphoneos
+SUPPORTED_PLATFORMS = iphoneos appletvos watchos
PRODUCT_NAME = libdispatch_$(DISPATCH_RESOLVED_VARIANT)
OTHER_LDFLAGS =
SKIP_INSTALL = YES
//
#include "<DEVELOPER_DIR>/Makefiles/CoreOS/Xcode/BSD.xcconfig"
-#include "<DEVELOPER_DIR>/AppleInternal/XcodeConfig/SimulatorSupport.xcconfig"
+#include "<DEVELOPER_DIR>/AppleInternal/XcodeConfig/PlatformSupport.xcconfig"
-// Set INSTALL_PATH[sdk=macosx*] when SimulatorSupport.xcconfig is unavailable
-INSTALL_PATH[sdk=macosx*] = $(INSTALL_PATH_ACTUAL)
-
-SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator iphoneosnano iphonesimulatornano
-ARCHS[sdk=iphonesimulator*] = $(NATIVE_ARCH_32_BIT) // Override BSD.xcconfig ARCHS <rdar://problem/9303721>
+SDKROOT = macosx.internal
+SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator
PRODUCT_NAME = libdispatch
EXECUTABLE_PREFIX =
-INSTALL_PATH_ACTUAL = /usr/lib/system
-PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/dispatch
-PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/dispatch
-OS_PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/os
-OS_PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/os
+INSTALL_PATH = /usr/lib/system
+PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch
+PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch
+OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os
+OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os
HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/os
LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system
INSTALLHDRS_SCRIPT_PHASE = YES
ONLY_ACTIVE_ARCH = NO
CLANG_LINK_OBJC_RUNTIME = NO
GCC_C_LANGUAGE_STANDARD = gnu11
+CLANG_CXX_LANGUAGE_STANDARD = gnu++11
+GCC_ENABLE_CPP_EXCEPTIONS = NO
GCC_STRICT_ALIASING = YES
GCC_SYMBOLS_PRIVATE_EXTERN = YES
GCC_ENABLE_OBJC_GC[sdk=macosx*] = supported
OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1
GENERATE_PROFILING_CODE = NO
DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION)
-UMBRELLA_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind
-UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = -umbrella System -nodefaultlibs -ldyld_sim -lcompiler_rt_sim -lsystem_sim_c -lsystem_sim_blocks -lunwind_sim -Wl,-upward-lSystem
+SIM_SUFFIX[sdk=*simulator*] = _sim
+DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind
OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_objc.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport
OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto
OBJC_LDFLAGS[arch=i386][sdk=macosx*] =
OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m
ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases
PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases
-OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS)
+OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS)
OTHER_MIGFLAGS = -novouchers
# @APPLE_APACHE_LICENSE_HEADER_END@
#
-if [ "${PLATFORM_NAME}" = iphoneos ]; then exit 0; fi
+# This check equates to "is macosx or a simulator platform"
+if [ "${PLATFORM_NAME}" == "${DEVICE_PLATFORM_NAME}" ]; then exit 0; fi
if [ "${DEPLOYMENT_LOCATION}" != YES ]; then
DSTROOT="${CONFIGURATION_BUILD_DIR}"