diff --git a/.gitmodules b/.gitmodules index 009b5fbf1..e6068b432 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ [submodule "libpwq"] path = libpwq url = https://github.com/mheily/libpwq.git -[submodule "libkqueue"] - path = libkqueue - url = https://github.com/mheily/libkqueue.git diff --git a/INSTALL.md b/INSTALL.md index e30551887..42675d1bf 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -70,9 +70,9 @@ that includes libdispatch on Linux: ``` ./swift/utils/build-toolchain local.swift ``` - + Note that adding libdispatch in build-presets.ini is for Linux only as Swift on macOS platforms uses the system installed libdispatch, so its not required. - + ### Building and installing on OS X The following configure options may be of general interest: @@ -151,9 +151,9 @@ Note that libdispatch development and testing is done only on Ubuntu; currently supported versions are 14.04, 15.10 and 16.04. 1. The first thing to do is install required packages: - + `sudo apt-get install autoconf libtool pkg-config clang systemtap-sdt-dev libbsd-dev` - + Note: compiling libdispatch requires clang 3.8 or better and the gold linker. If the default clang on your Ubuntu version is too old, see http://apt.llvm.org/ to install a newer version. @@ -168,7 +168,7 @@ to get the gold linker. git submodule init git submodule update ``` - + 3. Build (as in the general instructions above) ``` diff --git a/Makefile.am b/Makefile.am index 63c8b17aa..ffc82df29 100644 --- a/Makefile.am +++ b/Makefile.am @@ -8,10 +8,6 @@ if BUILD_OWN_PTHREAD_WORKQUEUES MAYBE_PTHREAD_WORKQUEUES = libpwq endif -if BUILD_OWN_KQUEUES - MAYBE_KQUEUES = libkqueue -endif - if BUILD_TESTS MAYBE_TESTS = tests endif @@ -19,7 +15,6 @@ endif SUBDIRS= \ dispatch \ $(MAYBE_PTHREAD_WORKQUEUES) \ - $(MAYBE_KQUEUES) \ man \ os \ private \ diff --git a/PATCHES b/PATCHES index 6b62acb39..9fecb8398 100644 --- a/PATCHES +++ b/PATCHES @@ -267,3 +267,48 @@ github commits starting with 29bdc2f from [4d58038] APPLIED rdar://27600964 [98d0a05] APPLIED rdar://27600964 [8976101] APPLIED rdar://27600964 +[0d9ea5f] APPLIED rdar://28486911 +[e7e9a32] APPLIED rdar://28486911 +[44174d9] APPLIED rdar://28486911 +[6402cb7] APPLIED rdar://28486911 +[e2d5eb5] APPLIED rdar://28486911 +[758bb7f] APPLIED rdar://28486911 +[4c588e9] APPLIED rdar://28486911 +[1300d06] APPLIED rdar://28486911 +[ae1f7e8] APPLIED rdar://28486911 +[40a9bfb] APPLIED rdar://28486911 +[6366081] APPLIED rdar://28486911 +[81d1d0c] APPLIED rdar://28486911 +[5526122] APPLIED rdar://28486911 +[1a7ff3f] APPLIED rdar://28486911 +[e905735] APPLIED rdar://28486911 +[7fe8323] APPLIED rdar://28486911 +[6249878] APPLIED rdar://28486911 +[20792fe] APPLIED rdar://28486911 +[3639fbe] APPLIED rdar://28486911 +[bda3baf] APPLIED rdar://28486911 +[8803d07] APPLIED rdar://28486911 +[d04a0df] APPLIED rdar://28486911 +[69d2a6a] APPLIED rdar://28486911 +[367bd95] APPLIED rdar://28486911 +[152985f] APPLIED rdar://28486911 +[ba7802e] APPLIED rdar://28486911 +[92773e0] APPLIED rdar://30568673 +[548a1b9] APPLIED rdar://30568673 +[b628e5c] APPLIED rdar://30568673 +[a055ddb] APPLIED rdar://30568673 +[012f48b] APPLIED rdar://30568673 +[353adba] APPLIED rdar://30568673 +[eb730eb] APPLIED rdar://30568673 +[ac16fbb] APPLIED rdar://30568673 +[967876e] APPLIED rdar://30568673 +[44c2291] APPLIED rdar://30568673 +[ceb1fac] APPLIED rdar://30568673 +[c95febb] APPLIED rdar://30568673 +[b6e9cf4] APPLIED rdar://30568673 +[e199473] APPLIED rdar://30568673 +[3767ac7] APPLIED rdar://30568673 +[10eb0e4] APPLIED rdar://30568673 +[787dd92] APPLIED 
rdar://30568673 +[ba4cac5] APPLIED rdar://30568673 +[7974138] APPLIED rdar://30568673 diff --git a/config/config.h b/config/config.h index ca3a1dbb8..91d7cfe8e 100644 --- a/config/config.h +++ b/config/config.h @@ -5,10 +5,18 @@ you don't. */ #define HAVE_DECL_CLOCK_MONOTONIC 0 +/* Define to 1 if you have the declaration of `CLOCK_REALTIME', and to 0 if + you don't. */ +#define CLOCK_REALTIME 0 + /* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you don't. */ #define HAVE_DECL_CLOCK_UPTIME 0 +/* Define to 1 if you have the declaration of `HAVE_DECL_CLOCK_UPTIME_FAST', + and to 0 if you don't. */ +#define HAVE_DECL_CLOCK_UPTIME_FAST 0 + /* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you don't. */ #define HAVE_DECL_FD_COPY 1 @@ -57,6 +65,14 @@ you don't. */ #define HAVE_DECL_VQ_QUOTA 1 +/* Define to 1 if you have the declaration of `VQ_NEARLOWDISK', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_NEARLOWDISK 1 + +/* Define to 1 if you have the declaration of `VQ_DESIRED_DISK', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_DESIRED_DISK 1 + /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 @@ -87,6 +103,9 @@ /* Define to 1 if you have the `mach_absolute_time' function. */ #define HAVE_MACH_ABSOLUTE_TIME 1 +/* Define to 1 if you have the `mach_approximate_time' function. */ +#define HAVE_MACH_APPROXIMATE_TIME 1 + /* Define to 1 if you have the `mach_port_construct' function. */ #define HAVE_MACH_PORT_CONSTRUCT 1 diff --git a/configure.ac b/configure.ac index 73d661324..00e546bf1 100644 --- a/configure.ac +++ b/configure.ac @@ -295,19 +295,6 @@ esac AC_SEARCH_LIBS(clock_gettime, rt) AC_SEARCH_LIBS(pthread_create, pthread) -AS_IF([test -f $srcdir/libkqueue/configure.ac], - [AC_DEFINE(BUILD_OWN_KQUEUES, 1, [Define if building libkqueue from source]) - ac_configure_args="--disable-libkqueue-install $ac_configure_args" - AC_CONFIG_SUBDIRS([libkqueue]) - build_own_kqueues=true], - [build_own_kqueues=false - AC_CHECK_HEADER(sys/event.h, [], - [PKG_CHECK_MODULES(KQUEUE, libkqueue)] - ) - ] -) -AM_CONDITIONAL(BUILD_OWN_KQUEUES, $build_own_kqueues) - AC_CHECK_FUNCS([strlcpy getprogname], [], [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[ AC_DEFINE(HAVE_STRLCPY, 1, []) @@ -423,15 +410,15 @@ AC_CHECK_FUNCS([mach_port_construct]) # # Find functions and declarations we care about. 
# -AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [], +AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_UPTIME_FAST], [], [], [[#include ]]) AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [], [[#include ]]) AC_CHECK_DECLS([FD_COPY], [], [], [[#include ]]) AC_CHECK_DECLS([SIGEMT], [], [], [[#include ]]) -AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA], [], [], [[#include ]]) +AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA, VQ_NEARLOWDISK, VQ_DESIRED_DISK], [], [], [[#include ]]) AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include ]]) -AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf]) +AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time mach_approximate_time malloc_create_zone sysconf]) AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED], [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false], diff --git a/dispatch/base.h b/dispatch/base.h index 8adfb0bdb..4c82b010c 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -204,11 +204,14 @@ #endif #if __has_feature(enumerator_attributes) -#define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING -#define DISPATCH_ENUM_AVAILABLE(os, version) __##os##_AVAILABLE(version) +#define DISPATCH_ENUM_API_AVAILABLE(...) API_AVAILABLE(__VA_ARGS__) +#define DISPATCH_ENUM_API_DEPRECATED(...) API_DEPRECATED(__VA_ARGS__) +#define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...) \ + API_DEPRECATED_WITH_REPLACEMENT(__VA_ARGS__) #else -#define DISPATCH_ENUM_AVAILABLE_STARTING(...) -#define DISPATCH_ENUM_AVAILABLE(...) +#define DISPATCH_ENUM_API_AVAILABLE(...) +#define DISPATCH_ENUM_API_DEPRECATED(...) +#define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...) #endif #if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ @@ -243,6 +246,12 @@ #define DISPATCH_SWIFT_NAME(_name) #endif +#ifndef __cplusplus +#define DISPATCH_TRANSPARENT_UNION __attribute__((__transparent_union__)) +#else +#define DISPATCH_TRANSPARENT_UNION +#endif + typedef void (*dispatch_function_t)(void *_Nullable); #endif diff --git a/dispatch/block.h b/dispatch/block.h index cd56b230d..cbdcb5eff 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -101,17 +101,17 @@ __BEGIN_DECLS */ DISPATCH_ENUM(dispatch_block_flags, unsigned long, DISPATCH_BLOCK_BARRIER - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x1, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x1, DISPATCH_BLOCK_DETACHED - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x2, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x2, DISPATCH_BLOCK_ASSIGN_CURRENT - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x4, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x4, DISPATCH_BLOCK_NO_QOS_CLASS - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x8, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x8, DISPATCH_BLOCK_INHERIT_QOS_CLASS - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x10, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x10, DISPATCH_BLOCK_ENFORCE_QOS_CLASS - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x20, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x20, ); /*! @@ -164,7 +164,7 @@ DISPATCH_ENUM(dispatch_block_flags, unsigned long, * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. 
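The configure.ac hunk above adds probes for CLOCK_REALTIME, CLOCK_UPTIME_FAST, VQ_NEARLOWDISK, VQ_DESIRED_DISK and mach_approximate_time, which surface as HAVE_DECL_*/HAVE_* macros in config.h (shown earlier in this patch). As an illustrative aside, not code from this patch: a minimal sketch of how such probe results are typically consumed in C, assuming config.h has already been included; the helper name coarse_now and the fallback choice are assumptions.

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#if HAVE_MACH_APPROXIMATE_TIME
#include <mach/mach_time.h>
#endif

/* Illustrative: pick the cheapest coarse clock the configure probes found.
 * Units differ per branch (Mach ticks vs. nanoseconds); real code would
 * normalize them, omitted here for brevity. */
static uint64_t
coarse_now(void)
{
#if HAVE_MACH_APPROXIMATE_TIME
	return mach_approximate_time();
#elif HAVE_DECL_CLOCK_UPTIME_FAST
	struct timespec ts;
	clock_gettime(CLOCK_UPTIME_FAST, &ts);	/* FreeBSD-style coarse uptime */
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
#else
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
#endif
}

int main(void)
{
	printf("%llu\n", (unsigned long long)coarse_now());
	return 0;
}
```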
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -236,7 +236,7 @@ dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block); * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -269,7 +269,7 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, * @param block * The block to create the temporary block object from. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW void dispatch_block_perform(dispatch_block_flags_t flags, @@ -320,7 +320,7 @@ dispatch_block_perform(dispatch_block_flags_t flags, * Returns zero on success (the dispatch block object completed within the * specified timeout) or non-zero on error (i.e. timed out). */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW long dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); @@ -361,7 +361,7 @@ dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); * @param notification_block * The notification block to submit when the observed block object completes. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, @@ -393,7 +393,7 @@ dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, * The result of passing NULL or a block object not returned by one of the * dispatch_block_create* functions is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_block_cancel(dispatch_block_t block); @@ -412,7 +412,7 @@ dispatch_block_cancel(dispatch_block_t block); * @result * Non-zero if canceled and zero if not canceled. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW long diff --git a/dispatch/data.h b/dispatch/data.h index 7ceee0647..33a0c9d51 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -50,7 +50,7 @@ DISPATCH_DATA_DECL(dispatch_data); */ #define dispatch_data_empty \ DISPATCH_GLOBAL_OBJECT(dispatch_data_t, _dispatch_data_empty) -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; /*! @@ -83,7 +83,7 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; * was allocated by the malloc() family and should be destroyed with free(3). */ #define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free) -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); /*! @@ -92,7 +92,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); * from buffers that require deallocation with munmap(2). 
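The dispatch/block.h hunks above only swap availability macros, but they document the block-object API in passing. For orientation, a minimal usage sketch (assuming a Blocks-capable clang; the queue label and timeout are illustrative, not taken from this patch) of creating, waiting on, and cancelling a dispatch block object:

```c
#include <dispatch/dispatch.h>
#include <Block.h>
#include <stdio.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.worker", NULL);

	/* Wrap work in a block object so it can be waited on and cancelled. */
	dispatch_block_t work = dispatch_block_create(DISPATCH_BLOCK_BARRIER, ^{
		puts("doing work");
	});

	dispatch_async(q, work);

	/* Wait up to one second for the block object to finish. */
	if (dispatch_block_wait(work,
			dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC))) {
		/* Timed out: cancel so a still-queued block never starts. */
		dispatch_block_cancel(work);
	}

	Block_release(work);	/* required when not building with Objective-C ARC */
	dispatch_release(q);
	return 0;
}
```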
*/ #define DISPATCH_DATA_DESTRUCTOR_MUNMAP (_dispatch_data_destructor_munmap) -__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); #ifdef __BLOCKS__ @@ -117,7 +117,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); * is no longer needed. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create(const void *buffer, @@ -134,7 +134,7 @@ dispatch_data_create(const void *buffer, * @param data The dispatch data object to query. * @result The number of bytes represented by the data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_NONNULL1 DISPATCH_NOTHROW size_t dispatch_data_get_size(dispatch_data_t data); @@ -158,7 +158,7 @@ dispatch_data_get_size(dispatch_data_t data); * size of the mapped contiguous memory region, or NULL. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -181,7 +181,7 @@ dispatch_data_create_map(dispatch_data_t data, * @result A newly created object representing the concatenation of the * data1 and data2 objects. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -202,7 +202,7 @@ dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2); * @result A newly created object representing the specified * subrange of the data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -247,10 +247,11 @@ typedef bool (^dispatch_data_applier_t)(dispatch_data_t region, * @result A Boolean indicating whether traversal completed * successfully. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool -dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); +dispatch_data_apply(dispatch_data_t data, + DISPATCH_NOESCAPE dispatch_data_applier_t applier); #endif /* __BLOCKS__ */ /*! @@ -267,7 +268,7 @@ dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); * start of the queried data object. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index e8d69f893..52cd3e707 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -24,19 +24,14 @@ #ifdef __APPLE__ #include #include -#else -#define __OSX_AVAILABLE_STARTING(x, y) -#define __OSX_AVAILABLE_BUT_DEPRECATED(...) -#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(...) -#define __OSX_AVAILABLE(...) -#define __IOS_AVAILABLE(...) -#define __TVOS_AVAILABLE(...) -#define __WATCHOS_AVAILABLE(...) -#define __OSX_DEPRECATED(...) 
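Related to the dispatch/data.h hunks above, including the new DISPATCH_NOESCAPE annotation on dispatch_data_apply's applier: a minimal sketch of building a data object and walking its contiguous regions. The buffer contents are illustrative only.

```c
#include <dispatch/dispatch.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char bytes[] = "hello, dispatch_data";

	/* Copy the buffer into a new data object; the default destructor lets
	 * libdispatch manage the internal copy. */
	dispatch_data_t data = dispatch_data_create(bytes, strlen(bytes),
			NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT);

	printf("size: %zu\n", dispatch_data_get_size(data));

	/* The applier runs once per contiguous region and does not escape the
	 * call, which is what the DISPATCH_NOESCAPE annotation expresses. */
	dispatch_data_apply(data, ^bool(dispatch_data_t region, size_t offset,
			const void *buffer, size_t size) {
		printf("region at offset %zu, %zu bytes\n", offset, size);
		(void)region; (void)buffer;
		return true;	/* keep traversing */
	});

	dispatch_release(data);
	return 0;
}
```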
-#define __IOS_DEPRECATED(...) -#define __TVOS_DEPRECATED(...) -#define __WATCHOS_DEPRECATED(...) -#endif // __APPLE__ +#endif + +#ifndef API_AVAILABLE +#define API_AVAILABLE(...) +#define API_DEPRECATED(...) +#define API_UNAVAILABLE(...) +#define API_DEPRECATED_WITH_REPLACEMENT(...) +#endif // !API_AVAILABLE #include #include @@ -55,7 +50,7 @@ #endif #endif -#define DISPATCH_API_VERSION 20160712 +#define DISPATCH_API_VERSION 20160831 #ifndef __DISPATCH_BUILDING_DISPATCH__ diff --git a/dispatch/group.h b/dispatch/group.h index c50ad89d1..8d74ada2e 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -51,7 +51,7 @@ __BEGIN_DECLS * @result * The newly created group, or NULL on failure. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_group_t @@ -81,7 +81,7 @@ dispatch_group_create(void); * The block to perform asynchronously. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_async(dispatch_group_t group, @@ -115,7 +115,7 @@ dispatch_group_async(dispatch_group_t group, * parameter passed to this function is the context provided to * dispatch_group_async_f(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void @@ -158,7 +158,7 @@ dispatch_group_async_f(dispatch_group_t group, * Returns zero on success (all blocks associated with the group completed * within the specified timeout) or non-zero on error (i.e. timed out). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); @@ -194,7 +194,7 @@ dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); * The block to submit when the group completes. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_notify(dispatch_group_t group, @@ -224,7 +224,7 @@ dispatch_group_notify(dispatch_group_t group, * parameter passed to this function is the context provided to * dispatch_group_notify_f(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void @@ -248,7 +248,7 @@ dispatch_group_notify_f(dispatch_group_t group, * The dispatch group to update. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_enter(dispatch_group_t group); @@ -267,7 +267,7 @@ dispatch_group_enter(dispatch_group_t group); * The dispatch group to update. * The result of passing NULL in this parameter is undefined. 
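The dispatch/group.h hunks above and just below cover the whole group API. A minimal usage sketch (queue label and work are illustrative) combining dispatch_group_async with manual enter/leave tracking:

```c
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.group",
			DISPATCH_QUEUE_CONCURRENT);
	dispatch_group_t g = dispatch_group_create();

	/* Block-based work: the group association is handled for us. */
	dispatch_group_async(g, q, ^{ puts("block work"); });

	/* Manually tracked work: enter before starting, leave when done. */
	dispatch_group_enter(g);
	dispatch_async(q, ^{
		puts("manually tracked work");
		dispatch_group_leave(g);
	});

	/* Block until everything associated with the group has completed. */
	dispatch_group_wait(g, DISPATCH_TIME_FOREVER);

	dispatch_release(g);
	dispatch_release(q);
	return 0;
}
```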
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_leave(dispatch_group_t group); diff --git a/dispatch/introspection.h b/dispatch/introspection.h index 9cfb4d1c0..ea7dcd8f5 100644 --- a/dispatch/introspection.h +++ b/dispatch/introspection.h @@ -49,7 +49,7 @@ __BEGIN_DECLS * The newly created dispatch queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_create(dispatch_queue_t queue); @@ -65,7 +65,7 @@ dispatch_introspection_hook_queue_create(dispatch_queue_t queue); * The dispatch queue about to be destroyed. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue); @@ -84,7 +84,7 @@ dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue); * The object about to be enqueued. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue, @@ -104,7 +104,7 @@ dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue, * The dequeued object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue, @@ -126,7 +126,7 @@ dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue, * Opaque dentifier for completed item. Must NOT be dereferenced. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_7_1) +API_AVAILABLE(macos(10.10), ios(7.1)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_item_complete(dispatch_object_t item); @@ -150,7 +150,7 @@ dispatch_introspection_hook_queue_item_complete(dispatch_object_t item); * this is the block object's invoke function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, @@ -175,7 +175,7 @@ dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, * this is the block object's invoke function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue, diff --git a/dispatch/io.h b/dispatch/io.h index 5814bc0f7..a9e6892e5 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -102,7 +102,7 @@ typedef int dispatch_fd_t; * param error An errno condition for the read operation or * zero if the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_read(dispatch_fd_t fd, @@ -140,7 +140,7 @@ dispatch_read(dispatch_fd_t fd, * param error An errno condition for the write operation or * zero if the write was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW void @@ -211,7 +211,7 @@ typedef unsigned long dispatch_io_type_t; * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -247,7 +247,7 @@ dispatch_io_create(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type or non-absolute path specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -287,7 +287,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -349,7 +349,7 @@ typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t _Nullable data, * param error An errno condition for the read operation or zero if * the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW void @@ -402,7 +402,7 @@ dispatch_io_read(dispatch_io_t channel, * param error An errno condition for the write operation or zero * if the write was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW void @@ -441,7 +441,7 @@ typedef unsigned long dispatch_io_close_flags_t; * @param channel The dispatch I/O channel to close. * @param flags The flags for the close operation. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); @@ -468,7 +468,7 @@ dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); * @param channel The dispatch I/O channel to schedule the barrier on. * @param barrier The barrier block. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); @@ -488,7 +488,7 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); * @param channel The dispatch I/O channel to query. * @result The file descriptor underlying the channel, or -1. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_fd_t dispatch_io_get_descriptor(dispatch_io_t channel); @@ -509,7 +509,7 @@ dispatch_io_get_descriptor(dispatch_io_t channel); * @param channel The dispatch I/O channel on which to set the policy. * @param high_water The number of bytes to use as a high water mark. 
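For the dispatch/io.h hunks around this point: a minimal sketch of a random-access channel created from a path, with one read and an explicit close. The path, sizes, and queue label are illustrative; the semaphore is only there so the example does not exit before the handler runs.

```c
#include <dispatch/dispatch.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.io", NULL);
	dispatch_semaphore_t done = dispatch_semaphore_create(0);

	/* The cleanup handler runs once the channel is closed and the fd released. */
	dispatch_io_t ch = dispatch_io_create_with_path(DISPATCH_IO_RANDOM,
			"/etc/hosts", O_RDONLY, 0, q, ^(int error) {
		if (error) fprintf(stderr, "cleanup error: %d\n", error);
	});

	dispatch_io_read(ch, 0 /* offset */, 4096 /* length */, q,
			^(bool io_done, dispatch_data_t data, int error) {
		if (data) printf("got %zu bytes\n", dispatch_data_get_size(data));
		if (io_done || error) dispatch_semaphore_signal(done);
	});

	dispatch_semaphore_wait(done, DISPATCH_TIME_FOREVER);
	dispatch_io_close(ch, 0);
	dispatch_release(ch);
	dispatch_release(done);
	dispatch_release(q);
	return 0;
}
```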
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water); @@ -540,7 +540,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water); * @param channel The dispatch I/O channel on which to set the policy. * @param low_water The number of bytes to use as a low water mark. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water); @@ -579,7 +579,7 @@ typedef unsigned long dispatch_io_interval_flags_t; * @param flags Flags indicating desired data delivery behavior at * interval time. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_set_interval(dispatch_io_t channel, diff --git a/dispatch/object.h b/dispatch/object.h index 8b2030138..3ff36c2d3 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -92,14 +92,13 @@ typedef union { struct dispatch_source_s *_ds; struct dispatch_mach_s *_dm; struct dispatch_mach_msg_s *_dmsg; - struct dispatch_timer_aggregate_s *_dta; struct dispatch_source_attr_s *_dsa; struct dispatch_semaphore_s *_dsema; struct dispatch_data_s *_ddata; struct dispatch_io_s *_dchannel; struct dispatch_operation_s *_doperation; struct dispatch_disk_s *_ddisk; -} dispatch_object_t __attribute__((__transparent_union__)); +} dispatch_object_t DISPATCH_TRANSPARENT_UNION; /*! @parseOnly */ #define DISPATCH_DECL(name) typedef struct name##_s *name##_t /*! @parseOnly */ @@ -201,7 +200,7 @@ __BEGIN_DECLS * The object to retain. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void @@ -229,7 +228,7 @@ dispatch_retain(dispatch_object_t object); * The object to release. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void @@ -253,7 +252,7 @@ dispatch_release(dispatch_object_t object); * @result * The context of the object; may be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void *_Nullable @@ -272,7 +271,7 @@ dispatch_get_context(dispatch_object_t object); * The new client defined context for the object. This may be NULL. * */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_context(dispatch_object_t object, void *_Nullable context); @@ -298,7 +297,7 @@ dispatch_set_context(dispatch_object_t object, void *_Nullable context); * The context parameter passed to the finalizer function is the current * context of the dispatch object at the time the finalizer call is made. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_finalizer_f(dispatch_object_t object, @@ -326,8 +325,7 @@ dispatch_set_finalizer_f(dispatch_object_t object, * The object to be activated. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_activate(dispatch_object_t object); @@ -350,7 +348,7 @@ dispatch_activate(dispatch_object_t object); * The object to be suspended. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_suspend(dispatch_object_t object); @@ -379,7 +377,7 @@ dispatch_suspend(dispatch_object_t object); * The object to be resumed. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_resume(dispatch_object_t object); @@ -541,13 +539,13 @@ dispatch_testcancel(void *object); * @param message * The message to log above and beyond the introspection. */ -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,3))) void dispatch_debug(dispatch_object_t object, const char *message, ...); -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,0))) void diff --git a/dispatch/once.h b/dispatch/once.h index a8f56441c..68acfe803 100644 --- a/dispatch/once.h +++ b/dispatch/once.h @@ -58,7 +58,7 @@ typedef long dispatch_once_t; * initialized by the block. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void @@ -82,7 +82,7 @@ _dispatch_once(dispatch_once_t *predicate, #define dispatch_once _dispatch_once #endif -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void diff --git a/dispatch/queue.h b/dispatch/queue.h index 264c34418..b1dd8e547 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -103,7 +103,7 @@ __BEGIN_DECLS * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_async(dispatch_queue_t queue, dispatch_block_t block); @@ -133,7 +133,7 @@ dispatch_async(dispatch_queue_t queue, dispatch_block_t block); * dispatch_async_f(). * The result of passing NULL in this parameter is undefined. 
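The dispatch/once.h hunk above annotates dispatch_once. A minimal sketch of the canonical one-time-initialization pattern (the initializer body and label are illustrative):

```c
#include <dispatch/dispatch.h>
#include <stdio.h>

static dispatch_queue_t shared_queue;

static dispatch_queue_t
get_shared_queue(void)
{
	/* The predicate must have static or global storage. */
	static dispatch_once_t once;
	dispatch_once(&once, ^{
		shared_queue = dispatch_queue_create("com.example.shared", NULL);
	});
	return shared_queue;
}

int main(void)
{
	printf("label: %s\n", dispatch_queue_get_label(get_shared_queue()));
	return 0;
}
```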
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_f(dispatch_queue_t queue, @@ -171,7 +171,7 @@ dispatch_async_f(dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); @@ -199,7 +199,7 @@ dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); * dispatch_sync_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_sync_f(dispatch_queue_t queue, @@ -232,7 +232,7 @@ dispatch_sync_f(dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t queue, @@ -265,7 +265,7 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue, * current index of iteration. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t queue, @@ -301,12 +301,12 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * @result * Returns the current queue. */ -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_current_queue(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; /*! @@ -415,7 +415,7 @@ typedef unsigned int dispatch_qos_class_t; * Returns the requested global queue or NULL if the requested global queue * does not exist. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_global_queue(long identifier, unsigned long flags); @@ -454,7 +454,7 @@ DISPATCH_DECL(dispatch_queue_attr); #define DISPATCH_QUEUE_CONCURRENT \ DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, \ _dispatch_queue_attr_concurrent) -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; @@ -498,8 +498,7 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; * The new value combines the attributes specified by the 'attr' parameter with * the initially inactive attribute. 
*/ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_initially_inactive( @@ -556,21 +555,9 @@ dispatch_queue_attr_make_initially_inactive( * asynchronously. This is the behavior of the global concurrent queues. */ DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, - DISPATCH_AUTORELEASE_FREQUENCY_INHERIT - DISPATCH_ENUM_AVAILABLE(OSX, 10.12) - DISPATCH_ENUM_AVAILABLE(IOS, 10.0) - DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) - DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 0, - DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM - DISPATCH_ENUM_AVAILABLE(OSX, 10.12) - DISPATCH_ENUM_AVAILABLE(IOS, 10.0) - DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) - DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 1, - DISPATCH_AUTORELEASE_FREQUENCY_NEVER - DISPATCH_ENUM_AVAILABLE(OSX, 10.12) - DISPATCH_ENUM_AVAILABLE(IOS, 10.0) - DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) - DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 2, + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0, + DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 1, + DISPATCH_AUTORELEASE_FREQUENCY_NEVER DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 2, ); /*! @@ -610,8 +597,7 @@ DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, * This new value combines the attributes specified by the 'attr' parameter and * the chosen autorelease frequency. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_with_autorelease_frequency( @@ -671,7 +657,7 @@ dispatch_queue_attr_make_with_autorelease_frequency( * The new value combines the attributes specified by the 'attr' parameter and * the new QOS class and relative priority. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, @@ -736,8 +722,7 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, * @result * The newly created dispatch queue. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -788,7 +773,7 @@ dispatch_queue_create_with_target(const char *_Nullable label, * @result * The newly created dispatch queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -818,7 +803,7 @@ dispatch_queue_create(const char *_Nullable label, * @result * The label of the queue. 
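The dispatch/queue.h hunks above consolidate availability on the queue-attribute constructors. A minimal sketch composing them (assuming a platform that defines QoS classes, as Apple platforms do; the label is illustrative): build an attribute that is concurrent, utility-QoS, and initially inactive, then activate the queue once setup is done.

```c
#include <dispatch/dispatch.h>

int main(void)
{
	/* Compose attributes: concurrent + utility QoS + initially inactive. */
	dispatch_queue_attr_t attr = DISPATCH_QUEUE_CONCURRENT;
	attr = dispatch_queue_attr_make_with_qos_class(attr, QOS_CLASS_UTILITY, 0);
	attr = dispatch_queue_attr_make_initially_inactive(attr);

	dispatch_queue_t q = dispatch_queue_create("com.example.inactive", attr);

	/* Further configuration could happen here, e.g. dispatch_set_target_queue(). */

	/* Work submitted before activation does not run until this call. */
	dispatch_activate(q);

	dispatch_sync(q, ^{ /* first block to execute on the queue */ });
	dispatch_release(q);
	return 0;
}
```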
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW const char * dispatch_queue_get_label(dispatch_queue_t _Nullable queue); @@ -857,7 +842,7 @@ dispatch_queue_get_label(dispatch_queue_t _Nullable queue); * - QOS_CLASS_BACKGROUND * - QOS_CLASS_UNSPECIFIED */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW dispatch_qos_class_t dispatch_queue_get_qos_class(dispatch_queue_t queue, @@ -922,7 +907,7 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue * to the default target queue for the given object type. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_target_queue(dispatch_object_t object, @@ -941,7 +926,7 @@ dispatch_set_target_queue(dispatch_object_t object, * Applications that call NSApplicationMain() or CFRunLoopRun() on the * main thread do not need to call dispatch_main(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NORETURN void dispatch_main(void); @@ -969,7 +954,7 @@ dispatch_main(void); * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_after(dispatch_time_t when, @@ -1002,7 +987,7 @@ dispatch_after(dispatch_time_t when, * dispatch_after_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_after_f(dispatch_time_t when, @@ -1049,7 +1034,7 @@ dispatch_after_f(dispatch_time_t when, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); @@ -1083,7 +1068,7 @@ dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); * dispatch_barrier_async_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_async_f(dispatch_queue_t queue, @@ -1111,7 +1096,7 @@ dispatch_barrier_async_f(dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_barrier_sync(dispatch_queue_t queue, @@ -1143,7 +1128,7 @@ dispatch_barrier_sync(dispatch_queue_t queue, * dispatch_barrier_sync_f(). * The result of passing NULL in this parameter is undefined. 
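For the dispatch_barrier_async/dispatch_barrier_sync hunks above: a minimal reader/writer sketch on a concurrent queue, where the barrier serializes the write relative to concurrently running reads (names and values are illustrative).

```c
#include <dispatch/dispatch.h>

static int shared_value;

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.rw",
			DISPATCH_QUEUE_CONCURRENT);

	/* Readers may run concurrently with each other... */
	dispatch_async(q, ^{ int v = shared_value; (void)v; });

	/* ...but a barrier block runs exclusively: after earlier blocks finish
	 * and before later ones start, so the write cannot race the reads. */
	dispatch_barrier_async(q, ^{ shared_value = 42; });

	dispatch_async(q, ^{ int v = shared_value; (void)v; });

	/* Drain the queue before exiting. */
	dispatch_barrier_sync(q, ^{});
	dispatch_release(q);
	return 0;
}
```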
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_sync_f(dispatch_queue_t queue, @@ -1186,7 +1171,7 @@ dispatch_barrier_sync_f(dispatch_queue_t queue, * The destructor function pointer. This may be NULL and is ignored if context * is NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, @@ -1215,7 +1200,7 @@ dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, * @result * The context for the specified key or NULL if no context was found. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void *_Nullable @@ -1242,7 +1227,7 @@ dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); * @result * The context for the specified key or NULL if no context was found. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void *_Nullable dispatch_get_specific(const void *key); @@ -1296,8 +1281,7 @@ dispatch_get_specific(const void *key); * The dispatch queue that the current block is expected to run on. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue(dispatch_queue_t queue) @@ -1323,8 +1307,7 @@ dispatch_assert_queue(dispatch_queue_t queue) * The dispatch queue that the current block is expected to run as a barrier on. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue_barrier(dispatch_queue_t queue); @@ -1347,8 +1330,7 @@ dispatch_assert_queue_barrier(dispatch_queue_t queue); * The dispatch queue that the current block is expected not to run on. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue_not(dispatch_queue_t queue) diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index b6139d70d..f5394b45d 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -57,7 +57,7 @@ __BEGIN_DECLS * @result * The newly created semaphore, or NULL on failure. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_semaphore_t @@ -83,7 +83,7 @@ dispatch_semaphore_create(long value); * @result * Returns zero on success, or non-zero if the timeout occurred. 
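The dispatch_queue_set_specific / dispatch_get_specific hunks above: a minimal sketch that uses a static variable's address as the key to tag a queue and detect the tag from inside a block (the tag string is illustrative).

```c
#include <dispatch/dispatch.h>
#include <stdio.h>

/* Keys are compared by address, so any unique static address works. */
static char queue_tag_key;

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.tagged", NULL);

	/* Associate a context value with the queue; the destructor is NULL
	 * because the context is not heap-allocated. */
	dispatch_queue_set_specific(q, &queue_tag_key, (void *)"tagged", NULL);

	dispatch_sync(q, ^{
		/* dispatch_get_specific() consults the current queue (and its
		 * target-queue chain) rather than taking a queue argument. */
		const char *tag = dispatch_get_specific(&queue_tag_key);
		printf("running on: %s\n", tag ? tag : "(untagged)");
	});

	dispatch_release(q);
	return 0;
}
```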
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); @@ -105,7 +105,7 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); * This function returns non-zero if a thread is woken. Otherwise, zero is * returned. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_semaphore_signal(dispatch_semaphore_t dsema); diff --git a/dispatch/source.h b/dispatch/source.h index 63b3ff365..6992d4226 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -79,7 +79,7 @@ typedef const struct dispatch_source_type_s *dispatch_source_type_t; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(data_add); /*! @@ -90,9 +90,24 @@ DISPATCH_SOURCE_TYPE_DECL(data_add); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(data_or); +/*! + * @const DISPATCH_SOURCE_TYPE_DATA_REPLACE + * @discussion A dispatch source that tracks data obtained via calls to + * dispatch_source_merge_data(). Newly obtained data values replace existing + * data values not yet delivered to the source handler + * + * A data value of zero will cause the source handler to not be invoked. + * + * The handle is unused (pass zero for now). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_DATA_REPLACE (&_dispatch_source_type_data_replace) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_SOURCE_TYPE_DECL(data_replace); + /*! * @const DISPATCH_SOURCE_TYPE_MACH_SEND * @discussion A dispatch source that monitors a Mach port for dead name @@ -101,7 +116,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_or); * The mask is a mask of desired events from dispatch_source_mach_send_flags_t. */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_send); /*! @@ -111,7 +126,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_recv); /*! @@ -124,7 +139,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_recv); */ #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ (&_dispatch_source_type_memorypressure) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.9), ios(8.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(memorypressure); /*! @@ -135,7 +150,7 @@ DISPATCH_SOURCE_TYPE_DECL(memorypressure); * The mask is a mask of desired events from dispatch_source_proc_flags_t. 
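DISPATCH_SOURCE_TYPE_DATA_REPLACE, added above, is the one genuinely new interface in this header: per its comment, a merged value replaces (rather than adds to or ORs into) any value not yet delivered to the handler. A minimal sketch, with illustrative label and values; the empty dispatch_sync is only a crude way to let the handler run in this toy program.

```c
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.replace", NULL);

	dispatch_source_t src = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_DATA_REPLACE,
			0 /* handle unused */, 0 /* mask unused */, q);

	dispatch_source_set_event_handler(src, ^{
		/* Only the most recently merged value is observed here. */
		printf("latest value: %lu\n", dispatch_source_get_data(src));
	});
	dispatch_resume(src);

	/* If the handler has not run yet, 3 replaces 1 and 2 rather than
	 * accumulating (contrast with DISPATCH_SOURCE_TYPE_DATA_ADD). */
	dispatch_source_merge_data(src, 1);
	dispatch_source_merge_data(src, 2);
	dispatch_source_merge_data(src, 3);

	dispatch_sync(q, ^{});	/* sketch: give the handler a chance to run */
	dispatch_source_cancel(src);
	dispatch_release(src);
	dispatch_release(q);
	return 0;
}
```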
*/ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(proc); /*! @@ -146,7 +161,7 @@ DISPATCH_SOURCE_TYPE_DECL(proc); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(read); /*! @@ -156,7 +171,7 @@ DISPATCH_SOURCE_TYPE_DECL(read); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(signal); /*! @@ -167,7 +182,7 @@ DISPATCH_SOURCE_TYPE_DECL(signal); * The mask specifies which flags from dispatch_source_timer_flags_t to apply. */ #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(timer); /*! @@ -178,7 +193,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer); * The mask is a mask of desired events from dispatch_source_vnode_flags_t. */ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(vnode); /*! @@ -189,7 +204,7 @@ DISPATCH_SOURCE_TYPE_DECL(vnode); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(write); /*! @@ -361,7 +376,7 @@ typedef unsigned long dispatch_source_timer_flags_t; * @result * The newly created dispatch source. Or NULL if invalid arguments are passed. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_source_t @@ -384,7 +399,7 @@ dispatch_source_create(dispatch_source_type_t type, * The event handler block to submit to the source's target queue. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler(dispatch_source_t source, @@ -406,7 +421,7 @@ dispatch_source_set_event_handler(dispatch_source_t source, * The context parameter passed to the event handler function is the context of * the dispatch source current at the time the event handler was set. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler_f(dispatch_source_t source, @@ -425,12 +440,13 @@ dispatch_source_set_event_handler_f(dispatch_source_t source, * the source's event handler block has returned. * * IMPORTANT: - * A cancellation handler is required for file descriptor and mach port based - * sources in order to safely close the descriptor or destroy the port. Closing - * the descriptor or port before the cancellation handler may result in a race - * condition. 
If a new descriptor is allocated with the same value as the - * recently closed descriptor while the source's event handler is still running, - * the event handler may read/write data to the wrong descriptor. + * Source cancellation and a cancellation handler are required for file + * descriptor and mach port based sources in order to safely close the + * descriptor or destroy the port. + * Closing the descriptor or port before the cancellation handler is invoked may + * result in a race condition. If a new descriptor is allocated with the same + * value as the recently closed descriptor while the source's event handler is + * still running, the event handler may read/write data to the wrong descriptor. * * @param source * The dispatch source to modify. @@ -440,7 +456,7 @@ dispatch_source_set_event_handler_f(dispatch_source_t source, * The cancellation handler block to submit to the source's target queue. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler(dispatch_source_t source, @@ -465,7 +481,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t source, * The context parameter passed to the event handler function is the current * context of the dispatch source at the time the handler call is made. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler_f(dispatch_source_t source, @@ -493,7 +509,7 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t source, * The dispatch source to be canceled. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_cancel(dispatch_source_t source); @@ -511,7 +527,7 @@ dispatch_source_cancel(dispatch_source_t source); * @result * Non-zero if canceled and zero if not canceled. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW long @@ -542,7 +558,7 @@ dispatch_source_testcancel(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: file descriptor (int) * DISPATCH_SOURCE_TYPE_WRITE: file descriptor (int) */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW uintptr_t @@ -573,7 +589,7 @@ dispatch_source_get_handle(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: n/a */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW unsigned long @@ -611,7 +627,7 @@ dispatch_source_get_mask(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: estimated buffer space available */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW unsigned long @@ -633,7 +649,7 @@ dispatch_source_get_data(dispatch_source_t source); * as specified by the dispatch source type. 
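The rewritten IMPORTANT note above (cancel the source and close the descriptor only from the cancellation handler) is easiest to see in code. A minimal sketch for a read source; the pipe, queue label, and the empty dispatch_sync used to let the handler fire are illustrative.

```c
#include <dispatch/dispatch.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	if (pipe(fds)) return 1;
	const int rfd = fds[0], wfd = fds[1];	/* capture plain ints in blocks */

	dispatch_queue_t q = dispatch_queue_create("com.example.readsrc", NULL);
	dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
			(uintptr_t)rfd, 0, q);

	dispatch_source_set_event_handler(src, ^{
		char buf[64];
		printf("readable: %zd bytes\n", read(rfd, buf, sizeof(buf)));
	});

	/* Close the descriptors only from the cancellation handler: once it
	 * runs, the event handler is guaranteed not to be executing and will
	 * not run again, so the descriptor value cannot be reused under it. */
	dispatch_source_set_cancel_handler(src, ^{
		close(rfd);
		close(wfd);
	});

	dispatch_resume(src);
	(void)write(wfd, "x", 1);
	dispatch_sync(q, ^{});		/* sketch: let the event handler run once */

	dispatch_source_cancel(src);	/* triggers the cancellation handler */
	dispatch_release(src);
	dispatch_release(q);
	return 0;
}
```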
A value of zero has no effect * and will not result in the submission of the event handler block. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_merge_data(dispatch_source_t source, unsigned long value); @@ -685,7 +701,7 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value); * @param leeway * The nanosecond leeway for the timer. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_set_timer(dispatch_source_t source, @@ -715,7 +731,7 @@ dispatch_source_set_timer(dispatch_source_t source, * The registration handler block to submit to the source's target queue. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler(dispatch_source_t source, @@ -740,7 +756,7 @@ dispatch_source_set_registration_handler(dispatch_source_t source, * The context parameter passed to the registration handler function is the * current context of the dispatch source at the time the handler call is made. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler_f(dispatch_source_t source, diff --git a/dispatch/time.h b/dispatch/time.h index c2152ea14..ce99f2700 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -89,7 +89,7 @@ typedef uint64_t dispatch_time_t; * @result * A new dispatch_time_t. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t dispatch_time(dispatch_time_t when, int64_t delta); @@ -113,7 +113,7 @@ dispatch_time(dispatch_time_t when, int64_t delta); * @result * A new dispatch_time_t. 
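For the dispatch/time.h hunks above: a minimal sketch contrasting dispatch_time (based on the host clock, which on Apple platforms does not advance while the machine sleeps) with dispatch_walltime (based on gettimeofday, where NULL means "now"), both feeding dispatch_after. The delays and label are illustrative.

```c
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.timer", NULL);

	/* Two seconds from now on the default host clock. */
	dispatch_time_t t1 = dispatch_time(DISPATCH_TIME_NOW, 2 * NSEC_PER_SEC);

	/* Two seconds from the current wall-clock time. */
	dispatch_time_t t2 = dispatch_walltime(NULL, 2 * NSEC_PER_SEC);

	dispatch_after(t1, q, ^{ puts("two seconds (host clock)"); });
	dispatch_after(t2, q, ^{ puts("two seconds (wall clock)"); });

	dispatch_main();	/* park the main thread; the blocks fire on q */
}
```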
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t dispatch_walltime(const struct timespec *_Nullable when, int64_t delta); diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 9fe06aa92..ce73d95c2 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -85,6 +85,26 @@ 6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; }; 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; }; 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6E4BACBD1D48A41500B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC21D48A42000B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC41D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC61D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */ = 
{isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; }; 6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; 6E99558A1C3AF7900071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; @@ -99,6 +119,25 @@ 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; 6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; + 6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + 6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + 6EA7938F1D458A5E00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + 6EA962971D48622600759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA962981D48622700759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629A1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629C1D48622A00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A21D48625200759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A41D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = 
{isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; @@ -142,6 +181,7 @@ 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; 72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -463,13 +503,6 @@ remoteGlobalIDString = 92F3FECA1BEC69E500025962; remoteInfo = darwintests; }; - C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CB9108E6C7200FAA873; - remoteInfo = dispatch_deadname; - }; C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -588,7 +621,12 @@ 6E326B171C239431002A6505 /* dispatch_timer_timeout.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_timeout.c; sourceTree = ""; }; 6E326B441C239B61002A6505 /* dispatch_priority.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_priority.c; sourceTree = ""; }; 6E4130C91B431697001A152D /* backward-compat.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "backward-compat.xcconfig"; sourceTree = ""; }; + 6E4BACBC1D48A41500B562AE /* mach.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach.c; sourceTree = ""; }; + 6E4BACC91D48A89500B562AE /* mach_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_internal.h; sourceTree = ""; }; 6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = ""; }; + 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_kevent.c; sourceTree = ""; }; + 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_internal.h; sourceTree = ""; }; + 6E5ACCBD1D3C6719007DA2B4 /* event.c */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event.c; sourceTree = ""; }; 6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_trysync.c; sourceTree = ""; }; 6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_apply.c; sourceTree = ""; }; 6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cf_main.c; sourceTree = ""; }; @@ -614,15 +652,21 @@ 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = ""; }; 6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = ""; }; 6EA2CB841C005DEF0076794A /* dispatch_source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_source.c; sourceTree = ""; }; + 6EA7937D1D456D1300929B1B /* event_epoll.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_epoll.c; sourceTree = ""; }; + 6EA793881D458A5800929B1B /* event_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_config.h; sourceTree = ""; }; 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; }; 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = ""; }; 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = ""; }; + 6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = ""; }; + 6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_mach_async.c; sourceTree = ""; }; + 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = ""; }; 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = ""; }; 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = ""; }; 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = ""; }; 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_internal.h; sourceTree = ""; }; 6EF2CAA41C88998A001ABE83 /* lock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lock.h; sourceTree = ""; }; 
6EF2CAAB1C8899D5001ABE83 /* lock.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = lock.c; path = shims/lock.c; sourceTree = ""; }; + 6EFBDA4A1D61A0D600282887 /* priority.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = priority.h; sourceTree = ""; }; 721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = ""; }; 721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = semaphore.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 72406A031AF95DF800DF4E2B /* firehose_reply.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_reply.defs; sourceTree = ""; }; @@ -657,6 +701,9 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_create.c; sourceTree = ""; }; + B6AE9A561D7F53C100AC007F /* perf_async_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_async_bench.m; sourceTree = ""; }; + B6AE9A581D7F53CB00AC007F /* perf_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_bench.m; sourceTree = ""; }; C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; }; C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = ""; }; C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -701,8 +748,6 @@ E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; - E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = ""; }; - E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = ""; }; 
E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = ""; }; E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = ""; }; @@ -817,8 +862,6 @@ FC7BEDAF0E83626100161930 /* Dispatch Private Headers */, FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */, 08FB7795FE84155DC02AAC07 /* Dispatch Source */, - 6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */, - 6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */, 92F3FEC91BEC687200025962 /* Darwin Tests */, C6A0FF2B0290797F04C91782 /* Documentation */, 1AB674ADFE9D54B511CA2CBB /* Products */, @@ -845,6 +888,7 @@ E4B515DC164B32E000E003AF /* introspection.c */, 5A27262510F26F1900751FBC /* io.c */, 6EF2CAAB1C8899D5001ABE83 /* lock.c */, + 6E4BACBC1D48A41500B562AE /* mach.c */, 9661E56A0F3E7DDF00749F3E /* object.c */, E4FC3263145F46C9002FBDDB /* object.m */, 96DF70BD0F38FE3C0074BD99 /* once.c */, @@ -858,6 +902,8 @@ 6EA283D01CAB93270041B2E0 /* libdispatch.codes */, FC7BED950E8361E600161930 /* protocol.defs */, E43570B8126E93380097AB9F /* provider.d */, + 6E5ACCAF1D3BF2A0007DA2B4 /* event */, + 6EF0B2641BA8C3A0007FA4F6 /* firehose */, ); name = "Dispatch Source"; path = src; @@ -887,11 +933,29 @@ 4552540519B1384900B88766 /* jsgc_bench */, 4552540719B1384900B88766 /* async_bench */, 4552540919B1384900B88766 /* apply_bench */, - C00B0E111C5AEBBE000330B3 /* dispatch_deadname */, ); name = Products; sourceTree = ""; }; + 6E5ACCAE1D3BF27F007DA2B4 /* event */ = { + isa = PBXGroup; + children = ( + 6EA793881D458A5800929B1B /* event_config.h */, + 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */, + ); + path = event; + sourceTree = ""; + }; + 6E5ACCAF1D3BF2A0007DA2B4 /* event */ = { + isa = PBXGroup; + children = ( + 6E5ACCBD1D3C6719007DA2B4 /* event.c */, + 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */, + 6EA7937D1D456D1300929B1B /* event_epoll.c */, + ); + path = event; + sourceTree = ""; + }; 6E9B6AE21BB39793009E324D /* OS Public Headers */ = { isa = PBXGroup; children = ( @@ -901,7 +965,7 @@ path = os; sourceTree = ""; }; - 6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */ = { + 6EF0B2641BA8C3A0007FA4F6 /* firehose */ = { isa = PBXGroup; children = ( 72406A391AF9926000DF4E2B /* firehose_types.defs */, @@ -911,11 +975,10 @@ 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */, 72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */, ); - name = "Firehose Source"; - path = src/firehose; + path = firehose; sourceTree = ""; }; - 6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */ = { + 6EF0B2661BA8C43D007FA4F6 /* firehose */ = { isa = PBXGroup; children = ( 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */, @@ -923,8 +986,7 @@ 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */, 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */, ); - name = "Firehose Project Headers"; - path = src/firehose; + path = firehose; sourceTree = ""; }; 92F3FEC91BEC687200025962 /* Darwin Tests */ = { @@ -941,18 +1003,20 @@ 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, + 6EC5ABE31D4436E4004F8674 /* 
dispatch_deadname.c */, 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */, 6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */, 6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */, - 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */, 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */, 6E326ABE1C22A577002A6505 /* dispatch_io.c */, + 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */, C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */, 6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */, 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */, 6E326B441C239B61002A6505 /* dispatch_priority.c */, 6E326AB51C225477002A6505 /* dispatch_proc.c */, 6E326AB31C224870002A6505 /* dispatch_qos.c */, + B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */, 6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */, 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */, 6E326AB91C229866002A6505 /* dispatch_read.c */, @@ -977,6 +1041,10 @@ 6E326AB71C225FCA002A6505 /* dispatch_vnode.c */, 6E67D9171C17BA7200FC98AC /* nsoperation.m */, 6E4FC9D11C84123600520351 /* os_venture_basic.c */, + B6AE9A561D7F53C100AC007F /* perf_async_bench.m */, + B6AE9A581D7F53CB00AC007F /* perf_bench.m */, + 6EC670C71E37E201004F10D6 /* perf_mach_async.c */, + 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */, 92F3FE921BEC686300025962 /* Makefile */, 6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */, 6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */, @@ -1033,8 +1101,6 @@ 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, E448727914C6215D00BB45C2 /* libdispatch.order */, - E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */, - E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */, E421E5FD1716BEA70090DC9B /* libdispatch.interposable */, ); path = xcodeconfig; @@ -1055,8 +1121,8 @@ isa = PBXGroup; children = ( E47D6BB5125F0F800070D91C /* resolved.h */, - E44EBE371251656400645D88 /* resolver.c */, E44EBE331251654000645D88 /* resolver.h */, + E44EBE371251656400645D88 /* resolver.c */, ); path = resolver; sourceTree = ""; @@ -1158,6 +1224,7 @@ E4128ED513BA9A1700ABB2CB /* hw_config.h */, 6EF2CAA41C88998A001ABE83 /* lock.h */, FC1832A2109923C7003403D5 /* perfmon.h */, + 6EFBDA4A1D61A0D600282887 /* priority.h */, FC1832A3109923C7003403D5 /* time.h */, FC1832A4109923C7003403D5 /* tsd.h */, E48EC97B1835BADD00EAC4F1 /* yield.h */, @@ -1213,6 +1280,7 @@ E44757D917F4572600B82CA1 /* inline_internal.h */, E4C1ED6E1263E714000D3C8B /* data_internal.h */, 5A0095A110F274B0000E2A31 /* io_internal.h */, + 6E4BACC91D48A89500B562AE /* mach_internal.h */, 965ECC200F3EAB71004DDD89 /* object_internal.h */, 96929D950F3EA2170041FF5D /* queue_internal.h */, 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, @@ -1222,6 +1290,8 @@ E422A0D412A557B5005E5BDB /* trace.h */, E44F9DA816543F79001DCD38 /* introspection_internal.h */, 96929D830F3EA1020041FF5D /* shims.h */, + 6E5ACCAE1D3BF27F007DA2B4 /* event */, + 6EF0B2661BA8C43D007FA4F6 /* firehose */, FC1832A0109923B3003403D5 /* shims */, ); name = "Dispatch Project Headers"; @@ -1266,6 +1336,7 @@ E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, + 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, @@ -1290,9 +1361,11 @@ E44A8E721805C473009FFDB6 /* 
voucher_private.h in Headers */, 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */, FC1832A8109923C7003403D5 /* tsd.h in Headers */, + 6EA793891D458A5800929B1B /* event_config.h in Headers */, 96929D840F3EA1020041FF5D /* atomic.h in Headers */, 96929D850F3EA1020041FF5D /* shims.h in Headers */, FC1832A7109923C7003403D5 /* time.h in Headers */, + 6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */, 6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */, E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */, 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */, @@ -1302,6 +1375,7 @@ 6EF2CAA51C88998A001ABE83 /* lock.h in Headers */, E422A0D512A557B5005E5BDB /* trace.h in Headers */, E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */, + 6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */, 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, E454569314746F1B00106147 /* object_private.h in Headers */, @@ -1319,6 +1393,7 @@ files = ( E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, + 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, E49F24AD125D57FA0057C971 /* object.h in Headers */, E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, @@ -1344,6 +1419,7 @@ E49F24BE125D57FA0057C971 /* source_internal.h in Headers */, E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */, E4C1ED701263E714000D3C8B /* data_internal.h in Headers */, + 6EA7938F1D458A5E00929B1B /* event_config.h in Headers */, 6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */, E49F24BF125D57FA0057C971 /* io_internal.h in Headers */, E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */, @@ -1385,6 +1461,7 @@ E44F9DB51654403F001DCD38 /* source_internal.h in Headers */, E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */, E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, + 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */, 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */, E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, @@ -1393,6 +1470,7 @@ 6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */, 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */, E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */, + 6EA7938E1D458A5C00929B1B /* event_config.h in Headers */, 6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */, E44F9DB71654404F001DCD38 /* shims.h in Headers */, E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */, @@ -1614,7 +1692,7 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0800; + LastUpgradeCheck = 0820; TargetAttributes = { 3F3C9326128E637B0042B1F7 = { ProvisioningStyle = Manual; @@ -1706,10 +1784,10 @@ 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, 4552540A19B1389700B88766 /* libdispatch_tests */, + 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */, + 92F3FECA1BEC69E500025962 /* darwintests */, 6E040C621C499B1B00411A2E /* libfirehose_kernel */, 6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */, - 92F3FECA1BEC69E500025962 /* darwintests */, - 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */, ); }; /* End PBXProject section */ @@ -1743,13 +1821,6 @@ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; sourceTree = 
BUILT_PRODUCTS_DIR; }; - C00B0E111C5AEBBE000330B3 /* dispatch_deadname */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_deadname; - remoteRef = C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -2038,6 +2109,7 @@ files = ( C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */, C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */, + 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */, 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */, C00B0DF41C5AEBBE000330B3 /* init.c in Sources */, C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */, @@ -2045,6 +2117,7 @@ C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */, C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */, + 6E4BACC81D48A42400B562AE /* mach.c in Sources */, C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */, C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */, @@ -2054,8 +2127,10 @@ C00B0E001C5AEBBE000330B3 /* source.c in Sources */, C00B0E011C5AEBBE000330B3 /* time.c in Sources */, C00B0E021C5AEBBE000330B3 /* data.c in Sources */, + 6EA962A61D48625500759D53 /* event_kevent.c in Sources */, C00B0E031C5AEBBE000330B3 /* io.c in Sources */, C00B0E041C5AEBBE000330B3 /* transform.c in Sources */, + 6EA9629E1D48622C00759D53 /* event.c in Sources */, C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -2066,6 +2141,7 @@ files = ( C01866A61C5973210040FC07 /* protocol.defs in Sources */, C01866A71C5973210040FC07 /* resolver.c in Sources */, + 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */, 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */, C01866A81C5973210040FC07 /* init.c in Sources */, C01866A91C5973210040FC07 /* queue.c in Sources */, @@ -2073,6 +2149,7 @@ C01866AB1C5973210040FC07 /* firehose.defs in Sources */, C01866AC1C5973210040FC07 /* block.cpp in Sources */, C01866AD1C5973210040FC07 /* semaphore.c in Sources */, + 6E4BACC71D48A42300B562AE /* mach.c in Sources */, C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */, C01866AF1C5973210040FC07 /* once.c in Sources */, C01866B01C5973210040FC07 /* voucher.c in Sources */, @@ -2082,8 +2159,10 @@ C01866B41C5973210040FC07 /* source.c in Sources */, C01866B51C5973210040FC07 /* time.c in Sources */, C01866B61C5973210040FC07 /* data.c in Sources */, + 6EA962A51D48625400759D53 /* event_kevent.c in Sources */, C01866B71C5973210040FC07 /* io.c in Sources */, C01866B81C5973210040FC07 /* transform.c in Sources */, + 6EA9629D1D48622B00759D53 /* event.c in Sources */, C01866B91C5973210040FC07 /* allocator.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -2097,7 +2176,9 @@ 6E9955CF1C3B218E0071D40C /* venture.c in Sources */, 6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */, 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */, + 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */, E49F2499125D48D80057C971 /* resolver.c in Sources */, + 6E4BACBD1D48A41500B562AE /* mach.c in Sources */, E44EBE3E1251659900645D88 /* init.c in Sources */, FC7BED990E8361E600161930 /* queue.c in Sources */, 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, @@ -2107,11 +2188,13 @@ 9676A0E10F3E755D00713ADB /* apply.c in Sources */, 9661E56B0F3E7DDF00749F3E /* object.c in Sources */, 965CD6350F3E806200D4E28D /* 
benchmark.c in Sources */, + 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */, 96A8AA870F41E7A400CD570B /* source.c in Sources */, 96032E4B0F5CC8C700241C5F /* time.c in Sources */, 5AAB45C010D30B79004407EA /* data.c in Sources */, 5A27262610F26F1900751FBC /* io.c in Sources */, E43A72501AF85BBC00BAA921 /* block.cpp in Sources */, + 6EA962971D48622600759D53 /* event.c in Sources */, C9C5F80E143C1771006DC718 /* transform.c in Sources */, E4FC3264145F46C9002FBDDB /* object.m in Sources */, 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */, @@ -2124,12 +2207,15 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 6E4BACC61D48A42300B562AE /* mach.c in Sources */, E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */, E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */, E46DBC4214EE10C80001F9F6 /* init.c in Sources */, E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, + 6EA962A41D48625300759D53 /* event_kevent.c in Sources */, 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */, + 6EA9629C1D48622A00759D53 /* event.c in Sources */, 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */, E43A72881AF85BE900BAA921 /* block.cpp in Sources */, E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, @@ -2146,6 +2232,7 @@ E46DBC4C14EE10C80001F9F6 /* io.c in Sources */, E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */, 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */, + 6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2158,7 +2245,9 @@ 6E9956051C3B219B0071D40C /* venture.c in Sources */, 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */, 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */, + 6EA962A01D48625100759D53 /* event_kevent.c in Sources */, E49F24C9125D57FA0057C971 /* resolver.c in Sources */, + 6E4BACC21D48A42000B562AE /* mach.c in Sources */, E49F24CA125D57FA0057C971 /* init.c in Sources */, E49F24CB125D57FA0057C971 /* queue.c in Sources */, E49F24CC125D57FA0057C971 /* semaphore.c in Sources */, @@ -2168,11 +2257,13 @@ E49F24CE125D57FA0057C971 /* apply.c in Sources */, E49F24CF125D57FA0057C971 /* object.c in Sources */, E49F24D0125D57FA0057C971 /* benchmark.c in Sources */, + 6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */, E49F24D1125D57FA0057C971 /* source.c in Sources */, E49F24D2125D57FA0057C971 /* time.c in Sources */, E49F24D3125D57FA0057C971 /* data.c in Sources */, E49F24D4125D57FA0057C971 /* io.c in Sources */, E43A72841AF85BCB00BAA921 /* block.cpp in Sources */, + 6EA962981D48622700759D53 /* event.c in Sources */, C93D6165143E190E00EB9023 /* transform.c in Sources */, E4FC3265145F46C9002FBDDB /* object.m in Sources */, 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */, @@ -2186,11 +2277,13 @@ buildActionMask = 2147483647; files = ( E4B515BD164B2DA300E003AF /* provider.d in Sources */, + 6EA962A31D48625300759D53 /* event_kevent.c in Sources */, E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, E4B515BF164B2DA300E003AF /* resolver.c in Sources */, 6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */, 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */, E4B515C0164B2DA300E003AF /* init.c in Sources */, + 6EA9629B1D48622900759D53 /* event.c in Sources */, E4B515C1164B2DA300E003AF /* queue.c in Sources */, 6E9956021C3B21990071D40C /* venture.c in Sources */, E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, @@ -2202,6 +2295,7 @@ E4B515C6164B2DA300E003AF /* benchmark.c in Sources 
*/, E4B515C7164B2DA300E003AF /* source.c in Sources */, E4B515C8164B2DA300E003AF /* time.c in Sources */, + 6E4BACC51D48A42200B562AE /* mach.c in Sources */, E4B515C9164B2DA300E003AF /* data.c in Sources */, E4B515CA164B2DA300E003AF /* io.c in Sources */, E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */, @@ -2209,6 +2303,7 @@ 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */, E4B515CC164B2DA300E003AF /* object.m in Sources */, E4B515CD164B2DA300E003AF /* allocator.c in Sources */, + 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */, E4B515CE164B2DA300E003AF /* data.m in Sources */, E4B515DD164B32E000E003AF /* introspection.c in Sources */, ); @@ -2223,7 +2318,9 @@ 6E9956031C3B219A0071D40C /* venture.c in Sources */, 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */, 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */, + 6EA962A21D48625200759D53 /* event_kevent.c in Sources */, E49F2424125D3C970057C971 /* resolver.c in Sources */, + 6E4BACC41D48A42200B562AE /* mach.c in Sources */, E44EBE5512517EBE00645D88 /* init.c in Sources */, E4EC11AE12514302000DDBD1 /* queue.c in Sources */, E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */, @@ -2233,11 +2330,13 @@ E4EC11B112514302000DDBD1 /* apply.c in Sources */, E4EC11B212514302000DDBD1 /* object.c in Sources */, E4EC11B312514302000DDBD1 /* benchmark.c in Sources */, + 6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */, E4EC11B412514302000DDBD1 /* source.c in Sources */, E4EC11B512514302000DDBD1 /* time.c in Sources */, E4EC11B712514302000DDBD1 /* data.c in Sources */, E4EC11B812514302000DDBD1 /* io.c in Sources */, E43A72861AF85BCC00BAA921 /* block.cpp in Sources */, + 6EA9629A1D48622900759D53 /* event.c in Sources */, C93D6166143E190F00EB9023 /* transform.c in Sources */, E4FC3266145F46C9002FBDDB /* object.m in Sources */, 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */, @@ -2255,7 +2354,9 @@ 6E9956041C3B219B0071D40C /* venture.c in Sources */, 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */, 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */, + 6EA962A11D48625100759D53 /* event_kevent.c in Sources */, E49F2423125D3C960057C971 /* resolver.c in Sources */, + 6E4BACC31D48A42100B562AE /* mach.c in Sources */, E44EBE5712517EBE00645D88 /* init.c in Sources */, E4EC121A12514715000DDBD1 /* queue.c in Sources */, E4EC121B12514715000DDBD1 /* semaphore.c in Sources */, @@ -2265,11 +2366,13 @@ E4EC121D12514715000DDBD1 /* apply.c in Sources */, E4EC121E12514715000DDBD1 /* object.c in Sources */, E4EC121F12514715000DDBD1 /* benchmark.c in Sources */, + 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */, E4EC122012514715000DDBD1 /* source.c in Sources */, E4EC122112514715000DDBD1 /* time.c in Sources */, E4EC122312514715000DDBD1 /* data.c in Sources */, E4EC122412514715000DDBD1 /* io.c in Sources */, E43A72851AF85BCC00BAA921 /* block.cpp in Sources */, + 6EA962991D48622800759D53 /* event.c in Sources */, C93D6167143E190F00EB9023 /* transform.c in Sources */, E4FC3267145F46C9002FBDDB /* object.m in Sources */, 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */, @@ -2513,12 +2616,20 @@ E49F24D9125D57FA0057C971 /* Release */ = { isa = XCBuildConfiguration; buildSettings = { + WARNING_CFLAGS = ( + "-Weverything", + "$(inherited)", + ); }; name = Release; }; E49F24DA125D57FA0057C971 /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { + WARNING_CFLAGS = ( + "-Weverything", + "$(inherited)", + ); }; name = Debug; }; diff --git a/libkqueue b/libkqueue deleted file mode 160000 index 
b3f81ecf6..000000000 --- a/libkqueue +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b3f81ecf680e826c2dc834316b5d77fc1be5a5c7 diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index 95ba1c348..cddcf32aa 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -23,6 +23,10 @@ .Fo dispatch_resume .Fa "dispatch_object_t object" .Fc +.Ft void +.Fo dispatch_activate +.Fa "dispatch_object_t object" +.Fc .Ft "void *" .Fo dispatch_get_context .Fa "dispatch_object_t object" @@ -40,7 +44,7 @@ .Sh DESCRIPTION Dispatch objects share functions for coordinating memory management, suspension, cancellation and context pointers. -.Sh MEMORY MANGEMENT +.Sh MEMORY MANAGEMENT Objects returned by creation functions in the dispatch framework may be uniformly retained and released with the functions .Fn dispatch_retain @@ -123,6 +127,17 @@ dispatch_async(queue, ^{ dispatch_release(object); }); .Ed +.Sh ACTIVATION +Dispatch objects such as queues and sources may be created in an inactive +state. Objects in this state must be activated before any blocks +associated with them will be invoked. Calling +.Fn dispatch_activate +on an active object has no effect. +.Pp +Changing attributes such as the target queue or a source handler is no longer permitted +once the object has been activated (see +.Xr dispatch_set_target_queue 3 , +.Xr dispatch_source_set_event_handler 3 ). .Sh SUSPENSION The invocation of blocks on dispatch queues or dispatch sources may be suspended or resumed with the functions @@ -148,7 +163,7 @@ and .Fn dispatch_resume such that the dispatch object is fully resumed when the last reference is released. The result of releasing all references to a dispatch object while in -a suspended state is undefined. +an inactive or suspended state is undefined. .Sh CONTEXT POINTERS Dispatch objects support supplemental context pointers. The value of the context pointer may be retrieved and updated with diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index 81c291546..da263658a 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -23,6 +23,13 @@ .Fc .Sh DESCRIPTION Dispatch semaphores are used to synchronize threads. +.Pp +The +.Fn dispatch_semaphore_wait +function decrements the semaphore. If the resulting value is less than zero, +it waits for a signal from a thread that increments the semaphore by calling +.Fn dispatch_semaphore_signal +before returning. The .Fa timeout parameter is creatable with the @@ -30,6 +37,13 @@ parameter is creatable with the or .Xr dispatch_walltime 3 functions. +.Pp +The +.Fn dispatch_semaphore_signal +function increments the counting semaphore. If the previous value was less than zero, +it wakes one of the threads that are waiting in +.Fn dispatch_semaphore_wait +before returning. .Sh COMPLETION SYNCHRONIZATION If the .Fa count diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 4da708cfb..b4e9a7ad8 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -113,6 +113,8 @@ DISPATCH_SOURCE_TYPE_DATA_ADD .It DISPATCH_SOURCE_TYPE_DATA_OR .It +DISPATCH_SOURCE_TYPE_DATA_REPLACE +.It DISPATCH_SOURCE_TYPE_MACH_SEND .It DISPATCH_SOURCE_TYPE_MACH_RECV @@ -168,12 +170,34 @@ The result of calling this function from any other context is undefined. 
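A minimal sketch of the wait/signal semantics described in the dispatch_semaphore_create(3) additions above, assuming a plain C (non-ARC) environment where dispatch_release() is still required; the helper name wait_for_one_completion is made up for illustration and is not part of this patch:

    #include <dispatch/dispatch.h>

    /* Waits for a single asynchronous block to finish. dispatch_semaphore_wait()
     * decrements the value; because the semaphore starts at 0 the caller blocks
     * until the worker calls dispatch_semaphore_signal(), which increments the
     * value and wakes the waiter. */
    static void
    wait_for_one_completion(dispatch_queue_t work_queue)
    {
        dispatch_semaphore_t sema = dispatch_semaphore_create(0);

        dispatch_async(work_queue, ^{
            /* ... perform the work ... */
            dispatch_semaphore_signal(sema); /* previous value < 0: wakes the waiter */
        });

        dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
        dispatch_release(sema); /* unnecessary when objects are ObjC/ARC managed */
    }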
The .Fn dispatch_source_merge_data function is intended for use with the -.Vt DISPATCH_SOURCE_TYPE_DATA_ADD -and +.Vt DISPATCH_SOURCE_TYPE_DATA_ADD , .Vt DISPATCH_SOURCE_TYPE_DATA_OR +and +.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE source types. The result of using this function with any other source type is -undefined. Calling this function will atomically add or bitwise OR the data -into the source's data, and trigger the delivery of the source's event handler. +undefined. Data merging is performed according to the source type: +.Bl -tag -width "XXDISPATCH_SOURCE_TYPE_DATA_REPLACE" -compact -offset indent +.It \(bu DISPATCH_SOURCE_TYPE_DATA_ADD +.Vt data +is atomically added to the source's data +.It \(bu DISPATCH_SOURCE_TYPE_DATA_OR +.Vt data +is atomically bitwise ORed into the source's data +.It \(bu DISPATCH_SOURCE_TYPE_DATA_REPLACE +.Vt data +atomically replaces the source's data. +.El +.Pp +If the source data value resulting from the merge operation is 0, the source +handler will not be invoked. This can happen if: +.Bl -bullet -compact -offset indent +.It +the atomic addition wraps for sources of type +.Vt DISPATCH_SOURCE_TYPE_DATA_ADD , +.It +0 is merged for sources of type +.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE . +.El .Pp .Sh SOURCE EVENT HANDLERS In order to receive events from the dispatch source, an event handler should be @@ -265,14 +289,15 @@ The following section contains a summary of supported dispatch event types and the interpretation of their parameters and returned data. .Pp .Vt DISPATCH_SOURCE_TYPE_DATA_ADD , -.Vt DISPATCH_SOURCE_TYPE_DATA_OR +.Vt DISPATCH_SOURCE_TYPE_DATA_OR , +.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE .Pp Sources of this type allow applications to manually trigger the source's event handler via a call to .Fn dispatch_source_merge_data . The data will be merged with the source's pending data via an atomic add or -atomic bitwise OR (based on the source's type), and the event handler block will -be submitted to the source's target queue. The +atomic bitwise OR, or direct replacement (based on the source's type), and the +event handler block will be submitted to the source's target queue. The .Fa data is application defined. These sources have no .Fa handle @@ -295,7 +320,7 @@ The port's corresponding receive right has been destroyed .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. Note that because this source type will request notifications on the provided port, it should not be mixed with the use of @@ -372,7 +397,7 @@ A signal was delivered to the process. .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. .Pp @@ -499,19 +524,6 @@ was created with the timer is based on .Xr gettimeofday 3 . .Pp -.Em Note : -Under the C language, untyped numbers default to the -.Vt int -type. This can lead to truncation bugs when arithmetic operations with other -numbers are expected to generate a -.Vt uint64_t -sized result. When in doubt, use -.Vt ull -as a suffix. For example: -.Bd -literal -offset indent -3ull * NSEC_PER_SEC -.Ed -.Pp .Vt DISPATCH_SOURCE_TYPE_VNODE .Pp Sources of this type monitor the virtual filesystem nodes for state changes. 
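A minimal sketch of the data-merge behaviour documented above, using a DISPATCH_SOURCE_TYPE_DATA_ADD source; the helper name make_counter_source is illustrative only, and dispatch_activate() is used as described in the new ACTIVATION section of dispatch_object.3:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    /* Creates a custom source whose handler receives the sum of all values
     * merged since its last invocation. Merging 0 (or wrapping the addition
     * around to 0) does not trigger the handler, per the text above. */
    static dispatch_source_t
    make_counter_source(dispatch_queue_t target_queue)
    {
        dispatch_source_t src = dispatch_source_create(
                DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, target_queue);

        dispatch_source_set_event_handler(src, ^{
            /* Coalesced total of every dispatch_source_merge_data() call
             * made since the handler last ran. */
            unsigned long pending = dispatch_source_get_data(src);
            printf("processed %lu merged events\n", pending);
        });

        dispatch_activate(src); /* sources are created in an inactive state */
        return src;
    }

    /* Producers on any thread then trigger the handler with, for example:
     *     dispatch_source_merge_data(src, 1);
     */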
@@ -548,7 +560,7 @@ or .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. .Pp diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 4b4f9d863..685898de0 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -80,28 +80,10 @@ parameter is ignored. .Pp Underflow causes the smallest representable value to be returned for a given clock. -.Sh CAVEATS -Under the C language, untyped numbers default to the -.Vt int -type. This can lead to truncation bugs when arithmetic operations with other -numbers are expected to generate a -.Vt int64_t -sized result, such as the -.Fa offset -argument to -.Fn dispatch_time -and -.Fn dispatch_walltime . -When in doubt, use -.Vt ull -as a suffix. For example: -.Bd -literal -offset indent -3ull * NSEC_PER_SEC -.Ed .Sh EXAMPLES Create a milestone two seconds in the future: .Bd -literal -offset indent -milestone = dispatch_time(DISPATCH_TIME_NOW, 2LL * NSEC_PER_SEC); +milestone = dispatch_time(DISPATCH_TIME_NOW, 2 * NSEC_PER_SEC); .Ed .Pp Create a milestone for use as an infinite timeout: @@ -116,6 +98,11 @@ ts.tv_sec = 0x7FFFFFFF; ts.tv_nsec = 0; milestone = dispatch_walltime(&ts, 0); .Ed +.Pp +Use a negative delta to create a milestone an hour before the one above: +.Bd -literal -offset indent +milestone = dispatch_walltime(&ts, -60 * 60 * NSEC_PER_SEC); +.Ed .Sh RETURN VALUE These functions return an abstract value for use with .Fn dispatch_after , diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index 2c6466f94..29b80c30e 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -26,6 +26,7 @@ #include #else #include +#include #include #include #endif @@ -38,39 +39,9 @@ * Layout of structs is subject to change without notice */ -#define FIREHOSE_BUFFER_CHUNK_SIZE 4096ul #define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE 2048ul #define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT 16 -typedef union { - uint64_t fbc_atomic_pos; -#define FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC (1ULL << 0) -#define FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC (1ULL << 16) -#define FIREHOSE_BUFFER_POS_REFCNT_INC (1ULL << 32) -#define FIREHOSE_BUFFER_POS_FULL_BIT (1ULL << 56) -#define FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(pos, stream) \ - ((((pos).fbc_atomic_pos >> 48) & 0x1ff) == (uint16_t)stream) - struct { - uint16_t fbc_next_entry_offs; - uint16_t fbc_private_offs; - uint8_t fbc_refcnt; - uint8_t fbc_qos_bits; - uint8_t fbc_stream; - uint8_t fbc_flag_full : 1; - uint8_t fbc_flag_io : 1; - uint8_t _fbc_flag_unused : 6; - }; -} firehose_buffer_pos_u; - -typedef struct firehose_buffer_chunk_s { - uint8_t fbc_start[0]; - firehose_buffer_pos_u volatile fbc_pos; - uint64_t fbc_timestamp; - uint8_t fbc_data[FIREHOSE_BUFFER_CHUNK_SIZE - - sizeof(firehose_buffer_pos_u) - - sizeof(uint64_t)]; -} __attribute__((aligned(8))) *firehose_buffer_chunk_t; - typedef struct firehose_buffer_range_s { uint16_t fbr_offset; // offset from the start of the buffer uint16_t fbr_length; @@ -78,6 +49,8 @@ typedef struct firehose_buffer_range_s { #ifdef KERNEL +typedef struct firehose_chunk_s *firehose_chunk_t; + // implemented by the kernel extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io); extern void __firehose_critical_region_enter(void); @@ -89,19 +62,10 @@ firehose_tracepoint_t __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, uint16_t pubsize, uint16_t privsize, uint8_t **privptr); 
-firehose_tracepoint_t -__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, - uint64_t stamp, firehose_stream_t stream, - uint16_t pubsize, uint16_t privsize, uint8_t **privptr); - void __firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); -void -__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); - firehose_buffer_t __firehose_buffer_create(size_t *size); @@ -118,13 +82,12 @@ const uint32_t _firehose_spi_version; OS_ALWAYS_INLINE static inline const uint8_t * -_firehose_tracepoint_reader_init(firehose_buffer_chunk_t fbc, - const uint8_t **endptr) +_firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr) { - const uint8_t *start = fbc->fbc_data; - const uint8_t *end = fbc->fbc_start + fbc->fbc_pos.fbc_next_entry_offs; + const uint8_t *start = fc->fc_data; + const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; - if (end > fbc->fbc_start + FIREHOSE_BUFFER_CHUNK_SIZE) { + if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) { end = start; } *endptr = end; @@ -136,27 +99,29 @@ static inline firehose_tracepoint_t _firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - firehose_tracepoint_t ft; + struct ft_unaligned_s { + struct firehose_tracepoint_s ft; + } __attribute__((packed, aligned(1))) *uft; do { - ft = (firehose_tracepoint_t)*ptr; - if (ft->ft_data >= end) { + uft = (struct ft_unaligned_s *)*ptr; + if (uft->ft.ft_data >= end) { // reached the end return NULL; } - if (!ft->ft_length) { + if (!uft->ft.ft_length) { // tracepoint write didn't even start return NULL; } - if (ft->ft_length > end - ft->ft_data) { + if (uft->ft.ft_length > end - uft->ft.ft_data) { // invalid length return NULL; } - *ptr += roundup(ft_size + ft->ft_length, 8); + *ptr += roundup(ft_size + uft->ft.ft_length, 8); // test whether write of the tracepoint was finished - } while (os_unlikely(ft->ft_id.ftid_value == 0)); + } while (os_unlikely(uft->ft.ft_id.ftid_value == 0)); - return ft; + return (firehose_tracepoint_t)uft; } #define firehose_tracepoint_foreach(ft, fbc) \ @@ -165,13 +130,13 @@ _firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) OS_ALWAYS_INLINE static inline bool -firehose_buffer_range_validate(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t ft, firehose_buffer_range_t range) +firehose_buffer_range_validate(firehose_chunk_t fc, firehose_tracepoint_t ft, + firehose_buffer_range_t range) { - if (range->fbr_offset + range->fbr_length > FIREHOSE_BUFFER_CHUNK_SIZE) { + if (range->fbr_offset + range->fbr_length > FIREHOSE_CHUNK_SIZE) { return false; } - if (fbc->fbc_start + range->fbr_offset < ft->ft_data + ft->ft_length) { + if (fc->fc_start + range->fbr_offset < ft->ft_data + ft->ft_length) { return false; } return true; diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h index 4bff8abc1..441bb52fd 100644 --- a/os/firehose_server_private.h +++ b/os/firehose_server_private.h @@ -139,6 +139,32 @@ OS_NOTHROW OS_NONNULL1 uint64_t firehose_client_get_unique_pid(firehose_client_t client, pid_t *pid); +/*! + * @function firehose_client_get_pid_version + * + * @abstract + * Returns the pid version for that client. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +int +firehose_client_get_pid_version(firehose_client_t client); + +/*! 
+ * @function firehose_client_get_euid + * + * @abstract + * Returns the EUID for that client as discovered at connect time. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +uid_t +firehose_client_get_euid(firehose_client_t client); + /*! * @function firehose_client_get_metadata_buffer * @@ -235,7 +261,7 @@ OS_NOTHROW OS_NONNULL1 OS_NONNULL4 void firehose_client_metadata_stream_peek(firehose_client_t client, firehose_event_t context, OS_NOESCAPE bool (^peek_should_start)(void), - OS_NOESCAPE bool (^peek)(firehose_buffer_chunk_t fbc)); + OS_NOESCAPE bool (^peek)(firehose_chunk_t fbc)); #pragma mark - Firehose Server @@ -246,7 +272,7 @@ firehose_client_metadata_stream_peek(firehose_client_t client, * Type of the handler block for firehose_server_init() */ typedef void (^firehose_handler_t)(firehose_client_t client, - firehose_event_t event, firehose_buffer_chunk_t page); + firehose_event_t event, firehose_chunk_t page); /*! * @function firehose_server_init @@ -276,6 +302,20 @@ OS_NOTHROW void firehose_server_assert_spi_version(uint32_t spi_version); +/*! + * @function firehose_server_has_ever_flushed_pages + * + * @abstract + * Checks whether the firehose server has ever flushed any pages this boot. + * + * @discussion + * Must be called after firehose_server_init() and before calling + * firehose_server_resume(). + */ +OS_NOTHROW +bool +firehose_server_has_ever_flushed_pages(void); + /*! * @function firehose_server_resume * @@ -289,11 +329,42 @@ OS_NOTHROW void firehose_server_resume(void); +/*! + * @function firehose_server_cancel + * + * @abstract + * Cancels the server, disconnects all clients, and prevents new connections. + */ +OS_NOTHROW +void +firehose_server_cancel(void); + +/*! + * @typedef firehose_server_queue_t + * + * @abstract + * Values to pass to firehose_server_get_queue() + */ +OS_ENUM(firehose_server_queue, unsigned long, + FIREHOSE_SERVER_QUEUE_UNKNOWN, + FIREHOSE_SERVER_QUEUE_IO, + FIREHOSE_SERVER_QUEUE_MEMORY, +); + +/*! + * @function firehose_server_copy_queue + * + * @abstract + * Returns internal queues to the firehose server subsystem. + */ +OS_NOTHROW OS_OBJECT_RETURNS_RETAINED +dispatch_queue_t +firehose_server_copy_queue(firehose_server_queue_t which); + #pragma mark - Firehose Snapshot /*! * @typedef firehose_snapshot_event - * */ OS_ENUM(firehose_snapshot_event, unsigned long, FIREHOSE_SNAPSHOT_EVENT_IO_START = 1, @@ -310,7 +381,7 @@ OS_ENUM(firehose_snapshot_event, unsigned long, * Type of the handler block for firehose_snapshot */ typedef void (^firehose_snapshot_handler_t)(firehose_client_t client, - firehose_snapshot_event_t event, firehose_buffer_chunk_t page); + firehose_snapshot_event_t event, firehose_chunk_t page); /*! * @function firehose_snapshot diff --git a/os/linux_base.h b/os/linux_base.h index 8173e12bf..d0048d615 100644 --- a/os/linux_base.h +++ b/os/linux_base.h @@ -17,8 +17,10 @@ #if __GNUC__ #define OS_EXPECT(x, v) __builtin_expect((x), (v)) +#define OS_UNUSED __attribute__((__unused__)) #else #define OS_EXPECT(x, v) (x) +#define OS_UNUSED #endif #ifndef os_likely @@ -67,6 +69,14 @@ #define __OS_CONCAT(x, y) x ## y #define OS_CONCAT(x, y) __OS_CONCAT(x, y) +#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) +#define OS_ENUM(_name, _type, ...) \ +typedef enum : _type { __VA_ARGS__ } _name##_t +#else +#define OS_ENUM(_name, _type, ...) 
\ +enum { __VA_ARGS__ }; typedef _type _name##_t +#endif + /* * Stub out misc linking and compilation attributes */ diff --git a/os/object.h b/os/object.h index f3faa62fd..b0b47059a 100644 --- a/os/object.h +++ b/os/object.h @@ -24,6 +24,7 @@ #ifdef __APPLE__ #include #include +#include #endif #ifndef __linux__ #include @@ -75,6 +76,9 @@ #endif // OS_OBJECT_HAVE_OBJC_SUPPORT #if OS_OBJECT_HAVE_OBJC_SUPPORT +#if defined(__swift__) && __swift__ && !OS_OBJECT_USE_OBJC +#define OS_OBJECT_USE_OBJC 1 +#endif #ifndef OS_OBJECT_USE_OBJC #define OS_OBJECT_USE_OBJC 1 #endif @@ -232,7 +236,7 @@ __BEGIN_DECLS * @result * The retained object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC") void* os_retain(void *object); @@ -254,7 +258,7 @@ os_retain(void *object); * @param object * The object to release. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_EXPORT void OS_SWIFT_UNAVAILABLE("Can't be used with ARC") os_release(void *object); diff --git a/os/object_private.h b/os/object_private.h index dc2af8345..36a807cb0 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -31,8 +31,8 @@ #include #include -#ifndef __OSX_AVAILABLE_STARTING -#define __OSX_AVAILABLE_STARTING(x, y) +#ifndef API_AVAILABLE +#define API_AVAILABLE(...) #endif #if __GNUC__ @@ -112,7 +112,7 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) #elif OS_OBJECT_USE_OBJC -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT @interface OS_OBJECT_CLASS(object) : NSObject - (void)_xref_dispose; @@ -136,48 +136,48 @@ __BEGIN_DECLS #if !_OS_OBJECT_OBJC_ARC -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_alloc(const void *cls, size_t size); -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_alloc_realized(const void *cls, size_t size); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_dealloc(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_with_resurrect(_os_object_t obj); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t 
_os_object_retain_internal(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h index 8f233b33c..28effc91e 100644 --- a/os/voucher_activity_private.h +++ b/os/voucher_activity_private.h @@ -28,11 +28,13 @@ #endif #ifndef __linux__ #include +#include #endif +#include #include #include "voucher_private.h" -#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20160329 +#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20161003 #if OS_VOUCHER_WEAK_IMPORT #define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT @@ -40,12 +42,6 @@ #define OS_VOUCHER_EXPORT OS_EXPORT #endif -#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_PUSH \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") -#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_POP \ - _Pragma("clang diagnostic pop") - __BEGIN_DECLS /*! @@ -79,8 +75,7 @@ __BEGIN_DECLS * The current activity identifier, if any. When 0 is returned, parent_id will * also always be 0. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW firehose_activity_id_t voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id); @@ -109,15 +104,14 @@ voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id); * The current activity identifier, if any. When 0 is returned, parent_id will * also always be 0. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW firehose_activity_id_t voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, firehose_activity_id_t *parent_id); /*! - * @function voucher_activity_create + * @function voucher_activity_create_with_data * * @abstract * Creates a voucher object with a new activity identifier. @@ -151,22 +145,24 @@ voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, * @param flags * See voucher_activity_flag_t documentation for effect. * - * @param location - * Location identifier for the automatic tracepoint generated as part of - * creating the new activity. + * @param pubdata + * Pointer to packed buffer of tracepoint data. + * + * @param publen + * Length of data at 'pubdata'. * * @result * A new voucher with an activity identifier. 
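Illustrative aside, not part of the patch: the renamed voucher_activity_create_with_data() SPI above takes its tracepoint ID by pointer and a packed payload instead of a location word. A minimal sketch follows; the flags value, the payload bytes, and the non-ARC os_release() call are assumptions for the example, and the include paths may differ.

```
#include <stdint.h>
#include <os/object.h>
#include <os/voucher_activity_private.h>    /* SPI header, path assumed */

/* Sketch: create an activity voucher from the current voucher plus a packed
 * tracepoint payload. The payload layout is owned by libtrace and is only a
 * placeholder here. */
static voucher_t
begin_activity_sketch(firehose_tracepoint_id_t *trace_id)
{
    uint8_t pubdata[16] = { 0 };            /* placeholder tracepoint data */
    voucher_t base = voucher_copy();        /* base on the adopted voucher */

    /* trace_id is in/out: the new activity ID is folded into it. */
    voucher_t activity = voucher_activity_create_with_data(trace_id,
            base, (firehose_activity_flags_t)0 /* flags: placeholder */,
            pubdata, sizeof(pubdata));
    if (base) os_release(base);             /* creation does not consume base (non-ARC) */
    return activity;                        /* retained; caller adopts/releases it */
}
```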
*/ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t -voucher_activity_create(firehose_tracepoint_id_t trace_id, - voucher_t base, firehose_activity_flags_t flags, uint64_t location); +voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, + const void *pubdata, size_t publen); -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_create_with_data", + macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, @@ -177,6 +173,21 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, * SPI intended for libtrace only */ +/*! + * @function voucher_activity_id_allocate + * + * @abstract + * Allocate a new system-wide unique activity ID. + * + * @param flags + * The bottom-most 8 bits of the flags will be used to generate the ID. + * See firehose_activity_flags_t. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +OS_VOUCHER_EXPORT OS_NOTHROW +firehose_activity_id_t +voucher_activity_id_allocate(firehose_activity_flags_t flags); + /*! * @function voucher_activity_flush * @@ -192,8 +203,7 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, * @param stream * The stream to flush. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_activity_flush(firehose_stream_t stream); @@ -219,8 +229,7 @@ voucher_activity_flush(firehose_stream_t stream); * @param publen * Length of data at 'pubdata'. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 firehose_tracepoint_id_t voucher_activity_trace(firehose_stream_t stream, @@ -228,7 +237,7 @@ voucher_activity_trace(firehose_stream_t stream, const void *pubdata, size_t publen); /*! - * @function voucher_activity_trace_with_private_strings + * @function voucher_activity_trace_v * * @abstract * Add a tracepoint to the specified stream, with private data. @@ -242,20 +251,29 @@ voucher_activity_trace(firehose_stream_t stream, * @param timestamp * The mach_approximate_time()/mach_absolute_time() value for this tracepoint. * - * @param pubdata - * Pointer to packed buffer of tracepoint data. + * @param iov + * Array of `struct iovec` pointing to the data to layout. + * The total size of this iovec must span exactly `publen + privlen` bytes. + * The `publen` boundary must coincide with the end of an iovec (each iovec + * must either be pure public or pure private data). * * @param publen - * Length of data at 'pubdata'. - * - * @param privdata - * Pointer to packed buffer of private tracepoint data. + * Total length of data to read from the iovec for the public data. * * @param privlen - * Length of data at 'privdata'. + * Length of data to read from the iovec after the public data for the private + * data. 
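Another illustrative aside: the iovec-based variant documented here (and declared just below) requires the public bytes to end exactly on an iovec boundary. A hedged sketch, with the stream, trace ID, timestamp, and payload supplied by the caller:

```
#include <stdint.h>
#include <sys/uio.h>                        /* struct iovec */
#include <os/voucher_activity_private.h>    /* SPI header, path assumed */

static void
trace_v_sketch(firehose_stream_t stream, firehose_tracepoint_id_t trace_id,
        uint64_t timestamp)
{
    static const uint8_t pub[8]  = { 0 };   /* public tracepoint bytes */
    static const uint8_t priv[4] = { 0 };   /* private bytes (e.g. redacted strings) */

    /* Each iovec is purely public or purely private, and publen ends exactly
     * at the end of the first iovec, as the documentation above requires. */
    struct iovec iov[2] = {
        { .iov_base = (void *)pub,  .iov_len = sizeof(pub)  },
        { .iov_base = (void *)priv, .iov_len = sizeof(priv) },
    };
    (void)voucher_activity_trace_v(stream, trace_id, timestamp,
            iov, sizeof(pub), sizeof(priv));
}
```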
*/ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 +firehose_tracepoint_id_t +voucher_activity_trace_v(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const struct iovec *iov, size_t publen, size_t privlen); + + +API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_trace_v", + macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0)) OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6 firehose_tracepoint_id_t voucher_activity_trace_with_private_strings(firehose_stream_t stream, @@ -263,15 +281,13 @@ voucher_activity_trace_with_private_strings(firehose_stream_t stream, const void *pubdata, size_t publen, const void *privdata, size_t privlen); -typedef struct voucher_activity_hooks_s { -#define VOUCHER_ACTIVITY_HOOKS_VERSION 3 +typedef const struct voucher_activity_hooks_s { +#define VOUCHER_ACTIVITY_HOOKS_VERSION 4 long vah_version; - // version 1 mach_port_t (*vah_get_logd_port)(void); - // version 2 dispatch_mach_handler_function_t vah_debug_channel_handler; - // version 3 kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *); + void (*vah_metadata_init)(void *metadata_buffer, size_t size); } *voucher_activity_hooks_t; /*! @@ -283,8 +299,7 @@ typedef struct voucher_activity_hooks_s { * @param hooks * A pointer to a voucher_activity_hooks_s structure. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL_ALL void voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks); @@ -302,7 +317,7 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks); * @result * Address of metadata buffer. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL void* voucher_activity_get_metadata_buffer(size_t *length); @@ -314,8 +329,7 @@ voucher_activity_get_metadata_buffer(size_t *length); * Return the current voucher activity ID. Available for the dyld client stub * only. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW firehose_activity_id_t voucher_get_activity_id_4dyld(void); diff --git a/os/voucher_private.h b/os/voucher_private.h index 562a70415..6675a0edb 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -23,6 +23,7 @@ #ifndef __linux__ #include +#include #endif #if __has_include() #include @@ -100,7 +101,7 @@ OS_OBJECT_DECL_CLASS(voucher); * @result * The previously adopted voucher object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE OS_NOTHROW voucher_t _Nullable @@ -116,7 +117,7 @@ voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); * @result * The currently adopted voucher object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy(void); @@ -135,7 +136,7 @@ voucher_copy(void); * @result * A copy of the currently adopted voucher object, with importance removed. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy_without_importance(void); @@ -161,7 +162,7 @@ voucher_copy_without_importance(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_replace_default_voucher(void); @@ -179,7 +180,7 @@ voucher_replace_default_voucher(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); @@ -263,7 +264,7 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -346,7 +347,7 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags, * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -362,52 +363,10 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, * @function dispatch_queue_create_with_accounting_override_voucher * * @abstract - * Creates a new dispatch queue with an accounting override voucher created - * from the specified voucher. - * - * @discussion - * See dispatch_queue_create() headerdoc for generic details on queue creation. - * - * The resource accounting attributes of the specified voucher are extracted - * and used to create an accounting override voucher for the new queue. - * - * Every block executed on the returned queue will initially have this override - * voucher adopted, any voucher automatically associated with or explicitly - * assigned to the block will NOT be used and released immediately before block - * execution starts. - * - * The accounting override voucher will be automatically propagated to any - * asynchronous work generated from the queue following standard voucher - * propagation rules. - * - * NOTE: this SPI should only be used in special circumstances when a subsystem - * has complete control over all workitems submitted to a queue (e.g. no client - * block is ever submitted to the queue) and if and only if such queues have a - * one-to-one mapping with resource accounting identities. - * - * CAUTION: use of this SPI represents a potential voucher propagation hole. It - * is the responsibility of the caller to ensure that any callbacks into client - * code from the queue have the correct client voucher applied (rather than the - * automatically propagated accounting override voucher), e.g. by use of the - * dispatch_block_create() API to capture client state at the time the callback - * is registered. - * - * @param label - * A string label to attach to the queue. - * This parameter is optional and may be NULL. 
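Illustrative aside: the block SPI whose availability annotation changes above captures an explicit voucher at block-creation time. A sketch under non-ARC assumptions (Block_release() and os_release() are the non-ARC spellings; the queue and work block come from the caller):

```
#include <Block.h>
#include <dispatch/dispatch.h>
#include <os/object.h>
#include <os/voucher_private.h>             /* SPI header, path assumed */

static void
async_with_current_voucher(dispatch_queue_t queue, dispatch_block_t work)
{
    voucher_t v = voucher_copy();           /* may be NULL if none is adopted */

    /* The created block runs with `v` adopted, instead of whatever voucher
     * happens to be current when it is submitted. */
    dispatch_block_t b = dispatch_block_create_with_voucher(
            (dispatch_block_flags_t)0, v, work);
    dispatch_async(queue, b);
    Block_release(b);                       /* per the headerdoc above (non-ARC) */
    if (v) os_release(v);
}
```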
- * - * @param attr - * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to - * the function dispatch_queue_attr_make_with_qos_class(). - * - * @param voucher - * A voucher whose resource accounting attributes are used to create the - * accounting override voucher attached to the queue. - * - * @result - * The newly created dispatch queue. + * Deprecated, do not use, will abort process if called. */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_DEPRECATED("removed SPI", \ + macos(10.11,10.12), ios(9.0,10.0), watchos(2.0,3.0), tvos(9.0,10.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -440,7 +399,7 @@ dispatch_queue_create_with_accounting_override_voucher( * The newly created voucher object or NULL if the message was not carrying a * mach voucher. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); @@ -475,7 +434,7 @@ struct proc_persona_info; * or the persona identifier of the current process * or PERSONA_ID_NONE */ -__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +API_AVAILABLE(ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW uid_t voucher_get_current_persona(void); @@ -498,7 +457,7 @@ voucher_get_current_persona(void); * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +API_AVAILABLE(ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_originator_info( @@ -522,7 +481,7 @@ voucher_get_current_persona_originator_info( * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +API_AVAILABLE(ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_proximate_info( diff --git a/private/benchmark.h b/private/benchmark.h index ef3cdbd2f..ab5715648 100644 --- a/private/benchmark.h +++ b/private/benchmark.h @@ -70,13 +70,13 @@ __BEGIN_DECLS * cache-line. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW uint64_t dispatch_benchmark(size_t count, dispatch_block_t block); #endif -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW uint64_t dispatch_benchmark_f(size_t count, void *_Nullable ctxt, diff --git a/private/data_private.h b/private/data_private.h index 7485525a5..364a8ffe0 100644 --- a/private/data_private.h +++ b/private/data_private.h @@ -43,7 +43,7 @@ __BEGIN_DECLS * encapsulate buffers that should not be copied or freed by the system. */ #define DISPATCH_DATA_DESTRUCTOR_NONE (_dispatch_data_destructor_none) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none); /*! 
@@ -53,7 +53,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none); */ #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \ (_dispatch_data_destructor_vm_deallocate) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate); /*! @@ -77,7 +77,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate); * data buffer when it is no longer needed. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_f(const void *buffer, @@ -100,7 +100,7 @@ dispatch_data_create_f(const void *buffer, * location of the newly allocated memory region, or NULL. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -142,7 +142,7 @@ typedef bool (*dispatch_data_applier_function_t)(void *_Nullable context, * @result A Boolean indicating whether traversal completed * successfully. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context, @@ -163,7 +163,7 @@ dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context, * @result A mach port for the newly made memory entry, or * MACH_PORT_NULL if an error occurred. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t dispatch_data_make_memory_entry(dispatch_data_t data); @@ -198,7 +198,7 @@ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; * or should be, comprised of raw data bytes with no given encoding. */ #define DISPATCH_DATA_FORMAT_TYPE_NONE (&_dispatch_data_format_type_none) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(none); /*! @@ -209,7 +209,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(none); * types. */ #define DISPATCH_DATA_FORMAT_TYPE_BASE32 (&_dispatch_data_format_type_base32) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(base32); /*! @@ -221,7 +221,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base32); */ #define DISPATCH_DATA_FORMAT_TYPE_BASE32HEX \ (&_dispatch_data_format_type_base32hex) -__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex); /*! @@ -232,7 +232,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex); * types. */ #define DISPATCH_DATA_FORMAT_TYPE_BASE64 (&_dispatch_data_format_type_base64) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(base64); /*! @@ -242,7 +242,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base64); * with other UTF format types. */ #define DISPATCH_DATA_FORMAT_TYPE_UTF8 (&_dispatch_data_format_type_utf8) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf8); /*! @@ -252,7 +252,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf8); * conjunction with other UTF format types. 
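Illustrative aside: the no-copy/no-free destructor and the function-pointer applier touched in this hunk combine as below. The applier's full parameter list is truncated above, so the sketch assumes it mirrors the public block-based applier with a leading context pointer; the header path is likewise assumed.

```
#include <stdbool.h>
#include <stdio.h>
#include <dispatch/dispatch.h>
#include <dispatch/data_private.h>          /* SPI header, path assumed */

/* Assumed applier shape: context first, then the public applier parameters. */
static bool
dump_region(void *context, dispatch_data_t region, size_t offset,
        const void *buffer, size_t size)
{
    (void)context; (void)region; (void)buffer;
    printf("region @%zu: %zu bytes\n", offset, size);
    return true;                            /* keep traversing */
}

static void
data_sketch(void)
{
    static const char bytes[] = "static buffer";

    /* DISPATCH_DATA_DESTRUCTOR_NONE: the buffer is immortal, so the system
     * neither copies nor frees it. */
    dispatch_data_t d = dispatch_data_create(bytes, sizeof(bytes) - 1,
            NULL, DISPATCH_DATA_DESTRUCTOR_NONE);
    dispatch_data_apply_f(d, NULL, dump_region);
    dispatch_release(d);
}
```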
*/ #define DISPATCH_DATA_FORMAT_TYPE_UTF16LE (&_dispatch_data_format_type_utf16le) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le); /*! @@ -262,7 +262,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le); * conjunction with other UTF format types. */ #define DISPATCH_DATA_FORMAT_TYPE_UTF16BE (&_dispatch_data_format_type_utf16be) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be); /*! @@ -274,7 +274,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be); * format. */ #define DISPATCH_DATA_FORMAT_TYPE_UTF_ANY (&_dispatch_data_format_type_utf_any) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); /*! @@ -295,7 +295,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); * produced, or NULL if an error occurred. */ -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t diff --git a/private/introspection_private.h b/private/introspection_private.h index fa8e49aeb..972c68857 100644 --- a/private/introspection_private.h +++ b/private/introspection_private.h @@ -68,8 +68,8 @@ typedef struct dispatch_queue_s *dispatch_queue_t; typedef struct dispatch_source_s *dispatch_source_t; typedef struct dispatch_group_s *dispatch_group_t; typedef struct dispatch_object_s *dispatch_object_t; -#ifndef __OSX_AVAILABLE_STARTING -#define __OSX_AVAILABLE_STARTING(x,y) +#ifndef API_AVAILABLE +#define API_AVAILABLE(...) #endif #ifndef DISPATCH_EXPORT #define DISPATCH_EXPORT extern @@ -135,7 +135,7 @@ typedef struct dispatch_object_s *dispatch_object_t; * Size of dispatch_introspection_source_s structure. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT const struct dispatch_introspection_versions_s { unsigned long introspection_version; unsigned long hooks_version; @@ -716,7 +716,7 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t queue, * hooks on output. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks); diff --git a/private/io_private.h b/private/io_private.h index 0bb1e3b25..293258161 100644 --- a/private/io_private.h +++ b/private/io_private.h @@ -79,7 +79,7 @@ __BEGIN_DECLS * param error An errno condition for the read operation or * zero if the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW void dispatch_read_f(dispatch_fd_t fd, @@ -121,7 +121,7 @@ dispatch_read_f(dispatch_fd_t fd, * param error An errno condition for the write operation or * zero if the write was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW void @@ -160,7 +160,7 @@ dispatch_write_f(dispatch_fd_t fd, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). 
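Illustrative aside: the function-pointer read SPI updated in this hunk appears only in part, so the following sketch assumes its parameters mirror the public dispatch_read() with a context/function pair in place of the block; treat it as a sketch under that assumption.

```
#include <stdio.h>
#include <dispatch/dispatch.h>
#include <dispatch/io_private.h>            /* SPI header, path assumed */

/* Handler parameters follow the headerdoc above: context, data, errno. */
static void
read_done(void *context, dispatch_data_t data, int error)
{
    (void)context;
    if (error) {
        fprintf(stderr, "read failed: %d\n", error);
        return;
    }
    printf("read %zu bytes\n", dispatch_data_get_size(data));
}

static void
read_sketch(dispatch_fd_t fd)
{
    /* Assumed order: fd, length, queue, then the context/function pair. */
    dispatch_read_f(fd, 4096,
            dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
            NULL, read_done);
}
```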
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -200,7 +200,7 @@ dispatch_io_create_f(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type or non-absolute path specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -244,7 +244,7 @@ dispatch_io_create_with_path_f(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -311,7 +311,7 @@ typedef void (*dispatch_io_handler_function_t)(void *_Nullable context, * param error An errno condition for the read operation or zero if * the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW void @@ -368,7 +368,7 @@ dispatch_io_read_f(dispatch_io_t channel, * param error An errno condition for the write operation or zero * if the write was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW void @@ -402,7 +402,7 @@ dispatch_io_write_f(dispatch_io_t channel, * the barrier function. * @param barrier The barrier function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_io_barrier_f(dispatch_io_t channel, diff --git a/private/layout_private.h b/private/layout_private.h index bf93ee999..0c0cd942d 100644 --- a/private/layout_private.h +++ b/private/layout_private.h @@ -29,7 +29,7 @@ __BEGIN_DECLS #if !TARGET_OS_WIN32 -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT const struct dispatch_queue_offsets_s { // always add new fields at the end const uint16_t dqo_version; @@ -60,7 +60,7 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s { * SPI intended for CoreSymbolication only */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT const struct dispatch_tsd_indexes_s { // always add new fields at the end const uint16_t dti_version; diff --git a/private/mach_private.h b/private/mach_private.h index 2228436a7..6ca891d6f 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -36,7 +36,7 @@ __BEGIN_DECLS #if DISPATCH_MACH_SPI -#define DISPATCH_MACH_SPI_VERSION 20160505 +#define DISPATCH_MACH_SPI_VERSION 20161026 #include @@ -109,6 +109,21 @@ DISPATCH_DECL(dispatch_mach); * result operation and never passed to a channel handler. Indicates that the * message passed to the send operation must not be disposed of until it is * returned via the channel handler. + * + * @const DISPATCH_MACH_SIGTERM_RECEIVED + * A SIGTERM signal has been received. This notification is delivered at most + * once during the lifetime of the channel. 
This event is sent only for XPC + * channels (i.e. channels that were created by calling + * dispatch_mach_create_4libxpc()). + * + * @const DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + * The channel has been disconnected by a call to dispatch_mach_reconnect() or + * dispatch_mach_cancel(), an empty message is passed in the message parameter + * (so that associated port rights can be disposed of). The message header will + * contain a local port with the receive right previously allocated to receive + * an asynchronous reply to a message previously sent to the channel. Used + * only if the channel is disconnected while waiting for a reply to a message + * sent with dispatch_mach_send_with_result_and_async_reply_4libxpc(). */ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_CONNECTED = 1, @@ -121,6 +136,8 @@ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_CANCELED, DISPATCH_MACH_REPLY_RECEIVED, DISPATCH_MACH_NEEDS_DEFERRED_SEND, + DISPATCH_MACH_SIGTERM_RECEIVED, + DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED, DISPATCH_MACH_REASON_LAST, /* unused */ ); @@ -202,7 +219,7 @@ DISPATCH_ENUM(dispatch_mach_msg_destructor, unsigned int, * buffer, or NULL. * @result A newly created dispatch mach message object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_mach_msg_t @@ -219,7 +236,7 @@ dispatch_mach_msg_create(mach_msg_header_t *_Nullable msg, size_t size, * size of the message buffer, or NULL. * @result Pointer to message buffer underlying the object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW mach_msg_header_t* dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, @@ -267,7 +284,7 @@ typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason_t reason, * @result * The newly created dispatch mach channel. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL3 DISPATCH_NOTHROW dispatch_mach_t @@ -321,7 +338,7 @@ typedef void (*dispatch_mach_handler_function_t)(void *_Nullable context, * @result * The newly created dispatch mach channel. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL4 DISPATCH_NOTHROW dispatch_mach_t @@ -354,7 +371,7 @@ dispatch_mach_create_f(const char *_Nullable label, * to channel cancellation or reconnection) and the channel handler has * returned. May be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive, @@ -385,7 +402,7 @@ dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive, * is complete (or not peformed due to channel cancellation or reconnection) * and the channel handler has returned. May be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send, @@ -408,7 +425,7 @@ dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send, * @param channel * The mach channel to cancel. 
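Illustrative aside: a minimal lifecycle for the channel SPI documented in this hunk, for orientation only. The handler and connect prototypes are abbreviated above, so their full shapes are assumed here; the ports are placeholders owned by the caller.

```
#include <dispatch/dispatch.h>
#include <dispatch/mach_private.h>          /* SPI header, path assumed */

static dispatch_mach_t
channel_sketch(mach_port_t receive, mach_port_t send)
{
    /* Channels are created disconnected; the label literal outlives the
     * channel, as the headerdoc requires. */
    dispatch_mach_t ch = dispatch_mach_create("com.example.sketch-channel",
            dispatch_get_main_queue(),
            ^(dispatch_mach_reason_t reason, dispatch_mach_msg_t message,
                    mach_error_t error) {
        (void)message; (void)error;
        switch (reason) {
        case DISPATCH_MACH_CONNECTED:
            break;                          /* ready to send */
        case DISPATCH_MACH_MESSAGE_RECEIVED:
            break;                          /* consume `message` here */
        case DISPATCH_MACH_CANCELED:
            break;                          /* last event for the channel */
        default:
            break;
        }
    });
    dispatch_mach_connect(ch, receive, send, NULL);  /* no check-in message */
    return ch;   /* caller eventually calls dispatch_mach_cancel() and releases */
}
```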
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_cancel(dispatch_mach_t channel); @@ -451,7 +468,7 @@ dispatch_mach_cancel(dispatch_mach_t channel); * Additional send options to pass to mach_msg() when performing the send * operation. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW void dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message, @@ -519,8 +536,7 @@ dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message, * Out parameter to return the error from the immediate send attempt. * If a deferred send is required, returns 0. Must not be NULL. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6 DISPATCH_NOTHROW void @@ -580,7 +596,7 @@ dispatch_mach_send_with_result(dispatch_mach_t channel, * @result * The received reply message object, or NULL if the channel was canceled. */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_AVAILABLE(macos(10.11), ios(9.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW dispatch_mach_msg_t _Nullable @@ -662,8 +678,7 @@ dispatch_mach_send_and_wait_for_reply(dispatch_mach_t channel, * @result * The received reply message object, or NULL if the channel was canceled. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6 DISPATCH_NOTHROW @@ -688,7 +703,7 @@ dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t channel, * @param barrier * The barrier block to submit to the channel target queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier); @@ -711,7 +726,7 @@ dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier); * @param barrier * The barrier function to submit to the channel target queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context, @@ -731,7 +746,7 @@ dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context, * @param barrier * The barrier block to submit to the channel target queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_receive_barrier(dispatch_mach_t channel, @@ -754,7 +769,7 @@ dispatch_mach_receive_barrier(dispatch_mach_t channel, * @param barrier * The barrier function to submit to the channel target queue. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context, @@ -781,11 +796,222 @@ dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context * @result * The most recently specified check-in port for the channel. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t channel); +// SPI for libxpc +/* + * Type for the callback for receipt of asynchronous replies to + * dispatch_mach_send_with_result_and_async_reply_4libxpc(). + */ +typedef void (*_Nonnull dispatch_mach_async_reply_callback_t)(void *context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t message); + +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +typedef const struct dispatch_mach_xpc_hooks_s { +#define DISPATCH_MACH_XPC_HOOKS_VERSION 2 + unsigned long version; + + /* Fields available in version 1. */ + + /* + * Called to handle a Mach message event inline if possible. Returns true + * if the event was handled, false if the event should be delivered to the + * channel event handler. The implementation should not make any assumptions + * about the thread in which the function is called and cannot assume that + * invocations of this function are serialized relative to each other or + * relative to the channel's event handler function. In addition, the + * handler must not throw an exception or call out to any code that might + * throw an exception. + */ + bool (* _Nonnull dmxh_direct_message_handler)(void *_Nullable context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t message, + mach_error_t error); + + /* Fields available in version 2. */ + + /* + * Gets the queue to which a reply to a message sent using + * dispatch_mach_send_with_result_and_async_reply_4libxpc() should be + * delivered. The msg_context argument is the value of the do_ctxt field + * of the outgoing message, as returned by dispatch_get_context(). If this + * function returns NULL, the reply will be delivered to the channel queue. + * This function should not make any assumptions about the thread on which + * it is called and, since it may be called more than once per message, it + * should execute as quickly as possible and not attempt to synchronize with + * other code. + */ + dispatch_queue_t _Nullable (*_Nonnull dmxh_msg_context_reply_queue)( + void *_Nonnull msg_context); + + /* + * Called when a reply to a message sent by + * dispatch_mach_send_with_result_and_async_reply_4libxpc() is received. The + * message argument points to the reply message and the context argument is + * the context value passed to dispatch_mach_create_4libxpc() when creating + * the Mach channel. The handler is called on the queue that is returned by + * dmxh_msg_context_reply_queue() when the reply is received or if the + * channel is disconnected. The reason argument is + * DISPATCH_MACH_MESSAGE_RECEIVED if a reply has been received or + * DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED if the channel has been + * disconnected. Refer to the documentation for + * dispatch_mach_send_with_result_and_async_reply_4libxpc() for more + * details. 
+ */ + dispatch_mach_async_reply_callback_t dmxh_async_reply_handler; +} *dispatch_mach_xpc_hooks_t; + +#define DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(hooks) ((hooks)->version >= 2) + +/*! + * @function dispatch_mach_hooks_install_4libxpc + * + * @abstract + * installs XPC callbacks for dispatch Mach channels. + * + * @discussion + * In order to improve the performance of the XPC/dispatch interface, it is + * sometimes useful for dispatch to be able to call directly into XPC. The + * channel hooks structure should be initialized with pointers to XPC callback + * functions, or NULL for callbacks that XPC does not support. The version + * number in the structure must be set to reflect the fields that have been + * initialized. This function may be called only once. + * + * @param hooks + * A pointer to the channel hooks structure. This must remain valid once set. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks); + +/*! + * @function dispatch_mach_create_4libxpc + * Create a dispatch mach channel to asynchronously receive and send mach + * messages, specifically for libxpc. + * + * The specified handler will be called with the corresponding reason parameter + * for each message received and for each message that was successfully sent, + * that failed to be sent, or was not sent; as well as when a barrier block + * has completed, or when channel connection, reconnection or cancellation has + * taken effect. However, the handler will not be called for messages that + * were passed to the XPC hooks dmxh_direct_message_handler function if that + * function returned true. + * + * Dispatch mach channels are created in a disconnected state, they must be + * connected via dispatch_mach_connect() to begin receiving and sending + * messages. + * + * @param label + * An optional string label to attach to the channel. The string is not copied, + * if it is non-NULL it must point to storage that remains valid for the + * lifetime of the channel object. May be NULL. + * + * @param queue + * The target queue of the channel, where the handler and barrier blocks will + * be submitted. + * + * @param context + * The application-defined context to pass to the handler. + * + * @param handler + * The handler function to submit when a message has been sent or received. + * + * @result + * The newly created dispatch mach channel. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_mach_t +dispatch_mach_create_4libxpc(const char *_Nullable label, + dispatch_queue_t _Nullable queue, void *_Nullable context, + dispatch_mach_handler_function_t handler); + +/*! + * @function dispatch_mach_send_with_result_and_async_reply_4libxpc + * SPI for XPC that asynchronously sends a message encapsulated in a dispatch + * mach message object to the specified mach channel. If an immediate send can + * be performed, returns its result via out parameters. + * + * The reply message is processed on the queue returned by the + * dmxh_msg_context_reply_queue function in the dispatch_mach_xpc_hooks_s + * structure, which is called with a single argument whose value is the + * do_ctxt field of the message argument to this function. 
The reply message is + * delivered to the dmxh_async_reply_handler hook function instead of being + * passed to the channel event handler. + * + * If the dmxh_msg_context_reply_queue function is not implemented or returns + * NULL, the reply message is delivered to the channel event handler on the + * channel queue. + * + * Unless the message is being sent to a send-once right (as determined by the + * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits), + * the message header remote port is set to the channel send right before the + * send operation is performed. + * + * The message is required to expect a direct reply (as determined by the + * presence of MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits). + * The receive right specified in the message header local port will be + * monitored until a reply message (or a send-once notification) is received, or + * the channel is canceled. Hence the application must wait for the reply + * to be received or for a DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED message + * before releasing that receive right. + * + * If the message send operation is attempted but the channel is canceled + * before the send operation succesfully completes, the message returned to the + * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a + * pseudo-receive operation and the receive right originally specified in the + * message header local port will be returned in a + * DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED message. + * + * If an immediate send could be performed, returns the resulting reason + * (e.g. DISPATCH_MACH_MESSAGE_SENT) and possible error to the caller in the + * send_result and send_error out parameters (instead of via the channel + * handler), in which case the passed-in message and associated resources + * can be disposed of synchronously. + * + * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND + * in the send_result out parameter to indicate that the passed-in message has + * been retained and associated resources must not be disposed of until the + * message is returned asynchronusly via the channel handler. + * + * @param channel + * The mach channel to which to send the message. + * + * @param message + * The message object encapsulating the message to send. Unless an immediate + * send could be performed, the object will be retained until the asynchronous + * send operation is complete and the channel handler has returned. The storage + * underlying the message object may be modified by the send operation. + * + * @param options + * Additional send options to pass to mach_msg() when performing the send + * operation. + * + * @param send_flags + * Flags to configure the send operation. Must be 0 for now. + * + * @param send_result + * Out parameter to return the result of the immediate send attempt. + * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND. + * Must not be NULL. + * + * @param send_error + * Out parameter to return the error from the immediate send attempt. + * If a deferred send is required, returns 0. Must not be NULL. 
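Illustrative aside: the version-2 hooks structure documented above can be filled in roughly as follows. The callback bodies are placeholders; only the field names and callback shapes shown in this hunk are relied upon.

```
#include <stdbool.h>
#include <dispatch/mach_private.h>          /* SPI header, path assumed */

/* Placeholder callbacks; a real client (libxpc) would do actual work. */
static bool
hook_direct_message(void *context, dispatch_mach_reason_t reason,
        dispatch_mach_msg_t message, mach_error_t error)
{
    (void)context; (void)reason; (void)message; (void)error;
    return false;                   /* not handled inline; use the channel handler */
}

static dispatch_queue_t
hook_reply_queue(void *msg_context)
{
    (void)msg_context;
    return NULL;                    /* NULL: deliver replies on the channel queue */
}

static void
hook_async_reply(void *context, dispatch_mach_reason_t reason,
        dispatch_mach_msg_t message)
{
    (void)context; (void)reason; (void)message;
}

static const struct dispatch_mach_xpc_hooks_s hooks_sketch = {
    .version = DISPATCH_MACH_XPC_HOOKS_VERSION,   /* 2: async replies supported */
    .dmxh_direct_message_handler  = hook_direct_message,
    .dmxh_msg_context_reply_queue = hook_reply_queue,
    .dmxh_async_reply_handler     = hook_async_reply,
};

static void
install_hooks_once(void)
{
    /* Per the headerdoc above, this may be called only once and the
     * structure must remain valid once installed. */
    dispatch_mach_hooks_install_4libxpc(&hooks_sketch);
}
```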
+ */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 +DISPATCH_NONNULL6 DISPATCH_NOTHROW +void +dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t channel, + dispatch_mach_msg_t message, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error); + DISPATCH_ASSUME_NONNULL_END #endif // DISPATCH_MACH_SPI diff --git a/private/private.h b/private/private.h index 3c37bed0d..82da15ea1 100644 --- a/private/private.h +++ b/private/private.h @@ -66,7 +66,7 @@ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20160712 // Keep in sync with +#if DISPATCH_API_VERSION != 20160831 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif @@ -93,7 +93,7 @@ __BEGIN_DECLS * Boolean indicating whether the process has used libdispatch and become * multithreaded. */ -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_is_multithreaded(void); @@ -117,7 +117,7 @@ bool _dispatch_is_multithreaded(void); * Boolean indicating whether the parent process had used libdispatch and * become multithreaded at the time of fork. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_is_fork_of_multithreaded_parent(void); @@ -144,8 +144,7 @@ bool _dispatch_is_fork_of_multithreaded_parent(void); * If the program already used dispatch before the guard is enabled, then * this function will abort immediately. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); @@ -187,31 +186,23 @@ typedef int dispatch_runloop_handle_t; #endif #if TARGET_OS_MAC -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void); #endif -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void); -#if TARGET_OS_MAC -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -_dispatch_main_queue_callback_4CF(mach_msg_header_t *_Null_unspecified msg); -#else -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_main_queue_callback_4CF(void *_Null_unspecified msg); -#endif -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -219,33 +210,33 @@ _dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, unsigned long flags); #if TARGET_OS_MAC -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW mach_port_t 
_dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); #endif -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t queue); -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW bool _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t queue); -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void _dispatch_source_set_runloop_timer_4CF(dispatch_source_t source, dispatch_time_t start, uint64_t interval, uint64_t leeway); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT void *_Nonnull (*_Nullable _dispatch_begin_NSAutoReleasePool)(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *); diff --git a/private/queue_private.h b/private/queue_private.h index 33de371c8..14d64772d 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -79,7 +79,7 @@ enum { * This new value combines the attributes specified by the 'attr' parameter and * the overcommit flag. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, @@ -98,6 +98,39 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, */ #define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN +/*! + * @function dispatch_queue_set_label_nocopy + * + * @abstract + * Set the label for a given queue, without copying the input string. + * + * @discussion + * The queue must have been initially created with a NULL label, else using + * this function to set the queue label is undefined. + * + * The caller of this function must make sure the label pointer remains valid + * while it is used as the queue label and while any callers to + * dispatch_queue_get_label() may have obtained it. Since the queue lifetime + * may extend past the last release, it is advised to call this function with + * a constant string or NULL before the queue is released, or to destroy the + * label from a finalizer for that queue. + * + * This function should be called before any work item could call + * dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL) or from the context of + * the queue itself. + * + * @param queue + * The queue to adjust. Attempts to set the label of the main queue or a global + * concurrent queue will be ignored. + * + * @param label + * The new label for the queue. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_queue_set_label_nocopy(dispatch_queue_t queue, const char *label); + /*! * @function dispatch_queue_set_width * @@ -115,8 +148,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, * with the desired concurrency width. * * @param queue - * The queue to adjust. Passing the main queue or a global concurrent queue - * will be ignored. + * The queue to adjust. Attempts to set the width of the main queue or a global + * concurrent queue will be ignored. 
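Illustrative aside: the new no-copy label setter above pairs naturally with a queue created without a label. A small sketch (header path assumed):

```
#include <dispatch/dispatch.h>
#include <dispatch/queue_private.h>         /* SPI header, path assumed */

static dispatch_queue_t
make_labeled_queue_sketch(void)
{
    /* The queue must be created with a NULL label, and the label set here
     * must outlive the queue; a string literal satisfies both rules. Set it
     * right after creation so dispatch_queue_get_label() never races. */
    dispatch_queue_t q = dispatch_queue_create(NULL, DISPATCH_QUEUE_SERIAL);
    dispatch_queue_set_label_nocopy(q, "com.example.nocopy-label");
    return q;
}
```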
* * @param width * The new maximum width of concurrency depending on available resources. @@ -128,8 +161,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, #define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS -2 #define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS -3 -__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_6,__MAC_10_10,__IPHONE_4_0,__IPHONE_8_0, \ - "Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT) instead") +API_DEPRECATED("Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT)", + macos(10.6,10.10), ios(4.0,8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_queue_set_width(dispatch_queue_t dq, long width); @@ -189,7 +222,7 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width); * @result * The newly created dispatch pthread root queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -238,8 +271,7 @@ dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size) * @result * A new reference to a pthread root queue object or NULL. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t _Nullable dispatch_pthread_root_queue_copy_current(void); @@ -284,7 +316,7 @@ dispatch_pthread_root_queue_copy_current(void); * dispatch_async_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_AVAILABLE(macos(10.11), ios(9.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, diff --git a/private/source_private.h b/private/source_private.h index bb1370238..f01287b56 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -36,17 +36,6 @@ DISPATCH_ASSUME_NONNULL_BEGIN __BEGIN_DECLS -/*! - * @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE - * @discussion A dispatch timer source that is part of a timer aggregate. - * The handle is the dispatch timer aggregate object. - * The mask specifies which flags from dispatch_source_timer_flags_t to apply. - */ -#define DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE \ - (&_dispatch_source_type_timer_with_aggregate) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate); - /*! * @const DISPATCH_SOURCE_TYPE_INTERVAL * @discussion A dispatch source that submits the event handler block at a @@ -69,7 +58,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate); * The mask specifies which flags from dispatch_source_timer_flags_t to apply. */ #define DISPATCH_SOURCE_TYPE_INTERVAL (&_dispatch_source_type_interval) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_SOURCE_TYPE_DECL(interval); /*! @@ -79,8 +68,8 @@ DISPATCH_SOURCE_TYPE_DECL(interval); * The handle is a process identifier (pid_t). */ #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(vfs); /*! 
* @const DISPATCH_SOURCE_TYPE_VM @@ -89,10 +78,9 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead. */ #define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm) -__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3, - __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; +API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE", + macos(10.7,10.10), ios(4.3,8.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(vm); /*! * @const DISPATCH_SOURCE_TYPE_MEMORYSTATUS @@ -101,21 +89,19 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; * dispatch_source_memorystatus_flags_t. */ #define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus) -__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE", + macos(10.9, 10.12), ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s - _dispatch_source_type_memorystatus; +DISPATCH_SOURCE_TYPE_DECL(memorystatus); /*! * @const DISPATCH_SOURCE_TYPE_SOCK * @discussion A dispatch source that monitors events on socket state changes. */ #define DISPATCH_SOURCE_TYPE_SOCK (&_dispatch_source_type_sock) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(sock); + __END_DECLS @@ -179,6 +165,16 @@ enum { DISPATCH_SOCK_NOTIFY_ACK = 0x00004000, }; +/*! + * @enum dispatch_source_nw_channel_flags_t + * + * @constant DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE + * Received network channel flow advisory. + */ +enum { + DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE = 0x00000001, +}; + /*! * @enum dispatch_source_vfs_flags_t * @@ -214,6 +210,12 @@ enum { * * @constant DISPATCH_VFS_QUOTA * We hit a user quota (quotactl) for this filesystem. + * + * @constant DISPATCH_VFS_NEARLOWDISK + * Filesystem is nearly full (below NEARLOWDISK level). + * + * @constant DISPATCH_VFS_DESIREDDISK + * Filesystem has exceeded the DESIREDDISK level */ enum { DISPATCH_VFS_NOTRESP = 0x0001, @@ -227,6 +229,8 @@ enum { DISPATCH_VFS_UPDATE = 0x0100, DISPATCH_VFS_VERYLOWDISK = 0x0200, DISPATCH_VFS_QUOTA = 0x1000, + DISPATCH_VFS_NEARLOWDISK = 0x2000, + DISPATCH_VFS_DESIREDDISK = 0x4000, }; /*! @@ -269,10 +273,20 @@ enum { * @constant DISPATCH_PROC_REAP * The process has been reaped by the parent process via wait*(). * This flag is deprecated and will be removed in a future release. + * + * @constant DISPATCH_PROC_EXIT_STATUS + * The process has exited. Specifying this flag allows the process exit status + * to be retrieved from the source's status value, as returned by the + * dispatch_source_get_extended_data() function. 
The macros + * DISPATCH_PROC_EXIT_STATUS_EXITED(), DISPATCH_PROC_EXIT_STATUS_CODE(), + * DISPATCH_PROC_EXIT_STATUS_SIGNALED(), DISPATCH_PROC_EXIT_STATUS_TERMSIG() and + * DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED() can be used to examine the status + * value. */ enum { - DISPATCH_PROC_REAP __OSX_AVAILABLE_BUT_DEPRECATED( - __MAC_10_6, __MAC_10_9, __IPHONE_4_0, __IPHONE_7_0) = 0x10000000, + DISPATCH_PROC_REAP DISPATCH_ENUM_API_DEPRECATED("unsupported flag", + macos(10.6,10.9), ios(4.0,7.0)) = 0x10000000, + DISPATCH_PROC_EXIT_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(2.0)) = 0x04000000, }; /*! @@ -283,9 +297,8 @@ enum { */ enum { - DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG( - __MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0, - "Use DISPATCH_MEMORYPRESSURE_WARN instead") = 0x80000000, + DISPATCH_VM_PRESSURE DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN", macos(10.7, 10.10), ios(4.3, 8.0)) + = 0x80000000, }; /*! @@ -297,8 +310,7 @@ enum { * Restricted to the root user. */ enum { - DISPATCH_MEMORYPRESSURE_LOW_SWAP - __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08, + DISPATCH_MEMORYPRESSURE_LOW_SWAP DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x08, }; /*! @@ -307,29 +319,17 @@ enum { */ enum { DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL - __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - = 0x01, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_NORMAL", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x01, DISPATCH_MEMORYSTATUS_PRESSURE_WARN - __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - = 0x02, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x02, DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL - __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - = 0x04, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_CRITICAL", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x04, DISPATCH_MEMORYSTATUS_LOW_SWAP - __OSX_DEPRECATED(10.10, 10.12, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") - __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") - __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") - = 0x08, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_LOW_SWAP", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x08, }; /*! @@ -343,18 +343,109 @@ enum { * The memory of the process has reached 100% of its high watermark limit. 
*/ enum { - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) - __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x10, + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.10), tvos(10.10), watchos(3.0)) = 0x10, - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) - __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x20, + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.10), tvos(10.10), watchos(3.0)) = 0x20, }; +/*! + * Macros to check the exit status obtained from the status field of the + * structure returned by the dispatch_source_get_extended_data() function for a + * source of type DISPATCH_SOURCE_TYPE_PROC when DISPATCH_PROC_EXIT_STATUS has + * been requested. + * + * DISPATCH_PROC_EXIT_STATUS_EXITED returns whether the process exited. If this + * is true, the exit status can be obtained from DISPATCH_PROC_EXIT_STATUS_CODE. + * + * DISPATCH_PROC_EXIT_STATUS_SIGNALED returns whether the process was terminated + * by a signal. + * + * DISPATCH_PROC_EXIT_STATUS_TERMSIG returns the signal that caused the process + * to terminate, or 0 if the process was not terminated by a signal. + * + * DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED returns whether a core dump of the + * process was created. + */ +#define DISPATCH_PROC_EXIT_STATUS_EXITED(status) ((bool)WIFEXITED(status)) +#define DISPATCH_PROC_EXIT_STATUS_CODE(status) ((int)WEXITSTATUS(status)) +#define DISPATCH_PROC_EXIT_STATUS_SIGNALED(status) ((bool)WIFSIGNALED(status)) +#define DISPATCH_PROC_EXIT_STATUS_TERMSIG(status) ((int)WTERMSIG(status)) +#define DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED(status) ((bool)WCOREDUMP(status)) __BEGIN_DECLS +/*! + * @function dispatch_source_set_mandatory_cancel_handler + * + * @abstract + * Sets the event handler block for the given dispatch source, and indicates + * that calling dispatch_source_cancel() is mandatory for this source object. + * + * @discussion + * The cancellation handler (if specified) will be submitted to the source's + * target queue in response to a call to dispatch_source_cancel() once the + * system has released all references to the source's underlying handle and + * the source's event handler block has returned. + * + * When this function has been used to set a cancellation handler, then + * the following result in an assertion and the process being terminated: + * - releasing the last reference on the dispatch source without having + * cancelled it by calling dispatch_source_cancel(); + * - changing any handler after the source has been activated; + * - changing the target queue of the source after it has been activated. + * + * IMPORTANT: + * Source cancellation and a cancellation handler are required for file + * descriptor and mach port based sources in order to safely close the + * descriptor or destroy the port. Making the cancellation handler of such + * sources mandatory is strongly recommended. + * Closing the descriptor or port before the cancellation handler is invoked may + * result in a race condition. If a new descriptor is allocated with the same + * value as the recently closed descriptor while the source's event handler is + * still running, the event handler may read/write data to the wrong descriptor. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined.
+ * + * @param handler + * The cancellation handler block to submit to the source's target queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_set_mandatory_cancel_handler(dispatch_source_t source, + dispatch_block_t handler); +#endif /* __BLOCKS__ */ + +/*! + * @function dispatch_source_set_mandatory_cancel_handler_f + * + * @abstract + * Sets the event handler function for the given dispatch source, and causes an + * assertion if this source is released before having been explicitly canceled. + * + * @discussion + * See dispatch_source_set_mandatory_cancel_handler() for more details. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The cancellation handler function to submit to the source's target queue. + * The context parameter passed to the event handler function is the current + * context of the dispatch source at the time the handler call is made. + * The result of passing NULL in this parameter is undefined. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t source, + dispatch_function_t handler); + /*! * @function dispatch_source_cancel_and_wait * @@ -400,64 +491,11 @@ __BEGIN_DECLS * The dispatch source to be canceled. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) -__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_source_cancel_and_wait(dispatch_source_t source); -/*! - * @typedef dispatch_timer_aggregate_t - * - * @abstract - * Dispatch timer aggregates are sets of related timers. - */ -DISPATCH_DECL(dispatch_timer_aggregate); - -/*! - * @function dispatch_timer_aggregate_create - * - * @abstract - * Creates a new dispatch timer aggregate. - * - * @discussion - * A dispatch timer aggregate is a set of related timers whose overall timing - * parameters can be queried. - * - * Timers are added to an aggregate when a timer source is created with type - * DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE. - * - * @result - * The newly created dispatch timer aggregate. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_timer_aggregate_t -dispatch_timer_aggregate_create(void); - -/*! - * @function dispatch_timer_aggregate_get_delay - * - * @abstract - * Retrieves the delay until a timer in the given aggregate will next fire. - * - * @param aggregate - * The dispatch timer aggregate to query. - * - * @param leeway_ptr - * Optional pointer to a variable filled with the leeway (in ns) that will be - * applied to the return value. May be NULL. - * - * @result - * Delay in ns from now. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -uint64_t -dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, - uint64_t *_Nullable leeway_ptr); - #if __has_include() /*! 
* @typedef dispatch_mig_callback_t @@ -468,7 +506,7 @@ dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message, mach_msg_header_t *reply); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, @@ -480,13 +518,66 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, * @abstract * Extract the context pointer from a mach message trailer. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void *_Nullable dispatch_mach_msg_get_context(mach_msg_header_t *msg); #endif +/*! + * @typedef dispatch_source_extended_data_t + * + * @abstract + * Type used by dispatch_source_get_extended_data() to return a consistent + * snapshot of the data and status of a dispatch source. + */ +typedef struct dispatch_source_extended_data_s { + unsigned long data; + unsigned long status; +} *dispatch_source_extended_data_t; + +/*! + * @function dispatch_source_get_extended_data + * + * @abstract + * Returns the current data and status values for a dispatch source. + * + * @discussion + * This function is intended to be called from within the event handler block. + * The result of calling this function outside of the event handler callback is + * undefined. + * + * @param source + * The result of passing NULL in this parameter is undefined. + * + * @param data + * A pointer to a dispatch_source_extended_data_s in which the data and status + * will be returned. The data field is populated with the value that would be + * returned by dispatch_source_get_data(). The value of the status field should + * be interpreted according to the type of the dispatch source: + * + * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_exit_flags_t + * + * If called from the event handler of a data source type not listed above, the + * status value is undefined. + * + * @param size + * The size of the specified structure. Should be set to + * sizeof(dispatch_source_extended_data_s). + * + * @result + * The size of the structure returned in *data, which will never be greater than + * the value of the size argument. If this is less than the value of the size + * argument, the remaining space in data will have been populated with zeroes. 
+ */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW +size_t +dispatch_source_get_extended_data(dispatch_source_t source, + dispatch_source_extended_data_t data, size_t size); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/src/Makefile.am b/src/Makefile.am index a574288e7..f774e9fe1 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -9,38 +9,45 @@ else lib_LTLIBRARIES=libdispatch.la endif -libdispatch_la_SOURCES= \ - allocator.c \ - apply.c \ - benchmark.c \ - data.c \ +libdispatch_la_SOURCES= \ + allocator.c \ + apply.c \ + benchmark.c \ + data.c \ + init.c \ introspection.c \ - init.c \ - io.c \ - object.c \ - once.c \ - queue.c \ - semaphore.c \ - source.c \ - time.c \ - transform.c \ - voucher.c \ + io.c \ + mach.c \ + object.c \ + once.c \ + queue.c \ + semaphore.c \ + source.c \ + time.c \ + transform.c \ + voucher.c \ protocol.defs \ - provider.d \ - allocator_internal.h \ + provider.d \ + allocator_internal.h \ data_internal.h \ inline_internal.h \ - internal.h \ + internal.h \ introspection_internal.h \ io_internal.h \ + mach_internal.h \ object_internal.h \ queue_internal.h \ - semaphore_internal.h \ - shims.h \ + semaphore_internal.h \ + shims.h \ source_internal.h \ - trace.h \ + trace.h \ voucher_internal.h \ - firehose/firehose_internal.h \ + event/event.c \ + event/event_config.h \ + event/event_epoll.c \ + event/event_internal.h \ + event/event_kevent.c \ + firehose/firehose_internal.h \ shims/android_stubs.h \ shims/atomic.h \ shims/atomic_sfb.h \ @@ -52,7 +59,7 @@ libdispatch_la_SOURCES= \ shims/lock.h \ shims/perfmon.h \ shims/time.h \ - shims/tsd.h \ + shims/tsd.h \ shims/yield.h EXTRA_libdispatch_la_SOURCES= @@ -65,16 +72,11 @@ DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ if DISPATCH_ENABLE_ASSERTS DISPATCH_CFLAGS+=-DDISPATCH_DEBUG=1 endif -AM_CFLAGS= $(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) +AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) -AM_CXXFLAGS=$(KQUEUE_CFLAGS) $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) +AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) -if BUILD_OWN_KQUEUES - KQUEUE_LIBS+=$(top_builddir)/libkqueue/libkqueue.la - KQUEUE_CFLAGS+=-I$(top_srcdir)/libkqueue/include -endif - if BUILD_OWN_PTHREAD_WORKQUEUES PTHREAD_WORKQUEUE_LIBS=$(top_builddir)/libpwq/libpthread_workqueue.la PTHREAD_WORKQUEUE_CFLAGS=-I$(top_srcdir)/libpwq/include @@ -94,7 +96,7 @@ endif endif libdispatch_la_LDFLAGS=-avoid-version -libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS) +libdispatch_la_LIBADD=$(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS) if HAVE_DARWIN_LD libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \ diff --git a/src/allocator.c b/src/allocator.c index a3a8c650a..e6ea77217 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -274,22 +274,16 @@ mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap, dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP); #endif const bitmap_t mask = BITMAP_C(1) << bitmap_index; - bitmap_t s, s_new, s_masked; + bitmap_t s, s_new; - if (!bitmap_is_full(*bitmap)) { - return; - } - s_new = *supermap; - for (;;) { - // No barriers because supermaps are only advisory, they - // don't protect access 
to other memory. - s = s_new; - s_masked = s | mask; - if (os_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || - !bitmap_is_full(*bitmap)) { - return; + // No barriers because supermaps are only advisory, they + // don't protect access to other memory. + os_atomic_rmw_loop(supermap, s, s_new, relaxed, { + if (!bitmap_is_full(*bitmap)) { + os_atomic_rmw_loop_give_up(return); } - } + s_new = s | mask; + }); } #pragma mark - diff --git a/src/apply.c b/src/apply.c index e051a1630..79c4e9594 100644 --- a/src/apply.c +++ b/src/apply.c @@ -52,10 +52,10 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags) _dispatch_thread_context_push(&apply_ctxt); dispatch_thread_frame_s dtf; - pthread_priority_t old_dp; + dispatch_priority_t old_dbp = 0; if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { _dispatch_thread_frame_push(&dtf, dq); - old_dp = _dispatch_set_defaultpriority(dq->dq_priority, NULL); + old_dbp = _dispatch_set_basepri(dq->dq_priority); } dispatch_invoke_flags_t flags = da->da_flags; @@ -70,7 +70,7 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags) } while (fastpath(idx < iter)); if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { - _dispatch_reset_defaultpriority(old_dp); + _dispatch_reset_basepri(old_dbp); _dispatch_thread_frame_pop(&dtf); } @@ -181,9 +181,8 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, } _dispatch_thread_event_init(&da->da_event); - - _dispatch_queue_push_list(dq, head, tail, head->dc_priority, - continuation_cnt); + // FIXME: dq may not be the right queue for the priority of `head` + _dispatch_root_queue_push_inline(dq, head, tail, continuation_cnt); // Call the first element directly _dispatch_apply_invoke_and_wait(da); } @@ -252,7 +251,7 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, } if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { dq = old_dq ? 
old_dq : _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false); + DISPATCH_QOS_DEFAULT, false); while (slowpath(dq->do_targetq)) { dq = dq->do_targetq; } diff --git a/src/data.c b/src/data.c index 644328911..adcfbb2f7 100644 --- a/src/data.c +++ b/src/data.c @@ -433,7 +433,7 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // find the record containing the end of the current range // and optimize the case when you just remove bytes at the origin - size_t count, last_length; + size_t count, last_length = 0; if (to_the_end) { count = dd_num_records - i; diff --git a/src/data.m b/src/data.m index 190b1edd1..9971f18bf 100644 --- a/src/data.m +++ b/src/data.m @@ -29,8 +29,8 @@ #include @interface DISPATCH_CLASS(data) () -@property (readonly) NSUInteger length; -@property (readonly) const void *bytes NS_RETURNS_INNER_POINTER; +@property (readonly,nonatomic) NSUInteger length; +@property (readonly,nonatomic) const void *bytes NS_RETURNS_INNER_POINTER; - (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm; @@ -124,9 +124,9 @@ - (NSString *)debugDescription { if (!nsstring) return nil; char buf[2048]; _dispatch_data_debug(self, buf, sizeof(buf)); - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s>"], - class_getName([self class]), buf]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return [nsstring stringWithFormat:format, class_getName([self class]), buf]; } - (NSUInteger)length { diff --git a/src/event/event.c b/src/event/event.c new file mode 100644 index 000000000..2a8a8c381 --- /dev/null +++ b/src/event/event.c @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +DISPATCH_NOINLINE +static dispatch_unote_t +_dispatch_unote_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + dispatch_unote_linkage_t dul; + dispatch_unote_class_t du; + + if (mask & ~dst->dst_mask) { + return DISPATCH_UNOTE_NULL; + } + + if (dst->dst_filter != DISPATCH_EVFILT_TIMER) { + if (dst->dst_mask && !mask) { + return DISPATCH_UNOTE_NULL; + } + } + + if ((dst->dst_flags & EV_UDATA_SPECIFIC) || + (dst->dst_filter == DISPATCH_EVFILT_TIMER)) { + du = _dispatch_calloc(1u, dst->dst_size); + } else { + dul = _dispatch_calloc(1u, sizeof(*dul) + dst->dst_size); + du = _dispatch_unote_linkage_get_unote(dul)._du; + } + du->du_type = dst; + du->du_ident = (uint32_t)handle; + du->du_filter = dst->dst_filter; + du->du_fflags = (typeof(du->du_fflags))mask; + if (dst->dst_flags & EV_UDATA_SPECIFIC) { + du->du_is_direct = true; + } + du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR; + return (dispatch_unote_t){ ._du = du }; +} + +DISPATCH_NOINLINE +dispatch_unote_t +_dispatch_unote_create_with_handle(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + if (!handle) { + return DISPATCH_UNOTE_NULL; + } + return _dispatch_unote_create(dst, handle, mask); +} + +DISPATCH_NOINLINE +dispatch_unote_t +_dispatch_unote_create_with_fd(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ +#if !TARGET_OS_MAC // + if (handle > INT_MAX) { + return DISPATCH_UNOTE_NULL; + } +#endif + dispatch_unote_t du = _dispatch_unote_create(dst, handle, mask); + if (du._du) { + int16_t filter = dst->dst_filter; + du._du->du_data_action = (filter == EVFILT_READ||filter == EVFILT_WRITE) + ? DISPATCH_UNOTE_ACTION_DATA_SET : DISPATCH_UNOTE_ACTION_DATA_OR; + } + return du; +} + +DISPATCH_NOINLINE +dispatch_unote_t +_dispatch_unote_create_without_handle(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + if (handle) { + return DISPATCH_UNOTE_NULL; + } + return _dispatch_unote_create(dst, handle, mask); +} + +DISPATCH_NOINLINE +void +_dispatch_unote_dispose(dispatch_unote_t du) +{ + void *ptr = du._du; +#if HAVE_MACH + if (du._du->dmrr_handler_is_block) { + Block_release(du._dmrr->dmrr_handler_ctxt); + } +#endif + if (du._du->du_is_timer) { + if (du._dt->dt_pending_config) { + free(du._dt->dt_pending_config); + } + } else if (!du._du->du_is_direct) { + ptr = _dispatch_unote_get_linkage(du); + } + free(ptr); +} + +#pragma mark data or / add + +static dispatch_unote_t +_dispatch_source_data_create(dispatch_source_type_t dst, uintptr_t handle, + unsigned long mask) +{ + if (handle || mask) { + return DISPATCH_UNOTE_NULL; + } + + // bypass _dispatch_unote_create() because this is always "direct" + // even when EV_UDATA_SPECIFIC is 0 + dispatch_unote_class_t du = _dispatch_calloc(1u, dst->dst_size); + du->du_type = dst; + du->du_filter = dst->dst_filter; + du->du_is_direct = true; + return (dispatch_unote_t){ ._du = du }; +} + +const dispatch_source_type_s _dispatch_source_type_data_add = { + .dst_kind = "data-add", + .dst_filter = DISPATCH_EVFILT_CUSTOM_ADD, + .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_data_create, + .dst_merge_evt = NULL, +}; + +const dispatch_source_type_s _dispatch_source_type_data_or = { + .dst_kind = "data-or", + .dst_filter = DISPATCH_EVFILT_CUSTOM_OR, + .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = 
_dispatch_source_data_create, + .dst_merge_evt = NULL, +}; + +const dispatch_source_type_s _dispatch_source_type_data_replace = { + .dst_kind = "data-replace", + .dst_filter = DISPATCH_EVFILT_CUSTOM_REPLACE, + .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_data_create, + .dst_merge_evt = NULL, +}; + +#pragma mark file descriptors + +const dispatch_source_type_s _dispatch_source_type_read = { + .dst_kind = "read", + .dst_filter = EVFILT_READ, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, +#if DISPATCH_EVENT_BACKEND_KEVENT +#if HAVE_DECL_NOTE_LOWAT + .dst_fflags = NOTE_LOWAT, +#endif + .dst_data = 1, +#endif // DISPATCH_EVENT_BACKEND_KEVENT + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_fd, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +const dispatch_source_type_s _dispatch_source_type_write = { + .dst_kind = "write", + .dst_filter = EVFILT_WRITE, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, +#if DISPATCH_EVENT_BACKEND_KEVENT +#if HAVE_DECL_NOTE_LOWAT + .dst_fflags = NOTE_LOWAT, +#endif + .dst_data = 1, +#endif // DISPATCH_EVENT_BACKEND_KEVENT + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_fd, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +#pragma mark signals + +static dispatch_unote_t +_dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle, + unsigned long mask) +{ + if (handle >= NSIG) { + return DISPATCH_UNOTE_NULL; + } + dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask); + if (du._du) { + du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_ADD; + } + return du; +} + +const dispatch_source_type_s _dispatch_source_type_signal = { + .dst_kind = "signal", + .dst_filter = EVFILT_SIGNAL, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_signal_create, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +#pragma mark timers + +bool _dispatch_timers_reconfigure, _dispatch_timers_expired; +uint32_t _dispatch_timers_processing_mask; +#if DISPATCH_USE_DTRACE +uint32_t _dispatch_timers_will_wake; +#endif +#define DISPATCH_TIMER_HEAP_INITIALIZER(tidx) \ + [tidx] = { \ + .dth_target = UINT64_MAX, \ + .dth_deadline = UINT64_MAX, \ + } +#define DISPATCH_TIMER_HEAP_INIT(kind, qos) \ + DISPATCH_TIMER_HEAP_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)) + +struct dispatch_timer_heap_s _dispatch_timers_heap[] = { + DISPATCH_TIMER_HEAP_INIT(WALL, NORMAL), + DISPATCH_TIMER_HEAP_INIT(MACH, NORMAL), +#if DISPATCH_HAVE_TIMER_QOS + DISPATCH_TIMER_HEAP_INIT(WALL, CRITICAL), + DISPATCH_TIMER_HEAP_INIT(MACH, CRITICAL), + DISPATCH_TIMER_HEAP_INIT(WALL, BACKGROUND), + DISPATCH_TIMER_HEAP_INIT(MACH, BACKGROUND), +#endif +}; + +static dispatch_unote_t +_dispatch_source_timer_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + uint32_t fflags = dst->dst_fflags; + dispatch_unote_t du; + + // normalize flags + if (mask & DISPATCH_TIMER_STRICT) { + mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND; + } + + if (fflags & DISPATCH_TIMER_INTERVAL) { + if (!handle) return DISPATCH_UNOTE_NULL; + du = _dispatch_unote_create_without_handle(dst, 0, mask); + } else { + du = _dispatch_unote_create_without_handle(dst, handle, mask); + } + + if (du._dt) { + du._dt->du_is_timer = true; + du._dt->du_data_action = 
DISPATCH_UNOTE_ACTION_DATA_ADD; + du._dt->du_fflags |= fflags; + du._dt->du_ident = _dispatch_source_timer_idx(du); + du._dt->dt_timer.target = UINT64_MAX; + du._dt->dt_timer.deadline = UINT64_MAX; + du._dt->dt_timer.interval = UINT64_MAX; + } + return du; +} + +const dispatch_source_type_s _dispatch_source_type_timer = { + .dst_kind = "timer", + .dst_filter = DISPATCH_EVFILT_TIMER, + .dst_flags = EV_DISPATCH, + .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, + .dst_fflags = 0, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, +}; + +const dispatch_source_type_s _dispatch_source_type_after = { + .dst_kind = "timer (after)", + .dst_filter = DISPATCH_EVFILT_TIMER, + .dst_flags = EV_DISPATCH, + .dst_mask = 0, + .dst_fflags = DISPATCH_TIMER_AFTER, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, +}; + +const dispatch_source_type_s _dispatch_source_type_interval = { + .dst_kind = "timer (interval)", + .dst_filter = DISPATCH_EVFILT_TIMER, + .dst_flags = EV_DISPATCH, + .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND + |DISPATCH_INTERVAL_UI_ANIMATION, + .dst_fflags = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_CLOCK_MACH, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, +}; diff --git a/src/event/event_config.h b/src/event/event_config.h new file mode 100644 index 000000000..7f7761c32 --- /dev/null +++ b/src/event/event_config.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_EVENT_EVENT_CONFIG__ +#define __DISPATCH_EVENT_EVENT_CONFIG__ + +#if defined(__linux__) +# include +# define DISPATCH_EVENT_BACKEND_EPOLL 1 +# define DISPATCH_EVENT_BACKEND_KEVENT 0 +#elif __has_include() +# include +# define DISPATCH_EVENT_BACKEND_EPOLL 0 +# define DISPATCH_EVENT_BACKEND_KEVENT 1 +#else +# error unsupported event loop +#endif + +#if DISPATCH_DEBUG +#define DISPATCH_MGR_QUEUE_DEBUG 1 +#endif + +#ifndef DISPATCH_MGR_QUEUE_DEBUG +#define DISPATCH_MGR_QUEUE_DEBUG 0 +#endif + +#ifndef DISPATCH_MACHPORT_DEBUG +#define DISPATCH_MACHPORT_DEBUG 0 +#endif + +#ifndef EV_VANISHED +#define EV_VANISHED 0x0200 +#endif + +#if DISPATCH_EVENT_BACKEND_KEVENT +# if defined(EV_SET_QOS) +# define DISPATCH_USE_KEVENT_QOS 1 +# ifndef KEVENT_FLAG_IMMEDIATE +# define KEVENT_FLAG_IMMEDIATE 0x001 +# endif +# ifndef KEVENT_FLAG_ERROR_EVENTS +# define KEVENT_FLAG_ERROR_EVENTS 0x002 +# endif +# else +# define DISPATCH_USE_KEVENT_QOS 0 +# endif + +# ifdef NOTE_LEEWAY +# define DISPATCH_HAVE_TIMER_COALESCING 1 +# else +# define NOTE_LEEWAY 0 +# define DISPATCH_HAVE_TIMER_COALESCING 0 +# endif // !NOTE_LEEWAY +# if defined(NOTE_CRITICAL) && defined(NOTE_BACKGROUND) +# define DISPATCH_HAVE_TIMER_QOS 1 +# else +# undef NOTE_CRITICAL +# define NOTE_CRITICAL 0 +# undef NOTE_BACKGROUND +# define NOTE_BACKGROUND 0 +# define DISPATCH_HAVE_TIMER_QOS 0 +# endif // !defined(NOTE_CRITICAL) || !defined(NOTE_BACKGROUND) + +# ifndef NOTE_FUNLOCK +# define NOTE_FUNLOCK 0x00000100 +# endif + +# if HAVE_DECL_NOTE_REAP +# if defined(NOTE_REAP) && defined(__APPLE__) +# undef NOTE_REAP +# define NOTE_REAP 0x10000000 // +# endif +# endif // HAVE_DECL_NOTE_REAP + +# ifndef VQ_QUOTA +# undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982 +# endif // VQ_QUOTA + +# ifndef VQ_NEARLOWDISK +# undef HAVE_DECL_VQ_NEARLOWDISK +# endif // VQ_NEARLOWDISK + +# ifndef VQ_DESIRED_DISK +# undef HAVE_DECL_VQ_DESIRED_DISK +# endif // VQ_DESIRED_DISK + +# ifndef NOTE_MEMORYSTATUS_LOW_SWAP +# define NOTE_MEMORYSTATUS_LOW_SWAP 0x8 +# endif + +# if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \ + !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN +# define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0 +# endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN + +# if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \ + !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL +# define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0 +# endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL + +# ifndef DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS +# if TARGET_OS_MAC && !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) + // deferred delete can return bogus ENOENTs on older kernels +# define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1 +# else +# define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0 +# endif +# endif +#else // DISPATCH_EVENT_BACKEND_KEVENT +# define EV_ADD 0x0001 +# define EV_DELETE 0x0002 +# define EV_ENABLE 0x0004 + +# define EV_ONESHOT 0x0010 +# define EV_CLEAR 0x0020 +# define EV_DISPATCH 0x0080 + +# define EVFILT_READ (-1) +# define EVFILT_WRITE (-2) +# define EVFILT_SIGNAL (-3) +# define EVFILT_TIMER (-4) +# define EVFILT_SYSCOUNT 4 + +# define DISPATCH_HAVE_TIMER_QOS 0 +# define DISPATCH_HAVE_TIMER_COALESCING 0 +# define KEVENT_FLAG_IMMEDIATE 0x001 +#endif // !DISPATCH_EVENT_BACKEND_KEVENT + +#ifdef EV_UDATA_SPECIFIC +# define DISPATCH_EV_DIRECT (EV_UDATA_SPECIFIC|EV_DISPATCH) +#else +# define DISPATCH_EV_DIRECT 0x0000 +# define 
EV_UDATA_SPECIFIC 0x0000 +# undef EV_VANISHED +# define EV_VANISHED 0x0000 +#endif + +#define DISPATCH_EV_MSG_NEEDS_FREE 0x10000 // mach message needs to be freed() + +#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) +#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) +#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) +#define DISPATCH_EVFILT_CUSTOM_REPLACE (-EVFILT_SYSCOUNT - 4) +#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 5) +#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 5) + +#if HAVE_MACH +# if !EV_UDATA_SPECIFIC +# error mach support requires EV_UDATA_SPECIFIC +# endif + +# ifndef MACH_RCV_VOUCHER +# define MACH_RCV_VOUCHER 0x00000800 +# endif + +# ifndef MACH_NOTIFY_SEND_POSSIBLE +# undef MACH_NOTIFY_SEND_POSSIBLE +# define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME +# endif + +# ifndef NOTE_MACH_CONTINUOUS_TIME +# define NOTE_MACH_CONTINUOUS_TIME 0 +# endif // NOTE_MACH_CONTINUOUS_TIME + +# ifndef HOST_NOTIFY_CALENDAR_SET +# define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE +# endif // HOST_NOTIFY_CALENDAR_SET + +# ifndef HOST_CALENDAR_SET_REPLYID +# define HOST_CALENDAR_SET_REPLYID 951 +# endif // HOST_CALENDAR_SET_REPLYID + +# ifndef MACH_SEND_OVERRIDE +# define MACH_SEND_OVERRIDE 0x00000020 +typedef unsigned int mach_msg_priority_t; +# define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0) +# endif // MACH_SEND_OVERRIDE + +# define DISPATCH_MACH_TRAILER_SIZE sizeof(dispatch_mach_trailer_t) +# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX +# define DISPATCH_MACH_RCV_OPTIONS ( \ + MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ + MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | \ + MACH_RCV_VOUCHER) +#endif // HAVE_MACH + +#endif // __DISPATCH_EVENT_EVENT_CONFIG__ diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c new file mode 100644 index 000000000..2788b1008 --- /dev/null +++ b/src/event/event_epoll.c @@ -0,0 +1,561 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + + +#include "internal.h" +#if DISPATCH_EVENT_BACKEND_EPOLL +#include +#include +#include +#include +#include + +#ifndef EPOLLFREE +#define EPOLLFREE 0x4000 +#endif + +#if !DISPATCH_USE_MGR_THREAD +#error unsupported configuration +#endif + +#define DISPATCH_EPOLL_MAX_EVENT_COUNT 16 + +enum { + DISPATCH_EPOLL_EVENTFD = 0x0001, + DISPATCH_EPOLL_CLOCK_WALL = 0x0002, + DISPATCH_EPOLL_CLOCK_MACH = 0x0003, +}; + +typedef struct dispatch_muxnote_s { + TAILQ_ENTRY(dispatch_muxnote_s) dmn_list; + TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_readers_head; + TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_writers_head; + int dmn_fd; + int dmn_ident; + uint32_t dmn_events; + int16_t dmn_filter; + bool dmn_socket_listener; +} *dispatch_muxnote_t; + +typedef struct dispatch_epoll_timeout_s { + int det_fd; + uint16_t det_ident; + bool det_registered; + bool det_armed; +} *dispatch_epoll_timeout_t; + +static int _dispatch_epfd, _dispatch_eventfd; + +static dispatch_once_t epoll_init_pred; +static void _dispatch_epoll_init(void *); + +DISPATCH_CACHELINE_ALIGN +static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) +_dispatch_sources[DSL_HASH_SIZE]; + +#define DISPATCH_EPOLL_TIMEOUT_INITIALIZER(clock) \ + [DISPATCH_CLOCK_##clock] = { \ + .det_fd = -1, \ + .det_ident = DISPATCH_EPOLL_CLOCK_##clock, \ + } +static struct dispatch_epoll_timeout_s _dispatch_epoll_timeout[] = { + DISPATCH_EPOLL_TIMEOUT_INITIALIZER(WALL), + DISPATCH_EPOLL_TIMEOUT_INITIALIZER(MACH), +}; + +#pragma mark dispatch_muxnote_t + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_muxnote_bucket_s * +_dispatch_muxnote_bucket(int ident) +{ + return &_dispatch_sources[DSL_HASH((uint32_t)ident)]; +} +#define _dispatch_unote_muxnote_bucket(du) \ + _dispatch_muxnote_bucket(du._du->du_ident) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_muxnote_t +_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, + uint64_t ident, int16_t filter) +{ + dispatch_muxnote_t dmn; + if (filter == EVFILT_WRITE) filter = EVFILT_READ; + TAILQ_FOREACH(dmn, dmb, dmn_list) { + if (dmn->dmn_ident == ident && dmn->dmn_filter == filter) { + break; + } + } + return dmn; +} +#define _dispatch_unote_muxnote_find(dmb, du) \ + _dispatch_muxnote_find(dmb, du._du->du_ident, du._du->du_filter) + +static void +_dispatch_muxnote_dispose(dispatch_muxnote_t dmn) +{ + if (dmn->dmn_filter != EVFILT_READ || dmn->dmn_fd != dmn->dmn_ident) { + close(dmn->dmn_fd); + } + free(dmn); +} + +static dispatch_muxnote_t +_dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) +{ + dispatch_muxnote_t dmn; + struct stat sb; + int fd = du._du->du_ident; + int16_t filter = du._du->du_filter; + bool socket_listener = false; + sigset_t sigmask; + + switch (filter) { + case EVFILT_SIGNAL: + sigemptyset(&sigmask); + sigaddset(&sigmask, du._du->du_ident); + fd = signalfd(-1, &sigmask, SFD_NONBLOCK | SFD_CLOEXEC); + if (fd < 0) { + return NULL; + } + sigprocmask(SIG_BLOCK, &sigmask, NULL); + break; + + case EVFILT_WRITE: + filter = EVFILT_READ; + case EVFILT_READ: + if (fstat(fd, &sb) < 0) { + return NULL; + } + if (S_ISREG(sb.st_mode)) { + // make a dummy fd that is both readable & writeable + fd = eventfd(1, EFD_CLOEXEC | EFD_NONBLOCK); + if (fd < 0) { + return NULL; + } + } else if (S_ISSOCK(sb.st_mode)) { + socklen_t vlen = sizeof(int); + int v; + if (getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &v, &vlen) == 0) { + socket_listener = (bool)v; + } + } + break; + + default: + DISPATCH_INTERNAL_CRASH(0, "Unexpected filter"); + } + + dmn 
= _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s)); + TAILQ_INIT(&dmn->dmn_readers_head); + TAILQ_INIT(&dmn->dmn_writers_head); + dmn->dmn_fd = fd; + dmn->dmn_ident = du._du->du_ident; + dmn->dmn_filter = filter; + dmn->dmn_events = events; + dmn->dmn_socket_listener = socket_listener; + return dmn; +} + +#pragma mark dispatch_unote_t + +static int +_dispatch_epoll_update(dispatch_muxnote_t dmn, int op) +{ + dispatch_once_f(&epoll_init_pred, NULL, _dispatch_epoll_init); + struct epoll_event ev = { + .events = dmn->dmn_events, + .data = { .ptr = dmn }, + }; + return epoll_ctl(_dispatch_epfd, op, dmn->dmn_fd, &ev); +} + +bool +_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh, + dispatch_priority_t pri) +{ + struct dispatch_muxnote_bucket_s *dmb; + dispatch_muxnote_t dmn; + uint32_t events = EPOLLFREE; + + dispatch_assert(!_dispatch_unote_registered(du)); + du._du->du_priority = pri; + + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + du._du->du_wlh = wlh; + return true; + case EVFILT_WRITE: + events |= EPOLLOUT; + break; + default: + events |= EPOLLIN; + break; + } + + if (du._du->du_type->dst_flags & EV_DISPATCH) { + events |= EPOLLONESHOT; + } + + dmb = _dispatch_unote_muxnote_bucket(du); + dmn = _dispatch_unote_muxnote_find(dmb, du); + if (dmn) { + events &= ~dmn->dmn_events; + if (events) { + dmn->dmn_events |= events; + if (_dispatch_epoll_update(dmn, EPOLL_CTL_MOD) < 0) { + dmn->dmn_events &= ~events; + dmn = NULL; + } + } + } else { + dmn = _dispatch_muxnote_create(du, events); + if (_dispatch_epoll_update(dmn, EPOLL_CTL_ADD) < 0) { + _dispatch_muxnote_dispose(dmn); + dmn = NULL; + } else { + TAILQ_INSERT_TAIL(dmb, dmn, dmn_list); + } + } + + if (dmn) { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + if (events & EPOLLOUT) { + TAILQ_INSERT_TAIL(&dmn->dmn_writers_head, dul, du_link); + } else { + TAILQ_INSERT_TAIL(&dmn->dmn_readers_head, dul, du_link); + } + dul->du_muxnote = dmn; + du._du->du_wlh = DISPATCH_WLH_GLOBAL; + } + return dmn != NULL; +} + +void +_dispatch_unote_resume(dispatch_unote_t du) +{ + dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(du)->du_muxnote; + dispatch_assert(_dispatch_unote_registered(du)); + + _dispatch_epoll_update(dmn, EPOLL_CTL_MOD); +} + +bool +_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) +{ + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + du._du->du_wlh = NULL; + return true; + } + if (_dispatch_unote_registered(du)) { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + uint32_t events = dmn->dmn_events; + + if (du._du->du_filter == EVFILT_WRITE) { + TAILQ_REMOVE(&dmn->dmn_writers_head, dul, du_link); + } else { + TAILQ_REMOVE(&dmn->dmn_readers_head, dul, du_link); + } + _TAILQ_TRASH_ENTRY(dul, du_link); + dul->du_muxnote = NULL; + + if (TAILQ_EMPTY(&dmn->dmn_readers_head)) { + events &= ~EPOLLIN; + } + if (TAILQ_EMPTY(&dmn->dmn_writers_head)) { + events &= ~EPOLLOUT; + } + + if (events == dmn->dmn_events) { + // nothing to do + } else if (events & (EPOLLIN | EPOLLOUT)) { + _dispatch_epoll_update(dmn, EPOLL_CTL_MOD); + } else { + epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL); + TAILQ_REMOVE(_dispatch_unote_muxnote_bucket(du), dmn, dmn_list); + _dispatch_muxnote_dispose(dmn); + } + du._du->du_wlh = NULL; + } + return true; +} + 
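For illustration (not part of the patch): the EVFILT_SIGNAL branch of _dispatch_muxnote_create() above turns a signal source into a Linux signalfd, blocking the signal with sigprocmask() so it is reported as readable data on a descriptor that epoll can watch, rather than through an asynchronous signal handler. A minimal standalone sketch of that mechanism, assuming Linux and using illustrative names only:

#include <sys/signalfd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    sigset_t mask;
    sigemptyset(&mask);
    sigaddset(&mask, SIGUSR1);

    /* Block normal delivery so the signal is only reported through the fd. */
    sigprocmask(SIG_BLOCK, &mask, NULL);
    int sfd = signalfd(-1, &mask, SFD_CLOEXEC);

    kill(getpid(), SIGUSR1); /* make a signal pending for this process */

    struct signalfd_siginfo si;
    /* read() returns one pending signal from the blocked set; an event loop
     * would add sfd to epoll (as the backend above does) instead of blocking. */
    if (read(sfd, &si, sizeof(si)) == (ssize_t)sizeof(si)) {
        printf("received signal %u\n", si.ssi_signo);
    }
    close(sfd);
    return 0;
}

In the backend above, the resulting descriptor is stored in the muxnote's dmn_fd and the pending signalfd_siginfo is consumed by _dispatch_event_merge_signal() during _dispatch_event_loop_drain().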
+#pragma mark timers + +static void +_dispatch_event_merge_timer(dispatch_clock_t clock) +{ + _dispatch_timers_expired = true; + _dispatch_timers_processing_mask |= 1 << DISPATCH_TIMER_INDEX(clock, 0); +#if DISPATCH_USE_DTRACE + _dispatch_timers_will_wake |= 1 << 0; +#endif + _dispatch_epoll_timeout[clock].det_armed = false; + _dispatch_timers_heap[clock].dth_flags &= ~DTH_ARMED; +} + +static void +_dispatch_timeout_program(uint32_t tidx, uint64_t target, uint64_t leeway) +{ + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); + dispatch_epoll_timeout_t timer = &_dispatch_epoll_timeout[clock]; + struct epoll_event ev = { + .events = EPOLLONESHOT | EPOLLIN, + .data = { .u32 = timer->det_ident }, + }; + unsigned long op; + + if (target >= INT64_MAX && !timer->det_registered) { + return; + } + + if (unlikely(timer->det_fd < 0)) { + clockid_t clock; + int fd; + switch (DISPATCH_TIMER_CLOCK(tidx)) { + case DISPATCH_CLOCK_MACH: + clock = CLOCK_MONOTONIC; + break; + case DISPATCH_CLOCK_WALL: + clock = CLOCK_REALTIME; + break; + } + fd = timerfd_create(clock, TFD_NONBLOCK | TFD_CLOEXEC); + if (!dispatch_assume(fd >= 0)) { + return; + } + timer->det_fd = fd; + } + + if (target < INT64_MAX) { + struct itimerspec its = { .it_value = { + .tv_sec = target / NSEC_PER_SEC, + .tv_nsec = target % NSEC_PER_SEC, + } }; + dispatch_assume_zero(timerfd_settime(timer->det_fd, TFD_TIMER_ABSTIME, + &its, NULL)); + if (!timer->det_registered) { + op = EPOLL_CTL_ADD; + } else if (!timer->det_armed) { + op = EPOLL_CTL_MOD; + } else { + return; + } + } else { + op = EPOLL_CTL_DEL; + } + dispatch_assume_zero(epoll_ctl(_dispatch_epfd, op, timer->det_fd, &ev)); + timer->det_armed = timer->det_registered = (op != EPOLL_CTL_DEL);; +} + +void +_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range, + dispatch_clock_now_cache_t nows) +{ + uint64_t target = range.delay; + target += _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); + _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED; + _dispatch_timeout_program(tidx, target, range.leeway); +} + +void +_dispatch_event_loop_timer_delete(uint32_t tidx) +{ + _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED; + _dispatch_timeout_program(tidx, UINT64_MAX, UINT64_MAX); +} + +#pragma mark dispatch_loop + +void +_dispatch_event_loop_atfork_child(void) +{ +} + +void +_dispatch_event_loop_init(void) +{ +} + +static void +_dispatch_epoll_init(void *context DISPATCH_UNUSED) +{ + _dispatch_fork_becomes_unsafe(); + + unsigned int i; + for (i = 0; i < DSL_HASH_SIZE; i++) { + TAILQ_INIT(&_dispatch_sources[i]); + } + + _dispatch_epfd = epoll_create1(EPOLL_CLOEXEC); + if (_dispatch_epfd < 0) { + DISPATCH_INTERNAL_CRASH(errno, "epoll_create1() failed"); + } + + _dispatch_eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (_dispatch_eventfd < 0) { + DISPATCH_INTERNAL_CRASH(errno, "epoll_eventfd() failed"); + } + + struct epoll_event ev = { + .events = EPOLLIN | EPOLLFREE, + .data = { .u32 = DISPATCH_EPOLL_EVENTFD, }, + }; + unsigned long op = EPOLL_CTL_ADD; + if (epoll_ctl(_dispatch_epfd, op, _dispatch_eventfd, &ev) < 0) { + DISPATCH_INTERNAL_CRASH(errno, "epoll_ctl() failed"); + } + +#if DISPATCH_USE_MGR_THREAD + dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); +#endif +} + +void +_dispatch_event_loop_poke(dispatch_wlh_t wlh DISPATCH_UNUSED, + dispatch_priority_t pri DISPATCH_UNUSED, uint32_t flags DISPATCH_UNUSED) +{ + dispatch_once_f(&epoll_init_pred, NULL, _dispatch_epoll_init); + dispatch_assume_zero(eventfd_write(_dispatch_eventfd, 1)); +} + 
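For illustration (not part of the patch): _dispatch_event_loop_poke() wakes a manager thread that may be parked in epoll_wait() by writing to the eventfd registered as DISPATCH_EPOLL_EVENTFD in _dispatch_epoll_init(); the drain loop later reads the counter back. A minimal standalone sketch of that wake-up pattern, assuming Linux and using illustrative names only:

#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int epfd, wakefd;

static void *loop(void *arg)
{
    (void)arg;
    struct epoll_event ev;
    /* Blocks until a registered descriptor (here only the eventfd) fires. */
    int n = epoll_wait(epfd, &ev, 1, -1);
    if (n == 1) {
        eventfd_t value;
        /* Drain the counter so the next poke can be observed again. */
        eventfd_read(wakefd, &value);
        printf("woken, counter was %llu\n", (unsigned long long)value);
    }
    return NULL;
}

int main(void)
{
    epfd = epoll_create1(EPOLL_CLOEXEC);
    wakefd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

    struct epoll_event ev = { .events = EPOLLIN, .data = { .u32 = 0 } };
    epoll_ctl(epfd, EPOLL_CTL_ADD, wakefd, &ev);

    pthread_t t;
    pthread_create(&t, NULL, loop, NULL);
    sleep(1);
    eventfd_write(wakefd, 1); /* the "poke": unblocks epoll_wait() above */
    pthread_join(t, NULL);
    return 0;
}

The patch uses the same pair of calls: eventfd_write() in _dispatch_event_loop_poke(), and eventfd_read() when _dispatch_event_loop_drain() sees the DISPATCH_EPOLL_EVENTFD identifier.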
+static void +_dispatch_event_merge_signal(dispatch_muxnote_t dmn) +{ + dispatch_unote_linkage_t dul, dul_next; + struct signalfd_siginfo si; + + dispatch_assume(read(dmn->dmn_fd, &si, sizeof(si)) == sizeof(si)); + + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0, 0); + } +} + +static uintptr_t +_dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer) +{ + unsigned long op = writer ? SIOCOUTQ : SIOCINQ; + int n; + + if (!writer && dmn->dmn_socket_listener) { + // Linux doesn't support saying how many clients are ready to be + // accept()ed + return 1; + } + + if (dispatch_assume_zero(ioctl(dmn->dmn_ident, op, &n))) { + return 1; + } + return (uintptr_t)n; +} + +static void +_dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) +{ + dispatch_unote_linkage_t dul, dul_next; + uintptr_t data; + + if (events & EPOLLIN) { + data = _dispatch_get_buffer_size(dmn, false); + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0); + } + } + + if (events & EPOLLOUT) { + data = _dispatch_get_buffer_size(dmn, true); + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0); + } + } +} + +DISPATCH_NOINLINE +void +_dispatch_event_loop_drain(uint32_t flags) +{ + struct epoll_event ev[DISPATCH_EPOLL_MAX_EVENT_COUNT]; + int i, r; + int timeout = (flags & KEVENT_FLAG_IMMEDIATE) ? 0 : -1; + +retry: + r = epoll_wait(_dispatch_epfd, ev, countof(ev), timeout); + if (unlikely(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + return; + } + + for (i = 0; i < r; i++) { + dispatch_muxnote_t dmn; + eventfd_t value; + + if (ev[i].events & EPOLLFREE) { + DISPATCH_CLIENT_CRASH(0, "Do not close random Unix descriptors"); + } + + switch (ev[i].data.u32) { + case DISPATCH_EPOLL_EVENTFD: + dispatch_assume_zero(eventfd_read(_dispatch_eventfd, &value)); + break; + + case DISPATCH_EPOLL_CLOCK_WALL: + _dispatch_event_merge_timer(DISPATCH_CLOCK_WALL); + break; + + case DISPATCH_EPOLL_CLOCK_MACH: + _dispatch_event_merge_timer(DISPATCH_CLOCK_MACH); + break; + + default: + dmn = ev[i].data.ptr; + switch (dmn->dmn_filter) { + case EVFILT_SIGNAL: + _dispatch_event_merge_signal(dmn); + break; + + case EVFILT_READ: + _dispatch_event_merge_fd(dmn, ev[i].events); + break; + } + } + } +} + +#endif // DISPATCH_EVENT_BACKEND_EPOLL diff --git a/src/event/event_internal.h b/src/event/event_internal.h new file mode 100644 index 000000000..c84b353f0 --- /dev/null +++ b/src/event/event_internal.h @@ -0,0 +1,448 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_EVENT_EVENT_INTERNAL__ +#define __DISPATCH_EVENT_EVENT_INTERNAL__ + +#include "event_config.h" + +typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle +#define DISPATCH_WLH_GLOBAL ((dispatch_wlh_t)(void*)(~0ul)) +#define DISPATCH_WLH_MANAGER ((dispatch_wlh_t)(void*)(~2ul)) + +#define DISPATCH_UNOTE_DATA_ACTION_SIZE 2 + +#define DISPATCH_UNOTE_CLASS_HEADER() \ + dispatch_source_type_t du_type; \ + uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \ + dispatch_wlh_t du_wlh; \ + uint32_t du_ident; \ + int16_t du_filter; \ + uint8_t du_data_action : DISPATCH_UNOTE_DATA_ACTION_SIZE; \ + uint8_t du_is_direct : 1; \ + uint8_t du_is_timer : 1; \ + uint8_t du_memorypressure_override : 1; \ + uint8_t du_vmpressure_override : 1; \ + uint8_t dmr_async_reply : 1; \ + uint8_t dmrr_handler_is_block : 1; \ + os_atomic(bool) dmsr_notification_armed; \ + uint32_t du_fflags; \ + dispatch_priority_t du_priority + +#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr)) +#define _dispatch_wref2ptr(ref) ((void*)~(ref)) +#define _dispatch_source_from_refs(dr) \ + ((dispatch_source_t)_dispatch_wref2ptr((dr)->du_owner_wref)) + +DISPATCH_ENUM(dispatch_unote_action, uint8_t, + DISPATCH_UNOTE_ACTION_DATA_OR = 0, + DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET, + DISPATCH_UNOTE_ACTION_DATA_SET, + DISPATCH_UNOTE_ACTION_DATA_ADD, + DISPATCH_UNOTE_ACTION_LAST = DISPATCH_UNOTE_ACTION_DATA_ADD +); +_Static_assert(DISPATCH_UNOTE_ACTION_LAST < + (1 << DISPATCH_UNOTE_DATA_ACTION_SIZE), + "DISPATCH_UNOTE_ACTION_LAST too large for du_data_action field"); + +typedef struct dispatch_unote_class_s { + DISPATCH_UNOTE_CLASS_HEADER(); +} *dispatch_unote_class_t; + + +enum { + DS_EVENT_HANDLER = 0, + DS_CANCEL_HANDLER, + DS_REGISTN_HANDLER, +}; + +#define DISPATCH_SOURCE_REFS_HEADER() \ + DISPATCH_UNOTE_CLASS_HEADER(); \ + struct dispatch_continuation_s *volatile ds_handler[3] + +// Source state which may contain references to the source object +// Separately allocated so that 'leaks' can see sources +typedef struct dispatch_source_refs_s { + DISPATCH_SOURCE_REFS_HEADER(); +} *dispatch_source_refs_t; + +typedef struct dispatch_timer_delay_s { + uint64_t delay, leeway; +} dispatch_timer_delay_s; + +#define DTH_TARGET_ID 0u +#define DTH_DEADLINE_ID 1u +#define DTH_ID_COUNT 2u + +typedef struct dispatch_timer_source_s { + union { + struct { + uint64_t target; + uint64_t deadline; + }; + uint64_t heap_key[DTH_ID_COUNT]; + }; + uint64_t interval; +} *dispatch_timer_source_t; + +typedef struct dispatch_timer_config_s { + struct dispatch_timer_source_s dtc_timer; + dispatch_clock_t dtc_clock; +} *dispatch_timer_config_t; + +typedef struct dispatch_timer_source_refs_s { + DISPATCH_SOURCE_REFS_HEADER(); + struct dispatch_timer_source_s dt_timer; + struct dispatch_timer_config_s *dt_pending_config; + uint32_t 
dt_heap_entry[DTH_ID_COUNT]; +} *dispatch_timer_source_refs_t; + +typedef struct dispatch_timer_heap_s { + uint64_t dth_target, dth_deadline; + uint32_t dth_count; + uint16_t dth_segments; +#define DTH_ARMED 1u + uint16_t dth_flags; + dispatch_timer_source_refs_t dth_min[DTH_ID_COUNT]; + void **dth_heap; +} *dispatch_timer_heap_t; + +#if HAVE_MACH +#if DISPATCH_MACHPORT_DEBUG +void dispatch_debug_machport(mach_port_t name, const char *str); +#define _dispatch_debug_machport(name) \ + dispatch_debug_machport((name), __func__) +#else +#define _dispatch_debug_machport(name) ((void)(name)) +#endif // DISPATCH_MACHPORT_DEBUG + +// Mach channel state which may contain references to the channel object +// layout must match dispatch_source_refs_s +struct dispatch_mach_recv_refs_s { + DISPATCH_UNOTE_CLASS_HEADER(); + dispatch_mach_handler_function_t dmrr_handler_func; + void *dmrr_handler_ctxt; +}; +typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t; + +struct dispatch_mach_reply_refs_s { + DISPATCH_UNOTE_CLASS_HEADER(); + dispatch_priority_t dmr_priority; + void *dmr_ctxt; + voucher_t dmr_voucher; + TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; + mach_port_t dmr_waiter_tid; +}; +typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; + +#define _DISPATCH_MACH_STATE_UNUSED_MASK 0xffffffa000000000ull +#define DISPATCH_MACH_STATE_DIRTY 0x0000002000000000ull +#define DISPATCH_MACH_STATE_PENDING_BARRIER 0x0000001000000000ull +#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE 0x0000000800000000ull +#define DISPATCH_MACH_STATE_MAX_QOS_MASK 0x0000000700000000ull +#define DISPATCH_MACH_STATE_MAX_QOS_SHIFT 32 +#define DISPATCH_MACH_STATE_UNLOCK_MASK 0x00000000ffffffffull + +struct dispatch_mach_send_refs_s { + DISPATCH_UNOTE_CLASS_HEADER(); + dispatch_mach_msg_t dmsr_checkin; + TAILQ_HEAD(, dispatch_mach_reply_refs_s) dmsr_replies; + dispatch_unfair_lock_s dmsr_replies_lock; +#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000) +#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0) +#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1) + uint32_t volatile dmsr_disconnect_cnt; + DISPATCH_UNION_LE(uint64_t volatile dmsr_state, + dispatch_unfair_lock_s dmsr_state_lock, + uint32_t dmsr_state_bits + ) DISPATCH_ATOMIC64_ALIGN; + struct dispatch_object_s *volatile dmsr_tail; + struct dispatch_object_s *volatile dmsr_head; + mach_port_t dmsr_send, dmsr_checkin_port; +}; +typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; + +void _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr); + +struct dispatch_xpc_term_refs_s { + DISPATCH_UNOTE_CLASS_HEADER(); +}; +typedef struct dispatch_xpc_term_refs_s *dispatch_xpc_term_refs_t; +#endif // HAVE_MACH + +typedef union dispatch_unote_u { + dispatch_unote_class_t _du; + dispatch_source_refs_t _dr; + dispatch_timer_source_refs_t _dt; +#if HAVE_MACH + dispatch_mach_recv_refs_t _dmrr; + dispatch_mach_send_refs_t _dmsr; + dispatch_mach_reply_refs_t _dmr; + dispatch_xpc_term_refs_t _dxtr; +#endif +} dispatch_unote_t DISPATCH_TRANSPARENT_UNION; + +#define DISPATCH_UNOTE_NULL ((dispatch_unote_t){ ._du = NULL }) + +#if TARGET_OS_EMBEDDED +#define DSL_HASH_SIZE 64u // must be a power of two +#else +#define DSL_HASH_SIZE 256u // must be a power of two +#endif +#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) + +typedef struct dispatch_unote_linkage_s { + TAILQ_ENTRY(dispatch_unote_linkage_s) du_link; + struct dispatch_muxnote_s *du_muxnote; +} DISPATCH_ATOMIC64_ALIGN 
*dispatch_unote_linkage_t; + +#define DU_UNREGISTER_IMMEDIATE_DELETE 0x01 +#define DU_UNREGISTER_ALREADY_DELETED 0x02 +#define DU_UNREGISTER_DISCONNECTED 0x04 +#define DU_UNREGISTER_REPLY_REMOVE 0x08 +#define DU_UNREGISTER_WAKEUP 0x10 + +typedef struct dispatch_source_type_s { + const char *dst_kind; + int16_t dst_filter; + uint16_t dst_flags; + uint32_t dst_fflags; + uint32_t dst_mask; + uint32_t dst_size; +#if DISPATCH_EVENT_BACKEND_KEVENT + uint32_t dst_data; +#endif + + dispatch_unote_t (*dst_create)(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask); +#if DISPATCH_EVENT_BACKEND_KEVENT + bool (*dst_update_mux)(struct dispatch_muxnote_s *dmn); +#endif + void (*dst_merge_evt)(dispatch_unote_t du, uint32_t flags, uintptr_t data, + uintptr_t status, pthread_priority_t pp); +#if HAVE_MACH + void (*dst_merge_msg)(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t sz); +#endif +} dispatch_source_type_s; + +#define dux_create(dst, handle, mask) (dst)->dst_create(dst, handle, mask) +#define dux_merge_evt(du, ...) (du)->du_type->dst_merge_evt(du, __VA_ARGS__) +#define dux_merge_msg(du, ...) (du)->du_type->dst_merge_msg(du, __VA_ARGS__) + +extern const dispatch_source_type_s _dispatch_source_type_after; + +#if HAVE_MACH +extern const dispatch_source_type_s _dispatch_source_type_mach_recv_pset; +extern const dispatch_source_type_s _dispatch_source_type_mach_recv_direct; +extern const dispatch_source_type_s _dispatch_source_type_mach_recv_direct_pset; +extern const dispatch_source_type_s _dispatch_mach_type_send; +extern const dispatch_source_type_s _dispatch_mach_type_recv; +extern const dispatch_source_type_s _dispatch_mach_type_recv_pset; +extern const dispatch_source_type_s _dispatch_mach_type_reply; +extern const dispatch_source_type_s _dispatch_mach_type_reply_pset; +extern const dispatch_source_type_s _dispatch_xpc_type_sigterm; +#endif + +#pragma mark - +#pragma mark deferred items + +#if DISPATCH_EVENT_BACKEND_KEVENT +#if DISPATCH_USE_KEVENT_QOS +typedef struct kevent_qos_s dispatch_kevent_s; +#else +typedef struct kevent dispatch_kevent_s; +#endif +typedef dispatch_kevent_s *dispatch_kevent_t; +#endif // DISPATCH_EVENT_BACKEND_KEVENT + +#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 16 + +typedef struct dispatch_deferred_items_s { +#define DISPATCH_PRIORITY_NOSTASH ((dispatch_priority_t)~0u) + dispatch_priority_t ddi_stashed_pri; + dispatch_queue_t ddi_stashed_rq; + dispatch_queue_t ddi_stashed_dq; +#if DISPATCH_EVENT_BACKEND_KEVENT + int ddi_nevents; + dispatch_kevent_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; +#endif +} dispatch_deferred_items_s, *dispatch_deferred_items_t; + +#pragma mark - +#pragma mark inlines + +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_deferred_items_set(dispatch_deferred_items_t ddi) +{ + _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_deferred_items_t +_dispatch_deferred_items_get(void) +{ + return (dispatch_deferred_items_t) + _dispatch_thread_getspecific(dispatch_deferred_items_key); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_needs_to_return_to_kernel(void) +{ + return (uintptr_t)_dispatch_thread_getspecific(dispatch_r2k_key) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_return_to_kernel(void) +{ + _dispatch_thread_setspecific(dispatch_r2k_key, (void *)1); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_clear_return_to_kernel(void) +{ + 
_dispatch_thread_setspecific(dispatch_r2k_key, (void *)0); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_PURE +static inline dispatch_wlh_t +_dispatch_get_wlh(void) +{ + return _dispatch_thread_getspecific(dispatch_wlh_key); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_wlh(dispatch_wlh_t wlh) +{ + dispatch_assert(_dispatch_get_wlh() == NULL); + dispatch_assert(wlh); + _dispatch_debug("wlh[%p]: set current ", wlh); + _dispatch_thread_setspecific(dispatch_wlh_key, (void *)wlh); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_reset_wlh(void) +{ + _dispatch_debug("wlh[%p]: clear current ", _dispatch_get_wlh()); + _dispatch_thread_setspecific(dispatch_wlh_key, NULL); + _dispatch_clear_return_to_kernel(); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_unote_registered(dispatch_unote_t du) +{ + return du._du->du_wlh != NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_unote_linkage_t +_dispatch_unote_get_linkage(dispatch_unote_t du) +{ + dispatch_assert(!du._du->du_is_direct); + return (dispatch_unote_linkage_t)((char *)du._du + - sizeof(struct dispatch_unote_linkage_s)); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_unote_needs_rearm(dispatch_unote_t du) +{ + return du._du->du_type->dst_flags & (EV_ONESHOT | EV_DISPATCH); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_unote_t +_dispatch_unote_linkage_get_unote(dispatch_unote_linkage_t dul) +{ + return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)(dul + 1) }; +} + +#endif // DISPATCH_PURE_C + +#pragma mark - +#pragma mark prototypes + +#if DISPATCH_HAVE_TIMER_QOS +#define DISPATCH_TIMER_QOS_NORMAL 0u +#define DISPATCH_TIMER_QOS_CRITICAL 1u +#define DISPATCH_TIMER_QOS_BACKGROUND 2u +#define DISPATCH_TIMER_QOS_COUNT 3u +#else +#define DISPATCH_TIMER_QOS_NORMAL 0u +#define DISPATCH_TIMER_QOS_COUNT 1u +#endif + +#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 3u) +#define DISPATCH_TIMER_CLOCK(tidx) (dispatch_clock_t)((tidx) & 1u) + +#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock)) +#define DISPATCH_TIMER_COUNT \ + DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT) +#define DISPATCH_TIMER_IDENT_CANCELED (~0u) + +extern struct dispatch_timer_heap_s _dispatch_timers_heap[DISPATCH_TIMER_COUNT]; +extern bool _dispatch_timers_reconfigure, _dispatch_timers_expired; +extern uint32_t _dispatch_timers_processing_mask; +#if DISPATCH_USE_DTRACE +extern uint32_t _dispatch_timers_will_wake; +#endif + +dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask); +dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask); +dispatch_unote_t _dispatch_unote_create_without_handle( + dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); + +bool _dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh, + dispatch_priority_t pri); +void _dispatch_unote_resume(dispatch_unote_t du); +bool _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags); +void _dispatch_unote_dispose(dispatch_unote_t du); + +void _dispatch_event_loop_atfork_child(void); +void _dispatch_event_loop_init(void); +void _dispatch_event_loop_poke(dispatch_wlh_t wlh, dispatch_priority_t pri, + uint32_t flags); +void _dispatch_event_loop_drain(uint32_t flags); +#if DISPATCH_EVENT_BACKEND_KEVENT +void _dispatch_event_loop_update(void); +void _dispatch_event_loop_merge(dispatch_kevent_t events, int nevents); +#endif +void _dispatch_event_loop_timer_arm(unsigned int tidx, 
+ dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows); +void _dispatch_event_loop_timer_delete(unsigned int tidx); + +#endif /* __DISPATCH_EVENT_EVENT_INTERNAL__ */ diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c new file mode 100644 index 000000000..b3bd63f3a --- /dev/null +++ b/src/event/event_kevent.c @@ -0,0 +1,2426 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#if DISPATCH_EVENT_BACKEND_KEVENT +#if HAVE_MACH +#include "protocol.h" +#include "protocolServer.h" +#endif + +#if DISPATCH_USE_KEVENT_WORKQUEUE && !DISPATCH_USE_KEVENT_QOS +#error unsupported configuration +#endif + +#define DISPATCH_KEVENT_MUXED_MARKER 1ul + +typedef struct dispatch_muxnote_s { + TAILQ_ENTRY(dispatch_muxnote_s) dmn_list; + TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head; + dispatch_wlh_t dmn_wlh; + dispatch_kevent_s dmn_kev; +} *dispatch_muxnote_t; + +static int _dispatch_kq = -1; +static struct { + dispatch_once_t pred; + dispatch_unfair_lock_s lock; +} _dispatch_muxnotes; +#if !DISPATCH_USE_KEVENT_WORKQUEUE +#define _dispatch_muxnotes_lock() \ + _dispatch_unfair_lock_lock(&_dispatch_muxnotes.lock) +#define _dispatch_muxnotes_unlock() \ + _dispatch_unfair_lock_unlock(&_dispatch_muxnotes.lock) +#else +#define _dispatch_muxnotes_lock() +#define _dispatch_muxnotes_unlock() +#endif // !DISPATCH_USE_KEVENT_WORKQUEUE + +DISPATCH_CACHELINE_ALIGN +static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) +_dispatch_sources[DSL_HASH_SIZE]; + +#define DISPATCH_NOTE_CLOCK_WALL NOTE_MACH_CONTINUOUS_TIME +#define DISPATCH_NOTE_CLOCK_MACH 0 + +static const uint32_t _dispatch_timer_index_to_fflags[] = { +#define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \ + [DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)] = \ + DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | \ + NOTE_NSECONDS | NOTE_LEEWAY | (note) + DISPATCH_TIMER_FFLAGS_INIT(WALL, NORMAL, 0), + DISPATCH_TIMER_FFLAGS_INIT(MACH, NORMAL, 0), +#if DISPATCH_HAVE_TIMER_QOS + DISPATCH_TIMER_FFLAGS_INIT(WALL, CRITICAL, NOTE_CRITICAL), + DISPATCH_TIMER_FFLAGS_INIT(MACH, CRITICAL, NOTE_CRITICAL), + DISPATCH_TIMER_FFLAGS_INIT(WALL, BACKGROUND, NOTE_BACKGROUND), + DISPATCH_TIMER_FFLAGS_INIT(MACH, BACKGROUND, NOTE_BACKGROUND), +#endif +#undef DISPATCH_TIMER_FFLAGS_INIT +}; + +static void _dispatch_kevent_timer_drain(dispatch_kevent_t ke); +static void _dispatch_kevent_poke_drain(dispatch_kevent_t ke); + +#pragma mark - +#pragma mark kevent debug + +DISPATCH_NOINLINE +static const char * +_evfiltstr(short filt) +{ + switch (filt) { +#define _evfilt2(f) case (f): return #f + _evfilt2(EVFILT_READ); + _evfilt2(EVFILT_WRITE); + _evfilt2(EVFILT_SIGNAL); + _evfilt2(EVFILT_TIMER); + +#ifdef DISPATCH_EVENT_BACKEND_KEVENT + _evfilt2(EVFILT_AIO); + _evfilt2(EVFILT_VNODE); + _evfilt2(EVFILT_PROC); +#if HAVE_MACH + _evfilt2(EVFILT_MACHPORT); + 
_evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION); +#endif + _evfilt2(EVFILT_FS); + _evfilt2(EVFILT_USER); +#ifdef EVFILT_SOCK + _evfilt2(EVFILT_SOCK); +#endif +#ifdef EVFILT_MEMORYSTATUS + _evfilt2(EVFILT_MEMORYSTATUS); +#endif +#endif // DISPATCH_EVENT_BACKEND_KEVENT + + _evfilt2(DISPATCH_EVFILT_TIMER); + _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); + _evfilt2(DISPATCH_EVFILT_CUSTOM_OR); + _evfilt2(DISPATCH_EVFILT_CUSTOM_REPLACE); + default: + return "EVFILT_missing"; + } +} + +#if DISPATCH_DEBUG +static const char * +_evflagstr2(uint16_t *flagsp) +{ +#define _evflag2(f) \ + if ((*flagsp & (f)) == (f) && (f)) { \ + *flagsp &= ~(f); \ + return #f "|"; \ + } + _evflag2(EV_ADD); + _evflag2(EV_DELETE); + _evflag2(EV_ENABLE); + _evflag2(EV_DISABLE); + _evflag2(EV_ONESHOT); + _evflag2(EV_CLEAR); + _evflag2(EV_RECEIPT); + _evflag2(EV_DISPATCH); + _evflag2(EV_UDATA_SPECIFIC); +#ifdef EV_POLL + _evflag2(EV_POLL); +#endif +#ifdef EV_OOBAND + _evflag2(EV_OOBAND); +#endif + _evflag2(EV_ERROR); + _evflag2(EV_EOF); + _evflag2(EV_VANISHED); + *flagsp = 0; + return "EV_UNKNOWN "; +} + +DISPATCH_NOINLINE +static const char * +_evflagstr(uint16_t flags, char *str, size_t strsize) +{ + str[0] = 0; + while (flags) { + strlcat(str, _evflagstr2(&flags), strsize); + } + size_t sz = strlen(str); + if (sz) str[sz-1] = 0; + return str; +} + +DISPATCH_NOINLINE +static void +dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev, + int i, int n, const char *function, unsigned int line) +{ + char flagstr[256]; + char i_n[31]; + + if (n > 1) { + snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n); + } else { + i_n[0] = '\0'; + } + if (verb == NULL) { + if (kev->flags & EV_DELETE) { + verb = "deleting"; + } else if (kev->flags & EV_ADD) { + verb = "adding"; + } else { + verb = "updating"; + } + } +#if DISPATCH_USE_KEVENT_QOS + _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " + "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " + "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, " + "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident, + _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, + sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, + kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3], + function, line); +#else + _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " + "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx}: " + "%s #%u", verb, kev, i_n, + kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, + sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, + function, line); +#endif +} +#else +static inline void +dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev, + int i, int n, const char *function, unsigned int line) +{ + (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line; +} +#endif // DISPATCH_DEBUG +#define _dispatch_kevent_debug_n(verb, _kev, i, n) \ + dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__) +#define _dispatch_kevent_debug(verb, _kev) \ + _dispatch_kevent_debug_n(verb, _kev, 0, 0) +#if DISPATCH_MGR_QUEUE_DEBUG +#define _dispatch_kevent_mgr_debug(verb, kev) _dispatch_kevent_debug(verb, kev) +#else +#define _dispatch_kevent_mgr_debug(verb, kev) ((void)verb, (void)kev) +#endif + +#if DISPATCH_MACHPORT_DEBUG +#ifndef MACH_PORT_TYPE_SPREQUEST +#define MACH_PORT_TYPE_SPREQUEST 0x40000000 +#endif + +DISPATCH_NOINLINE +void +dispatch_debug_machport(mach_port_t name, const char* str) +{ + mach_port_type_t type; + 
mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0; + unsigned int dnreqs = 0, dnrsiz; + kern_return_t kr = mach_port_type(mach_task_self(), name, &type); + if (kr) { + _dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name, + kr, mach_error_string(kr), str); + return; + } + if (type & MACH_PORT_TYPE_SEND) { + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_SEND, &ns)); + } + if (type & MACH_PORT_TYPE_SEND_ONCE) { + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_SEND_ONCE, &nso)); + } + if (type & MACH_PORT_TYPE_DEAD_NAME) { + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_DEAD_NAME, &nd)); + } + if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) { + kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs); + if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr); + } + if (type & MACH_PORT_TYPE_RECEIVE) { + mach_port_status_t status = { .mps_pset = 0, }; + mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT; + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_RECEIVE, &nr)); + (void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(), + name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt)); + _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " + "dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) " + "sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) " + "seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs, + type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", + status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N", + status.mps_srights ? "Y":"N", status.mps_sorights, + status.mps_qlimit, status.mps_msgcount, status.mps_mscount, + status.mps_seqno, str); + } else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE| + MACH_PORT_TYPE_DEAD_NAME)) { + _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " + "dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs, + type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str); + } else { + _dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type, + str); + } +} +#endif + +#pragma mark dispatch_kevent_t + +#if HAVE_MACH + +static dispatch_once_t _dispatch_mach_host_port_pred; +static mach_port_t _dispatch_mach_host_port; + +static inline void* +_dispatch_kevent_mach_msg_buf(dispatch_kevent_t ke) +{ + return (void*)ke->ext[0]; +} + +static inline mach_msg_size_t +_dispatch_kevent_mach_msg_size(dispatch_kevent_t ke) +{ + // buffer size in the successful receive case, but message size (like + // msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size. 
+ return (mach_msg_size_t)ke->ext[1]; +} + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +static void _dispatch_mach_kevent_portset_drain(dispatch_kevent_t ke); +#endif +static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke); +static inline void _dispatch_mach_host_calendar_change_register(void); + +// DISPATCH_MACH_NOTIFICATION_ARMED are muxnotes that aren't registered with +// kevent for real, but with mach_port_request_notification() +// +// the kevent structure is used for bookkeeping: +// - ident, filter, flags and fflags have their usual meaning +// - data is used to monitor the actual state of the +// mach_port_request_notification() +// - ext[0] is a boolean that tracks whether the notification is armed or not +#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->ext[0]) +#endif + +DISPATCH_ALWAYS_INLINE +static dispatch_muxnote_t +_dispatch_kevent_get_muxnote(dispatch_kevent_t ke) +{ + uintptr_t dmn_addr = (uintptr_t)ke->udata & ~DISPATCH_KEVENT_MUXED_MARKER; + return (dispatch_muxnote_t)dmn_addr; +} + +DISPATCH_ALWAYS_INLINE +static dispatch_unote_t +_dispatch_kevent_get_unote(dispatch_kevent_t ke) +{ + dispatch_assert((ke->udata & DISPATCH_KEVENT_MUXED_MARKER) == 0); + return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)ke->udata }; +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_print_error(dispatch_kevent_t ke) +{ + dispatch_kevent_t kev = NULL; + + if (ke->flags & EV_DELETE) { + if (ke->flags & EV_UDATA_SPECIFIC) { + if (ke->data == EINPROGRESS) { + // deferred EV_DELETE + return; + } +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS + if (ke->data == ENOENT) { + // deferred EV_DELETE + return; + } +#endif + } + // for EV_DELETE if the update was deferred we may have reclaimed + // the udata already, and it is unsafe to dereference it now. + } else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) { + ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags; + } else if (ke->udata) { + if (!_dispatch_unote_registered(_dispatch_kevent_get_unote(ke))) { + ke->flags |= EV_ADD; + } + } + +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP && + (ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled && + kev && (kev->fflags & MACH_RCV_MSG)) { + DISPATCH_INTERNAL_CRASH(ke->ident, + "Missing EVFILT_MACHPORT support for ports"); + } +#endif + + if (ke->data) { + // log the unexpected error + _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + !ke->udata ? NULL : + ke->flags & EV_DELETE ? "delete" : + ke->flags & EV_ADD ? "add" : + ke->flags & EV_ENABLE ? 
"enable" : "monitor", + (int)ke->data); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke) +{ + uintptr_t data; + uintptr_t status = 0; + pthread_priority_t pp = 0; +#if DISPATCH_USE_KEVENT_QOS + pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; +#endif + dispatch_unote_action_t action = du._du->du_data_action; + if (action == DISPATCH_UNOTE_ACTION_DATA_SET) { + // ke->data is signed and "negative available data" makes no sense + // zero bytes happens when EV_EOF is set + dispatch_assert(ke->data >= 0l); + data = ~(unsigned long)ke->data; +#if HAVE_MACH + } else if (du._du->du_filter == EVFILT_MACHPORT) { + data = DISPATCH_MACH_RECV_MESSAGE; +#endif + } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) { + data = (unsigned long)ke->data; + } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR) { + data = ke->fflags & du._du->du_fflags; + } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { + data = ke->fflags & du._du->du_fflags; + status = (unsigned long)ke->data; + } else { + DISPATCH_INTERNAL_CRASH(action, "Corrupt unote action"); + } + return dux_merge_evt(du._du, ke->flags, data, status, pp); +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_merge_muxed(dispatch_kevent_t ke) +{ + dispatch_muxnote_t dmn = _dispatch_kevent_get_muxnote(ke); + dispatch_unote_linkage_t dul, dul_next; + + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { + _dispatch_kevent_merge(_dispatch_unote_linkage_get_unote(dul), ke); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_drain(dispatch_kevent_t ke) +{ + if (ke->filter == EVFILT_USER) { + _dispatch_kevent_mgr_debug("received", ke); + return _dispatch_kevent_poke_drain(ke); + } + _dispatch_kevent_debug("received", ke); + if (unlikely(ke->flags & EV_ERROR)) { + if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { + // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie + // . As a workaround, we simulate an exit event for + // any EVFILT_PROC with an invalid pid . 
+ ke->flags &= ~(EV_ERROR | EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC); + ke->flags |= EV_ONESHOT; + ke->fflags = NOTE_EXIT; + ke->data = 0; + _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); + } else { + _dispatch_debug("kevent[0x%llx]: handling error", + (unsigned long long)ke->udata); + return _dispatch_kevent_print_error(ke); + } + } + if (ke->filter == EVFILT_TIMER) { + return _dispatch_kevent_timer_drain(ke); + } + +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT) { +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (ke->udata == 0) { + return _dispatch_mach_kevent_portset_drain(ke); + } +#endif + if (_dispatch_kevent_mach_msg_size(ke)) { + return _dispatch_kevent_mach_msg_drain(ke); + } + } +#endif + + if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) { + return _dispatch_kevent_merge_muxed(ke); + } + return _dispatch_kevent_merge(_dispatch_kevent_get_unote(ke), ke); +} + +#pragma mark dispatch_kq + +#if DISPATCH_USE_MGR_THREAD +DISPATCH_NOINLINE +static int +_dispatch_kq_create(const void *guard_ptr) +{ + static const dispatch_kevent_s kev = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + .udata = (uintptr_t)DISPATCH_WLH_MANAGER, + }; + int kqfd; + + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_GUARDED_FD + guardid_t guard = (uintptr_t)guard_ptr; + kqfd = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); +#else + (void)guard_ptr; + kqfd = kqueue(); +#endif + if (kqfd == -1) { + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "system is out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "kernel is out of memory"); + break; + default: + DISPATCH_INTERNAL_CRASH(err, "kqueue() failure"); + break; + } + } +#if DISPATCH_USE_KEVENT_QOS + dispatch_assume_zero(kevent_qos(kqfd, &kev, 1, NULL, 0, NULL, NULL, 0)); +#else + dispatch_assume_zero(kevent(kqfd, &kev, 1, NULL, 0, NULL)); +#endif + return kqfd; +} +#endif + +static void +_dispatch_kq_init(void *context DISPATCH_UNUSED) +{ + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_KEVENT_WORKQUEUE + _dispatch_kevent_workqueue_init(); + if (_dispatch_kevent_workqueue_enabled) { + int r; + int kqfd = _dispatch_kq; + const dispatch_kevent_s kev[] = { + [0] = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, + .udata = (uintptr_t)DISPATCH_WLH_MANAGER, + }, + [1] = { + .ident = 1, + .filter = EVFILT_USER, + .fflags = NOTE_TRIGGER, + .udata = (uintptr_t)DISPATCH_WLH_MANAGER, + }, + }; +retry: + r = kevent_qos(kqfd, kev, 2, NULL, 0, NULL, NULL, + KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE); + if (unlikely(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + default: + DISPATCH_CLIENT_CRASH(err, + "Failed to initialize workqueue kevent"); + break; + } + } + return; + } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE +#if DISPATCH_USE_MGR_THREAD + _dispatch_kq = _dispatch_kq_create(&_dispatch_mgr_q); + dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); +#endif // DISPATCH_USE_MGR_THREAD +} + +DISPATCH_NOINLINE +static int +_dispatch_kq_update(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, + uint32_t flags) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_kq_init); + + dispatch_kevent_s ke_out[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; + int i, out_n = countof(ke_out), r = 0; +#if 
DISPATCH_USE_KEVENT_QOS + size_t size, *avail = NULL; + void *buf = NULL; +#endif + +#if DISPATCH_DEBUG + dispatch_assert(wlh); + dispatch_assert((size_t)n <= countof(ke_out)); + for (i = 0; i < n; i++) { + if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { + _dispatch_kevent_debug_n(NULL, ke + i, i, n); + } + } +#endif + + wlh = DISPATCH_WLH_GLOBAL; + + if (flags & KEVENT_FLAG_ERROR_EVENTS) { +#if !DISPATCH_USE_KEVENT_QOS + // emulate KEVENT_FLAG_ERROR_EVENTS + for (i = 0; i < n; i++) { + ke[i].flags |= EV_RECEIPT; + } + out_n = n; +#endif + } else { +#if DISPATCH_USE_KEVENT_QOS + size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + + DISPATCH_MACH_TRAILER_SIZE; + buf = alloca(size); + avail = &size; +#endif + } + +retry: + _dispatch_clear_return_to_kernel(); + if (wlh == DISPATCH_WLH_GLOBAL) { + int kqfd = _dispatch_kq; +#if DISPATCH_USE_KEVENT_QOS + if (_dispatch_kevent_workqueue_enabled) { + flags |= KEVENT_FLAG_WORKQ; + } + r = kevent_qos(kqfd, ke, n, ke_out, out_n, buf, avail, flags); +#else + const struct timespec timeout_immediately = {}, *timeout = NULL; + if (flags & KEVENT_FLAG_IMMEDIATE) timeout = &timeout_immediately; + r = kevent(kqfd, ke, n, ke_out, out_n, timeout); +#endif + } + if (unlikely(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + return err; + } + + if (flags & KEVENT_FLAG_ERROR_EVENTS) { + for (i = 0, n = r, r = 0; i < n; i++) { + if ((ke_out[i].flags & EV_ERROR) && (r = (int)ke_out[i].data)) { + _dispatch_kevent_drain(&ke_out[i]); + } + } + } else { + for (i = 0, n = r, r = 0; i < n; i++) { + _dispatch_kevent_drain(&ke_out[i]); + } + } + return r; +} + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_kq_update_one(dispatch_wlh_t wlh, dispatch_kevent_t ke) +{ + return _dispatch_kq_update(wlh, ke, 1, + KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_kq_update_all(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n) +{ + (void)_dispatch_kq_update(wlh, ke, n, + KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, + uint16_t action) +{ + dispatch_unote_class_t du = _du._du; + dispatch_source_type_t dst = du->du_type; + uint16_t flags = dst->dst_flags | action; + + if ((flags & EV_VANISHED) && !(flags & EV_ADD)) { + flags &= ~EV_VANISHED; + } + pthread_priority_t pp = _dispatch_priority_to_pp(du->du_priority); + *dk = (dispatch_kevent_s){ + .ident = du->du_ident, + .filter = dst->dst_filter, + .flags = flags, + .udata = (uintptr_t)du, + .fflags = du->du_fflags | dst->dst_fflags, + .data = (typeof(dk->data))dst->dst_data, +#if DISPATCH_USE_KEVENT_QOS + .qos = (typeof(dk->qos))pp, +#endif + }; +} + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_kq_deferred_find_slot(dispatch_deferred_items_t ddi, + int16_t filter, uint64_t ident, uint64_t udata) +{ + dispatch_kevent_t events = ddi->ddi_eventlist; + int i; + + for (i = 0; i < ddi->ddi_nevents; i++) { + if (events[i].filter == filter && events[i].ident == ident && + events[i].udata == udata) { + break; + } + } + return i; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_kevent_t +_dispatch_kq_deferred_reuse_slot(dispatch_wlh_t wlh, + dispatch_deferred_items_t ddi, int slot) +{ + if (wlh != DISPATCH_WLH_GLOBAL) 
_dispatch_set_return_to_kernel(); + if (unlikely(slot == countof(ddi->ddi_eventlist))) { + int nevents = ddi->ddi_nevents; + ddi->ddi_nevents = 1; + _dispatch_kq_update_all(wlh, ddi->ddi_eventlist, nevents); + dispatch_assert(ddi->ddi_nevents == 1); + slot = 0; + } else if (slot == ddi->ddi_nevents) { + ddi->ddi_nevents++; + } + return ddi->ddi_eventlist + slot; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_kq_deferred_discard_slot(dispatch_deferred_items_t ddi, int slot) +{ + if (slot < ddi->ddi_nevents) { + int last = --ddi->ddi_nevents; + if (slot != last) { + ddi->ddi_eventlist[slot] = ddi->ddi_eventlist[last]; + } + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kq_deferred_update(dispatch_wlh_t wlh, dispatch_kevent_t ke) +{ + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + + if (ddi && wlh == _dispatch_get_wlh()) { + int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident, + ke->udata); + dispatch_kevent_t dk = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot); + *dk = *ke; + if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { + _dispatch_kevent_debug("deferred", ke); + } + } else { + _dispatch_kq_update_one(wlh, ke); + } +} + +DISPATCH_NOINLINE +static int +_dispatch_kq_immediate_update(dispatch_wlh_t wlh, dispatch_kevent_t ke) +{ + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (ddi && wlh == _dispatch_get_wlh()) { + int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident, + ke->udata); + _dispatch_kq_deferred_discard_slot(ddi, slot); + } + return _dispatch_kq_update_one(wlh, ke); +} + +DISPATCH_NOINLINE +static bool +_dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du, + uint16_t action_flags) +{ + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + dispatch_unote_class_t du = _du._du; + dispatch_kevent_t ke; + int r = 0; + + if (action_flags & EV_ADD) { + // as soon as we register we may get an event delivery and it has to + // see this bit already set, else it will not unregister the kevent + du->du_wlh = wlh; + } + + if (ddi && wlh == _dispatch_get_wlh()) { + int slot = _dispatch_kq_deferred_find_slot(ddi, + du->du_filter, du->du_ident, (uintptr_t)du); + if (slot < ddi->ddi_nevents) { + // when deleting and an enable is pending, + // we must merge EV_ENABLE to do an immediate deletion + action_flags |= (ddi->ddi_eventlist[slot].flags & EV_ENABLE); + } + + if (!(action_flags & EV_ADD) && (action_flags & EV_ENABLE)) { + // can be deferred, so do it! 
+ ke = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot); + _dispatch_kq_unote_set_kevent(du, ke, action_flags); + _dispatch_kevent_debug("deferred", ke); + goto done; + } + + // get rid of the deferred item if any, we can't wait + _dispatch_kq_deferred_discard_slot(ddi, slot); + } + + if (action_flags) { + dispatch_kevent_s dk; + _dispatch_kq_unote_set_kevent(du, &dk, action_flags); + r = _dispatch_kq_update_one(wlh, &dk); + } + +done: + if (action_flags & EV_ADD) { + if (unlikely(r)) { + du->du_wlh = NULL; + } + return r == 0; + } + + if (action_flags & EV_DELETE) { + if (r == EINPROGRESS) { + return false; +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS + } else if (r == ENOENT) { + return false; +#endif + } + du->du_wlh = NULL; + } + + dispatch_assume_zero(r); + return true; +} + +#pragma mark dispatch_muxnote_t + +static void +_dispatch_muxnotes_init(void *ctxt DISPATCH_UNUSED) +{ + uint32_t i; + for (i = 0; i < DSL_HASH_SIZE; i++) { + TAILQ_INIT(&_dispatch_sources[i]); + } +} + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_muxnote_bucket_s * +_dispatch_muxnote_bucket(uint64_t ident, int16_t filter) +{ + switch (filter) { +#if HAVE_MACH + case EVFILT_MACHPORT: + case DISPATCH_EVFILT_MACH_NOTIFICATION: + ident = MACH_PORT_INDEX(ident); + break; +#endif + case EVFILT_SIGNAL: // signo + case EVFILT_PROC: // pid_t + default: // fd + break; + } + + dispatch_once_f(&_dispatch_muxnotes.pred, NULL, _dispatch_muxnotes_init); + return &_dispatch_sources[DSL_HASH((uintptr_t)ident)]; +} +#define _dispatch_unote_muxnote_bucket(du) \ + _dispatch_muxnote_bucket(du._du->du_ident, du._du->du_filter) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_muxnote_t +_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, + dispatch_wlh_t wlh, uint64_t ident, int16_t filter) +{ + dispatch_muxnote_t dmn; + _dispatch_muxnotes_lock(); + TAILQ_FOREACH(dmn, dmb, dmn_list) { + if (dmn->dmn_wlh == wlh && dmn->dmn_kev.ident == ident && + dmn->dmn_kev.filter == filter) { + break; + } + } + _dispatch_muxnotes_unlock(); + return dmn; +} +#define _dispatch_unote_muxnote_find(dmb, du, wlh) \ + _dispatch_muxnote_find(dmb, wlh, du._du->du_ident, du._du->du_filter) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_muxnote_t +_dispatch_mach_muxnote_find(mach_port_t name, int16_t filter) +{ + struct dispatch_muxnote_bucket_s *dmb; + dmb = _dispatch_muxnote_bucket(name, filter); + return _dispatch_muxnote_find(dmb, DISPATCH_WLH_GLOBAL, name, filter); +} + +DISPATCH_NOINLINE +static bool +_dispatch_unote_register_muxed(dispatch_unote_t du, dispatch_wlh_t wlh) +{ + struct dispatch_muxnote_bucket_s *dmb = _dispatch_unote_muxnote_bucket(du); + dispatch_muxnote_t dmn; + bool installed = true; + + dmn = _dispatch_unote_muxnote_find(dmb, du, wlh); + if (dmn) { + uint32_t flags = du._du->du_fflags & ~dmn->dmn_kev.fflags; + if (flags) { + dmn->dmn_kev.fflags |= flags; + if (unlikely(du._du->du_type->dst_update_mux)) { + installed = du._du->du_type->dst_update_mux(dmn); + } else { + installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh, + &dmn->dmn_kev); + } + if (!installed) dmn->dmn_kev.fflags &= ~flags; + } + } else { + dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s)); + TAILQ_INIT(&dmn->dmn_unotes_head); + _dispatch_kq_unote_set_kevent(du, &dmn->dmn_kev, EV_ADD | EV_ENABLE); +#if DISPATCH_USE_KEVENT_QOS + dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; +#endif + dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER; + dmn->dmn_wlh = wlh; + if 
(unlikely(du._du->du_type->dst_update_mux)) { + installed = du._du->du_type->dst_update_mux(dmn); + } else { + installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh, + &dmn->dmn_kev); + } + if (installed) { + dmn->dmn_kev.flags &= ~(EV_ADD | EV_VANISHED); + _dispatch_muxnotes_lock(); + TAILQ_INSERT_TAIL(dmb, dmn, dmn_list); + _dispatch_muxnotes_unlock(); + } else { + free(dmn); + } + } + + if (installed) { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + TAILQ_INSERT_TAIL(&dmn->dmn_unotes_head, dul, du_link); + dul->du_muxnote = dmn; + + if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { + bool armed = DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev); + os_atomic_store2o(du._dmsr, dmsr_notification_armed, armed,relaxed); + } + du._du->du_wlh = DISPATCH_WLH_GLOBAL; + } + return installed; +} + +bool +_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh, + dispatch_priority_t pri) +{ + dispatch_assert(!_dispatch_unote_registered(du)); + du._du->du_priority = pri; + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + du._du->du_wlh = wlh; + return true; + } + if (!du._du->du_is_direct) { + return _dispatch_unote_register_muxed(du, DISPATCH_WLH_GLOBAL); + } + return _dispatch_kq_unote_update(wlh, du, EV_ADD | EV_ENABLE); +} + +void +_dispatch_unote_resume(dispatch_unote_t du) +{ + dispatch_assert(_dispatch_unote_registered(du)); + + if (du._du->du_is_direct) { + dispatch_wlh_t wlh = du._du->du_wlh; + _dispatch_kq_unote_update(wlh, du, EV_ENABLE); + } else if (unlikely(du._du->du_type->dst_update_mux)) { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + du._du->du_type->dst_update_mux(dul->du_muxnote); + } else { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev); + } +} + +DISPATCH_NOINLINE +static bool +_dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags) +{ + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + bool update = false, dispose = false; + + if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { + os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed); + } + du._du->du_wlh = NULL; + TAILQ_REMOVE(&dmn->dmn_unotes_head, dul, du_link); + _TAILQ_TRASH_ENTRY(dul, du_link); + dul->du_muxnote = NULL; + + if (TAILQ_EMPTY(&dmn->dmn_unotes_head)) { + dmn->dmn_kev.flags |= EV_DELETE; + update = dispose = true; + } else { + uint32_t fflags = du._du->du_type->dst_fflags; + TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { + du = _dispatch_unote_linkage_get_unote(dul); + fflags |= du._du->du_fflags; + } + if (dmn->dmn_kev.fflags & ~fflags) { + dmn->dmn_kev.fflags &= fflags; + update = true; + } + } + if (update && !(flags & DU_UNREGISTER_ALREADY_DELETED)) { + if (unlikely(du._du->du_type->dst_update_mux)) { + dispatch_assume(du._du->du_type->dst_update_mux(dmn)); + } else { + _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev); + } + } + if (dispose) { + struct dispatch_muxnote_bucket_s *dmb; + dmb = _dispatch_muxnote_bucket(dmn->dmn_kev.ident, dmn->dmn_kev.filter); + _dispatch_muxnotes_lock(); + TAILQ_REMOVE(dmb, dmn, dmn_list); + _dispatch_muxnotes_unlock(); + free(dmn); + } + return true; +} + +bool +_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) +{ + switch (du._du->du_filter) { + case 
DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + du._du->du_wlh = NULL; + return true; + } + dispatch_wlh_t wlh = du._du->du_wlh; + if (wlh) { + if (!du._du->du_is_direct) { + return _dispatch_unote_unregister_muxed(du, flags); + } + uint16_t action_flags; + if (flags & DU_UNREGISTER_ALREADY_DELETED) { + action_flags = 0; + } else if (flags & DU_UNREGISTER_IMMEDIATE_DELETE) { + action_flags = EV_DELETE | EV_ENABLE; + } else { + action_flags = EV_DELETE; + } + return _dispatch_kq_unote_update(wlh, du, action_flags); + } + return true; +} + +#pragma mark - +#pragma mark dispatch_loop + +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +static void _dispatch_memorypressure_init(void); +#else +#define _dispatch_memorypressure_init() +#endif +static bool _dispatch_timers_force_max_leeway; + +void +_dispatch_event_loop_atfork_child(void) +{ +#if HAVE_MACH + _dispatch_mach_host_port_pred = 0; + _dispatch_mach_host_port = MACH_PORT_NULL; +#endif +} + +DISPATCH_NOINLINE +void +_dispatch_event_loop_init(void) +{ + if (unlikely(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) { + _dispatch_timers_force_max_leeway = true; + } + _dispatch_memorypressure_init(); + _voucher_activity_debug_channel_init(); +} + +DISPATCH_NOINLINE +void +_dispatch_event_loop_poke(dispatch_wlh_t wlh, dispatch_priority_t pri, + uint32_t flags) +{ + if (wlh == DISPATCH_WLH_MANAGER) { + dispatch_assert(!flags); + dispatch_kevent_s ke = { + .ident = 1, + .filter = EVFILT_USER, + .fflags = NOTE_TRIGGER, + .udata = (uintptr_t)DISPATCH_WLH_MANAGER, + }; + return _dispatch_kq_deferred_update(DISPATCH_WLH_GLOBAL, &ke); + } else if (wlh && wlh != DISPATCH_WLH_GLOBAL) { + dispatch_assert(flags); + dispatch_assert(pri); + } + DISPATCH_INTERNAL_CRASH(wlh, "Unsupported wlh configuration"); +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_poke_drain(dispatch_kevent_t ke) +{ + dispatch_assert(ke->filter == EVFILT_USER); + dispatch_wlh_t wlh = (dispatch_wlh_t)ke->udata; + dispatch_assert(wlh); +} + +DISPATCH_NOINLINE +void +_dispatch_event_loop_drain(uint32_t flags) +{ + dispatch_wlh_t wlh = _dispatch_get_wlh(); + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + int n = ddi->ddi_nevents; + ddi->ddi_nevents = 0; + _dispatch_kq_update(wlh, ddi->ddi_eventlist, n, flags); +} + +void +_dispatch_event_loop_update(void) +{ + dispatch_wlh_t wlh = _dispatch_get_wlh(); + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + int n = ddi->ddi_nevents; + ddi->ddi_nevents = 0; + _dispatch_kq_update_all(wlh, ddi->ddi_eventlist, n); + dispatch_assert(ddi->ddi_nevents == 0); +} + +void +_dispatch_event_loop_merge(dispatch_kevent_t ke, int n) +{ + while (n-- > 0) { + _dispatch_kevent_drain(ke++); + } +} + +#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8) + +DISPATCH_NOINLINE +static void +_dispatch_kevent_timer_drain(dispatch_kevent_t ke) +{ + dispatch_assert(ke->data > 0); + dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == + DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); + uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; + + dispatch_assert(tidx < DISPATCH_TIMER_COUNT); + _dispatch_timers_expired = true; + _dispatch_timers_processing_mask |= 1 << tidx; + _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED; +#if DISPATCH_USE_DTRACE + _dispatch_timers_will_wake |= 1 << DISPATCH_TIMER_QOS(tidx); +#endif +} + +DISPATCH_NOINLINE +static void +_dispatch_event_loop_timer_program(uint32_t tidx, + uint64_t target, uint64_t leeway, uint16_t action) +{ + 
dispatch_kevent_s ke = { + .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx, + .filter = EVFILT_TIMER, + .flags = action | EV_ONESHOT, + .fflags = _dispatch_timer_index_to_fflags[tidx], + .data = (int64_t)target, + .udata = (uintptr_t)&_dispatch_timers_heap[tidx], +#if DISPATCH_HAVE_TIMER_COALESCING + .ext[1] = leeway, +#endif +#if DISPATCH_USE_KEVENT_QOS + .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, +#endif + }; + + _dispatch_kq_deferred_update(DISPATCH_WLH_GLOBAL, &ke); +} + +void +_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range, + dispatch_clock_now_cache_t nows) +{ + if (unlikely(_dispatch_timers_force_max_leeway)) { + range.delay += range.leeway; + range.leeway = 0; + } +#if HAVE_MACH + if (DISPATCH_TIMER_CLOCK(tidx) == DISPATCH_CLOCK_WALL) { + _dispatch_mach_host_calendar_change_register(); + } +#endif + + // EVFILT_TIMER NOTE_ABSOLUTE always expects + // a WALL deadline + uint64_t now = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows); + _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED; + _dispatch_event_loop_timer_program(tidx, now + range.delay, range.leeway, + EV_ADD | EV_ENABLE); +} + +void +_dispatch_event_loop_timer_delete(uint32_t tidx) +{ + _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED; + _dispatch_event_loop_timer_program(tidx, 0, 0, EV_DELETE); +} + +#pragma mark kevent specific sources + +static dispatch_unote_t +_dispatch_source_proc_create(dispatch_source_type_t dst DISPATCH_UNUSED, + uintptr_t handle, unsigned long mask DISPATCH_UNUSED) +{ + dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask); + if (du._du && (mask & DISPATCH_PROC_EXIT_STATUS)) { + du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET; + } + return du; +} + +const dispatch_source_type_s _dispatch_source_type_proc = { + .dst_kind = "proc", + .dst_filter = EVFILT_PROC, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, + .dst_fflags = NOTE_EXIT, // rdar://16655831 + .dst_mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXITSTATUS +#if HAVE_DECL_NOTE_SIGNAL + |NOTE_SIGNAL +#endif +#if HAVE_DECL_NOTE_REAP + |NOTE_REAP +#endif + , + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_proc_create, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +const dispatch_source_type_s _dispatch_source_type_vnode = { + .dst_kind = "vnode", + .dst_filter = EVFILT_VNODE, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED, + .dst_mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK + |NOTE_RENAME|NOTE_FUNLOCK +#if HAVE_DECL_NOTE_REVOKE + |NOTE_REVOKE +#endif +#if HAVE_DECL_NOTE_NONE + |NOTE_NONE +#endif + , + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_fd, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +const dispatch_source_type_s _dispatch_source_type_vfs = { + .dst_kind = "vfs", + .dst_filter = EVFILT_FS, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, + .dst_mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT + |VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK +#if HAVE_DECL_VQ_UPDATE + |VQ_UPDATE +#endif +#if HAVE_DECL_VQ_VERYLOWDISK + |VQ_VERYLOWDISK +#endif +#if HAVE_DECL_VQ_QUOTA + |VQ_QUOTA +#endif +#if HAVE_DECL_VQ_NEARLOWDISK + |VQ_NEARLOWDISK +#endif +#if HAVE_DECL_VQ_DESIRED_DISK + |VQ_DESIRED_DISK +#endif + , + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_without_handle, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +#ifdef EVFILT_SOCK +const dispatch_source_type_s _dispatch_source_type_sock = 
{ + .dst_kind = "sock", + .dst_filter = EVFILT_SOCK, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED, + .dst_mask = NOTE_CONNRESET|NOTE_READCLOSED|NOTE_WRITECLOSED + |NOTE_TIMEOUT|NOTE_NOSRCADDR|NOTE_IFDENIED|NOTE_SUSPEND|NOTE_RESUME + |NOTE_KEEPALIVE +#ifdef NOTE_ADAPTIVE_WTIMO + |NOTE_ADAPTIVE_WTIMO|NOTE_ADAPTIVE_RTIMO +#endif +#ifdef NOTE_CONNECTED + |NOTE_CONNECTED|NOTE_DISCONNECTED|NOTE_CONNINFO_UPDATED +#endif +#ifdef NOTE_NOTIFY_ACK + |NOTE_NOTIFY_ACK +#endif + , + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_fd, + .dst_merge_evt = _dispatch_source_merge_evt, +}; +#endif // EVFILT_SOCK + + +#if DISPATCH_USE_MEMORYSTATUS + +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \ + DISPATCH_MEMORYPRESSURE_NORMAL | \ + DISPATCH_MEMORYPRESSURE_WARN | \ + DISPATCH_MEMORYPRESSURE_CRITICAL | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) +#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \ + DISPATCH_MEMORYPRESSURE_WARN | \ + DISPATCH_MEMORYPRESSURE_CRITICAL | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) + +static void +_dispatch_memorypressure_handler(void *context) +{ + dispatch_source_t ds = context; + unsigned long memorypressure = dispatch_source_get_data(ds); + + if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) { + _dispatch_memory_warn = false; + _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; +#if VOUCHER_USE_MACH_VOUCHER + if (_firehose_task_buffer) { + firehose_buffer_clear_bank_flags(_firehose_task_buffer, + FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); + } +#endif + } + if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) { + _dispatch_memory_warn = true; + _dispatch_continuation_cache_limit = + DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN; +#if VOUCHER_USE_MACH_VOUCHER + if (_firehose_task_buffer) { + firehose_buffer_set_bank_flags(_firehose_task_buffer, + FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); + } +#endif + } + memorypressure &= DISPATCH_MEMORYPRESSURE_MALLOC_MASK; + if (memorypressure) { + malloc_memory_event_handler(memorypressure); + } +} + +static void +_dispatch_memorypressure_init(void) +{ + dispatch_source_t ds = dispatch_source_create( + DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0, + DISPATCH_MEMORYPRESSURE_SOURCE_MASK, + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true)); + dispatch_set_context(ds, ds); + dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler); + dispatch_activate(ds); +} +#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE + +#if TARGET_OS_SIMULATOR // rdar://problem/9219483 +static int _dispatch_ios_simulator_memory_warnings_fd = -1; +static void +_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) +{ + char *e = getenv("SIMULATOR_MEMORY_WARNINGS"); + if (!e) return; + _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); + if (_dispatch_ios_simulator_memory_warnings_fd == -1) { + (void)dispatch_assume_zero(errno); + } +} + +static dispatch_unote_t +_dispatch_source_memorypressure_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init); + + if (handle) { + return DISPATCH_UNOTE_NULL; + } + + dst = &_dispatch_source_type_vnode; + handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; + mask = NOTE_ATTRIB; + + dispatch_unote_t du = dux_create(dst, handle, 
mask); + if (du._du) { + du._du->du_memorypressure_override = true; + } + return du; +} +#endif // TARGET_OS_SIMULATOR + +const dispatch_source_type_s _dispatch_source_type_memorypressure = { + .dst_kind = "memorystatus", + .dst_filter = EVFILT_MEMORYSTATUS, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH, + .dst_mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL + |NOTE_MEMORYSTATUS_PRESSURE_WARN|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL + |NOTE_MEMORYSTATUS_LOW_SWAP|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN + |NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL, + .dst_size = sizeof(struct dispatch_source_refs_s), + +#if TARGET_OS_SIMULATOR + .dst_create = _dispatch_source_memorypressure_create, + // redirected to _dispatch_source_type_vnode +#else + .dst_create = _dispatch_unote_create_without_handle, + .dst_merge_evt = _dispatch_source_merge_evt, +#endif +}; + +static dispatch_unote_t +_dispatch_source_vm_create(dispatch_source_type_t dst DISPATCH_UNUSED, + uintptr_t handle, unsigned long mask DISPATCH_UNUSED) +{ + // Map legacy vm pressure to memorypressure warning rdar://problem/15907505 + dispatch_unote_t du = dux_create(&_dispatch_source_type_memorypressure, + handle, NOTE_MEMORYSTATUS_PRESSURE_WARN); + if (du._du) { + du._du->du_vmpressure_override = 1; + } + return du; +} + +const dispatch_source_type_s _dispatch_source_type_vm = { + .dst_kind = "vm (deprecated)", + .dst_filter = EVFILT_MEMORYSTATUS, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH, + .dst_mask = NOTE_VM_PRESSURE, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_vm_create, + // redirected to _dispatch_source_type_memorypressure +}; +#endif // DISPATCH_USE_MEMORYSTATUS + +#pragma mark mach send / notifications +#if HAVE_MACH + +// Flags for all notifications that are registered/unregistered when a +// send-possible notification is requested/delivered +#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ + DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) + +static void _dispatch_mach_host_notify_update(void *context); + +static mach_port_t _dispatch_mach_notify_port; +static dispatch_source_t _dispatch_mach_notify_source; + +static void +_dispatch_timers_calendar_change(void) +{ + uint32_t qos; + + // calendar change may have gone past the wallclock deadline + _dispatch_timers_expired = true; + for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { + _dispatch_timers_processing_mask |= + 1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) +{ + mig_reply_error_t reply; + dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union + __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); + dispatch_assert(sizeof(mig_reply_error_t) < + DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE); + boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); + if (!success && reply.RetCode == MIG_BAD_ID && + (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID || + hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) { + _dispatch_debug("calendar-change notification"); + _dispatch_timers_calendar_change(); + _dispatch_mach_host_notify_update(NULL); + success = TRUE; + reply.RetCode = KERN_SUCCESS; + } + if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) { + (void)dispatch_assume_zero(reply.RetCode); + } + if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) { + mach_msg_destroy(hdr); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) +{ 
+ kern_return_t kr; +#if HAVE_MACH_PORT_CONSTRUCT + mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT }; +#ifdef __LP64__ + const mach_port_context_t guard = 0xfeed09071f1ca7edull; +#else + const mach_port_context_t guard = 0xff1ca7edull; +#endif + kr = mach_port_construct(mach_task_self(), &opts, guard, + &_dispatch_mach_notify_port); +#else + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &_dispatch_mach_notify_port); +#endif + DISPATCH_VERIFY_MIG(kr); + if (unlikely(kr)) { + DISPATCH_CLIENT_CRASH(kr, + "mach_port_construct() failed: cannot create receive right"); + } + + static const struct dispatch_continuation_s dc = { + .dc_func = (void*)_dispatch_mach_notify_source_invoke, + }; + _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv( + _dispatch_mach_notify_port, &dc); + dispatch_assert(_dispatch_mach_notify_source); + dispatch_activate(_dispatch_mach_notify_source); +} + +static void +_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED) +{ + kern_return_t kr; + mach_port_t mp, mhp = mach_host_self(); + kr = host_get_host_port(mhp, &mp); + DISPATCH_VERIFY_MIG(kr); + if (likely(!kr)) { + // mach_host_self returned the HOST_PRIV port + kr = mach_port_deallocate(mach_task_self(), mhp); + DISPATCH_VERIFY_MIG(kr); + mhp = mp; + } else if (kr != KERN_INVALID_ARGUMENT) { + (void)dispatch_assume_zero(kr); + } + if (unlikely(!mhp)) { + DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port"); + } + _dispatch_mach_host_port = mhp; +} + +mach_port_t +_dispatch_get_mach_host_port(void) +{ + dispatch_once_f(&_dispatch_mach_host_port_pred, NULL, + _dispatch_mach_host_port_init); + return _dispatch_mach_host_port; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_port_t +_dispatch_get_mach_notify_port(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init); + return _dispatch_mach_notify_port; +} + +static void +_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) +{ + static int notify_type = HOST_NOTIFY_CALENDAR_SET; + kern_return_t kr; + _dispatch_debug("registering for calendar-change notification"); +retry: + kr = host_request_notification(_dispatch_get_mach_host_port(), + notify_type, _dispatch_get_mach_notify_port()); + // Fallback when missing support for newer _SET variant, fires strictly more + if (kr == KERN_INVALID_ARGUMENT && + notify_type != HOST_NOTIFY_CALENDAR_CHANGE) { + notify_type = HOST_NOTIFY_CALENDAR_CHANGE; + goto retry; + } + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_host_calendar_change_register(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update); +} + +static kern_return_t +_dispatch_mach_notify_update(dispatch_muxnote_t dmn, uint32_t new_flags, + uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, + mach_port_mscount_t notify_sync) +{ + mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident; + typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data; + kern_return_t kr, krr = 0; + + // Update notification registration state. 
+ dmn->dmn_kev.data |= (new_flags | dmn->dmn_kev.fflags) & mask; + dmn->dmn_kev.data &= ~(del_flags & mask); + + _dispatch_debug_machport(port); + if ((dmn->dmn_kev.data & mask) && !(prev & mask)) { + _dispatch_debug("machport[0x%08x]: registering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + krr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, _dispatch_get_mach_notify_port(), + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(krr); + + switch (krr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + // Suppress errors & clear registration state + dmn->dmn_kev.data &= ~mask; + break; + default: + // Else, we don't expect any errors from mach. Log any errors + if (dispatch_assume_zero(krr)) { + // log the error & clear registration state + dmn->dmn_kev.data &= ~mask; + } else if (dispatch_assume_zero(previous)) { + // Another subsystem has beat libdispatch to requesting the + // specified Mach notification on this port. We should + // technically cache the previous port and message it when the + // kernel messages our port. Or we can just say screw those + // subsystems and deallocate the previous port. + // They should adopt libdispatch :-P + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + previous = MACH_PORT_NULL; + } + } + } else if (!(dmn->dmn_kev.data & mask) && (prev & mask)) { + _dispatch_debug("machport[0x%08x]: unregistering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + kr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, MACH_PORT_NULL, + MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + + switch (kr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + case KERN_INVALID_ARGUMENT: + break; + default: + if (dispatch_assume_zero(kr)) { + // log the error + } + } + } else { + return 0; + } + if (unlikely(previous)) { + // the kernel has not consumed the send-once right yet + (void)dispatch_assume_zero( + _dispatch_send_consume_send_once_right(previous)); + } + return krr; +} + +static bool +_dispatch_kevent_mach_notify_resume(dispatch_muxnote_t dmn, uint32_t new_flags, + uint32_t del_flags) +{ + kern_return_t kr = KERN_SUCCESS; + dispatch_assert_zero(new_flags & del_flags); + if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || + (del_flags & _DISPATCH_MACH_SP_FLAGS)) { + // Requesting a (delayed) non-sync send-possible notification + // registers for both immediate dead-name notification and delayed-arm + // send-possible notification for the port. + // The send-possible notification is armed when a mach_msg() with the + // MACH_SEND_NOTIFY to the port times out. 
+ // If send-possible is unavailable, fall back to immediate dead-name + // registration rdar://problem/2527840&9008724 + kr = _dispatch_mach_notify_update(dmn, new_flags, del_flags, + _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE, + MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME); + } + return kr == KERN_SUCCESS; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final) +{ + dispatch_unote_linkage_t dul, dul_next; + dispatch_muxnote_t dmn; + + _dispatch_debug_machport(name); + dmn = _dispatch_mach_muxnote_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); + if (!dispatch_assume(dmn)) { + return; + } + + dmn->dmn_kev.data &= ~_DISPATCH_MACH_SP_FLAGS; + if (!final) { + // Re-register for notification before delivery + final = !_dispatch_kevent_mach_notify_resume(dmn, data, 0); + } + + uint32_t flags = final ? EV_ONESHOT : EV_ENABLE; + DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 0; + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed); + dux_merge_evt(du._du, flags, (data & du._du->du_fflags), 0, 0); + if (!dul_next || DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) { + // current merge is last in list (dmn might have been freed) + // or it re-armed the notification + break; + } + } +} + +kern_return_t +_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " + "deleted prematurely", name); +#endif + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + kern_return_t kr; + + _dispatch_debug("machport[0x%08x]: dead-name notification", name); + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true); + + // the act of receiving a dead name notification allocates a dead-name + // right that must be deallocated + kr = mach_port_deallocate(mach_task_self(), name); + DISPATCH_VERIFY_MIG(kr); + //(void)dispatch_assume_zero(kr); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + _dispatch_debug("machport[0x%08x]: send-possible notification", name); + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false); + return KERN_SUCCESS; +} + +void +_dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr) +{ + dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote; + dispatch_unote_linkage_t dul; + dispatch_unote_t du; + + if (!_dispatch_unote_registered(dmsr)) { + return; + } + + DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = true; + TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { + du = _dispatch_unote_linkage_get_unote(dul); + os_atomic_store2o(du._dmsr, dmsr_notification_armed, true, relaxed); + } +} + +static dispatch_unote_t +_dispatch_source_mach_send_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + if (!mask) { + // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD + mask = DISPATCH_MACH_SEND_DEAD; + } + if (!handle) { + handle = MACH_PORT_DEAD; // + } + return _dispatch_unote_create_with_handle(dst, 
handle, mask); +} + +static bool +_dispatch_mach_send_update(dispatch_muxnote_t dmn) +{ + if (dmn->dmn_kev.flags & EV_DELETE) { + return _dispatch_kevent_mach_notify_resume(dmn, 0, dmn->dmn_kev.fflags); + } else { + return _dispatch_kevent_mach_notify_resume(dmn, dmn->dmn_kev.fflags, 0); + } +} + +const dispatch_source_type_s _dispatch_source_type_mach_send = { + .dst_kind = "mach_send", + .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION, + .dst_flags = EV_CLEAR, + .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_mach_send_create, + .dst_update_mux = _dispatch_mach_send_update, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +static dispatch_unote_t +_dispatch_mach_send_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + // without handle because the mach code will set the ident later + dispatch_unote_t du = + _dispatch_unote_create_without_handle(dst, handle, mask); + if (du._dmsr) { + du._dmsr->dmsr_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; + TAILQ_INIT(&du._dmsr->dmsr_replies); + } + return du; +} + +const dispatch_source_type_s _dispatch_mach_type_send = { + .dst_kind = "mach_send (mach)", + .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION, + .dst_flags = EV_CLEAR, + .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, + .dst_size = sizeof(struct dispatch_mach_send_refs_s), + + .dst_create = _dispatch_mach_send_create, + .dst_update_mux = _dispatch_mach_send_update, + .dst_merge_evt = _dispatch_mach_merge_notification, +}; + +#endif // HAVE_MACH +#pragma mark mach recv / reply +#if HAVE_MACH + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; +static dispatch_kevent_s _dispatch_mach_recv_kevent; + +static void +_dispatch_mach_portset_init(void *context DISPATCH_UNUSED) +{ + kern_return_t kr = mach_port_allocate(mach_task_self(), + MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_portset); + DISPATCH_VERIFY_MIG(kr); + if (unlikely(kr)) { + DISPATCH_CLIENT_CRASH(kr, + "mach_port_allocate() failed: cannot create port set"); + } + + dispatch_kevent_s kev = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD|EV_ENABLE, + .ident = _dispatch_mach_portset, + .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, + }; + _dispatch_kq_deferred_update(DISPATCH_WLH_GLOBAL, &kev); +} + +static bool +_dispatch_mach_portset_update(mach_port_t mp, mach_port_t mps) +{ + kern_return_t kr; + + _dispatch_debug_machport(mp); + kr = mach_port_move_member(mach_task_self(), mp, mps); + if (unlikely(kr)) { + DISPATCH_VERIFY_MIG(kr); + switch (kr) { + case KERN_INVALID_RIGHT: + if (mps) { + _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " + "mach_port_move_member() failed ", kr); + break; + } + //fall through + case KERN_INVALID_NAME: +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach receive right 0x%x destroyed " + "prematurely", mp); +#endif + break; + default: + (void)dispatch_assume_zero(kr); + break; + } + } + if (mps) { + return kr == KERN_SUCCESS; + } + return true; +} + +static mach_port_t +_dispatch_mach_get_portset(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init); + return _dispatch_mach_portset; +} + +static bool +_dispatch_mach_recv_update_portset_mux(dispatch_muxnote_t dmn) +{ + mach_port_t mp = (mach_port_t)dmn->dmn_kev.ident; + mach_port_t mps = MACH_PORT_NULL; + if (!(dmn->dmn_kev.flags & EV_DELETE)) { + mps = 
_dispatch_mach_get_portset(); + } + return _dispatch_mach_portset_update(mp, mps); +} + +static void +_dispatch_mach_recv_msg_buf_init(dispatch_kevent_t ke) +{ + mach_vm_size_t vm_size = mach_vm_round_page( + DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + + DISPATCH_MACH_TRAILER_SIZE); + mach_vm_address_t vm_addr = vm_page_size; + kern_return_t kr; + + while (unlikely(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE))) { + if (kr != KERN_NO_SPACE) { + DISPATCH_CLIENT_CRASH(kr, + "Could not allocate mach msg receive buffer"); + } + _dispatch_temporary_resource_shortage(); + vm_addr = vm_page_size; + } + ke->ext[0] = (uintptr_t)vm_addr; + ke->ext[1] = vm_size; +} + +static void +_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) +{ + kern_return_t kr = mach_port_allocate(mach_task_self(), + MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_recv_portset); + DISPATCH_VERIFY_MIG(kr); + if (unlikely(kr)) { + DISPATCH_CLIENT_CRASH(kr, + "mach_port_allocate() failed: cannot create port set"); + } + + dispatch_assert(DISPATCH_MACH_TRAILER_SIZE == + REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS( + DISPATCH_MACH_RCV_TRAILER))); + + _dispatch_mach_recv_kevent = (dispatch_kevent_s){ + .filter = EVFILT_MACHPORT, + .ident = _dispatch_mach_recv_portset, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .fflags = DISPATCH_MACH_RCV_OPTIONS, + .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, + }; + if (!_dispatch_kevent_workqueue_enabled) { + _dispatch_mach_recv_msg_buf_init(&_dispatch_mach_recv_kevent); + } + _dispatch_kq_deferred_update(DISPATCH_WLH_GLOBAL, + &_dispatch_mach_recv_kevent); +} + +static mach_port_t +_dispatch_mach_get_recv_portset(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init); + return _dispatch_mach_recv_portset; +} + +static bool +_dispatch_mach_recv_direct_update_portset_mux(dispatch_muxnote_t dmn) +{ + mach_port_t mp = (mach_port_t)dmn->dmn_kev.ident; + mach_port_t mps = MACH_PORT_NULL; + if (!(dmn->dmn_kev.flags & EV_DELETE)) { + mps = _dispatch_mach_get_recv_portset(); + } + return _dispatch_mach_portset_update(mp, mps); +} + +static dispatch_unote_t +_dispatch_mach_kevent_mach_recv_direct_find(mach_port_t name) +{ + dispatch_muxnote_t dmn; + dispatch_unote_linkage_t dul; + + dmn = _dispatch_mach_muxnote_find(name, EVFILT_MACHPORT); + TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + if (du._du->du_type->dst_fflags & MACH_RCV_MSG) { + return du; + } + } + return DISPATCH_UNOTE_NULL; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_kevent_portset_merge(dispatch_kevent_t ke) +{ + mach_port_t name = (mach_port_name_t)ke->data; + dispatch_unote_linkage_t dul, dul_next; + dispatch_muxnote_t dmn; + + _dispatch_debug_machport(name); + dmn = _dispatch_mach_muxnote_find(name, EVFILT_MACHPORT); + if (!dispatch_assume(dmn)) { + return; + } + _dispatch_mach_portset_update(name, MACH_PORT_NULL); // emulate EV_DISPATCH + + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ENABLE | EV_DISPATCH, + DISPATCH_MACH_RECV_MESSAGE, 0, 0); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_kevent_portset_drain(dispatch_kevent_t ke) +{ + if (ke->ident == _dispatch_mach_recv_portset) { + return _dispatch_kevent_mach_msg_drain(ke); + } else { + dispatch_assert(ke->ident == _dispatch_mach_portset); + return 
_dispatch_mach_kevent_portset_merge(ke); + } +} +#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + +static void +_dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *hdr) +{ + mach_msg_size_t siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE; + mach_port_t name = hdr->msgh_local_port; + + if (!dispatch_assume(hdr->msgh_size <= UINT_MAX - + DISPATCH_MACH_TRAILER_SIZE)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received overlarge message"); + } else if (!dispatch_assume(name)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with MACH_PORT_NULL port"); + } else { + _dispatch_debug_machport(name); +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (du._du == NULL) { + du = _dispatch_mach_kevent_mach_recv_direct_find(name); + } +#endif + if (likely(du._du)) { + return dux_merge_msg(du._du, flags, hdr, siz); + } + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with no listeners"); + } + + mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) +{ + mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); + mach_msg_size_t siz; + mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; + uint32_t flags = ke->flags; + dispatch_unote_t du = _dispatch_kevent_get_unote(ke); + + if (unlikely(!hdr)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); + } + if (likely(!kr)) { + _dispatch_kevent_mach_msg_recv(du, flags, hdr); + goto out; + } else if (kr != MACH_RCV_TOO_LARGE) { + goto out; + } else if (!ke->data) { + DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); + } + if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) { + DISPATCH_INTERNAL_CRASH(ke->ext[1], + "EVFILT_MACHPORT with overlarge message"); + } + siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE; + hdr = malloc(siz); + if (dispatch_assume(hdr)) { + flags |= DISPATCH_EV_MSG_NEEDS_FREE; + } else { + // Kernel will discard message too large to fit + hdr = NULL; + siz = 0; + } + mach_port_t name = (mach_port_name_t)ke->data; + const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); + kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (likely(!kr)) { + _dispatch_kevent_mach_msg_recv(du, flags, hdr); + goto out; + } else if (kr == MACH_RCV_TOO_LARGE) { + _dispatch_log("BUG in libdispatch client: " + "_dispatch_kevent_mach_msg_drain: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", + hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke)); + kr = MACH_MSG_SUCCESS; + } + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } +out: + if (unlikely(kr)) { + _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: " + "message reception failed", kr); + } + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (!(flags & EV_UDATA_SPECIFIC)) { + _dispatch_kq_deferred_update(DISPATCH_WLH_GLOBAL, + &_dispatch_mach_recv_kevent); + } +#endif +} + +static dispatch_unote_t +_dispatch_source_mach_recv_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (!_dispatch_evfilt_machport_direct_enabled) { + dst = &_dispatch_source_type_mach_recv_pset; + } +#endif + return _dispatch_unote_create_with_handle(dst, handle, mask); +} + +const dispatch_source_type_s 
_dispatch_source_type_mach_recv = { + .dst_kind = "mach_recv", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, + .dst_fflags = 0, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_mach_recv_create, + .dst_merge_evt = _dispatch_source_merge_evt, + .dst_merge_msg = NULL, // never receives messages directly +}; + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +const dispatch_source_type_s _dispatch_source_type_mach_recv_pset = { + .dst_kind = "mach_recv (portset)", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_DISPATCH, + .dst_fflags = 0, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = NULL, // never created directly + .dst_update_mux = _dispatch_mach_recv_update_portset_mux, + .dst_merge_evt = _dispatch_source_merge_evt, + .dst_merge_msg = NULL, // never receives messages directly +}; +#endif + +static void +_dispatch_source_mach_recv_direct_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t msgsz DISPATCH_UNUSED) +{ + dispatch_continuation_t dc = du._dr->ds_handler[DS_EVENT_HANDLER]; + dispatch_source_t ds = _dispatch_source_from_refs(du._dr); + dispatch_queue_t cq = _dispatch_queue_get_current(); + + // see firehose_client_push_notify_async + _dispatch_queue_set_current(ds->_as_dq); + dc->dc_func(msg); + _dispatch_queue_set_current(cq); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(msg); + } + if ((ds->dq_atomic_flags & DSF_CANCELED) || + (flags & (EV_ONESHOT | EV_DELETE))) { + return _dispatch_source_merge_evt(du, flags, 0, 0, 0); + } + if (_dispatch_unote_needs_rearm(du)) { + return _dispatch_unote_resume(du); + } +} + +static dispatch_unote_t +_dispatch_source_mach_recv_direct_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (!_dispatch_evfilt_machport_direct_enabled) { + dst = &_dispatch_source_type_mach_recv_direct_pset; + } +#endif + return _dispatch_unote_create_with_handle(dst, handle, mask); +} + +static void +_dispatch_mach_recv_direct_merge(dispatch_unote_t du, + uint32_t flags, uintptr_t data, + uintptr_t status DISPATCH_UNUSED, + pthread_priority_t pp) +{ + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(du._du->du_ident, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + return _dispatch_source_merge_evt(du, flags, data, 0, pp); +} + +const dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { + .dst_kind = "direct mach_recv", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_mach_recv_direct_create, + .dst_merge_evt = _dispatch_mach_recv_direct_merge, + .dst_merge_msg = _dispatch_source_mach_recv_direct_merge_msg, +}; + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +const dispatch_source_type_s _dispatch_source_type_mach_recv_direct_pset = { + .dst_kind = "direct mach_recv (portset)", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = 0, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = NULL, // never created directly + .dst_update_mux = _dispatch_mach_recv_direct_update_portset_mux, + .dst_merge_evt = _dispatch_mach_recv_direct_merge, + .dst_merge_msg = _dispatch_source_mach_recv_direct_merge_msg, +}; +#endif + +static dispatch_unote_t 
+_dispatch_mach_recv_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + // mach channels pass MACH_PORT_NULL until connect +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (!_dispatch_evfilt_machport_direct_enabled) { + dst = &_dispatch_mach_type_recv_pset; + } +#endif + // without handle because the mach code will set the ident later + return _dispatch_unote_create_without_handle(dst, handle, mask); +} + +const dispatch_source_type_s _dispatch_mach_type_recv = { + .dst_kind = "mach_recv (channel)", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_mach_recv_refs_s), + + .dst_create = _dispatch_mach_recv_create, + .dst_merge_evt = _dispatch_mach_recv_direct_merge, + .dst_merge_msg = _dispatch_mach_merge_msg, +}; + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +const dispatch_source_type_s _dispatch_mach_type_recv_pset = { + .dst_kind = "mach_recv (channel, portset)", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = 0, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_mach_recv_refs_s), + + .dst_create = NULL, // never created directly + .dst_update_mux = _dispatch_mach_recv_direct_update_portset_mux, + .dst_merge_evt = _dispatch_mach_recv_direct_merge, + .dst_merge_msg = _dispatch_mach_merge_msg, +}; +#endif + +static dispatch_unote_t +_dispatch_mach_reply_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (!_dispatch_evfilt_machport_direct_enabled) { + dst = &_dispatch_mach_type_reply_pset; + } +#endif + return _dispatch_unote_create_with_handle(dst, handle, mask); +} + +DISPATCH_NORETURN +static void +_dispatch_mach_reply_merge_evt(dispatch_unote_t du, + uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, + uintptr_t status DISPATCH_UNUSED, + pthread_priority_t pp DISPATCH_UNUSED) +{ + DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event"); +} + +const dispatch_source_type_s _dispatch_mach_type_reply = { + .dst_kind = "mach reply", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_mach_reply_refs_s), + + .dst_create = _dispatch_mach_reply_create, + .dst_merge_evt = _dispatch_mach_reply_merge_evt, + .dst_merge_msg = _dispatch_mach_reply_merge_msg, +}; + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +const dispatch_source_type_s _dispatch_mach_type_reply_pset = { + .dst_kind = "mach reply (portset)", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_ONESHOT, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_mach_reply_refs_s), + + .dst_create = NULL, // never created directly + .dst_update_mux = _dispatch_mach_recv_direct_update_portset_mux, + .dst_merge_evt = _dispatch_mach_reply_merge_evt, + .dst_merge_msg = _dispatch_mach_reply_merge_msg, +}; +#endif + +#pragma mark Mach channel SIGTERM notification (for XPC channels only) + +const dispatch_source_type_s _dispatch_xpc_type_sigterm = { + .dst_kind = "sigterm (xpc)", + .dst_filter = EVFILT_SIGNAL, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_ONESHOT, + .dst_fflags = 0, + .dst_size = sizeof(struct dispatch_xpc_term_refs_s), + + .dst_create = _dispatch_unote_create_with_handle, + .dst_merge_evt = _dispatch_xpc_sigterm_merge, +}; + +#endif // HAVE_MACH + +#endif // DISPATCH_EVENT_BACKEND_KEVENT diff 
--git a/src/firehose/firehose.defs b/src/firehose/firehose.defs index 986533cc1..7ed795827 100644 --- a/src/firehose/firehose.defs +++ b/src/firehose/firehose.defs @@ -35,7 +35,8 @@ register( comm_recvp : mach_port_move_receive_t; comm_sendp : mach_port_make_send_t; extra_info_port : mach_port_move_send_t; - extra_info_size : mach_vm_size_t + extra_info_size : mach_vm_size_t; + ServerAuditToken atoken : audit_token_t ); routine diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index 1305bdea6..21692b9e3 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -37,6 +37,10 @@ #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) +#ifndef OS_FALLTHROUGH +#define OS_FALLTHROUGH +#endif + #define DISPATCH_INTERNAL_CRASH(ac, msg) ({ panic(msg); __builtin_trap(); }) #if defined(__x86_64__) || defined(__i386__) @@ -49,10 +53,13 @@ #define dispatch_hardware_pause() __asm__("") #endif -#define _dispatch_wait_until(c) do { \ - while (!fastpath(c)) { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ + for (;;) { \ + if (likely(_c = (c))) break; \ dispatch_hardware_pause(); \ - } } while (0) + } \ + _c; }) #define dispatch_compiler_barrier() __asm__ __volatile__("" ::: "memory") typedef uint32_t dispatch_lock; @@ -71,9 +78,10 @@ static void _dispatch_gate_wait(dispatch_gate_t l, uint32_t flags); #include #include #include +#include // os/internal/atomic.h #include // #include // -#include // os/internal/atomic.h +#include // #include "os/firehose_buffer_private.h" #include "firehose_buffer_internal.h" #include "firehose_inline_internal.h" @@ -93,14 +101,11 @@ _Static_assert(offsetof(firehose_stream_state_u, fss_gate) == offsetof(firehose_stream_state_u, fss_allocator), "fss_gate and fss_allocator alias"); _Static_assert(sizeof(struct firehose_buffer_header_s) == - FIREHOSE_BUFFER_CHUNK_SIZE, + FIREHOSE_CHUNK_SIZE, "firehose buffer header must be 4k"); _Static_assert(offsetof(struct firehose_buffer_header_s, fbh_unused) <= - FIREHOSE_BUFFER_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, + FIREHOSE_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, "we must have enough space for the libtrace header"); -_Static_assert(sizeof(struct firehose_buffer_chunk_s) == - FIREHOSE_BUFFER_CHUNK_SIZE, - "firehose buffer chunks must be 4k"); _Static_assert(powerof2(FIREHOSE_BUFFER_CHUNK_COUNT), "CHUNK_COUNT Must be a power of two"); _Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, @@ -109,14 +114,8 @@ _Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, _Static_assert(powerof2(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT), "madvise chunk count must be a power of two"); #endif -_Static_assert(howmany(sizeof(struct firehose_tracepoint_s), - sizeof(struct firehose_buffer_chunk_s)) < 255, - "refcount assumes that you cannot have more than 255 tracepoints"); -// FIXME: we should have an event-count instead here _Static_assert(sizeof(struct firehose_buffer_stream_s) == 128, "firehose buffer stream must be small (single cacheline if possible)"); -_Static_assert(offsetof(struct firehose_buffer_chunk_s, fbc_data) % 8 == 0, - "Page header is 8 byte aligned"); _Static_assert(sizeof(struct firehose_tracepoint_s) == 24, "tracepoint header should be exactly 24 bytes"); #endif @@ -177,21 +176,19 @@ firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT; sendp = firehose_mach_port_allocate(opts, fb); - if (oldsendp && 
_voucher_libtrace_hooks->vah_version >= 3) { - if (_voucher_libtrace_hooks->vah_get_reconnect_info) { - kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); - if (likely(kr == KERN_SUCCESS) && addr && size) { - extra_info_size = size; - kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, - flags, &extra_info_port, MACH_PORT_NULL); - if (unlikely(kr)) { - // the client probably has some form of memory corruption - // and/or a port leak - DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); - } - kr = mach_vm_deallocate(mach_task_self(), addr, size); - (void)dispatch_assume_zero(kr); + if (oldsendp && _voucher_libtrace_hooks->vah_get_reconnect_info) { + kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); + if (likely(kr == KERN_SUCCESS) && addr && size) { + extra_info_size = size; + kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, + flags, &extra_info_port, MACH_PORT_NULL); + if (unlikely(kr)) { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); } + kr = mach_vm_deallocate(mach_task_self(), addr, size); + (void)dispatch_assume_zero(kr); } } @@ -261,7 +258,7 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) } } - uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_BUFFER_CHUNK_SIZE); + uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_CHUNK_SIZE); if (ratio > 1) { total = roundup(total, ratio); } @@ -299,7 +296,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, vm_addr = vm_page_size; const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT * - FIREHOSE_BUFFER_CHUNK_SIZE; + FIREHOSE_CHUNK_SIZE; if (slowpath(madvise_bytes % PAGE_SIZE)) { DISPATCH_INTERNAL_CRASH(madvise_bytes, "Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE"); @@ -320,7 +317,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, vm_offset_t vm_addr = 0; vm_size_t size; - size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; __firehose_allocate(&vm_addr, size); (void)logd_port; (void)unique_pid; @@ -487,12 +484,7 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, return; } - bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | - ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); - state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header, - fbh_bank.fbb_state.fbs_atomic_state, bank_updates, relaxed); - if (state_out) *state_out = state; - + __firehose_critical_region_enter(); os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, { ntail = otail; @@ -500,6 +492,15 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, ntail.frp_io_flushed += io_delta; ntail.frp_mem_flushed += mem_delta; }); + + bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | + ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); + state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header, + fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); + __firehose_critical_region_leave(); + + if (state_out) *state_out = state; + if (async_notif) { if (io_delta) { os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed); @@ -611,18 +612,18 @@ firehose_buffer_update_limits(firehose_buffer_t fb) OS_ALWAYS_INLINE static inline firehose_tracepoint_t -firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, 
+firehose_buffer_chunk_init(firehose_chunk_t fc, firehose_tracepoint_query_t ask, uint8_t **privptr) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - uint16_t pub_offs = offsetof(struct firehose_buffer_chunk_s, fbc_data); - uint16_t priv_offs = FIREHOSE_BUFFER_CHUNK_SIZE; + uint16_t pub_offs = offsetof(struct firehose_chunk_s, fc_data); + uint16_t priv_offs = FIREHOSE_CHUNK_SIZE; pub_offs += roundup(ft_size + ask->pubsize, 8); priv_offs -= ask->privsize; - if (fbc->fbc_pos.fbc_atomic_pos) { + if (fc->fc_pos.fcp_atomic_pos) { // Needed for process death handling (recycle-reuse): // No atomic fences required, we merely want to make sure the observers // will see memory effects in program (asm) order. @@ -632,32 +633,32 @@ firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, // and it is dirty, when crawling the chunk, we don't see remnants of // other tracepoints // - // We only do that when the fbc_pos is non zero, because zero means + // We only do that when the fc_pos is non zero, because zero means // we just faulted the chunk, and the kernel already bzero-ed it. - bzero(fbc->fbc_data, sizeof(fbc->fbc_data)); + bzero(fc->fc_data, sizeof(fc->fc_data)); } dispatch_compiler_barrier(); // boot starts mach absolute time at 0, and // wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP // breaks firehose_buffer_stream_flush() assumptions if (ask->stamp > FIREHOSE_STAMP_SLOP) { - fbc->fbc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; + fc->fc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; } else { - fbc->fbc_timestamp = 0; + fc->fc_timestamp = 0; } - fbc->fbc_pos = (firehose_buffer_pos_u){ - .fbc_next_entry_offs = pub_offs, - .fbc_private_offs = priv_offs, - .fbc_refcnt = 1, - .fbc_qos_bits = firehose_buffer_qos_bits_propagate(), - .fbc_stream = ask->stream, - .fbc_flag_io = ask->for_io, + fc->fc_pos = (firehose_chunk_pos_u){ + .fcp_next_entry_offs = pub_offs, + .fcp_private_offs = priv_offs, + .fcp_refcnt = 1, + .fcp_qos = firehose_buffer_qos_bits_propagate(), + .fcp_stream = ask->stream, + .fcp_flag_io = ask->for_io, }; if (privptr) { - *privptr = fbc->fbc_start + priv_offs; + *privptr = fc->fc_start + priv_offs; } - return (firehose_tracepoint_t)fbc->fbc_data; + return (firehose_tracepoint_t)fc->fc_data; } OS_NOINLINE @@ -671,14 +672,18 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, uint64_t stamp_and_len; if (fastpath(ref)) { - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - ft = firehose_buffer_chunk_init(fbc, ask, privptr); + firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); + ft = firehose_buffer_chunk_init(fc, ask, privptr); // Needed for process death handling (tracepoint-begin): // write the length before making the chunk visible - stamp_and_len = ask->stamp - fbc->fbc_timestamp; + stamp_and_len = ask->stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)ask->pubsize << 48; os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); - +#ifdef KERNEL + ft->ft_thread = thread_tid(current_thread()); +#else + ft->ft_thread = _pthread_threadid_self_np_direct(); +#endif if (ask->stream == firehose_stream_metadata) { os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, 1ULL << ref, relaxed); @@ -750,7 +755,7 @@ static inline uint16_t firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) { const size_t madv_size = - FIREHOSE_BUFFER_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; + FIREHOSE_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; const size_t madv_mask = (1ULL << 
FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT) - 1; @@ -779,12 +784,12 @@ OS_NOINLINE void firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) { - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); uint16_t volatile *fbh_ring; uint16_t volatile *fbh_ring_head; uint16_t head, gen, dummy, idx; - firehose_buffer_pos_u fbc_pos = fbc->fbc_pos; - bool for_io = fbc_pos.fbc_flag_io; + firehose_chunk_pos_u fc_pos = fc->fc_pos; + bool for_io = fc_pos.fcp_flag_io; if (for_io) { fbh_ring = fb->fb_header.fbh_io_ring; @@ -809,7 +814,7 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) gen = head & FIREHOSE_RING_POS_GEN_MASK; idx = head & FIREHOSE_RING_POS_IDX_MASK; - while (unlikely(!os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + while (unlikely(!os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy, relaxed))) { // can only ever happen if a recycler is slow, this requires having // enough cores (>5 for I/O e.g.) @@ -849,7 +854,7 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) // a thread being preempted here for GEN_MASK worth of ring rotations, // it could lead to the cmpxchg succeed, and have a bogus enqueue // (confused enqueuer) - if (fastpath(os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + if (fastpath(os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy, relaxed))) { if (fastpath(os_atomic_cmpxchgv(fbh_ring_head, head, head + 1, &head, release))) { @@ -871,13 +876,22 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) })); } - pthread_priority_t pp = fbc_pos.fbc_qos_bits; + pthread_priority_t pp = fc_pos.fcp_qos; pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL), for_io); #endif } +#ifndef KERNEL +void +firehose_buffer_force_connect(firehose_buffer_t fb) +{ + mach_port_t sendp = fb->fb_header.fbh_sendp; + if (sendp == MACH_PORT_NULL) firehose_client_reconnect(fb, MACH_PORT_NULL); +} +#endif + OS_ALWAYS_INLINE static inline uint16_t firehose_buffer_ring_try_recycle(firehose_buffer_t fb) @@ -885,7 +899,7 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) firehose_ring_tail_u pos, old; uint16_t volatile *fbh_ring; uint16_t gen, ref, entry, tail; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; bool for_io; os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, @@ -923,14 +937,14 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) // and it is dirty, it is a chunk being written to that needs a flush gen = (entry & FIREHOSE_RING_POS_GEN_MASK) + FIREHOSE_RING_POS_GEN_INC; ref = entry & FIREHOSE_RING_POS_IDX_MASK; - fbc = firehose_buffer_ref_to_chunk(fb, ref); + fc = firehose_buffer_ref_to_chunk(fb, ref); - if (!for_io && fbc->fbc_pos.fbc_stream == firehose_stream_metadata) { + if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) { os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, ~(1ULL << ref), relaxed); } - os_atomic_store2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_FULL_BIT, relaxed); + os_atomic_store2o(fc, fc_pos.fcp_atomic_pos, + FIREHOSE_CHUNK_POS_FULL_BIT, relaxed); dispatch_compiler_barrier(); os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); return ref; @@ -939,10 +953,11 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) #ifndef KERNEL OS_NOINLINE static firehose_tracepoint_t -firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, 
+firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t fb, firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) { const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io); + const uint64_t bank_inc = FIREHOSE_BANK_INC(ask->for_io); firehose_buffer_bank_t const fbb = &fb->fb_header.fbh_bank; firehose_bank_state_u state; uint16_t fbs_max_ref; @@ -951,7 +966,7 @@ firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, if (!fastpath(ask->is_bank_ok)) { state.fbs_atomic_state = os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); - while (state.fbs_atomic_state & bank_unavail_mask) { + while ((state.fbs_atomic_state - bank_inc) & bank_unavail_mask) { firehose_client_send_push(fb, ask->for_io, &state); if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { // logd was unloaded, give up @@ -1020,7 +1035,7 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io); #ifndef KERNEL state.fbs_atomic_state = os_atomic_add_orig2o(fbb, - fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); + fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), acquire); if (fastpath(!(state.fbs_atomic_state & unavail_mask))) { ask->is_bank_ok = true; if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { @@ -1030,11 +1045,12 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, } } } - return firehose_buffer_tracepoint_reserve_slow2(fb, ask, privptr, ref); + return firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(fb, ask, + privptr, ref); #else firehose_bank_state_u value; ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state, - state.fbs_atomic_state, value.fbs_atomic_state, relaxed, { + state.fbs_atomic_state, value.fbs_atomic_state, acquire, { value = state; if (slowpath((value.fbs_atomic_state & unavail_mask) != 0)) { os_atomic_rmw_loop_give_up(break); @@ -1067,32 +1083,6 @@ __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, privsize, privptr); } -firehose_tracepoint_t -__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, - uint64_t stamp, firehose_stream_t stream, - uint16_t pubsize, uint16_t privsize, uint8_t **privptr) -{ - - firehose_tracepoint_t ft; - long result; - - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, - pubsize, privsize, privptr); - if (fastpath(result > 0)) { - ft = (firehose_tracepoint_t)(fbc->fbc_start + result); - stamp -= fbc->fbc_timestamp; - stamp |= (uint64_t)pubsize << 48; - // Needed for process death handling (tracepoint-begin) - // see firehose_buffer_stream_chunk_install - os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); - dispatch_compiler_barrier(); - return ft; - } - else { - return NULL; - } -} - firehose_buffer_t __firehose_buffer_create(size_t *size) { @@ -1101,7 +1091,7 @@ __firehose_buffer_create(size_t *size) } if (size) { - *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; } return kernel_firehose_buffer; } @@ -1113,27 +1103,6 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); } -void -__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) -{ - firehose_buffer_pos_u pos; - - // Needed for process death handling (tracepoint-flush): - // We want to make 
sure the observers - // will see memory effects in program (asm) order. - // 1. write all the data to the tracepoint - // 2. write the tracepoint ID, so that seeing it means the tracepoint - // is valid - ft->ft_thread = thread_tid(current_thread()); - - // release barrier makes the log writes visible - os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); - pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); - return; -} - void __firehose_merge_updates(firehose_push_reply_t update) { diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h index db8e02629..7679c8c0d 100644 --- a/src/firehose/firehose_buffer_internal.h +++ b/src/firehose/firehose_buffer_internal.h @@ -173,11 +173,11 @@ typedef struct firehose_buffer_header_s { dispatch_unfair_lock_s fbh_logd_lock; #endif uint64_t fbh_unused[0]; -} OS_ALIGNED(FIREHOSE_BUFFER_CHUNK_SIZE) *firehose_buffer_header_t; +} OS_ALIGNED(FIREHOSE_CHUNK_SIZE) *firehose_buffer_header_t; union firehose_buffer_u { struct firehose_buffer_header_s fb_header; - struct firehose_buffer_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; + struct firehose_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; }; // used to let the compiler pack these values in 1 or 2 registers @@ -206,6 +206,9 @@ firehose_buffer_update_limits(firehose_buffer_t fb); void firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref); +void +firehose_buffer_force_connect(firehose_buffer_t fb); + #endif #endif // __FIREHOSE_BUFFER_INTERNAL__ diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index 95768825f..abc5f9ec3 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -55,17 +55,11 @@ firehose_mach_port_allocate(uint32_t flags, void *ctx) mach_port_options_t opts = { .flags = flags, }; - kern_return_t kr; - - for (;;) { - kr = mach_port_construct(mach_task_self(), &opts, - (mach_port_context_t)ctx, &port); - if (fastpath(kr == KERN_SUCCESS)) { - break; - } + kern_return_t kr = mach_port_construct(mach_task_self(), &opts, + (mach_port_context_t)ctx, &port); + if (unlikely(kr)) { DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - _dispatch_temporary_resource_shortage(); + DISPATCH_CLIENT_CRASH(kr, "Unable to allocate mach port"); } return port; } @@ -142,36 +136,28 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, #pragma mark firehose buffer OS_ALWAYS_INLINE -static inline firehose_buffer_chunk_t +static inline firehose_chunk_t firehose_buffer_chunk_for_address(void *addr) { - uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1); - return (firehose_buffer_chunk_t)chunk_addr; + uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1); + return (firehose_chunk_t)chunk_addr; } OS_ALWAYS_INLINE static inline uint16_t -firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc) +firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc) { return (uint16_t)(fbc - fb->fb_chunks); } OS_ALWAYS_INLINE -static inline firehose_buffer_chunk_t +static inline firehose_chunk_t firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref) { return fb->fb_chunks + ref; } #ifndef FIREHOSE_SERVER - -OS_ALWAYS_INLINE -static inline bool -firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size) -{ - return pos.fbc_next_entry_offs + size <= pos.fbc_private_offs; -} - #if DISPATCH_PURE_C OS_ALWAYS_INLINE @@ 
-188,84 +174,13 @@ firehose_buffer_qos_bits_propagate(void) #endif } -OS_ALWAYS_INLINE -static inline long -firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp, - firehose_stream_t stream, uint16_t pubsize, - uint16_t privsize, uint8_t **privptr) -{ - const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - firehose_buffer_pos_u orig, pos; - uint8_t qos_bits = firehose_buffer_qos_bits_propagate(); - bool reservation_failed, stamp_delta_fits; - - stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0; - - // no acquire barrier because the returned space is written to only - os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos, - orig.fbc_atomic_pos, pos.fbc_atomic_pos, relaxed, { - if (unlikely(orig.fbc_atomic_pos == 0)) { - // we acquired a really really old reference, and we probably - // just faulted in a new page - // FIXME: if/when we hit this we should try to madvise it back FREE - os_atomic_rmw_loop_give_up(return 0); - } - if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) { - // nothing to do if the chunk is full, or the stream doesn't match, - // in which case the thread probably: - // - loaded the chunk ref - // - been suspended a long while - // - read the chunk to find a very old thing - os_atomic_rmw_loop_give_up(return 0); - } - pos = orig; - pos.fbc_qos_bits |= qos_bits; - if (unlikely(!firehose_buffer_pos_fits(orig, - ft_size + pubsize + privsize) || !stamp_delta_fits)) { - pos.fbc_flag_full = true; - reservation_failed = true; - } else { - // using these *_INC macros is so that the compiler generates better - // assembly: using the struct individual fields forces the compiler - // to handle carry propagations, and we know it won't happen - pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) * - FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC; - pos.fbc_atomic_pos -= privsize * - FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC; - pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC; - const uint16_t minimum_payload_size = 16; - if (!firehose_buffer_pos_fits(pos, - roundup(ft_size + minimum_payload_size , 8))) { - // if we can't even have minimum_payload_size bytes of payload - // for the next tracepoint, just flush right away - pos.fbc_flag_full = true; - } - reservation_failed = false; - } - }); - - if (reservation_failed) { - if (pos.fbc_refcnt) { - // nothing to do, there is a thread writing that will pick up - // the "FULL" flag on flush and push as a consequence - return 0; - } - // caller must enqueue chunk - return -1; - } - if (privptr) { - *privptr = fbc->fbc_start + pos.fbc_private_offs; - } - return orig.fbc_next_entry_offs; -} - OS_ALWAYS_INLINE static inline void firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) { firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; firehose_stream_state_u old_state, new_state; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; uint64_t stamp = UINT64_MAX; // will cause the reservation to fail uint16_t ref; long result; @@ -275,11 +190,15 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) ref = old_state.fss_current; if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) { // there is no installed page, nothing to flush, go away +#ifndef KERNEL + firehose_buffer_force_connect(fb); +#endif return; } - fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL); + fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); + result = 
firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, + firehose_buffer_qos_bits_propagate(), 1, 0, NULL); if (likely(result < 0)) { firehose_buffer_ring_enqueue(fb, old_state.fss_current); } @@ -339,8 +258,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, { firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; firehose_stream_state_u old_state, new_state; - firehose_tracepoint_t ft; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; #if KERNEL bool failable = false; #endif @@ -356,18 +274,19 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, ref = old_state.fss_current; if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) { - fbc = firehose_buffer_ref_to_chunk(fb, ref); - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, + fc = firehose_buffer_ref_to_chunk(fb, ref); + result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, + firehose_buffer_qos_bits_propagate(), pubsize, privsize, privptr); if (likely(result > 0)) { - ft = (firehose_tracepoint_t)(fbc->fbc_start + result); - stamp -= fbc->fbc_timestamp; - stamp |= (uint64_t)pubsize << 48; - // Needed for process death handling (tracepoint-begin) - // see firehose_buffer_stream_chunk_install - os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); - dispatch_compiler_barrier(); - return ft; + uint64_t thread; +#ifdef KERNEL + thread = thread_tid(current_thread()); +#else + thread = _pthread_threadid_self_np_direct(); +#endif + return firehose_chunk_tracepoint_begin(fc, + stamp, pubsize, thread, result); } if (likely(result < 0)) { firehose_buffer_ring_enqueue(fb, old_state.fss_current); @@ -402,7 +321,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, #else new_state.fss_allocator = _dispatch_tid_self(); #endif - success = os_atomic_cmpxchgvw2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (likely(success)) { @@ -444,8 +363,7 @@ static inline void firehose_buffer_tracepoint_flush(firehose_buffer_t fb, firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) { - firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft); - firehose_buffer_pos_u pos; + firehose_chunk_t fc = firehose_buffer_chunk_for_address(ft); // Needed for process death handling (tracepoint-flush): // We want to make sure the observers @@ -453,17 +371,8 @@ firehose_buffer_tracepoint_flush(firehose_buffer_t fb, // 1. write all the data to the tracepoint // 2. 
write the tracepoint ID, so that seeing it means the tracepoint // is valid -#ifdef KERNEL - ft->ft_thread = thread_tid(current_thread()); -#else - ft->ft_thread = _pthread_threadid_self_np_direct(); -#endif - // release barrier makes the log writes visible - os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); - pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); - if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) { - firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc)); + if (firehose_chunk_tracepoint_end(fc, ft, ftid)) { + firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fc)); } } diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index a6be2fab7..52397d65e 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -41,6 +41,8 @@ static struct firehose_server_s { firehose_handler_t fs_handler; firehose_snapshot_t fs_snapshot; + bool fs_io_snapshot_started; + bool fs_mem_snapshot_started; int fs_kernel_fd; firehose_client_t fs_kernel_client; @@ -74,7 +76,7 @@ firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, push_reply.fpr_io_flushed_pos, relaxed); - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) { dispatch_assume_zero(errno); } @@ -157,7 +159,7 @@ static void firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) { firehose_buffer_t fb = fc->fc_buffer; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fbc; firehose_event_t evt; uint16_t volatile *fbh_ring; uint16_t flushed, ref, count = 0; @@ -172,7 +174,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) fbh_ring = fb->fb_header.fbh_io_ring; sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos; flushed = (uint16_t)fc->fc_io_flushed_pos; - if (fc->fc_needs_io_snapshot) { + if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) { snapshot = server_config.fs_snapshot; } } else { @@ -182,7 +184,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) fbh_ring = fb->fb_header.fbh_mem_ring; sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos; flushed = (uint16_t)fc->fc_mem_flushed_pos; - if (fc->fc_needs_mem_snapshot) { + if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) { snapshot = server_config.fs_snapshot; } } @@ -209,7 +211,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK; ref = os_atomic_load(&fbh_ring[ref], relaxed); ref &= FIREHOSE_RING_POS_IDX_MASK; - } while (fc->fc_is_kernel && !ref); + } while (!fc->fc_pid && !ref); count++; if (!ref) { _dispatch_debug("Ignoring invalid page reference in ring: %d", ref); @@ -217,10 +219,17 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) } fbc = firehose_buffer_ref_to_chunk(fb, ref); + if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + // serialize with firehose_client_metadata_stream_peek + os_unfair_lock_lock(&fc->fc_lock); + } server_config.fs_handler(fc, evt, fbc); if (slowpath(snapshot)) { snapshot->handler(fc, evt, fbc); } + if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + os_unfair_lock_unlock(&fc->fc_lock); + } // clients not using notifications (single threaded) always drain fully // because they use all their limit, always } while (!fc->fc_use_notifs 
|| count < DRAIN_BATCH_SIZE || snapshot); @@ -238,7 +247,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) client_flushed = os_atomic_load2o(&fb->fb_header, fbh_ring_tail.frp_mem_flushed, relaxed); } - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { // will fire firehose_client_notify() because port is MACH_PORT_DEAD port = fc->fc_sendp; } else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) { @@ -253,7 +262,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) if (port) { firehose_client_notify(fc, port); } - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { if (!(flags & FIREHOSE_DRAIN_POLL)) { // see firehose_client_kernel_source_handle_event dispatch_resume(fc->fc_kernel_source); @@ -283,7 +292,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) // from now on all IO/mem drains depending on `for_io` will be no-op // (needs__snapshot: false, memory_corrupted: true). we can safely // silence the corresponding source of drain wake-ups. - if (!fc->fc_is_kernel) { + if (fc->fc_pid) { dispatch_source_cancel(for_io ? fc->fc_io_source : fc->fc_mem_source); } } @@ -327,6 +336,8 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL); TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry); + dispatch_release(fc->fc_mach_channel); + fc->fc_mach_channel = NULL; fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; _os_object_release(&fc->fc_as_os_object); @@ -383,26 +394,26 @@ firehose_client_handle_death(void *ctxt) // Then look at all the allocated pages not seen in the ring while (bitmap) { uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + if (fbc->fc_start + fbc_length <= fbc->fc_data) { // this page has its "recycle-requeue" done, but hasn't gone // through "recycle-reuse", or it has no data, ditch it continue; } - if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { // this thing has data, but the first tracepoint is unreadable // so also just ditch it continue; } - if (!fbc->fbc_pos.fbc_flag_io) { + if (!fbc->fc_pos.fcp_flag_io) { mem_bitmap |= 1ULL << ref; continue; } server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc); - if (fc->fc_needs_io_snapshot && snapshot) { + if (fc->fc_needs_io_snapshot && server_config.fs_io_snapshot_started) { snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc); } } @@ -416,11 +427,11 @@ firehose_client_handle_death(void *ctxt) while (mem_bitmap_copy) { uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); mem_bitmap_copy &= ~(1ULL << ref); server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc); - if (fc->fc_needs_mem_snapshot && snapshot) { + if (fc->fc_needs_mem_snapshot && server_config.fs_mem_snapshot_started) { snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc); } } @@ -434,9 +445,9 @@ static void firehose_client_handle_mach_event(void *ctx, 
dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED) { - mach_msg_header_t *msg_hdr; + mach_msg_header_t *msg_hdr = NULL; firehose_client_t fc = ctx; - mach_port_t oldsendp, oldrecvp; + mach_port_t oldsendp = 0, oldrecvp = 0; if (dmsg) { msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); @@ -502,7 +513,7 @@ firehose_client_resume(firehose_client_t fc, dispatch_assert_queue(server_config.fs_io_drain_queue); TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry); server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci); - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { dispatch_activate(fc->fc_kernel_source); } else { dispatch_mach_connect(fc->fc_mach_channel, @@ -515,16 +526,11 @@ firehose_client_resume(firehose_client_t fc, static void firehose_client_cancel(firehose_client_t fc) { - dispatch_mach_t dm; dispatch_block_t block; _dispatch_debug("client died (unique_pid: 0x%llx", firehose_client_get_unique_pid(fc, NULL)); - dm = fc->fc_mach_channel; - fc->fc_mach_channel = NULL; - dispatch_release(dm); - fc->fc_use_notifs = false; dispatch_source_cancel(fc->fc_io_source); dispatch_source_cancel(fc->fc_mem_source); @@ -552,8 +558,21 @@ _firehose_client_create(firehose_buffer_t fb) return fc; } +#pragma pack(4) +typedef struct firehose_token_s { + uid_t auid; + uid_t euid; + gid_t egid; + uid_t ruid; + gid_t rgid; + pid_t pid; + au_asid_t asid; + dev_t execcnt; +} *firehose_token_t; +#pragma pack() + static firehose_client_t -firehose_client_create(firehose_buffer_t fb, +firehose_client_create(firehose_buffer_t fb, firehose_token_t token, mach_port_t comm_recvp, mach_port_t comm_sendp) { uint64_t unique_pid = fb->fb_header.fbh_uniquepid; @@ -561,6 +580,9 @@ firehose_client_create(firehose_buffer_t fb, dispatch_mach_t dm; dispatch_source_t ds; + fc->fc_pid = token->pid ? token->pid : ~0; + fc->fc_euid = token->euid; + fc->fc_pidversion = token->execcnt; ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, server_config.fs_mem_drain_queue); _os_object_retain_internal_inline(&fc->fc_as_os_object); @@ -617,12 +639,11 @@ firehose_kernel_client_create(void) DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer"); } if (fb_map.fbmi_size != - FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE) { + FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) { DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size"); } fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr); - fc->fc_is_kernel = true; ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, fs->fs_ipc_queue); dispatch_set_context(ds, fc); @@ -663,12 +684,21 @@ uint64_t firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out) { firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header; - if (fc->fc_is_kernel) { - if (pid_out) *pid_out = 0; - return 0; - } - if (pid_out) *pid_out = fbh->fbh_pid ?: ~(pid_t)0; - return fbh->fbh_uniquepid ?: ~0ull; + if (pid_out) *pid_out = fc->fc_pid; + if (!fc->fc_pid) return 0; + return fbh->fbh_uniquepid ? 
fbh->fbh_uniquepid : ~0ull; +} + +uid_t +firehose_client_get_euid(firehose_client_t fc) +{ + return fc->fc_euid; +} + +int +firehose_client_get_pid_version(firehose_client_t fc) +{ + return fc->fc_pidversion; } void * @@ -760,6 +790,17 @@ firehose_server_assert_spi_version(uint32_t spi_version) } } +bool +firehose_server_has_ever_flushed_pages(void) +{ + // Use the IO pages flushed count from the kernel client as an + // approximation for whether the firehose has ever flushed pages during + // this boot. logd uses this to detect the first time it starts after a + // fresh boot. + firehose_client_t fhc = server_config.fs_kernel_client; + return !fhc || fhc->fc_io_flushed_pos > 0; +} + void firehose_server_resume(void) { @@ -777,52 +818,83 @@ firehose_server_resume(void) MACH_PORT_NULL, NULL); } +OS_NOINLINE +static void +_firehose_server_cancel(void *ctxt OS_UNUSED) +{ + firehose_client_t fc; + TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) { + dispatch_mach_cancel(fc->fc_mach_channel); + } +} + +void +firehose_server_cancel(void) +{ + dispatch_mach_cancel(server_config.fs_mach_channel); + dispatch_async_f(server_config.fs_io_drain_queue, NULL, + _firehose_server_cancel); +} + +dispatch_queue_t +firehose_server_copy_queue(firehose_server_queue_t which) +{ + dispatch_queue_t dq; + switch (which) { + case FIREHOSE_SERVER_QUEUE_IO: + dq = server_config.fs_io_drain_queue; + break; + case FIREHOSE_SERVER_QUEUE_MEMORY: + dq = server_config.fs_mem_drain_queue; + break; + default: + DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type"); + } + dispatch_retain(dq); + return dq; +} + #pragma mark - #pragma mark firehose snapshot and peeking void firehose_client_metadata_stream_peek(firehose_client_t fc, - firehose_event_t context, bool (^peek_should_start)(void), - bool (^peek)(firehose_buffer_chunk_t fbc)) + OS_UNUSED firehose_event_t context, bool (^peek_should_start)(void), + bool (^peek)(firehose_chunk_t fbc)) { - if (context != FIREHOSE_EVENT_MEM_BUFFER_RECEIVED) { - return dispatch_sync(server_config.fs_mem_drain_queue, ^{ - firehose_client_metadata_stream_peek(fc, - FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, peek_should_start, peek); - }); - } - - if (peek_should_start && !peek_should_start()) { - return; - } + os_unfair_lock_lock(&fc->fc_lock); - firehose_buffer_t fb = fc->fc_buffer; - firehose_buffer_header_t fbh = &fb->fb_header; - uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap; + if (peek_should_start && peek_should_start()) { + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap; - while (bitmap) { - uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; - bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { - // this page has its "recycle-requeue" done, but hasn't gone - // through "recycle-reuse", or it has no data, ditch it - continue; - } - if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { - // this thing has data, but the first tracepoint is unreadable - // so also just ditch it - continue; - } - if (fbc->fbc_pos.fbc_stream != firehose_stream_metadata) { - continue; - } - if (!peek(fbc)) { - break; + bitmap &= ~(1ULL << ref); + if 
(fbc->fc_start + fbc_length <= fbc->fc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (fbc->fc_pos.fcp_stream != firehose_stream_metadata) { + continue; + } + if (!peek(fbc)) { + break; + } } } + + os_unfair_lock_unlock(&fc->fc_lock); } OS_NOINLINE OS_COLD @@ -872,21 +944,21 @@ firehose_client_snapshot_finish(firehose_client_t fc, // Then look at all the allocated pages not seen in the ring while (bitmap) { uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + if (fbc->fc_start + fbc_length <= fbc->fc_data) { // this page has its "recycle-requeue" done, but hasn't gone // through "recycle-reuse", or it has no data, ditch it continue; } - if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { // this thing has data, but the first tracepoint is unreadable // so also just ditch it continue; } - if (fbc->fbc_pos.fbc_flag_io != for_io) { + if (fbc->fc_pos.fcp_flag_io != for_io) { continue; } snapshot->handler(fc, evt, fbc); @@ -903,11 +975,12 @@ firehose_snapshot_start(void *ctxt) // 0. we need to be on the IO queue so that client connection and/or death // cannot happen concurrently dispatch_assert_queue(server_config.fs_io_drain_queue); + server_config.fs_snapshot = snapshot; // 1. mark all the clients participating in the current snapshot // and enter the group for each bit set TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) { - if (fci->fc_is_kernel) { + if (!fci->fc_pid) { #if TARGET_OS_SIMULATOR continue; #endif @@ -926,16 +999,18 @@ firehose_snapshot_start(void *ctxt) } dispatch_async(server_config.fs_mem_drain_queue, ^{ - // 2. make fs_snapshot visible, this is what triggers the snapshot - // logic from _drain() or handle_death(). until fs_snapshot is - // published, the bits set above are mostly ignored - server_config.fs_snapshot = snapshot; - + // 2. start the fs_mem_snapshot, this is what triggers the snapshot + // logic from _drain() or handle_death() + server_config.fs_mem_snapshot_started = true; snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL); dispatch_async(server_config.fs_io_drain_queue, ^{ firehose_client_t fcj; + // 3. 
start the fs_io_snapshot, this is what triggers the snapshot + // logic from _drain() or handle_death() + // 29868879: must always happen after the memory snapshot started + server_config.fs_io_snapshot_started = true; snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL); // match group_enter from firehose_snapshot() after MEM+IO_START @@ -947,7 +1022,7 @@ firehose_snapshot_start(void *ctxt) // were removed from the list have already left the group // (see firehose_client_finalize()) TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) { - if (fcj->fc_is_kernel) { + if (!fcj->fc_pid) { #if !TARGET_OS_SIMULATOR firehose_client_kernel_source_handle_event(fcj); #endif @@ -967,6 +1042,8 @@ firehose_snapshot_finish(void *ctxt) fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL); server_config.fs_snapshot = NULL; + server_config.fs_mem_snapshot_started = false; + server_config.fs_io_snapshot_started = false; dispatch_release(fs->fs_group); Block_release(fs->handler); @@ -1010,7 +1087,8 @@ kern_return_t firehose_server_register(mach_port_t server_port OS_UNUSED, mach_port_t mem_port, mach_vm_size_t mem_size, mach_port_t comm_recvp, mach_port_t comm_sendp, - mach_port_t extra_info_port, mach_vm_size_t extra_info_size) + mach_port_t extra_info_port, mach_vm_size_t extra_info_size, + audit_token_t atoken) { mach_vm_address_t base_addr = 0; firehose_client_t fc = NULL; @@ -1060,7 +1138,7 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, } fc = firehose_client_create((firehose_buffer_t)base_addr, - comm_recvp, comm_sendp); + (firehose_token_t)&atoken, comm_recvp, comm_sendp); dispatch_async(server_config.fs_io_drain_queue, ^{ firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index 799172175..d80516760 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -53,11 +53,14 @@ struct firehose_client_s { dispatch_source_t fc_mem_source; mach_port_t fc_recvp; mach_port_t fc_sendp; + os_unfair_lock fc_lock; + pid_t fc_pid; + int fc_pidversion; + uid_t fc_euid; bool fc_use_notifs; bool fc_memory_corrupted; bool fc_needs_io_snapshot; bool fc_needs_mem_snapshot; - bool fc_is_kernel; }; void diff --git a/src/init.c b/src/init.c index 87be596f2..29d8c4e97 100644 --- a/src/init.c +++ b/src/init.c @@ -47,12 +47,29 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void) { + _os_object_atfork_prepare(); } DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void) { + _os_object_atfork_parent(); +} + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_atfork_child(void) +{ + _os_object_atfork_child(); + _voucher_atfork_child(); + _dispatch_event_loop_atfork_child(); + if (_dispatch_is_multithreaded_inline()) { + _dispatch_child_of_unsafe_fork = true; + } + _dispatch_queue_atfork_child(); + // clear the _PROHIBIT and _MULTITHREADED bits if set + _dispatch_unsafe_fork = 0; } #pragma mark - @@ -76,13 +93,13 @@ pthread_key_t dispatch_frame_key; pthread_key_t dispatch_cache_key; pthread_key_t dispatch_context_key; pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; -pthread_key_t dispatch_defaultpriority_key; +pthread_key_t dispatch_basepri_key; #if DISPATCH_INTROSPECTION pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON pthread_key_t dispatch_bcounter_key; #endif -pthread_key_t dispatch_sema4_key; +pthread_key_t dispatch_wlh_key; pthread_key_t dispatch_voucher_key; pthread_key_t 
dispatch_deferred_items_key; #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE @@ -176,8 +193,8 @@ const struct dispatch_queue_offsets_s dispatch_queue_offsets = { .dqo_suspend_cnt_size = 0, .dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq), .dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq), - .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority), - .dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority), + .dqo_priority = 0, + .dqo_priority_size = 0, }; #if DISPATCH_USE_DIRECT_TSD @@ -200,83 +217,90 @@ struct dispatch_queue_s _dispatch_main_q = { #endif .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), .dq_label = "com.apple.main-thread", - .dq_width = 1, - .dq_atomic_bits = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, - .dq_override_voucher = DISPATCH_NO_VOUCHER, + .dq_atomic_flags = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC | DQF_WIDTH(1), + .dq_wlh = DISPATCH_WLH_GLOBAL, // TODO: main thread wlh .dq_serialnum = 1, }; #pragma mark - #pragma mark dispatch_queue_attr_t -#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, inactive) \ - { \ - DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \ - .dqa_qos_class = (qos), \ - .dqa_relative_priority = (qos) ? (prio) : 0, \ - .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ - .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \ - .dqa_concurrent = (concurrent), \ - .dqa_inactive = (inactive), \ - } +#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, \ + inactive) \ + { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \ + .dqa_qos_and_relpri = (_dispatch_priority_make(qos, prio) & \ + DISPATCH_PRIORITY_REQUESTED_MASK), \ + .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ + .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \ + .dqa_concurrent = (concurrent), \ + .dqa_inactive = (inactive), \ + } -#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, concurrent) \ - { \ - [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ - qos, prio, overcommit, freq, concurrent, false), \ - [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ - qos, prio, overcommit, freq, concurrent, true), \ - } +#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, \ + concurrent) \ + { \ + [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \ + qos, prio, overcommit, freq, concurrent, false), \ + [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \ + qos, prio, overcommit, freq, concurrent, true), \ + } #define DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, prio, overcommit) \ - { \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 0), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 0), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 0), \ - } + { \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \ + 
DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, INHERIT, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, INHERIT, 0), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, WORK_ITEM, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, WORK_ITEM, 0), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, NEVER, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(\ + qos, prio, overcommit, NEVER, 0), \ + } #define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \ - [prio] = { \ - [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified), \ - [DQA_INDEX_NON_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \ - [DQA_INDEX_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \ - } + [prio] = { \ + [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified),\ + [DQA_INDEX_NON_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \ + [DQA_INDEX_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \ + } #define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \ - { \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \ - } + { \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \ + } #define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \ - [DQA_INDEX_QOS_CLASS_##qos] = \ - DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos) + [DQA_INDEX_QOS_CLASS_##qos] = \ + DISPATCH_QUEUE_ATTR_PRIO_INIT(DISPATCH_QOS_##qos) // DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased // to array member [0][0][0][0][0][0] and their properties must match! 
@@ -298,7 +322,7 @@ const struct dispatch_queue_attr_s _dispatch_queue_attrs[] #if DISPATCH_VARIANT_STATIC // struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = - DISPATCH_QUEUE_ATTR_INIT(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + DISPATCH_QUEUE_ATTR_INIT(QOS_CLASS_UNSPECIFIED, 0, unspecified, INHERIT, 1, false); #endif // DISPATCH_VARIANT_STATIC @@ -333,6 +357,7 @@ DISPATCH_VTABLE_INSTANCE(queue, .do_dispose = _dispatch_queue_dispose, .do_suspend = _dispatch_queue_suspend, .do_resume = _dispatch_queue_resume, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -346,6 +371,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue, .do_suspend = _dispatch_queue_suspend, .do_resume = _dispatch_queue_resume, .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -359,6 +385,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, queue, .do_suspend = _dispatch_queue_suspend, .do_resume = _dispatch_queue_resume, .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -370,6 +397,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, .do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE, .do_kind = "global-queue", .do_dispose = _dispatch_pthread_root_queue_dispose, + .do_push = _dispatch_root_queue_push, .do_wakeup = _dispatch_root_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -379,6 +407,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, queue, .do_kind = "main-queue", .do_dispose = _dispatch_queue_dispose, .do_invoke = _dispatch_queue_invoke, + .do_push = _dispatch_queue_push, .do_wakeup = _dispatch_main_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -388,6 +417,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, .do_kind = "runloop-queue", .do_dispose = _dispatch_runloop_queue_dispose, .do_invoke = _dispatch_queue_invoke, + .do_push = _dispatch_queue_push, .do_wakeup = _dispatch_runloop_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -396,6 +426,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, .do_type = DISPATCH_QUEUE_MGR_TYPE, .do_kind = "mgr-queue", .do_invoke = _dispatch_mgr_thread, + .do_push = _dispatch_queue_push, .do_wakeup = _dispatch_mgr_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -421,6 +452,7 @@ DISPATCH_VTABLE_INSTANCE(source, .do_suspend = (void *)_dispatch_queue_suspend, .do_resume = (void *)_dispatch_queue_resume, .do_finalize_activation = _dispatch_source_finalize_activation, + .do_push = (void *)_dispatch_queue_push, .do_invoke = _dispatch_source_invoke, .do_wakeup = _dispatch_source_wakeup, .do_debug = _dispatch_source_debug, @@ -435,6 +467,7 @@ DISPATCH_VTABLE_INSTANCE(mach, .do_suspend = (void *)_dispatch_queue_suspend, .do_resume = (void *)_dispatch_queue_resume, .do_finalize_activation = _dispatch_mach_finalize_activation, + .do_push = (void *)_dispatch_queue_push, .do_invoke = _dispatch_mach_invoke, .do_wakeup = _dispatch_mach_wakeup, .do_debug = _dispatch_mach_debug, @@ -481,31 +514,6 @@ DISPATCH_VTABLE_INSTANCE(disk, ); -const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { - DC_VTABLE_ENTRY(ASYNC_REDIRECT, - .do_kind = "dc-redirect", - .do_invoke = _dispatch_async_redirect_invoke), -#if HAVE_MACH - 
DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, - .do_kind = "dc-mach-send-drain", - .do_invoke = _dispatch_mach_send_barrier_drain_invoke), - DC_VTABLE_ENTRY(MACH_SEND_BARRIER, - .do_kind = "dc-mach-send-barrier", - .do_invoke = _dispatch_mach_barrier_invoke), - DC_VTABLE_ENTRY(MACH_RECV_BARRIER, - .do_kind = "dc-mach-recv-barrier", - .do_invoke = _dispatch_mach_barrier_invoke), -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS - DC_VTABLE_ENTRY(OVERRIDE_STEALING, - .do_kind = "dc-override-stealing", - .do_invoke = _dispatch_queue_override_invoke), - DC_VTABLE_ENTRY(OVERRIDE_OWNING, - .do_kind = "dc-override-owning", - .do_invoke = _dispatch_queue_override_invoke), -#endif -}; - void _dispatch_vtable_init(void) { @@ -962,6 +970,22 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } #if HAVE_MACH + +#undef _dispatch_client_callout3 +DISPATCH_NOINLINE +void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f) +{ + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, reason, dmsg); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, reason, dmsg); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); +} + #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -1057,6 +1081,24 @@ os_release(void *obj) } } +void +_os_object_atfork_prepare(void) +{ + return; +} + +void +_os_object_atfork_parent(void) +{ + return; +} + +void +_os_object_atfork_child(void) +{ + return; +} + #pragma mark - #pragma mark dispatch_autorelease_pool no_objc @@ -1095,397 +1137,9 @@ _dispatch_last_resort_autorelease_pool_pop(void *pool) #endif // DISPATCH_COCOA_COMPAT #endif // !USE_OBJC -#pragma mark - -#pragma mark dispatch_source_types - -static void -dispatch_source_type_timer_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask, - dispatch_queue_t q) -{ - if (fastpath(!ds->ds_refs)) { - ds->ds_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_timer_source_refs_s)); - } - ds->ds_needs_rearm = true; - ds->ds_is_timer = true; - if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0) - || q == dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){ - mask |= DISPATCH_TIMER_BACKGROUND; // - } - ds_timer(ds->ds_refs).flags = mask; -} - -const struct dispatch_source_type_s _dispatch_source_type_timer = { - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - }, - .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| - DISPATCH_TIMER_WALL_CLOCK, - .init = dispatch_source_type_timer_init, -}; - -static void -dispatch_source_type_after_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) -{ - dispatch_source_type_timer_init(ds, type, handle, mask, q); - ds->ds_needs_rearm = false; - ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER; -} - -const struct dispatch_source_type_s _dispatch_source_type_after = { - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - }, - .init = dispatch_source_type_after_init, -}; - -static void -dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) -{ - ds->ds_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_timer_source_aggregate_refs_s)); - dispatch_source_type_timer_init(ds, type, handle, mask, q); - 
ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE; - ds->dq_specific_q = (void*)handle; - _dispatch_retain(ds->dq_specific_q); -} - -const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={ - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - .ident = ~0ull, - }, - .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, - .init = dispatch_source_type_timer_with_aggregate_init, -}; - -static void -dispatch_source_type_interval_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) -{ - dispatch_source_type_timer_init(ds, type, handle, mask, q); - ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL; - unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs); - ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident; - _dispatch_source_set_interval(ds, handle); -} - -const struct dispatch_source_type_s _dispatch_source_type_interval = { - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - .ident = ~0ull, - }, - .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| - DISPATCH_INTERVAL_UI_ANIMATION, - .init = dispatch_source_type_interval_init, -}; - -static void -dispatch_source_type_readwrite_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_is_level = true; -#if HAVE_DECL_NOTE_LOWAT - // bypass kernel check for device kqueue support rdar://19004921 - ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT; -#endif - ds->ds_dkev->dk_kevent.data = 1; -} - -const struct dispatch_source_type_s _dispatch_source_type_read = { - .ke = { - .filter = EVFILT_READ, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_readwrite_init, -}; - -const struct dispatch_source_type_s _dispatch_source_type_write = { - .ke = { - .filter = EVFILT_WRITE, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_readwrite_init, -}; - -#if DISPATCH_USE_MEMORYSTATUS - -#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 -static int _dispatch_ios_simulator_memory_warnings_fd = -1; -static void -_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) -{ - char *e = getenv("SIMULATOR_MEMORY_WARNINGS"); - if (!e) return; - _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); - if (_dispatch_ios_simulator_memory_warnings_fd == -1) { - (void)dispatch_assume_zero(errno); - } -} -#endif - -#if TARGET_IPHONE_SIMULATOR -static void -dispatch_source_type_memorypressure_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init); - handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; - mask = NOTE_ATTRIB; - ds->ds_dkev->dk_kevent.filter = EVFILT_VNODE; - ds->ds_dkev->dk_kevent.ident = handle; - ds->ds_dkev->dk_kevent.flags |= EV_CLEAR; - ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; - ds->ds_ident_hack = handle; - ds->ds_pending_data_mask = mask; - ds->ds_memorypressure_override = 1; -} -#else -#define dispatch_source_type_memorypressure_init NULL -#endif - -#ifndef NOTE_MEMORYSTATUS_LOW_SWAP -#define NOTE_MEMORYSTATUS_LOW_SWAP 0x8 -#endif - -const struct dispatch_source_type_s _dispatch_source_type_memorypressure = { - .ke = { - .filter = 
EVFILT_MEMORYSTATUS, - .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN - |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP - |NOTE_MEMORYSTATUS_PROC_LIMIT_WARN|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL, - .init = dispatch_source_type_memorypressure_init, -}; - -static void -dispatch_source_type_vm_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - // Map legacy vm pressure to memorypressure warning rdar://problem/15907505 - mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; - ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; - ds->ds_pending_data_mask = mask; - ds->ds_vmpressure_override = 1; -#if TARGET_IPHONE_SIMULATOR - dispatch_source_type_memorypressure_init(ds, type, handle, mask, q); -#endif -} - -const struct dispatch_source_type_s _dispatch_source_type_vm = { - .ke = { - .filter = EVFILT_MEMORYSTATUS, - .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_VM_PRESSURE, - .init = dispatch_source_type_vm_init, -}; - -#elif DISPATCH_USE_VM_PRESSURE - -const struct dispatch_source_type_s _dispatch_source_type_vm = { - .ke = { - .filter = EVFILT_VM, - .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_VM_PRESSURE, -}; - -#endif // DISPATCH_USE_VM_PRESSURE - -const struct dispatch_source_type_s _dispatch_source_type_signal = { - .ke = { - .filter = EVFILT_SIGNAL, - .flags = EV_UDATA_SPECIFIC, - }, -}; - -#if !defined(__linux__) -static void -dispatch_source_type_proc_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831 -} - -const struct dispatch_source_type_s _dispatch_source_type_proc = { - .ke = { - .filter = EVFILT_PROC, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC -#if HAVE_DECL_NOTE_SIGNAL - |NOTE_SIGNAL -#endif -#if HAVE_DECL_NOTE_REAP - |NOTE_REAP -#endif - , - .init = dispatch_source_type_proc_init, -}; - -const struct dispatch_source_type_s _dispatch_source_type_vnode = { - .ke = { - .filter = EVFILT_VNODE, - .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK| - NOTE_RENAME|NOTE_FUNLOCK -#if HAVE_DECL_NOTE_REVOKE - |NOTE_REVOKE -#endif -#if HAVE_DECL_NOTE_NONE - |NOTE_NONE -#endif - , -}; - -const struct dispatch_source_type_s _dispatch_source_type_vfs = { - .ke = { - .filter = EVFILT_FS, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, - }, - .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD| - VQ_ASSIST|VQ_NOTRESPLOCK -#if HAVE_DECL_VQ_UPDATE - |VQ_UPDATE -#endif -#if HAVE_DECL_VQ_VERYLOWDISK - |VQ_VERYLOWDISK -#endif -#if HAVE_DECL_VQ_QUOTA - |VQ_QUOTA -#endif - , -}; - -const struct dispatch_source_type_s _dispatch_source_type_sock = { -#ifdef EVFILT_SOCK - .ke = { - .filter = EVFILT_SOCK, - .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | - NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | - NOTE_RESUME | NOTE_KEEPALIVE -#ifdef NOTE_ADAPTIVE_WTIMO - | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO -#endif -#ifdef NOTE_CONNECTED - | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED -#endif -#ifdef NOTE_NOTIFY_ACK - | NOTE_NOTIFY_ACK -#endif - , -#endif 
// EVFILT_SOCK -}; -#endif // !defined(__linux__) - -static void -dispatch_source_type_data_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_is_installed = true; - ds->ds_is_custom_source = true; - ds->ds_is_direct_kevent = true; - ds->ds_pending_data_mask = ~0ul; - ds->ds_needs_rearm = false; // not registered with kevent -} - -const struct dispatch_source_type_s _dispatch_source_type_data_add = { - .ke = { - .filter = DISPATCH_EVFILT_CUSTOM_ADD, - .flags = EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_data_init, -}; - -const struct dispatch_source_type_s _dispatch_source_type_data_or = { - .ke = { - .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, - .fflags = ~0u, - }, - .init = dispatch_source_type_data_init, -}; - -#if HAVE_MACH - -static void -dispatch_source_type_mach_send_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, unsigned long mask, - dispatch_queue_t q DISPATCH_UNUSED) -{ - if (!mask) { - // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD - ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD; - ds->ds_pending_data_mask = DISPATCH_MACH_SEND_DEAD; - } -} - -const struct dispatch_source_type_s _dispatch_source_type_mach_send = { - .ke = { - .filter = DISPATCH_EVFILT_MACH_NOTIFICATION, - .flags = EV_CLEAR, - }, - .mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, - .init = dispatch_source_type_mach_send_init, -}; - -static void -dispatch_source_type_mach_recv_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE; -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - if (_dispatch_evfilt_machport_direct_enabled) return; - ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE; - ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - ds->ds_is_direct_kevent = false; -#endif -} - -const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { - .ke = { - .filter = EVFILT_MACHPORT, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_mach_recv_init, -}; - #pragma mark - #pragma mark dispatch_mig +#if HAVE_MACH void * dispatch_mach_msg_get_context(mach_msg_header_t *msg) diff --git a/src/inline_internal.h b/src/inline_internal.h index d1c73dd4e..a7ccb8260 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -40,6 +40,9 @@ DISPATCH_NOTHROW void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); #if HAVE_MACH DISPATCH_NOTHROW void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f); +DISPATCH_NOTHROW void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error, dispatch_mach_handler_function_t f); @@ -62,6 +65,14 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } #if HAVE_MACH +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f) +{ + return f(ctxt, reason, dmsg); +} + DISPATCH_ALWAYS_INLINE static inline void 
_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -134,36 +145,24 @@ _dispatch_object_is_barrier(dispatch_object_t dou) DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_object_is_slow_item(dispatch_object_t dou) +_dispatch_object_is_sync_waiter(dispatch_object_t dou) { if (_dispatch_object_has_vtable(dou)) { return false; } - return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT); + return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_object_is_slow_non_barrier(dispatch_object_t dou) +_dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou) { if (_dispatch_object_has_vtable(dou)) { return false; } return ((dou._dc->dc_flags & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == - (DISPATCH_OBJ_SYNC_SLOW_BIT)); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_object_is_slow_barrier(dispatch_object_t dou) -{ - if (_dispatch_object_has_vtable(dou)) { - return false; - } - return ((dou._dc->dc_flags & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)); + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_WAITER_BIT)) == + (DISPATCH_OBJ_SYNC_WAITER_BIT)); } DISPATCH_ALWAYS_INLINE @@ -250,48 +249,6 @@ _dispatch_object_set_target_queue_inline(dispatch_object_t dou, #pragma mark dispatch_thread #if DISPATCH_PURE_C -#define DISPATCH_DEFERRED_ITEMS_MAGIC 0xdefe55edul /* deferred */ -#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8 -#ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN -_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= - DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, - "our list should not be longer than the kernel's"); -#endif - -typedef struct dispatch_deferred_items_s { - uint32_t ddi_magic; - dispatch_queue_t ddi_stashed_dq; - struct dispatch_object_s *ddi_stashed_dou; - dispatch_priority_t ddi_stashed_pp; - int ddi_nevents; - int ddi_maxevents; - _dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; -} dispatch_deferred_items_s, *dispatch_deferred_items_t; - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_deferred_items_set(dispatch_deferred_items_t ddi) -{ - _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_deferred_items_t -_dispatch_deferred_items_get(void) -{ - dispatch_deferred_items_t ddi = (dispatch_deferred_items_t) - _dispatch_thread_getspecific(dispatch_deferred_items_key); - if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) { - return ddi; - } - return NULL; -} - -#endif // DISPATCH_PURE_C -#pragma mark - -#pragma mark dispatch_thread -#if DISPATCH_PURE_C - DISPATCH_ALWAYS_INLINE static inline dispatch_thread_context_t _dispatch_thread_context_find(const void *key) @@ -345,12 +302,12 @@ _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it) dispatch_queue_t dq = it->dtfi_queue; if (dtf) { - if (dq->do_targetq) { - // redirections and trysync_f may skip some frames, - // so we need to simulate seeing the missing links - // however the bottom root queue is always present - it->dtfi_queue = dq->do_targetq; - if (it->dtfi_queue == dtf->dtf_queue) { + dispatch_queue_t tq = dq->do_targetq; + if (tq) { + // redirections, dispatch_sync and dispatch_trysync_f may skip + // frames, so we need to simulate seeing the missing links + it->dtfi_queue = tq; + if (dq == dtf->dtf_queue) { it->dtfi_frame = dtf->dtf_prev; } } else { @@ -385,13 +342,6 @@ _dispatch_thread_frame_get_current(void) return 
_dispatch_thread_getspecific(dispatch_frame_key); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf) -{ - _dispatch_thread_setspecific(dispatch_frame_key, dtf); -} - DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf) @@ -407,7 +357,6 @@ _dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq) _dispatch_thread_frame_save_state(dtf); _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, dispatch_frame_key, dtf); - dtf->dtf_deferred = NULL; } DISPATCH_ALWAYS_INLINE @@ -418,7 +367,6 @@ _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf, _dispatch_thread_frame_save_state(dtf); _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, dispatch_frame_key, new_base); - dtf->dtf_deferred = NULL; } DISPATCH_ALWAYS_INLINE @@ -450,28 +398,28 @@ _dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf) DISPATCH_ALWAYS_INLINE static inline int _dispatch_wqthread_override_start_check_owner(mach_port_t thread, - pthread_priority_t pp, mach_port_t *ulock_addr) + dispatch_qos_t qos, mach_port_t *ulock_addr) { #if HAVE_PTHREAD_WORKQUEUE_QOS if (!_dispatch_set_qos_class_enabled) return 0; return _pthread_workqueue_override_start_direct_check_owner(thread, - pp, ulock_addr); + _dispatch_qos_to_pp(qos), ulock_addr); #else - (void)thread; (void)pp; (void)ulock_addr; + (void)thread; (void)qos; (void)ulock_addr; return 0; #endif } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_wqthread_override_start(mach_port_t thread, - pthread_priority_t pp) +_dispatch_wqthread_override_start(mach_port_t thread, dispatch_qos_t qos) { #if HAVE_PTHREAD_WORKQUEUE_QOS if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_workqueue_override_start_direct(thread, pp); + (void)_pthread_workqueue_override_start_direct(thread, + _dispatch_qos_to_pp(qos)); #else - (void)thread; (void)pp; + (void)thread; (void)qos; #endif } @@ -510,43 +458,6 @@ _dispatch_thread_override_end(mach_port_t thread, void *resource) #endif } -#if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_qos_class_is_valid(pthread_priority_t pp) -{ - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT + - _PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) { - return false; - } - return true; -} -#define _dispatch_assert_is_valid_qos_class(pp) ({ typeof(pp) _pp = (pp); \ - if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \ - DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \ - } \ - }) - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_qos_override_is_valid(pthread_priority_t pp) -{ - if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) { - return false; - } - return _dispatch_qos_class_is_valid(pp); -} -#define _dispatch_assert_is_valid_qos_override(pp) ({ typeof(pp) _pp = (pp); \ - if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \ - DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \ - } \ - }) -#else -#define _dispatch_assert_is_valid_qos_override(pp) (void)(pp) -#define _dispatch_assert_is_valid_qos_class(pp) (void)(pp) -#endif - #endif // DISPATCH_PURE_C #pragma mark - #pragma mark dispatch_queue_t state accessors @@ -658,12 +569,9 @@ _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_has_immutable_target(dispatch_queue_t dq) +_dispatch_queue_is_legacy(dispatch_queue_t dq) { - if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) { - return 
false; - } - return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE; + return _dispatch_queue_atomic_flags(dq) & DQF_LEGACY; } #endif // DISPATCH_PURE_C @@ -696,7 +604,7 @@ static inline uint32_t _dq_state_available_width(uint64_t dq_state) { uint32_t full = DISPATCH_QUEUE_WIDTH_FULL; - if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) { + if (likely(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) { return full - _dq_state_extract_width_bits(dq_state); } return 0; @@ -723,7 +631,8 @@ _dq_state_is_suspended(uint64_t dq_state) { return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION; } -#define DISPATCH_QUEUE_IS_SUSPENDED(x) _dq_state_is_suspended((x)->dq_state) +#define DISPATCH_QUEUE_IS_SUSPENDED(x) \ + _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed)) DISPATCH_ALWAYS_INLINE static inline bool @@ -776,9 +685,36 @@ _dq_state_is_enqueued(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_has_override(uint64_t dq_state) +_dq_state_received_override(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_RECEIVED_OVERRIDE; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dq_state_max_qos(uint64_t dq_state) +{ + dq_state &= DISPATCH_QUEUE_MAX_QOS_MASK; + return (dispatch_qos_t)(dq_state >> DISPATCH_QUEUE_MAX_QOS_SHIFT); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dq_state_from_qos(dispatch_qos_t qos) { - return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE; + return (uint64_t)(qos) << DISPATCH_QUEUE_MAX_QOS_SHIFT; +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dq_state_merge_qos(uint64_t dq_state, dispatch_qos_t qos) +{ + uint64_t qos_bits = _dq_state_from_qos(qos); + if ((dq_state & DISPATCH_QUEUE_MAX_QOS_MASK) < qos_bits) { + dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + dq_state |= qos_bits | DISPATCH_QUEUE_RECEIVED_OVERRIDE; + } + return dq_state; } DISPATCH_ALWAYS_INLINE @@ -849,28 +785,29 @@ _dq_state_should_wakeup(uint64_t dq_state) #pragma mark dispatch_queue_t state machine #ifndef __cplusplus -static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu, - pthread_priority_t pp); -static inline bool _dispatch_queue_need_override_retain( - dispatch_queue_class_t dqu, pthread_priority_t pp); -static inline dispatch_priority_t _dispatch_queue_reset_override_priority( - dispatch_queue_class_t dqu, bool qp_is_floor); -static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, - dispatch_priority_t new_op); -static inline pthread_priority_t _dispatch_get_defaultpriority(void); -static inline void _dispatch_set_defaultpriority_override(void); -static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp); static inline pthread_priority_t _dispatch_get_priority(void); -static inline pthread_priority_t _dispatch_set_defaultpriority( - pthread_priority_t pp, pthread_priority_t *new_pp); +static inline dispatch_priority_t _dispatch_get_basepri(void); +static inline dispatch_qos_t _dispatch_get_basepri_override_qos_floor(void); +static inline void _dispatch_set_basepri_override_qos(dispatch_qos_t qos); +static inline void _dispatch_reset_basepri(dispatch_priority_t dbp); +static inline dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp); + +static inline bool _dispatch_queue_need_override_retain( + dispatch_queue_class_t dqu, dispatch_qos_t qos); DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq) { - if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { - // Arguments for and against this assert are within 6705399 - 
DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object"); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + long state = (long)dq_state; + if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); + if (unlikely(_dq_state_is_inactive(dq_state))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH(state, "Release of an inactive object"); + } + DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object"); } os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); } @@ -888,13 +825,13 @@ _dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf, if (inactive) { dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; + dq_state += DLOCK_OWNER_INVALID; dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume } dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; - dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT; + dqf |= DQF_WIDTH(width); os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); dq->dq_state = dq_state; - dq->dq_override_voucher = DISPATCH_NO_VOUCHER; dq->dq_serialnum = os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed); } @@ -909,16 +846,16 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) { - uint64_t dq_state, value; + uint64_t old_state, new_state; - (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - if (!fastpath(_dq_state_is_inactive(dq_state))) { + (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (unlikely(!_dq_state_is_inactive(old_state))) { os_atomic_rmw_loop_give_up(return false); } - value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL; + new_state = old_state + DISPATCH_QUEUE_SUSPEND_INTERVAL; }); - if (slowpath(!_dq_state_is_suspended(dq_state)) || - slowpath(_dq_state_has_side_suspend_cnt(dq_state))) { + if (unlikely(!_dq_state_is_suspended(old_state) || + _dq_state_has_side_suspend_cnt(old_state))) { // Crashing here means that 128+ dispatch_suspend() calls have been // made on an inactive object and then dispatch_set_target_queue() or // dispatch_set_*_handler() has been called. @@ -932,31 +869,19 @@ _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) return true; } -/* Must be used by any caller meaning to do a speculative wakeup when the caller - * was preventing other wakeups (for example dispatch_resume() or a drainer not - * doing a drain_try_unlock() and not observing DIRTY) - * - * In that case this call loads DIRTY with an acquire barrier so that when - * other threads have made changes (such as dispatch_source_cancel()) the - * caller can take these state machine changes into account in its decision to - * wake up the object. 
- */ +#define _dispatch_queue_should_override_self(dq_state, qos) \ + unlikely(qos < _dq_state_max_qos(dq_state)) + DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state, - dispatch_wakeup_flags_t flags) -{ - if (_dq_state_should_wakeup(dq_state)) { - if (slowpath(_dq_state_is_dirty(dq_state))) { - // - // seq_cst wrt state changes that were flushed and not acted upon - os_atomic_thread_fence(acquire); - } - return dx_wakeup(dq, 0, flags); - } - if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); - } +static inline dispatch_qos_t +_dispatch_queue_override_self(uint64_t dq_state) +{ + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + _dispatch_wqthread_override_start(_dispatch_tid_self(), qos); + // ensure that the root queue sees + // that this thread was overridden. + _dispatch_set_basepri_override_qos(qos); + return qos; } /* Used by: @@ -984,14 +909,23 @@ _dispatch_queue_drain_try_lock(dispatch_queue_t dq, clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED; } + dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor(); +retry: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { new_state = old_state; new_state ^= clear_enqueued_bit; if (likely(_dq_state_is_runnable(old_state) && !_dq_state_drain_locked(old_state))) { + if (_dispatch_queue_should_override_self(old_state, oq_floor)) { + os_atomic_rmw_loop_give_up({ + oq_floor = _dispatch_queue_override_self(old_state); + goto retry; + }); + } // - // Only keep the HAS_WAITER bit (and ENQUEUED if stealing). - // In particular acquiring the drain lock clears the DIRTY bit + // Only keep the HAS_WAITER, MAX_QOS and ENQUEUED (if stealing) bits + // In particular acquiring the drain lock clears the DIRTY and + // RECEIVED_OVERRIDE // new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; // @@ -1036,10 +970,10 @@ _dispatch_queue_drain_try_lock(dispatch_queue_t dq, */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq) +_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq, uint32_t tid) { uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; - value |= _dispatch_tid_self(); + value |= tid; return os_atomic_cmpxchg2o(dq, dq_state, DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire); @@ -1070,15 +1004,23 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq) { - uint64_t dq_state, value; + uint64_t old_state, new_state; - return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - if (!fastpath(_dq_state_is_sync_runnable(dq_state)) || - slowpath(_dq_state_is_dirty(dq_state)) || - slowpath(_dq_state_has_pending_barrier(dq_state))) { + // reserving non barrier width + // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width + // equivalent), so we have to check that this thread hasn't enqueued + // anything ahead of this call or we can break ordering + if (unlikely(dq->dq_items_tail)) { + return false; + } + + return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (unlikely(!_dq_state_is_sync_runnable(old_state)) || + _dq_state_is_dirty(old_state) || + _dq_state_has_pending_barrier(old_state)) { os_atomic_rmw_loop_give_up(return false); } - value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL; }); } @@ -1091,18 +1033,18 @@ DISPATCH_ALWAYS_INLINE 
DISPATCH_WARN_RESULT static inline uint32_t _dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width) { - uint64_t dq_state, value; + uint64_t old_state, new_state; uint32_t width; - (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - width = _dq_state_available_width(dq_state); - if (!fastpath(width)) { + (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + width = _dq_state_available_width(old_state); + if (unlikely(!width)) { os_atomic_rmw_loop_give_up(return 0); } if (width > da_width) { width = da_width; } - value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state = old_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL; }); return width; } @@ -1128,18 +1070,51 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool _dispatch_queue_try_acquire_async(dispatch_queue_t dq) { - uint64_t dq_state, value; + uint64_t old_state, new_state; - return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, { - if (!fastpath(_dq_state_is_runnable(dq_state)) || - slowpath(_dq_state_is_dirty(dq_state)) || - slowpath(_dq_state_has_pending_barrier(dq_state))) { + return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + if (unlikely(!_dq_state_is_runnable(old_state) || + _dq_state_is_dirty(old_state) || + _dq_state_has_pending_barrier(old_state))) { os_atomic_rmw_loop_give_up(return false); } - value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL; }); } +/* Used by concurrent drain + * + * Either acquires the full barrier width, in which case the Final state is: + * { ib:1 qf:1 pb:0 d:0 } + * Or if there isn't enough width prepare the queue with the PENDING_BARRIER bit + * { ib:0 pb:1 d:0} + * + * This always clears the dirty bit as we know for sure we shouldn't reevaluate + * the state machine here + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_upgrade_full_width(dispatch_queue_t dq, uint64_t owned) +{ + uint64_t old_state, new_state; + uint64_t pending_barrier_width = DISPATCH_QUEUE_PENDING_BARRIER + + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state - owned; + if (likely(!_dq_state_has_pending_barrier(old_state))) { + new_state += pending_barrier_width; + } + if (likely(_dq_state_is_runnable(new_state))) { + new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state += DISPATCH_QUEUE_IN_BARRIER; + new_state -= DISPATCH_QUEUE_PENDING_BARRIER; + } + new_state &= ~DISPATCH_QUEUE_DIRTY; + }); + return new_state & DISPATCH_QUEUE_IN_BARRIER; +} + /* Used at the end of Drainers * * This adjusts the `owned` width when the next continuation is already known @@ -1152,7 +1127,7 @@ _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned, { uint64_t reservation; - if (slowpath(dq->dq_width > 1)) { + if (unlikely(dq->dq_width > 1)) { if (next_dc && _dispatch_object_is_barrier(next_dc)) { reservation = DISPATCH_QUEUE_PENDING_BARRIER; reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; @@ -1168,67 +1143,65 @@ _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned, * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used * as a signal to renew the drain lock instead of releasing it. 
* - * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned` + * Successful unlock forces { dl:0, d:!done, qo:0 } and gives back `owned` */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned) +_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) { uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed); uint64_t new_state; - dispatch_priority_t pp = 0, op; - - do { - if (unlikely(_dq_state_is_dirty(old_state) && - !_dq_state_is_suspended(old_state))) { - // just renew the drain lock with an acquire barrier, to see - // what the enqueuer that set DIRTY has done. - os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire); - _dispatch_queue_reinstate_override_priority(dq, pp); - return false; - } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = old_state - owned; - if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) || - _dq_state_is_suspended(old_state)) { - // the test for the WIDTH_FULL_BIT is about narrow concurrent queues - // releasing the drain lock while being at the width limit - // - // _non_barrier_complete() will set the DIRTY bit when going back - // under the limit which will cause the try_unlock to fail - new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); - } else { - new_state &= ~DISPATCH_QUEUE_DIRTY; + if (unlikely(_dq_state_is_suspended(new_state))) { +#ifdef DLOCK_NOWAITERS_BIT + new_state = new_state | DISPATCH_QUEUE_DRAIN_OWNER_MASK; +#else + new_state = new_state | DLOCK_OWNER_INVALID; +#endif + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + return false; + }); + } else if (likely(done)) { new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - // This current owner is the only one that can clear HAS_OVERRIDE, - // so accumulating reset overrides here is valid. - if (unlikely(_dq_state_has_override(new_state))) { - new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE; - dispatch_assert(!_dispatch_queue_is_thread_bound(dq)); - op = _dispatch_queue_reset_override_priority(dq, false); - if (op > pp) pp = op; - } + new_state &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } else { + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK(new_state); + new_state |= DISPATCH_QUEUE_DIRTY; } - } while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state, - old_state, new_state, &old_state, release))); + }); - if (_dq_state_has_override(old_state)) { + if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } return true; } -/* Used at the end of Drainers when the next work item is known +/* Used to transfer the drain lock to a next thread, because it is known * and that the dirty-head check isn't needed. * - * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen. + * This releases `owned`, clears DIRTY, and handles overrides when seen. 
*/ DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq, - uint64_t owned, mach_port_t next_owner, uint64_t *orig_state) +static inline void +_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, + uint64_t owned, dispatch_object_t dou) { - uint64_t dq_state, value; + uint64_t old_state, new_state; + mach_port_t next_owner = 0; + if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) { + next_owner = (mach_port_t)dou._dc->dc_data; + } #ifdef DLOCK_NOWAITERS_BIT // The NOWAITERS_BIT state must not change through the transfer. It means @@ -1241,39 +1214,53 @@ _dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq, // clear it so that the second xor leaves the NOWAITERS_BIT alone. next_owner ^= DLOCK_NOWAITERS_BIT; #endif - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, { - value = dq_state - owned; - // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - owned; + // same as DISPATCH_QUEUE_DRAIN_UNLOCK // but we want to be more efficient wrt the WAITERS_BIT - value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; - value &= ~DISPATCH_QUEUE_DRAIN_PENDED; - value &= ~DISPATCH_QUEUE_DIRTY; - value ^= next_owner; + new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; + new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED; + new_state &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state ^= next_owner; }); - - if (_dq_state_has_override(dq_state)) { + if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } - if (orig_state) *orig_state = dq_state; - return value; } -#define _dispatch_queue_drain_unlock(dq, owned, orig) \ - _dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig) +/* Used to forcefully unlock the drain lock, bypassing the dirty bit check. + * This usually is followed by a wakeup to re-evaluate the state machine + * of the queue/source. + * + * This releases `owned`, clears DIRTY, and handles overrides when seen. + */ DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, - uint64_t to_unlock, dispatch_object_t dou) +static inline uint64_t +_dispatch_queue_drain_unlock(dispatch_queue_t dq, uint64_t owned) { - mach_port_t th_next = 0; - if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) { - th_next = (mach_port_t)dou._dc->dc_data; + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - owned; + // same as DISPATCH_QUEUE_DRAIN_UNLOCK + // but we want to be more efficient wrt the WAITERS_BIT +#ifdef DLOCK_NOWAITERS_BIT + new_state ^= DLOCK_NOWAITERS_BIT; +#endif + new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; + new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED; + new_state &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE; + }); + + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
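The drain-unlock protocol in the hunk above is easier to see outside the diff: a drainer that finds the DIRTY bit set at unlock time keeps the lock (clearing only DIRTY with an acquire barrier so the enqueuer's items become visible) and drains again, and only otherwise drops ownership. A minimal standalone model of that idea, using hypothetical STATE_* masks rather than the real dq_state layout:

```
/*
 * Minimal model of "DIRTY renews the drain lock" (hypothetical STATE_* masks,
 * not the real dq_state layout).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define STATE_OWNER_MASK 0x00000000ffffffffull /* who holds the drain lock */
#define STATE_DIRTY      0x0000000100000000ull /* a producer pushed items */

typedef struct { _Atomic uint64_t state; } fake_queue_s;

/* Returns true if the lock was released, false if it was renewed because a
 * concurrent producer set DIRTY and the drainer must look at the list again. */
static bool
fake_drain_try_unlock(fake_queue_s *q)
{
	uint64_t old_state = atomic_load_explicit(&q->state, memory_order_relaxed);
	uint64_t new_state;

	do {
		if (old_state & STATE_DIRTY) {
			/* Renew: clear only DIRTY; acquire pairs with the producer's
			 * release so the freshly pushed items are visible. */
			atomic_fetch_and_explicit(&q->state, ~STATE_DIRTY,
					memory_order_acquire);
			return false;
		}
		new_state = old_state & ~STATE_OWNER_MASK; /* drop ownership */
	} while (!atomic_compare_exchange_weak_explicit(&q->state, &old_state,
			new_state, memory_order_release, memory_order_relaxed));
	return true;
}
```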
+ _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } - _dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL); + return old_state; } - #pragma mark - #pragma mark os_mpsc_queue @@ -1294,7 +1281,7 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \ _tail->_o_next = NULL; \ _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \ - if (fastpath(_prev)) { \ + if (likely(_prev)) { \ os_atomic_store2o(_prev, _o_next, _head, relaxed); \ } \ (_prev == NULL); \ @@ -1314,20 +1301,22 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, // Single Consumer calls, can NOT be used safely concurrently // -#define os_mpsc_get_head(q, _ns) ({ \ - os_mpsc_node_type(q, _ns) _head; \ - _dispatch_wait_until(_head = (q)->_ns##_head); \ - _head; \ - }) +#define os_mpsc_get_head(q, _ns) \ + _dispatch_wait_until(os_atomic_load2o(q, _ns##_head, dependency)) + +#define os_mpsc_get_next(_n, _o_next) \ + _dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency)) #define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \ typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \ + os_mpsc_node_type(_q, _ns) _head = (head), _n; \ + _n = os_atomic_load2o(_head, _o_next, dependency); \ os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ /* 22708742: set tail to NULL with release, so that NULL write */ \ /* to head above doesn't clobber head from concurrent enqueuer */ \ - if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \ - _dispatch_wait_until(_n = fastpath(_head->_o_next)); \ + if (unlikely(!_n && \ + !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release))) { \ + _n = os_mpsc_get_next(_head, _o_next); \ os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ } \ _n; \ @@ -1336,17 +1325,17 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \ typeof(q) _q = (q); \ os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \ - if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \ - _dispatch_wait_until(_n = _q->_ns##_head); \ - _head->_o_next = _n; \ + if (unlikely(!_n && \ + !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \ + _n = os_mpsc_get_head(q, _ns); \ + os_atomic_store2o(_head, _o_next, _n, relaxed); \ } \ os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ }) #define os_mpsc_capture_snapshot(q, _ns, tail) ({ \ typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head; \ - _dispatch_wait_until(_head = _q->_ns##_head); \ + os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \ os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \ /* 22708742: set tail to NULL with release, so that NULL write */ \ /* to head above doesn't clobber head from concurrent enqueuer */ \ @@ -1357,17 +1346,17 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ os_unqualified_pointer_type(head) _head = (head), _n = NULL; \ if (_head != (tail)) { \ - _dispatch_wait_until(_n = _head->_o_next); \ + _n = os_mpsc_get_next(_head, _o_next); \ }; \ _n; }) #define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \ typeof(q) _q = (q); \ os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \ - _tail->_o_next = NULL; \ - if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \ - _dispatch_wait_until(_n = _q->_ns##_head); \ - _tail->_o_next = _n; \ + os_atomic_store2o(_tail, 
_o_next, NULL, relaxed); \ + if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \ + _n = os_mpsc_get_head(q, _ns); \ + os_atomic_store2o(_tail, _o_next, _n, relaxed); \ } \ os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ }) @@ -1377,13 +1366,13 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, dispatch_qos_t qos) { dispatch_lock_owner owner; if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) { return true; } - _dispatch_wqthread_override_start_check_owner(owner, pp, + _dispatch_wqthread_override_start_check_owner(owner, qos, &dq->dq_sidelock.dul_lock); return false; } @@ -1403,7 +1392,9 @@ _dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq) return true; } // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + // Since we don't know which override QoS was used, use MAINTENANCE + // as a marker for _dispatch_reset_basepri_override() + _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE); return false; } @@ -1413,7 +1404,9 @@ _dispatch_queue_sidelock_unlock(dispatch_queue_t dq) { if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) { // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + // Since we don't know which override QoS was used, use MAINTENANCE + // as a marker for _dispatch_reset_basepri_override() + _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE); } } @@ -1473,141 +1466,101 @@ _dispatch_queue_push_update_tail_list(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_update_head(dispatch_queue_t dq, - struct dispatch_object_s *head, bool retained) + struct dispatch_object_s *head) { - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { - dispatch_assert(!retained); - // Lie about "retained" here, it generates better assembly in this - // hotpath, and _dispatch_root_queue_wakeup knows to ignore this - // fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH. - // - // We need to bypass the retain below because pthread root queues - // are not global and retaining them would be wrong. - // - // We should eventually have a typeflag for "POOL" kind of root queues. - retained = true; - } - // The queue must be retained before dq_items_head is written in order - // to ensure that the reference is still valid when _dispatch_queue_wakeup - // is called. Otherwise, if preempted between the assignment to - // dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the - // queue may release the last reference to the queue when invoked by - // _dispatch_queue_drain. 
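The os_mpsc_* macros above implement an intrusive multi-producer/single-consumer list: producers publish by exchanging the tail and then linking the previous tail, and the single consumer may have to wait for that link to appear. A freestanding sketch of the same algorithm with C11 atomics (hypothetical node/queue types, no os_atomic wrappers):

```
/* Freestanding MPSC list sketch (hypothetical node/queue types). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct node_s {
	struct node_s *_Atomic next;
} node_s;

typedef struct {
	node_s *_Atomic head;
	node_s *_Atomic tail;
} mpsc_s;

/* Multi-producer push: exchange the tail, then link the previous tail.
 * Returns true when the list was empty (the caller then publishes head,
 * which is what os_mpsc_push_update_head does above). */
static bool
mpsc_push(mpsc_s *q, node_s *n)
{
	atomic_store_explicit(&n->next, NULL, memory_order_relaxed);
	node_s *prev = atomic_exchange_explicit(&q->tail, n, memory_order_release);
	if (prev) {
		atomic_store_explicit(&prev->next, n, memory_order_relaxed);
		return false;
	}
	return true;
}

/* Single-consumer pop of a known head: if head->next isn't linked yet the
 * producer is between its two stores, so spin until the link appears (the
 * role _dispatch_wait_until plays in the macros above). */
static node_s *
mpsc_pop_head(mpsc_s *q, node_s *head)
{
	node_s *next = atomic_load_explicit(&head->next, memory_order_acquire);
	atomic_store_explicit(&q->head, next, memory_order_relaxed);
	if (!next) {
		node_s *expected = head;
		if (!atomic_compare_exchange_strong_explicit(&q->tail, &expected,
				NULL, memory_order_release, memory_order_relaxed)) {
			do {
				next = atomic_load_explicit(&head->next,
						memory_order_acquire);
			} while (!next);
			atomic_store_explicit(&q->head, next, memory_order_relaxed);
		}
	}
	return next;
}
```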
- if (!retained) _dispatch_retain(dq); os_mpsc_push_update_head(dq, dq_items, head); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) +_dispatch_root_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail, unsigned int n) { struct dispatch_object_s *head = _head._do, *tail = _tail._do; - bool override = _dispatch_queue_need_override_retain(dq, pp); - dispatch_queue_flags_t flags; - if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) { - _dispatch_queue_push_update_head(dq, head, override); - if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) { - return _dispatch_queue_push_list_slow(dq, n); - } - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; - } else if (override) { - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; - } else { - return; + if (unlikely(_dispatch_queue_push_update_tail_list(dq, head, tail))) { + _dispatch_queue_push_update_head(dq, head); + return _dispatch_global_queue_poke(dq, n); } - dx_wakeup(dq, pp, flags); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) + dispatch_qos_t qos) { struct dispatch_object_s *tail = _tail._do; - bool override = _dispatch_queue_need_override(dq, pp); - if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { - // when SLOW_WAITER is set, we borrow the reference of the caller - if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { - _dispatch_queue_push_update_head(dq, tail, true); - flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH; - } else if (override) { - flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING; - } else { - flags = DISPATCH_WAKEUP_SLOW_WAITER; - } + dispatch_wakeup_flags_t flags = 0; + // If we are going to call dx_wakeup(), the queue must be retained before + // the item we're pushing can be dequeued, which means: + // - before we exchange the tail if we may have to override + // - before we set the head if we made the queue non empty. + // Otherwise, if preempted between one of these and the call to dx_wakeup() + // the blocks submitted to the queue may release the last reference to the + // queue when invoked by _dispatch_queue_drain. 
+ bool overriding = _dispatch_queue_need_override_retain(dq, qos); + if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { + if (!overriding) _dispatch_retain(dq); + _dispatch_queue_push_update_head(dq, tail); + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; + } else if (overriding) { + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; } else { - if (override) _dispatch_retain(dq); - if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { - _dispatch_queue_push_update_head(dq, tail, override); - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; - } else if (override) { - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; - } else { - return; - } + return; } - return dx_wakeup(dq, pp, flags); + return dx_wakeup(dq, qos, flags); } -struct _dispatch_identity_s { - pthread_priority_t old_pp; -}; - DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di, - pthread_priority_t pp) +static inline dispatch_priority_t +_dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq) { - // assumed_rq was set by the caller, we need to fake the priorities - dispatch_queue_t assumed_rq = _dispatch_queue_get_current(); - + dispatch_priority_t old_dbp = _dispatch_get_basepri(); dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); + _dispatch_reset_basepri(assumed_rq->dq_priority); + _dispatch_queue_set_current(assumed_rq); + return old_dbp; +} - di->old_pp = _dispatch_get_defaultpriority(); - - if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { - if (!pp) { - pp = _dispatch_get_priority(); - // _dispatch_root_queue_drain_deferred_item() may turn a manager - // thread into a regular root queue, and we must never try to - // restore the manager flag once we became a regular work queue - // thread. - pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } - if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) > - (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_tid_self(), pp); - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); - } - } - _dispatch_reset_defaultpriority(assumed_rq->dq_priority); +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_root_queue_allows_wlh_for_queue(dispatch_queue_t rq, + dispatch_queue_class_t dqu) +{ + // This will discard: + // - queues already tagged with the global wlh + // - concurrent queues (width != 1) + // - non overcommit queues, which includes pthread root queues. 
+ return dqu._dq->dq_wlh != DISPATCH_WLH_GLOBAL && dqu._dq->dq_width == 1 && + (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di) +static inline dispatch_wlh_t +_dispatch_root_queue_wlh_for_queue(dispatch_queue_t rq, + dispatch_queue_class_t dqu) { - _dispatch_reset_defaultpriority(di->old_pp); + if (likely(_dispatch_root_queue_allows_wlh_for_queue(rq, dqu))) { + return (dispatch_wlh_t)dqu._dq; + } + return DISPATCH_WLH_GLOBAL; } -typedef dispatch_queue_t +typedef dispatch_queue_wakeup_target_t _dispatch_queue_class_invoke_handler_t(dispatch_object_t, - dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **); + dispatch_invoke_context_t dic, dispatch_invoke_flags_t, + uint64_t *owned); DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_class_invoke(dispatch_object_t dou, - dispatch_invoke_flags_t flags, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, _dispatch_queue_class_invoke_handler_t invoke) { dispatch_queue_t dq = dou._dq; - struct dispatch_object_s *dc = NULL; - dispatch_queue_t tq = NULL; + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; uint64_t dq_state, to_unlock = 0; - bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING); - bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING); + bool owning = !(flags & DISPATCH_INVOKE_STEALING); + bool overriding = (flags & DISPATCH_INVOKE_OVERRIDING); // When called from a plain _dispatch_queue_drain: // overriding = false @@ -1618,37 +1571,44 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, // owning depends on whether the override embedded the queue or steals DISPATCH_COMPILER_CAN_ASSUME(owning || overriding); - if (owning) { + if (likely(owning)) { dq->do_next = DISPATCH_OBJECT_LISTLESS; } to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state); if (likely(to_unlock)) { - struct _dispatch_identity_s di; - pthread_priority_t old_dp; - -drain_pending_barrier: - if (overriding) { - _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", - _dispatch_tid_self(), _dispatch_get_defaultpriority()); - _dispatch_root_queue_identity_assume(&di, 0); - } - + dispatch_priority_t old_dbp; if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { - pthread_priority_t op, dp; - - old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); - op = dq->dq_override; - if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_tid_self(), op); - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); + if (unlikely(overriding)) { + _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%x", + _dispatch_tid_self(), _dispatch_get_basepri()); + } + old_dbp = _dispatch_set_basepri(dq->dq_priority); + dispatch_wlh_t wlh = _dispatch_get_wlh(); + if (unlikely(dq->dq_wlh != wlh)) { + if (unlikely(dq->dq_wlh)) { + _dispatch_ktrace3(DISPATCH_PERF_wlh_change, dq, + dq->dq_wlh, wlh); + if (!(_dispatch_queue_atomic_flags_set_orig(dq, + DQF_WLH_CHANGED) & DQF_WLH_CHANGED)) { + _dispatch_bug_deprecated("Changing target queue " + "hierarchy after object has started executing"); + } + } + dq->dq_wlh = wlh; +#if DISPATCH_ENFORCE_STATIC_WLH_HIERARCHY + _dispatch_queue_atomic_flags_clear(dq, DQF_LEGACY); +#endif } + } else { + old_dbp = 0; } flags = _dispatch_queue_merge_autorelease_frequency(dq, flags); attempt_running_slow_head: - tq = invoke(dq, flags, &to_unlock, &dc); - if (slowpath(tq)) { + tq = invoke(dq, dic, flags, &to_unlock); + dispatch_assert(tq != DISPATCH_QUEUE_WAKEUP_TARGET); + if (unlikely(tq != DISPATCH_QUEUE_WAKEUP_NONE && + tq != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT)) { // Either dc is set, which is a deferred invoke case // // or only tq is and it means a reenqueue is required, because of: @@ -1657,74 +1617,46 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, // In both cases, we want to bypass the check for DIRTY. // That may cause us to leave DIRTY in place but all drain lock // acquirers clear it - } else { - if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) { + } else if (!_dispatch_queue_drain_try_unlock(dq, to_unlock, + tq == DISPATCH_QUEUE_WAKEUP_NONE)) { + tq = _dispatch_queue_get_current(); + if (dx_hastypeflag(tq, QUEUE_ROOT) || !owning) { goto attempt_running_slow_head; } + } else { to_unlock = 0; - } - if (overriding) { - _dispatch_root_queue_identity_restore(&di); + tq = NULL; } if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { - _dispatch_reset_defaultpriority(old_dp); - } - } else if (overriding) { - uint32_t owner = _dq_state_drain_owner(dq_state); - pthread_priority_t p = dq->dq_override; - if (owner && p) { - _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx", - owner, p); - _dispatch_wqthread_override_start_check_owner(owner, p, - &dq->dq_state_lock); + _dispatch_reset_basepri(old_dbp); } } - - if (owning) { + if (likely(owning)) { _dispatch_introspection_queue_item_complete(dq); } - if (tq && dc) { - return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc); + if (tq && dic->dic_deferred) { + return _dispatch_queue_drain_deferred_invoke(dq, dic, flags, to_unlock); } if (tq) { - bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current()); uint64_t old_state, new_state; os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - to_unlock; - if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) && - _dq_state_has_pending_barrier(new_state)) { - new_state += DISPATCH_QUEUE_IN_BARRIER; - new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; - new_state -= DISPATCH_QUEUE_PENDING_BARRIER; - new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - } else { - new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); - if (_dq_state_should_wakeup(new_state)) { - // drain was not interupted for suspension - // we will reenqueue right away, just put ENQUEUED back - new_state |= DISPATCH_QUEUE_ENQUEUED; - new_state |= DISPATCH_QUEUE_DIRTY; - } + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK(old_state - to_unlock); + new_state |= 
DISPATCH_QUEUE_DIRTY; + if (_dq_state_should_wakeup(new_state)) { + // drain was not interupted for suspension + // we will reenqueue right away, just put ENQUEUED back + new_state |= DISPATCH_QUEUE_ENQUEUED; } }); - if (_dq_state_is_in_barrier(new_state)) { - // we did a "full width upgrade" and just added IN_BARRIER - // so adjust what we own and drain again - to_unlock &= DISPATCH_QUEUE_ENQUEUED; - to_unlock += DISPATCH_QUEUE_IN_BARRIER; - to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - goto drain_pending_barrier; - } - if (_dq_state_has_override(old_state)) { + if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } - if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { - return _dispatch_queue_push(tq, dq, 0); + return dx_push(tq, dq, _dq_state_max_qos(old_state)); } } @@ -1739,7 +1671,7 @@ _dispatch_queue_class_probe(dispatch_queue_class_t dqu) // seq_cst wrt atomic store to dq_state // seq_cst wrt atomic store to dq_flags tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered); - return slowpath(tail != NULL); + return unlikely(tail != NULL); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST @@ -1752,87 +1684,12 @@ _dispatch_is_in_root_queues_array(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline dispatch_queue_t -_dispatch_get_root_queue(qos_class_t priority, bool overcommit) -{ - if (overcommit) switch (priority) { - case _DISPATCH_QOS_CLASS_MAINTENANCE: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_BACKGROUND: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_UTILITY: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_DEFAULT: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_USER_INITIATED: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT]; - } else switch (priority) { - case _DISPATCH_QOS_CLASS_MAINTENANCE: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]; - case _DISPATCH_QOS_CLASS_BACKGROUND: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS]; - case _DISPATCH_QOS_CLASS_UTILITY: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS]; - case _DISPATCH_QOS_CLASS_DEFAULT: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS]; - case _DISPATCH_QOS_CLASS_USER_INITIATED: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS]; - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS]; - } - return NULL; -} - -#if HAVE_PTHREAD_WORKQUEUE_QOS -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline dispatch_queue_t -_dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit) +_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit) { - uint32_t idx; - - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - idx = (uint32_t)__builtin_ffs((int)pp); - if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] - .dq_priority)) { - // If kernel doesn't support maintenance, bottom 
bit is background. - // Shift to our idea of where background bit is. - idx++; - } - // ffs starts at 1, and account for the QOS_CLASS_SHIFT - // if pp is 0, idx is 0 or 1 and this will wrap to a value larger than - // DISPATCH_QOS_COUNT - idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1); - if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) { - DISPATCH_CLIENT_CRASH(pp, "Corrupted priority"); + if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) { + DISPATCH_CLIENT_CRASH(qos, "Corrupted priority"); } - return &_dispatch_root_queues[2 * idx + overcommit]; -} -#endif - -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline dispatch_queue_t -_dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit) -{ - bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - // root queues in _dispatch_root_queues are not overcommit for even indices - // and overcommit for odd ones, so fixing overcommit is either returning - // the same queue, or picking its neighbour in _dispatch_root_queues - if (overcommit && !rq_overcommit) { - return rq + 1; - } - if (!overcommit && rq_overcommit) { - return rq - 1; - } - return rq; + return &_dispatch_root_queues[2 * (qos - 1) + overcommit]; } DISPATCH_ALWAYS_INLINE @@ -1852,11 +1709,11 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_clear_bound_thread(dispatch_queue_t dq) { - uint64_t dq_state, value; + uint64_t old_state, new_state; dispatch_assert(_dispatch_queue_is_thread_bound(dq)); - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK(old_state); }); } @@ -1881,13 +1738,12 @@ _dispatch_set_pthread_root_queue_observer_hooks( #pragma mark dispatch_priority DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_get_defaultpriority(void) +static inline dispatch_priority_t +_dispatch_get_basepri(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific( - dispatch_defaultpriority_key); - return pp; + return (dispatch_priority_t)(uintptr_t)_dispatch_thread_getspecific( + dispatch_basepri_key); #else return 0; #endif @@ -1895,97 +1751,90 @@ _dispatch_get_defaultpriority(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_defaultpriority(pthread_priority_t pp) +_dispatch_reset_basepri(dispatch_priority_t dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + dispatch_priority_t old_dbp = _dispatch_get_basepri(); // If an inner-loop or'd in the override flag to the per-thread priority, // it needs to be propagated up the chain. 
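The new _dispatch_get_root_queue relies on the root-queue array being laid out as QoS-ordered pairs, non-overcommit at even indices and overcommit at odd ones, which is what the old switch above enumerated explicitly. A worked illustration of the `2 * (qos - 1) + overcommit` mapping, assuming QoS values are numbered 1 through 6 from maintenance to user-interactive:

```
/* Assumed QoS numbering (1..6): maintenance, background, utility, default,
 * user-initiated, user-interactive. Each QoS owns a pair of slots:
 *   index 2*(qos-1)     -> plain root queue
 *   index 2*(qos-1) + 1 -> overcommit root queue
 * e.g. default (4) -> slots 6 and 7, user-interactive (6) -> slots 10 and 11.
 */
static inline unsigned
root_queue_index(unsigned qos, unsigned overcommit)
{
	return 2 * (qos - 1) + (overcommit ? 1 : 0);
}
```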
- pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG; - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); + dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + dbp |= (old_dbp & DISPATCH_PRIORITY_OVERRIDE_MASK); + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); #else - (void)pp; + (void)dbp; #endif } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_set_defaultpriority_override(void) +static inline dispatch_qos_t +_dispatch_get_basepri_override_qos_floor(void) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); - pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG; - - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); -#endif + dispatch_priority_t dbp = _dispatch_get_basepri(); + dispatch_qos_t qos = _dispatch_priority_qos(dbp); + dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp); + dispatch_qos_t qos_floor = MAX(qos, oqos); + return qos_floor ? qos_floor : DISPATCH_QOS_SATURATED; } DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_reset_defaultpriority_override(void) +static inline void +_dispatch_set_basepri_override_qos(dispatch_qos_t qos) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); - pthread_priority_t pp = old_pp & - ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG); - - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); - return unlikely(pp != old_pp); + dispatch_priority_t dbp = _dispatch_get_basepri(); + if (_dispatch_priority_override_qos(dbp) >= qos) return; + dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + dbp |= qos << DISPATCH_PRIORITY_OVERRIDE_SHIFT; + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); +#else + (void)qos; #endif - return false; } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, - dispatch_queue_t tq) +static inline bool +_dispatch_reset_basepri_override(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; - const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; - const dispatch_priority_t defaultqueue_flag = - _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority; - if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) && - (tqp & rootqueue_flag)) { - if (tqp & defaultqueue_flag) { - dq->dq_priority = 0; - } else { - dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; - } + dispatch_priority_t dbp = _dispatch_get_basepri(); + dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp); + if (oqos) { + dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); + return oqos != DISPATCH_QOS_SATURATED; } -#else - (void)dq; (void)tq; #endif + return false; } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp) +static inline dispatch_priority_t +_dispatch_set_basepri(dispatch_priority_t dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - const pthread_priority_t default_priority_preserved_flags = - _PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); - if (old_pp) { - pthread_priority_t flags, defaultqueue, basepri; - flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - defaultqueue = (old_pp & 
_PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!pp) { - flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue; - pp = basepri; - } else if (pp < basepri && !defaultqueue) { // rdar://16349734 - pp = basepri; + const dispatch_priority_t preserved_mask = + DISPATCH_PRIORITY_OVERRIDE_MASK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_priority_t old_dbp = _dispatch_get_basepri(); + if (old_dbp) { + dispatch_priority_t flags, defaultqueue, basepri; + flags = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); + defaultqueue = (old_dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); + basepri = old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK; + dbp &= DISPATCH_PRIORITY_REQUESTED_MASK; + if (!dbp) { + flags = DISPATCH_PRIORITY_FLAG_INHERIT | defaultqueue; + dbp = basepri; + } else if (dbp < basepri && !defaultqueue) { // rdar://16349734 + dbp = basepri; } - pp |= flags | (old_pp & default_priority_preserved_flags); + dbp |= flags | (old_dbp & preserved_mask); + } else { + dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; } - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); - if (new_pp) *new_pp = pp; - return old_pp; + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); + return old_dbp; #else - (void)pp; (void)new_pp; + (void)dbp; return 0; #endif } @@ -1995,25 +1844,24 @@ static inline pthread_priority_t _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t defaultpri = _dispatch_get_defaultpriority(); - bool enforce, inherited, defaultqueue; - enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || + dispatch_priority_t inherited, defaultqueue, dbp = _dispatch_get_basepri(); + pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp); + bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG); - inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG); - defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + inherited = (dbp & DISPATCH_PRIORITY_FLAG_INHERIT); + defaultqueue = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; if (!pp) { - return defaultpri; + return basepp; } else if (defaultqueue) { // rdar://16349734 return pp; - } else if (pp < defaultpri) { - return defaultpri; + } else if (pp < basepp) { + return basepp; } else if (enforce || inherited) { return pp; } else { - return defaultpri; + return basepp; } #else (void)pp; (void)flags; @@ -2022,22 +1870,47 @@ _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_priority_inherit_from_root_queue(pthread_priority_t pp, +static inline void +_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, + dispatch_queue_t tq) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + const dispatch_priority_t rootqueue_flag = DISPATCH_PRIORITY_FLAG_ROOTQUEUE; + const dispatch_priority_t inherited_flag = DISPATCH_PRIORITY_FLAG_INHERIT; + const dispatch_priority_t defaultqueue_flag = + DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; + dispatch_priority_t pri = dq->dq_priority, tpri = tq->dq_priority; + + if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) && + (tpri & rootqueue_flag)) { + if (tpri & defaultqueue_flag) { + dq->dq_priority = 0; + } else { + dq->dq_priority = (tpri & ~rootqueue_flag) | inherited_flag; + } + } +#else + (void)dq; (void)tq; +#endif +} + 
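The base-priority helpers above pack a requested QoS and an override QoS into a single dispatch_priority_t, and _dispatch_set_basepri_override_qos only ever raises the recorded override. A reduced sketch of that packing with made-up masks and shifts (the real DISPATCH_PRIORITY_* layout differs):

```
#include <stdint.h>

typedef uint32_t fake_priority_t;

/* Made-up layout: low byte = requested QoS, next byte = override QoS. */
#define FAKE_REQUESTED_MASK 0x000000ffu
#define FAKE_OVERRIDE_SHIFT 8
#define FAKE_OVERRIDE_MASK  0x0000ff00u

static inline uint32_t
fake_requested_qos(fake_priority_t p)
{
	return p & FAKE_REQUESTED_MASK;
}

static inline uint32_t
fake_override_qos(fake_priority_t p)
{
	return (p & FAKE_OVERRIDE_MASK) >> FAKE_OVERRIDE_SHIFT;
}

/* Record an override, but never lower one that is already recorded,
 * mirroring the early return in _dispatch_set_basepri_override_qos. */
static inline fake_priority_t
fake_set_override_qos(fake_priority_t p, uint32_t qos)
{
	if (fake_override_qos(p) >= qos) return p;
	p &= ~FAKE_OVERRIDE_MASK;
	p |= qos << FAKE_OVERRIDE_SHIFT;
	return p;
}
```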
+DISPATCH_ALWAYS_INLINE +static inline dispatch_priority_t +_dispatch_priority_inherit_from_root_queue(dispatch_priority_t pri, dispatch_queue_t rq) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK; - pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - pthread_priority_t defaultqueue = - rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_priority_t p = pri & DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_priority_t rqp = rq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_priority_t defaultqueue = + rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; if (!p || (!defaultqueue && p < rqp)) { p = rqp | defaultqueue; } - return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + return p | (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); #else - (void)rq; (void)pp; + (void)rq; (void)pri; return 0; #endif } @@ -2075,7 +1948,7 @@ _dispatch_priority_compute_update(pthread_priority_t pp) pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; if (unlikely(cur_priority & unbind)) { // else we always need an update if the NEEDS_UNBIND flag is set - // the slowpath in _dispatch_set_priority_and_voucher_slow() will + // the slow path in _dispatch_set_priority_and_voucher_slow() will // adjust the priority further with the proper overcommitness return pp ? pp : (cur_priority & ~unbind); } else { @@ -2089,7 +1962,7 @@ _dispatch_priority_compute_update(pthread_priority_t pp) DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t _dispatch_set_priority_and_voucher(pthread_priority_t pp, - voucher_t v, _dispatch_thread_set_self_t flags) + voucher_t v, dispatch_thread_set_self_t flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS pp = _dispatch_priority_compute_update(pp); @@ -2118,7 +1991,7 @@ _dispatch_set_priority_and_voucher(pthread_priority_t pp, DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t _dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp, - voucher_t v, _dispatch_thread_set_self_t flags) + voucher_t v, dispatch_thread_set_self_t flags) { pthread_priority_t p = 0; if (pp != DISPATCH_NO_PRIORITY) { @@ -2138,7 +2011,7 @@ _dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags) +_dispatch_reset_voucher(voucher_t v, dispatch_thread_set_self_t flags) { flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE; (void)_dispatch_set_priority_and_voucher(0, v, flags); @@ -2146,28 +2019,23 @@ _dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags) DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp) +_dispatch_queue_need_override(dispatch_queue_class_t dqu, dispatch_qos_t qos) { - // global queues have their override set to DISPATCH_SATURATED_OVERRIDE - // which makes this test always return false for them. 
- return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_received_override(dispatch_queue_class_t dqu, - pthread_priority_t pp) -{ - dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE); - return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + uint64_t dq_state = os_atomic_load2o(dqu._dq, dq_state, relaxed); + // dq_priority "override qos" contains the priority at which the queue + // is already running for thread-bound queues. + // For non thread-bound queues, the qos of the queue may not be observed + // when the first work item is dispatched synchronously. + return _dq_state_max_qos(dq_state) < qos && + _dispatch_priority_override_qos(dqu._dq->dq_priority) < qos; } DISPATCH_ALWAYS_INLINE static inline bool _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu, - pthread_priority_t pp) + dispatch_qos_t qos) { - if (_dispatch_queue_need_override(dqu, pp)) { + if (_dispatch_queue_need_override(dqu, qos)) { _os_object_retain_internal_inline(dqu._oq->_as_os_obj); return true; } @@ -2175,76 +2043,30 @@ _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu, } DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, - dispatch_priority_t new_op) -{ - dispatch_priority_t old_op; - new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (!new_op) return false; - os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, { - if (new_op <= old_op) { - os_atomic_rmw_loop_give_up(return false); - } - }); - return true; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_override_priority(dispatch_queue_class_t dqu, - pthread_priority_t *pp, dispatch_wakeup_flags_t *flags) -{ - os_mpsc_queue_t oq = dqu._oq; - dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - dispatch_priority_t o; - - _dispatch_assert_is_valid_qos_override(np); - if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) { - qp = 0; - } else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) { - // when a queue is used as a lock its priority doesn't count - } else if (np < qp) { - // for asynchronous workitems, queue priority is the floor for overrides - np = qp; - } - *flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS; - - // this optimizes for the case when no update of the override is required - // os_atomic_rmw_loop2o optimizes for the case when the update happens, - // and can't be used. 
- o = os_atomic_load2o(oq, oq_override, relaxed); - do { - if (likely(np <= o)) break; - } while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed))); - - if (np <= o) { - *pp = o; - } else { - *flags |= DISPATCH_WAKEUP_OVERRIDING; - *pp = np; - } - if (o > qp) { - *flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN; +static inline dispatch_qos_t +_dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos) +{ + if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) { + return qos; } + // for asynchronous workitems, queue priority is the floor for overrides + return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority)); } DISPATCH_ALWAYS_INLINE -static inline dispatch_priority_t -_dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu, - bool qp_is_floor) +static inline dispatch_qos_t +_dispatch_queue_reset_max_qos(dispatch_queue_class_t dqu) { - os_mpsc_queue_t oq = dqu._oq; - dispatch_priority_t p = 0; - if (qp_is_floor) { - // thread bound queues floor their dq_override to their - // priority to avoid receiving useless overrides - p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - } - dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed); - dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE); - return (o > p) ? o : 0; + uint64_t old_state, new_state; + os_atomic_rmw_loop2o(dqu._dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE; + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(return DISPATCH_QOS_UNSPECIFIED); + } + }); + return _dq_state_max_qos(old_state); } DISPATCH_ALWAYS_INLINE @@ -2254,9 +2076,9 @@ _dispatch_priority_propagate(void) #if HAVE_PTHREAD_WORKQUEUE_QOS pthread_priority_t pp = _dispatch_get_priority(); pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp > _dispatch_user_initiated_priority) { + if (pp > _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED)) { // Cap QOS for propagation at user-initiated - pp = _dispatch_user_initiated_priority; + return _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED); } return pp; #else @@ -2271,13 +2093,72 @@ _dispatch_is_background_thread(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS pthread_priority_t pp = _dispatch_get_priority(); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - return pp && (pp <= _dispatch_background_priority); + return _dispatch_qos_is_background(_dispatch_qos_from_pp(pp)); #else return false; #endif } +#pragma mark - +#pragma mark dispatch_wlh_t + +static inline dispatch_wlh_t +_dispatch_queue_class_compute_wlh(dispatch_queue_class_t dqu) +{ + // TODO: combine with _dispatch_source_compute_kevent_priority + dispatch_queue_t dq = dqu._dq; + dispatch_queue_t tq = dq->do_targetq; + + while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { + if (tq->dq_wlh) { + return tq->dq_wlh; + } + dispatch_assert(!_dispatch_queue_is_thread_bound(tq)); + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { + // this queue may not be activated yet, so the queue graph may not + // have stabilized yet + return NULL; + } + if (unlikely(_dispatch_queue_is_legacy(tq))) { + if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) { + // we're not allowed to dereference tq->do_targetq + return NULL; + } + } + dq = tq; + tq = dq->do_targetq; + } + dispatch_assert(tq->dq_wlh); + return _dispatch_root_queue_wlh_for_queue(tq, dq); +} + +static inline void +_dispatch_queue_class_record_wlh_hierarchy(dispatch_queue_class_t dqu, + dispatch_wlh_t wlh) +{ + dispatch_queue_t dq = dqu._dq; + 
dispatch_queue_t tq = dq->do_targetq; + + dispatch_assert(wlh); + dispatch_assert(!dq->dq_wlh); + dq->dq_wlh = wlh; +#if DISPATCH_ENFORCE_STATIC_WLH_HIERARCHY + _dispatch_queue_atomic_flags_clear(dq, DQF_LEGACY); +#endif + while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { + if (tq->dq_wlh) { + return; + } + tq->dq_wlh = wlh; +#if DISPATCH_ENFORCE_STATIC_WLH_HIERARCHY + _dispatch_queue_atomic_flags_set_and_clear(tq, DQF_TARGETED,DQF_LEGACY); +#else + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); +#endif + tq = tq->do_targetq; + } +} + #pragma mark - #pragma mark dispatch_block_t @@ -2291,16 +2172,15 @@ _dispatch_block_has_private_data(const dispatch_block_t block) return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke); } -DISPATCH_ALWAYS_INLINE +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags) +_dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags) { - /* - * Generates better assembly than the actual readable test: - * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_FLAGS) - */ - flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS; - return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS; + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + return (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || + !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS); + } + return false; } DISPATCH_ALWAYS_INLINE @@ -2442,12 +2322,14 @@ _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov, _dispatch_continuation_free_to_cache_limit(dc1); } }); + _dispatch_perfmon_workitem_inc(); } DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq, - dispatch_invoke_flags_t flags) +_dispatch_continuation_pop_inline(dispatch_object_t dou, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + dispatch_queue_t dq) { dispatch_pthread_root_queue_observer_hooks_t observer_hooks = _dispatch_get_pthread_root_queue_observer_hooks(); @@ -2455,10 +2337,9 @@ _dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq, _dispatch_trace_continuation_pop(dq, dou); flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; if (_dispatch_object_has_vtable(dou)) { - dx_invoke(dou._do, flags); + dx_invoke(dou._do, dic, flags); } else { - voucher_t ov = dq->dq_override_voucher; - _dispatch_continuation_invoke_inline(dou, ov, flags); + _dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags); } if (observer_hooks) observer_hooks->queue_did_execute(dq); } @@ -2501,21 +2382,21 @@ _dispatch_continuation_priority_set(dispatch_continuation_t dc, } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_continuation_get_override_priority(dispatch_queue_t dq, +static inline dispatch_qos_t +_dispatch_continuation_override_qos(dispatch_queue_t dq, dispatch_continuation_t dc) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dispatch_qos_t dc_qos = _dispatch_qos_from_pp(dc->dc_priority); bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG; - pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); + bool defaultqueue = dq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY); - if (p && (enforce || 
!dqp || defaultqueue)) { - return p; + if (dc_qos && (enforce || !dq_qos || defaultqueue)) { + return dc_qos; } - return dqp; + return dq_qos; #else (void)dq; (void)dc; return 0; @@ -2559,6 +2440,36 @@ _dispatch_continuation_init(dispatch_continuation_t dc, _dispatch_continuation_voucher_set(dc, dqu, flags); } +#if HAVE_MACH +#pragma mark dispatch_mach_reply_refs_t + +// assumes low bit of mach port names is always set +#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + dmr->du_ident &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + mach_port_t reply_port = (mach_port_t)dmr->du_ident; + return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_port_t +_dispatch_mach_reply_get_reply_port(mach_port_t reply_port) +{ + return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0; +} + +#endif // HAVE_MACH + #endif // DISPATCH_PURE_C #endif /* __DISPATCH_INLINE_INTERNAL__ */ diff --git a/src/internal.h b/src/internal.h index 8934b2cb5..743d0b2c6 100644 --- a/src/internal.h +++ b/src/internal.h @@ -46,26 +46,26 @@ #endif #if TARGET_OS_MAC_DESKTOP -# define DISPATCH_HOST_SUPPORTS_OSX(x) \ +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) -# if !DISPATCH_HOST_SUPPORTS_OSX(101000) -# error "OS X hosts older than OS X 10.10 aren't supported anymore" -# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101100) +# error "OS X hosts older than OS X 10.11 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101000) #elif TARGET_OS_SIMULATOR -# define DISPATCH_HOST_SUPPORTS_OSX(x) \ +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) -# if !DISPATCH_HOST_SUPPORTS_OSX(101000) -# error "Simulator hosts older than OS X 10.10 aren't supported anymore" -# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101100) +# error "Simulator hosts older than OS X 10.11 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101000) #else -# define DISPATCH_HOST_SUPPORTS_OSX(x) 1 -# if __IPHONE_OS_VERSION_MIN_REQUIRED < 70000 -# error "iOS hosts older than iOS 7.0 aren't supported anymore" +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 +# if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 +# error "iOS hosts older than iOS 9.0 aren't supported anymore" # endif #endif #else // !__APPLE__ -#define DISPATCH_HOST_SUPPORTS_OSX(x) 0 +#define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 0 #endif // !__APPLE__ @@ -78,6 +78,9 @@ #if !defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC #define OS_VOUCHER_ACTIVITY_SPI 1 #endif +#if !defined(OS_VOUCHER_ACTIVITY_GENERATE_SWAPS) +#define OS_VOUCHER_ACTIVITY_GENERATE_SWAPS 0 +#endif #if !defined(OS_FIREHOSE_SPI) && TARGET_OS_MAC #define OS_FIREHOSE_SPI 1 #endif @@ -243,7 +246,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #if !TARGET_OS_WIN32 -#include #include #include #ifdef __ANDROID__ @@ -256,9 +258,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #include #endif -#if defined(__linux__) -#include -#endif #ifdef __BLOCKS__ #include @@ -309,6 +308,31 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); 
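The DISPATCH_MACH_REPLY_PORT_UNOWNED helpers earlier in this section tag ownership into the low bit of the reply port name, relying on the stated assumption that mach port names always have that bit set. The same tagging idea on a generic integer handle (illustrative names only):

```
#include <stdbool.h>
#include <stdint.h>

/* Low-bit tagging of a handle whose low bit is otherwise always set. */
#define HANDLE_UNOWNED_BIT 0x1u

static inline uint32_t
handle_store_unowned(uint32_t h) /* keep the low bit: not ours to destroy */
{
	return h ? (h | HANDLE_UNOWNED_BIT) : 0;
}

static inline uint32_t
handle_mark_owned(uint32_t h) /* clear the low bit: we own this handle */
{
	return h & ~HANDLE_UNOWNED_BIT;
}

static inline bool
handle_is_owned(uint32_t h)
{
	return h ? !(h & HANDLE_UNOWNED_BIT) : false;
}
```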
#define DISPATCH_CONCAT(x,y) DISPATCH_CONCAT1(x,y) #define DISPATCH_CONCAT1(x,y) x ## y +#define DISPATCH_COUNT_ARGS(...) DISPATCH_COUNT_ARGS1(, ## __VA_ARGS__, \ + _8, _7, _6, _5, _4, _3, _2, _1, _0) +#define DISPATCH_COUNT_ARGS1(z, a, b, c, d, e, f, g, h, cnt, ...) cnt + +#if BYTE_ORDER == LITTLE_ENDIAN +#define DISPATCH_STRUCT_LE_2(a, b) struct { a; b; } +#define DISPATCH_STRUCT_LE_3(a, b, c) struct { a; b; c; } +#define DISPATCH_STRUCT_LE_4(a, b, c, d) struct { a; b; c; d; } +#else +#define DISPATCH_STRUCT_LE_2(a, b) struct { b; a; } +#define DISPATCH_STRUCT_LE_3(a, b, c) struct { c; b; a; } +#define DISPATCH_STRUCT_LE_4(a, b, c, d) struct { d; c; b; a; } +#endif +#if __has_feature(c_startic_assert) +#define DISPATCH_UNION_ASSERT(alias, st) \ + _Static_assert(sizeof(struct { alias; }) == sizeof(st), "bogus union"); +#else +#define DISPATCH_UNION_ASSERT(alias, st) +#endif +#define DISPATCH_UNION_LE(alias, ...) \ + DISPATCH_UNION_ASSERT(alias, DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \ + DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)) \ + union { alias; DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \ + DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); } + // workaround 6368156 #ifdef NSEC_PER_SEC #undef NSEC_PER_SEC @@ -340,16 +364,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define unlikely(x) (!!(x)) #endif // __GNUC__ -#if BYTE_ORDER == LITTLE_ENDIAN -#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { a; b; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { a; b; c; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { a; b; c; d; } -#else -#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { b; a; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { c; b; a; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { d; c; b; a; } -#endif - #define _TAILQ_IS_ENQUEUED(elm, field) \ ((elm)->field.tqe_prev != NULL) #define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ @@ -557,13 +571,6 @@ static inline long _dispatch_assume_zero(long e, long line) { } \ } while (0) -#if DISPATCH_DEBUG -#if HAVE_MACH -DISPATCH_NOINLINE DISPATCH_USED -void dispatch_debug_machport(mach_port_t name, const char* str); -#endif -#endif - #if DISPATCH_DEBUG /* This is the private version of the deprecated dispatch_debug() */ DISPATCH_NONNULL2 DISPATCH_NOTHROW @@ -612,8 +619,14 @@ _dispatch_fork_becomes_unsafe(void) } } +#if DISPATCH_INTROSPECTION +#undef DISPATCH_PERF_MON +#define DISPATCH_PERF_MON 0 +#endif + /* #includes dependent on internal.h */ #include "shims.h" +#include "event/event_internal.h" // Older Mac OS X and iOS Simulator fallbacks @@ -637,20 +650,22 @@ typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t; #define HAVE_PTHREAD_WORKQUEUE_KEVENT 1 #endif + #ifndef PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK -#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_HOST_SUPPORTS_OSX(101200) +#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 1 #else #define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 0 #endif #endif // PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK -#if HAVE_MACH -#if !defined(MACH_NOTIFY_SEND_POSSIBLE) -#undef MACH_NOTIFY_SEND_POSSIBLE -#define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME +#ifndef HAVE_PTHREAD_WORKQUEUE_NARROWING +#if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define HAVE_PTHREAD_WORKQUEUE_NARROWING 0 +#else +#define HAVE_PTHREAD_WORKQUEUE_NARROWING 1 +#endif #endif -#endif // HAVE_MACH 
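DISPATCH_UNION_LE, added near the top of this hunk, counts its arguments to pick an endian-ordered struct and overlays it with a wide alias, guarding the layout with a static assert when the compiler supports it. A cut-down sketch of the same argument-counting and byte-order trick (EX_* names are illustrative, and compiler byte-order builtins stand in for BYTE_ORDER):

```
#include <stdint.h>

/* Reduced argument-counting + endian-ordered struct pattern (EX_* names
 * are illustrative, not the libdispatch macros). */
#define EX_COUNT_ARGS(...) EX_COUNT_ARGS1(__VA_ARGS__, _4, _3, _2, _1)
#define EX_COUNT_ARGS1(a, b, c, d, cnt, ...) cnt
#define EX_CONCAT1(x, y) x##y
#define EX_CONCAT(x, y) EX_CONCAT1(x, y)

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define EX_STRUCT_LE_2(a, b) struct { a; b; }
#else
#define EX_STRUCT_LE_2(a, b) struct { b; a; }
#endif

/* Overlay a 64-bit alias on two 32-bit fields whose in-memory order keeps
 * "low" in the low half of the alias on either endianness. */
#define EX_UNION_LE(alias, ...) \
	union { alias; EX_CONCAT(EX_STRUCT_LE, \
			EX_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); }

typedef EX_UNION_LE(uint64_t whole,
		uint32_t low, uint32_t high) example_state_u;

_Static_assert(sizeof(example_state_u) == sizeof(uint64_t),
		"alias and struct must overlay exactly");
```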
#ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS @@ -658,112 +673,19 @@ typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t; #endif #endif // EVFILT_MEMORYSTATUS -#if defined(EVFILT_VM) && !DISPATCH_USE_MEMORYSTATUS -#ifndef DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE 1 -#endif -#endif // EVFILT_VM - #if TARGET_OS_SIMULATOR #undef DISPATCH_USE_MEMORYPRESSURE_SOURCE #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 0 -#undef DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_USE_VM_PRESSURE_SOURCE 0 #endif // TARGET_OS_SIMULATOR #if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1 -#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 #endif #if DISPATCH_USE_MEMORYPRESSURE_SOURCE extern bool _dispatch_memory_warn; #endif -#if !defined(NOTE_LEEWAY) -#undef NOTE_LEEWAY -#define NOTE_LEEWAY 0 -#undef NOTE_CRITICAL -#define NOTE_CRITICAL 0 -#undef NOTE_BACKGROUND -#define NOTE_BACKGROUND 0 -#endif // NOTE_LEEWAY - -#if !defined(NOTE_FUNLOCK) -#define NOTE_FUNLOCK 0x00000100 -#endif - -#if !defined(NOTE_MACH_CONTINUOUS_TIME) -#define NOTE_MACH_CONTINUOUS_TIME 0 -#endif // NOTE_MACH_CONTINUOUS_TIME - -#if !defined(HOST_NOTIFY_CALENDAR_SET) -#define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE -#endif // HOST_NOTIFY_CALENDAR_SET - -#if !defined(HOST_CALENDAR_SET_REPLYID) -#define HOST_CALENDAR_SET_REPLYID 951 -#endif // HOST_CALENDAR_SET_REPLYID - -#if HAVE_DECL_NOTE_REAP -#if defined(NOTE_REAP) && defined(__APPLE__) -#undef NOTE_REAP -#define NOTE_REAP 0x10000000 // -#endif -#endif // HAVE_DECL_NOTE_REAP - -#ifndef VQ_QUOTA -#undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982 -#endif // VQ_QUOTA - -#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \ - !DISPATCH_HOST_SUPPORTS_OSX(101200) -#undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN -#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0 -#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN - -#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \ - !DISPATCH_HOST_SUPPORTS_OSX(101200) -#undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL -#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0 -#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL - -#if !defined(EV_UDATA_SPECIFIC) || !DISPATCH_HOST_SUPPORTS_OSX(101100) -#undef DISPATCH_USE_EV_UDATA_SPECIFIC -#define DISPATCH_USE_EV_UDATA_SPECIFIC 0 -#elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC) -#define DISPATCH_USE_EV_UDATA_SPECIFIC 1 -#endif // EV_UDATA_SPECIFIC - -#if !DISPATCH_USE_EV_UDATA_SPECIFIC -#undef EV_UDATA_SPECIFIC -#define EV_UDATA_SPECIFIC 0 -#undef EV_VANISHED -#define EV_VANISHED 0 -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC - -#ifndef EV_VANISHED -#define EV_VANISHED 0x0200 -#endif - -#ifndef DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS -#if TARGET_OS_MAC && !DISPATCH_HOST_SUPPORTS_OSX(101200) -// deferred delete can return bogus ENOENTs on older kernels -#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1 -#else -#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0 -#endif -#endif - -#if !defined(EV_SET_QOS) || !DISPATCH_HOST_SUPPORTS_OSX(101100) -#undef DISPATCH_USE_KEVENT_QOS -#define DISPATCH_USE_KEVENT_QOS 0 -#elif !defined(DISPATCH_USE_KEVENT_QOS) -#define DISPATCH_USE_KEVENT_QOS 1 -#endif // EV_SET_QOS - #if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(KEVENT_FLAG_WORKQ) && \ - DISPATCH_USE_EV_UDATA_SPECIFIC && DISPATCH_USE_KEVENT_QOS && \ - DISPATCH_HOST_SUPPORTS_OSX(101200) && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) && \ 
!defined(DISPATCH_USE_KEVENT_WORKQUEUE) #define DISPATCH_USE_KEVENT_WORKQUEUE 1 #endif @@ -774,48 +696,18 @@ extern bool _dispatch_memory_warn; #define DISPATCH_USE_MGR_THREAD 1 #endif -#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_EV_UDATA_SPECIFIC && \ - DISPATCH_HOST_SUPPORTS_OSX(101200) && \ +#if DISPATCH_USE_KEVENT_WORKQUEUE && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) && \ !defined(DISPATCH_USE_EVFILT_MACHPORT_DIRECT) #define DISPATCH_USE_EVFILT_MACHPORT_DIRECT 1 #endif -#ifndef MACH_SEND_OVERRIDE -#define MACH_SEND_OVERRIDE 0x00000020 -typedef unsigned int mach_msg_priority_t; -#define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0) -#endif // MACH_SEND_OVERRIDE - #if (!DISPATCH_USE_EVFILT_MACHPORT_DIRECT || DISPATCH_DEBUG) && \ !defined(DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK) #define DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK 1 #endif -#if DISPATCH_USE_KEVENT_QOS -typedef struct kevent_qos_s _dispatch_kevent_qos_s; -typedef typeof(((struct kevent_qos_s*)NULL)->qos) _dispatch_kevent_priority_t; -#else // DISPATCH_USE_KEVENT_QOS -#ifndef KEVENT_FLAG_IMMEDIATE -#define KEVENT_FLAG_NONE 0x00 -#define KEVENT_FLAG_IMMEDIATE 0x01 -#define KEVENT_FLAG_ERROR_EVENTS 0x02 -#endif // KEVENT_FLAG_IMMEDIATE -typedef struct kevent64_s _dispatch_kevent_qos_s; -#define kevent_qos(_kq, _changelist, _nchanges, _eventlist, _nevents, \ - _data_out, _data_available, _flags) \ - ({ unsigned int _f = (_flags); _dispatch_kevent_qos_s _kev_copy; \ - const _dispatch_kevent_qos_s *_cl = (_changelist); \ - int _n = (_nchanges); const struct timespec _timeout_immediately = {}; \ - dispatch_static_assert(!(_data_out) && !(_data_available)); \ - if (_f & KEVENT_FLAG_ERROR_EVENTS) { \ - dispatch_static_assert(_n == 1); \ - _kev_copy = *_cl; _kev_copy.flags |= EV_RECEIPT; } \ - kevent64((_kq), _f & KEVENT_FLAG_ERROR_EVENTS ? &_kev_copy : _cl, _n, \ - (_eventlist), (_nevents), 0, \ - _f & KEVENT_FLAG_IMMEDIATE ? &_timeout_immediately : NULL); }) -#endif // DISPATCH_USE_KEVENT_QOS - #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) #ifndef DISPATCH_USE_SETNOSIGPIPE #define DISPATCH_USE_SETNOSIGPIPE 1 @@ -845,10 +737,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #ifndef DISPATCH_USE_GUARDED_FD #define DISPATCH_USE_GUARDED_FD 1 #endif -// change_fdguard_np() requires GUARD_DUP -#if DISPATCH_USE_GUARDED_FD && RDAR_11814513 -#define DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD 1 -#endif #endif // HAVE_SYS_GUARDED_H @@ -859,9 +747,15 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #endif #ifndef KDBG_CODE #define KDBG_CODE(...) 
0 +#define DBG_FUNC_START 0 +#define DBG_FUNC_END 0 #endif #define DISPATCH_CODE(subclass, code) \ KDBG_CODE(DBG_DISPATCH, DISPATCH_TRACE_SUBCLASS_##subclass, code) +#define DISPATCH_CODE_START(subclass, code) \ + (DISPATCH_CODE(subclass, code) | DBG_FUNC_START) +#define DISPATCH_CODE_END(subclass, code) \ + (DISPATCH_CODE(subclass, code) | DBG_FUNC_END) #ifdef ARIADNEDBG_CODE #define ARIADNE_ENTER_DISPATCH_MAIN_CODE ARIADNEDBG_CODE(220, 2) #else @@ -875,15 +769,22 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #define DISPATCH_TRACE_SUBCLASS_VOUCHER 1 #define DISPATCH_TRACE_SUBCLASS_PERF 2 #define DISPATCH_TRACE_SUBCLASS_MACH_MSG 3 +#define DISPATCH_TRACE_SUBCLASS_PERF_MON 4 #define DISPATCH_PERF_non_leaf_retarget DISPATCH_CODE(PERF, 1) #define DISPATCH_PERF_post_activate_retarget DISPATCH_CODE(PERF, 2) #define DISPATCH_PERF_post_activate_mutation DISPATCH_CODE(PERF, 3) #define DISPATCH_PERF_delayed_registration DISPATCH_CODE(PERF, 4) #define DISPATCH_PERF_mutable_target DISPATCH_CODE(PERF, 5) +#define DISPATCH_PERF_strict_bg_timer DISPATCH_CODE(PERF, 6) +#define DISPATCH_PERF_wlh_change DISPATCH_CODE(PERF, 7) #define DISPATCH_MACH_MSG_hdr_move DISPATCH_CODE(MACH_MSG, 1) +#define DISPATCH_PERF_MON_worker_thread_start DISPATCH_CODE_START(PERF_MON, 1) +#define DISPATCH_PERF_MON_worker_thread_end DISPATCH_CODE_END(PERF_MON, 1) +#define DISPATCH_PERF_MON_worker_useless DISPATCH_CODE(PERF_MON, 2) + DISPATCH_ALWAYS_INLINE static inline void _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, @@ -930,18 +831,14 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define MACH_SEND_INVALID_VOUCHER 0x10000005 #endif -#if TARGET_OS_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100 -#undef VOUCHER_USE_MACH_VOUCHER -#define VOUCHER_USE_MACH_VOUCHER 0 -#endif #ifndef VOUCHER_USE_MACH_VOUCHER #if __has_include() #define VOUCHER_USE_MACH_VOUCHER 1 #endif -#endif +#endif // VOUCHER_USE_MACH_VOUCHER #if RDAR_24272659 // FIXME: -#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) +#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER #define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 #elif !defined(VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER) @@ -952,7 +849,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 #endif // RDAR_24272659 -#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) +#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #undef VOUCHER_USE_BANK_AUTOREDEEM #define VOUCHER_USE_BANK_AUTOREDEEM 0 #elif !defined(VOUCHER_USE_BANK_AUTOREDEEM) @@ -961,7 +858,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #if !VOUCHER_USE_MACH_VOUCHER || \ !__has_include() || \ - !DISPATCH_HOST_SUPPORTS_OSX(101200) + !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #undef VOUCHER_USE_MACH_VOUCHER_PRIORITY #define VOUCHER_USE_MACH_VOUCHER_PRIORITY 0 #elif !defined(VOUCHER_USE_MACH_VOUCHER_PRIORITY) @@ -1044,7 +941,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) -DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long, +DISPATCH_ENUM(dispatch_thread_set_self, unsigned long, DISPATCH_PRIORITY_ENFORCE = 0x1, DISPATCH_VOUCHER_REPLACE = 0x2, DISPATCH_VOUCHER_CONSUME = 0x4, @@ -1053,7 +950,7 @@ DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long, DISPATCH_WARN_RESULT static 
inline voucher_t _dispatch_adopt_priority_and_set_voucher( pthread_priority_t priority, voucher_t voucher, - _dispatch_thread_set_self_t flags); + dispatch_thread_set_self_t flags); #if HAVE_MACH mach_port_t _dispatch_get_mach_host_port(void); #endif @@ -1066,8 +963,7 @@ extern int _dispatch_set_qos_class_enabled; #endif #endif // HAVE_PTHREAD_WORKQUEUE_QOS #if DISPATCH_USE_KEVENT_WORKQUEUE -#if !HAVE_PTHREAD_WORKQUEUE_QOS || !DISPATCH_USE_KEVENT_QOS || \ - !DISPATCH_USE_EV_UDATA_SPECIFIC +#if !HAVE_PTHREAD_WORKQUEUE_QOS || !EV_UDATA_SPECIFIC #error Invalid build configuration #endif #if DISPATCH_USE_MGR_THREAD @@ -1075,10 +971,13 @@ extern int _dispatch_kevent_workqueue_enabled; #else #define _dispatch_kevent_workqueue_enabled (1) #endif +#else +#define _dispatch_kevent_workqueue_enabled (0) #endif // DISPATCH_USE_KEVENT_WORKQUEUE + #if DISPATCH_USE_EVFILT_MACHPORT_DIRECT -#if !DISPATCH_USE_KEVENT_WORKQUEUE || !DISPATCH_USE_EV_UDATA_SPECIFIC +#if !DISPATCH_USE_KEVENT_WORKQUEUE || !EV_UDATA_SPECIFIC #error Invalid build configuration #endif #if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK @@ -1097,6 +996,7 @@ extern int _dispatch_evfilt_machport_direct_enabled; #include "introspection_internal.h" #include "queue_internal.h" #include "source_internal.h" +#include "mach_internal.h" #include "voucher_internal.h" #include "data_internal.h" #if !TARGET_OS_WIN32 diff --git a/src/introspection.c b/src/introspection.c index d847cb91a..cd6bcff0a 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -193,7 +193,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, case DC_OVERRIDE_STEALING_TYPE: case DC_OVERRIDE_OWNING_TYPE: dc = dc->dc_data; - if (_dispatch_object_has_vtable(dc)) { + if (!_dispatch_object_is_continuation(dc)) { // these really wrap queues so we should hide the continuation type dq = (dispatch_queue_t)dc; diqi->type = dispatch_introspection_queue_item_type_queue; @@ -204,6 +204,8 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, #endif case DC_ASYNC_REDIRECT_TYPE: DISPATCH_INTERNAL_CRASH(0, "Handled by the caller"); + case DC_MACH_ASYNC_REPLY_TYPE: + break; case DC_MACH_SEND_BARRRIER_DRAIN_TYPE: break; case DC_MACH_SEND_BARRIER_TYPE: @@ -211,23 +213,17 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, flags = (uintptr_t)dc->dc_data; dq = dq->do_targetq; break; + default: + DISPATCH_INTERNAL_CRASH(dc->do_vtable, "Unknown dc vtable type"); } } else { - if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { + if (flags & DISPATCH_OBJ_SYNC_WAITER_BIT) { + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data); - if (flags & DISPATCH_OBJ_BARRIER_BIT) { - dc = dc->dc_ctxt; - dq = dc->dc_data; - } - ctxt = dc->dc_ctxt; - func = dc->dc_func; + ctxt = dsc->dsc_ctxt; + func = dsc->dsc_func; } - if (func == _dispatch_sync_recurse_invoke) { - dc = dc->dc_ctxt; - dq = dc->dc_data; - ctxt = dc->dc_ctxt; - func = dc->dc_func; - } else if (func == _dispatch_apply_invoke || + if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; if (da->da_todo) { @@ -252,7 +248,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, .function = func, .waiter = waiter, .barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1, - .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT, + .sync = flags & DISPATCH_OBJ_SYNC_WAITER_BIT, .apply = apply, }; if (flags & DISPATCH_OBJ_GROUP_BIT) { @@ -300,16 +296,11 @@ 
_dispatch_introspection_source_get_info(dispatch_source_t ds) .suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt, .enqueued = _dq_state_is_enqueued(dq_state), .handler_is_block = hdlr_is_block, - .timer = ds->ds_is_timer, - .after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER), + .timer = dr->du_is_timer, + .after = dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER), + .type = (unsigned long)dr->du_filter, + .handle = (unsigned long)dr->du_ident, }; - dispatch_kevent_t dk = ds->ds_dkev; - if (ds->ds_is_custom_source) { - dis.type = (unsigned long)dk; - } else if (dk) { - dis.type = (unsigned long)dk->dk_kevent.filter; - dis.handle = (unsigned long)dk->dk_kevent.ident; - } return dis; } @@ -739,7 +730,7 @@ struct dispatch_order_frame_s { dispatch_queue_order_entry_t dof_e; }; -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_NORETURN static void _dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof, dispatch_queue_t top_q, dispatch_queue_t bottom_q) diff --git a/src/introspection_internal.h b/src/introspection_internal.h index 06504a8ba..e2fa6d18b 100644 --- a/src/introspection_internal.h +++ b/src/introspection_internal.h @@ -66,7 +66,6 @@ void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); #if DISPATCH_PURE_C -void _dispatch_sync_recurse_invoke(void *ctxt); static dispatch_queue_t _dispatch_queue_get_current(void); DISPATCH_ALWAYS_INLINE @@ -100,24 +99,10 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq); DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq, - dispatch_function_t func) +_dispatch_introspection_sync_begin(dispatch_queue_t dq) { if (!_dispatch_introspection.debug_queue_inversions) return; - if (func != _dispatch_sync_recurse_invoke) { - _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); - } -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq, - dispatch_function_t func) -{ - if (!_dispatch_introspection.debug_queue_inversions) return; - if (func != _dispatch_sync_recurse_invoke) { - _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); - } + _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); } #endif // DISPATCH_PURE_C @@ -129,7 +114,6 @@ _dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq, #define _dispatch_introspection_init() #define _dispatch_introspection_thread_add() -#define _dispatch_introspection_thread_remove() DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t @@ -177,13 +161,7 @@ _dispatch_introspection_target_queue_changed( DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, - dispatch_function_t func DISPATCH_UNUSED) {} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, - dispatch_function_t func DISPATCH_UNUSED) {} +_dispatch_introspection_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED) {} #endif // DISPATCH_INTROSPECTION diff --git a/src/io.c b/src/io.c index 0a00e6e63..f538862dd 100644 --- a/src/io.c +++ b/src/io.c @@ -236,8 +236,7 @@ _dispatch_io_create(dispatch_io_type_t type) dispatch_io_t channel = _dispatch_alloc(DISPATCH_VTABLE(io), sizeof(struct dispatch_io_s)); channel->do_next = DISPATCH_OBJECT_LISTLESS; - channel->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - true); + 
channel->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); channel->params.type = type; channel->params.high = SIZE_MAX; channel->params.low = dispatch_io_defaults.low_water_chunks * @@ -889,7 +888,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_READ, channel, 0, length, dispatch_data_empty, - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false), + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false), ^(bool done, dispatch_data_t data, int error) { if (data) { data = dispatch_data_create_concat(deliver_data, data); @@ -960,7 +959,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_WRITE, channel, 0, dispatch_data_get_size(data), data, - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false), + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false), ^(bool done, dispatch_data_t d, int error) { if (done) { if (d) { @@ -1155,8 +1154,9 @@ _dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op) } dispatch_source_t timer = dispatch_source_create( DISPATCH_SOURCE_TYPE_TIMER, 0, 0, tq); - dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW, - (int64_t)op->params.interval), op->params.interval, 0); + dispatch_source_set_timer(timer, + dispatch_time(DISPATCH_TIME_NOW, (int64_t)op->params.interval), + op->params.interval, 0); dispatch_source_set_event_handler(timer, ^{ // On stream queue or pick queue if (dispatch_source_testcancel(timer)) { @@ -1236,9 +1236,10 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, return fd; } errno = 0; +#else + (void)fd_entry; #endif return open(path, oflag, mode); - (void)fd_entry; } static inline int @@ -1248,11 +1249,12 @@ _dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, int fd) { guardid_t guard = (uintptr_t)fd_entry; return guarded_close_np(fd, &guard); } else +#else + (void)fd_entry; #endif { return close(fd); } - (void)fd_entry; } static inline void @@ -1388,8 +1390,9 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) break; ); } - _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false)); + + _dispatch_stream_init(fd_entry, + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); } fd_entry->orig_flags = orig_flags; fd_entry->orig_nosigpipe = orig_nosigpipe; @@ -1456,8 +1459,8 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, if (S_ISREG(mode)) { _dispatch_disk_init(fd_entry, major(dev)); } else { - _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false)); + _dispatch_stream_init(fd_entry, + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); } fd_entry->fd = -1; fd_entry->orig_flags = -1; @@ -1636,8 +1639,7 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) disk->do_next = DISPATCH_OBJECT_LISTLESS; disk->do_xref_cnt = -1; disk->advise_list_depth = pending_reqs_depth; - disk->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); + disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); disk->dev = dev; TAILQ_INIT(&disk->operations); disk->cur_rq = TAILQ_FIRST(&disk->operations); @@ -1897,7 +1899,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) // Close queue must not run user cleanup handlers until sources are fully // unregistered dispatch_queue_t close_queue = 
op->fd_entry->close_queue; - dispatch_source_set_cancel_handler(source, ^{ + dispatch_source_set_mandatory_cancel_handler(source, ^{ _dispatch_op_debug("stream source cancel", op); dispatch_resume(close_queue); }); diff --git a/src/libdispatch.codes b/src/libdispatch.codes index 9aca7e16c..64f82b532 100644 --- a/src/libdispatch.codes +++ b/src/libdispatch.codes @@ -11,3 +11,10 @@ 0x2e02000c DISPATCH_PERF_post_activate_mutation 0x2e020010 DISPATCH_PERF_delayed_registration 0x2e020014 DISPATCH_PERF_mutable_target +0x2e020018 DISPATCH_PERF_strict_bg_timer +0x2e02001c DISPATCH_PERF_wlh_change + +0x2e030004 DISPATCH_MACH_MSG_hdr_move + +0x2e040004 DISPATCH_PERF_MON_worker_thread +0x2e040008 DISPATCH_PERF_MON_worker_useless diff --git a/src/mach.c b/src/mach.c new file mode 100644 index 000000000..cc20645b4 --- /dev/null +++ b/src/mach.c @@ -0,0 +1,2832 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#if HAVE_MACH + +#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1 +#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 +#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4 +#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8 +#define DISPATCH_MACH_ASYNC_REPLY 0x10 +#define DISPATCH_MACH_OPTIONS_MASK 0xffff + +#define DM_SEND_STATUS_SUCCESS 0x1 +#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2 + +DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, + DM_SEND_INVOKE_NONE = 0x0, + DM_SEND_INVOKE_FLUSH = 0x1, + DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, + DM_SEND_INVOKE_CANCEL = 0x4, + DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8, + DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10, +); +#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \ + ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND) + +static inline mach_msg_option_t _dispatch_mach_checkin_options(void); +static inline pthread_priority_t _dispatch_mach_priority_propagate( + mach_msg_option_t options); +static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); +static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou); +static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, + mach_port_t local_port, mach_port_t remote_port); +static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port); +static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( + dispatch_object_t dou, dispatch_mach_reply_refs_t dmr, + dispatch_mach_reason_t reason); +static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, + dispatch_object_t dou); +static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( + dispatch_mach_msg_t dmsg); +static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_qos_t qos); +static void _dispatch_mach_cancel(dispatch_mach_t dm); +static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, + 
dispatch_qos_t qos); +static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg); +static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, dispatch_queue_t drq); +static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue( + void *ctxt); +static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap( + dispatch_mach_msg_t dmsg, dispatch_mach_t dm); +static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); +static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, + mach_port_t send); + +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc) +{ + dispatch_source_t ds; + ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct, + recvp, 0, &_dispatch_mgr_q); + os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER], + (dispatch_continuation_t)dc, relaxed); + return ds; +} + +#pragma mark - +#pragma mark dispatch to XPC callbacks + +static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks; + +// Default dmxh_direct_message_handler callback that does not handle +// messages inline. +static bool +_dispatch_mach_xpc_no_handle_message( + void *_Nullable context DISPATCH_UNUSED, + dispatch_mach_reason_t reason DISPATCH_UNUSED, + dispatch_mach_msg_t message DISPATCH_UNUSED, + mach_error_t error DISPATCH_UNUSED) +{ + return false; +} + +// Default dmxh_msg_context_reply_queue callback that returns a NULL queue. +static dispatch_queue_t +_dispatch_mach_msg_context_no_async_reply_queue( + void *_Nonnull msg_context DISPATCH_UNUSED) +{ + return NULL; +} + +// Default dmxh_async_reply_handler callback that crashes when called. +DISPATCH_NORETURN +static void +_dispatch_mach_default_async_reply_handler(void *context DISPATCH_UNUSED, + dispatch_mach_reason_t reason DISPATCH_UNUSED, + dispatch_mach_msg_t message DISPATCH_UNUSED) +{ + DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks, + "_dispatch_mach_default_async_reply_handler called"); +} + +// Callbacks from dispatch to XPC. The default is to not support any callbacks. 
+static const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default + = { + .version = DISPATCH_MACH_XPC_HOOKS_VERSION, + .dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message, + .dmxh_msg_context_reply_queue = + &_dispatch_mach_msg_context_no_async_reply_queue, + .dmxh_async_reply_handler = &_dispatch_mach_default_async_reply_handler, +}; + +static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks + = &_dispatch_mach_xpc_hooks_default; + +void +dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks) +{ + if (!os_atomic_cmpxchg(&_dispatch_mach_xpc_hooks, + &_dispatch_mach_xpc_hooks_default, hooks, relaxed)) { + DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks, + "dispatch_mach_hooks_install_4libxpc called twice"); + } +} + +#pragma mark - +#pragma mark dispatch_mach_t + +static dispatch_mach_t +_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, + dispatch_mach_handler_function_t handler, bool handler_is_block, + bool is_xpc) +{ + dispatch_mach_recv_refs_t dmrr; + dispatch_mach_send_refs_t dmsr; + dispatch_mach_t dm; + // ensure _dispatch_evfilt_machport_direct_enabled is initialized + _dispatch_root_queues_init(); + dm = _dispatch_alloc(DISPATCH_VTABLE(mach), + sizeof(struct dispatch_mach_s)); + _dispatch_queue_init(dm->_as_dq, DQF_LEGACY, 1, true); + + dm->dq_label = label; + dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds + dm->dm_is_xpc = is_xpc; + + dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr; + dmrr->du_owner_wref = _dispatch_ptr2wref(dm); + dmrr->dmrr_handler_func = handler; + dmrr->dmrr_handler_ctxt = context; + dmrr->dmrr_handler_is_block = handler_is_block; + dm->dm_recv_refs = dmrr; + + dmsr = dux_create(&_dispatch_mach_type_send, 0, + DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD)._dmsr; + dmsr->du_owner_wref = _dispatch_ptr2wref(dm); + dm->dm_send_refs = dmsr; + + if (is_xpc) { + dispatch_xpc_term_refs_t _dxtr = + dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr; + _dxtr->du_owner_wref = _dispatch_ptr2wref(dm); + dm->dm_xpc_term_refs = _dxtr; + } + + if (slowpath(!q)) { + q = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); + } else { + _dispatch_retain(q); + } + dm->do_targetq = q; + _dispatch_object_debug(dm, "%s", __func__); + return dm; +} + +dispatch_mach_t +dispatch_mach_create(const char *label, dispatch_queue_t q, + dispatch_mach_handler_t handler) +{ + dispatch_block_t bb = _dispatch_Block_copy((void*)handler); + return _dispatch_mach_create(label, q, bb, + (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true, + false); +} + +dispatch_mach_t +dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, + dispatch_mach_handler_function_t handler) +{ + return _dispatch_mach_create(label, q, context, handler, false, false); +} + +dispatch_mach_t +dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q, + void *context, dispatch_mach_handler_function_t handler) +{ + return _dispatch_mach_create(label, q, context, handler, false, true); +} + +void +_dispatch_mach_dispose(dispatch_mach_t dm) +{ + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_unote_dispose(dm->dm_recv_refs); + dm->dm_recv_refs = NULL; + _dispatch_unote_dispose(dm->dm_send_refs); + dm->dm_send_refs = NULL; + if (dm->dm_xpc_term_refs) { + _dispatch_unote_dispose(dm->dm_xpc_term_refs); + dm->dm_xpc_term_refs = NULL; + } + _dispatch_queue_destroy(dm->_as_dq); +} + +void +dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, + mach_port_t 
send, dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + uint32_t disconnect_cnt; + + if (MACH_PORT_VALID(receive)) { + dm->dm_recv_refs->du_ident = receive; + _dispatch_retain(dm); // the reference the manager queue holds + } + dmsr->dmsr_send = send; + if (MACH_PORT_VALID(send)) { + if (checkin) { + dispatch_mach_msg_t dmsg = checkin; + dispatch_retain(dmsg); + dmsg->dmsg_options = _dispatch_mach_checkin_options(); + dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg); + } + dmsr->dmsr_checkin = checkin; + } + dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 == + DISPATCH_MACH_NEVER_INSTALLED); + disconnect_cnt = os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, release); + if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); + } + _dispatch_object_debug(dm, "%s", __func__); + return dispatch_activate(dm); +} + +static inline bool +_dispatch_mach_reply_tryremove(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr) +{ + bool removed; + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + } + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + return removed; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, uint32_t options) +{ + dispatch_mach_msg_t dmsgr = NULL; + bool disconnected = (options & DU_UNREGISTER_DISCONNECTED); + if (options & DU_UNREGISTER_REPLY_REMOVE) { + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); + } + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + } + if (disconnected) { + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, + DISPATCH_MACH_DISCONNECTED); + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; + } + _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p", + _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident), + disconnected ? " (disconnected)" : "", dmr->dmr_ctxt); + if (dmsgr) { + return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } + dispatch_assert(!(options & DU_UNREGISTER_WAKEUP)); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, uint32_t options) +{ + dispatch_mach_msg_t dmsgr = NULL; + dispatch_queue_t drq = NULL; + bool replies_empty = false; + bool disconnected = (options & DU_UNREGISTER_DISCONNECTED); + if (options & DU_UNREGISTER_REPLY_REMOVE) { + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); + } + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + replies_empty = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + } + if (disconnected) { + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, + dmr->dmr_async_reply ? 
DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); + if (dmr->dmr_ctxt) { + drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt); + } + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; + } + _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p", + (mach_port_t)dmr->du_ident, disconnected ? " (disconnected)" : "", + dmr->dmr_ctxt); + if (!_dispatch_unote_unregister(dmr, options)) { + _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]", + (mach_port_t)dmr->du_ident, dmr); + dispatch_assert(options == DU_UNREGISTER_DISCONNECTED); + // dmr must be put back so that the event delivery finds it, the + // replies lock is held by the caller. + TAILQ_INSERT_HEAD(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + if (dmsgr) { + dmr->dmr_voucher = dmsgr->dmsg_voucher; + dmsgr->dmsg_voucher = NULL; + _dispatch_release(dmsgr); + } + return; // deferred unregistration + } + _dispatch_unote_dispose(dmr); + if (dmsgr) { + if (drq) { + return _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); + } else { + return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } + } + if ((options & DU_UNREGISTER_WAKEUP) && replies_empty && + (dm->dm_send_refs->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_register(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, + dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts) +{ + dmr->du_owner_wref = _dispatch_ptr2wref(dm); + dmr->du_wlh = NULL; + dmr->du_filter = EVFILT_MACHPORT; + dmr->du_ident = reply_port; + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_mach_reply_mark_reply_port_owned(dmr); + } else { + if (dmsg->dmsg_voucher) { + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); + } + dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority); + // make reply context visible to leaks rdar://11777199 + dmr->dmr_ctxt = dmsg->do_ctxt; + } + + _dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p", + reply_port, dmsg->do_ctxt); + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, + "Reply already registered"); + } + TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, + dispatch_mach_msg_t dmsg) +{ + dispatch_mach_reply_refs_t dmr; + dispatch_priority_t mpri, pri, rpri; + dispatch_priority_t overcommit; + + dmr = dux_create(&_dispatch_mach_type_reply, reply_port, 0)._dmr; + dmr->du_owner_wref = _dispatch_ptr2wref(dm); + if (dmsg->dmsg_voucher) { + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); + } + dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority); + // make reply context visible to leaks rdar://11777199 + dmr->dmr_ctxt = dmsg->do_ctxt; + + dispatch_queue_t drq = NULL; + if (dmsg->dmsg_options & DISPATCH_MACH_ASYNC_REPLY) { + dmr->dmr_async_reply = true; + drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + + dispatch_wlh_t wlh = dm->dq_wlh; + pri = (dm->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK); + overcommit = dm->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + if (drq) { + rpri = 
drq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; + if (rpri > pri) { + pri = rpri; + overcommit = drq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + } + if (drq->dq_wlh) wlh = drq->dq_wlh; + } + if (pri && dmr->du_is_direct) { + mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority); + if (pri < mpri) pri = mpri; + pri |= overcommit; + } else { + pri = DISPATCH_PRIORITY_FLAG_MANAGER; + } + + _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", + reply_port, dmsg->do_ctxt); + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, + "Reply already registered"); + } + TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + + if (!_dispatch_unote_register(dmr, wlh, pri)) { + _dispatch_mach_reply_kevent_unregister(dm, dmr, + DU_UNREGISTER_DISCONNECTED|DU_UNREGISTER_REPLY_REMOVE); + } +} + +#pragma mark - +#pragma mark dispatch_mach_msg + +static mach_port_t +_dispatch_get_thread_reply_port(void) +{ + mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port(); + if (mrp) { + reply_port = mrp; + _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", + reply_port); + } else { + reply_port = mach_reply_port(); + _dispatch_set_thread_mig_reply_port(reply_port); + _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", + reply_port); + } + _dispatch_debug_machport(reply_port); + return reply_port; +} + +static void +_dispatch_clear_thread_reply_port(mach_port_t reply_port) +{ + mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); + if (reply_port != mrp) { + if (mrp) { + _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " + "port (found 0x%08x)", reply_port, mrp); + } + return; + } + _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", + reply_port); +} + +static void +_dispatch_set_thread_reply_port(mach_port_t reply_port) +{ + _dispatch_debug_machport(reply_port); + mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); + if (mrp) { + kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + _dispatch_debug("machport[0x%08x]: deallocated sync reply port " + "(found 0x%08x)", reply_port, mrp); + } else { + _dispatch_set_thread_mig_reply_port(reply_port); + _dispatch_debug("machport[0x%08x]: restored thread sync reply port", + reply_port); + } +} + +static inline mach_port_t +_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t remote = hdr->msgh_remote_port; + return remote; +} + +static inline mach_port_t +_dispatch_mach_msg_get_reply_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t local = hdr->msgh_local_port; + if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) != + MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL; + return local; +} + +static inline void +_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err, + unsigned long reason) +{ + dispatch_assert_zero(reason & ~(unsigned long)code_emask); + dmsg->dmsg_error = ((err || !reason) ? 
err : + err_local|err_sub(0x3e0)|(mach_error_t)reason); +} + +static inline unsigned long +_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) +{ + mach_error_t err = dmsg->dmsg_error; + + if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { + *err_ptr = 0; + return err_get_code(err); + } + *err_ptr = err; + return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT; +} + +static inline dispatch_mach_msg_t +_dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, + dispatch_mach_reply_refs_t dmr, uint32_t flags) +{ + dispatch_mach_msg_destructor_t destructor; + dispatch_mach_msg_t dmsg; + voucher_t voucher; + pthread_priority_t pp; + + if (dmr) { + _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher + pp = _dispatch_priority_to_pp(dmr->dmr_priority); + voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + } else { + voucher = voucher_create_with_mach_msg(hdr); + pp = _voucher_get_priority(voucher); + } + + destructor = (flags & DISPATCH_EV_MSG_NEEDS_FREE) ? + DISPATCH_MACH_MSG_DESTRUCTOR_FREE : + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (!(flags & DISPATCH_EV_MSG_NEEDS_FREE)) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, + (uint64_t)hdr, (uint64_t)dmsg->dmsg_buf); + } + dmsg->dmsg_voucher = voucher; + dmsg->dmsg_priority = pp; + dmsg->do_ctxt = dmr ? dmr->dmr_ctxt : NULL; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); + _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg); + _dispatch_voucher_ktrace_dmsg_push(dmsg); + return dmsg; +} + +void +_dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *hdr, mach_msg_size_t siz) +{ + // this function is very similar with what _dispatch_source_merge_evt does + // but can't reuse it as handling the message must be protected by the + // internal refcount between the first half and the trailer of what + // _dispatch_source_merge_evt does. 
+ + dispatch_mach_recv_refs_t dmrr = du._dmrr; + dispatch_mach_t dm = _dispatch_wref2ptr(dmrr->du_owner_wref); + dispatch_wakeup_flags_t wflags = 0; + dispatch_queue_flags_t dqf; + dispatch_mach_msg_t dmsg; + + dispatch_assert(_dispatch_unote_needs_rearm(du)); + + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(du._du->du_ident, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + + if (dmrr->du_is_direct || (flags & (EV_DELETE | EV_ONESHOT))) { + // once we modify the queue atomic flags below, it will allow concurrent + // threads running _dispatch_mach_invoke2 to dispose of the source, + // so we can't safely borrow the reference we get from the muxnote udata + // anymore, and need our own + wflags = DISPATCH_WAKEUP_CONSUME; + _dispatch_retain(dm); // rdar://20382435 + } + + if (unlikely((flags & EV_ONESHOT) && !(flags & EV_DELETE))) { + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]", + dm, dmrr); + } else if (unlikely(flags & EV_DELETE)) { + _dispatch_source_refs_unregister(dm->_as_ds, + DU_UNREGISTER_ALREADY_DELETED); + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", dm, dmrr); +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + } else if (unlikely(!dmrr->du_is_direct)) { + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + _dispatch_unote_resume(du); +#endif + } else { + dispatch_assert(dmrr->du_is_direct); + dqf = _dispatch_queue_atomic_flags_clear(dm->_as_dq, DSF_ARMED); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", dm, dmrr); + } + + _dispatch_debug_machport(hdr->msgh_remote_port); + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + + if (dqf & DSF_CANCELED) { + _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } + return dx_wakeup(dm, 0, wflags | DISPATCH_WAKEUP_FLUSH); + } + + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + if (wflags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dm); + } +} + +void +_dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *hdr, mach_msg_size_t siz) +{ + dispatch_mach_reply_refs_t dmr = du._dmr; + dispatch_mach_t dm = _dispatch_wref2ptr(dmr->du_owner_wref); + bool canceled = (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED); + dispatch_mach_msg_t dmsg = NULL; + + _dispatch_debug_machport(hdr->msgh_remote_port); + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + + uint32_t options = DU_UNREGISTER_IMMEDIATE_DELETE; + options |= DU_UNREGISTER_REPLY_REMOVE; + options |= DU_UNREGISTER_WAKEUP; + if (canceled) { + _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + options |= DU_UNREGISTER_DISCONNECTED; + mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } + } else { + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags); + } + _dispatch_mach_reply_kevent_unregister(dm, dmr, options); + + if (!canceled) { + dispatch_queue_t drq = NULL; + if (dmsg->do_ctxt) { + drq = 
_dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + if (drq) { + _dispatch_mach_push_async_reply_msg(dm, dmsg, drq); + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + } + } +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_mach_msg_t +_dispatch_mach_msg_reply_recv(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port) +{ + if (slowpath(!MACH_PORT_VALID(reply_port))) { + DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port"); + } + void *ctxt = dmr->dmr_ctxt; + mach_msg_header_t *hdr, *hdr2 = NULL; + void *hdr_copyout_addr; + mach_msg_size_t siz, msgsiz = 0; + mach_msg_return_t kr; + mach_msg_option_t options; + siz = mach_vm_round_page(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + + DISPATCH_MACH_TRAILER_SIZE); + hdr = alloca(siz); + for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size); + p < (mach_vm_address_t)hdr + siz; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER); +retry: + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port, + (options & MACH_RCV_TIMEOUT) ? "poll" : "wait"); + kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + hdr_copyout_addr = hdr; + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) " + "returned: %s - 0x%x", reply_port, siz, options, + mach_error_string(kr), kr); + switch (kr) { + case MACH_RCV_TOO_LARGE: + if (!fastpath(hdr->msgh_size <= UINT_MAX - + DISPATCH_MACH_TRAILER_SIZE)) { + DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message"); + } + if (options & MACH_RCV_LARGE) { + msgsiz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE; + hdr2 = malloc(msgsiz); + if (dispatch_assume(hdr2)) { + hdr = hdr2; + siz = msgsiz; + } + options |= MACH_RCV_TIMEOUT; + options &= ~MACH_RCV_LARGE; + goto retry; + } + _dispatch_log("BUG in libdispatch client: " + "dispatch_mach_send_and_wait_for_reply: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id, + hdr->msgh_size); + break; + case MACH_RCV_INVALID_NAME: // rdar://problem/21963848 + case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327 + case MACH_RCV_PORT_DIED: + // channel was disconnected/canceled and reply port destroyed + _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: " + "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr); + goto out; + case MACH_MSG_SUCCESS: + if (hdr->msgh_remote_port) { + _dispatch_debug_machport(hdr->msgh_remote_port); + } + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, " + "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, + hdr->msgh_size, hdr->msgh_remote_port); + siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE; + if (hdr2 && siz < msgsiz) { + void *shrink = realloc(hdr2, msgsiz); + if (shrink) hdr = hdr2 = shrink; + } + break; + default: + dispatch_assume_zero(kr); + break; + } + _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port); + hdr->msgh_local_port = MACH_PORT_NULL; + if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { + if (!kr) mach_msg_destroy(hdr); + goto out; + } + dispatch_mach_msg_t dmsg; + dispatch_mach_msg_destructor_t destructor = (!hdr2) ? 
+ DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : + DISPATCH_MACH_MSG_DESTRUCTOR_FREE; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (!hdr2 || hdr != hdr_copyout_addr) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, + (uint64_t)hdr_copyout_addr, + (uint64_t)_dispatch_mach_msg_get_msg(dmsg)); + } + dmsg->do_ctxt = ctxt; + return dmsg; +out: + free(hdr2); + return NULL; +} + +static inline void +_dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port) +{ + bool removed = _dispatch_mach_reply_tryremove(dm, dmr); + if (!MACH_PORT_VALID(local_port) || !removed) { + // port moved/destroyed during receive, or reply waiter was never + // registered or already removed (disconnected) + return; + } + mach_port_t reply_port = _dispatch_mach_reply_get_reply_port( + (mach_port_t)dmr->du_ident); + _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", + reply_port, dmr->dmr_ctxt); + if (_dispatch_mach_reply_is_reply_port_owned(dmr)) { + _dispatch_set_thread_reply_port(reply_port); + if (local_port != reply_port) { + DISPATCH_CLIENT_CRASH(local_port, + "Reply received on unexpected port"); + } + return; + } + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + hdr->msgh_local_port = local_port; + dmsg->dmsg_voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + dmsg->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority); + dmsg->do_ctxt = dmr->dmr_ctxt; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED); + return _dispatch_mach_handle_or_push_received_msg(dm, dmsg); +} + +static inline void +_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, + mach_port_t remote_port) +{ + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + if (local_port) hdr->msgh_local_port = local_port; + if (remote_port) hdr->msgh_remote_port = remote_port; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); + _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ? + local_port : remote_port, local_port ? "receive" : "send"); + return _dispatch_mach_handle_or_push_received_msg(dm, dmsg); +} + +static inline dispatch_mach_msg_t +_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, + dispatch_mach_reply_refs_t dmr, dispatch_mach_reason_t reason) +{ + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + mach_port_t reply_port = dmsg ? 
dmsg->dmsg_reply : + _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident); + voucher_t v; + + if (!reply_port) { + if (!dmsg) { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + if (v) _voucher_release(v); + } + return NULL; + } + + if (dmsg) { + v = dmsg->dmsg_voucher; + if (v) _voucher_retain(v); + } else { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + } + + if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) && + (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) || + (dmr && !_dispatch_unote_registered(dmr) && + _dispatch_mach_reply_is_reply_port_owned(dmr))) { + if (v) _voucher_release(v); + // deallocate owned reply port to break _dispatch_mach_msg_reply_recv + // out of waiting in mach_msg(MACH_RCV_MSG) + kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + return NULL; + } + + mach_msg_header_t *hdr; + dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + dmsgr->dmsg_voucher = v; + hdr->msgh_local_port = reply_port; + if (dmsg) { + dmsgr->dmsg_priority = dmsg->dmsg_priority; + dmsgr->do_ctxt = dmsg->do_ctxt; + } else { + dmsgr->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority); + dmsgr->do_ctxt = dmr->dmr_ctxt; + } + _dispatch_mach_msg_set_reason(dmsgr, 0, reason); + _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p", + hdr->msgh_local_port, dmsgr->do_ctxt); + return dmsgr; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) +{ + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + dispatch_queue_t drq = NULL; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + mach_msg_option_t msg_opts = dmsg->dmsg_options; + _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, " + "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x", + msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, + msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply); + unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? + 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; + dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL, + msg_opts & DISPATCH_MACH_ASYNC_REPLY + ? 
DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); + if (dmsg->do_ctxt) { + drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + _dispatch_mach_msg_set_reason(dmsg, 0, reason); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + if (dmsgr) { + if (drq) { + _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } + } +} + +DISPATCH_NOINLINE +static uint32_t +_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_mach_reply_refs_t dmr, dispatch_qos_t qos, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dsrr = dm->dm_send_refs; + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; + voucher_t voucher = dmsg->dmsg_voucher; + dispatch_queue_t drq = NULL; + mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; + uint32_t send_status = 0; + bool clear_voucher = false, kvoucher_move_send = false; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE); + mach_port_t reply_port = dmsg->dmsg_reply; + if (!is_reply) { + dm->dm_needs_mgr = 0; + if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) { + // send initial checkin message + if (unlikely(_dispatch_unote_registered(dsrr) && + _dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + dm->dm_needs_mgr = 1; + goto out; + } + if (unlikely(!_dispatch_mach_msg_send(dm, + dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) { + goto out; + } + dsrr->dmsr_checkin = NULL; + } + } + mach_msg_return_t kr = 0; + mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options; + if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED; + opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK); + if (!is_reply) { + if (dmsg != dsrr->dmsr_checkin) { + msg->msgh_remote_port = dsrr->dmsr_send; + } + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + if (unlikely(!_dispatch_unote_registered(dsrr))) { + _dispatch_mach_notification_kevent_register(dm, + msg->msgh_remote_port); + } + if (likely(_dispatch_unote_registered(dsrr))) { + if (os_atomic_load2o(dsrr, dmsr_notification_armed, + relaxed)) { + goto out; + } + opts |= MACH_SEND_NOTIFY; + } + } + opts |= MACH_SEND_TIMEOUT; + if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { + ipc_kvoucher = _voucher_create_mach_voucher_with_priority( + voucher, dmsg->dmsg_priority); + } + _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); + if (ipc_kvoucher) { + kvoucher_move_send = true; + clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, + ipc_kvoucher, kvoucher_move_send); + } else { + clear_voucher = _voucher_mach_msg_set(msg, voucher); + } + if (qos && _dispatch_evfilt_machport_direct_enabled) { + opts |= MACH_SEND_OVERRIDE; + msg_priority = (mach_msg_priority_t)_dispatch_qos_to_pp(qos); + } + } + _dispatch_debug_machport(msg->msgh_remote_port); + if (reply_port) _dispatch_debug_machport(reply_port); + if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_clear_thread_reply_port(reply_port); + } + _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg, + msg_opts); + } + kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, + msg_priority); + _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " + "opts 0x%x, 
msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: " + "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, + opts, msg_opts, msg->msgh_voucher_port, reply_port, + mach_error_string(kr), kr); + if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DU_UNREGISTER_REPLY_REMOVE); + } + if (clear_voucher) { + if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { + DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption"); + } + mach_voucher_t kv; + kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); + if (kvoucher_move_send) ipc_kvoucher = kv; + } + } + if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { + if (opts & MACH_SEND_NOTIFY) { + _dispatch_debug("machport[0x%08x]: send-possible notification " + "armed", (mach_port_t)dsrr->du_ident); + _dispatch_mach_notification_set_armed(dsrr); + } else { + // send kevent must be installed on the manager queue + dm->dm_needs_mgr = 1; + } + if (ipc_kvoucher) { + _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); + voucher_t ipc_voucher; + ipc_voucher = _voucher_create_with_priority_and_mach_voucher( + voucher, dmsg->dmsg_priority, ipc_kvoucher); + _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]", + ipc_voucher, dmsg, voucher); + if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = ipc_voucher; + } + goto out; + } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { + _voucher_dealloc_mach_voucher(ipc_kvoucher); + } + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port && + !(_dispatch_unote_registered(dmrr) && + dmrr->du_ident == reply_port)) { + if (!dmrr->du_is_direct && + _dispatch_queue_get_current() != &_dispatch_mgr_q) { + // reply receive kevent must be installed on the manager queue + dm->dm_needs_mgr = 1; + dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY; + goto out; + } + _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg); + } + if (unlikely(!is_reply && dmsg == dsrr->dmsr_checkin && + _dispatch_unote_registered(dsrr))) { + _dispatch_mach_notification_kevent_unregister(dm); + } + if (slowpath(kr)) { + // Send failed, so reply was never registered + dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL, + msg_opts & DISPATCH_MACH_ASYNC_REPLY + ? 
DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); + if (dmsg->do_ctxt) { + drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + } + _dispatch_mach_msg_set_reason(dmsg, kr, 0); + if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) && + (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) { + // Return sent message synchronously + send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT; + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + } + if (dmsgr) { + if (drq) { + _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } + } + send_status |= DM_SEND_STATUS_SUCCESS; +out: + return send_status; +} + +#pragma mark - +#pragma mark dispatch_mach_send_refs_t + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dmsr_state_max_qos(uint64_t dmsr_state) +{ + return _dq_state_max_qos(dmsr_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dmsr_state_needs_override(uint64_t dmsr_state, dispatch_qos_t qos) +{ + dmsr_state &= DISPATCH_MACH_STATE_MAX_QOS_MASK; + return dmsr_state < _dq_state_from_qos(qos); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dmsr_state_merge_override(uint64_t dmsr_state, dispatch_qos_t qos) +{ + if (_dmsr_state_needs_override(dmsr_state, qos)) { + dmsr_state &= ~DISPATCH_MACH_STATE_MAX_QOS_MASK; + dmsr_state |= _dq_state_from_qos(qos); + dmsr_state |= DISPATCH_MACH_STATE_DIRTY; + dmsr_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + } + return dmsr_state; +} + +#define _dispatch_mach_send_push_update_tail(dmsr, tail) \ + os_mpsc_push_update_tail(dmsr, dmsr, tail, do_next) +#define _dispatch_mach_send_push_update_head(dmsr, head) \ + os_mpsc_push_update_head(dmsr, dmsr, head) +#define _dispatch_mach_send_get_head(dmsr) \ + os_mpsc_get_head(dmsr, dmsr) +#define _dispatch_mach_send_unpop_head(dmsr, dc, dc_next) \ + os_mpsc_undo_pop_head(dmsr, dmsr, dc, dc_next, do_next) +#define _dispatch_mach_send_pop_head(dmsr, head) \ + os_mpsc_pop_head(dmsr, dmsr, head, do_next) + +#define dm_push(dm, dc, qos) ({ \ + dispatch_queue_t _dq = (dm)->_as_dq; \ + dispatch_assert(dx_vtable(_dq)->do_push == _dispatch_queue_push); \ + _dispatch_queue_push(_dq, dc, qos); \ + }) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dmsr, + dispatch_object_t dou) +{ + if (_dispatch_mach_send_push_update_tail(dmsr, dou._do)) { + _dispatch_mach_send_push_update_head(dmsr, dou._do); + return true; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_mach_reply_refs_t dmr; + dispatch_mach_msg_t dmsg; + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + dispatch_qos_t qos = _dmsr_state_max_qos(dmsr->dmsr_state); + uint64_t old_state, new_state; + uint32_t send_status; + bool needs_mgr, disconnecting, returning_send_result = false; + +again: + needs_mgr = false; disconnecting = false; + while (dmsr->dmsr_tail) { + dc = _dispatch_mach_send_get_head(dmsr); + do { + dispatch_mach_send_invoke_flags_t sf = send_flags; + // Only request immediate send result for the first message + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + next_dc = _dispatch_mach_send_pop_head(dmsr, dc); + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + if (!(send_flags & 
DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + goto partial_drain; + } + _dispatch_continuation_pop(dc, NULL, flags, dm->_as_dq); + continue; + } + if (_dispatch_object_is_sync_waiter(dc)) { + dmsg = ((dispatch_continuation_t)dc)->dc_data; + dmr = ((dispatch_continuation_t)dc)->dc_other; + } else if (_dispatch_object_has_vtable(dc)) { + dmsg = (dispatch_mach_msg_t)dc; + dmr = NULL; + } else { + if ((_dispatch_unote_registered(dmsr) || + !dm->dm_recv_refs->du_is_direct) && + (_dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + needs_mgr = true; + goto partial_drain; + } + if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) { + disconnecting = true; + goto partial_drain; + } + _dispatch_perfmon_workitem_inc(); + continue; + } + _dispatch_voucher_ktrace_dmsg_pop(dmsg); + if (unlikely(dmsr->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + _dispatch_mach_msg_not_sent(dm, dmsg); + _dispatch_perfmon_workitem_inc(); + continue; + } + send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, qos, sf); + if (unlikely(!send_status)) { + goto partial_drain; + } + if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) { + returning_send_result = true; + } + _dispatch_perfmon_workitem_inc(); + } while ((dc = next_dc)); + } + + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + if (old_state & DISPATCH_MACH_STATE_DIRTY) { + new_state = old_state; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } else { + // unlock + new_state = 0; + } + }); + goto out; + +partial_drain: + // if this is not a complete drain, we must undo some things + _dispatch_mach_send_unpop_head(dmsr, dc, next_dc); + + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = old_state; + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + }); + } else { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = old_state; + if (old_state & (DISPATCH_MACH_STATE_DIRTY | + DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) { + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } else { + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + } + }); + } + +out: + if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) { + // Ensure that the root queue sees that this thread was overridden. 
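+		// (DISPATCH_MACH_STATE_RECEIVED_OVERRIDE is set by
+		// _dmsr_state_merge_override() above when a QoS override arrived
+		// while this drain was in progress, so the override is replayed
+		// onto the draining thread's base priority here.)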
+ _dispatch_set_basepri_override_qos(_dmsr_state_max_qos(old_state)); + } + + if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { + qos = _dmsr_state_max_qos(new_state); + os_atomic_thread_fence(dependency); + dmsr = os_atomic_force_dependency_on(dmsr, new_state); + goto again; + } + + if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + qos = _dmsr_state_max_qos(new_state); + _dispatch_mach_send_barrier_drain_push(dm, qos); + } else { + if (needs_mgr || dm->dm_needs_mgr) { + qos = _dmsr_state_max_qos(new_state); + } else { + qos = 0; + } + if (!disconnecting) dx_wakeup(dm, qos, DISPATCH_WAKEUP_FLUSH); + } + return returning_send_result; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t old_state, new_state; + + uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK; + uint64_t canlock_state = 0; + + if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER; + } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + + dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor(); +retry: + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, { + new_state = old_state; + if (unlikely((old_state & canlock_mask) != canlock_state)) { + if (!(send_flags & DM_SEND_INVOKE_FLUSH)) { + os_atomic_rmw_loop_give_up(break); + } + new_state |= DISPATCH_MACH_STATE_DIRTY; + } else { + if (_dispatch_queue_should_override_self(old_state, oq_floor)) { + os_atomic_rmw_loop_give_up({ + oq_floor = _dispatch_queue_override_self(old_state); + goto retry; + }); + } + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + + if (unlikely((old_state & canlock_mask) != canlock_state)) { + return; + } + if (send_flags & DM_SEND_INVOKE_CANCEL) { + _dispatch_mach_cancel(dm); + } + _dispatch_mach_send_drain(dm, flags, send_flags); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dispatch_thread_frame_s dtf; + + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY); + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER); + // hide the mach channel (see _dispatch_mach_barrier_invoke comment) + _dispatch_thread_frame_stash(&dtf); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{ + _dispatch_mach_send_invoke(dm, flags, + DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER); + }); + _dispatch_thread_frame_unstash(&dtf); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, dispatch_qos_t qos) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN); + dc->dc_func = NULL; + dc->dc_ctxt = NULL; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dm_push(dm, dc, qos); +} + +DISPATCH_NOINLINE +static void 
+_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, + dispatch_qos_t qos) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + uint64_t old_state, new_state, state_flags = 0; + dispatch_lock_owner owner; + bool wakeup; + + // when pushing a send barrier that destroys + // the last reference to this channel, and the send queue is already + // draining on another thread, the send barrier may run as soon as + // _dispatch_mach_send_push_inline() returns. + _dispatch_retain(dm); + + wakeup = _dispatch_mach_send_push_inline(dmsr, dc); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) { + state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + } + + if (state_flags) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = _dmsr_state_merge_override(old_state, qos); + new_state |= state_flags; + }); + } else { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, { + new_state = _dmsr_state_merge_override(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(break); + } + }); + } + + qos = _dmsr_state_max_qos(new_state); + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dmsr_state_needs_override(old_state, qos)) { + _dispatch_wqthread_override_start_check_owner(owner, qos, + &dmsr->dmsr_state_lock.dul_lock); + } + return _dispatch_release_tailcall(dm); + } + + dispatch_wakeup_flags_t wflags = 0; + if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { + _dispatch_mach_send_barrier_drain_push(dm, qos); + } else if (wakeup || dmsr->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED)) { + wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME; + } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME; + } + if (wflags) { + return dx_wakeup(dm, qos, wflags); + } + return _dispatch_release_tailcall(dm); +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, + dispatch_object_t dou, dispatch_qos_t qos, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t old_state, new_state, canlock_mask, state_flags = 0; + dispatch_lock_owner owner; + + bool wakeup = _dispatch_mach_send_push_inline(dmsr, dou); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + } + + if (unlikely(dmsr->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = _dmsr_state_merge_override(old_state, qos); + new_state |= state_flags; + }); + dx_wakeup(dm, qos, DISPATCH_WAKEUP_FLUSH); + return false; + } + + canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK | + DISPATCH_MACH_STATE_PENDING_BARRIER; + if (state_flags) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, seq_cst, { + new_state = _dmsr_state_merge_override(old_state, qos); + new_state |= state_flags; + if (likely((old_state & canlock_mask) == 0)) { + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + } else { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, { + new_state = _dmsr_state_merge_override(old_state, qos); + if (new_state == old_state) { + 
os_atomic_rmw_loop_give_up(return false); + } + if (likely((old_state & canlock_mask) == 0)) { + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + } + + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dmsr_state_needs_override(old_state, qos)) { + _dispatch_wqthread_override_start_check_owner(owner, qos, + &dmsr->dmsr_state_lock.dul_lock); + } + return false; + } + + if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + dx_wakeup(dm, qos, DISPATCH_WAKEUP_OVERRIDING); + return false; + } + + // Ensure our message is still at the head of the queue and has not already + // been dequeued by another thread that raced us to the send queue lock. + // A plain load of the head and comparison against our object pointer is + // sufficient. + if (unlikely(!(wakeup && dou._do == dmsr->dmsr_head))) { + // Don't request immediate send result for messages we don't own + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + } + return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags); +} + +#pragma mark - +#pragma mark dispatch_mach + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm) +{ + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + if (_dispatch_unote_registered(dm->dm_send_refs)) { + dispatch_assume(_dispatch_unote_unregister(dm->dm_send_refs, 0)); + } + dm->dm_send_refs->du_ident = 0; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send) +{ + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + dm->dm_send_refs->du_ident = send; + dispatch_assume(_dispatch_unote_register(dm->dm_send_refs, + DISPATCH_WLH_MANAGER, 0)); +} + +void +_dispatch_mach_merge_notification(dispatch_unote_t du, + uint32_t flags DISPATCH_UNUSED, uintptr_t data, + uintptr_t status DISPATCH_UNUSED, + pthread_priority_t pp DISPATCH_UNUSED) +{ + dispatch_mach_send_refs_t dmsr = du._dmsr; + dispatch_mach_t dm = _dispatch_wref2ptr(dmsr->du_owner_wref); + + if (data & dmsr->du_fflags) { + _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN, + DM_SEND_INVOKE_FLUSH); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg) +{ + mach_error_t error; + dispatch_mach_reason_t reason = _dispatch_mach_msg_get_reason(dmsg, &error); + if (!dm->dm_is_xpc || + !_dispatch_mach_xpc_hooks->dmxh_direct_message_handler( + dm->dm_recv_refs->dmrr_handler_ctxt, reason, dmsg, error)) { + // Not XPC client or not a message that XPC can handle inline - push + // it onto the channel queue. + dm_push(dm, dmsg, _dispatch_qos_from_pp(dmsg->dmsg_priority)); + } else { + // XPC handled the message inline. Do the cleanup that would otherwise + // have happened in _dispatch_mach_msg_invoke(), leaving out steps that + // are not required in this context. + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dispatch_release(dmsg); + } +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, dispatch_queue_t drq) { + // Push the message onto the given queue. This function is only used for + // replies to messages sent by + // dispatch_mach_send_with_result_and_async_reply_4libxpc(). 
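+	// The reply is wrapped in a MACH_ASYNC_REPLY continuation and pushed to
+	// the queue returned by the dmxh_msg_context_reply_queue XPC hook rather
+	// than onto the channel itself.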
+ dispatch_continuation_t dc = _dispatch_mach_msg_async_reply_wrap(dmsg, dm); + _dispatch_trace_continuation_push(drq, dc); + dx_push(drq, dc, _dispatch_qos_from_pp(dmsg->dmsg_priority)); +} + +#pragma mark - +#pragma mark dispatch_mach_t + +static inline mach_msg_option_t +_dispatch_mach_checkin_options(void) +{ + mach_msg_option_t options = 0; +#if DISPATCH_USE_CHECKIN_NOIMPORTANCE + options = MACH_SEND_NOIMPORTANCE; // +#endif + return options; +} + + +static inline mach_msg_option_t +_dispatch_mach_send_options(void) +{ + mach_msg_option_t options = 0; + return options; +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_mach_priority_propagate(mach_msg_option_t options) +{ +#if DISPATCH_USE_NOIMPORTANCE_QOS + if (options & MACH_SEND_NOIMPORTANCE) return 0; +#else + (void)options; +#endif + return _dispatch_priority_propagate(); +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + dispatch_continuation_t dc_wait, mach_msg_option_t options) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { + DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued"); + } + dispatch_retain(dmsg); + pthread_priority_t priority = _dispatch_mach_priority_propagate(options); + options |= _dispatch_mach_send_options(); + dmsg->dmsg_options = options; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg); + bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE); + dmsg->dmsg_priority = priority; + dmsg->dmsg_voucher = _voucher_copy(); + _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); + + uint32_t send_status; + bool returning_send_result = false; + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) { + send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND; + } + if (is_reply && !dmsg->dmsg_reply && !dmsr->dmsr_disconnect_cnt && + !(dm->dq_atomic_flags & DSF_CANCELED)) { + // replies are sent to a send-once right and don't need the send queue + dispatch_assert(!dc_wait); + send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags); + dispatch_assert(send_status); + returning_send_result = !!(send_status & + DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT); + } else { + _dispatch_voucher_ktrace_dmsg_push(dmsg); + dispatch_object_t dou = { ._dmsg = dmsg }; + if (dc_wait) dou._dc = dc_wait; + returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou, + _dispatch_qos_from_pp(priority), send_flags); + } + if (returning_send_result) { + _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg); + if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = NULL; + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dispatch_release(dmsg); + } + return returning_send_result; +} + +DISPATCH_NOINLINE +void +dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options) +{ + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + dispatch_assert(!returned_send_result); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options, dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t 
*send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; +} + +static inline +dispatch_mach_msg_t +_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + bool *returned_send_result) +{ + mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); + if (!reply_port) { + // use per-thread mach reply port + reply_port = _dispatch_get_thread_reply_port(); + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); + dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == + MACH_MSG_TYPE_MAKE_SEND_ONCE); + hdr->msgh_local_port = reply_port; + options |= DISPATCH_MACH_OWNED_REPLY_PORT; + } + + dispatch_mach_reply_refs_t dmr; +#if DISPATCH_DEBUG + dmr = _dispatch_calloc(1, sizeof(*dmr)); +#else + struct dispatch_mach_reply_refs_s dmr_buf = { }; + dmr = &dmr_buf; +#endif + struct dispatch_continuation_s dc_wait = { + .dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT, + .dc_data = dmsg, + .dc_other = dmr, + .dc_priority = DISPATCH_NO_PRIORITY, + .dc_voucher = DISPATCH_NO_VOUCHER, + }; + dmr->dmr_ctxt = dmsg->do_ctxt; + dmr->dmr_waiter_tid = _dispatch_tid_self(); + *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); + if (options & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_clear_thread_reply_port(reply_port); + } + dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port); +#if DISPATCH_DEBUG + free(dmr); +#endif + return dmsg; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options) +{ + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_WAIT_FOR_REPLY; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + dispatch_assert(!returned_send_result); + return reply; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_WAIT_FOR_REPLY; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; + return reply; +} + +DISPATCH_NOINLINE +void 
+dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + if (unlikely(!dm->dm_is_xpc)) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_mach_send_with_result_and_wait_for_reply is XPC only"); + } + + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); + if (!reply_port) { + DISPATCH_CLIENT_CRASH(0, "Reply port needed for async send with reply"); + } + options |= DISPATCH_MACH_ASYNC_REPLY; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_disconnect(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + bool disconnected; + if (_dispatch_unote_registered(dmsr)) { + _dispatch_mach_notification_kevent_unregister(dm); + } + if (MACH_PORT_VALID(dmsr->dmsr_send)) { + _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send); + } + dmsr->dmsr_send = MACH_PORT_NULL; + if (dmsr->dmsr_checkin) { + _dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin); + dmsr->dmsr_checkin = NULL; + } + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + dispatch_mach_reply_refs_t dmr, tmp; + TAILQ_FOREACH_SAFE(dmr, &dm->dm_send_refs->dmsr_replies, dmr_list, tmp) { + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + if (_dispatch_unote_registered(dmr)) { + _dispatch_mach_reply_kevent_unregister(dm, dmr, + DU_UNREGISTER_DISCONNECTED); + } else { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DU_UNREGISTER_DISCONNECTED); + } + } + disconnected = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + return disconnected; +} + +static void +_dispatch_mach_cancel(dispatch_mach_t dm) +{ + _dispatch_object_debug(dm, "%s", __func__); + if (!_dispatch_mach_disconnect(dm)) return; + + bool uninstalled = true; + dispatch_assert(!dm->dm_uninstalled); + + if (dm->dm_xpc_term_refs) { + uninstalled = _dispatch_unote_unregister(dm->dm_xpc_term_refs, 0); + } + + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + mach_port_t local_port = (mach_port_t)dmrr->du_ident; + if (local_port) { + _dispatch_source_refs_unregister(dm->_as_ds, 0); + if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) { + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + dmrr->du_ident = 0; + } else { + uninstalled = false; + } + } else { + _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED, + DSF_ARMED | DSF_DEFERRED_DELETE); + } + if (uninstalled) dm->dm_uninstalled = uninstalled; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) +{ + if (!_dispatch_mach_disconnect(dm)) return false; + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dmsr->dmsr_checkin = dou._dc->dc_data; + dmsr->dmsr_send = (mach_port_t)dou._dc->dc_other; + 
_dispatch_continuation_free(dou._dc); + (void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed); + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_release(dm); // + return true; +} + +DISPATCH_NOINLINE +void +dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, + dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + (void)os_atomic_inc2o(dmsr, dmsr_disconnect_cnt, relaxed); + if (MACH_PORT_VALID(send) && checkin) { + dispatch_mach_msg_t dmsg = checkin; + dispatch_retain(dmsg); + dmsg->dmsg_options = _dispatch_mach_checkin_options(); + dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg); + } else { + checkin = NULL; + dmsr->dmsr_checkin_port = MACH_PORT_NULL; + } + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT; + // actually called manually in _dispatch_mach_send_drain + dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; + dc->dc_ctxt = dc; + dc->dc_data = checkin; + dc->dc_other = (void*)(uintptr_t)send; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_retain(dm); // + return _dispatch_mach_send_push(dm, dc, 0); +} + +DISPATCH_NOINLINE +mach_port_t +dispatch_mach_get_checkin_port(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) { + return MACH_PORT_DEAD; + } + return dmsr->dmsr_checkin_port; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_connect_invoke(dispatch_mach_t dm) +{ + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_CONNECTED, NULL, 0, dmrr->dmrr_handler_func); + dm->dm_connect_handler_called = 1; + _dispatch_perfmon_workitem_inc(); +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, + dispatch_invoke_flags_t flags, dispatch_mach_t dm) +{ + dispatch_mach_recv_refs_t dmrr; + mach_error_t err; + unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); + dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| + DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; + + dmrr = dm->dm_recv_refs; + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + _dispatch_voucher_ktrace_dmsg_pop(dmsg); + _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg); + (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority, + dmsg->dmsg_voucher, adopt_flags); + dmsg->dmsg_voucher = NULL; + dispatch_invoke_with_autoreleasepool(flags, { + if (flags & DISPATCH_INVOKE_ASYNC_REPLY) { + _dispatch_client_callout3(dmrr->dmrr_handler_ctxt, reason, dmsg, + _dispatch_mach_xpc_hooks->dmxh_async_reply_handler); + } else { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, reason, dmsg, + err, dmrr->dmrr_handler_func); + } + _dispatch_perfmon_workitem_inc(); + }); + _dispatch_introspection_queue_item_complete(dmsg); + dispatch_release(dmsg); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + dispatch_thread_frame_s dtf; + + // hide mach channel + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf); + _dispatch_mach_msg_invoke_with_mach(dmsg, flags, dm); + _dispatch_thread_frame_unstash(&dtf); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_barrier_invoke(dispatch_continuation_t dc, + 
DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + dispatch_thread_frame_s dtf; + dispatch_mach_t dm = dc->dc_other; + dispatch_mach_recv_refs_t dmrr; + uintptr_t dc_flags = (uintptr_t)dc->dc_data; + unsigned long type = dc_type(dc); + + // hide mach channel from clients + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + // on the send queue, the mach channel isn't the current queue + // its target queue is the current one already + _dispatch_thread_frame_stash(&dtf); + } + dmrr = dm->dm_recv_refs; + DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, { + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, + dmrr->dmrr_handler_func); + }); + }); + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + _dispatch_thread_frame_unstash(&dtf); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc, + dispatch_mach_t dm, dispatch_continuation_vtable_t vtable) +{ + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = vtable; // Must be after dc_flags load, dc_vtable aliases +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dispatch_qos_t qos; + + _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER)); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + qos = _dispatch_continuation_override_qos(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, qos); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dispatch_qos_t qos; + + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER)); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + qos = _dispatch_continuation_override_qos(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, qos); +} + +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); + return _dispatch_continuation_async(dm->_as_dq, dc); +} + +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); + return _dispatch_continuation_async(dm->_as_dq, dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) +{ + 
dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_CANCELED, NULL, 0, dmrr->dmrr_handler_func); + _dispatch_perfmon_workitem_inc(); + }); + dm->dm_cancel_handler_called = 1; + _dispatch_release(dm); // the retain is done at creation time +} + +DISPATCH_NOINLINE +void +dispatch_mach_cancel(dispatch_mach_t dm) +{ + dispatch_source_cancel(dm->_as_ds); +} + +static void +_dispatch_mach_install(dispatch_mach_t dm, dispatch_priority_t pri, + dispatch_wlh_t wlh) +{ + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + uint32_t disconnect_cnt; + + if (!dm->dq_wlh && wlh) { + _dispatch_queue_class_record_wlh_hierarchy(dm, wlh); + } + if (dmrr->du_ident) { + _dispatch_source_refs_register(dm->_as_ds, pri); + } + if (dm->dm_xpc_term_refs) { + _dispatch_unote_register(dm->dm_xpc_term_refs, dm->dq_wlh, pri); + } + if (dmrr->du_is_direct && !dm->dq_priority) { + // _dispatch_mach_reply_kevent_register assumes this has been done + // which is unlike regular sources or queues, the DEFAULTQUEUE flag + // is used so that the priority of the channel doesn't act as + // a QoS floor for incoming messages (26761457) + dm->dq_priority = pri; + } + dm->ds_is_installed = true; + if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_send_refs, dmsr_disconnect_cnt, + DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) { + DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed"); + } +} + +void +_dispatch_mach_finalize_activation(dispatch_mach_t dm) +{ + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + + // call "super" + _dispatch_queue_finalize_activation(dm->_as_dq); + + if (dmrr->du_is_direct && !dm->ds_is_installed) { + dispatch_source_t ds = dm->_as_ds; + dispatch_priority_t pri = _dispatch_source_compute_kevent_priority(ds); + if (pri) { + dispatch_wlh_t wlh = dm->dq_wlh; + if (!wlh) wlh = _dispatch_queue_class_compute_wlh(dm); + _dispatch_mach_install(dm, pri, wlh); + } + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_tryarm(dispatch_mach_t dm, dispatch_queue_flags_t *out_dqf) +{ + dispatch_queue_flags_t oqf, nqf; + bool rc = os_atomic_rmw_loop2o(dm, dq_atomic_flags, oqf, nqf, relaxed, { + nqf = oqf; + if (nqf & (DSF_ARMED | DSF_CANCELED | DSF_DEFERRED_DELETE | + DSF_DELETED)) { + // the test is inside the loop because it's convenient but the + // result should not change for the duration of the rmw_loop + os_atomic_rmw_loop_give_up(break); + } + nqf |= DSF_ARMED; + }); + if (out_dqf) *out_dqf = nqf; + return rc; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_mach_invoke2(dispatch_object_t dou, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) +{ + dispatch_mach_t dm = dou._dm; + dispatch_queue_wakeup_target_t retq = NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); + + flags |= DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS; + + // This function performs all mach channel actions. Each action is + // responsible for verifying that it takes place on the appropriate queue. + // If the current queue is not the correct queue for this action, the + // correct queue will be returned and the invoke will be re-driven on that + // queue. + + // The order of tests here in invoke and in wakeup should be consistent. 
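+	// (Roughly: install on the kevent queue, drain enqueued work, re-arm the
+	// direct receive unote, flush the send queue, then handle cancellation
+	// and deliver the cancel handler on the target queue.)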
+ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + dispatch_queue_t dkq = &_dispatch_mgr_q; + + if (dmrr->du_is_direct) { + dkq = dm->do_targetq; + } + + if (unlikely(!dm->ds_is_installed)) { + // The channel needs to be installed on the kevent queue. + if (dq != dkq) { + return dkq; + } + _dispatch_mach_install(dm, _dispatch_get_basepri(),_dispatch_get_wlh()); + _dispatch_perfmon_workitem_inc(); + } + + if (_dispatch_queue_class_probe(dm)) { + if (dq == dm->do_targetq) { +drain: + retq = _dispatch_queue_serial_drain(dm->_as_dq, dic, flags, owned); + } else { + retq = dm->do_targetq; + } + } + + dispatch_queue_flags_t dqf = 0; + if (!retq && dmrr->du_is_direct) { + if (_dispatch_mach_tryarm(dm, &dqf)) { + _dispatch_unote_resume(dmrr); + if (dq == dm->do_targetq && !dq->do_targetq && !dmsr->dmsr_tail && + (dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) && + dmrr->du_wlh != DISPATCH_WLH_GLOBAL) { + // try to redrive the drain from under the lock for channels + // targeting an overcommit root queue to avoid parking + // when the next message has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + if (dm->dq_items_tail) goto drain; + } + } + } else { + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + } + + if (dmsr->dmsr_tail) { + bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt && + (_dispatch_unote_registered(dmsr) || !dmrr->du_is_direct)); + if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) || + (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) { + // The channel has pending messages to send. + if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) { + return retq ? retq : &_dispatch_mgr_q; + } + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (dq != &_dispatch_mgr_q) { + send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER; + } + _dispatch_mach_send_invoke(dm, flags, send_flags); + } + if (!retq) retq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } else if (!retq && (dqf & DSF_CANCELED)) { + // The channel has been cancelled and needs to be uninstalled from the + // manager queue. After uninstallation, the cancellation handler needs + // to be delivered to the target queue. + if (!dm->dm_uninstalled) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + if (dq != &_dispatch_mgr_q) { + return retq ? retq : &_dispatch_mgr_q; + } + _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL); + if (unlikely(!dm->dm_uninstalled)) { + // waiting for the delivery of a deferred delete event + // or deletion didn't happen because send_invoke couldn't + // acquire the send lock + return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + } + if (!dm->dm_cancel_handler_called) { + if (dq != dm->do_targetq) { + return retq ? retq : dm->do_targetq; + } + _dispatch_mach_cancel_invoke(dm, flags); + } + } + + return retq; +} + +DISPATCH_NOINLINE +void +_dispatch_mach_invoke(dispatch_mach_t dm, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dm, dic, flags, _dispatch_mach_invoke2); +} + +void +_dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + // This function determines whether the mach channel needs to be invoked. + // The order of tests here in probe and in invoke should be consistent. 
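+	// (This only computes a wakeup target; the corresponding work is done by
+	// _dispatch_mach_invoke2() once the channel is driven on that target.)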
+ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + + if (dmrr->du_is_direct) { + dkq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + + if (!dm->ds_is_installed) { + // The channel needs to be installed on the kevent queue. + tq = dkq; + goto done; + } + + if (_dispatch_queue_class_probe(dm)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + goto done; + } + + if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) { + // Sending and uninstallation below require the send lock, the channel + // will be woken up when the lock is dropped + goto done; + } + + if (dmsr->dmsr_tail) { + bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt && + (_dispatch_unote_registered(dmsr) || !dmrr->du_is_direct)); + if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) || + (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) { + if (unlikely(requires_mgr)) { + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } else { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } + } else if (dqf & DSF_CANCELED) { + if (!dm->dm_uninstalled) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + } else { + // The channel needs to be uninstalled from the manager queue + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } + } else if (!dm->dm_cancel_handler_called) { + // the cancellation handler needs to be delivered to the target + // queue. + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } + +done: + if (tq) { + return _dispatch_queue_class_wakeup(dm->_as_dq, qos, flags, tq); + } else if (qos) { + return _dispatch_queue_class_override_drainer(dm->_as_dq, qos, flags); + } else if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dm); + } +} + +static void +_dispatch_mach_sigterm_invoke(void *ctx) +{ + dispatch_mach_t dm = ctx; + if (!(dm->dq_atomic_flags & DSF_CANCELED)) { + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_SIGTERM_RECEIVED, NULL, 0, + dmrr->dmrr_handler_func); + } +} + +void +_dispatch_xpc_sigterm_merge(dispatch_unote_t du, + uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, + uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp) +{ + dispatch_mach_t dm = _dispatch_wref2ptr(du._du->du_owner_wref); + uint32_t options = 0; + if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && + !(flags & EV_DELETE)) { + options = DU_UNREGISTER_IMMEDIATE_DELETE; + } else { + dispatch_assert((flags & EV_ONESHOT) && (flags & EV_DELETE)); + options = DU_UNREGISTER_ALREADY_DELETED; + } + _dispatch_unote_unregister(du, options); + + if (!(dm->dq_atomic_flags & DSF_CANCELED)) { + _dispatch_barrier_async_detached_f(dm->_as_dq, dm, + _dispatch_mach_sigterm_invoke); + } else { + dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_FLUSH); + } +} + +#pragma mark - +#pragma mark dispatch_mach_msg_t + +dispatch_mach_msg_t +dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, + dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr) +{ + if (slowpath(size < sizeof(mach_msg_header_t)) || + slowpath(destructor && !msg)) { + DISPATCH_CLIENT_CRASH(size, "Empty message"); + } + dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), + sizeof(struct dispatch_mach_msg_s) + + (destructor ? 
0 : size - sizeof(dmsg->dmsg_msg)));
+	if (destructor) {
+		dmsg->dmsg_msg = msg;
+	} else if (msg) {
+		memcpy(dmsg->dmsg_buf, msg, size);
+	}
+	dmsg->do_next = DISPATCH_OBJECT_LISTLESS;
+	dmsg->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false);
+	dmsg->dmsg_destructor = destructor;
+	dmsg->dmsg_size = size;
+	if (msg_ptr) {
+		*msg_ptr = _dispatch_mach_msg_get_msg(dmsg);
+	}
+	return dmsg;
+}
+
+void
+_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg)
+{
+	if (dmsg->dmsg_voucher) {
+		_voucher_release(dmsg->dmsg_voucher);
+		dmsg->dmsg_voucher = NULL;
+	}
+	switch (dmsg->dmsg_destructor) {
+	case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT:
+		break;
+	case DISPATCH_MACH_MSG_DESTRUCTOR_FREE:
+		free(dmsg->dmsg_msg);
+		break;
+	case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: {
+		mach_vm_size_t vm_size = dmsg->dmsg_size;
+		mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg;
+		(void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(),
+				vm_addr, vm_size));
+		break;
+	}}
+}
+
+static inline mach_msg_header_t*
+_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg)
+{
+	return dmsg->dmsg_destructor ? dmsg->dmsg_msg :
+			(mach_msg_header_t*)dmsg->dmsg_buf;
+}
+
+mach_msg_header_t*
+dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr)
+{
+	if (size_ptr) {
+		*size_ptr = dmsg->dmsg_size;
+	}
+	return _dispatch_mach_msg_get_msg(dmsg);
+}
+
+size_t
+_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz)
+{
+	size_t offset = 0;
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
+			dx_kind(dmsg), dmsg);
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, "
+			"refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1);
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, "
+			"msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf);
+	mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg);
+	if (hdr->msgh_id) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ",
+				hdr->msgh_id);
+	}
+	if (hdr->msgh_size) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ",
+				hdr->msgh_size);
+	}
+	if (hdr->msgh_bits) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u",
+				MACH_MSGH_BITS_LOCAL(hdr->msgh_bits),
+				MACH_MSGH_BITS_REMOTE(hdr->msgh_bits));
+		if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) {
+			offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x",
+					MACH_MSGH_BITS_OTHER(hdr->msgh_bits));
+		}
+		offset += dsnprintf(&buf[offset], bufsiz - offset, ">, ");
+	}
+	if (hdr->msgh_local_port && hdr->msgh_remote_port) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, "
+				"remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port);
+	} else if (hdr->msgh_local_port) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x",
+				hdr->msgh_local_port);
+	} else if (hdr->msgh_remote_port) {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x",
+				hdr->msgh_remote_port);
+	} else {
+		offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports");
+	}
+	offset += dsnprintf(&buf[offset], bufsiz - offset, " } }");
+	return offset;
+}
+
+DISPATCH_ALWAYS_INLINE
+static dispatch_queue_t
+_dispatch_mach_msg_context_async_reply_queue(void *msg_context)
+{
+	if (DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(_dispatch_mach_xpc_hooks)) {
+		return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(
+				msg_context);
+	}
+	return NULL;
+}
+
+static dispatch_continuation_t
+_dispatch_mach_msg_async_reply_wrap(dispatch_mach_msg_t dmsg,
+		dispatch_mach_t dm)
+{
+	_dispatch_retain(dm); // 
Released in _dispatch_mach_msg_async_reply_invoke() + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = DC_VTABLE(MACH_ASYNC_REPLY); + dc->dc_data = dmsg; + dc->dc_other = dm; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + return dc; +} + +DISPATCH_NOINLINE +void +_dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + // _dispatch_mach_msg_invoke_with_mach() releases the reference on dmsg + // taken by _dispatch_mach_msg_async_reply_wrap() after handling it. + dispatch_mach_msg_t dmsg = dc->dc_data; + dispatch_mach_t dm = dc->dc_other; + _dispatch_mach_msg_invoke_with_mach(dmsg, + flags | DISPATCH_INVOKE_ASYNC_REPLY, dm); + + // Balances _dispatch_mach_msg_async_reply_wrap + _dispatch_release(dc->dc_other); + + _dispatch_continuation_free(dc); +} + +#pragma mark - +#pragma mark dispatch_mig_server + +mach_msg_return_t +dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, + dispatch_mig_callback_t callback) +{ + mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT + | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) + | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; + mach_msg_options_t tmp_options; + mig_reply_error_t *bufTemp, *bufRequest, *bufReply; + mach_msg_return_t kr = 0; + uint64_t assertion_token = 0; + uint32_t cnt = 1000; // do not stall out serial queues + boolean_t demux_success; + bool received = false; + size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; + dispatch_source_refs_t dr = ds->ds_refs; + + bufRequest = alloca(rcv_size); + bufRequest->RetCode = 0; + for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size); + p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + + bufReply = alloca(rcv_size); + bufReply->Head.msgh_size = 0; + for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size); + p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + +#if DISPATCH_DEBUG + options |= MACH_RCV_LARGE; // rdar://problem/8422992 +#endif + tmp_options = options; + // XXX FIXME -- change this to not starve out the target queue + for (;;) { + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { + options &= ~MACH_RCV_MSG; + tmp_options &= ~MACH_RCV_MSG; + + if (!(tmp_options & MACH_SEND_MSG)) { + goto out; + } + } + kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size, + (mach_msg_size_t)rcv_size, (mach_port_t)dr->du_ident, 0, 0); + + tmp_options = options; + + if (slowpath(kr)) { + switch (kr) { + case MACH_SEND_INVALID_DEST: + case MACH_SEND_TIMED_OUT: + if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_destroy(&bufReply->Head); + } + break; + case MACH_RCV_TIMED_OUT: + // Don't return an error if a message was sent this time or + // a message was successfully received previously + // rdar://problems/7363620&7791738 + if(bufReply->Head.msgh_remote_port || received) { + kr = MACH_MSG_SUCCESS; + } + break; + case MACH_RCV_INVALID_NAME: + break; +#if DISPATCH_DEBUG + case MACH_RCV_TOO_LARGE: + // receive messages that are too large and log their id and size + // rdar://problem/8422992 + tmp_options &= ~MACH_RCV_LARGE; + size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; + void *large_buf = malloc(large_size); + if (large_buf) { + 
rcv_size = large_size; + bufReply = large_buf; + } + if (!mach_msg(&bufReply->Head, tmp_options, 0, + (mach_msg_size_t)rcv_size, + (mach_port_t)dr->du_ident, 0, 0)) { + _dispatch_log("BUG in libdispatch client: " + "dispatch_mig_server received message larger than " + "requested size %zd: id = 0x%x, size = %d", + maxmsgsz, bufReply->Head.msgh_id, + bufReply->Head.msgh_size); + } + if (large_buf) { + free(large_buf); + } + // fall through +#endif + default: + _dispatch_bug_mach_client( + "dispatch_mig_server: mach_msg() failed", kr); + break; + } + goto out; + } + + if (!(tmp_options & MACH_RCV_MSG)) { + goto out; + } + + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + assertion_token = 0; + } + received = true; + + bufTemp = bufRequest; + bufRequest = bufReply; + bufReply = bufTemp; + +#if DISPATCH_USE_IMPORTANCE_ASSERTION +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head, + NULL, &assertion_token); + if (r && slowpath(r != EIO)) { + (void)dispatch_assume_zero(r); + } +#pragma clang diagnostic pop +#endif + _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head)); + demux_success = callback(&bufRequest->Head, &bufReply->Head); + + if (!demux_success) { + // destroy the request - but not the reply port + bufRequest->Head.msgh_remote_port = 0; + mach_msg_destroy(&bufRequest->Head); + } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { + // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode + // is present + if (slowpath(bufReply->RetCode)) { + if (bufReply->RetCode == MIG_NO_REPLY) { + continue; + } + + // destroy the request - but not the reply port + bufRequest->Head.msgh_remote_port = 0; + mach_msg_destroy(&bufRequest->Head); + } + } + + if (bufReply->Head.msgh_remote_port) { + tmp_options |= MACH_SEND_MSG; + if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != + MACH_MSG_TYPE_MOVE_SEND_ONCE) { + tmp_options |= MACH_SEND_TIMEOUT; + } + } + } + +out: + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + } + + return kr; +} + +#pragma mark - +#pragma mark dispatch_mach_debug + +static size_t +_dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz) +{ + dispatch_queue_t target = dm->do_targetq; + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + + return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " + "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " + "send state = %016llx, disconnected = %d, canceled = %d ", + target && target->dq_label ? target->dq_label : "", target, + (mach_port_t)dmrr->du_ident, dmsr->dmsr_send, + (mach_port_t)dmsr->du_ident, + dmsr->dmsr_notification_armed ? " (armed)" : "", + dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? " (pending)" : "", + dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt, + (bool)(dm->dq_atomic_flags & DSF_CANCELED)); +} + +size_t +_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dm->dq_label && !dm->dm_cancel_handler_called ? 
dm->dq_label :
+			dx_kind(dm), dm);
+	offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset);
+	offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset);
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "}");
+	return offset;
+}
+
+#endif /* HAVE_MACH */
diff --git a/src/mach_internal.h b/src/mach_internal.h
new file mode 100644
index 000000000..8600a3897
--- /dev/null
+++ b/src/mach_internal.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_MACH_INTERNAL__
+#define __DISPATCH_MACH_INTERNAL__
+#if HAVE_MACH
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t
+// bit values must not overlap as they share the same kevent fflags !
+
+/*!
+ * @enum dispatch_source_mach_send_flags_t
+ *
+ * @constant DISPATCH_MACH_SEND_DELETED
+ * Port-deleted notification. Disabled for source registration.
+ */
+enum {
+	DISPATCH_MACH_SEND_DELETED = 0x4,
+};
+/*!
+ * @enum dispatch_source_mach_recv_flags_t + * + * @constant DISPATCH_MACH_RECV_MESSAGE + * Receive right has pending messages + */ +enum { + DISPATCH_MACH_RECV_MESSAGE = 0x2, +}; + + +DISPATCH_CLASS_DECL(mach); +DISPATCH_CLASS_DECL(mach_msg); + +#ifndef __cplusplus +struct dispatch_mach_s { + DISPATCH_SOURCE_HEADER(mach); + dispatch_mach_send_refs_t dm_send_refs; + dispatch_xpc_term_refs_t dm_xpc_term_refs; +} DISPATCH_ATOMIC64_ALIGN; + +struct dispatch_mach_msg_s { + DISPATCH_OBJECT_HEADER(mach_msg); + union { + mach_msg_option_t dmsg_options; + mach_error_t dmsg_error; + }; + mach_port_t dmsg_reply; + pthread_priority_t dmsg_priority; + voucher_t dmsg_voucher; + dispatch_mach_msg_destructor_t dmsg_destructor; + size_t dmsg_size; + union { + mach_msg_header_t *dmsg_msg; + char dmsg_buf[0]; + }; +}; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_xref_dispose(struct dispatch_mach_s *dm) +{ + if (dm->dm_is_xpc) { + dm->dm_recv_refs->dmrr_handler_ctxt = (void *)0xbadfeed; + } +} +#endif // __cplusplus + +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc); + +void _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_mach_dispose(dispatch_mach_t dm); +void _dispatch_mach_finalize_activation(dispatch_mach_t dm); +void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags); +void _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags); +size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); +void _dispatch_mach_merge_notification(dispatch_unote_t du, + uint32_t flags, uintptr_t data, uintptr_t status, + pthread_priority_t pp); +void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t msgsz); +void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t msgsz); +void _dispatch_xpc_sigterm_merge(dispatch_unote_t du, uint32_t flags, + uintptr_t data, uintptr_t status, pthread_priority_t pp); + +void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); +void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, + size_t bufsiz); + +void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); + +#endif // HAVE_MACH +#endif /* __DISPATCH_MACH_INTERNAL__ */ diff --git a/src/object.c b/src/object.c index 1928df53f..1ca41bc73 100644 --- a/src/object.c +++ b/src/object.c @@ -89,21 +89,19 @@ _os_object_release(_os_object_t obj) bool _os_object_retain_weak(_os_object_t obj) { - int xref_cnt = obj->os_obj_xref_cnt; - if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return true; // global object - } -retry: - if (slowpath(xref_cnt == -1)) { - return false; - } - if (slowpath(xref_cnt < -1)) { - goto overrelease; - } - if (slowpath(!os_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, - xref_cnt + 1, &xref_cnt, relaxed))) { - goto retry; - } + int xref_cnt, nxref_cnt; + os_atomic_rmw_loop2o(obj, os_obj_xref_cnt, xref_cnt, nxref_cnt, relaxed, { + if (slowpath(xref_cnt == 
_OS_OBJECT_GLOBAL_REFCNT)) { + os_atomic_rmw_loop_give_up(return true); // global object + } + if (slowpath(xref_cnt == -1)) { + os_atomic_rmw_loop_give_up(return false); + } + if (slowpath(xref_cnt < -1)) { + os_atomic_rmw_loop_give_up(goto overrelease); + } + nxref_cnt = xref_cnt + 1; + }); return true; overrelease: _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); @@ -181,6 +179,10 @@ _dispatch_xref_dispose(dispatch_object_t dou) } if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { _dispatch_source_xref_dispose(dou._ds); +#if HAVE_MACH + } else if (dx_type(dou._do) == DISPATCH_MACH_CHANNEL_TYPE) { + _dispatch_mach_xref_dispose(dou._dm); +#endif } else if (dx_type(dou._do) == DISPATCH_QUEUE_RUNLOOP_TYPE) { _dispatch_runloop_queue_xref_dispose(dou._dq); } @@ -240,7 +242,7 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) } else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && !slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { if (slowpath(!tq)) { - tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); } _dispatch_object_set_target_queue_inline(dou._do, tq); } diff --git a/src/object.m b/src/object.m index 323c98b47..59cbc9d5d 100644 --- a/src/object.m +++ b/src/object.m @@ -126,6 +126,24 @@ return objc_release(obj); } +void +_os_object_atfork_prepare(void) +{ + return _objc_atfork_prepare(); +} + +void +_os_object_atfork_parent(void) +{ + return _objc_atfork_parent(); +} + +void +_os_object_atfork_child(void) +{ + return _objc_atfork_child(); +} + #pragma mark - #pragma mark _os_object @@ -233,7 +251,7 @@ - (void)_dispose { NSUInteger offset = 0; NSString *desc = [dou debugDescription]; [desc getBytes:buf maxLength:bufsiz-1 usedLength:&offset - encoding:NSUTF8StringEncoding options:0 + encoding:NSUTF8StringEncoding options:(NSStringEncodingConversionOptions)0 range:NSMakeRange(0, [desc length]) remainingRange:NULL]; if (offset) buf[offset] = 0; return offset; @@ -263,9 +281,9 @@ - (NSString *)debugDescription { } else { strlcpy(buf, dx_kind(obj), sizeof(buf)); } - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s>"], - class_getName([self class]), buf]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return [nsstring stringWithFormat:format, class_getName([self class]), buf]; } @end @@ -277,9 +295,10 @@ @implementation DISPATCH_CLASS(queue) - (NSString *)description { Class nsstring = objc_lookUpClass("NSString"); if (!nsstring) return nil; - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s[%p]>"], - class_getName([self class]), dispatch_queue_get_label(self), self]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return [nsstring stringWithFormat:format, class_getName([self class]), + dispatch_queue_get_label(self), self]; } - (void)_xref_dispose { @@ -307,6 +326,7 @@ @implementation DISPATCH_CLASS(mach) - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + _dispatch_mach_xref_dispose((struct dispatch_mach_s *)self); [super _xref_dispose]; } @@ -364,9 +384,9 @@ - (NSString *)debugDescription { if (!nsstring) return nil; char buf[2048]; _voucher_debug(self, buf, sizeof(buf)); - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s>"], - class_getName([self class]), buf]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return 
[nsstring stringWithFormat:format, class_getName([self class]), buf]; } @end @@ -448,6 +468,19 @@ - (NSString *)debugDescription { } #if HAVE_MACH +#undef _dispatch_client_callout3 +void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f) +{ + @try { + return f(ctxt, reason, dmsg); + } + @catch (...) { + objc_terminate(); + } +} + #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, diff --git a/src/object_internal.h b/src/object_internal.h index 80bb10251..abc3f4811 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -179,12 +179,15 @@ #define DISPATCH_INVOKABLE_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_flags_t) + void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ + dispatch_invoke_flags_t); \ + void (*const do_push)(struct x##_s *, dispatch_object_t, \ + dispatch_qos_t) #define DISPATCH_QUEUEABLE_VTABLE_HEADER(x) \ DISPATCH_INVOKABLE_VTABLE_HEADER(x); \ void (*const do_wakeup)(struct x##_s *, \ - pthread_priority_t, dispatch_wakeup_flags_t); \ + dispatch_qos_t, dispatch_wakeup_flags_t); \ void (*const do_dispose)(struct x##_s *) #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ @@ -203,7 +206,8 @@ #define dx_kind(x) dx_vtable(x)->do_kind #define dx_debug(x, y, z) dx_vtable(x)->do_debug((x), (y), (z)) #define dx_dispose(x) dx_vtable(x)->do_dispose(x) -#define dx_invoke(x, z) dx_vtable(x)->do_invoke(x, z) +#define dx_invoke(x, y, z) dx_vtable(x)->do_invoke(x, y, z) +#define dx_push(x, y, z) dx_vtable(x)->do_push(x, y, z) #define dx_wakeup(x, y, z) dx_vtable(x)->do_wakeup(x, y, z) #define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT @@ -226,8 +230,10 @@ // we sign extend the 64-bit version so that a better instruction encoding is // generated on Intel #define DISPATCH_OBJECT_LISTLESS ((void *)0xffffffff89abcdef) +#define DISPATCH_OBJECT_WLH_REQ ((void *)0xffffffff7009cdef) #else #define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) +#define DISPATCH_OBJECT_WLH_REQ ((void *)0x7009cdef) #endif DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, @@ -241,23 +247,39 @@ DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, // involved before dx_wakeup returns DISPATCH_WAKEUP_FLUSH = 0x00000002, - // A slow waiter was just enqueued - DISPATCH_WAKEUP_SLOW_WAITER = 0x00000004, + // The caller desires to apply an override on the object being woken up. + // When this flag is passed, the qos passed to dx_wakeup() should not be 0 + DISPATCH_WAKEUP_OVERRIDING = 0x00000004, - // The caller desires to apply an override on the object being woken up - // and has already adjusted the `oq_override` field. When this flag is - // passed, the priority passed to dx_wakeup() should not be 0 - DISPATCH_WAKEUP_OVERRIDING = 0x00000008, + // This wakeup is caused by a handoff from a slow waiter. 
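The object_internal.h changes above add a do_push entry to the shared vtable header and a dx_push() accessor next to dx_invoke()/dx_wakeup(), so enqueue and drain both route through per-class function pointers. Below is a minimal, self-contained C sketch of that push/invoke-through-a-vtable pattern; the example_* names are hypothetical and this is not libdispatch code.

#include <stdio.h>

/* Hypothetical object and vtable layout, illustrating the pattern only. */
typedef struct example_object_s example_object_s;

typedef struct example_vtable_s {
    const char *kind;
    void (*do_push)(example_object_s *obj, int item);
    void (*do_invoke)(example_object_s *obj);
} example_vtable_s;

struct example_object_s {
    const example_vtable_s *vtable;
    int last_item;
};

/* Accessor macros in the spirit of dx_push()/dx_invoke(). */
#define ex_vtable(x)   ((x)->vtable)
#define ex_push(x, y)  ex_vtable(x)->do_push((x), (y))
#define ex_invoke(x)   ex_vtable(x)->do_invoke(x)

static void queue_push(example_object_s *obj, int item) {
    obj->last_item = item;
    printf("%s: pushed %d\n", ex_vtable(obj)->kind, item);
}

static void queue_invoke(example_object_s *obj) {
    printf("%s: draining item %d\n", ex_vtable(obj)->kind, obj->last_item);
}

static const example_vtable_s queue_vtable = {
    .kind = "example-queue",
    .do_push = queue_push,
    .do_invoke = queue_invoke,
};

int main(void) {
    example_object_s q = { .vtable = &queue_vtable };
    ex_push(&q, 42);   /* routed through the vtable, like dx_push() */
    ex_invoke(&q);     /* routed through the vtable, like dx_invoke() */
    return 0;
}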
+ DISPATCH_WAKEUP_WAITER_HANDOFF = 0x00000008, - // At the time this queue was woken up it had an override that must be - // preserved (used to solve a race with _dispatch_queue_drain_try_unlock()) - DISPATCH_WAKEUP_WAS_OVERRIDDEN = 0x00000010, - -#define _DISPATCH_WAKEUP_OVERRIDE_BITS \ - ((dispatch_wakeup_flags_t)(DISPATCH_WAKEUP_OVERRIDING | \ - DISPATCH_WAKEUP_WAS_OVERRIDDEN)) + // This wakeup is caused by a dispatch_block_wait() + DISPATCH_WAKEUP_BLOCK_WAIT = 0x00000010, ); +typedef struct dispatch_invoke_context_s { + struct dispatch_object_s *dic_deferred; +#if HAVE_PTHREAD_WORKQUEUE_NARROWING + uint64_t dic_next_narrow_check; +#endif +} dispatch_invoke_context_s, *dispatch_invoke_context_t; + +#if HAVE_PTHREAD_WORKQUEUE_NARROWING +#define DISPATCH_NARROW_CHECK_INTERVAL \ + _dispatch_time_nano2mach(50 * NSEC_PER_MSEC) +#define DISPATCH_THREAD_IS_NARROWING 1 + +#define dispatch_with_disabled_narrowing(dic, ...) ({ \ + uint64_t suspend_narrow_check = dic->dic_next_narrow_check; \ + dic->dic_next_narrow_check = 0; \ + __VA_ARGS__; \ + dic->dic_next_narrow_check = suspend_narrow_check; \ + }) +#else +#define dispatch_with_disabled_narrowing(dic, ...) __VA_ARGS__ +#endif + DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_NONE = 0x00000000, @@ -274,6 +296,17 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_STEALING = 0x00000001, DISPATCH_INVOKE_OVERRIDING = 0x00000002, + // Misc flags + // + // @const DISPATCH_INVOKE_ASYNC_REPLY + // An asynchronous reply to a message is being handled. + // + // @const DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS + // The next serial drain should not allow sync waiters. + // + DISPATCH_INVOKE_ASYNC_REPLY = 0x00000004, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS = 0x00000008, + // Below this point flags are propagated to recursive calls to drain(), // continuation pop() or dx_invoke(). 
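The dispatch_invoke_context hunk above introduces dispatch_with_disabled_narrowing(), which saves dic_next_narrow_check, zeroes it while the wrapped statement runs, and then restores it. Here is a minimal sketch of that save/zero/restore macro shape, written as a portable do/while(0) macro rather than the statement expression used in the real header; all names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for dispatch_invoke_context_s: only the field the
 * macro manipulates is modeled. A value of 0 means "checks disabled". */
typedef struct invoke_context_s {
    uint64_t next_narrow_check;
} invoke_context_s;

#define with_disabled_narrowing(dic, ...) do { \
        uint64_t _saved = (dic)->next_narrow_check; \
        (dic)->next_narrow_check = 0; \
        __VA_ARGS__; \
        (dic)->next_narrow_check = _saved; \
    } while (0)

static void drain_step(invoke_context_s *dic) {
    /* While the field is 0, a worker would skip its periodic narrowing
     * ("should this thread park?") check. */
    printf("narrow check deadline: %llu\n",
            (unsigned long long)dic->next_narrow_check);
}

int main(void) {
    invoke_context_s dic = { .next_narrow_check = 12345 };
    with_disabled_narrowing(&dic, drain_step(&dic));  /* prints 0 */
    drain_step(&dic);                                 /* restored to 12345 */
    return 0;
}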
#define _DISPATCH_INVOKE_PROPAGATE_MASK 0xffff0000u @@ -410,38 +443,30 @@ struct dispatch_object_s { #if OS_OBJECT_HAVE_OBJC1 #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ - struct dispatch_object_s *volatile ns##_items_head; \ - unsigned long ns##_serialnum; \ - union { \ - uint64_t volatile __state_field__; \ - DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + DISPATCH_UNION_LE(uint64_t volatile __state_field__, \ dispatch_lock __state_field__##_lock, \ uint32_t __state_field__##_bits \ - ); \ - }; /* needs to be 64-bit aligned */ \ - /* LP64 global queue cacheline boundary */ \ + ) DISPATCH_ATOMIC64_ALIGN; \ + struct dispatch_object_s *volatile ns##_items_head; \ + unsigned long ns##_serialnum; \ const char *ns##_label; \ - voucher_t ns##_override_voucher; \ - dispatch_priority_t ns##_priority; \ - dispatch_priority_t volatile ns##_override; \ - struct dispatch_object_s *volatile ns##_items_tail + dispatch_wlh_t ns##_wlh; \ + struct dispatch_object_s *volatile ns##_items_tail; \ + dispatch_priority_t ns##_priority #else #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ struct dispatch_object_s *volatile ns##_items_head; \ - union { \ - uint64_t volatile __state_field__; \ - DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + DISPATCH_UNION_LE(uint64_t volatile __state_field__, \ dispatch_lock __state_field__##_lock, \ uint32_t __state_field__##_bits \ - ); \ - }; /* needs to be 64-bit aligned */ \ + ) DISPATCH_ATOMIC64_ALIGN; \ /* LP64 global queue cacheline boundary */ \ unsigned long ns##_serialnum; \ const char *ns##_label; \ - voucher_t ns##_override_voucher; \ - dispatch_priority_t ns##_priority; \ - dispatch_priority_t volatile ns##_override; \ - struct dispatch_object_s *volatile ns##_items_tail + dispatch_wlh_t ns##_wlh; \ + struct dispatch_object_s *volatile ns##_items_tail; \ + dispatch_priority_t ns##_priority + /* LP64: 32bit hole */ #endif OS_OBJECT_INTERNAL_CLASS_DECL(os_mpsc_queue, object, @@ -610,6 +635,9 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #define _os_object_refcnt_dispose_barrier(o) \ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt) +void _os_object_atfork_child(void); +void _os_object_atfork_parent(void); +void _os_object_atfork_prepare(void); void _os_object_init(void); unsigned long _os_object_retain_count(_os_object_t obj); bool _os_object_retain_weak(_os_object_t obj); diff --git a/src/once.c b/src/once.c index d7d6a8e64..75d7a39a5 100644 --- a/src/once.c +++ b/src/once.c @@ -63,61 +63,9 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) dow.dow_thread = _dispatch_tid_self(); _dispatch_client_callout(ctxt, func); - // The next barrier must be long and strong. - // - // The scenario: SMP systems with weakly ordered memory models - // and aggressive out-of-order instruction execution. - // - // The problem: - // - // The dispatch_once*() wrapper macro causes the callee's - // instruction stream to look like this (pseudo-RISC): - // - // load r5, pred-addr - // cmpi r5, -1 - // beq 1f - // call dispatch_once*() - // 1f: - // load r6, data-addr - // - // May be re-ordered like so: - // - // load r6, data-addr - // load r5, pred-addr - // cmpi r5, -1 - // beq 1f - // call dispatch_once*() - // 1f: - // - // Normally, a barrier on the read side is used to workaround - // the weakly ordered memory model. But barriers are expensive - // and we only need to synchronize once! 
After func(ctxt) - // completes, the predicate will be marked as "done" and the - // branch predictor will correctly skip the call to - // dispatch_once*(). - // - // A far faster alternative solution: Defeat the speculative - // read-ahead of peer CPUs. - // - // Modern architectures will throw away speculative results - // once a branch mis-prediction occurs. Therefore, if we can - // ensure that the predicate is not marked as being complete - // until long after the last store by func(ctxt), then we have - // defeated the read-ahead of peer CPUs. - // - // In other words, the last "store" by func(ctxt) must complete - // and then N cycles must elapse before ~0l is stored to *val. - // The value of N is whatever is sufficient to defeat the - // read-ahead mechanism of peer CPUs. - // - // On some CPUs, the most fully synchronizing instruction might - // need to be issued. - - os_atomic_maximally_synchronizing_barrier(); - // above assumed to contain release barrier - next = os_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); + next = (_dispatch_once_waiter_t)_dispatch_once_xchg_done(val); while (next != tail) { - _dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next); + tmp = (_dispatch_once_waiter_t)_dispatch_wait_until(next->dow_next); event = &next->dow_event; next = tmp; _dispatch_thread_event_signal(event); @@ -129,7 +77,7 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) if (next == DISPATCH_ONCE_DONE) { break; } - if (os_atomic_cmpxchgvw(vval, next, tail, &next, release)) { + if (os_atomic_cmpxchgv(vval, next, tail, &next, release)) { dow.dow_thread = next->dow_thread; dow.dow_next = next; if (dow.dow_thread) { diff --git a/src/queue.c b/src/queue.c index a08f21b5b..088c5cfd2 100644 --- a/src/queue.c +++ b/src/queue.c @@ -20,7 +20,7 @@ #include "internal.h" #if HAVE_MACH -#include "protocol.h" +#include "protocol.h" // _dispatch_send_wakeup_runloop_thread #endif #if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && \ @@ -30,19 +30,11 @@ #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL #define DISPATCH_USE_PTHREAD_POOL 1 #endif -#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) \ - && !defined(DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK) -#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1 -#endif -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK && \ +#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) && \ !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 #endif -#if HAVE_PTHREAD_WORKQUEUE_QOS && !DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK -#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0 -#endif #if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* @@ -50,15 +42,19 @@ static void _dispatch_sig_thread(void *ctxt); static void _dispatch_cache_cleanup(void *value); -static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp); static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc); static void _dispatch_queue_cleanup(void *ctxt); +static void _dispatch_wlh_cleanup(void *ctxt); static void _dispatch_deferred_items_cleanup(void *ctxt); static void _dispatch_frame_cleanup(void *ctxt); static void _dispatch_context_cleanup(void *ctxt); static void 
_dispatch_non_barrier_complete(dispatch_queue_t dq); -static inline void _dispatch_global_queue_poke(dispatch_queue_t dq); +static void _dispatch_queue_push_sync_waiter(dispatch_queue_t dq, + dispatch_sync_context_t dsc); +#if HAVE_PTHREAD_WORKQUEUE_QOS +static void _dispatch_root_queue_push_queue_override(dispatch_queue_t rq, + dispatch_queue_class_t dqu, dispatch_qos_t qos); +#endif #if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread4(void *context); #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -76,14 +72,11 @@ static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); #if DISPATCH_COCOA_COMPAT static dispatch_once_t _dispatch_main_q_handle_pred; static void _dispatch_runloop_queue_poke(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags); + dispatch_qos_t qos, dispatch_wakeup_flags_t flags); static void _dispatch_runloop_queue_handle_init(void *ctxt); static void _dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq); #endif -static void _dispatch_root_queues_init_once(void *context); -static dispatch_once_t _dispatch_root_queues_pred; - #pragma mark - #pragma mark dispatch_root_queue @@ -185,7 +178,7 @@ DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE, + .dgq_qos = QOS_CLASS_MAINTENANCE, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, .dgq_wq_options = 0, #endif @@ -196,7 +189,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE, + .dgq_qos = QOS_CLASS_MAINTENANCE, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif @@ -207,7 +200,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, + .dgq_qos = QOS_CLASS_BACKGROUND, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, #endif @@ -218,7 +211,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, + .dgq_qos = QOS_CLASS_BACKGROUND, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif @@ -229,7 +222,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY, + .dgq_qos = QOS_CLASS_UTILITY, .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, .dgq_wq_options = 0, #endif @@ -240,7 +233,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY, + .dgq_qos = QOS_CLASS_UTILITY, .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif @@ -251,7 +244,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT, + .dgq_qos = QOS_CLASS_DEFAULT, .dgq_wq_priority = 
WORKQ_DEFAULT_PRIOQUEUE, .dgq_wq_options = 0, #endif @@ -262,7 +255,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT, + .dgq_qos = QOS_CLASS_DEFAULT, .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif @@ -273,7 +266,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED, + .dgq_qos = QOS_CLASS_USER_INITIATED, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = 0, #endif @@ -284,7 +277,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED, + .dgq_qos = QOS_CLASS_USER_INITIATED, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif @@ -295,7 +288,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, + .dgq_qos = QOS_CLASS_USER_INTERACTIVE, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, #endif @@ -306,7 +299,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, + .dgq_qos = QOS_CLASS_USER_INTERACTIVE, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif @@ -321,62 +314,70 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_root_queues[] = { -#define _DISPATCH_ROOT_QUEUE_ENTRY(n, ...) \ - [DISPATCH_ROOT_QUEUE_IDX_##n] = { \ +#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \ + ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS) +#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \ .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ .do_ctxt = &_dispatch_root_queue_contexts[ \ - DISPATCH_ROOT_QUEUE_IDX_##n], \ - .dq_width = DISPATCH_QUEUE_WIDTH_POOL, \ - .dq_override_voucher = DISPATCH_NO_VOUCHER, \ - .dq_override = DISPATCH_SATURATED_OVERRIDE, \ + _DISPATCH_ROOT_QUEUE_IDX(n, flags)], \ + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \ + .dq_wlh = DISPATCH_WLH_GLOBAL, \ + .dq_priority = _dispatch_priority_make(DISPATCH_QOS_##n, 0) | flags | \ + DISPATCH_PRIORITY_FLAG_ROOTQUEUE | \ + ((flags & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) ? 
0 : \ + DISPATCH_QOS_##n << DISPATCH_PRIORITY_OVERRIDE_SHIFT), \ __VA_ARGS__ \ } - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, .dq_label = "com.apple.root.maintenance-qos", .dq_serialnum = 4, ), - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.maintenance-qos.overcommit", .dq_serialnum = 5, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0, .dq_label = "com.apple.root.background-qos", .dq_serialnum = 6, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.background-qos.overcommit", .dq_serialnum = 7, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0, .dq_label = "com.apple.root.utility-qos", .dq_serialnum = 8, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.utility-qos.overcommit", .dq_serialnum = 9, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE, .dq_label = "com.apple.root.default-qos", .dq_serialnum = 10, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, + DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.default-qos.overcommit", .dq_serialnum = 11, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, .dq_label = "com.apple.root.user-initiated-qos", .dq_serialnum = 12, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-initiated-qos.overcommit", .dq_serialnum = 13, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, .dq_label = "com.apple.root.user-interactive-qos", .dq_serialnum = 14, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-interactive-qos.overcommit", .dq_serialnum = 15, ), @@ -407,36 +408,6 @@ static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { }; #endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#define DISPATCH_PRIORITY_COUNT 5 - -enum { - // No DISPATCH_PRIORITY_IDX_MAINTENANCE define because there is no legacy - // maintenance priority - DISPATCH_PRIORITY_IDX_BACKGROUND = 0, - DISPATCH_PRIORITY_IDX_NON_INTERACTIVE, - DISPATCH_PRIORITY_IDX_LOW, - DISPATCH_PRIORITY_IDX_DEFAULT, - DISPATCH_PRIORITY_IDX_HIGH, -}; - -static qos_class_t _dispatch_priority2qos[] = { - [DISPATCH_PRIORITY_IDX_BACKGROUND] = _DISPATCH_QOS_CLASS_BACKGROUND, - [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = _DISPATCH_QOS_CLASS_UTILITY, - [DISPATCH_PRIORITY_IDX_LOW] = _DISPATCH_QOS_CLASS_UTILITY, - [DISPATCH_PRIORITY_IDX_DEFAULT] = _DISPATCH_QOS_CLASS_DEFAULT, - [DISPATCH_PRIORITY_IDX_HIGH] = _DISPATCH_QOS_CLASS_USER_INITIATED, -}; - -#if HAVE_PTHREAD_WORKQUEUE_QOS -static const int _dispatch_priority2wq[] = { - [DISPATCH_PRIORITY_IDX_BACKGROUND] = WORKQ_BG_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = WORKQ_NON_INTERACTIVE_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_LOW] = WORKQ_LOW_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_DEFAULT] = 
WORKQ_DEFAULT_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_HIGH] = WORKQ_HIGH_PRIOQUEUE, -}; -#endif - #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_queue_s _dispatch_mgr_root_queue; #else @@ -452,9 +423,10 @@ struct dispatch_queue_s _dispatch_mgr_q = { .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), .do_targetq = &_dispatch_mgr_root_queue, .dq_label = "com.apple.libdispatch-manager", - .dq_width = 1, - .dq_override_voucher = DISPATCH_NO_VOUCHER, - .dq_override = DISPATCH_SATURATED_OVERRIDE, + .dq_atomic_flags = DQF_WIDTH(1), + .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_wlh = DISPATCH_WLH_GLOBAL, .dq_serialnum = 2, }; @@ -464,48 +436,16 @@ dispatch_get_global_queue(long priority, unsigned long flags) if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { return DISPATCH_BAD_INPUT; } - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); - qos_class_t qos; - switch (priority) { -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - case _DISPATCH_QOS_CLASS_MAINTENANCE: - if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] - .dq_priority) { - // map maintenance to background on old kernel - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND]; - } else { - qos = (qos_class_t)priority; - } - break; -#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - case DISPATCH_QUEUE_PRIORITY_BACKGROUND: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND]; - break; - case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE]; - break; - case DISPATCH_QUEUE_PRIORITY_LOW: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_LOW]; - break; - case DISPATCH_QUEUE_PRIORITY_DEFAULT: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_DEFAULT]; - break; - case DISPATCH_QUEUE_PRIORITY_HIGH: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH]; - break; - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] - .dq_priority) { - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH]; - break; - } + dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == QOS_CLASS_MAINTENANCE) { + qos = DISPATCH_QOS_BACKGROUND; + } else if (qos == QOS_CLASS_USER_INTERACTIVE) { + qos = DISPATCH_QOS_USER_INITIATED; + } #endif - // fallthrough - default: - qos = (qos_class_t)priority; - break; + if (qos == DISPATCH_QOS_UNSPECIFIED) { + return DISPATCH_BAD_INPUT; } return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); } @@ -515,7 +455,7 @@ static inline dispatch_queue_t _dispatch_get_current_queue(void) { return _dispatch_queue_get_current() ?: - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); } dispatch_queue_t @@ -557,11 +497,14 @@ dispatch_assert_queue(dispatch_queue_t dq) if (likely(_dq_state_drain_owner(dq_state) == _dispatch_tid_self())) { return; } - if (likely(dq->dq_width > 1)) { - // we can look at the width: if it is changing while we read it, - // it means that a barrier is running on `dq` concurrently, which - // proves that we're not on `dq`. Hence reading a stale '1' is ok. 
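The dispatch_assert_queue()/dispatch_assert_queue_not() changes above fall back to walking the current thread's frame chain whenever the drain-owner check is not conclusive (for example with thread-bound queues). The following standalone C sketch shows that fallback shape with a simplified thread-local frame stack; the names and structure are hypothetical and deliberately ignore the dq_state fast path.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct queue_s { const char *label; } queue_s;

typedef struct frame_s {
    struct frame_s *next;
    queue_s *queue;
} frame_s;

/* Each nested drain pushes a frame; the assert walks the chain. */
static _Thread_local frame_s *tls_frame_head;

static void frame_push(frame_s *frame, queue_s *queue) {
    frame->queue = queue;
    frame->next = tls_frame_head;
    tls_frame_head = frame;
}

static void frame_pop(void) { tls_frame_head = tls_frame_head->next; }

static bool thread_frame_find_queue(queue_s *queue) {
    for (frame_s *f = tls_frame_head; f; f = f->next) {
        if (f->queue == queue) return true;
    }
    return false;
}

static void assert_on_queue(queue_s *queue) {
    if (!thread_frame_find_queue(queue)) {
        fprintf(stderr, "not running on queue %s\n", queue->label);
        assert(0);
    }
}

int main(void) {
    queue_s outer = { "outer" }, inner = { "inner" };
    frame_s f1, f2;
    frame_push(&f1, &outer);
    frame_push(&f2, &inner);      /* simulate a nested drain */
    assert_on_queue(&outer);      /* found via the frame chain */
    assert_on_queue(&inner);
    frame_pop();
    frame_pop();
    return 0;
}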
- if (fastpath(_dispatch_thread_frame_find_queue(dq))) { + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. + // + // However if we can have thread bound queues, these mess with lock + // ownership and we always have to take the slowpath + if (likely(DISPATCH_COCOA_COMPAT || dq->dq_width > 1)) { + if (likely(_dispatch_thread_frame_find_queue(dq))) { return; } } @@ -582,10 +525,13 @@ dispatch_assert_queue_not(dispatch_queue_t dq) return; } if (likely(_dq_state_drain_owner(dq_state) != _dispatch_tid_self())) { - if (likely(dq->dq_width == 1)) { - // we can look at the width: if it is changing while we read it, - // it means that a barrier is running on `dq` concurrently, which - // proves that we're not on `dq`. Hence reading a stale '1' is ok. + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. + // + // However if we can have thread bound queues, these mess with lock + // ownership and we always have to take the slowpath + if (likely(!DISPATCH_COCOA_COMPAT && dq->dq_width == 1)) { return; } if (likely(!_dispatch_thread_frame_find_queue(dq))) { @@ -625,40 +571,6 @@ dispatch_assert_queue_barrier(dispatch_queue_t dq) #pragma mark - #pragma mark dispatch_init -#if HAVE_PTHREAD_WORKQUEUE_QOS -pthread_priority_t _dispatch_background_priority; -pthread_priority_t _dispatch_user_initiated_priority; - -static void -_dispatch_root_queues_init_qos(int supported) -{ - pthread_priority_t p; - qos_class_t qos; - unsigned int i; - for (i = 0; i < DISPATCH_PRIORITY_COUNT; i++) { - p = _pthread_qos_class_encode_workqueue(_dispatch_priority2wq[i], 0); - qos = _pthread_qos_class_decode(p, NULL, NULL); - dispatch_assert(qos != _DISPATCH_QOS_CLASS_UNSPECIFIED); - _dispatch_priority2qos[i] = qos; - } - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - qos = _dispatch_root_queue_contexts[i].dgq_qos; - if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && - !(supported & WORKQ_FEATURE_MAINTENANCE)) { - continue; - } - unsigned long flags = i & 1 ? 
_PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0; - flags |= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; - if (i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS || - i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT) { - flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - } - p = _pthread_qos_class_encode(qos, 0, flags); - _dispatch_root_queues[i].dq_priority = (dispatch_priority_t)p; - } -} -#endif // HAVE_PTHREAD_WORKQUEUE_QOS - static inline bool _dispatch_root_queues_init_workq(int *wq_supported) { @@ -681,6 +593,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) disable_kevent_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")); #endif #endif + if (!disable_wq && !disable_qos) { *wq_supported = _pthread_workqueue_supported(); #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -697,7 +610,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) #endif result = !r; } else -#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE if (*wq_supported & WORKQ_FEATURE_FINEPRIO) { #if DISPATCH_USE_MGR_THREAD r = _pthread_workqueue_init(_dispatch_worker_thread3, @@ -705,7 +618,10 @@ _dispatch_root_queues_init_workq(int *wq_supported) result = !r; #endif } - if (result) _dispatch_root_queues_init_qos(*wq_supported); + if (!(*wq_supported & WORKQ_FEATURE_MAINTENANCE)) { + DISPATCH_INTERNAL_CRASH(*wq_supported, + "QoS Maintenance support required"); + } } #endif // DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP @@ -728,7 +644,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) (void)dispatch_assume_zero(r); } #endif - int i; + size_t i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { pthread_workqueue_t pwq = NULL; dispatch_root_queue_context_t qc; @@ -784,30 +700,12 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, #endif } #endif // HAVE_PTHREAD_WORKQUEUES -#if USE_MACH_SEM - // override the default FIFO behavior for the pool semaphores - kern_return_t kr = semaphore_create(mach_task_self(), - &pqc->dpq_thread_mediator.dsema_port, SYNC_POLICY_LIFO, 0); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port); -#elif USE_POSIX_SEM - /* XXXRW: POSIX semaphores don't support LIFO? 
*/ - int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0); - (void)dispatch_assume_zero(ret); -#endif + _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema; + _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO); + _dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO); } #endif // DISPATCH_USE_PTHREAD_POOL -static dispatch_once_t _dispatch_root_queues_pred; - -void -_dispatch_root_queues_init(void) -{ - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); -} - static void _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) { @@ -815,7 +713,7 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) _dispatch_fork_becomes_unsafe(); if (!_dispatch_root_queues_init_workq(&wq_supported)) { #if DISPATCH_ENABLE_THREAD_POOL - int i; + size_t i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { bool overcommit = true; #if TARGET_OS_EMBEDDED @@ -836,12 +734,19 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) } } +void +_dispatch_root_queues_init(void) +{ + static dispatch_once_t _dispatch_root_queues_pred; + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); +} + DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void) { - dispatch_assert(DISPATCH_QUEUE_QOS_COUNT == 6); - dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 12); + dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 2 * DISPATCH_QOS_MAX); dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == -DISPATCH_QUEUE_PRIORITY_HIGH); @@ -849,12 +754,6 @@ libdispatch_init(void) DISPATCH_ROOT_QUEUE_COUNT); dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); - dispatch_assert(countof(_dispatch_priority2qos) == - DISPATCH_PRIORITY_COUNT); -#if HAVE_PTHREAD_WORKQUEUE_QOS - dispatch_assert(countof(_dispatch_priority2wq) == - DISPATCH_PRIORITY_COUNT); -#endif #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP dispatch_assert(sizeof(_dispatch_wq2root_queues) / sizeof(_dispatch_wq2root_queues[0][0]) == @@ -879,15 +778,9 @@ libdispatch_init(void) #if HAVE_PTHREAD_WORKQUEUE_QOS - // 26497968 _dispatch_user_initiated_priority should be set for qos - // propagation to work properly - pthread_priority_t p = _pthread_qos_class_encode(qos_class_main(), 0, 0); - _dispatch_main_q.dq_priority = (dispatch_priority_t)p; - _dispatch_main_q.dq_override = p & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_USER_INITIATED, 0, 0); - _dispatch_user_initiated_priority = p; - p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_BACKGROUND, 0, 0); - _dispatch_background_priority = p; + dispatch_qos_t qos = _dispatch_qos_from_qos_class(qos_class_main()); + dispatch_priority_t pri = _dispatch_priority_make(qos, 0); + _dispatch_main_q.dq_priority = _dispatch_priority_with_override_qos(pri, qos); #if DISPATCH_DEBUG if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { _dispatch_set_qos_class_enabled = 1; @@ -898,25 +791,24 @@ libdispatch_init(void) #if DISPATCH_USE_THREAD_LOCAL_STORAGE _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); #else + _dispatch_thread_key_create(&dispatch_priority_key, NULL); + _dispatch_thread_key_create(&dispatch_r2k_key, NULL); _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); - _dispatch_thread_key_create(&dispatch_deferred_items_key, - _dispatch_deferred_items_cleanup); _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); - _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); 
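The libdispatch_init() and _libdispatch_tsd_cleanup() hunks around here reshuffle the thread-specific keys and the cleanup callbacks attached to them. As background, this is the standard pthread TSD pattern: a key registered with a destructor, so per-thread state is torn down automatically at thread exit. The sketch below uses only the portable pthread API; cache_key, cache_cleanup, and worker are hypothetical names, not libdispatch symbols.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t cache_key;

static void cache_cleanup(void *value) {
    /* Runs on the owning thread as it exits, once per non-NULL value. */
    printf("cleaning per-thread cache %p\n", value);
    free(value);
}

static void *worker(void *unused) {
    (void)unused;
    void *cache = malloc(64);
    pthread_setspecific(cache_key, cache);   /* visible only to this thread */
    return NULL;                             /* cache_cleanup() fires here */
}

int main(void) {
    pthread_t tid;
    pthread_key_create(&cache_key, cache_cleanup);
    pthread_create(&tid, NULL, worker, NULL);
    pthread_join(tid, NULL);
    return 0;
}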
_dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); - _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL); _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, NULL); -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION + _dispatch_thread_key_create(&dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _dispatch_thread_key_create(&dispatch_introspection_key , NULL); +#elif DISPATCH_PERF_MON _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); #endif -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - _dispatch_thread_key_create(&dispatch_sema4_key, - _dispatch_thread_semaphore_dispose); - } -#endif + _dispatch_thread_key_create(&dispatch_wlh_key, _dispatch_wlh_cleanup); + _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); + _dispatch_thread_key_create(&dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); #endif #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 @@ -938,40 +830,6 @@ libdispatch_init(void) _dispatch_introspection_init(); } -#if HAVE_MACH -static dispatch_once_t _dispatch_mach_host_port_pred; -static mach_port_t _dispatch_mach_host_port; - -static void -_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED) -{ - kern_return_t kr; - mach_port_t mp, mhp = mach_host_self(); - kr = host_get_host_port(mhp, &mp); - DISPATCH_VERIFY_MIG(kr); - if (fastpath(!kr)) { - // mach_host_self returned the HOST_PRIV port - kr = mach_port_deallocate(mach_task_self(), mhp); - DISPATCH_VERIFY_MIG(kr); - mhp = mp; - } else if (kr != KERN_INVALID_ARGUMENT) { - (void)dispatch_assume_zero(kr); - } - if (!fastpath(mhp)) { - DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port"); - } - _dispatch_mach_host_port = mhp; -} - -mach_port_t -_dispatch_get_mach_host_port(void) -{ - dispatch_once_f(&_dispatch_mach_host_port_pred, NULL, - _dispatch_mach_host_port_init); - return _dispatch_mach_host_port; -} -#endif - #if DISPATCH_USE_THREAD_LOCAL_STORAGE #include #include @@ -991,27 +849,29 @@ gettid(void) #define _tsd_call_cleanup(k, f) do { \ if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ - } while (0) + } while (0) void _libdispatch_tsd_cleanup(void *ctx) { struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; + _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_r2k_key, NULL); + _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup); _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, NULL); - _tsd_call_cleanup(dispatch_defaultpriority_key, NULL); -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_introspection_key, NULL); +#elif DISPATCH_PERF_MON _tsd_call_cleanup(dispatch_bcounter_key, NULL); #endif -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - _tsd_call_cleanup(dispatch_sema4_key, _dispatch_thread_semaphore_dispose); -#endif - _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_wlh_key, _dispatch_wlh_cleanup); _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); _tsd_call_cleanup(dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); @@ -1027,25 +887,14 @@ 
libdispatch_tsd_init(void) } #endif -DISPATCH_EXPORT DISPATCH_NOTHROW +DISPATCH_NOTHROW void -dispatch_atfork_child(void) +_dispatch_queue_atfork_child(void) { void *crash = (void *)0x100; size_t i; -#if HAVE_MACH - _dispatch_mach_host_port_pred = 0; - _dispatch_mach_host_port = MACH_VOUCHER_NULL; -#endif - _voucher_atfork_child(); - if (!_dispatch_is_multithreaded_inline()) { - // clear the _PROHIBIT bit if set - _dispatch_unsafe_fork = 0; - return; - } - _dispatch_unsafe_fork = 0; - _dispatch_child_of_unsafe_fork = true; + if (!_dispatch_is_multithreaded_inline()) return; _dispatch_main_q.dq_items_head = crash; _dispatch_main_q.dq_items_tail = crash; @@ -1068,13 +917,13 @@ _dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority) { qos_class_t qos = (qos_class_t)qos_class; switch (qos) { - case _DISPATCH_QOS_CLASS_MAINTENANCE: - case _DISPATCH_QOS_CLASS_BACKGROUND: - case _DISPATCH_QOS_CLASS_UTILITY: - case _DISPATCH_QOS_CLASS_DEFAULT: - case _DISPATCH_QOS_CLASS_USER_INITIATED: - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: - case _DISPATCH_QOS_CLASS_UNSPECIFIED: + case QOS_CLASS_MAINTENANCE: + case QOS_CLASS_BACKGROUND: + case QOS_CLASS_UTILITY: + case QOS_CLASS_DEFAULT: + case QOS_CLASS_USER_INITIATED: + case QOS_CLASS_USER_INTERACTIVE: + case QOS_CLASS_UNSPECIFIED: break; default: return false; @@ -1085,20 +934,6 @@ _dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority) return true; } -#define DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(qos) \ - [_DISPATCH_QOS_CLASS_##qos] = DQA_INDEX_QOS_CLASS_##qos - -static const -_dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UNSPECIFIED), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(MAINTENANCE), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(BACKGROUND), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UTILITY), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(DEFAULT), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INITIATED), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INTERACTIVE), -}; - #define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \ ((overcommit) == _dispatch_queue_attr_overcommit_disabled ? 
\ DQA_INDEX_NON_OVERCOMMIT : \ @@ -1116,10 +951,10 @@ _dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { #define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio)) -#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)]) +#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (qos) static inline dispatch_queue_attr_t -_dispatch_get_queue_attr(qos_class_t qos, int prio, +_dispatch_get_queue_attr(dispatch_qos_t qos, int prio, _dispatch_queue_attr_overcommit_t overcommit, dispatch_autorelease_frequency_t frequency, bool concurrent, bool inactive) @@ -1136,16 +971,16 @@ _dispatch_get_queue_attr(qos_class_t qos, int prio, dispatch_queue_attr_t _dispatch_get_default_queue_attr(void) { - return _dispatch_get_queue_attr(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + return _dispatch_get_queue_attr(DISPATCH_QOS_UNSPECIFIED, 0, _dispatch_queue_attr_overcommit_unspecified, DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false); } dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, - dispatch_qos_class_t qos_class, int relative_priority) + dispatch_qos_class_t qos_class, int relpri) { - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) { + if (!_dispatch_qos_class_valid(qos_class, relpri)) { return DISPATCH_BAD_INPUT; } if (!slowpath(dqa)) { @@ -1153,8 +988,8 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(qos_class, relative_priority, - dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, + return _dispatch_get_queue_attr(_dispatch_qos_from_qos_class(qos_class), + relpri, dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, dqa->dqa_inactive); } @@ -1166,8 +1001,9 @@ dispatch_queue_attr_make_initially_inactive(dispatch_queue_attr_t dqa) } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(dqa->dqa_qos_class, - dqa->dqa_relative_priority, dqa->dqa_overcommit, + dispatch_priority_t pri = dqa->dqa_qos_and_relpri; + return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), + _dispatch_priority_relpri(pri), dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, true); } @@ -1180,8 +1016,9 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(dqa->dqa_qos_class, - dqa->dqa_relative_priority, overcommit ? + dispatch_priority_t pri = dqa->dqa_qos_and_relpri; + return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), + _dispatch_priority_relpri(pri), overcommit ? 
_dispatch_queue_attr_overcommit_enabled : _dispatch_queue_attr_overcommit_disabled, dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, @@ -1205,14 +1042,28 @@ dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa, } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(dqa->dqa_qos_class, - dqa->dqa_relative_priority, dqa->dqa_overcommit, + dispatch_priority_t pri = dqa->dqa_qos_and_relpri; + return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), + _dispatch_priority_relpri(pri), dqa->dqa_overcommit, frequency, dqa->dqa_concurrent, dqa->dqa_inactive); } #pragma mark - #pragma mark dispatch_queue_t +void +dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label) +{ + if (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + return; + } + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dq); + if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) { + DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue"); + } + dq->dq_label = label; +} + // skip zero // 1 - main_q // 2 - mgr_q @@ -1226,11 +1077,6 @@ static dispatch_queue_t _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, dispatch_queue_t tq, bool legacy) { -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - // Be sure the root queue priorities are set - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); -#endif if (!slowpath(dqa)) { dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { @@ -1241,25 +1087,15 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, // Step 1: Normalize arguments (qos, overcommit, tq) // - qos_class_t qos = dqa->dqa_qos_class; -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE && - !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_USER_INITIATED; - } -#endif - bool maintenance_fallback = false; -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - maintenance_fallback = true; -#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (maintenance_fallback) { - if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && - !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_BACKGROUND; - } + dispatch_qos_t qos = _dispatch_priority_qos(dqa->dqa_qos_and_relpri); +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == DISPATCH_QOS_USER_INTERACTIVE) { + qos = DISPATCH_QOS_USER_INITIATED; } + if (qos == DISPATCH_QOS_MAINTENANCE) { + qos = DISPATCH_QOS_BACKGROUND; + } +#endif // !HAVE_PTHREAD_WORKQUEUE_QOS _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) { @@ -1273,14 +1109,15 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { // Handle discrepancies between attr and target queue, attributes win if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { - if (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { + if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { overcommit = _dispatch_queue_attr_overcommit_enabled; } else { overcommit = _dispatch_queue_attr_overcommit_disabled; } } - if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { - tq = _dispatch_get_root_queue_with_overcommit(tq, + if (qos == DISPATCH_QOS_UNSPECIFIED) { + 
dispatch_qos_t tq_qos = _dispatch_priority_qos(tq->dq_priority); + tq = _dispatch_get_root_queue(tq_qos, overcommit == _dispatch_queue_attr_overcommit_enabled); } else { tq = NULL; @@ -1292,7 +1129,7 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute " "and use this kind of target queue"); } - if (qos != _DISPATCH_QOS_CLASS_UNSPECIFIED) { + if (qos != DISPATCH_QOS_UNSPECIFIED) { DISPATCH_CLIENT_CRASH(tq, "Cannot specify a QoS attribute " "and use this kind of target queue"); } @@ -1305,10 +1142,9 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, } } if (!tq) { - qos_class_t tq_qos = qos == _DISPATCH_QOS_CLASS_UNSPECIFIED ? - _DISPATCH_QOS_CLASS_DEFAULT : qos; - tq = _dispatch_get_root_queue(tq_qos, overcommit == - _dispatch_queue_attr_overcommit_enabled); + tq = _dispatch_get_root_queue( + qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos, + overcommit == _dispatch_queue_attr_overcommit_enabled); if (slowpath(!tq)) { DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); } @@ -1342,6 +1178,9 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, dqf |= DQF_AUTORELEASE_ALWAYS; break; } + if (legacy) { + dqf |= DQF_LEGACY; + } if (label) { const char *tmp = _dispatch_strdup_if_mutable(label); if (tmp != label) { @@ -1358,20 +1197,27 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, dq->dq_label = label; #if HAVE_PTHREAD_WORKQUEUE_QOS - dq->dq_priority = (dispatch_priority_t)_pthread_qos_class_encode(qos, - dqa->dqa_relative_priority, - overcommit == _dispatch_queue_attr_overcommit_enabled ? - _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0); + dq->dq_priority = dqa->dqa_qos_and_relpri; + if (overcommit == _dispatch_queue_attr_overcommit_enabled) { + dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + } #endif _dispatch_retain(tq); - if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { + if (qos == QOS_CLASS_UNSPECIFIED) { // legacy way of inherithing the QoS from the target _dispatch_queue_priority_inherit_from_target(dq, tq); } - if (!dqa->dqa_inactive) { + if (!dqa->dqa_inactive && !dx_hastypeflag(tq, QUEUE_ROOT)) { _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); } dq->do_targetq = tq; + if (!_dispatch_queue_is_legacy(dq) && !dqa->dqa_inactive) { + if (dx_hastypeflag(tq, QUEUE_ROOT)) { + dq->dq_wlh = _dispatch_root_queue_wlh_for_queue(tq, dq); + } else { + dq->dq_wlh = tq->dq_wlh; + } + } _dispatch_object_debug(dq, "%s", __func__); return _dispatch_introspection_queue_create(dq); } @@ -1394,10 +1240,8 @@ dispatch_queue_t dispatch_queue_create_with_accounting_override_voucher(const char *label, dispatch_queue_attr_t attr, voucher_t voucher) { - dispatch_queue_t dq = dispatch_queue_create_with_target(label, attr, - DISPATCH_TARGET_QUEUE_DEFAULT); - dq->dq_override_voucher = _voucher_create_accounting_voucher(voucher); - return dq; + (void)label; (void)attr; (void)voucher; + DISPATCH_CLIENT_CRASH(0, "Unsupported interface"); } void @@ -1409,11 +1253,17 @@ _dispatch_queue_destroy(dispatch_queue_t dq) if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; } - if (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE) { + if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { // dispatch_cancel_and_wait may apply overrides in a racy way with // the source cancellation finishing. 
This race is expensive and not // really worthwhile to resolve since the source becomes dead anyway. - dq_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE; + // + // In a similar way using DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT causes + // DIRTY & MAX_QOS bits to stay with the channel or source sometimes + // never woken up before it dies, so we have to ignore them. + dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + dq_state &= ~DISPATCH_QUEUE_DIRTY; + dq_state &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE; } if (slowpath(dq_state != initial_state)) { if (_dq_state_drain_locked(dq_state)) { @@ -1444,9 +1294,8 @@ _dispatch_queue_destroy(dispatch_queue_t dq) if (dqsq) { _dispatch_release(dqsq); } - if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { - if (dq->dq_override_voucher) _voucher_release(dq->dq_override_voucher); - dq->dq_override_voucher = DISPATCH_NO_VOUCHER; + if (dq->dq_wlh) { + dq->dq_wlh = NULL; } } @@ -1485,11 +1334,11 @@ _dispatch_queue_suspend_slow(dispatch_queue_t dq) // threads could have touched this value while we were trying to acquire // the lock, or because another thread raced us to do the same operation // and got to the lock first. - if (slowpath(os_sub_overflow(dq_state, delta, &value))) { + if (unlikely(os_sub_overflow(dq_state, delta, &value))) { os_atomic_rmw_loop_give_up(goto retry); } }); - if (slowpath(os_add_overflow(dq->dq_side_suspend_cnt, + if (unlikely(os_add_overflow(dq->dq_side_suspend_cnt, DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); } @@ -1509,11 +1358,20 @@ _dispatch_queue_suspend(dispatch_queue_t dq) os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { value = DISPATCH_QUEUE_SUSPEND_INTERVAL; - if (slowpath(os_add_overflow(dq_state, value, &value))) { + if (unlikely(os_add_overflow(dq_state, value, &value))) { os_atomic_rmw_loop_give_up({ return _dispatch_queue_suspend_slow(dq); }); } +#ifdef DLOCK_NOWAITERS_BIT + if (_dq_state_drain_locked(dq_state)) { + value |= DISPATCH_QUEUE_DRAIN_OWNER_MASK; + } else { + value ^= DLOCK_OWNER_INVALID; + } +#else + value |= DLOCK_OWNER_INVALID; +#endif }); if (!_dq_state_is_suspended(dq_state)) { @@ -1548,7 +1406,7 @@ _dispatch_queue_resume_slow(dispatch_queue_t dq) // threads could have touched this value while we were trying to acquire // the lock, or because another thread raced us to do the same operation // and got to the lock first. 
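The suspend/resume paths above keep the same checked-arithmetic shape while switching slowpath() to unlikely(): the new packed dq_state is computed inside an atomic read-modify-write loop, and if the add or subtract overflows, the loop gives up and falls back to a slow path instead of publishing a corrupt value. A minimal sketch of that shape follows, using C11 atomics and the clang/gcc __builtin_add_overflow() as a stand-in for os_add_overflow(); the names and bit layout are hypothetical.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t packed_state;

#define SUSPEND_INTERVAL (1ull << 32)   /* suspend count lives in high bits */

static void queue_suspend_slow(void) {
    puts("suspend count would overflow, spilling to a side count");
}

static void queue_suspend(void) {
    uint64_t old_state =
            atomic_load_explicit(&packed_state, memory_order_relaxed);
    uint64_t new_state;
    do {
        if (__builtin_add_overflow(old_state, SUSPEND_INTERVAL, &new_state)) {
            /* Equivalent of os_atomic_rmw_loop_give_up(): abandon the loop
             * and let the slow path handle the saturated count. */
            queue_suspend_slow();
            return;
        }
        /* On failure, old_state is reloaded and the new value recomputed. */
    } while (!atomic_compare_exchange_weak_explicit(&packed_state, &old_state,
            new_state, memory_order_relaxed, memory_order_relaxed));
}

int main(void) {
    queue_suspend();
    printf("state: 0x%llx\n", (unsigned long long)
            atomic_load_explicit(&packed_state, memory_order_relaxed));
    return 0;
}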
- if (slowpath(os_add_overflow(dq_state, delta, &value))) { + if (unlikely(os_add_overflow(dq_state, delta, &value))) { os_atomic_rmw_loop_give_up(goto retry); } }); @@ -1577,6 +1435,9 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) { // covers all suspend and inactive bits, including side suspend bit const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; + // covers all suspend and inactive bits and owner mask + const uint64_t suspend_owner_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK | + DISPATCH_QUEUE_DRAIN_OWNER_MASK; // backward compatibility: only dispatch sources can abuse // dispatch_resume() to really mean dispatch_activate() bool resume_can_activate = (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE); @@ -1635,9 +1496,31 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) value = dq_state - DISPATCH_QUEUE_INACTIVE - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_SUSPEND_INTERVAL; + } else if ((dq_state & suspend_owner_bits) == (suspend_owner_bits & + (DISPATCH_QUEUE_SUSPEND_INTERVAL + DLOCK_OWNER_INVALID))) { + value = dq_state; + value ^= DISPATCH_QUEUE_SUSPEND_INTERVAL + DLOCK_OWNER_INVALID; + uint64_t full_width = value; + if (_dq_state_has_pending_barrier(full_width)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + value = full_width; + value &= ~DISPATCH_QUEUE_DIRTY; + value ^= _dispatch_tid_self(); + } else { + value &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + value &= ~DISPATCH_QUEUE_RECEIVED_OVERRIDE; + } } else { value = DISPATCH_QUEUE_SUSPEND_INTERVAL; - if (slowpath(os_sub_overflow(dq_state, value, &value))) { + if (unlikely(os_sub_overflow(dq_state, value, &value))) { // underflow means over-resume or a suspend count transfer // to the side count is needed os_atomic_rmw_loop_give_up({ @@ -1647,23 +1530,10 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) return _dispatch_queue_resume_slow(dq); }); } - if (_dq_state_is_runnable(value) && - !_dq_state_drain_locked(value)) { - uint64_t full_width = value; - if (_dq_state_has_pending_barrier(value)) { - full_width -= DISPATCH_QUEUE_PENDING_BARRIER; - full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } else { - full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } - if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - value = full_width; - value &= ~DISPATCH_QUEUE_DIRTY; - value |= _dispatch_tid_self(); - } + if (unlikely(_dq_state_is_runnable(value))) { + // make drain_try_unlock() fail and reconsider whether + // it has work to do + value |= DISPATCH_QUEUE_DIRTY; } } }); @@ -1677,7 +1547,7 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) if (activate) { // if we're still in an activate codepath here we should have // { sc:>0 na:1 }, if not we've got a corrupt state - if (!fastpath(_dq_state_is_suspended(value))) { + if (unlikely(!_dq_state_is_suspended(value))) { DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); } return; @@ -1688,22 +1558,23 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) } if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) { - _dispatch_release(dq); - return _dispatch_try_lock_transfer_or_wakeup(dq); - } - - if 
(_dq_state_should_wakeup(value)) { + _dispatch_try_lock_transfer_or_wakeup(dq); + } else if (_dq_state_should_wakeup(value)) { // - // seq_cst wrt state changes that were flushed and not acted upon - os_atomic_thread_fence(acquire); - pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, - _dispatch_queue_is_thread_bound(dq)); - return dx_wakeup(dq, pp, DISPATCH_WAKEUP_CONSUME); + // dependency ordering for dq state changes that were flushed + // and not acted upon + os_atomic_thread_fence(dependency); + dq = os_atomic_force_dependency_on(dq, value); + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + // Balancing the retain() done in suspend() for rdar://8181908 + return dx_wakeup(dq, qos, DISPATCH_WAKEUP_CONSUME); } + + // Balancing the retain() done in suspend() for rdar://8181908 return _dispatch_release_tailcall(dq); over_resume: - if (slowpath(_dq_state_is_inactive(dq_state))) { + if (unlikely(_dq_state_is_inactive(dq_state))) { DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); } DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); @@ -1719,19 +1590,13 @@ dispatch_queue_get_label(dispatch_queue_t dq) } qos_class_t -dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr) +dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr) { - qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED; - int relative_priority = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t dqp = dq->dq_priority; - if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0; - qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL); -#else - (void)dq; -#endif - if (relative_priority_ptr) *relative_priority_ptr = relative_priority; - return qos; + dispatch_qos_class_t qos = _dispatch_priority_qos(dq->dq_priority); + if (relpri_ptr) { + *relpri_ptr = qos ? 
_dispatch_priority_relpri(dq->dq_priority) : 0; + } + return _dispatch_qos_to_qos_class(qos); } static void @@ -1765,8 +1630,7 @@ _dispatch_queue_set_width2(void *ctxt) dispatch_queue_flags_t old_dqf, new_dqf; os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { - new_dqf = old_dqf & ~DQF_WIDTH_MASK; - new_dqf |= (tmp << DQF_WIDTH_SHIFT); + new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp); }); _dispatch_object_debug(dq, "%s", __func__); } @@ -1808,7 +1672,10 @@ _dispatch_queue_legacy_set_target_queue(void *ctxt) } _dispatch_queue_priority_inherit_from_target(dq, tq); - _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); + } + #if HAVE_PTHREAD_WORKQUEUE_QOS // see _dispatch_queue_class_wakeup() _dispatch_queue_sidelock_lock(dq); @@ -1830,10 +1697,9 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && dq->do_targetq); - if (slowpath(!tq)) { + if (unlikely(!tq)) { bool is_concurrent_q = (dq->dq_width > 1); - tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - !is_concurrent_q); + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, !is_concurrent_q); } if (_dispatch_queue_try_inactive_suspend(dq)) { @@ -1841,9 +1707,12 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) return dx_vtable(dq)->do_resume(dq, false); } - if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { - DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of a queue or " - "source with an accounting override voucher " + if (unlikely(!_dispatch_queue_is_legacy(dq))) { + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); + } + DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of this object " "after it has been activated"); } @@ -1861,11 +1730,6 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) _dispatch_bug_deprecated("Changing the target of a source " "after it has been activated"); break; - - case DISPATCH_QUEUE_SERIAL_TYPE: - case DISPATCH_QUEUE_CONCURRENT_TYPE: - DISPATCH_CLIENT_CRASH(type, "Cannot change the target of this queue " - "after it has been activated"); default: DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); } @@ -1895,9 +1759,10 @@ static struct dispatch_queue_s _dispatch_mgr_root_queue = { .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, .do_ctxt = &_dispatch_mgr_root_queue_context, .dq_label = "com.apple.root.libdispatch-manager", - .dq_width = DISPATCH_QUEUE_WIDTH_POOL, - .dq_override = DISPATCH_SATURATED_OVERRIDE, - .dq_override_voucher = DISPATCH_NO_VOUCHER, + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_wlh = DISPATCH_WLH_GLOBAL, + .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, .dq_serialnum = 3, }; #endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES @@ -1913,17 +1778,16 @@ static struct { static dispatch_once_t _dispatch_mgr_sched_pred; -// TODO: switch to "event-reflector thread" property - #if HAVE_PTHREAD_WORKQUEUE_QOS +// TODO: switch to "event-reflector thread" property // Must be kept in sync with list of qos classes in sys/qos.h static const int _dispatch_mgr_sched_qos2prio[] = { - [_DISPATCH_QOS_CLASS_MAINTENANCE] = 4, - [_DISPATCH_QOS_CLASS_BACKGROUND] = 4, - [_DISPATCH_QOS_CLASS_UTILITY] = 20, - 
[_DISPATCH_QOS_CLASS_DEFAULT] = 31, - [_DISPATCH_QOS_CLASS_USER_INITIATED] = 37, - [_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47, + [QOS_CLASS_MAINTENANCE] = 4, + [QOS_CLASS_BACKGROUND] = 4, + [QOS_CLASS_UTILITY] = 20, + [QOS_CLASS_DEFAULT] = 31, + [QOS_CLASS_USER_INITIATED] = 37, + [QOS_CLASS_USER_INTERACTIVE] = 47, }; #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -1943,8 +1807,8 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); #if HAVE_PTHREAD_WORKQUEUE_QOS qos_class_t qos = qos_class_main(); - if (qos == _DISPATCH_QOS_CLASS_DEFAULT) { - qos = _DISPATCH_QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 + if (qos == QOS_CLASS_DEFAULT) { + qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 } if (qos) { _dispatch_mgr_sched.qos = qos; @@ -1977,8 +1841,6 @@ _dispatch_mgr_root_queue_init(void) (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0)); } - _dispatch_mgr_q.dq_priority = - (dispatch_priority_t)_pthread_qos_class_encode(qos, 0, 0); } #endif param.sched_priority = _dispatch_mgr_sched.prio; @@ -2050,8 +1912,7 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) if (p >= prio) os_atomic_rmw_loop_give_up(return); }); #if DISPATCH_USE_KEVENT_WORKQUEUE - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); + _dispatch_root_queues_init(); if (_dispatch_kevent_workqueue_enabled) { pthread_priority_t pp = 0; if (prio > _dispatch_mgr_sched.default_prio) { @@ -2085,8 +1946,7 @@ void _dispatch_kevent_workqueue_init(void) { // Initialize kevent workqueue support - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); + _dispatch_root_queues_init(); if (!_dispatch_kevent_workqueue_enabled) return; dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); qos_class_t qos = _dispatch_mgr_sched.qos; @@ -2094,7 +1954,6 @@ _dispatch_kevent_workqueue_init(void) pthread_priority_t pp = 0; if (qos) { pp = _pthread_qos_class_encode(qos, 0, 0); - _dispatch_mgr_q.dq_priority = (dispatch_priority_t)pp; } if (prio > _dispatch_mgr_sched.default_prio) { pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; @@ -2104,7 +1963,7 @@ _dispatch_kevent_workqueue_init(void) (void)dispatch_assume_zero(r); } } -#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE #pragma mark - #pragma mark dispatch_pthread_root_queue @@ -2142,10 +2001,11 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags, _dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, false); dq->dq_label = label; - dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, - dq->dq_override = DISPATCH_SATURATED_OVERRIDE; + dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; dq->do_ctxt = qc; dq->do_targetq = NULL; + dq->dq_priority = DISPATCH_PRIORITY_SATURATED_OVERRIDE; + dq->dq_wlh = DISPATCH_WLH_GLOBAL; pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); qc->dgq_ctxt = pqc; @@ -2230,8 +2090,7 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) if (pqc->dpq_thread_configure) { Block_release(pqc->dpq_thread_configure); } - dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); + dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); #endif if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { free((void*)dq->dq_label); @@ -2246,7 +2105,7 @@ struct dispatch_queue_specific_queue_s { DISPATCH_QUEUE_HEADER(queue_specific_queue); TAILQ_HEAD(dispatch_queue_specific_head_s, 
dispatch_queue_specific_s) dqsq_contexts; -} DISPATCH_QUEUE_ALIGN; +} DISPATCH_ATOMIC64_ALIGN; struct dispatch_queue_specific_s { const void *dqs_key; @@ -2260,12 +2119,11 @@ void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) { dispatch_queue_specific_t dqs, tmp; + dispatch_queue_t rq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) { if (dqs->dqs_destructor) { - dispatch_async_f(_dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt, - dqs->dqs_destructor); + dispatch_async_f(rq, dqs->dqs_ctxt, dqs->dqs_destructor); } free(dqs); } @@ -2282,8 +2140,7 @@ _dispatch_queue_init_specific(dispatch_queue_t dq) _dispatch_queue_init(dqsq->_as_dq, DQF_NONE, DISPATCH_QUEUE_WIDTH_MAX, false); dqsq->do_xref_cnt = -1; - dqsq->do_targetq = _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_USER_INITIATED, true); + dqsq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_USER_INITIATED, true); dqsq->dq_label = "queue-specific"; TAILQ_INIT(&dqsq->dqsq_contexts); if (slowpath(!os_atomic_cmpxchg2o(dq, dq_specific_q, NULL, @@ -2304,7 +2161,7 @@ _dispatch_queue_set_specific(void *ctxt) // Destroy previous context for existing key if (dqs->dqs_destructor) { dispatch_async_f(_dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt, + DISPATCH_QOS_DEFAULT, false), dqs->dqs_ctxt, dqs->dqs_destructor); } if (dqsn->dqs_ctxt) { @@ -2362,6 +2219,18 @@ _dispatch_queue_get_specific(void *ctxt) *ctxtp = NULL; } +DISPATCH_ALWAYS_INLINE +static inline void * +_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key) +{ + void *ctxt = NULL; + if (fastpath(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE && dq->dq_specific_q)){ + ctxt = (void *)key; + dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific); + } + return ctxt; +} + DISPATCH_NOINLINE void * dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) @@ -2369,13 +2238,7 @@ dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) if (slowpath(!key)) { return NULL; } - void *ctxt = NULL; - - if (fastpath(dq->dq_specific_q)) { - ctxt = (void *)key; - dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific); - } - return ctxt; + return _dispatch_queue_get_specific_inline(dq, key); } DISPATCH_NOINLINE @@ -2389,12 +2252,8 @@ dispatch_get_specific(const void *key) dispatch_queue_t dq = _dispatch_queue_get_current(); while (slowpath(dq)) { - if (slowpath(dq->dq_specific_q)) { - ctxt = (void *)key; - dispatch_sync_f(dq->dq_specific_q, &ctxt, - _dispatch_queue_get_specific); - if (ctxt) break; - } + ctxt = _dispatch_queue_get_specific_inline(dq, key); + if (ctxt) break; dq = dq->do_targetq; } return ctxt; @@ -2442,8 +2301,9 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) if (_dq_state_is_dirty(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty"); } - if (_dq_state_has_override(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", async-override"); + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + if (qos) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos); } mach_port_t owner = _dq_state_drain_owner(dq_state); if (!_dispatch_queue_is_thread_bound(dq) && owner) { @@ -2489,34 +2349,37 @@ dispatch_debug_queue(dispatch_queue_t dq, const char* str) { } #endif -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION -static OSSpinLock _dispatch_stats_lock; +#if DISPATCH_PERF_MON static struct { - 
uint64_t time_total; - uint64_t count_total; - uint64_t thread_total; -} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set + uint64_t volatile time_total; + uint64_t volatile count_total; + uint64_t volatile thread_total; +} _dispatch_stats[65]; -static void -_dispatch_queue_merge_stats(uint64_t start) +void +_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type) { uint64_t delta = _dispatch_absolute_time() - start; unsigned long count; + int bucket = 0; count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); - int bucket = flsl((long)count); - - // 64-bit counters on 32-bit require a lock or a queue - OSSpinLockLock(&_dispatch_stats_lock); - - _dispatch_stats[bucket].time_total += delta; - _dispatch_stats[bucket].count_total += count; - _dispatch_stats[bucket].thread_total++; - - OSSpinLockUnlock(&_dispatch_stats_lock); + if (count == 0) { + bucket = 0; + if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type); + } else { + bucket = (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count); + os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed); + } + os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed); + os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed); + if (trace) { + _dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta, type); + } } + #endif #pragma mark - @@ -2536,8 +2399,8 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; // when we unbind, overcomitness can flip, so we need to learn // it from the defaultpri, see _dispatch_priority_compute_update - pp |= (_dispatch_get_defaultpriority() & - _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + pp |= (_dispatch_get_basepri() & + DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } else { // else we need to keep the one that is set in the current pri pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); @@ -2575,7 +2438,7 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, DISPATCH_NOINLINE voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, - voucher_t v, _dispatch_thread_set_self_t flags) + voucher_t v, dispatch_thread_set_self_t flags) { voucher_t ov = DISPATCH_NO_VOUCHER; mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; @@ -2591,7 +2454,7 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, } } #if !PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK - flags &= ~(_dispatch_thread_set_self_t)DISPATCH_THREAD_PARK; + flags &= ~(dispatch_thread_set_self_t)DISPATCH_THREAD_PARK; #endif if (!(flags & DISPATCH_THREAD_PARK)) { _dispatch_set_priority_and_mach_voucher_slow(priority, kv); @@ -2606,6 +2469,34 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, #pragma mark - #pragma mark dispatch_continuation_t +const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { + DC_VTABLE_ENTRY(ASYNC_REDIRECT, + .do_kind = "dc-redirect", + .do_invoke = _dispatch_async_redirect_invoke), +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, + .do_kind = "dc-mach-send-drain", + .do_invoke = _dispatch_mach_send_barrier_drain_invoke), + DC_VTABLE_ENTRY(MACH_SEND_BARRIER, + .do_kind = "dc-mach-send-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_RECV_BARRIER, + .do_kind = "dc-mach-recv-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_ASYNC_REPLY, + .do_kind = 
"dc-mach-async-reply", + .do_invoke = _dispatch_mach_msg_async_reply_invoke), +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + DC_VTABLE_ENTRY(OVERRIDE_STEALING, + .do_kind = "dc-override-stealing", + .do_invoke = _dispatch_queue_override_invoke), + DC_VTABLE_ENTRY(OVERRIDE_OWNING, + .do_kind = "dc-override-owning", + .do_invoke = _dispatch_queue_override_invoke), +#endif +}; + static void _dispatch_force_cache_cleanup(void) { @@ -2639,7 +2530,7 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) dc = _dispatch_thread_getspecific(dispatch_cache_key); int cnt; if (!dc || (cnt = dc->dc_cache_cnt - - _dispatch_continuation_cache_limit) <= 0){ + _dispatch_continuation_cache_limit) <= 0) { return; } do { @@ -2650,38 +2541,11 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) } #endif -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline void -_dispatch_continuation_slow_item_signal(dispatch_queue_t dq, - dispatch_object_t dou) -{ - dispatch_continuation_t dc = dou._dc; - pthread_priority_t pp = dq->dq_override; - - _dispatch_trace_continuation_pop(dq, dc); - if (pp > (dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, pp); - } - _dispatch_thread_event_signal((dispatch_thread_event_t)dc->dc_other); - _dispatch_introspection_queue_item_complete(dc); -} - DISPATCH_NOINLINE static void _dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc) { - _dispatch_queue_push(dq, dc, - _dispatch_continuation_get_override_priority(dq, dc)); -} - -DISPATCH_NOINLINE -static void -_dispatch_continuation_push_sync_slow(dispatch_queue_t dq, - dispatch_continuation_t dc) -{ - _dispatch_queue_push_inline(dq, dc, - _dispatch_continuation_get_override_priority(dq, dc), - DISPATCH_WAKEUP_SLOW_WAITER); + dx_push(dq, dc, _dispatch_continuation_override_qos(dq, dc)); } DISPATCH_ALWAYS_INLINE @@ -2838,20 +2702,16 @@ _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) } if (atomic_flags & DBF_CANCELED) goto out; - pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - _dispatch_thread_set_self_t adopt_flags = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + pthread_priority_t op = 0, p = 0; + if (_dispatch_block_invoke_should_set_priority(flags)) { op = _dispatch_get_priority(); - p = dbpd->dbpd_priority; - if (_dispatch_block_sync_should_enforce_qos_class(flags)) { - adopt_flags |= DISPATCH_PRIORITY_ENFORCE; - } + p = dbpd->dbpd_priority; } voucher_t ov, v = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { v = dbpd->dbpd_voucher; } - ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); + ov = _dispatch_set_priority_and_voucher(p, v, 0); dbpd->dbpd_thread = _dispatch_tid_self(); _dispatch_client_callout(dbpd->dbpd_block, _dispatch_Block_invoke(dbpd->dbpd_block)); @@ -2871,28 +2731,18 @@ _dispatch_block_sync_invoke(void *block) dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (slowpath(atomic_flags & DBF_WAITED)) { + if (unlikely(atomic_flags & DBF_WAITED)) { DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " "run more than once and waited for"); } if (atomic_flags & DBF_CANCELED) goto out; - pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - _dispatch_thread_set_self_t adopt_flags = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - op = _dispatch_get_priority(); - p = 
dbpd->dbpd_priority; - if (_dispatch_block_sync_should_enforce_qos_class(flags)) { - adopt_flags |= DISPATCH_PRIORITY_ENFORCE; - } - } - voucher_t ov, v = DISPATCH_NO_VOUCHER; + voucher_t ov = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; + ov = _dispatch_adopt_priority_and_set_voucher(0, dbpd->dbpd_voucher, 0); } - ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); dbpd->dbpd_block(); - _dispatch_reset_priority_and_voucher(op, ov); + _dispatch_reset_voucher(ov, 0); out: if ((atomic_flags & DBF_PERFORM) == 0) { if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { @@ -3002,7 +2852,8 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) // neither of us would ever release. Side effect: After a _wait // that times out, subsequent waits will not boost the qos of the // still-running block. - dx_wakeup(boost_oq, pp, DISPATCH_WAKEUP_OVERRIDING | + dx_wakeup(boost_oq, _dispatch_qos_from_pp(pp), + DISPATCH_WAKEUP_BLOCK_WAIT | DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME); } @@ -3170,12 +3021,12 @@ _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dc->dc_ctxt = ctxt; dc->dc_voucher = DISPATCH_NO_VOUCHER; dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_queue_push(dq, dc, 0); + dx_push(dq, dc, 0); } #ifdef __BLOCKS__ void -dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) +dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; @@ -3190,7 +3041,7 @@ dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { dispatch_thread_frame_s dtf; struct dispatch_continuation_s *other_dc = dc->dc_other; @@ -3199,9 +3050,7 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc, // the "right" root queue was stuffed into dc_func dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func; dispatch_queue_t dq = dc->dc_data, rq, old_dq; - struct _dispatch_identity_s di; - - pthread_priority_t op, dp, old_dp; + dispatch_priority_t old_dbp; if (ctxt_flags) { flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; @@ -3209,29 +3058,20 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc, } old_dq = _dispatch_get_current_queue(); if (assumed_rq) { - _dispatch_queue_set_current(assumed_rq); - _dispatch_root_queue_identity_assume(&di, 0); - } - - old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); - op = dq->dq_override; - if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_tid_self(), op); - // Ensure that the root queue sees that this thread was overridden. 
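Usage-level aside, not part of the patch: the _dispatch_block_invoke_direct and _dispatch_block_sync_invoke hunks above consume the priority and voucher that dispatch_block_create captured into the block's private data. A minimal public-API example of a block carrying its own QoS (assuming Darwin with blocks support) looks like this:

```c
#include <dispatch/dispatch.h>
#include <sys/qos.h>
#include <Block.h>

// The QoS captured here is what the dbpd_priority/dbpd_flags fields consulted
// above carry into the invoke path; ENFORCE makes the invoking thread adopt it.
static void
run_utility_block(dispatch_queue_t q)
{
	dispatch_block_t b = dispatch_block_create_with_qos_class(
			DISPATCH_BLOCK_ENFORCE_QOS_CLASS, QOS_CLASS_UTILITY, 0, ^{
		// body runs with the block's stored QoS rather than the caller's
	});
	dispatch_sync(q, b);
	Block_release(b);
}
```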
- _dispatch_set_defaultpriority_override(); + old_dbp = _dispatch_root_queue_identity_assume(assumed_rq); + _dispatch_set_basepri(dq->dq_priority); + } else { + old_dbp = _dispatch_set_basepri(dq->dq_priority); } _dispatch_thread_frame_push(&dtf, dq); _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, DISPATCH_OBJ_CONSUME_BIT, { - _dispatch_continuation_pop(other_dc, dq, flags); + _dispatch_continuation_pop(other_dc, dic, flags, dq); }); _dispatch_thread_frame_pop(&dtf); - if (assumed_rq) { - _dispatch_root_queue_identity_restore(&di); - _dispatch_queue_set_current(old_dq); - } - _dispatch_reset_defaultpriority(old_dp); + if (assumed_rq) _dispatch_queue_set_current(old_dq); + _dispatch_reset_basepri(old_dbp); rq = dq->do_targetq; while (slowpath(rq->do_targetq) && rq != old_dq) { @@ -3241,9 +3081,8 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc, _dispatch_non_barrier_complete(dq); - if (dtf.dtf_deferred) { - struct dispatch_object_s *dou = dtf.dtf_deferred; - return _dispatch_queue_drain_deferred_invoke(dq, flags, 0, dou); + if (dic->dic_deferred) { + return _dispatch_queue_drain_deferred_invoke(dq, dic, flags, 0); } _dispatch_release_tailcall(dq); @@ -3270,7 +3109,7 @@ _dispatch_async_redirect_wrap(dispatch_queue_t dq, dispatch_object_t dou) DISPATCH_NOINLINE static void _dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_object_t dou, pthread_priority_t pp) + dispatch_object_t dou, dispatch_qos_t qos) { if (!slowpath(_dispatch_object_is_redirection(dou))) { dou._dc = _dispatch_async_redirect_wrap(dq, dou); @@ -3292,7 +3131,7 @@ _dispatch_async_f_redirect(dispatch_queue_t dq, dq = dq->do_targetq; } - _dispatch_queue_push(dq, dou, pp); + dx_push(dq, dou, qos); } DISPATCH_ALWAYS_INLINE @@ -3305,7 +3144,8 @@ _dispatch_continuation_redirect(dispatch_queue_t dq, // by _dispatch_async_f2. 
// However we want to end up on the root queue matching `dc` qos, so pick up // the current override of `dq` which includes dc's overrde (and maybe more) - _dispatch_async_f_redirect(dq, dc, dq->dq_override); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + _dispatch_async_f_redirect(dq, dc, _dq_state_max_qos(dq_state)); _dispatch_introspection_queue_item_complete(dc); } @@ -3326,7 +3166,7 @@ _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) } return _dispatch_async_f_redirect(dq, dc, - _dispatch_continuation_get_override_priority(dq, dc)); + _dispatch_continuation_override_qos(dq, dc)); } DISPATCH_ALWAYS_INLINE @@ -3362,7 +3202,7 @@ dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, #ifdef __BLOCKS__ void -dispatch_async(dispatch_queue_t dq, void (^work)(void)) +dispatch_async(dispatch_queue_t dq, dispatch_block_t work) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; @@ -3411,293 +3251,324 @@ dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, #endif #pragma mark - -#pragma mark dispatch_sync / dispatch_barrier_sync recurse and invoke - -DISPATCH_NOINLINE -static void -_dispatch_sync_function_invoke_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - voucher_t ov; - dispatch_thread_frame_s dtf; - _dispatch_thread_frame_push(&dtf, dq); - ov = _dispatch_set_priority_and_voucher(0, dq->dq_override_voucher, 0); - _dispatch_client_callout(ctxt, func); - _dispatch_perfmon_workitem_inc(); - _dispatch_reset_voucher(ov, 0); - _dispatch_thread_frame_pop(&dtf); -} +#pragma mark _dispatch_sync_invoke / _dispatch_sync_complete DISPATCH_ALWAYS_INLINE static inline void -_dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +_dispatch_barrier_complete_inline(dispatch_queue_t dq) { - if (slowpath(dq->dq_override_voucher != DISPATCH_NO_VOUCHER)) { - return _dispatch_sync_function_invoke_slow(dq, ctxt, func); + uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + + dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + + if (unlikely(dq->dq_items_tail)) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + + if (unlikely(!_dispatch_queue_drain_try_unlock(dq, owned, true))) { + // someone enqueued a slow item at the head + // looping may be its last chance + return _dispatch_try_lock_transfer_or_wakeup(dq); } - dispatch_thread_frame_s dtf; - _dispatch_thread_frame_push(&dtf, dq); - _dispatch_client_callout(ctxt, func); - _dispatch_perfmon_workitem_inc(); - _dispatch_thread_frame_pop(&dtf); } DISPATCH_NOINLINE static void -_dispatch_sync_function_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +_dispatch_barrier_complete(dispatch_queue_t dq) { - _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_barrier_complete_inline(dq); } -void -_dispatch_sync_recurse_invoke(void *ctxt) +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_complete(dispatch_queue_t dq) { - dispatch_continuation_t dc = ctxt; - _dispatch_sync_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); + uint64_t old_state, new_state; + + // see _dispatch_queue_resume() + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; + if (unlikely(_dq_state_drain_locked(old_state))) { + // make drain_try_unlock() fail and reconsider whether there's + // enough width now for a new item + new_state |= DISPATCH_QUEUE_DIRTY; + } else if 
(likely(_dq_state_is_runnable(new_state))) { + uint64_t full_width = new_state; + if (_dq_state_has_pending_barrier(old_state)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state = full_width; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state ^= _dispatch_tid_self(); + } else if (_dq_state_is_dirty(old_state)) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } + } + }); + + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + _dispatch_retain(dq); + return dx_push(dq->do_targetq, dq, _dq_state_max_qos(new_state)); + } } + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_sync_function_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - struct dispatch_continuation_s dc = { - .dc_data = dq, - .dc_func = func, - .dc_ctxt = ctxt, - }; - _dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke, pp); + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_client_callout(ctxt, func); + _dispatch_perfmon_workitem_inc(); + _dispatch_thread_frame_pop(&dtf); } DISPATCH_NOINLINE static void -_dispatch_non_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, +_dispatch_sync_function_invoke(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_non_barrier_complete(dq); } DISPATCH_NOINLINE static void -_dispatch_non_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, + uintptr_t dc_flags) { - _dispatch_sync_function_recurse(dq, ctxt, func, pp); - _dispatch_non_barrier_complete(dq); + bool barrier = (dc_flags & DISPATCH_OBJ_BARRIER_BIT); + do { + if (dq == stop_dq) return; + if (barrier) { + _dispatch_barrier_complete(dq); + } else { + _dispatch_non_barrier_complete(dq); + } + dq = dq->do_targetq; + barrier = (dq->dq_width == 1); + } while (unlikely(dq->do_targetq)); } -DISPATCH_ALWAYS_INLINE +DISPATCH_NOINLINE static void -_dispatch_non_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - _dispatch_introspection_non_barrier_sync_begin(dq, func); - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_non_barrier_sync_f_recurse(dq, ctxt, func, pp); - } - _dispatch_non_barrier_sync_f_invoke(dq, ctxt, func); + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_sync_complete_recurse(dq, NULL, dc_flags); } -#pragma mark - -#pragma mark dispatch_barrier_sync - DISPATCH_NOINLINE static void -_dispatch_barrier_complete(dispatch_queue_t dq) +_dispatch_sync_invoke_and_complete(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + - dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - - if (slowpath(dq->dq_items_tail)) { - return 
_dispatch_try_lock_transfer_or_wakeup(dq); - } - - if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { - // someone enqueued a slow item at the head - // looping may be its last chance - return _dispatch_try_lock_transfer_or_wakeup(dq); - } + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_non_barrier_complete(dq); } DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_barrier_sync_invoke_and_complete(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - _dispatch_sync_function_recurse(dq, ctxt, func, pp); - _dispatch_barrier_complete(dq); + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_barrier_complete_inline(dq); } +#pragma mark - +#pragma mark _dispatch_sync_wait / _dispatch_sync_waiter_wake + DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +_dispatch_sync_waiter_wake(OS_UNUSED dispatch_queue_t dq, + dispatch_sync_context_t dsc) { - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_barrier_complete(dq); + if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { + _dispatch_wqthread_override_start((mach_port_t)&dsc->dc_data, + dsc->dsc_override_qos); + } + _dispatch_thread_event_signal(&dsc->dsc_event); + _dispatch_introspection_queue_item_complete(dsc->_as_dc); } -DISPATCH_ALWAYS_INLINE +DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_waiter_redirect_or_wake(dispatch_queue_t dq, + dispatch_object_t dou) { - _dispatch_introspection_barrier_sync_begin(dq, func); - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp); + dispatch_sync_context_t dsc = (dispatch_sync_context_t )dou._dc; + uint32_t tid = (uint32_t)(uintptr_t)dsc->dc_data; + + if (likely(dsc->dsc_override_qos)) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (dsc->dsc_override_qos < _dq_state_max_qos(dq_state)) { + dsc->dsc_override_qos = _dq_state_max_qos(dq_state); + } } - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); -} + _dispatch_trace_continuation_pop(dq, dsc->_as_dc); -typedef struct dispatch_barrier_sync_context_s { - struct dispatch_continuation_s dbsc_dc; - dispatch_thread_frame_s dbsc_dtf; -} *dispatch_barrier_sync_context_t; + while (unlikely(dq->do_targetq->do_targetq)) { + dq = dq->do_targetq; + if (likely(dq->dq_width == 1)) { + dsc->dc_flags = DISPATCH_OBJ_BARRIER_BIT | + DISPATCH_OBJ_SYNC_WAITER_BIT; + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { + _dispatch_introspection_queue_item_complete(dsc->_as_dc); + return _dispatch_queue_push_sync_waiter(dq, dsc); + } + } else { + dsc->dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT; + if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) { + _dispatch_introspection_queue_item_complete(dsc->_as_dc); + return _dispatch_queue_push_sync_waiter(dq, dsc); + } + } + } + + return _dispatch_sync_waiter_wake(dq, dsc); +} +#if DISPATCH_COCOA_COMPAT static void -_dispatch_barrier_sync_f_slow_invoke(void *ctxt) +_dispatch_sync_thread_bound_invoke(void *ctxt) { - dispatch_barrier_sync_context_t dbsc = ctxt; - dispatch_continuation_t dc = &dbsc->dbsc_dc; - dispatch_queue_t dq = dc->dc_data; - dispatch_thread_event_t event = (dispatch_thread_event_t)dc->dc_other; + dispatch_sync_context_t dsc = ctxt; + 
dispatch_queue_t cq = _dispatch_queue_get_current(); + dispatch_queue_t orig_dq = dsc->dc_other; + dispatch_thread_frame_s dtf; + dispatch_assert(_dispatch_queue_is_thread_bound(cq)); - dispatch_assert(dq == _dispatch_queue_get_current()); -#if DISPATCH_COCOA_COMPAT - if (slowpath(_dispatch_queue_is_thread_bound(dq))) { - dispatch_assert(_dispatch_thread_frame_get_current() == NULL); + // the block runs on the thread the queue is bound to and not + // on the calling thread, but we mean to see the calling thread + // dispatch thread frames, so we fake the link, and then undo it + _dispatch_thread_frame_push_and_rebase(&dtf, orig_dq, &dsc->dsc_dtf); + _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func); + _dispatch_thread_frame_pop(&dtf); - // the block runs on the thread the queue is bound to and not - // on the calling thread, but we mean to see the calling thread - // dispatch thread frames, so we fake the link, and then undo it - _dispatch_thread_frame_set_current(&dbsc->dbsc_dtf); - // The queue is bound to a non-dispatch thread (e.g. main thread) - _dispatch_continuation_voucher_adopt(dc, DISPATCH_NO_VOUCHER, - DISPATCH_OBJ_CONSUME_BIT); - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - os_atomic_store2o(dc, dc_func, NULL, release); - _dispatch_thread_frame_set_current(NULL); - } -#endif - _dispatch_thread_event_signal(event); // release + // communicate back to _dispatch_sync_wait who the thread bound queue + // was so that we skip it during _dispatch_sync_complete_recurse + dsc->dc_other = cq; + dsc->dsc_func = NULL; + _dispatch_thread_event_signal(&dsc->dsc_event); // release } +#endif DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_wait(dispatch_queue_t top_dq, void *ctxt, + dispatch_function_t func, uintptr_t top_dc_flags, + dispatch_queue_t dq, uintptr_t dc_flags) { - if (slowpath(!dq->do_targetq)) { - // see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE - return _dispatch_sync_function_invoke(dq, ctxt, func); - } + uint32_t tid = _dispatch_tid_self(); + dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor(); + pthread_priority_t pp = _dispatch_get_priority(); - if (!pp) { - pp = _dispatch_get_priority(); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - dispatch_thread_event_s event; - _dispatch_thread_event_init(&event); - struct dispatch_barrier_sync_context_s dbsc = { - .dbsc_dc = { - .dc_data = dq, -#if DISPATCH_COCOA_COMPAT - .dc_func = func, - .dc_ctxt = ctxt, -#endif - .dc_other = &event, - } + struct dispatch_sync_context_s dsc = { + .dc_flags = dc_flags | DISPATCH_OBJ_SYNC_WAITER_BIT, + .dc_data = (void *)(uintptr_t)tid, + .dc_other = top_dq, + .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, + .dc_voucher = DISPATCH_NO_VOUCHER, + .dsc_func = func, + .dsc_ctxt = ctxt, + .dsc_override_qos_floor = oq_floor, + .dsc_override_qos = oq_floor, }; #if DISPATCH_COCOA_COMPAT // It's preferred to execute synchronous blocks on the current thread // due to thread-local side effects, etc. 
However, blocks submitted // to the main thread MUST be run on the main thread - if (slowpath(_dispatch_queue_is_thread_bound(dq))) { - // consumed by _dispatch_barrier_sync_f_slow_invoke - // or in the DISPATCH_COCOA_COMPAT hunk below - _dispatch_continuation_voucher_set(&dbsc.dbsc_dc, dq, 0); - // save frame linkage for _dispatch_barrier_sync_f_slow_invoke - _dispatch_thread_frame_save_state(&dbsc.dbsc_dtf); - // thread bound queues cannot mutate their target queue hierarchy - // so it's fine to look now - _dispatch_introspection_barrier_sync_begin(dq, func); - } -#endif - uint32_t th_self = _dispatch_tid_self(); - struct dispatch_continuation_s dbss = { - .dc_flags = DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT, - .dc_func = _dispatch_barrier_sync_f_slow_invoke, - .dc_ctxt = &dbsc, - .dc_data = (void*)(uintptr_t)th_self, - .dc_priority = pp, - .dc_other = &event, - .dc_voucher = DISPATCH_NO_VOUCHER, - }; + // + // Since we don't know whether that will happen, save the frame linkage + // for the sake of _dispatch_sync_thread_bound_invoke + _dispatch_thread_frame_save_state(&dsc.dsc_dtf); + + // Since the continuation doesn't have the CONSUME bit, the voucher will be + // retained on adoption on the thread bound queue if it happens so we can + // borrow this thread's reference + dsc.dc_voucher = _voucher_get(); + dsc.dc_func = _dispatch_sync_thread_bound_invoke; + dsc.dc_ctxt = &dsc; +#endif uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { - DISPATCH_CLIENT_CRASH(dq, "dispatch_barrier_sync called on queue " + if (unlikely(_dq_state_drain_locked_by(dq_state, tid))) { + DISPATCH_CLIENT_CRASH(dq, "dispatch_sync called on queue " "already owned by current thread"); } - _dispatch_continuation_push_sync_slow(dq, &dbss); - _dispatch_thread_event_wait(&event); // acquire - _dispatch_thread_event_destroy(&event); - if (_dispatch_queue_received_override(dq, pp)) { - // Ensure that the root queue sees that this thread was overridden. - // pairs with the _dispatch_wqthread_override_start in - // _dispatch_continuation_slow_item_signal - _dispatch_set_defaultpriority_override(); + _dispatch_thread_event_init(&dsc.dsc_event); + _dispatch_queue_push_sync_waiter(dq, &dsc); + _dispatch_thread_event_wait(&dsc.dsc_event); // acquire + _dispatch_thread_event_destroy(&dsc.dsc_event); + if (dsc.dsc_override_qos > dsc.dsc_override_qos_floor) { + // If we received an override from _dispatch_sync_waiter_wake(), + // ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(dsc.dsc_override_qos); } + _dispatch_introspection_sync_begin(top_dq); #if DISPATCH_COCOA_COMPAT - // Queue bound to a non-dispatch thread - if (dbsc.dbsc_dc.dc_func == NULL) { - return; - } else if (dbsc.dbsc_dc.dc_voucher) { - // this almost never happens, unless a dispatch_sync() onto a thread - // bound queue went to the slow path at the same time dispatch_main() - // is called, or the queue is detached from the runloop. 
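For orientation only: dsc_event above is libdispatch's internal one-shot thread event. The waiter initializes it, pushes its dispatch_sync_context_s onto the queue, blocks in _dispatch_thread_event_wait(), and the drainer releases it with _dispatch_thread_event_signal() from _dispatch_sync_waiter_wake(). A rough, portable stand-in for that init/wait/signal/destroy lifecycle (the real one is built on ulocks or semaphores, not a condition variable) is sketched below.

```c
#include <pthread.h>
#include <stdbool.h>

// One-shot event sketch: wait() parks until signal() has been called once.
typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool signalled;
} sketch_event_s;

static void sketch_event_init(sketch_event_s *e) {
	pthread_mutex_init(&e->lock, NULL);
	pthread_cond_init(&e->cond, NULL);
	e->signalled = false;
}

static void sketch_event_wait(sketch_event_s *e) { // waiter side (acquire)
	pthread_mutex_lock(&e->lock);
	while (!e->signalled) pthread_cond_wait(&e->cond, &e->lock);
	pthread_mutex_unlock(&e->lock);
}

static void sketch_event_signal(sketch_event_s *e) { // drainer side (release)
	pthread_mutex_lock(&e->lock);
	e->signalled = true;
	pthread_cond_signal(&e->cond);
	pthread_mutex_unlock(&e->lock);
}

static void sketch_event_destroy(sketch_event_s *e) {
	pthread_cond_destroy(&e->cond);
	pthread_mutex_destroy(&e->lock);
}
```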
- _voucher_release(dbsc.dbsc_dc.dc_voucher); + if (unlikely(dsc.dsc_func == NULL)) { + // Queue bound to a non-dispatch thread, the continuation already ran + // so just unlock all the things, except for the thread bound queue + dispatch_queue_t bound_dq = dsc.dc_other; + return _dispatch_sync_complete_recurse(top_dq, bound_dq, top_dc_flags); } #endif - - _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); + _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_barrier_sync_f2(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +DISPATCH_NOINLINE +static void +_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { - // global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); + if (unlikely(!dq->do_targetq)) { + return _dispatch_sync_function_invoke(dq, ctxt, func); } - // - // TODO: the more correct thing to do would be to set dq_override to the qos - // of the thread that just acquired the barrier lock here. Unwinding that - // would slow down the uncontended fastpath however. - // - // The chosen tradeoff is that if an enqueue on a lower priority thread - // contends with this fastpath, this thread may receive a useless override. - // Improving this requires the override level to be part of the atomic - // dq_state - // - _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); + _dispatch_sync_wait(dq, ctxt, func, dc_flags, dq, dc_flags); } +#pragma mark - +#pragma mark dispatch_sync / dispatch_barrier_sync + DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - _dispatch_barrier_sync_f2(dq, ctxt, func, pp); + uint32_t tid = _dispatch_tid_self(); + dispatch_queue_t tq = dq->do_targetq; + + do { + if (likely(tq->dq_width == 1)) { + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { + return _dispatch_sync_wait(dq, ctxt, func, dc_flags, tq, + DISPATCH_OBJ_BARRIER_BIT); + } + } else { + if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) { + return _dispatch_sync_wait(dq, ctxt, func, dc_flags, tq, 0); + } + } + tq = tq->do_targetq; + } while (unlikely(tq->do_targetq)); + + return _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags); } DISPATCH_NOINLINE @@ -3705,369 +3576,192 @@ void dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - _dispatch_barrier_sync_f2(dq, ctxt, func, 0); -} + uint32_t tid = _dispatch_tid_self(); -#ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_sync_block_with_private_data(dispatch_queue_t dq, - void (^work)(void), dispatch_block_flags_t flags) -{ - pthread_priority_t pp = _dispatch_block_get_priority(work); - - flags |= _dispatch_block_get_flags(work); - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pthread_priority_t tp = _dispatch_get_priority(); - tp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp < tp) { - pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; - } else if (_dispatch_block_sync_should_enforce_qos_class(flags)) { - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - } - // balanced in d_block_sync_invoke or d_block_wait - if 
(os_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain(dq); - } - if (flags & DISPATCH_BLOCK_BARRIER) { - _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, pp); - } else { - _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, pp); + // The more correct thing to do would be to merge the qos of the thread + // that just acquired the barrier lock into the queue state. + // + // However this is too expensive for the fastpath, so skip doing it. + // The chosen tradeoff is that if an enqueue on a lower priority thread + // contends with this fastpath, this thread may receive a useless override. + // + // Global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { + return _dispatch_sync_f_slow(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT); } -} -void -dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) -{ - if (slowpath(_dispatch_block_has_private_data(work))) { - dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER; - return _dispatch_sync_block_with_private_data(dq, work, flags); + _dispatch_introspection_sync_begin(dq); + if (unlikely(dq->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT); } - dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); + _dispatch_barrier_sync_invoke_and_complete(dq, ctxt, func); } -#endif DISPATCH_NOINLINE void -_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - // Use for mutation of queue-/source-internal state only, ignores target - // queue hierarchy! 
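Usage-level aside, not part of the patch: on an otherwise idle concurrent queue, a call like the one below takes the uncontended _dispatch_queue_try_acquire_barrier_sync fast path added above and invokes the function on the calling thread; if the queue is busy or targets another non-root queue, the call is routed through _dispatch_sync_f_slow / _dispatch_sync_recurse instead.

```c
#include <dispatch/dispatch.h>
#include <stdio.h>

static void
update_shared_state(void *ctxt)
{
	int *counter = ctxt;
	(*counter)++; // runs exclusively: no other block on the queue is running
}

int
main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.state",
			DISPATCH_QUEUE_CONCURRENT);
	int counter = 0;
	dispatch_barrier_sync_f(q, &counter, update_shared_state);
	printf("counter = %d\n", counter);
	dispatch_release(q);
	return 0;
}
```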
- if (!fastpath(_dispatch_queue_try_acquire_barrier_sync(dq))) { - return _dispatch_barrier_async_detached_f(dq, ctxt, func); + if (likely(dq->dq_width == 1)) { + return dispatch_barrier_sync_f(dq, ctxt, func); } - // skip the recursion because it's about the queue state only - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); -} - -#pragma mark - -#pragma mark dispatch_sync - -DISPATCH_NOINLINE -static void -_dispatch_non_barrier_complete(dispatch_queue_t dq) -{ - uint64_t old_state, new_state; - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; - if (_dq_state_is_runnable(new_state)) { - if (!_dq_state_is_runnable(old_state)) { - // we're making a FULL -> non FULL transition - new_state |= DISPATCH_QUEUE_DIRTY; - } - if (!_dq_state_drain_locked(new_state)) { - uint64_t full_width = new_state; - if (_dq_state_has_pending_barrier(new_state)) { - full_width -= DISPATCH_QUEUE_PENDING_BARRIER; - full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } else { - full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } - if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - new_state = full_width; - new_state &= ~DISPATCH_QUEUE_DIRTY; - new_state |= _dispatch_tid_self(); - } - } - } - }); - if (_dq_state_is_in_barrier(new_state)) { - return _dispatch_try_lock_transfer_or_wakeup(dq); + // Global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) { + return _dispatch_sync_f_slow(dq, ctxt, func, 0); } - if (!_dq_state_is_runnable(old_state)) { - _dispatch_queue_try_wakeup(dq, new_state, 0); + + _dispatch_introspection_sync_begin(dq); + if (unlikely(dq->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dq, ctxt, func, 0); } + _dispatch_sync_invoke_and_complete(dq, ctxt, func); } +#ifdef __BLOCKS__ DISPATCH_NOINLINE static void -_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp) -{ - dispatch_assert(dq->do_targetq); - if (!pp) { - pp = _dispatch_get_priority(); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - dispatch_thread_event_s event; - _dispatch_thread_event_init(&event); - uint32_t th_self = _dispatch_tid_self(); - struct dispatch_continuation_s dc = { - .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, -#if DISPATCH_INTROSPECTION - .dc_func = func, - .dc_ctxt = ctxt, -#endif - .dc_data = (void*)(uintptr_t)th_self, - .dc_other = &event, - .dc_priority = pp, - .dc_voucher = DISPATCH_NO_VOUCHER, - }; - - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { - DISPATCH_CLIENT_CRASH(dq, "dispatch_sync called on queue " - "already owned by current thread"); - } - - _dispatch_continuation_push_sync_slow(dq, &dc); - _dispatch_thread_event_wait(&event); // acquire - _dispatch_thread_event_destroy(&event); - if (_dispatch_queue_received_override(dq, pp)) { - // Ensure that the root queue sees that this thread was overridden. 
- // pairs with the _dispatch_wqthread_override_start in - // _dispatch_continuation_slow_item_signal - _dispatch_set_defaultpriority_override(); - } - _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp) +_dispatch_sync_block_with_private_data(dispatch_queue_t dq, + dispatch_block_t work, dispatch_block_flags_t flags) { - // reserving non barrier width - // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width - // equivalent), so we have to check that this thread hasn't enqueued - // anything ahead of this call or we can break ordering - if (slowpath(dq->dq_items_tail)) { - return _dispatch_sync_f_slow(dq, ctxt, func, pp); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); + pthread_priority_t op = 0; + + flags |= dbpd->dbpd_flags; + if (_dispatch_block_invoke_should_set_priority(flags)) { + voucher_t v = DISPATCH_NO_VOUCHER; + op = _dispatch_get_priority(); + v = _dispatch_set_priority_and_voucher(dbpd->dbpd_priority, v, 0); + dispatch_assert(v == DISPATCH_NO_VOUCHER); } - // concurrent queues do not respect width on sync - if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { - return _dispatch_sync_f_slow(dq, ctxt, func, pp); + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq->_as_oq, relaxed)) { + _dispatch_retain(dq); } - _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp) -{ - if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { - return _dispatch_sync_f2(dq, ctxt, func, pp); + if (flags & DISPATCH_BLOCK_BARRIER) { + dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke); + } else { + dispatch_sync_f(dq, work, _dispatch_block_sync_invoke); } - return _dispatch_barrier_sync_f(dq, ctxt, func, pp); + _dispatch_reset_priority_and_voucher(op, DISPATCH_NO_VOUCHER); } -DISPATCH_NOINLINE void -dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work) { - if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { - return _dispatch_sync_f2(dq, ctxt, func, 0); + if (unlikely(_dispatch_block_has_private_data(work))) { + dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER; + return _dispatch_sync_block_with_private_data(dq, work, flags); } - return dispatch_barrier_sync_f(dq, ctxt, func); + dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } -#ifdef __BLOCKS__ void -dispatch_sync(dispatch_queue_t dq, void (^work)(void)) +dispatch_sync(dispatch_queue_t dq, dispatch_block_t work) { - if (slowpath(_dispatch_block_has_private_data(work))) { + if (unlikely(_dispatch_block_has_private_data(work))) { return _dispatch_sync_block_with_private_data(dq, work, 0); } dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); } -#endif +#endif // __BLOCKS__ #pragma mark - #pragma mark dispatch_trysync -struct trysync_context { - dispatch_queue_t tc_dq; - void *tc_ctxt; - dispatch_function_t tc_func; -}; +// Use for mutation of queue-/source-internal state only +// ignores target queue hierarchy! 
+DISPATCH_NOINLINE +void +_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + uint32_t tid = _dispatch_tid_self(); + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { + return _dispatch_barrier_async_detached_f(dq, ctxt, func); + } + _dispatch_barrier_sync_invoke_and_complete(dq, ctxt, func); +} DISPATCH_NOINLINE -static int -_dispatch_trysync_recurse(dispatch_queue_t dq, - struct trysync_context *tc, bool barrier) +static long +_dispatch_trysync_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t f, uintptr_t dc_flags) { - dispatch_queue_t tq = dq->do_targetq; + uint32_t tid = _dispatch_tid_self(); + dispatch_queue_t q, tq = dq->do_targetq; - if (barrier) { - if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { - return EWOULDBLOCK; + for (;;) { + if (likely(tq->do_targetq == NULL)) { + _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, f, dc_flags); + return true; } - } else { - // check nothing was queued by the current - // thread ahead of this call. _dispatch_queue_try_reserve_sync_width - // ignores the ENQUEUED bit which could cause it to miss a barrier_async - // made by the same thread just before. - if (slowpath(dq->dq_items_tail)) { - return EWOULDBLOCK; + if (unlikely(_dispatch_queue_cannot_trysync(tq))) { + for (q = dq; q != tq; q = q->do_targetq) { + _dispatch_queue_atomic_flags_set(q, DQF_CANNOT_TRYSYNC); + } + break; } - // concurrent queues do not respect width on sync - if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { - return EWOULDBLOCK; + if (likely(tq->dq_width == 1)) { + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { + break; + } + } else { + if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) { + break; + } } + tq = tq->do_targetq; } - int rc = 0; - if (_dispatch_queue_cannot_trysync(tq)) { - _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); - rc = ENOTSUP; - } else if (tq->do_targetq) { - rc = _dispatch_trysync_recurse(tq, tc, tq->dq_width == 1); - if (rc == ENOTSUP) { - _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); - } - } else { - dispatch_thread_frame_s dtf; - _dispatch_thread_frame_push(&dtf, tq); - _dispatch_sync_function_invoke(tc->tc_dq, tc->tc_ctxt, tc->tc_func); - _dispatch_thread_frame_pop(&dtf); - } - if (barrier) { - _dispatch_barrier_complete(dq); - } else { - _dispatch_non_barrier_complete(dq); - } - return rc; + _dispatch_sync_complete_recurse(dq, tq, dc_flags); + return false; } DISPATCH_NOINLINE -bool +long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f) { - if (slowpath(!dq->do_targetq)) { - _dispatch_sync_function_invoke(dq, ctxt, f); - return true; + uint32_t tid = _dispatch_tid_self(); + if (unlikely(!dq->do_targetq)) { + DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue"); } - if (slowpath(_dispatch_queue_cannot_trysync(dq))) { + if (unlikely(_dispatch_queue_cannot_trysync(dq))) { return false; } - struct trysync_context tc = { - .tc_dq = dq, - .tc_func = f, - .tc_ctxt = ctxt, - }; - return _dispatch_trysync_recurse(dq, &tc, true) == 0; + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { + return false; + } + return _dispatch_trysync_recurse(dq, ctxt, f, DISPATCH_OBJ_BARRIER_BIT); } DISPATCH_NOINLINE -bool +long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f) { - if (slowpath(!dq->do_targetq)) { - _dispatch_sync_function_invoke(dq, ctxt, f); - return true; - } - if 
(slowpath(_dispatch_queue_cannot_trysync(dq))) { - return false; + if (likely(dq->dq_width == 1)) { + return _dispatch_barrier_trysync_f(dq, ctxt, f); } - struct trysync_context tc = { - .tc_dq = dq, - .tc_func = f, - .tc_ctxt = ctxt, - }; - return _dispatch_trysync_recurse(dq, &tc, dq->dq_width == 1) == 0; -} - -#pragma mark - -#pragma mark dispatch_after - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_after(dispatch_time_t when, dispatch_queue_t queue, - void *ctxt, void *handler, bool block) -{ - dispatch_source_t ds; - uint64_t leeway, delta; - - if (when == DISPATCH_TIME_FOREVER) { -#if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity"); -#endif - return; + if (unlikely(!dq->do_targetq)) { + DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue"); } - - delta = _dispatch_timeout(when); - if (delta == 0) { - if (block) { - return dispatch_async(queue, handler); - } - return dispatch_async_f(queue, ctxt, handler); + if (unlikely(_dispatch_queue_cannot_trysync(dq))) { + return false; } - leeway = delta / 10; // - - if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; - if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; - - // this function can and should be optimized to not use a dispatch source - ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue); - dispatch_assert(ds); - - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - if (block) { - _dispatch_continuation_init(dc, ds, handler, 0, 0, 0); - } else { - _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0); + if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) { + return false; } - // reference `ds` so that it doesn't show up as a leak - dc->dc_data = ds; - _dispatch_source_set_event_handler_continuation(ds, dc); - dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway); - dispatch_activate(ds); -} - -DISPATCH_NOINLINE -void -dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, - dispatch_function_t func) -{ - _dispatch_after(when, queue, ctxt, func, false); -} - -#ifdef __BLOCKS__ -void -dispatch_after(dispatch_time_t when, dispatch_queue_t queue, - dispatch_block_t work) -{ - _dispatch_after(when, queue, NULL, work, true); + return _dispatch_trysync_recurse(dq, ctxt, f, 0); } -#endif #pragma mark - #pragma mark dispatch_queue_wakeup DISPATCH_NOINLINE void -_dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +_dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; @@ -4076,9 +3770,9 @@ _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, target = DISPATCH_QUEUE_WAKEUP_TARGET; } if (target) { - return _dispatch_queue_class_wakeup(dq, pp, flags, target); - } else if (pp) { - return _dispatch_queue_class_override_drainer(dq, pp, flags); + return _dispatch_queue_class_wakeup(dq, qos, flags, target); + } else if (qos) { + return _dispatch_queue_class_override_drainer(dq, qos, flags); } else if (flags & DISPATCH_WAKEUP_CONSUME) { return _dispatch_release_tailcall(dq); } @@ -4128,24 +3822,27 @@ _dispatch_runloop_queue_set_handle(dispatch_queue_t dq, dispatch_runloop_handle_ #endif // DISPATCH_COCOA_COMPAT void -_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { #if DISPATCH_COCOA_COMPAT if 
(slowpath(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { // - return _dispatch_queue_wakeup(dq, pp, flags); + return _dispatch_queue_wakeup(dq, qos, flags); } + if (flags & DISPATCH_WAKEUP_FLUSH) { + os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + } if (_dispatch_queue_class_probe(dq)) { - return _dispatch_runloop_queue_poke(dq, pp, flags); + return _dispatch_runloop_queue_poke(dq, qos, flags); } - pp = _dispatch_queue_reset_override_priority(dq, true); - if (pp) { + qos = _dispatch_queue_reset_max_qos(dq); + if (qos) { mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); if (_dispatch_queue_class_probe(dq)) { - _dispatch_runloop_queue_poke(dq, pp, flags); + _dispatch_runloop_queue_poke(dq, qos, flags); } _dispatch_thread_override_end(owner, dq); return; @@ -4154,32 +3851,20 @@ _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, return _dispatch_release_tailcall(dq); } #else - return _dispatch_queue_wakeup(dq, pp, flags); + return _dispatch_queue_wakeup(dq, qos, flags); #endif } void -_dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +_dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { #if DISPATCH_COCOA_COMPAT if (_dispatch_queue_is_thread_bound(dq)) { - return _dispatch_runloop_queue_wakeup(dq, pp, flags); + return _dispatch_runloop_queue_wakeup(dq, qos, flags); } #endif - return _dispatch_queue_wakeup(dq, pp, flags); -} - -void -_dispatch_root_queue_wakeup(dispatch_queue_t dq, - pthread_priority_t pp DISPATCH_UNUSED, - dispatch_wakeup_flags_t flags) -{ - if (flags & DISPATCH_WAKEUP_CONSUME) { - // see _dispatch_queue_push_set_head - dispatch_assert(flags & DISPATCH_WAKEUP_FLUSH); - } - _dispatch_global_queue_poke(dq); + return _dispatch_queue_wakeup(dq, qos, flags); } #pragma mark - @@ -4194,7 +3879,7 @@ _dispatch_runloop_queue_class_poke(dispatch_queue_t dq) return; } -#if TARGET_OS_MAC +#if HAVE_MACH mach_port_t mp = handle; kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); switch (kr) { @@ -4219,27 +3904,38 @@ _dispatch_runloop_queue_class_poke(dispatch_queue_t dq) DISPATCH_NOINLINE static void -_dispatch_runloop_queue_poke(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) +_dispatch_runloop_queue_poke(dispatch_queue_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) { // it's not useful to handle WAKEUP_FLUSH because mach_msg() will have - // a release barrier and that when runloop queues stop being thread bound + // a release barrier and that when runloop queues stop being thread-bound // they have a non optional wake-up to start being a "normal" queue // either in _dispatch_runloop_queue_xref_dispose, // or in _dispatch_queue_cleanup2() for the main thread. 
+ uint64_t old_state, new_state; if (dq == &_dispatch_main_q) { dispatch_once_f(&_dispatch_main_q_handle_pred, dq, _dispatch_runloop_queue_handle_init); } - _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); - if (flags & DISPATCH_WAKEUP_OVERRIDING) { - mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = _dq_state_merge_qos(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(goto no_change); + } + }); + + dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); + if (qos > dq_qos) { + mach_port_t owner = _dq_state_drain_owner(new_state); + pthread_priority_t pp = _dispatch_qos_to_pp(qos); _dispatch_thread_override_start(owner, pp, dq); - if (flags & DISPATCH_WAKEUP_WAS_OVERRIDDEN) { + if (_dq_state_max_qos(old_state) > dq_qos) { _dispatch_thread_override_end(owner, dq); } } +no_change: _dispatch_runloop_queue_class_poke(dq); if (flags & DISPATCH_WAKEUP_CONSUME) { return _dispatch_release_tailcall(dq); @@ -4253,8 +3949,9 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) { dispatch_root_queue_context_t qc = dq->do_ctxt; uint32_t i = n; - int r; + int r = ENOSYS; + _dispatch_root_queues_init(); _dispatch_debug_root_queue(dq, __func__); #if HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL @@ -4275,18 +3972,14 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) return; } #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - if (!dq->dq_priority) { - r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, - qc->dgq_wq_options, (int)i); - (void)dispatch_assume_zero(r); - return; - } -#endif #if HAVE_PTHREAD_WORKQUEUE_QOS - r = _pthread_workqueue_addthreads((int)i, dq->dq_priority); - (void)dispatch_assume_zero(r); + r = _pthread_workqueue_addthreads((int)i, + _dispatch_priority_to_pp(dq->dq_priority)); +#elif HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP + r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, + qc->dgq_wq_options, (int)i); #endif + (void)dispatch_assume_zero(r); return; } #endif // HAVE_PTHREAD_WORKQUEUES @@ -4331,8 +4024,9 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) #endif // DISPATCH_USE_PTHREAD_POOL } -static inline void -_dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n) +DISPATCH_NOINLINE +void +_dispatch_global_queue_poke(dispatch_queue_t dq, unsigned int n) { if (!_dispatch_queue_class_probe(dq)) { return; @@ -4349,38 +4043,95 @@ _dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n) return; } #endif // HAVE_PTHREAD_WORKQUEUES - return _dispatch_global_queue_poke_slow(dq, n); + return _dispatch_global_queue_poke_slow(dq, n); } -static inline void -_dispatch_global_queue_poke(dispatch_queue_t dq) +#pragma mark - +#pragma mark dispatch_queue_drain + +void +_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, dispatch_queue_t dq) { - return _dispatch_global_queue_poke_n(dq, 1); + _dispatch_continuation_pop_inline(dou, dic, flags, dq); } -DISPATCH_NOINLINE void -_dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n) +_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t ov, + dispatch_invoke_flags_t flags) { - return _dispatch_global_queue_poke_n(dq, n); + _dispatch_continuation_invoke_inline(dou, ov, flags); } -#pragma mark - -#pragma mark dispatch_queue_drain +DISPATCH_NOINLINE +static void 
+_dispatch_return_to_kernel(void) +{ + if (unlikely(_dispatch_get_wlh() == DISPATCH_WLH_GLOBAL)) { + _dispatch_clear_return_to_kernel(); + } else { + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } +} -void -_dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, - dispatch_invoke_flags_t flags) +#if HAVE_PTHREAD_WORKQUEUE_NARROWING +static os_atomic(uint64_t) _dispatch_narrowing_deadlines[DISPATCH_QOS_MAX - 1]; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_drain_init_narrowing_check_deadline(dispatch_invoke_context_t dic, + dispatch_priority_t pri) { - _dispatch_continuation_pop_inline(dou, dq, flags); + if (_dispatch_priority_qos(pri) && + !(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { + dic->dic_next_narrow_check = _dispatch_approximate_time() + + DISPATCH_NARROW_CHECK_INTERVAL; + } } -void -_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, - dispatch_invoke_flags_t flags) +DISPATCH_NOINLINE +static bool +_dispatch_queue_drain_should_narrow_slow(uint64_t now, + dispatch_invoke_context_t dic) +{ + if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { + pthread_priority_t pp = _dispatch_get_priority(); + size_t idx = _dispatch_qos_from_pp(pp) - 1; + os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; + uint64_t oldval, newval = now + DISPATCH_NARROW_CHECK_INTERVAL; + + dic->dic_next_narrow_check = newval; + os_atomic_rmw_loop(deadline, oldval, newval, relaxed, { + if (now < oldval) { + os_atomic_rmw_loop_give_up(return false); + } + }); + + if (!_pthread_workqueue_should_narrow(pp)) { + return false; + } + dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) { - _dispatch_continuation_invoke_inline(dou, override_voucher, flags); + uint64_t next_check = dic->dic_next_narrow_check; + if (unlikely(next_check)) { + uint64_t now = _dispatch_approximate_time(); + if (unlikely(next_check < now)) { + return _dispatch_queue_drain_should_narrow_slow(now, dic); + } + } + return false; } +#else +#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) +#define _dispatch_queue_drain_should_narrow(dic) false +#endif /* * Drain comes in 2 flavours (serial/concurrent) and 2 modes @@ -4410,86 +4161,106 @@ _dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, * queue drain moves to the more efficient serial mode. 
*/ DISPATCH_ALWAYS_INLINE -static dispatch_queue_t -_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, - uint64_t *owned_ptr, struct dispatch_object_s **dc_out, - bool serial_drain) +static dispatch_queue_wakeup_target_t +_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain) { dispatch_queue_t orig_tq = dq->do_targetq; dispatch_thread_frame_s dtf; struct dispatch_object_s *dc = NULL, *next_dc; - uint64_t owned = *owned_ptr; + uint64_t dq_state, owned = *owned_ptr; + + if (unlikely(!dq->dq_items_tail)) return NULL; _dispatch_thread_frame_push(&dtf, dq); - if (_dq_state_is_in_barrier(owned)) { + if (serial_drain || _dq_state_is_in_barrier(owned)) { // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` // but width can change while draining barrier work items, so we only // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` owned = DISPATCH_QUEUE_IN_BARRIER; } - while (dq->dq_items_tail) { - dc = _dispatch_queue_head(dq); - do { - if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { - goto out; + dc = _dispatch_queue_head(dq); + goto first_iteration; + + for (;;) { + dc = next_dc; + if (unlikely(dic->dic_deferred)) { + goto out_with_deferred_compute_owned; + } + if (unlikely(!dc)) { + if (!dq->dq_items_tail) { + break; } - if (unlikely(orig_tq != dq->do_targetq)) { - goto out; + dc = _dispatch_queue_head(dq); + } + if (unlikely(serial_drain != (dq->dq_width == 1))) { + break; + } + if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { + break; + } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + +first_iteration: + dq_state = os_atomic_load(&dq->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + break; + } + if (unlikely(orig_tq != dq->do_targetq)) { + break; + } + + if (serial_drain || _dispatch_object_is_barrier(dc)) { + if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { + if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_IN_BARRIER; } - if (unlikely(serial_drain != (dq->dq_width == 1))) { - goto out; + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_sync_waiter(dc)) { + owned = 0; + dic->dic_deferred = dc; + goto out_with_deferred; } - if (serial_drain || _dispatch_object_is_barrier(dc)) { - if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { - goto out; - } - next_dc = _dispatch_queue_next(dq, dc); - if (_dispatch_object_is_slow_item(dc)) { - owned = 0; - goto out_with_deferred; - } - } else { - if (owned == DISPATCH_QUEUE_IN_BARRIER) { - // we just ran barrier work items, we have to make their - // effect visible to other sync work items on other threads - // that may start coming in after this point, hence the - // release barrier - os_atomic_and2o(dq, dq_state, ~owned, release); - owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - } else if (unlikely(owned == 0)) { - if (_dispatch_object_is_slow_item(dc)) { - // sync "readers" don't observe the limit - _dispatch_queue_reserve_sync_width(dq); - } else if (!_dispatch_queue_try_acquire_async(dq)) { - goto out_with_no_width; - } - owned = DISPATCH_QUEUE_WIDTH_INTERVAL; - } - - next_dc = _dispatch_queue_next(dq, dc); - if (_dispatch_object_is_slow_item(dc)) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - _dispatch_continuation_slow_item_signal(dq, dc); - continue; + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // we just ran 
barrier work items, we have to make their + // effect visible to other sync work items on other threads + // that may start coming in after this point, hence the + // release barrier + os_atomic_xor2o(dq, dq_state, owned, release); + owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } else if (unlikely(owned == 0)) { + if (_dispatch_object_is_sync_waiter(dc)) { + // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + goto out_with_no_width; } + owned = DISPATCH_QUEUE_WIDTH_INTERVAL; + } - if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - _dispatch_continuation_redirect(dq, dc); - continue; - } + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_sync_waiter(dc)) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_sync_waiter_redirect_or_wake(dq, dc); + continue; } - _dispatch_continuation_pop_inline(dc, dq, flags); - _dispatch_perfmon_workitem_inc(); - if (unlikely(dtf.dtf_deferred)) { - goto out_with_deferred_compute_owned; + if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_continuation_redirect(dq, dc); + continue; } - } while ((dc = next_dc)); + } + + _dispatch_continuation_pop_inline(dc, dic, flags, dq); } -out: if (owned == DISPATCH_QUEUE_IN_BARRIER) { // if we're IN_BARRIER we really own the full width too owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; @@ -4504,7 +4275,7 @@ _dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, out_with_no_width: *owned_ptr = 0; _dispatch_thread_frame_pop(&dtf); - return NULL; + return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; out_with_deferred_compute_owned: if (serial_drain) { @@ -4514,41 +4285,74 @@ _dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, // if we're IN_BARRIER we really own the full width too owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; } - if (next_dc) { - owned = _dispatch_queue_adjust_owned(dq, owned, next_dc); + if (dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, dc); } } out_with_deferred: *owned_ptr = owned; - if (unlikely(!dc_out)) { + if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) { DISPATCH_INTERNAL_CRASH(dc, "Deferred continuation on source, mach channel or mgr"); } - *dc_out = dc; _dispatch_thread_frame_pop(&dtf); return dq->do_targetq; } DISPATCH_NOINLINE -static dispatch_queue_t +static dispatch_queue_wakeup_target_t _dispatch_queue_concurrent_drain(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t *owned, - struct dispatch_object_s **dc_ptr) + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) { - return _dispatch_queue_drain(dq, flags, owned, dc_ptr, false); + return _dispatch_queue_drain(dq, dic, flags, owned, false); } DISPATCH_NOINLINE -dispatch_queue_t -_dispatch_queue_serial_drain(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t *owned, - struct dispatch_object_s **dc_ptr) +dispatch_queue_wakeup_target_t +_dispatch_queue_serial_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) { flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; - return _dispatch_queue_drain(dq, flags, owned, dc_ptr, true); + return _dispatch_queue_drain(dq, dic, flags, owned, true); } #if DISPATCH_COCOA_COMPAT +DISPATCH_NOINLINE +static void +_dispatch_main_queue_update_priority_from_thread(void) +{ + dispatch_queue_t dq = 
&_dispatch_main_q; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + mach_port_t owner = _dq_state_drain_owner(dq_state); + + dispatch_priority_t main_pri = + _dispatch_priority_from_pp_strip_flags(_dispatch_get_priority()); + dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri); + dispatch_qos_t max_qos = _dq_state_max_qos(dq_state); + dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority); + + // the main thread QoS was adjusted by someone else, learn the new QoS + // and reinitialize _dispatch_main_q.dq_priority + dq->dq_priority = _dispatch_priority_with_override_qos(main_pri, main_qos); + + if (old_qos < max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) { + // main thread is opted out of QoS and we had an override + return _dispatch_thread_override_end(owner, dq); + } + + if (old_qos < max_qos && max_qos <= main_qos) { + // main QoS was raised, and we had an override which is now useless + return _dispatch_thread_override_end(owner, dq); + } + + if (main_qos < max_qos && max_qos <= old_qos) { + // main thread QoS was lowered, and we actually need an override + pthread_priority_t pp = _dispatch_qos_to_pp(max_qos); + return _dispatch_thread_override_start(owner, pp, dq); + } +} + static void _dispatch_main_queue_drain(void) { @@ -4559,6 +4363,7 @@ _dispatch_main_queue_drain(void) return; } + _dispatch_perfmon_start_notrace(); if (!fastpath(_dispatch_queue_is_thread_bound(dq))) { DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" " after dispatch_main()"); @@ -4572,32 +4377,41 @@ _dispatch_main_queue_drain(void) dispatch_once_f(&_dispatch_main_q_handle_pred, dq, _dispatch_runloop_queue_handle_init); - _dispatch_perfmon_start(); // hide the frame chaining when CFRunLoop // drains the main runloop, as this should not be observable that way + _dispatch_set_wlh(dq->dq_wlh); _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); - pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); + dispatch_qos_t qos = _dispatch_priority_qos(pri); voucher_t voucher = _voucher_copy(); + if (unlikely(qos != _dispatch_priority_qos(dq->dq_priority))) { + _dispatch_main_queue_update_priority_from_thread(); + } + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + + dispatch_invoke_context_s dic = { }; struct dispatch_object_s *dc, *next_dc, *tail; dc = os_mpsc_capture_snapshot(dq, dq_items, &tail); do { next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); - _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); - _dispatch_perfmon_workitem_inc(); + _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq); } while ((dc = next_dc)); // runloop based queues use their port for the queue PUBLISH pattern // so this raw call to dx_wakeup(0) is valid dx_wakeup(dq, 0, 0); _dispatch_voucher_debug("main queue restore", voucher); - _dispatch_reset_defaultpriority(old_dp); - _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); _dispatch_thread_frame_pop(&dtf); - _dispatch_perfmon_end(); + _dispatch_reset_wlh(); _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); } static bool @@ -4606,18 +4420,21 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq) 
if (!dq->dq_items_tail) { return false; } + _dispatch_perfmon_start_notrace(); dispatch_thread_frame_s dtf; - _dispatch_perfmon_start(); + _dispatch_set_wlh(dq->dq_wlh); _dispatch_thread_frame_push(&dtf, dq); - pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); voucher_t voucher = _voucher_copy(); + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + dispatch_invoke_context_s dic = { }; struct dispatch_object_s *dc, *next_dc; dc = _dispatch_queue_head(dq); next_dc = _dispatch_queue_next(dq, dc); - _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); - _dispatch_perfmon_workitem_inc(); + _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq); if (!next_dc) { // runloop based queues use their port for the queue PUBLISH pattern @@ -4626,11 +4443,13 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq) } _dispatch_voucher_debug("runloop queue restore", voucher); - _dispatch_reset_defaultpriority(old_dp); - _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); _dispatch_thread_frame_pop(&dtf); - _dispatch_perfmon_end(); + _dispatch_reset_wlh(); _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); return next_dc; } #endif @@ -4639,19 +4458,19 @@ DISPATCH_NOINLINE void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq) { - dispatch_continuation_t dc_tmp, dc_start, dc_end; + dispatch_continuation_t dc_tmp, dc_start = NULL, dc_end = NULL; struct dispatch_object_s *dc = NULL; - uint64_t dq_state, owned; + uint64_t owned; size_t count = 0; owned = DISPATCH_QUEUE_IN_BARRIER; owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; attempt_running_slow_head: - if (slowpath(dq->dq_items_tail) && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { + if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { dc = _dispatch_queue_head(dq); - if (!_dispatch_object_is_slow_item(dc)) { + if (!_dispatch_object_is_sync_waiter(dc)) { // not a slow item, needs to wake up - } else if (fastpath(dq->dq_width == 1) || + } else if (likely(dq->dq_width == 1) || _dispatch_object_is_barrier(dc)) { // rdar://problem/8290662 "barrier/writer lock transfer" dc_start = dc_end = (dispatch_continuation_t)dc; @@ -4660,7 +4479,7 @@ _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq) dc = _dispatch_queue_next(dq, dc); } else { // "reader lock transfer" - // we must not signal semaphores immediately because our right + // we must not wake waiters immediately because our right // for dequeuing is granted through holding the full "barrier" width // which a signaled work item could relinquish out from our feet dc_start = (dispatch_continuation_t)dc; @@ -4672,16 +4491,15 @@ _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq) owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; count++; dc = _dispatch_queue_next(dq, dc); - } while (dc && _dispatch_object_is_slow_non_barrier(dc)); + } while (dc && _dispatch_object_is_sync_waiter_non_barrier(dc)); } if (count) { _dispatch_queue_drain_transfer_lock(dq, owned, dc_start); do { - // signaled job will release the continuation dc_tmp = dc_start; dc_start = dc_start->do_next; - _dispatch_continuation_slow_item_signal(dq, dc_tmp); + _dispatch_sync_waiter_redirect_or_wake(dq, 
dc_tmp); } while (dc_tmp != dc_end); return; } @@ -4694,9 +4512,9 @@ _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq) // ds_pending_data or the wakeup logic, but lock transfer is useless // for sources and mach channels in the first place. owned = _dispatch_queue_adjust_owned(dq, owned, dc); - dq_state = _dispatch_queue_drain_unlock(dq, owned, NULL); - return _dispatch_queue_try_wakeup(dq, dq_state, 0); - } else if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { + _dispatch_queue_drain_unlock(dq, owned); + return dx_wakeup(dq, 0, DISPATCH_WAKEUP_WAITER_HANDOFF); + } else if (unlikely(!_dispatch_queue_drain_try_unlock(dq, owned, true))) { // someone enqueued a slow item at the head // looping may be its last chance goto attempt_running_slow_head; @@ -4707,18 +4525,20 @@ void _dispatch_mgr_queue_drain(void) { const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; + dispatch_invoke_context_s dic = { }; dispatch_queue_t dq = &_dispatch_mgr_q; uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; if (dq->dq_items_tail) { _dispatch_perfmon_start(); - if (slowpath(_dispatch_queue_serial_drain(dq, flags, &owned, NULL))) { + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + if (slowpath(_dispatch_queue_serial_drain(dq, &dic, flags, &owned))) { DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue"); } _dispatch_voucher_debug("mgr queue clear", NULL); _voucher_clear(); - _dispatch_reset_defaultpriority_override(); - _dispatch_perfmon_end(); + _dispatch_reset_basepri_override(); + _dispatch_perfmon_end(perfmon_thread_manager); } #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -4734,13 +4554,15 @@ _dispatch_mgr_queue_drain(void) void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t to_unlock, - struct dispatch_object_s *dc) + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t to_unlock) { - if (_dispatch_object_is_slow_item(dc)) { + struct dispatch_object_s *dc = dic->dic_deferred; + if (_dispatch_object_is_sync_waiter(dc)) { dispatch_assert(to_unlock == 0); + dic->dic_deferred = NULL; _dispatch_queue_drain_transfer_lock(dq, to_unlock, dc); - _dispatch_continuation_slow_item_signal(dq, dc); + _dispatch_sync_waiter_redirect_or_wake(dq, dc); return _dispatch_release_tailcall(dq); } @@ -4748,7 +4570,6 @@ _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, uint64_t old_state, new_state; if (_dispatch_get_current_queue()->do_targetq) { - _dispatch_thread_frame_get_current()->dtf_deferred = dc; should_defer_again = true; should_pend_queue = false; } @@ -4776,8 +4597,8 @@ _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, _dispatch_try_lock_transfer_or_wakeup(dq); _dispatch_release(dq); } else if (to_unlock) { - uint64_t dq_state = _dispatch_queue_drain_unlock(dq, to_unlock, NULL); - _dispatch_queue_try_wakeup(dq, dq_state, DISPATCH_WAKEUP_CONSUME); + _dispatch_queue_drain_unlock(dq, to_unlock); + dx_wakeup(dq, 0, DISPATCH_WAKEUP_CONSUME); } else { _dispatch_release(dq); } @@ -4785,7 +4606,8 @@ _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, } if (!should_defer_again) { - dx_invoke(dc, flags & _DISPATCH_INVOKE_PROPAGATE_MASK); + dic->dic_deferred = NULL; + return dx_invoke(dc, dic, flags & _DISPATCH_INVOKE_PROPAGATE_MASK); } if (dq) { @@ -4796,17 +4618,18 @@ _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, _dq_state_drain_owner(old_state) != self) { os_atomic_rmw_loop_give_up({ // We may have been overridden, so inform the root queue - 
_dispatch_set_defaultpriority_override(); + _dispatch_set_basepri_override_qos( + _dq_state_max_qos(old_state)); return _dispatch_release_tailcall(dq); }); } - new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK(new_state); }); - if (_dq_state_has_override(old_state)) { + if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } - return dx_invoke(dq, flags | DISPATCH_INVOKE_STEALING); + return dx_invoke(dq, dic, flags | DISPATCH_INVOKE_STEALING); } } @@ -4816,19 +4639,16 @@ _dispatch_queue_finalize_activation(dispatch_queue_t dq) dispatch_queue_t tq = dq->do_targetq; _dispatch_queue_priority_inherit_from_target(dq, tq); _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); - if (dq->dq_override_voucher == DISPATCH_NO_VOUCHER) { - voucher_t v = tq->dq_override_voucher; - if (v != DISPATCH_NO_VOUCHER) { - if (v) _voucher_retain(v); - dq->dq_override_voucher = v; - } + if (!dq->dq_wlh) { + dispatch_wlh_t wlh = _dispatch_queue_class_compute_wlh(dq); + if (wlh) _dispatch_queue_class_record_wlh_hierarchy(dq, wlh); } } DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags, - uint64_t *owned, struct dispatch_object_s **dc_ptr) +static inline dispatch_queue_wakeup_target_t +dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) { dispatch_queue_t otq = dq->do_targetq; dispatch_queue_t cq = _dispatch_queue_get_current(); @@ -4837,18 +4657,19 @@ dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags, return otq; } if (dq->dq_width == 1) { - return _dispatch_queue_serial_drain(dq, flags, owned, dc_ptr); + return _dispatch_queue_serial_drain(dq, dic, flags, owned); } - return _dispatch_queue_concurrent_drain(dq, flags, owned, dc_ptr); + return _dispatch_queue_concurrent_drain(dq, dic, flags, owned); } // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_NOINLINE void -_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags) +_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(dq, flags, dispatch_queue_invoke2); + _dispatch_queue_class_invoke(dq, dic, flags, dispatch_queue_invoke2); } #pragma mark - @@ -4857,15 +4678,16 @@ _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags) #if HAVE_PTHREAD_WORKQUEUE_QOS void _dispatch_queue_override_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { dispatch_queue_t old_rq = _dispatch_queue_get_current(); dispatch_queue_t assumed_rq = dc->dc_other; + dispatch_priority_t old_dp; voucher_t ov = DISPATCH_NO_VOUCHER; dispatch_object_t dou; dou._do = dc->dc_data; - _dispatch_queue_set_current(assumed_rq); + old_dp = _dispatch_root_queue_identity_assume(assumed_rq); flags |= DISPATCH_INVOKE_OVERRIDING; if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) { flags |= DISPATCH_INVOKE_STEALING; @@ -4876,49 +4698,57 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc, } _dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, { if (_dispatch_object_has_vtable(dou._do)) { - dx_invoke(dou._do, 
flags); + dx_invoke(dou._do, dic, flags); } else { _dispatch_continuation_invoke_inline(dou, ov, flags); } }); + _dispatch_reset_basepri(old_dp); _dispatch_queue_set_current(old_rq); } +#if DISPATCH_USE_KEVENT_WORKQUEUE +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_root_queue_push_wlh(dispatch_queue_t rq, dispatch_qos_t qos) +{ + // for root queues, the override is the guaranteed minimum override level + if (qos > _dispatch_priority_override_qos(rq->dq_priority)) { + return qos; + } + return _dispatch_priority_qos(rq->dq_priority); +} +#endif // DISPATCH_USE_KEVENT_WORKQUEUE + DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_need_global_root_queue_push_override(dispatch_queue_t rq, - pthread_priority_t pp) +_dispatch_root_queue_push_needs_override(dispatch_queue_t rq, + dispatch_qos_t qos) { - pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority); + bool defaultqueue = rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; - if (unlikely(!rqp)) return false; + if (unlikely(!rqos)) return false; - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return defaultqueue ? pp && pp != rqp : pp > rqp; + return defaultqueue ? qos && qos != rqos : qos > rqos; } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_need_global_root_queue_push_override_stealer(dispatch_queue_t rq, - pthread_priority_t pp) +_dispatch_root_queue_push_queue_override_needed(dispatch_queue_t rq, + dispatch_qos_t qos) { - pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - - if (unlikely(!rqp)) return false; - - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return defaultqueue || pp > rqp; + // for root queues, the override is the guaranteed minimum override level + return qos > _dispatch_priority_override_qos(rq->dq_priority); } DISPATCH_NOINLINE static void _dispatch_root_queue_push_override(dispatch_queue_t orig_rq, - dispatch_object_t dou, pthread_priority_t pp) + dispatch_object_t dou, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit); dispatch_continuation_t dc = dou._dc; if (_dispatch_object_is_redirection(dc)) { @@ -4936,18 +4766,16 @@ _dispatch_root_queue_push_override(dispatch_queue_t orig_rq, dc->dc_priority = DISPATCH_NO_PRIORITY; dc->dc_voucher = DISPATCH_NO_VOUCHER; } - - DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - _dispatch_queue_push_inline(rq, dc, 0, 0); + _dispatch_root_queue_push_inline(rq, dc, dc, 1); } DISPATCH_NOINLINE static void _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, - dispatch_queue_t dq, pthread_priority_t pp) + dispatch_queue_t dq, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit); dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); @@ -4958,26 +4786,28 @@ 
_dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, dc->dc_data = dq; dc->dc_priority = DISPATCH_NO_PRIORITY; dc->dc_voucher = DISPATCH_NO_VOUCHER; - - DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - _dispatch_queue_push_inline(rq, dc, 0, 0); + _dispatch_root_queue_push_inline(rq, dc, dc, 1); } DISPATCH_NOINLINE -static void +void _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags, uint64_t dq_state) + dispatch_qos_t qos, dispatch_wakeup_flags_t flags, uint64_t dq_state) { mach_port_t owner = _dq_state_drain_owner(dq_state); - pthread_priority_t pp2; dispatch_queue_t tq; + dispatch_qos_t oqos; bool locked; + if (_dq_state_is_suspended(dq_state)) { + goto out; + } + if (owner) { - int rc = _dispatch_wqthread_override_start_check_owner(owner, pp, + int rc = _dispatch_wqthread_override_start_check_owner(owner, qos, &dq->dq_state_lock); // EPERM means the target of the override is not a work queue thread - // and could be a thread bound queue such as the main queue. + // and could be a thread-bound queue such as the main queue. // When that happens we must get to that queue and wake it up if we // want the override to be appplied and take effect. if (rc != EPERM) { @@ -4985,20 +4815,16 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, } } - if (_dq_state_is_suspended(dq_state)) { - goto out; - } - tq = dq->do_targetq; - if (_dispatch_queue_has_immutable_target(dq)) { + if (likely(!_dispatch_queue_is_legacy(dq))) { locked = false; } else if (_dispatch_is_in_root_queues_array(tq)) { // avoid locking when we recognize the target queue as a global root // queue it is gross, but is a very common case. The locking isn't // needed because these target queues cannot go away. locked = false; - } else if (_dispatch_queue_sidelock_trylock(dq, pp)) { + } else if (_dispatch_queue_sidelock_trylock(dq, qos)) { // to traverse the tq chain safely we must // lock it to ensure it cannot change locked = true; @@ -5008,10 +4834,9 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, // // Leading to being there, the current thread has: // 1. enqueued an object on `dq` - // 2. raised the dq_override value of `dq` - // 3. set the HAS_OVERRIDE bit and not seen an owner - // 4. tried and failed to acquire the side lock - // + // 2. raised the max_qos value, set RECEIVED_OVERRIDE on `dq` + // and didn't see an owner + // 3. tried and failed to acquire the side lock // // The side lock owner can only be one of three things: // @@ -5021,20 +4846,19 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, // the eventual dispatch_resume(). // // - A dispatch_set_target_queue() call. The fact that we saw no `owner` - // means that the trysync it does wasn't being drained when (3) + // means that the trysync it does wasn't being drained when (2) // happened which can only be explained by one of these interleavings: // // o `dq` became idle between when the object queued in (1) ran and // the set_target_queue call and we were unlucky enough that our - // step (3) happened while this queue was idle. There is no reason + // step (2) happened while this queue was idle. There is no reason // to override anything anymore, the queue drained to completion // while we were preempted, our job is done. // - // o `dq` is queued but not draining during (1-3), then when we try - // to lock at (4) the queue is now draining a set_target_queue. 
- // Since we set HAS_OVERRIDE with a release barrier, the effect of - // (2) was visible to the drainer when he acquired the drain lock, - // and that guy has applied our override. Our job is done. + // o `dq` is queued but not draining during (1-2), then when we try + // to lock at (3) the queue is now draining a set_target_queue. + // This drainer must have seen the effects of (2) and that guy has + // applied our override. Our job is done. // // - Another instance of _dispatch_queue_class_wakeup_with_override(), // which is fine because trylock leaves a hint that we failed our @@ -5047,11 +4871,13 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, apply_again: if (dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { - if (_dispatch_need_global_root_queue_push_override_stealer(tq, pp)) { - _dispatch_root_queue_push_override_stealer(tq, dq, pp); + if (_dispatch_root_queue_push_queue_override_needed(tq, qos)) { + _dispatch_root_queue_push_queue_override(tq, dq, qos); } - } else if (_dispatch_queue_need_override(tq, pp)) { - dx_wakeup(tq, pp, DISPATCH_WAKEUP_OVERRIDING); + } else if (flags & DISPATCH_WAKEUP_WAITER_HANDOFF) { + dx_wakeup(tq, qos, flags); + } else if (_dispatch_queue_need_override(tq, qos)) { + dx_wakeup(tq, qos, DISPATCH_WAKEUP_OVERRIDING); } while (unlikely(locked && !_dispatch_queue_sidelock_tryunlock(dq))) { // rdar://problem/24081326 @@ -5060,9 +4886,9 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, // tried to acquire the side lock while we were running, and could have // had a better override than ours to apply. // - pp2 = dq->dq_override; - if (pp2 > pp) { - pp = pp2; + oqos = _dq_state_max_qos(os_atomic_load2o(dq, dq_state, relaxed)); + if (oqos > qos) { + qos = oqos; // The other instance had a better priority than ours, override // our thread, and apply the override that wasn't applied to `dq` // because of us. @@ -5080,126 +4906,166 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, DISPATCH_NOINLINE void _dispatch_queue_class_override_drainer(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) + dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - uint64_t dq_state, value; + uint64_t old_state, new_state; // // Someone is trying to override the last work item of the queue. - // Do not remember this override on the queue because we know the precise - // duration the override is required for: until the current drain unlocks. 
// - // That is why this function only tries to set HAS_OVERRIDE if we can - // still observe a drainer, and doesn't need to set the DIRTY bit - // because oq_override wasn't touched and there is no race to resolve - // - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - if (!_dq_state_drain_locked(dq_state)) { - os_atomic_rmw_loop_give_up(break); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (!_dq_state_drain_locked(old_state) && + !_dq_state_is_dirty(old_state)) { + os_atomic_rmw_loop_give_up(goto done); + } + new_state = _dq_state_merge_qos(old_state, qos); + if (new_state == old_state) { + os_atomic_rmw_loop_give_up(goto done); } - value = dq_state | DISPATCH_QUEUE_HAS_OVERRIDE; }); - if (_dq_state_drain_locked(dq_state)) { - return _dispatch_queue_class_wakeup_with_override(dq, pp, - flags, dq_state); + if (_dq_state_drain_locked(new_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, qos, + flags, new_state); } + +done: #else - (void)pp; + (void)qos; #endif // HAVE_PTHREAD_WORKQUEUE_QOS if (flags & DISPATCH_WAKEUP_CONSUME) { return _dispatch_release_tailcall(dq); } } +#if HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_NOINLINE +static void +_dispatch_root_queue_push_queue_override(dispatch_queue_t rq, + dispatch_queue_class_t dqu, dispatch_qos_t qos) +{ + // thread bound queues always have an owner set, so should never reach + // this codepath (see _dispatch_queue_class_wakeup_with_override). + dispatch_assert(!_dispatch_queue_is_thread_bound(dqu._dq)); + _dispatch_root_queue_push_override_stealer(rq, dqu._dq, qos); +} +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + #if DISPATCH_USE_KEVENT_WORKQUEUE +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_root_queue_push_queue(dispatch_queue_t rq, dispatch_queue_class_t dqu, + dispatch_qos_t qos) +{ + // thread bound queues aren't woken up on root queues + dispatch_assert(!_dispatch_queue_is_thread_bound(dqu._dq)); + if (likely(_dispatch_root_queue_allows_wlh_for_queue(rq, dqu._dq))) { + dispatch_qos_t wlh_qos; + wlh_qos = _dispatch_qos_root_queue_push_wlh(rq, qos); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (_dispatch_root_queue_push_needs_override(rq, qos)) { + return _dispatch_root_queue_push_override(rq, dqu._dq->_as_do, qos); + } +#endif + _dispatch_root_queue_push_inline(rq, dqu._dq, dqu._dq, 1); +} + DISPATCH_NOINLINE static void -_dispatch_trystash_to_deferred_items(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp, dispatch_deferred_items_t ddi) +_dispatch_root_queue_push_try_stash(dispatch_queue_t rq, + dispatch_queue_class_t dqu, dispatch_qos_t qos, + dispatch_deferred_items_t ddi) { - dispatch_priority_t old_pp = ddi->ddi_stashed_pp; + dispatch_wlh_t cur_wlh = _dispatch_get_wlh(); + dispatch_wlh_t wlh = _dispatch_root_queue_wlh_for_queue(rq, dqu); dispatch_queue_t old_dq = ddi->ddi_stashed_dq; - struct dispatch_object_s *old_dou = ddi->ddi_stashed_dou; dispatch_priority_t rq_overcommit; + rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - rq_overcommit = dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - if (likely(!old_pp || rq_overcommit)) { - ddi->ddi_stashed_dq = dq; - ddi->ddi_stashed_dou = dou._do; - ddi->ddi_stashed_pp = (dispatch_priority_t)pp | rq_overcommit | - _PTHREAD_PRIORITY_PRIORITY_MASK; - if (likely(!old_pp)) { + if (cur_wlh != DISPATCH_WLH_GLOBAL) { + if (cur_wlh != (dispatch_wlh_t)dqu._dq) { + goto out; + } + dispatch_assert(old_dq == NULL); + } + + if (likely(!old_dq || rq_overcommit)) { + dispatch_queue_t old_rq = 
ddi->ddi_stashed_rq; + dispatch_priority_t old_pri = ddi->ddi_stashed_pri; + ddi->ddi_stashed_rq = rq; + ddi->ddi_stashed_dq = dqu._dq; + ddi->ddi_stashed_pri = _dispatch_priority_make(qos, 0) | rq_overcommit; + _dispatch_debug("wlh[%p]: deferring item %p, rq %p, pri 0x%x", + cur_wlh, dqu._dq, rq, ddi->ddi_stashed_pri); + if (likely(!old_dq)) { return; } // push the previously stashed item - pp = old_pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - dq = old_dq; - dou._do = old_dou; - } - if (_dispatch_need_global_root_queue_push_override(dq, pp)) { - return _dispatch_root_queue_push_override(dq, dou, pp); + qos = _dispatch_priority_qos(old_pri); + rq = old_rq; + dqu._dq = old_dq; } - // bit of cheating: we should really pass `pp` but we know that we are - // pushing onto a global queue at this point, and we just checked that - // `pp` doesn't matter. - DISPATCH_COMPILER_CAN_ASSUME(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - _dispatch_queue_push_inline(dq, dou, 0, 0); -} -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS -DISPATCH_NOINLINE -static void -_dispatch_queue_push_slow(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp) -{ - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); - _dispatch_queue_push(dq, dou, pp); +out: + if (cur_wlh != DISPATCH_WLH_GLOBAL) { + _dispatch_debug("wlh[%p]: not deferring item %p with wlh %p, rq %p", + cur_wlh, dqu._dq, wlh, rq); + } + _dispatch_root_queue_push_queue(rq, dqu, qos); } -#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE DISPATCH_NOINLINE void -_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp) +_dispatch_root_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_qos_t qos) { - _dispatch_assert_is_valid_qos_override(pp); - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { #if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_object_has_vtable(dou) && dx_vtable(dou._do)->do_push) { dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (unlikely(ddi && !(ddi->ddi_stashed_pp & - (dispatch_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK))) { - dispatch_assert(_dispatch_root_queues_pred == DLOCK_ONCE_DONE); - return _dispatch_trystash_to_deferred_items(dq, dou, pp, ddi); + if (unlikely(ddi && ddi->ddi_stashed_pri != DISPATCH_PRIORITY_NOSTASH)){ + return _dispatch_root_queue_push_try_stash(dq, dou._dq, qos, ddi); } + return _dispatch_root_queue_push_queue(dq, dou._dq, qos); + } #endif #if HAVE_PTHREAD_WORKQUEUE_QOS - // can't use dispatch_once_f() as it would create a frame - if (unlikely(_dispatch_root_queues_pred != DLOCK_ONCE_DONE)) { - return _dispatch_queue_push_slow(dq, dou, pp); - } - if (_dispatch_need_global_root_queue_push_override(dq, pp)) { - return _dispatch_root_queue_push_override(dq, dou, pp); - } + if (_dispatch_root_queue_push_needs_override(dq, qos)) { + return _dispatch_root_queue_push_override(dq, dou, qos); + } #endif + _dispatch_root_queue_push_inline(dq, dou, dou, 1); +} + +void +_dispatch_root_queue_wakeup(dispatch_queue_t dq, + DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags) +{ + if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { + DISPATCH_INTERNAL_CRASH(dq->dq_priority, + "Trying to wake up or override a root queue"); + } + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } - _dispatch_queue_push_inline(dq, dou, pp, 0); } DISPATCH_NOINLINE -static void -_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, pthread_priority_t pp, +void +_dispatch_queue_push(dispatch_queue_t dq, 
dispatch_object_t dou, + dispatch_qos_t qos) +{ + _dispatch_queue_push_inline(dq, dou, qos); +} + +DISPATCH_NOINLINE +void +_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { dispatch_queue_t tq; - if (flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAS_OVERRIDDEN)) { - // _dispatch_queue_drain_try_unlock may have reset the override while - // we were becoming the enqueuer - _dispatch_queue_reinstate_override_priority(dq, (dispatch_priority_t)pp); - } if (!(flags & DISPATCH_WAKEUP_CONSUME)) { _dispatch_retain(dq); } @@ -5208,123 +5074,38 @@ _dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, pthread_priority_t pp, // of a queue asyncing to that queue is not an uncommon pattern // and in that case the acquire is completely useless // - // so instead use a thread fence here when we will read the targetq - // pointer because that is the only thing that really requires - // that barrier. - os_atomic_thread_fence(acquire); - tq = dq->do_targetq; + // so instead use depdendency ordering to read the targetq pointer. + os_atomic_thread_fence(dependency); + tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, (long)qos); } else { - dispatch_assert(target == DISPATCH_QUEUE_WAKEUP_MGR); - tq = &_dispatch_mgr_q; + tq = target; } - return _dispatch_queue_push(tq, dq, pp); + return dx_push(tq, dq, qos); } -DISPATCH_NOINLINE -void -_dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp, - dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) +DISPATCH_ALWAYS_INLINE +static void +_dispatch_queue_class_wakeup_finish(dispatch_queue_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target, + uint64_t old_state, uint64_t new_state) { - uint64_t old_state, new_state, bits = 0; - -#if HAVE_PTHREAD_WORKQUEUE_QOS - _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); -#endif - - if (flags & DISPATCH_WAKEUP_FLUSH) { - bits = DISPATCH_QUEUE_DIRTY; - } - if (flags & DISPATCH_WAKEUP_OVERRIDING) { - // - // Setting the dirty bit here is about forcing callers of - // _dispatch_queue_drain_try_unlock() to loop again when an override - // has just been set to close the following race: - // - // Drainer (in drain_try_unlokc(): - // override_reset(); - // preempted.... - // - // Enqueuer: - // atomic_or(oq_override, override, relaxed); - // atomic_or(dq_state, HAS_OVERRIDE, release); - // - // Drainer: - // ... 
resumes - // successful drain_unlock() and leaks `oq_override` - // - bits = DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_HAS_OVERRIDE; - } + dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_NONE); + dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT); - if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { - uint64_t pending_barrier_width = - (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; - uint64_t xor_owner_and_set_full_width_and_in_barrier = - _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | - DISPATCH_QUEUE_IN_BARRIER; - -#ifdef DLOCK_NOWAITERS_BIT - bits |= DLOCK_NOWAITERS_BIT; -#else - bits |= DLOCK_WAITERS_BIT; -#endif - flags ^= DISPATCH_WAKEUP_SLOW_WAITER; - dispatch_assert(!(flags & DISPATCH_WAKEUP_CONSUME)); - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state | bits; - if (_dq_state_drain_pended(old_state)) { - // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT - // but we want to be more efficient wrt the WAITERS_BIT - new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; - new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED; - } - if (unlikely(_dq_state_drain_locked(new_state))) { -#ifdef DLOCK_NOWAITERS_BIT - new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT; -#endif - } else if (unlikely(!_dq_state_is_runnable(new_state) || - !(flags & DISPATCH_WAKEUP_FLUSH))) { - // either not runnable, or was not for the first item (26700358) - // so we should not try to lock and handle overrides instead - } else if (_dq_state_has_pending_barrier(old_state) || - new_state + pending_barrier_width < - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - // see _dispatch_queue_drain_try_lock - new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - new_state ^= xor_owner_and_set_full_width_and_in_barrier; - } else { - new_state |= DISPATCH_QUEUE_ENQUEUED; - } - }); - if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - return _dispatch_try_lock_transfer_or_wakeup(dq); - } - } else if (bits) { - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ - new_state = old_state | bits; - if (likely(_dq_state_should_wakeup(old_state))) { - new_state |= DISPATCH_QUEUE_ENQUEUED; - } - }); + if ((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK) { + flags |= DISPATCH_WAKEUP_OVERRIDING; } else { - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed,{ - new_state = old_state; - if (likely(_dq_state_should_wakeup(old_state))) { - new_state |= DISPATCH_QUEUE_ENQUEUED; - } else { - os_atomic_rmw_loop_give_up(break); - } - }); + flags &= ~(dispatch_wakeup_flags_t)DISPATCH_WAKEUP_OVERRIDING; + qos = _dq_state_max_qos(new_state); } - if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { - return _dispatch_queue_class_wakeup_enqueue(dq, pp, flags, target); + return _dispatch_queue_class_wakeup_enqueue(dq, qos, flags, target); } #if HAVE_PTHREAD_WORKQUEUE_QOS - if ((flags & DISPATCH_WAKEUP_OVERRIDING) - && target == DISPATCH_QUEUE_WAKEUP_TARGET) { - return _dispatch_queue_class_wakeup_with_override(dq, pp, + if ((flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAITER_HANDOFF)) + && target != DISPATCH_QUEUE_WAKEUP_MGR) { + return _dispatch_queue_class_wakeup_with_override(dq, qos, flags, new_state); } #endif @@ -5334,6 +5115,98 @@ _dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp, } } +DISPATCH_NOINLINE +void +_dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) +{ + uint64_t old_state, new_state; + + qos = _dispatch_queue_override_qos(dq, 
qos); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + if (likely(_dq_state_should_wakeup(old_state))) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } + if (flags & DISPATCH_WAKEUP_FLUSH) { + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (new_state == old_state) { + os_atomic_rmw_loop_give_up(break); + } + }); + + return _dispatch_queue_class_wakeup_finish(dq, qos, flags, target, + old_state, new_state); +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_push_sync_waiter(dispatch_queue_t dq, + dispatch_sync_context_t dsc) +{ + uint64_t pending_barrier_width = + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t xor_owner_and_set_full_width_and_in_barrier = + _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | + DISPATCH_QUEUE_IN_BARRIER; + dispatch_qos_t qos = _dispatch_continuation_override_qos(dq, dsc->_as_dc); + uint64_t old_state, new_state; + dispatch_wakeup_flags_t flags = 0; + + _dispatch_trace_continuation_push(dq, dsc->_as_dc); + if (unlikely(_dispatch_queue_push_update_tail(dq, dsc->_as_do))) { + // for slow waiters, we borrow the reference of the caller + // so we don't need to protect the wakeup with a temporary retain + _dispatch_queue_push_update_head(dq, dsc->_as_do); + flags = DISPATCH_WAKEUP_FLUSH; + if (unlikely(_dispatch_queue_is_thread_bound(dq))) { + return dx_wakeup(dq, qos, flags); + } + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); +#ifdef DLOCK_NOWAITERS_BIT + new_state |= DLOCK_NOWAITERS_BIT; +#else + new_state |= DLOCK_WAITERS_BIT; +#endif + if (flags & DISPATCH_WAKEUP_FLUSH) { + new_state |= DISPATCH_QUEUE_DIRTY; + } + if (_dq_state_drain_pended(old_state)) { + // same as DISPATCH_QUEUE_DRAIN_UNLOCK + // but we want to be more efficient wrt the WAITERS_BIT + new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; + new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED; + } + if (unlikely(_dq_state_drain_locked(new_state))) { +#ifdef DLOCK_NOWAITERS_BIT + new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT; +#endif + } else if (unlikely(!_dq_state_is_runnable(new_state) || + !(flags & DISPATCH_WAKEUP_FLUSH))) { + // either not runnable, or was not for the first item (26700358) + // so we should not try to lock and handle overrides instead + } else if (_dq_state_has_pending_barrier(old_state) || + new_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + // see _dispatch_queue_drain_try_lock + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state ^= xor_owner_and_set_full_width_and_in_barrier; + } else { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } + }); + + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + + return _dispatch_queue_class_wakeup_finish(dq, qos, flags, + DISPATCH_QUEUE_WAKEUP_TARGET, old_state, new_state); +} + #pragma mark - #pragma mark dispatch_root_queue_drain @@ -5375,7 +5248,7 @@ _dispatch_root_queue_drain_one_slow(dispatch_queue_t dq) (void)os_atomic_dec2o(qc, dgq_pending, relaxed); } if (!available) { - _dispatch_global_queue_poke(dq); + _dispatch_global_queue_poke(dq, 1); } return available; } @@ -5438,41 +5311,40 @@ _dispatch_root_queue_drain_one(dispatch_queue_t dq) goto out; } // There must be a next item now. 
- _dispatch_wait_until(next = head->do_next); + next = os_mpsc_get_next(head, do_next); } os_atomic_store2o(dq, dq_items_head, next, relaxed); - _dispatch_global_queue_poke(dq); + _dispatch_global_queue_poke(dq, 1); out: return head; } void -_dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, - struct dispatch_object_s *dou, pthread_priority_t pp) +_dispatch_root_queue_drain_deferred_item(dispatch_queue_t rq, + dispatch_queue_t dq DISPATCH_PERF_MON_ARGS_PROTO) { - struct _dispatch_identity_s di; + // fake that we queued `dq` on `rq` for introspection purposes + _dispatch_trace_continuation_push(rq, dq); - // fake that we queued `dou` on `dq` for introspection purposes - _dispatch_trace_continuation_push(dq, dou); - - pp = _dispatch_priority_inherit_from_root_queue(pp, dq); - _dispatch_queue_set_current(dq); - _dispatch_root_queue_identity_assume(&di, pp); + _dispatch_queue_set_current(rq); + dispatch_priority_t old_pri = _dispatch_set_basepri(rq->dq_priority); #if DISPATCH_COCOA_COMPAT void *pool = _dispatch_last_resort_autorelease_pool_push(); #endif // DISPATCH_COCOA_COMPAT - _dispatch_perfmon_start(); - _dispatch_continuation_pop_inline(dou, dq, - DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); - _dispatch_perfmon_workitem_inc(); - _dispatch_perfmon_end(); + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN; + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); + _dispatch_continuation_pop_inline(dq, &dic, flags, rq); + // event thread that could steal + _dispatch_perfmon_end(perfmon_thread_event_steal); #if DISPATCH_COCOA_COMPAT _dispatch_last_resort_autorelease_pool_pop(pool); #endif // DISPATCH_COCOA_COMPAT - _dispatch_reset_defaultpriority(di.old_pp); + _dispatch_reset_basepri(old_pri); _dispatch_queue_set_current(NULL); _dispatch_voucher_debug("root queue clear", NULL); @@ -5481,7 +5353,7 @@ _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) static void -_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri) +_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pp) { #if DISPATCH_DEBUG dispatch_queue_t cq; @@ -5490,28 +5362,42 @@ _dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri) } #endif _dispatch_queue_set_current(dq); - if (dq->dq_priority) pri = dq->dq_priority; - pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri, NULL); + dispatch_priority_t pri = dq->dq_priority; + if (!pri) pri = _dispatch_priority_from_pp(pp); + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_wlh(DISPATCH_WLH_GLOBAL); #if DISPATCH_COCOA_COMPAT void *pool = _dispatch_last_resort_autorelease_pool_push(); #endif // DISPATCH_COCOA_COMPAT - _dispatch_perfmon_start(); struct dispatch_object_s *item; bool reset = false; + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN; + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri); + _dispatch_perfmon_start(); while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) { if (reset) _dispatch_wqthread_override_reset(); - _dispatch_continuation_pop_inline(item, dq, - DISPATCH_INVOKE_WORKER_DRAIN|DISPATCH_INVOKE_REDIRECTING_DRAIN); - _dispatch_perfmon_workitem_inc(); - reset = _dispatch_reset_defaultpriority_override(); + _dispatch_continuation_pop_inline(item, 
&dic, flags, dq); + reset = _dispatch_reset_basepri_override(); + if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { + break; + } + } + + // overcommit or not. worker thread + if (pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { + _dispatch_perfmon_end(perfmon_thread_worker_oc); + } else { + _dispatch_perfmon_end(perfmon_thread_worker_non_oc); } - _dispatch_perfmon_end(); #if DISPATCH_COCOA_COMPAT _dispatch_last_resort_autorelease_pool_pop(pool); #endif // DISPATCH_COCOA_COMPAT - _dispatch_reset_defaultpriority(old_dp); + _dispatch_reset_wlh(); + _dispatch_reset_basepri(old_dbp); _dispatch_queue_set_current(NULL); } @@ -5541,7 +5427,7 @@ _dispatch_worker_thread3(pthread_priority_t pp) dispatch_queue_t dq; pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); - dq = _dispatch_get_root_queue_for_priority(pp, overcommit); + dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); return _dispatch_worker_thread4(dq); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -5598,7 +5484,7 @@ _dispatch_worker_thread(void *context) dispatch_time(0, timeout)) == 0); (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release); - _dispatch_global_queue_poke(dq); + _dispatch_global_queue_poke(dq, 1); _dispatch_release(dq); return NULL; @@ -5629,6 +5515,8 @@ _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset) (void)dispatch_assume_zero(r); r = sigdelset(set, SIGPIPE); (void)dispatch_assume_zero(r); + r = sigdelset(set, SIGPROF); + (void)dispatch_assume_zero(r); return pthread_sigmask(how, set, oset); } @@ -5653,8 +5541,9 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs); _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, false); - dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true); + dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); dq->dq_label = label ? 
label : "runloop-queue"; // no-copy contract + dq->dq_wlh = DISPATCH_WLH_GLOBAL; _dispatch_runloop_queue_handle_init(dq); _dispatch_queue_set_bound_thread(dq); _dispatch_object_debug(dq, "%s", __func__); @@ -5666,10 +5555,10 @@ _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) { _dispatch_object_debug(dq, "%s", __func__); - pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, true); + dispatch_qos_t qos = _dispatch_queue_reset_max_qos(dq); _dispatch_queue_clear_bound_thread(dq); - dx_wakeup(dq, pp, DISPATCH_WAKEUP_FLUSH); - if (pp) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); + dx_wakeup(dq, qos, DISPATCH_WAKEUP_FLUSH); + if (qos) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); } void @@ -5828,12 +5717,7 @@ _dispatch_queue_set_mainq_drain_state(bool arg) void _dispatch_main_queue_callback_4CF( -#if TARGET_OS_MAC - mach_msg_header_t *_Null_unspecified msg -#else - void *ignored -#endif - DISPATCH_UNUSED) + void *ignored DISPATCH_UNUSED) { if (main_q_is_draining) { return; @@ -5848,9 +5732,7 @@ _dispatch_main_queue_callback_4CF( void dispatch_main(void) { - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); - + _dispatch_root_queues_init(); #if HAVE_PTHREAD_MAIN_NP if (pthread_main_np()) { #endif @@ -5901,52 +5783,25 @@ static void _dispatch_queue_cleanup2(void) { dispatch_queue_t dq = &_dispatch_main_q; - _dispatch_queue_clear_bound_thread(dq); + uint64_t old_state, new_state; - // - // Here is what happens when both this cleanup happens because of - // dispatch_main() being called, and a concurrent enqueuer makes the queue - // non empty. - // - // _dispatch_queue_cleanup2: - // atomic_and(dq_is_thread_bound, ~DQF_THREAD_BOUND, relaxed); - // maximal_barrier(); - // if (load(dq_items_tail, seq_cst)) { - // // do the wake up the normal serial queue way - // } else { - // // do no wake up <---- - // } - // - // enqueuer: - // store(dq_items_tail, new_tail, release); - // if (load(dq_is_thread_bound, relaxed)) { - // // do the wake up the runloop way <---- - // } else { - // // do the wake up the normal serial way - // } - // - // what would be bad is to take both paths marked <---- because the queue - // wouldn't be woken up until the next time it's used (which may never - // happen) + // Turning the main queue from a runloop queue into an ordinary serial queue + // is a 3 steps operation: + // 1. finish taking the main queue lock the usual way + // 2. clear the THREAD_BOUND flag + // 3. do a handoff // - // An enqueuer that speculates the load of the old value of thread_bound - // and then does the store may wake up the main queue the runloop way. - // But then, the cleanup thread will see that store because the load - // of dq_items_tail is sequentially consistent, and we have just thrown away - // our pipeline. 
- // - // By the time cleanup2() is out of the maximally synchronizing barrier, - // no other thread can speculate the wrong load anymore, and both cleanup2() - // and a concurrent enqueuer would treat the queue in the standard non - // thread bound way - - _dispatch_queue_atomic_flags_clear(dq, - DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC); - os_atomic_maximally_synchronizing_barrier(); - // no need to drop the override, the thread will die anyway - // the barrier above includes an acquire, so it's ok to do this raw - // call to dx_wakeup(0) - dx_wakeup(dq, 0, 0); + // If an enqueuer executes concurrently, he may do the wakeup the runloop + // way, because he still believes the queue to be thread-bound, but the + // dirty bit will force this codepath to notice the enqueue, and the usual + // lock transfer will do the proper wakeup. + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state & ~DISPATCH_QUEUE_DIRTY; + new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state += DISPATCH_QUEUE_IN_BARRIER; + }); + _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC); + _dispatch_try_lock_transfer_or_wakeup(dq); // overload the "probably" variable to mean that dispatch_main() or // similar non-POSIX API was called @@ -5955,7 +5810,7 @@ _dispatch_queue_cleanup2(void) #ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { _dispatch_barrier_async_detached_f(_dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread); + DISPATCH_QOS_DEFAULT, true), NULL, _dispatch_sig_thread); sleep(1); // workaround 6778970 } #endif @@ -5978,6 +5833,15 @@ _dispatch_queue_cleanup(void *ctxt) "Premature thread exit while a dispatch queue is running"); } +DISPATCH_NORETURN +static void +_dispatch_wlh_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, "Premature thread exit with active wlh"); +} + +DISPATCH_NORETURN static void _dispatch_deferred_items_cleanup(void *ctxt) { @@ -5986,6 +5850,7 @@ _dispatch_deferred_items_cleanup(void *ctxt) "Premature thread exit with unhandled deferred items"); } +DISPATCH_NORETURN static void _dispatch_frame_cleanup(void *ctxt) { @@ -5994,6 +5859,7 @@ _dispatch_frame_cleanup(void *ctxt) "Premature thread exit while a dispatch frame is active"); } +DISPATCH_NORETURN static void _dispatch_context_cleanup(void *ctxt) { diff --git a/src/queue_internal.h b/src/queue_internal.h index 1bff7b014..29e83cc59 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -49,16 +49,18 @@ #pragma mark dispatch_queue_t DISPATCH_ENUM(dispatch_queue_flags, uint32_t, - DQF_NONE = 0x0000, - DQF_AUTORELEASE_ALWAYS = 0x0001, - DQF_AUTORELEASE_NEVER = 0x0002, -#define _DQF_AUTORELEASE_MASK 0x0003 - DQF_THREAD_BOUND = 0x0004, // queue is bound to a thread - DQF_BARRIER_BIT = 0x0008, // queue is a barrier on its target - DQF_TARGETED = 0x0010, // queue is targeted by another object - DQF_LABEL_NEEDS_FREE = 0x0020, // queue label was strduped; need to free it - DQF_CANNOT_TRYSYNC = 0x0040, - DQF_RELEASED = 0x0080, // xref_cnt == -1 + DQF_NONE = 0x00000000, + DQF_AUTORELEASE_ALWAYS = 0x00010000, + DQF_AUTORELEASE_NEVER = 0x00020000, +#define _DQF_AUTORELEASE_MASK 0x00030000 + DQF_THREAD_BOUND = 0x00040000, // queue is bound to a thread + DQF_BARRIER_BIT = 0x00080000, // queue is a barrier on its target + DQF_TARGETED = 0x00100000, // queue is targeted by another object + DQF_LABEL_NEEDS_FREE = 0x00200000, // queue label was 
strduped; need to free it + DQF_CANNOT_TRYSYNC = 0x00400000, + DQF_RELEASED = 0x00800000, // xref_cnt == -1 + DQF_LEGACY = 0x01000000, + DQF_WLH_CHANGED = 0x02000000, // queue wlh changed from initial value // only applies to sources // @@ -77,66 +79,60 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, // will be -p-. // // -pd - // Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is - // gone from the kernel, but ds_dkev lives. Next state will be --d. + // Received EV_DELETE (from ap-), needs to unregister ds_refs, the muxnote + // is gone from the kernel. Next state will be --d. // // -p- // Received an EV_ONESHOT event (from a--), or the delivery of an event // causing the cancellation to fail with EINPROGRESS was delivered - // (from ap-). The knote still lives, next state will be --d. + // (from ap-). The muxnote still lives, next state will be --d. // // --d - // Final state of the source, the knote is gone from the kernel and - // ds_dkev is freed. The source can safely be released. + // Final state of the source, the muxnote is gone from the kernel and + // ds_refs is unregistered. The source can safely be released. // // a-d (INVALID) // apd (INVALID) // Setting DSF_DELETED should also always atomically clear DSF_ARMED. If - // the knote is gone from the kernel, it makes no sense whatsoever to + // the muxnote is gone from the kernel, it makes no sense whatsoever to // have it armed. And generally speaking, once `d` or `p` has been set, // `a` cannot do a cleared -> set transition anymore // (see _dispatch_source_try_set_armed). // - DSF_CANCEL_WAITER = 0x0800, // synchronous waiters for cancel - DSF_CANCELED = 0x1000, // cancellation has been requested - DSF_ARMED = 0x2000, // source is armed - DSF_DEFERRED_DELETE = 0x4000, // source is pending delete - DSF_DELETED = 0x8000, // source knote is deleted + DSF_CANCEL_WAITER = 0x08000000, // synchronous waiters for cancel + DSF_CANCELED = 0x10000000, // cancellation has been requested + DSF_ARMED = 0x20000000, // source is armed + DSF_DEFERRED_DELETE = 0x40000000, // source is pending delete + DSF_DELETED = 0x80000000, // source muxnote is deleted #define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED) - DQF_WIDTH_MASK = 0xffff0000, -#define DQF_WIDTH_SHIFT 16 +#define DQF_FLAGS_MASK ((dispatch_queue_flags_t)0xffff0000) +#define DQF_WIDTH_MASK ((dispatch_queue_flags_t)0x0000ffff) +#define DQF_WIDTH(n) ((dispatch_queue_flags_t)(uint16_t)(n)) ); #define _DISPATCH_QUEUE_HEADER(x) \ struct os_mpsc_queue_s _as_oq[0]; \ DISPATCH_OBJECT_HEADER(x); \ _OS_MPSC_QUEUE_FIELDS(dq, dq_state); \ - dispatch_queue_t dq_specific_q; \ - union { \ - uint32_t volatile dq_atomic_flags; \ - DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ - uint16_t dq_atomic_bits, \ - uint16_t dq_width \ - ); \ - }; \ + DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \ + const uint16_t dq_width, \ + const uint16_t __dq_opaque \ + ); \ uint32_t dq_side_suspend_cnt; \ - DISPATCH_INTROSPECTION_QUEUE_HEADER; \ - dispatch_unfair_lock_s dq_sidelock - /* LP64: 32bit hole on LP64 */ + dispatch_unfair_lock_s dq_sidelock; \ + union { \ + dispatch_queue_t dq_specific_q; \ + struct dispatch_source_refs_s *ds_refs; \ + struct dispatch_timer_source_refs_s *ds_timer_refs; \ + struct dispatch_mach_recv_refs_s *dm_recv_refs; \ + }; \ + DISPATCH_INTROSPECTION_QUEUE_HEADER #define DISPATCH_QUEUE_HEADER(x) \ struct dispatch_queue_s _as_dq[0]; \ _DISPATCH_QUEUE_HEADER(x) -#define DISPATCH_QUEUE_ALIGN __attribute__((aligned(8))) - -#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff 
-#define DISPATCH_QUEUE_WIDTH_MAX 0x7ffe -#define DISPATCH_QUEUE_USES_REDIRECTION(width) \ - ({ uint16_t _width = (width); \ - _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; }) - #define DISPATCH_QUEUE_CACHELINE_PADDING \ char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] #ifdef __LP64__ @@ -206,10 +202,15 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * the full width of the queue is used or reserved (depending on the context) * In other words that the queue has reached or overflown its capacity. */ -#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull -#define DISPATCH_QUEUE_WIDTH_FULL 0x8000ull +#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull +#define DISPATCH_QUEUE_WIDTH_FULL 0x1000ull +#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1) +#define DISPATCH_QUEUE_WIDTH_MAX (DISPATCH_QUEUE_WIDTH_FULL - 2) +#define DISPATCH_QUEUE_USES_REDIRECTION(width) \ + ({ uint16_t _width = (width); \ + _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; }) /* - * w: width (bits 51 - 37) + * w: width (bits 51 - 40) * This encodes how many work items are in flight. Barriers hold `dq_width` * of them while they run. This is encoded as a signed offset with respect, * to full use, where the negative values represent how many available slots @@ -218,19 +219,29 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * * When this value is positive, then `wo` is always set to 1. */ -#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000002000000000ull -#define DISPATCH_QUEUE_WIDTH_MASK 0x001fffe000000000ull -#define DISPATCH_QUEUE_WIDTH_SHIFT 37 +#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000010000000000ull +#define DISPATCH_QUEUE_WIDTH_MASK 0x001fff0000000000ull +#define DISPATCH_QUEUE_WIDTH_SHIFT 40 /* - * pb: pending barrier (bit 36) + * pb: pending barrier (bit 39) * Drainers set this bit when they couldn't run the next work item and it is * a barrier. When this bit is set, `dq_width - 1` work item slots are * reserved so that no wakeup happens until the last work item in flight * completes. */ -#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000001000000000ull +#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000008000000000ull +/* + * p: pended bit (bit 38) + * Set when a drain lock has been pended. When this bit is set, + * the drain lock is taken and ENQUEUED is never set. + * + * This bit marks a queue that needs further processing but was kept pended + * by an async drainer (not reenqueued) in the hope of being able to drain + * it further later. + */ +#define DISPATCH_QUEUE_DRAIN_PENDED 0x0000004000000000ull /* - * d: dirty bit (bit 35) + * d: dirty bit (bit 37) * This bit is set when a queue transitions from empty to not empty. * This bit is set before dq_items_head is set, with appropriate barriers. * Any thread looking at a queue head is responsible for unblocking any @@ -342,66 +353,48 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * * So on the async "acquire" side, there is no subtlety at all. */ -#define DISPATCH_QUEUE_DIRTY 0x0000000800000000ull +#define DISPATCH_QUEUE_DIRTY 0x0000002000000000ull /* - * qo: (bit 34) - * Set when a queue has a useful override set. - * This bit is only cleared when the final drain_try_unlock() succeeds. 
- * - * When the queue dq_override is touched (overrides or-ed in), usually with - * _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set - * with a release barrier and one of these three things happen next: - * - * - the queue is enqueued, which will cause it to be drained, and the - * override to be handled by _dispatch_queue_drain_try_unlock(). - * In rare cases it could cause the queue to be queued while empty though. - * - * - the DIRTY bit is also set with a release barrier, which pairs with - * the handling of these bits by _dispatch_queue_drain_try_unlock(), - * so that dq_override is reset properly. - * - * - the queue was suspended, and _dispatch_queue_resume() will handle the - * override as part of its wakeup sequence. + * e: enqueued bit (bit 36) + * Set when a queue is enqueued on its target queue */ -#define DISPATCH_QUEUE_HAS_OVERRIDE 0x0000000400000000ull +#define DISPATCH_QUEUE_ENQUEUED 0x0000001000000000ull /* - * p: pended bit (bit 33) - * Set when a drain lock has been pended. When this bit is set, - * the drain lock is taken and ENQUEUED is never set. - * - * This bit marks a queue that needs further processing but was kept pended - * by an async drainer (not reenqueued) in the hope of being able to drain - * it further later. + * o: has override (bits 34) + * Set when a queue has received a QOS override and needs to reset it. + * This bit is only cleared when the final drain_try_unlock() succeeds. */ -#define DISPATCH_QUEUE_DRAIN_PENDED 0x0000000200000000ull +#define DISPATCH_QUEUE_RECEIVED_OVERRIDE 0x0000000800000000ull /* - * e: enqueued bit (bit 32) - * Set when a queue is enqueued on its target queue + * max_qos: max qos (bits 34 - 32) + * This is the maximum qos that has been enqueued on the queue */ -#define DISPATCH_QUEUE_ENQUEUED 0x0000000100000000ull +#define DISPATCH_QUEUE_MAX_QOS_MASK 0x0000000700000000ull +#define DISPATCH_QUEUE_MAX_QOS_SHIFT 32 /* * dl: drain lock (bits 31-0) * This is used by the normal drain to drain exlusively relative to other * drain stealers (like the QoS Override codepath). It holds the identity * (thread port) of the current drainer. 
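To make the new dq_state layout concrete, here is a small self-contained sketch (EX_* copies of the constants documented above; the function name is hypothetical, and the drain-lock mask simply takes "bits 31-0" per the comment) that picks the interesting fields out of a state word:

```c
#include <stdint.h>
#include <stdio.h>

#define EX_WIDTH_MASK        0x001fff0000000000ull
#define EX_WIDTH_SHIFT       40
#define EX_PENDING_BARRIER   0x0000008000000000ull
#define EX_DIRTY             0x0000002000000000ull
#define EX_ENQUEUED          0x0000001000000000ull
#define EX_MAX_QOS_MASK      0x0000000700000000ull
#define EX_MAX_QOS_SHIFT     32
#define EX_DRAIN_LOCK_MASK   0x00000000ffffffffull

static void
example_print_dq_state(uint64_t s)
{
	printf("width field (raw, see comment above): %llu\n",
			(unsigned long long)((s & EX_WIDTH_MASK) >> EX_WIDTH_SHIFT));
	printf("max enqueued qos: %llu\n",
			(unsigned long long)((s & EX_MAX_QOS_MASK) >> EX_MAX_QOS_SHIFT));
	printf("dirty: %d  enqueued: %d  pending barrier: %d\n",
			!!(s & EX_DIRTY), !!(s & EX_ENQUEUED),
			!!(s & EX_PENDING_BARRIER));
	printf("drain lock bits: 0x%08llx\n",
			(unsigned long long)(s & EX_DRAIN_LOCK_MASK));
}
```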
*/ -#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK 0x00000002ffffffffull +#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK (DISPATCH_QUEUE_DRAIN_PENDED | ~0u) #ifdef DLOCK_NOWAITERS_BIT #define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT)) -#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ - (((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\ - ^ DLOCK_NOWAITERS_BIT) +#define DISPATCH_QUEUE_DRAIN_UNLOCK(v) \ + (((v) & ~(DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_DRAIN_PENDED \ + | DISPATCH_QUEUE_DRAIN_OWNER_MASK)) ^ DLOCK_NOWAITERS_BIT) #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ - (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ + (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_MAX_QOS_MASK | \ DLOCK_NOWAITERS_BIT) #else #define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT)) -#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ - ((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK)) +#define DISPATCH_QUEUE_DRAIN_UNLOCK(v) \ + ((v) & ~(DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_DRAIN_PENDED | \ + DISPATCH_QUEUE_DRAIN_OWNER_MASK)) #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ - (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ + (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_MAX_QOS_MASK | \ DLOCK_WAITERS_BIT) #endif /* @@ -497,12 +490,12 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, (DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL) DISPATCH_CLASS_DECL(queue); -#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION) +#if !defined(__cplusplus) || !DISPATCH_INTROSPECTION struct dispatch_queue_s { _DISPATCH_QUEUE_HEADER(queue); DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only -} DISPATCH_QUEUE_ALIGN; -#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION) +} DISPATCH_ATOMIC64_ALIGN; +#endif // !defined(__cplusplus) || !DISPATCH_INTROSPECTION DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue); @@ -520,16 +513,14 @@ typedef union { struct dispatch_source_s *_ds; struct dispatch_mach_s *_dm; struct dispatch_queue_specific_queue_s *_dqsq; - struct dispatch_timer_aggregate_s *_dta; #if USE_OBJC os_mpsc_queue_t _ojbc_oq; dispatch_queue_t _objc_dq; dispatch_source_t _objc_ds; dispatch_mach_t _objc_dm; dispatch_queue_specific_queue_t _objc_dqsq; - dispatch_timer_aggregate_t _objc_dta; #endif -} dispatch_queue_class_t __attribute__((__transparent_union__)); +} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION; typedef struct dispatch_thread_context_s *dispatch_thread_context_t; typedef struct dispatch_thread_context_s { @@ -546,18 +537,22 @@ typedef struct dispatch_thread_frame_s { // must be in the same order as our TSD keys! 
dispatch_queue_t dtf_queue; dispatch_thread_frame_t dtf_prev; - struct dispatch_object_s *dtf_deferred; } dispatch_thread_frame_s; -DISPATCH_ENUM(dispatch_queue_wakeup_target, long, - DISPATCH_QUEUE_WAKEUP_NONE = 0, - DISPATCH_QUEUE_WAKEUP_TARGET, - DISPATCH_QUEUE_WAKEUP_MGR, -); +typedef dispatch_queue_t dispatch_queue_wakeup_target_t; +#define DISPATCH_QUEUE_WAKEUP_NONE ((dispatch_queue_wakeup_target_t)0) +#define DISPATCH_QUEUE_WAKEUP_TARGET ((dispatch_queue_wakeup_target_t)1) +#define DISPATCH_QUEUE_WAKEUP_MGR (&_dispatch_mgr_q) +#define DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT ((dispatch_queue_wakeup_target_t)-1) +void _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, + dispatch_qos_t qos, dispatch_wakeup_flags_t flags, uint64_t dq_state); void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu, - pthread_priority_t pp, dispatch_wakeup_flags_t flags); -void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp, + dispatch_qos_t qos, dispatch_wakeup_flags_t flags); +void _dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, + dispatch_qos_t qos, dispatch_wakeup_flags_t flags, + dispatch_queue_wakeup_target_t target); +void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target); void _dispatch_queue_destroy(dispatch_queue_t dq); @@ -566,29 +561,32 @@ void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq); void _dispatch_queue_suspend(dispatch_queue_t dq); void _dispatch_queue_resume(dispatch_queue_t dq, bool activate); void _dispatch_queue_finalize_activation(dispatch_queue_t dq); -void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags); -void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n); +void _dispatch_queue_invoke(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_global_queue_poke(dispatch_queue_t dq, unsigned int n); void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp); + dispatch_qos_t qos); void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq); -void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t *owned, - struct dispatch_object_s **dc_ptr); +dispatch_queue_wakeup_target_t _dispatch_queue_serial_drain(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned); void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t to_unlock, - struct dispatch_object_s *dc); + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t to_unlock); void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); -void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +void _dispatch_root_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, - struct dispatch_object_s *dou, pthread_priority_t pp); +void _dispatch_root_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_qos_t qos); +void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t rq, + dispatch_queue_t dq DISPATCH_PERF_MON_ARGS_PROTO); void 
_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq); -void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +void _dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq); void _dispatch_runloop_queue_dispose(dispatch_queue_t dq); @@ -603,13 +601,13 @@ void _dispatch_kevent_workqueue_init(void); #else static inline void _dispatch_kevent_workqueue_init(void) {} #endif -void _dispatch_sync_recurse_invoke(void *ctxt); void _dispatch_apply_invoke(void *ctxt); void _dispatch_apply_redirect_invoke(void *ctxt); void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); +void _dispatch_queue_atfork_child(void); #if DISPATCH_DEBUG void dispatch_debug_queue(dispatch_queue_t dq, const char* str); @@ -622,10 +620,9 @@ size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz); size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz); -#define DISPATCH_QUEUE_QOS_COUNT 6 -#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2) +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_MAX * 2) -// must be in lowest to highest qos order (as encoded in pthread_priority_t) +// must be in lowest to highest qos order (as encoded in dispatch_qos_t) // overcommit qos index values need bit 1 set enum { DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0, @@ -648,13 +645,13 @@ extern struct dispatch_queue_s _dispatch_root_queues[]; extern struct dispatch_queue_s _dispatch_mgr_q; void _dispatch_root_queues_init(void); -#if HAVE_PTHREAD_WORKQUEUE_QOS -extern pthread_priority_t _dispatch_background_priority; -extern pthread_priority_t _dispatch_user_initiated_priority; +#if DISPATCH_DEBUG +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ + dispatch_assert_queue(&_dispatch_mgr_q) +#else +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif -typedef uint8_t _dispatch_qos_class_t; - #pragma mark - #pragma mark dispatch_queue_attr_t @@ -667,8 +664,7 @@ typedef enum { DISPATCH_CLASS_DECL(queue_attr); struct dispatch_queue_attr_s { OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr); - _dispatch_qos_class_t dqa_qos_class; - int8_t dqa_relative_priority; + dispatch_priority_requested_t dqa_qos_and_relpri; uint16_t dqa_overcommit:2; uint16_t dqa_autorelease_frequency:2; uint16_t dqa_concurrent:1; @@ -805,7 +801,7 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); ~(DISPATCH_CONTINUATION_SIZE - 1u)) // continuation is a dispatch_sync or dispatch_barrier_sync -#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x001ul +#define DISPATCH_OBJ_SYNC_WAITER_BIT 0x001ul // continuation acts as a barrier #define DISPATCH_OBJ_BARRIER_BIT 0x002ul // continuation resources are freed on run @@ -822,16 +818,29 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); // use the voucher from the continuation even if the queue has voucher set #define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul -struct dispatch_continuation_s { +typedef struct dispatch_continuation_s { struct dispatch_object_s _as_do[0]; DISPATCH_CONTINUATION_HEADER(continuation); -}; -typedef struct dispatch_continuation_s *dispatch_continuation_t; +} *dispatch_continuation_t; + +typedef struct 
dispatch_sync_context_s { + struct dispatch_object_s _as_do[0]; + struct dispatch_continuation_s _as_dc[0]; + DISPATCH_CONTINUATION_HEADER(continuation); + dispatch_function_t dsc_func; + void *dsc_ctxt; +#if DISPATCH_COCOA_COMPAT + dispatch_thread_frame_s dsc_dtf; +#endif + dispatch_thread_event_s dsc_event; + dispatch_qos_t dsc_override_qos_floor; + dispatch_qos_t dsc_override_qos; +} *dispatch_sync_context_t; typedef struct dispatch_continuation_vtable_s { _OS_OBJECT_CLASS_HEADER(); DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation); -} *dispatch_continuation_vtable_t; +} const *dispatch_continuation_vtable_t; #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT #if TARGET_OS_EMBEDDED @@ -847,8 +856,9 @@ dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); void _dispatch_continuation_free_to_heap(dispatch_continuation_t c); void _dispatch_continuation_async(dispatch_queue_t dq, dispatch_continuation_t dc); -void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, - dispatch_invoke_flags_t flags); +void _dispatch_continuation_pop(dispatch_object_t dou, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + dispatch_queue_t dq); void _dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, dispatch_invoke_flags_t flags); @@ -870,6 +880,7 @@ enum { DC_MACH_SEND_BARRRIER_DRAIN_TYPE, DC_MACH_SEND_BARRIER_TYPE, DC_MACH_RECV_BARRIER_TYPE, + DC_MACH_ASYNC_REPLY_TYPE, #if HAVE_PTHREAD_WORKQUEUE_QOS DC_OVERRIDE_STEALING_TYPE, DC_OVERRIDE_OWNING_TYPE, @@ -896,12 +907,12 @@ extern const struct dispatch_continuation_vtable_s void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); #if HAVE_PTHREAD_WORKQUEUE_QOS void _dispatch_queue_override_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); #endif #define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE]) @@ -919,8 +930,14 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc, void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri, mach_voucher_t kv); voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri, - voucher_t voucher, _dispatch_thread_set_self_t flags); - + voucher_t voucher, dispatch_thread_set_self_t flags); +#else +static inline void +_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri, + mach_voucher_t kv) +{ + (void)pri; (void)kv; +} #endif #pragma mark - #pragma mark dispatch_apply_t @@ -990,12 +1007,12 @@ void _dispatch_continuation_init_slow(dispatch_continuation_t dc, void _dispatch_continuation_update_bits(dispatch_continuation_t dc, uintptr_t dc_flags); -bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, +long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); /* exported for tests in dispatch_trysync.c */ DISPATCH_EXPORT DISPATCH_NOTHROW -bool _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, +long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f); #endif /* __BLOCKS__ */ diff --git a/src/semaphore.c b/src/semaphore.c index 4d232b7eb..fa6d21ace 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -20,53 +20,6 @@ #include "internal.h" -// semaphores are too fundamental to use the dispatch_assume*() macros -#if USE_WIN32_SEM -// rdar://problem/8428132 -static DWORD best_resolution = 1; // 1ms - -DWORD 
-_push_timer_resolution(DWORD ms) -{ - MMRESULT res; - static dispatch_once_t once; - - if (ms > 16) { - // only update timer resolution if smaller than default 15.6ms - // zero means not updated - return 0; - } - - // aim for the best resolution we can accomplish - dispatch_once(&once, ^{ - TIMECAPS tc; - MMRESULT res; - res = timeGetDevCaps(&tc, sizeof(tc)); - if (res == MMSYSERR_NOERROR) { - best_resolution = min(max(tc.wPeriodMin, best_resolution), - tc.wPeriodMax); - } - }); - - res = timeBeginPeriod(best_resolution); - if (res == TIMERR_NOERROR) { - return best_resolution; - } - // zero means not updated - return 0; -} - -// match ms parameter to result from _push_timer_resolution -void -_pop_timer_resolution(DWORD ms) -{ - if (ms) { - timeEndPeriod(ms); - } -} -#endif /* USE_WIN32_SEM */ - - DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); @@ -79,36 +32,9 @@ _dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau) struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; dsema->do_next = DISPATCH_OBJECT_LISTLESS; - dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); + dsema->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); dsema->dsema_value = value; -#if USE_POSIX_SEM - int ret = sem_init(&dsema->dsema_sem, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#endif -} - -static void -_dispatch_semaphore_class_dispose(dispatch_semaphore_class_t dsemau) -{ - struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; - -#if USE_MACH_SEM - kern_return_t kr; - if (dsema->dsema_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } - dsema->dsema_port = MACH_PORT_DEAD; -#elif USE_POSIX_SEM - int ret = sem_destroy(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - if (dsema->dsema_handle) { - CloseHandle(dsema->dsema_handle); - } -#endif + _dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); } #pragma mark - @@ -133,59 +59,6 @@ dispatch_semaphore_create(long value) return dsema; } -#if USE_MACH_SEM -static void -_dispatch_semaphore_create_port(semaphore_t *s4) -{ - kern_return_t kr; - semaphore_t tmp; - - if (*s4) { - return; - } - _dispatch_fork_becomes_unsafe(); - - // lazily allocate the semaphore port - - // Someday: - // 1) Switch to a doubly-linked FIFO in user-space. - // 2) User-space timers for the timeout. - // 3) Use the per-thread semaphore port. 
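The pattern being removed here, and reintroduced portably by _dispatch_sema4_create_slow() in src/shims/lock.c further down in this patch, is the same in every backend: create the kernel object lazily, publish it with a compare-and-swap, and tear down the duplicate if another thread won the race. A standalone sketch of that idiom with POSIX unnamed semaphores (all names hypothetical):

```c
#include <semaphore.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Lazily create a semaphore and publish it into *slot; if another thread
 * published first, destroy our copy and return the winner's semaphore. */
static sem_t *
example_sema_lazy_get(sem_t *_Atomic *slot)
{
	sem_t *sema = atomic_load_explicit(slot, memory_order_acquire);
	if (sema) {
		return sema;
	}
	sema = malloc(sizeof(*sema));
	if (!sema || sem_init(sema, 0, 0) != 0) {
		abort(); /* resource-shortage handling elided */
	}
	sem_t *expected = NULL;
	if (!atomic_compare_exchange_strong_explicit(slot, &expected, sema,
			memory_order_acq_rel, memory_order_acquire)) {
		sem_destroy(sema); /* lost the race: discard our copy */
		free(sema);
		sema = expected;
	}
	return sema;
}
```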
- - while ((kr = semaphore_create(mach_task_self(), &tmp, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - - if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { - kr = semaphore_destroy(mach_task_self(), tmp); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } -} -#elif USE_WIN32_SEM -static void -_dispatch_semaphore_create_handle(HANDLE *s4) -{ - HANDLE tmp; - - if (*s4) { - return; - } - - // lazily allocate the semaphore port - - while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { - _dispatch_temporary_resource_shortage(); - } - - if (!os_atomic_cmpxchg(s4, 0, tmp)) { - CloseHandle(tmp); - } -} -#endif - void _dispatch_semaphore_dispose(dispatch_object_t dou) { @@ -196,7 +69,7 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) "Semaphore object deallocated while in use"); } - _dispatch_semaphore_class_dispose(dsema); + _dispatch_sema4_dispose(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); } size_t @@ -210,7 +83,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", - dsema->dsema_port); + dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); @@ -221,18 +94,8 @@ DISPATCH_NOINLINE long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) { -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); - kern_return_t kr = semaphore_signal(dsema->dsema_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - int ret = sem_post(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); - int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); - dispatch_assume(ret); -#endif + _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); + _dispatch_sema4_signal(&dsema->dsema_sema, 1); return 1; } @@ -257,61 +120,12 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, { long orig; -#if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; -#elif USE_POSIX_SEM - struct timespec _timeout; - int ret; -#elif USE_WIN32_SEM - uint64_t nsec; - DWORD msec; - DWORD resolution; - DWORD wait_result; -#endif - -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); -#endif - + _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); switch (timeout) { default: -#if USE_MACH_SEM - do { - uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); - } while (kr == KERN_ABORTED); - - if (kr != KERN_OPERATION_TIMED_OUT) { - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - break; - } -#elif USE_POSIX_SEM - do { - uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); - } while (ret == -1 && errno == EINTR); - - if (!(ret == -1 && errno == ETIMEDOUT)) { - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - break; - } -#elif USE_WIN32_SEM - nsec = 
_dispatch_timeout(timeout); - msec = (DWORD)(nsec / (uint64_t)1000000); - resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dsema->dsema_handle, msec); - _pop_timer_resolution(resolution); - if (wait_result != WAIT_TIMEOUT) { + if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) { break; } -#endif // Fall through and try to undo what the fast path did to // dsema->dsema_value case DISPATCH_TIME_NOW: @@ -319,30 +133,13 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, while (orig < 0) { if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return -1; -#endif + return _DSEMA4_TIMEOUT(); } } // Another thread called semaphore_signal(). // Fall through and drain the wakeup. case DISPATCH_TIME_FOREVER: -#if USE_MACH_SEM - do { - kr = semaphore_wait(dsema->dsema_port); - } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - do { - ret = sem_wait(&dsema->dsema_sem); - } while (ret != 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - WaitForSingleObject(dsema->dsema_handle, INFINITE); -#endif + _dispatch_sema4_wait(&dsema->dsema_sema); break; } return 0; @@ -416,25 +213,8 @@ _dispatch_group_wake(dispatch_group_t dg, bool needs_release) rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed); if (rval) { // wake group waiters -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dg->dg_port); - do { - kern_return_t kr = semaphore_signal(dg->dg_port); - DISPATCH_GROUP_VERIFY_KR(kr); - } while (--rval); -#elif USE_POSIX_SEM - do { - int ret = sem_post(&dg->dg_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - } while (--rval); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dg->dg_handle); - int ret; - ret = ReleaseSemaphore(dg->dg_handle, rval, NULL); - dispatch_assume(ret); -#else -#error "No supported semaphore type" -#endif + _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO); + _dispatch_sema4_signal(&dg->dg_sema, rval); } if (head) { // async group notify blocks @@ -475,7 +255,7 @@ _dispatch_group_dispose(dispatch_object_t dou) "Group object deallocated while in use"); } - _dispatch_semaphore_class_dispose(dg); + _dispatch_sema4_dispose(&dg->dg_sema, _DSEMA4_POLICY_FIFO); } size_t @@ -489,7 +269,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); #if USE_MACH_SEM offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", - dg->dg_port); + dg->dg_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, "count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters); @@ -503,19 +283,6 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) long value; int orig_waiters; -#if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; -#elif USE_POSIX_SEM // KVV - struct timespec _timeout; - int ret; -#elif USE_WIN32_SEM // KVV - uint64_t nsec; - DWORD msec; - DWORD resolution; - DWORD wait_result; -#endif - // check before we cause another signal to be sent by incrementing // dg->dg_waiters value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 @@ -533,48 +300,12 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) timeout = DISPATCH_TIME_FOREVER; } -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dg->dg_port); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dg->dg_handle); 
-#endif - + _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO); switch (timeout) { default: -#if USE_MACH_SEM - do { - uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dg->dg_port, _timeout)); - } while (kr == KERN_ABORTED); - - if (kr != KERN_OPERATION_TIMED_OUT) { - DISPATCH_GROUP_VERIFY_KR(kr); - break; - } -#elif USE_POSIX_SEM - do { - uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dg->dg_sem, &_timeout)); - } while (ret == -1 && errno == EINTR); - - if (!(ret == -1 && errno == ETIMEDOUT)) { - DISPATCH_SEMAPHORE_VERIFY_RET(ret); + if (!_dispatch_sema4_timedwait(&dg->dg_sema, timeout)) { break; } -#elif USE_WIN32_SEM - nsec = _dispatch_timeout(timeout); - msec = (DWORD)(nsec / (uint64_t)1000000); - resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dg->dg_handle, msec); - _pop_timer_resolution(resolution); - if (wait_result != WAIT_TIMEOUT) { - break; - } -#endif // Fall through and try to undo the earlier change to // dg->dg_waiters case DISPATCH_TIME_NOW: @@ -582,30 +313,13 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) while (orig_waiters) { if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters, orig_waiters - 1, &orig_waiters, relaxed)) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return -1; -#endif + return _DSEMA4_TIMEOUT(); } } - // Another thread called semaphore_signal(). + // Another thread is running _dispatch_group_wake() // Fall through and drain the wakeup. 
case DISPATCH_TIME_FOREVER: -#if USE_MACH_SEM - do { - kr = semaphore_wait(dg->dg_port); - } while (kr == KERN_ABORTED); - DISPATCH_GROUP_VERIFY_KR(kr); -#elif USE_POSIX_SEM - do { - ret = sem_wait(&dg->dg_sem); - } while (ret == -1 && errno == EINTR); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - WaitForSingleObject(dg->dg_handle, INFINITE); -#endif + _dispatch_sema4_wait(&dg->dg_sema); break; } return 0; @@ -618,12 +332,7 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) return 0; } if (timeout == 0) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return (-1); -#endif + return _DSEMA4_TIMEOUT(); } return _dispatch_group_wait_slow(dg, timeout); } diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index dceda6d97..3a4ef6db2 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -29,20 +29,10 @@ struct dispatch_queue_s; -#if USE_MACH_SEM -#define DISPATCH_OS_SEMA_FIELD(base) semaphore_t base##_port -#elif USE_POSIX_SEM -#define DISPATCH_OS_SEMA_FIELD(base) sem_t base##_sem -#elif USE_WIN32_SEM -#define DISPATCH_OS_SEMA_FIELD(base) HANDLE base##_handle -#else -#error "No supported semaphore type" -#endif - #define DISPATCH_SEMAPHORE_HEADER(cls, ns) \ DISPATCH_OBJECT_HEADER(cls); \ long volatile ns##_value; \ - DISPATCH_OS_SEMA_FIELD(ns) + _dispatch_sema4_t ns##_sema struct dispatch_semaphore_header_s { DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); @@ -70,7 +60,7 @@ typedef union { dispatch_semaphore_t _objc_dsema; dispatch_group_t _objc_dg; #endif -} dispatch_semaphore_class_t __attribute__((__transparent_union__)); +} dispatch_semaphore_class_t DISPATCH_TRANSPARENT_UNION; dispatch_group_t _dispatch_group_create_and_enter(void); void _dispatch_group_dispose(dispatch_object_t dou); diff --git a/src/shims.h b/src/shims.h index 30d8929d4..8434341ec 100644 --- a/src/shims.h +++ b/src/shims.h @@ -28,71 +28,6 @@ #define __DISPATCH_OS_SHIMS__ #include -#if HAVE_PTHREAD_QOS_H && __has_include() -#include -#if __has_include() -#include -#define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE -#define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED -#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT -#define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY -#define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND -#define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED -#else // pthread/qos_private.h -typedef unsigned long pthread_priority_t; -#endif // pthread/qos_private.h -#if __has_include() -#include -#define _DISPATCH_QOS_CLASS_MAINTENANCE QOS_CLASS_MAINTENANCE -#else // sys/qos_private.h -#define _DISPATCH_QOS_CLASS_MAINTENANCE 0x05 -#endif // sys/qos_private.h -#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG -#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 -#endif -#ifndef _PTHREAD_PRIORITY_INHERIT_FLAG -#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 -#endif -#ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG -#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 -#endif -#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG -#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 -#endif -#ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG -#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 -#endif -#ifndef _PTHREAD_PRIORITY_OVERRIDE_FLAG -#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 -#endif -#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG -#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 -#endif -#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG -#define 
_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 -#endif -#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG -#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 -#endif - -#else // HAVE_PTHREAD_QOS_H -typedef unsigned int qos_class_t; -typedef unsigned long pthread_priority_t; -#define QOS_MIN_RELATIVE_PRIORITY (-15) -#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) -#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 -#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) -#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff -#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 -#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 -#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 -#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 -#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 -#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 -#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 -#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 -#endif // HAVE_PTHREAD_QOS_H - #ifdef __linux__ #include "shims/linux_stubs.h" #endif @@ -101,20 +36,8 @@ typedef unsigned long pthread_priority_t; #include "shims/android_stubs.h" #endif -typedef uint32_t dispatch_priority_t; -#define DISPATCH_SATURATED_OVERRIDE ((dispatch_priority_t)UINT32_MAX) - -#ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE -enum { - _DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21, - _DISPATCH_QOS_CLASS_USER_INITIATED = 0x19, - _DISPATCH_QOS_CLASS_DEFAULT = 0x15, - _DISPATCH_QOS_CLASS_UTILITY = 0x11, - _DISPATCH_QOS_CLASS_BACKGROUND = 0x09, - _DISPATCH_QOS_CLASS_MAINTENANCE = 0x05, - _DISPATCH_QOS_CLASS_UNSPECIFIED = 0x00, -}; -#endif // _DISPATCH_QOS_CLASS_USER_INTERACTIVE +#include "shims/priority.h" + #if HAVE_PTHREAD_WORKQUEUES #if __has_include() #include @@ -211,6 +134,15 @@ _pthread_qos_override_end_direct(mach_port_t thread, void *resource) #define _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND 0 #endif +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20160427 +static inline bool +_pthread_workqueue_should_narrow(pthread_priority_t priority) +{ + (void)priority; + return false; +} +#endif + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -231,6 +163,8 @@ void __builtin_trap(void); #ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" #endif +#define DISPATCH_ATOMIC64_ALIGN __attribute__((aligned(8))) + #include "shims/atomic_sfb.h" #include "shims/tsd.h" #include "shims/yield.h" diff --git a/src/shims/android_stubs.h b/src/shims/android_stubs.h index 576f89065..c8032a390 100644 --- a/src/shims/android_stubs.h +++ b/src/shims/android_stubs.h @@ -16,21 +16,8 @@ #ifndef __DISPATCH__ANDROID__STUBS__INTERNAL #define __DISPATCH__ANDROID__STUBS__INTERNAL -/* - * Missing sys/queue.h macro stubs - */ - -#ifndef TAILQ_FOREACH_SAFE -# define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ - for ((var) = TAILQ_FIRST((head)); \ - (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ - (var) = (tvar)) -#endif /* TAILQ_FOREACH_SAFE */ - -#if DISPATCH_DEBUG -#ifndef TRASHIT -# define TRASHIT(x) do {(x) = (void *)-1;} while (0) -#endif /* TRASHIT */ +#if !__has_feature(c_static_assert) +#define _Static_assert(...) 
#endif #endif /* __DISPATCH__ANDROID__STUBS__INTERNAL */ diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 519947790..64af8b272 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -27,114 +27,50 @@ #ifndef __DISPATCH_SHIMS_ATOMIC__ #define __DISPATCH_SHIMS_ATOMIC__ -#if !__has_extension(c_atomic) || \ - !__has_extension(c_generic_selections) || \ - !__has_include() -#error libdispatch requires C11 with and generic selections +#if !__has_extension(c_atomic) || !__has_include() +#error libdispatch requires C11 with #endif #include -#define memory_order_ordered memory_order_seq_cst +#define memory_order_ordered memory_order_seq_cst +#define memory_order_dependency memory_order_acquire -#define _os_atomic_basetypeof(p) \ - typeof(*_Generic((p), \ - char*: (char*)(p), \ - volatile char*: (char*)(p), \ - signed char*: (signed char*)(p), \ - volatile signed char*: (signed char*)(p), \ - unsigned char*: (unsigned char*)(p), \ - volatile unsigned char*: (unsigned char*)(p), \ - short*: (short*)(p), \ - volatile short*: (short*)(p), \ - unsigned short*: (unsigned short*)(p), \ - volatile unsigned short*: (unsigned short*)(p), \ - int*: (int*)(p), \ - volatile int*: (int*)(p), \ - unsigned int*: (unsigned int*)(p), \ - volatile unsigned int*: (unsigned int*)(p), \ - long*: (long*)(p), \ - volatile long*: (long*)(p), \ - unsigned long*: (unsigned long*)(p), \ - volatile unsigned long*: (unsigned long*)(p), \ - long long*: (long long*)(p), \ - volatile long long*: (long long*)(p), \ - unsigned long long*: (unsigned long long*)(p), \ - volatile unsigned long long*: (unsigned long long*)(p), \ - const void**: (const void**)(p), \ - const void*volatile*: (const void**)(p), \ - default: (void**)(p))) +#define os_atomic(type) type _Atomic #define _os_atomic_c11_atomic(p) \ - _Generic((p), \ - char*: (_Atomic(char)*)(p), \ - volatile char*: (volatile _Atomic(char)*)(p), \ - signed char*: (_Atomic(signed char)*)(p), \ - volatile signed char*: (volatile _Atomic(signed char)*)(p), \ - unsigned char*: (_Atomic(unsigned char)*)(p), \ - volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \ - short*: (_Atomic(short)*)(p), \ - volatile short*: (volatile _Atomic(short)*)(p), \ - unsigned short*: (_Atomic(unsigned short)*)(p), \ - volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \ - int*: (_Atomic(int)*)(p), \ - volatile int*: (volatile _Atomic(int)*)(p), \ - unsigned int*: (_Atomic(unsigned int)*)(p), \ - volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \ - long*: (_Atomic(long)*)(p), \ - volatile long*: (volatile _Atomic(long)*)(p), \ - unsigned long*: (_Atomic(unsigned long)*)(p), \ - volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \ - long long*: (_Atomic(long long)*)(p), \ - volatile long long*: (volatile _Atomic(long long)*)(p), \ - unsigned long long*: (_Atomic(unsigned long long)*)(p), \ - volatile unsigned long long*: \ - (volatile _Atomic(unsigned long long)*)(p), \ - const void**: (_Atomic(const void*)*)(p), \ - const void*volatile*: (volatile _Atomic(const void*)*)(p), \ - default: (volatile _Atomic(void*)*)(p)) + ((typeof(*(p)) _Atomic *)(p)) -#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) -// see comment in dispatch_once.c -#define os_atomic_maximally_synchronizing_barrier() \ - atomic_thread_fence(memory_order_seq_cst) +// This removes the _Atomic and volatile qualifiers on the type of *p +#define _os_atomic_basetypeof(p) \ + typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) 
#define os_atomic_load(p, m) \ - ({ _os_atomic_basetypeof(p) _r = \ - atomic_load_explicit(_os_atomic_c11_atomic(p), \ - memory_order_##m); (typeof(*(p)))_r; }) + atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) #define os_atomic_store(p, v, m) \ - ({ _os_atomic_basetypeof(p) _v = (v); \ - atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); }) + atomic_store_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m) #define os_atomic_xchg(p, v, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = \ - atomic_exchange_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); (typeof(*(p)))_r; }) + atomic_exchange_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m) #define os_atomic_cmpxchg(p, e, v, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \ + ({ _os_atomic_basetypeof(p) _r = (e); \ atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, _v, memory_order_##m, \ - memory_order_relaxed); }) + &_r, v, memory_order_##m, memory_order_relaxed); }) #define os_atomic_cmpxchgv(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \ atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, _v, memory_order_##m, \ - memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) + &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) #define os_atomic_cmpxchgvw(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \ atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \ - &_r, _v, memory_order_##m, \ - memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) + &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) #define _os_atomic_c11_op(p, v, m, o, op) \ ({ _os_atomic_basetypeof(p) _v = (v), _r = \ atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ memory_order_##m); (typeof(*(p)))(_r op _v); }) #define _os_atomic_c11_op_orig(p, v, m, o, op) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = \ - atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); (typeof(*(p)))_r; }) + atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \ + memory_order_##m) #define os_atomic_add(p, v, m) \ _os_atomic_c11_op((p), (v), m, add, +) #define os_atomic_add_orig(p, v, m) \ @@ -156,22 +92,13 @@ #define os_atomic_xor_orig(p, v, m) \ _os_atomic_c11_op_orig((p), (v), m, xor, ^) -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - bool _result = false; \ - typeof(p) _p = (p); \ - ov = os_atomic_load(_p, relaxed); \ - do { \ - __VA_ARGS__; \ - _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (os_unlikely(!_result)); \ - _result; \ - }) -#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) 
\ - os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) -#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ - ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) -#define os_atomic_rmw_loop_give_up(expr) \ - os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) +#define os_atomic_force_dependency_on(p, e) (p) +#define os_atomic_load_with_dependency_on(p, e) \ + os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) +#define os_atomic_load_with_dependency_on2o(p, f, e) \ + os_atomic_load_with_dependency_on(&(p)->f, e) + +#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) #define os_atomic_load2o(p, f, m) \ os_atomic_load(&(p)->f, m) @@ -223,28 +150,21 @@ #define os_atomic_dec_orig2o(p, f, m) \ os_atomic_sub_orig2o(p, f, 1, m) -#if defined(__x86_64__) || defined(__i386__) -#undef os_atomic_maximally_synchronizing_barrier -#ifdef __LP64__ -#define os_atomic_maximally_synchronizing_barrier() \ - ({ unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); }) -#else -#ifdef __llvm__ -#define os_atomic_maximally_synchronizing_barrier() \ - ({ unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); }) -#else // gcc does not allow inline i386 asm to clobber ebx -#define os_atomic_maximally_synchronizing_barrier() \ - ({ unsigned long _clbr; __asm__ __volatile__( \ - "pushl %%ebx\n\t" \ - "cpuid\n\t" \ - "popl %%ebx" \ - : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); }) -#endif -#endif -#endif // defined(__x86_64__) || defined(__i386__) +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + bool _result = false; \ + typeof(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (os_unlikely(!_result)); \ + _result; \ + }) +#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \ + os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) +#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ + ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) +#define os_atomic_rmw_loop_give_up(expr) \ + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) #endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h index 5f972b4fe..de074a444 100644 --- a/src/shims/atomic_sfb.h +++ b/src/shims/atomic_sfb.h @@ -27,43 +27,9 @@ #ifndef __DISPATCH_SHIMS_ATOMIC_SFB__ #define __DISPATCH_SHIMS_ATOMIC_SFB__ -#if __clang__ && __clang_major__ < 5 // -#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x)) -#endif - -// Returns UINT_MAX if all the bits in p were already set. -#define os_atomic_set_first_bit(p,m) _os_atomic_set_first_bit(p,m) - -DISPATCH_ALWAYS_INLINE -static inline unsigned int -_os_atomic_set_first_bit(volatile unsigned long *p, - unsigned int max_index) -{ - unsigned int index; - unsigned long b, mask, b_masked; - - for (;;) { - b = *p; - // ffs returns 1 + index, or 0 if none set. - index = (unsigned int)__builtin_ffsl((long)~b); - if (slowpath(index == 0)) { - return UINT_MAX; - } - index--; - if (slowpath(index > max_index)) { - return UINT_MAX; - } - mask = ((typeof(b))1) << index; - b_masked = b | mask; - if (__sync_bool_compare_and_swap(p, b, b_masked)) { - return index; - } - } -} - #if defined(__x86_64__) || defined(__i386__) -#undef os_atomic_set_first_bit +// Returns UINT_MAX if all the bits in p were already set. 
DISPATCH_ALWAYS_INLINE static inline unsigned int os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) @@ -108,7 +74,35 @@ os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) return (unsigned int)bit; } +#else + +#if __clang__ && __clang_major__ < 5 // +#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x)) #endif +DISPATCH_ALWAYS_INLINE +static inline unsigned int +os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max_index) +{ + unsigned int index; + unsigned long b, b_masked; + + os_atomic_rmw_loop(p, b, b_masked, relaxed, { + // ffs returns 1 + index, or 0 if none set + index = (unsigned int)__builtin_ffsl((long)~b); + if (slowpath(index == 0)) { + os_atomic_rmw_loop_give_up(return UINT_MAX); + } + index--; + if (slowpath(index > max_index)) { + os_atomic_rmw_loop_give_up(return UINT_MAX); + } + b_masked = b | (1UL << index); + }); + + return index; +} + +#endif #endif // __DISPATCH_SHIMS_ATOMIC_SFB__ diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h index bafe40921..ec684170d 100644 --- a/src/shims/linux_stubs.h +++ b/src/shims/linux_stubs.h @@ -17,7 +17,7 @@ #define __DISPATCH__STUBS__INTERNAL #ifndef TAILQ_FOREACH_SAFE -#define TAILQ_FOREACH_SAFE(var, head, field, temp) \ +#define TAILQ_FOREACH_SAFE(var, head, field, temp) \ for ((var) = TAILQ_FIRST((head)); \ (var) && ((temp) = TAILQ_NEXT((var), field), 1); (var) = (temp)) #endif @@ -39,75 +39,28 @@ typedef uint32_t mach_port_t; typedef uint32_t mach_error_t; -typedef uint32_t mach_vm_size_t; - typedef uint32_t mach_msg_return_t; typedef uint32_t mach_msg_bits_t; -typedef uintptr_t mach_vm_address_t; - -typedef uint32_t dispatch_mach_msg_t; - -typedef uint32_t dispatch_mach_t; - -typedef uint32_t dispatch_mach_reason_t; - -typedef uint32_t voucher_activity_mode_t; - -typedef uint32_t voucher_activity_trace_id_t; - -typedef uint32_t voucher_activity_id_t; - -typedef uint32_t voucher_activity_flag_t; - -typedef struct { } mach_msg_header_t; - +typedef void *dispatch_mach_msg_t; -typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t, - dispatch_mach_msg_t, mach_error_t); +typedef uint64_t firehose_activity_id_t; -typedef void (*dispatch_mach_msg_destructor_t)(void*); +typedef void *mach_msg_header_t; // Print a warning when an unported code path executes. -#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); } while (0) +#define LINUX_PORT_ERROR() do { \ + printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",\ + __FILE__,__LINE__,__FUNCTION__); } while (0) /* * Stub out defines for other missing types */ -#if __linux__ -// we fall back to use kevent -#define kevent64_s kevent -#define kevent64(kq,cl,nc,el,ne,f,to) kevent(kq,cl,nc,el,ne,to) -#endif - // SIZE_T_MAX should not be hardcoded like this here. #ifndef SIZE_T_MAX #define SIZE_T_MAX (~(size_t)0) #endif -// Define to 0 the NOTE_ values that are not present on Linux. -// Revisit this...would it be better to ifdef out the uses instead?? - -// The following values are passed as part of the EVFILT_TIMER requests - -#define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */ - -#ifndef NOTE_SECONDS -#define NOTE_SECONDS 0x01 -#define NOTE_USECONDS 0x02 -#define NOTE_NSECONDS 0x04 -#define NOTE_ABSOLUTE 0x08 -#define KEVENT_NSEC_NOT_SUPPORTED -#endif -#define NOTE_CRITICAL 0x10 -#define NOTE_BACKGROUND 0x20 -#define NOTE_LEEWAY 0x40 - -// need to catch the following usage if it happens .. 
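A minimal sketch of the os_atomic_rmw_loop / os_atomic_rmw_loop_give_up pattern that the generic os_atomic_set_first_bit fallback above relies on; example_flags, EXAMPLE_DONE_BIT and example_try_set_done are illustrative names only and are not part of this patch:

static unsigned long example_flags;
#define EXAMPLE_DONE_BIT 0x1ul

DISPATCH_ALWAYS_INLINE
static inline bool
example_try_set_done(void)
{
	unsigned long ov, nv;
	// The loop reloads the target, runs the body, then retries the CAS
	// with the requested memory order until it sticks.
	return os_atomic_rmw_loop(&example_flags, ov, nv, release, {
		if (ov & EXAMPLE_DONE_BIT) {
			// Leave the loop without publishing a new value.
			os_atomic_rmw_loop_give_up(return false);
		}
		nv = ov | EXAMPLE_DONE_BIT;
	});
}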
-// we simply return '0' as a value probably not correct - -#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;}) - #endif diff --git a/src/shims/lock.c b/src/shims/lock.c index 983fe47b3..de90d60b0 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -49,6 +49,265 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, } #endif +#pragma mark - semaphores + +#if USE_MACH_SEM +#if __has_include() +#include +#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 +#else +#define DISPATCH_USE_OS_SEMAPHORE_CACHE 0 +#endif + +#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ + DISPATCH_VERIFY_MIG(x); \ + if (unlikely((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH((x), \ + "Use-after-free of dispatch_semaphore_t or dispatch_group_t"); \ + } else if (unlikely(x)) { \ + DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ + } \ + } while (0) + +void +_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy) +{ + semaphore_t tmp = MACH_PORT_NULL; + + _dispatch_fork_becomes_unsafe(); + + // lazily allocate the semaphore port + + // Someday: + // 1) Switch to a doubly-linked FIFO in user-space. + // 2) User-space timers for the timeout. + +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + if (policy == _DSEMA4_POLICY_FIFO) { + tmp = (_dispatch_sema4_t)os_get_cached_semaphore(); + if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) { + os_put_cached_semaphore((os_semaphore_t)tmp); + } + return; + } +#endif + + kern_return_t kr = semaphore_create(mach_task_self(), &tmp, policy, 0); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + + if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) { + kr = semaphore_destroy(mach_task_self(), tmp); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } +} + +void +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy) +{ + semaphore_t sema_port = *sema; + *sema = MACH_PORT_DEAD; +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + if (policy == _DSEMA4_POLICY_FIFO) { + return os_put_cached_semaphore((os_semaphore_t)sema_port); + } +#endif + kern_return_t kr = semaphore_destroy(mach_task_self(), sema_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} + +void +_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) +{ + do { + kern_return_t kr = semaphore_signal(*sema); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } while (--count); +} + +void +_dispatch_sema4_wait(_dispatch_sema4_t *sema) +{ + kern_return_t kr; + do { + kr = semaphore_wait(*sema); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} + +bool +_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) +{ + mach_timespec_t _timeout; + kern_return_t kr; + + do { + uint64_t nsec = _dispatch_timeout(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + kr = slowpath(semaphore_timedwait(*sema, _timeout)); + } while (kr == KERN_ABORTED); + + if (kr == KERN_OPERATION_TIMED_OUT) { + return true; + } + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + return false; +} +#elif USE_POSIX_SEM +#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ + if (unlikely((x) == -1)) { \ + DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \ + } \ + } while (0) + +void +_dispatch_sema4_init(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED) +{ + int rc = sem_init(sema, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +} + +void +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED) +{ + int rc = sem_destroy(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +} + +void 
+_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) +{ + do { + int ret = sem_post(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + } while (--count); +} + +void +_dispatch_sema4_wait(_dispatch_sema4_t *sema) +{ + int ret = sem_wait(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +} + +bool +_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) +{ + struct timespec _timeout; + int ret; + + do { + uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + ret = slowpath(sem_timedwait(sema, &_timeout)); + } while (ret == -1 && errno == EINTR); + + if (ret == -1 && errno == ETIMEDOUT) { + return true; + } + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + return false; +} +#elif USE_WIN32_SEM +// rdar://problem/8428132 +static DWORD best_resolution = 1; // 1ms + +static DWORD +_push_timer_resolution(DWORD ms) +{ + MMRESULT res; + static dispatch_once_t once; + + if (ms > 16) { + // only update timer resolution if smaller than default 15.6ms + // zero means not updated + return 0; + } + + // aim for the best resolution we can accomplish + dispatch_once(&once, ^{ + TIMECAPS tc; + MMRESULT res; + res = timeGetDevCaps(&tc, sizeof(tc)); + if (res == MMSYSERR_NOERROR) { + best_resolution = min(max(tc.wPeriodMin, best_resolution), + tc.wPeriodMax); + } + }); + + res = timeBeginPeriod(best_resolution); + if (res == TIMERR_NOERROR) { + return best_resolution; + } + // zero means not updated + return 0; +} + +// match ms parameter to result from _push_timer_resolution +DISPATCH_ALWAYS_INLINE +static inline void +_pop_timer_resolution(DWORD ms) +{ + if (ms) timeEndPeriod(ms); +} + +void +_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED) +{ + HANDLE tmp; + + // lazily allocate the semaphore port + + while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { + _dispatch_temporary_resource_shortage(); + } + + if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { + CloseHandle(tmp); + } +} + +void +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED) +{ + HANDLE sema_handle = *sema; + CloseHandle(sema_handle); + *sema = 0; +} + +void +_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) +{ + int ret = ReleaseSemaphore(*sema, count, NULL); + dispatch_assume(ret); +} + +void +_dispatch_sema4_wait(_dispatch_sema4_t *sema) +{ + WaitForSingleObject(*sema, INFINITE); +} + +bool +_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) +{ + uint64_t nsec; + DWORD msec; + DWORD resolution; + DWORD wait_result; + + nsec = _dispatch_timeout(timeout); + msec = (DWORD)(nsec / (uint64_t)1000000); + resolution = _push_timer_resolution(msec); + wait_result = WaitForSingleObject(*sema, msec); + _pop_timer_resolution(resolution); + return wait_result == WAIT_TIMEOUT; +} +#else +#error "port has to implement _dispatch_sema4_t" +#endif + #pragma mark - ulock wrappers #if HAVE_UL_COMPARE_AND_WAIT @@ -210,36 +469,12 @@ _dispatch_wake_by_address(uint32_t volatile *address) #pragma mark - thread event -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -semaphore_t -_dispatch_thread_semaphore_create(void) -{ - semaphore_t s4; - kern_return_t kr; - while (unlikely(kr = semaphore_create(mach_task_self(), &s4, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - return s4; -} - -void -_dispatch_thread_semaphore_dispose(void *ctxt)
-{ - semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt; - kern_return_t kr = semaphore_destroy(mach_task_self(), s4); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -} -#endif - void _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - kern_return_t kr = semaphore_signal(dte->dte_semaphore); + kern_return_t kr = semaphore_signal(dte->dte_sema); DISPATCH_SEMAPHORE_VERIFY_KR(kr); return; } @@ -248,9 +483,8 @@ _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) _dispatch_ulock_wake(&dte->dte_value, 0); #elif HAVE_FUTEX _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG); -#elif USE_POSIX_SEM - int rc = sem_post(&dte->dte_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#else + _dispatch_sema4_signal(&dte->dte_sema, 1); #endif } @@ -261,7 +495,7 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { kern_return_t kr; do { - kr = semaphore_wait(dte->dte_semaphore); + kr = semaphore_wait(dte->dte_sema); } while (unlikely(kr == KERN_ABORTED)); DISPATCH_SEMAPHORE_VERIFY_KR(kr); return; @@ -282,12 +516,8 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) NULL, FUTEX_PRIVATE_FLAG); #endif } -#elif USE_POSIX_SEM - int rc; - do { - rc = sem_wait(&dte->dte_sem); - } while (unlikely(rc != 0)); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _dispatch_sema4_wait(&dte->dte_sema); #endif } diff --git a/src/shims/lock.h b/src/shims/lock.h index 246c80738..22382cff6 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -30,7 +30,7 @@ #pragma mark - platform macros DISPATCH_ENUM(dispatch_lock_options, uint32_t, - DLOCK_LOCK_NONE = 0x00000000, + DLOCK_LOCK_NONE = 0x00000000, DLOCK_LOCK_DATA_CONTENTION = 0x00010000, ); @@ -41,6 +41,7 @@ typedef uint32_t dispatch_lock; #define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL) #define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc) +#define DLOCK_OWNER_INVALID ((dispatch_lock)0xffffffff) #define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001) #define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002) #define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port()) @@ -88,6 +89,7 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #elif defined(__linux__) #include +#include #include #include /* For SYS_xxx definitions */ @@ -96,6 +98,7 @@ typedef pid_t dispatch_lock_owner; #define DLOCK_OWNER_NULL ((dispatch_lock_owner)0) #define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK) +#define DLOCK_OWNER_INVALID ((dispatch_lock)DLOCK_OWNER_MASK) #define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS) #define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED) #define _dispatch_tid_self() \ @@ -145,7 +148,7 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif #ifndef HAVE_UL_COMPARE_AND_WAIT -#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200) +#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) # define HAVE_UL_COMPARE_AND_WAIT 1 #else # define HAVE_UL_COMPARE_AND_WAIT 0 @@ -153,17 +156,13 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif // HAVE_UL_COMPARE_AND_WAIT #ifndef HAVE_UL_UNFAIR_LOCK -#if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200) +#if defined(UL_UNFAIR_LOCK) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) # define HAVE_UL_UNFAIR_LOCK 1 #else # define HAVE_UL_UNFAIR_LOCK 0 #endif #endif // HAVE_UL_UNFAIR_LOCK -#ifndef 
DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX) -#endif - #ifndef HAVE_FUTEX #ifdef __linux__ #define HAVE_FUTEX 1 @@ -172,29 +171,76 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif #endif // HAVE_FUTEX +#pragma mark - semaphores + +#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +#if TARGET_OS_MAC +#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT) +#else +#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK 0 +#endif +#endif + #if USE_MACH_SEM -#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ - if (unlikely((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \ - } else if (unlikely(x)) { \ - DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ - } \ - } while (0) -#define DISPATCH_GROUP_VERIFY_KR(x) do { \ - if (unlikely((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \ - } else if (unlikely(x)) { \ - DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ - } \ - } while (0) + +typedef semaphore_t _dispatch_sema4_t; +#define _DSEMA4_POLICY_FIFO SYNC_POLICY_FIFO +#define _DSEMA4_POLICY_LIFO SYNC_POLICY_LIFO +#define _DSEMA4_TIMEOUT() KERN_OPERATION_TIMED_OUT + +#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL) +#define _dispatch_sema4_is_created(sema) (*(sema) != MACH_PORT_NULL) +void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy); + #elif USE_POSIX_SEM -#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ - if (unlikely((x) == -1)) { \ - DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \ - } \ - } while (0) + +typedef sem_t _dispatch_sema4_t; +#define _DSEMA4_POLICY_FIFO 0 +#define _DSEMA4_POLICY_LIFO 0 +#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1) + +void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy); +#define _dispatch_sema4_is_created(sema) 1 +#define _dispatch_sema4_create_slow(sema, policy) ((void)0) + +#elif USE_WIN32_SEM + +typedef HANDLE _dispatch_sema4_t; +#define _DSEMA4_POLICY_FIFO 0 +#define _DSEMA4_POLICY_LIFO 0 +#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1) + +#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0) +#define _dispatch_sema4_is_created(sema) (*(sema) != 0) +void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy); + +#else +#error "port has to implement _dispatch_sema4_t" #endif +void _dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy); +void _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count); +void _dispatch_sema4_wait(_dispatch_sema4_t *sema); +bool _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sema4_create(_dispatch_sema4_t *sema, int policy) +{ + if (!_dispatch_sema4_is_created(sema)) { + _dispatch_sema4_create_slow(sema, policy); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sema4_dispose(_dispatch_sema4_t *sema, int policy) +{ + if (_dispatch_sema4_is_created(sema)) { + _dispatch_sema4_dispose_slow(sema, policy); + } +} + #pragma mark - compare and wait DISPATCH_NOT_TAIL_CALLED @@ -224,7 +270,7 @@ void _dispatch_wake_by_address(uint32_t volatile *address); typedef struct dispatch_thread_event_s { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK union { - semaphore_t dte_semaphore; + _dispatch_sema4_t dte_sema; uint32_t dte_value; }; #elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX @@ -232,43 +278,11 @@ typedef struct 
dispatch_thread_event_s { // UINT32_MAX means waited on, but not signalled yet // 0 is the initial and final state uint32_t dte_value; -#elif USE_POSIX_SEM - sem_t dte_sem; #else -# error define dispatch_thread_event_s for your platform + _dispatch_sema4_t dte_sema; #endif } dispatch_thread_event_s, *dispatch_thread_event_t; -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -semaphore_t _dispatch_thread_semaphore_create(void); -void _dispatch_thread_semaphore_dispose(void *); - -DISPATCH_ALWAYS_INLINE -static inline semaphore_t -_dispatch_get_thread_semaphore(void) -{ - semaphore_t sema = (semaphore_t)(uintptr_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - if (unlikely(!sema)) { - return _dispatch_thread_semaphore_create(); - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return sema; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_put_thread_semaphore(semaphore_t sema) -{ - semaphore_t old_sema = (semaphore_t)(uintptr_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema); - if (unlikely(old_sema)) { - return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema); - } -} -#endif - DISPATCH_NOT_TAIL_CALLED void _dispatch_thread_event_wait_slow(dispatch_thread_event_t); void _dispatch_thread_event_signal_slow(dispatch_thread_event_t); @@ -279,15 +293,15 @@ _dispatch_thread_event_init(dispatch_thread_event_t dte) { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - dte->dte_semaphore = _dispatch_get_thread_semaphore(); + _dispatch_sema4_init(&dte->dte_sema, _DSEMA4_POLICY_FIFO); + _dispatch_sema4_create(&dte->dte_sema, _DSEMA4_POLICY_FIFO); return; } #endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX dte->dte_value = 0; -#elif USE_POSIX_SEM - int rc = sem_init(&dte->dte_sem, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _dispatch_sema4_init(&dte->dte_sema, _DSEMA4_POLICY_FIFO); #endif } @@ -308,7 +322,7 @@ _dispatch_thread_event_signal(dispatch_thread_event_t dte) // waiters do the validation return; } -#elif USE_POSIX_SEM +#else // fallthrough #endif _dispatch_thread_event_signal_slow(dte); @@ -331,7 +345,7 @@ _dispatch_thread_event_wait(dispatch_thread_event_t dte) // for any other value, go to the slowpath which checks it's not corrupt return; } -#elif USE_POSIX_SEM +#else // fallthrough #endif _dispatch_thread_event_wait_slow(dte); @@ -343,16 +357,15 @@ _dispatch_thread_event_destroy(dispatch_thread_event_t dte) { #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - _dispatch_put_thread_semaphore(dte->dte_semaphore); + _dispatch_sema4_dispose(&dte->dte_sema, _DSEMA4_POLICY_FIFO); return; } #endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX // nothing to do dispatch_assert(dte->dte_value == 0); -#elif USE_POSIX_SEM - int rc = sem_destroy(&dte->dte_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _dispatch_sema4_dispose(&dte->dte_sema, _DSEMA4_POLICY_FIFO); #endif } @@ -523,15 +536,30 @@ _dispatch_once_gate_tryenter(dispatch_once_gate_t l) _dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \ DLOCK_LOCK_NONE) +DISPATCH_ALWAYS_INLINE +static inline dispatch_once_t +_dispatch_once_xchg_done(dispatch_once_t *pred) +{ +#if defined(__i386__) || defined(__x86_64__) + // On Intel, any load is a load-acquire, so we don't need to be fancy + return os_atomic_xchg(pred, DLOCK_ONCE_DONE, release); +#elif defined(__linux__) + if (unlikely(syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0) < 0)) 
{ + DISPATCH_INTERNAL_CRASH(errno, "sys_membarrier not supported"); + } + return os_atomic_xchg(pred, DLOCK_ONCE_DONE, relaxed); +#else +# error dispatch_once algorithm not available for this port +#endif +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_once_gate_broadcast(dispatch_once_gate_t l) { dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self(); - // see once.c for explanation about this trick - os_atomic_maximally_synchronizing_barrier(); - // above assumed to contain release barrier - tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed); + + tid_cur = _dispatch_once_xchg_done(&l->dgo_once); if (likely(tid_cur == tid_self)) return; _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur); } diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h index 8af33ead9..fe23a1d2e 100644 --- a/src/shims/perfmon.h +++ b/src/shims/perfmon.h @@ -27,26 +27,22 @@ #ifndef __DISPATCH_SHIMS_PERFMON__ #define __DISPATCH_SHIMS_PERFMON__ -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION - -#if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \ - (defined(__i386__) || defined(__x86_64__)) -#ifdef __LP64__ -#define _dispatch_perfmon_workitem_inc() asm("incq %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_perfmon_workitem_dec() asm("decq %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") -#else -#define _dispatch_perfmon_workitem_inc() asm("incl %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_perfmon_workitem_dec() asm("decl %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") +#if DISPATCH_PERF_MON +#if DISPATCH_INTROSPECTION +#error invalid configuration #endif -#else /* !USE_APPLE_TSD_OPTIMIZATIONS */ + +typedef enum { + perfmon_thread_no_trace = 0, + perfmon_thread_event_no_steal, // 1) Event threads that couldn't steal + perfmon_thread_event_steal, // 2) Event threads failing to steal very late + perfmon_thread_worker_non_oc, // 3) Non overcommit threads finding + // nothing on the root queues + perfmon_thread_worker_oc, // 4) Overcommit thread finding nothing to do + perfmon_thread_manager, +} perfmon_thread_type; + +DISPATCH_ALWAYS_INLINE static inline void _dispatch_perfmon_workitem_inc(void) { @@ -54,6 +50,8 @@ _dispatch_perfmon_workitem_inc(void) cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt); } + +DISPATCH_ALWAYS_INLINE static inline void _dispatch_perfmon_workitem_dec(void) { @@ -61,18 +59,38 @@ _dispatch_perfmon_workitem_dec(void) cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt); } -#endif /* USE_APPLE_TSD_OPTIMIZATIONS */ +#define DISPATCH_PERF_MON_ARGS_PROTO , uint64_t perfmon_start +#define DISPATCH_PERF_MON_ARGS , perfmon_start +#define DISPATCH_PERF_MON_VAR uint64_t perfmon_start; + +#define _dispatch_perfmon_start_impl(trace) ({ \ + if (trace) _dispatch_ktrace0(DISPATCH_PERF_MON_worker_thread_start); \ + perfmon_start = _dispatch_absolute_time(); \ + }) #define _dispatch_perfmon_start() \ - uint64_t start = _dispatch_absolute_time() -#define _dispatch_perfmon_end() \ - _dispatch_queue_merge_stats(start) + DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(true) 
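A sketch of how the reworked perfmon macros are expected to pair up around a worker drain, assuming the _dispatch_perfmon_end() definition that follows; example_worker_drain, example_drain_items and the overcommit flag are hypothetical stand-ins, not part of this patch:

static void
example_drain_items(void)
{
	// hypothetical: pop and invoke queued work items here; each invoked
	// item is expected to call _dispatch_perfmon_workitem_inc()/_dec()
}

static void
example_worker_drain(bool overcommit)
{
	// Declares perfmon_start and stamps it with _dispatch_absolute_time().
	_dispatch_perfmon_start();
	example_drain_items();
	// Merges the per-thread counters and records which kind of thread drained.
	_dispatch_perfmon_end(overcommit ? perfmon_thread_worker_oc
			: perfmon_thread_worker_non_oc);
}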
+#define _dispatch_perfmon_start_notrace() \ + DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(false) +#define _dispatch_perfmon_end(thread_type) \ + _dispatch_queue_merge_stats(perfmon_start, true, thread_type) +#define _dispatch_perfmon_end_notrace() \ + _dispatch_queue_merge_stats(perfmon_start, false, perfmon_thread_no_trace) + +void _dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type); + #else +#define DISPATCH_PERF_MON_ARGS_PROTO +#define DISPATCH_PERF_MON_ARGS +#define DISPATCH_PERF_MON_VAR #define _dispatch_perfmon_workitem_inc() #define _dispatch_perfmon_workitem_dec() +#define _dispatch_perfmon_start_impl(trace) #define _dispatch_perfmon_start() -#define _dispatch_perfmon_end() +#define _dispatch_perfmon_end(thread_type) +#define _dispatch_perfmon_start_notrace() +#define _dispatch_perfmon_end_notrace() #endif // DISPATCH_PERF_MON diff --git a/src/shims/priority.h b/src/shims/priority.h new file mode 100644 index 000000000..948e4c7af --- /dev/null +++ b/src/shims/priority.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SHIMS_PRIORITY__ +#define __DISPATCH_SHIMS_PRIORITY__ + +#if HAVE_PTHREAD_QOS_H && __has_include() +#include +#include +#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#endif +#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 +#endif +#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG +#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#endif +#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#endif +#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#endif +#else // HAVE_PTHREAD_QOS_H +OS_ENUM(qos_class, unsigned int, + QOS_CLASS_USER_INTERACTIVE = 0x21, + QOS_CLASS_USER_INITIATED = 0x19, + QOS_CLASS_DEFAULT = 0x15, + QOS_CLASS_UTILITY = 0x11, + QOS_CLASS_BACKGROUND = 0x09, + QOS_CLASS_MAINTENANCE = 0x05, + QOS_CLASS_UNSPECIFIED = 0x00, +); +typedef unsigned long pthread_priority_t; +#define QOS_MIN_RELATIVE_PRIORITY (-15) +#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) +#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 +#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) +#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 +#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 + +#endif // HAVE_PTHREAD_QOS_H + +typedef uint32_t dispatch_qos_t; +typedef uint32_t dispatch_priority_t; +typedef uint32_t dispatch_priority_t; +typedef uint16_t dispatch_priority_requested_t; + +#define DISPATCH_QOS_UNSPECIFIED ((dispatch_qos_t)0) +#define DISPATCH_QOS_MAINTENANCE ((dispatch_qos_t)1) +#define DISPATCH_QOS_BACKGROUND ((dispatch_qos_t)2) +#define DISPATCH_QOS_UTILITY ((dispatch_qos_t)3) +#define DISPATCH_QOS_DEFAULT ((dispatch_qos_t)4) +#define DISPATCH_QOS_USER_INITIATED ((dispatch_qos_t)5) +#define DISPATCH_QOS_USER_INTERACTIVE ((dispatch_qos_t)6) +#define DISPATCH_QOS_MAX DISPATCH_QOS_USER_INTERACTIVE +#define DISPATCH_QOS_SATURATED ((dispatch_qos_t)15) + +#define DISPATCH_PRIORITY_RELPRI_MASK ((dispatch_priority_t)0x000000ff) +#define DISPATCH_PRIORITY_RELPRI_SHIFT 0 +#define DISPATCH_PRIORITY_QOS_MASK ((dispatch_priority_t)0x0000ff00) +#define DISPATCH_PRIORITY_QOS_SHIFT 8 +#define DISPATCH_PRIORITY_REQUESTED_MASK ((dispatch_priority_t)0x0000ffff) +#define DISPATCH_PRIORITY_OVERRIDE_MASK ((dispatch_priority_t)0x00ff0000) +#define DISPATCH_PRIORITY_OVERRIDE_SHIFT 16 +#define DISPATCH_PRIORITY_FLAGS_MASK ((dispatch_priority_t)0xff000000) + +#define DISPATCH_PRIORITY_SATURATED_OVERRIDE DISPATCH_PRIORITY_OVERRIDE_MASK + +#define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG +#define DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG +#define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \ + (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | \ + DISPATCH_PRIORITY_FLAG_MANAGER) + +// not passed to pthread +#define DISPATCH_PRIORITY_FLAG_INHERIT ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG +#define DISPATCH_PRIORITY_FLAG_ENFORCE 
((dispatch_priority_t)0x10000000) // _PTHREAD_PRIORITY_ENFORCE_FLAG +#define DISPATCH_PRIORITY_FLAG_ROOTQUEUE ((dispatch_priority_t)0x20000000) // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG + +#pragma mark dispatch_qos + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_from_qos_class(qos_class_t cls) +{ + switch ((unsigned int)cls) { + case QOS_CLASS_USER_INTERACTIVE: return DISPATCH_QOS_USER_INTERACTIVE; + case QOS_CLASS_USER_INITIATED: return DISPATCH_QOS_USER_INITIATED; + case QOS_CLASS_DEFAULT: return DISPATCH_QOS_DEFAULT; + case QOS_CLASS_UTILITY: return DISPATCH_QOS_UTILITY; + case QOS_CLASS_BACKGROUND: return DISPATCH_QOS_BACKGROUND; + case QOS_CLASS_MAINTENANCE: return DISPATCH_QOS_MAINTENANCE; + default: return DISPATCH_QOS_UNSPECIFIED; + } +} + +DISPATCH_ALWAYS_INLINE +static inline qos_class_t +_dispatch_qos_to_qos_class(dispatch_qos_t qos) +{ + switch (qos) { + case DISPATCH_QOS_USER_INTERACTIVE: return QOS_CLASS_USER_INTERACTIVE; + case DISPATCH_QOS_USER_INITIATED: return QOS_CLASS_USER_INITIATED; + case DISPATCH_QOS_DEFAULT: return QOS_CLASS_DEFAULT; + case DISPATCH_QOS_UTILITY: return QOS_CLASS_UTILITY; + case DISPATCH_QOS_BACKGROUND: return QOS_CLASS_BACKGROUND; + case DISPATCH_QOS_MAINTENANCE: return (qos_class_t)QOS_CLASS_MAINTENANCE; + default: return QOS_CLASS_UNSPECIFIED; + } +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_from_queue_priority(long priority) +{ + switch (priority) { + case DISPATCH_QUEUE_PRIORITY_BACKGROUND: return DISPATCH_QOS_BACKGROUND; + case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return DISPATCH_QOS_UTILITY; + case DISPATCH_QUEUE_PRIORITY_LOW: return DISPATCH_QOS_UTILITY; + case DISPATCH_QUEUE_PRIORITY_DEFAULT: return DISPATCH_QOS_DEFAULT; + case DISPATCH_QUEUE_PRIORITY_HIGH: return DISPATCH_QOS_USER_INITIATED; + default: return _dispatch_qos_from_qos_class((qos_class_t)priority); + } +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_from_pp(pthread_priority_t pp) +{ + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + pp >>= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; + return (dispatch_qos_t)__builtin_ffs((int)pp); +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_qos_to_pp(dispatch_qos_t qos) +{ + pthread_priority_t pp; + pp = 1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT); + return pp | _PTHREAD_PRIORITY_PRIORITY_MASK; +} + +// including maintenance +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_qos_is_background(dispatch_qos_t qos) +{ + return qos && qos <= DISPATCH_QOS_BACKGROUND; +} + +#pragma mark dispatch_priority + +#define _dispatch_priority_make(qos, relpri) \ + (qos ? 
((((qos) << DISPATCH_PRIORITY_QOS_SHIFT) & DISPATCH_PRIORITY_QOS_MASK) | \ + ((dispatch_priority_t)(relpri - 1) & DISPATCH_PRIORITY_RELPRI_MASK)) : 0) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_priority_t +_dispatch_priority_with_override_qos(dispatch_priority_t pri, + dispatch_qos_t oqos) +{ + pri &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + pri |= oqos << DISPATCH_PRIORITY_OVERRIDE_SHIFT; + return pri; +} + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_priority_relpri(dispatch_priority_t dbp) +{ + if (dbp & DISPATCH_PRIORITY_QOS_MASK) { + return (int8_t)(dbp & DISPATCH_PRIORITY_RELPRI_MASK) + 1; + } + return 0; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_priority_qos(dispatch_priority_t dbp) +{ + dbp &= DISPATCH_PRIORITY_QOS_MASK; + return dbp >> DISPATCH_PRIORITY_QOS_SHIFT; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_priority_override_qos(dispatch_priority_t dbp) +{ + dbp &= DISPATCH_PRIORITY_OVERRIDE_MASK; + return dbp >> DISPATCH_PRIORITY_OVERRIDE_SHIFT; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_priority_t +_dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags) +{ + dispatch_assert(!(pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG)); + + dispatch_priority_t dbp; + if (keep_flags) { + dbp = pp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | + DISPATCH_PRIORITY_RELPRI_MASK); + } else { + dbp = pp & DISPATCH_PRIORITY_RELPRI_MASK; + } + + dbp |= _dispatch_qos_from_pp(pp) << DISPATCH_PRIORITY_QOS_SHIFT; + return dbp; +} +#define _dispatch_priority_from_pp(pp) \ + _dispatch_priority_from_pp_impl(pp, true) +#define _dispatch_priority_from_pp_strip_flags(pp) \ + _dispatch_priority_from_pp_impl(pp, false) + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_to_pp_impl(dispatch_priority_t dbp, bool keep_flags) +{ + pthread_priority_t pp; + if (keep_flags) { + pp = dbp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | + DISPATCH_PRIORITY_RELPRI_MASK); + } else { + pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK; + } + dispatch_qos_t qos = _dispatch_priority_qos(dbp); + if (qos) { + pp |= (1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)); + } + return pp; +} +#define _dispatch_priority_to_pp(pp) \ + _dispatch_priority_to_pp_impl(pp, true) +#define _dispatch_priority_to_pp_strip_flags(pp) \ + _dispatch_priority_to_pp_impl(pp, false) + +#endif // __DISPATCH_SHIMS_PRIORITY__ diff --git a/src/shims/time.h b/src/shims/time.h index 7b297711c..3010f08da 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -40,7 +40,11 @@ sleep(unsigned int seconds) } #endif -uint64_t _dispatch_get_nanoseconds(void); +typedef enum { + DISPATCH_CLOCK_WALL, + DISPATCH_CLOCK_MACH, +#define DISPATCH_CLOCK_COUNT (DISPATCH_CLOCK_MACH + 1) +} dispatch_clock_t; #if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME // x86 currently implements mach time in nanoseconds @@ -73,14 +77,14 @@ _dispatch_time_mach2nano(uint64_t machtime) _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - if (!machtime || slowpath(data->ratio_1_to_1)) { + if (unlikely(!machtime || data->ratio_1_to_1)) { return machtime; } if (machtime >= INT64_MAX) { return INT64_MAX; } - long double big_tmp = ((long double)machtime * data->frac) + .5; - if (slowpath(big_tmp >= INT64_MAX)) { + long double big_tmp = ((long double)machtime * data->frac) + .5L; + if (unlikely(big_tmp >= INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; @@ -92,50 
+96,120 @@ _dispatch_time_nano2mach(uint64_t nsec) _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - if (!nsec || slowpath(data->ratio_1_to_1)) { + if (unlikely(!nsec || data->ratio_1_to_1)) { return nsec; } if (nsec >= INT64_MAX) { return INT64_MAX; } - long double big_tmp = ((long double)nsec / data->frac) + .5; - if (slowpath(big_tmp >= INT64_MAX)) { + long double big_tmp = ((long double)nsec / data->frac) + .5L; + if (unlikely(big_tmp >= INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; } #endif +/* XXXRW: Some kind of overflow detection needed? */ +#define _dispatch_timespec_to_nano(ts) \ + ((uint64_t)(ts).tv_sec * NSEC_PER_SEC + (uint64_t)(ts).tv_nsec) +#define _dispatch_timeval_to_nano(tv) \ + ((uint64_t)(tv).tv_sec * NSEC_PER_SEC + \ + (uint64_t)(tv).tv_usec * NSEC_PER_USEC) + +static inline uint64_t +_dispatch_get_nanoseconds(void) +{ + dispatch_static_assert(sizeof(NSEC_PER_SEC) == 8); + dispatch_static_assert(sizeof(USEC_PER_SEC) == 8); + +#if TARGET_OS_MAC && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) + return clock_gettime_nsec_np(CLOCK_REALTIME); +#elif HAVE_DECL_CLOCK_REALTIME + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif TARGET_OS_WIN32 + // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). + FILETIME ft; + ULARGE_INTEGER li; + GetSystemTimeAsFileTime(&ft); + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + return li.QuadPart * 100ull; +#else + struct timeval tv; + dispatch_assert_zero(gettimeofday(&tv, NULL)); + return _dispatch_timeval_to_nano(tv); +#endif +} + static inline uint64_t _dispatch_absolute_time(void) { #if HAVE_MACH_ABSOLUTE_TIME return mach_absolute_time(); +#elif HAVE_DECL_CLOCK_UPTIME && !defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif HAVE_DECL_CLOCK_MONOTONIC + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts)); + return _dispatch_timespec_to_nano(ts); #elif TARGET_OS_WIN32 LARGE_INTEGER now; return QueryPerformanceCounter(&now) ? now.QuadPart : 0; #else - struct timespec ts; - int ret; +#error platform needs to implement _dispatch_absolute_time() +#endif +} -#if HAVE_DECL_CLOCK_UPTIME - ret = clock_gettime(CLOCK_UPTIME, &ts); -#elif HAVE_DECL_CLOCK_MONOTONIC - ret = clock_gettime(CLOCK_MONOTONIC, &ts); +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_approximate_time(void) +{ +#if HAVE_MACH_APPROXIMATE_TIME + return mach_approximate_time(); +#elif HAVE_DECL_CLOCK_UPTIME_FAST && !defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_UPTIME_FAST, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_REALTIME_COARSE, &ts)); + return _dispatch_timespec_to_nano(ts); #else -#error "clock_gettime: no supported absolute time clock" + return _dispatch_absolute_time(); #endif - (void)dispatch_assume_zero(ret); +} - /* XXXRW: Some kind of overflow detection needed? 
*/ - return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); -#endif // HAVE_MACH_ABSOLUTE_TIME +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_now(dispatch_clock_t clock) +{ + switch (clock) { + case DISPATCH_CLOCK_MACH: + return _dispatch_absolute_time(); + case DISPATCH_CLOCK_WALL: + return _dispatch_get_nanoseconds(); + } + __builtin_unreachable(); } +typedef struct { + uint64_t nows[DISPATCH_CLOCK_COUNT]; +} dispatch_clock_now_cache_s, *dispatch_clock_now_cache_t; + +DISPATCH_ALWAYS_INLINE static inline uint64_t -_dispatch_approximate_time(void) +_dispatch_time_now_cached(dispatch_clock_t clock, + dispatch_clock_now_cache_t cache) { - return _dispatch_absolute_time(); + if (likely(cache->nows[clock])) { + return cache->nows[clock]; + } + return cache->nows[clock] = _dispatch_time_now(clock); } #endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index 2e3ece8b0..f3d3cea5f 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -59,6 +59,16 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t; #endif #if DISPATCH_USE_DIRECT_TSD +#ifndef __TSD_THREAD_QOS_CLASS +#define __TSD_THREAD_QOS_CLASS 4 +#endif +#ifndef __TSD_RETURN_TO_KERNEL +#define __TSD_RETURN_TO_KERNEL 5 +#endif + +static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; +static const unsigned long dispatch_r2k_key = __TSD_RETURN_TO_KERNEL; + // dispatch_queue_key & dispatch_frame_key need to be contiguous // in that order, and queue_key to be an even number static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; @@ -67,21 +77,13 @@ static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; static const unsigned long dispatch_context_key = __PTK_LIBDISPATCH_KEY3; static const unsigned long dispatch_pthread_root_queue_observer_hooks_key = __PTK_LIBDISPATCH_KEY4; -static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5; +static const unsigned long dispatch_basepri_key = __PTK_LIBDISPATCH_KEY5; #if DISPATCH_INTROSPECTION static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; #elif DISPATCH_PERF_MON static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif -static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY7; - -#ifndef __TSD_THREAD_QOS_CLASS -#define __TSD_THREAD_QOS_CLASS 4 -#endif -#ifndef __TSD_THREAD_VOUCHER -#define __TSD_THREAD_VOUCHER 6 -#endif -static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; +static const unsigned long dispatch_wlh_key = __PTK_LIBDISPATCH_KEY7; static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY8; static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9; @@ -108,16 +110,15 @@ struct dispatch_tsd { void *dispatch_cache_key; void *dispatch_context_key; void *dispatch_pthread_root_queue_observer_hooks_key; - void *dispatch_defaultpriority_key; + void *dispatch_basepri_key; #if DISPATCH_INTROSPECTION void *dispatch_introspection_key; #elif DISPATCH_PERF_MON void *dispatch_bcounter_key; -#endif -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - void *dispatch_sema4_key; #endif void *dispatch_priority_key; + void *dispatch_r2k_key; + void *dispatch_wlh_key; void *dispatch_voucher_key; void *dispatch_deferred_items_key; }; @@ -160,19 +161,20 @@ _dispatch_get_tsd_base(void) _dispatch_thread_setspecific(k2,(p)[1]) ) #else +extern pthread_key_t dispatch_priority_key; +extern pthread_key_t dispatch_r2k_key; extern pthread_key_t dispatch_queue_key; extern pthread_key_t 
dispatch_frame_key; extern pthread_key_t dispatch_cache_key; extern pthread_key_t dispatch_context_key; extern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; -extern pthread_key_t dispatch_defaultpriority_key; +extern pthread_key_t dispatch_basepri_key; #if DISPATCH_INTROSPECTION extern pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON extern pthread_key_t dispatch_bcounter_key; #endif -extern pthread_key_t dispatch_sema4_key; -extern pthread_key_t dispatch_priority_key; +extern pthread_key_t dispatch_wlh_key; extern pthread_key_t dispatch_voucher_key; extern pthread_key_t dispatch_deferred_items_key; diff --git a/src/shims/yield.h b/src/shims/yield.h index 1850aeeed..67f8679ac 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -31,31 +31,40 @@ #pragma mark _dispatch_wait_until #if DISPATCH_HW_CONFIG_UP -#define _dispatch_wait_until(c) do { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ int _spins = 0; \ - while (!fastpath(c)) { \ + for (;;) { \ + if (likely(_c = (c))) break; \ _spins++; \ _dispatch_preemption_yield(_spins); \ - } } while (0) + } \ + _c; }) #elif TARGET_OS_EMBEDDED // #ifndef DISPATCH_WAIT_SPINS #define DISPATCH_WAIT_SPINS 1024 #endif -#define _dispatch_wait_until(c) do { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ int _spins = -(DISPATCH_WAIT_SPINS); \ - while (!fastpath(c)) { \ + for (;;) { \ + if (likely(_c = (c))) break; \ if (slowpath(_spins++ >= 0)) { \ _dispatch_preemption_yield(_spins); \ } else { \ dispatch_hardware_pause(); \ } \ - } } while (0) + } \ + _c; }) #else -#define _dispatch_wait_until(c) do { \ - while (!fastpath(c)) { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ + for (;;) { \ + if (likely(_c = (c))) break; \ dispatch_hardware_pause(); \ - } } while (0) + } \ + _c; }) #endif #pragma mark - diff --git a/src/source.c b/src/source.c index 7537f3223..c2020462c 100644 --- a/src/source.c +++ b/src/source.c @@ -19,216 +19,53 @@ */ #include "internal.h" -#if HAVE_MACH -#include "protocol.h" -#include "protocolServer.h" -#endif -#include - -#define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1 -#define DKEV_UNREGISTER_DISCONNECTED 0x2 -#define DKEV_UNREGISTER_REPLY_REMOVE 0x4 -#define DKEV_UNREGISTER_WAKEUP 0x8 static void _dispatch_source_handler_free(dispatch_source_t ds, long kind); -static void _dispatch_source_merge_kevent(dispatch_source_t ds, - const _dispatch_kevent_qos_s *ke); -static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, - pthread_priority_t pp, uint32_t *flgp); -static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, - unsigned int options); -static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags); -static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke); -static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke); -static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke); -static void _dispatch_timers_unregister(dispatch_source_t ds, - dispatch_kevent_t dk); -static void _dispatch_timers_update(dispatch_source_t ds); -static void _dispatch_timer_aggregates_check(void); -static void _dispatch_timer_aggregates_register(dispatch_source_t ds); -static void _dispatch_timer_aggregates_update(dispatch_source_t ds, - unsigned int tidx); -static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds, - unsigned int tidx); +static void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); + +static void _dispatch_timers_update(dispatch_unote_t du); +static void 
_dispatch_timers_unregister(dispatch_timer_source_refs_t dt); + +static void _dispatch_source_timer_configure(dispatch_source_t ds); static inline unsigned long _dispatch_source_timer_data( - dispatch_source_refs_t dr, unsigned long prev); -static void _dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke); -static long _dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke); -static void _dispatch_memorypressure_init(void); -#if HAVE_MACH -static void _dispatch_mach_host_calendar_change_register(void); -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -static void _dispatch_mach_recv_msg_buf_init(void); -static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags); -#endif -static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags); -static void _dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke); -static mach_msg_size_t _dispatch_kevent_mach_msg_size( - _dispatch_kevent_qos_s *ke); -#else -static inline void _dispatch_mach_host_calendar_change_register(void) {} -static inline void _dispatch_mach_recv_msg_buf_init(void) {} -#endif -static const char * _evfiltstr(short filt); -#if DISPATCH_DEBUG -static void dispatch_kevent_debug(const char *verb, - const _dispatch_kevent_qos_s *kev, int i, int n, - const char *function, unsigned int line); -static void _dispatch_kevent_debugger(void *context); -#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ - dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) -#else -static inline void -dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, - int i, int n, const char *function, unsigned int line) -{ - (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line; -} -#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() -#endif -#define _dispatch_kevent_debug(verb, _kev) \ - dispatch_kevent_debug(verb, _kev, 0, 1, __FUNCTION__, __LINE__) -#define _dispatch_kevent_debug_n(verb, _kev, i, n) \ - dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__) -#ifndef DISPATCH_MGR_QUEUE_DEBUG -#define DISPATCH_MGR_QUEUE_DEBUG 0 -#endif -#if DISPATCH_MGR_QUEUE_DEBUG -#define _dispatch_kevent_mgr_debug _dispatch_kevent_debug -#else -static inline void -_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED) {} -#endif + dispatch_source_t ds, dispatch_unote_t du); #pragma mark - #pragma mark dispatch_source_t dispatch_source_t -dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, +dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, unsigned long mask, dispatch_queue_t dq) { + dispatch_source_refs_t dr; + dispatch_source_t ds; + // ensure _dispatch_evfilt_machport_direct_enabled is initialized _dispatch_root_queues_init(); - const _dispatch_kevent_qos_s *proto_kev = &type->ke; - dispatch_source_t ds; - dispatch_kevent_t dk; - // input validation - if (type == NULL || (mask & ~type->mask)) { + dr = dux_create(dst, handle, mask)._dr; + if (unlikely(!dr)) { return DISPATCH_BAD_INPUT; } - if (type->mask && !mask) { - // expect a non-zero mask when the type declares one ... 
except - switch (type->ke.filter) { - case DISPATCH_EVFILT_TIMER: - break; // timers don't need masks -#if DISPATCH_USE_VM_PRESSURE - case EVFILT_VM: - break; // type->init forces the only acceptable mask -#endif - case DISPATCH_EVFILT_MACH_NOTIFICATION: - break; // type->init handles zero mask as a legacy case - default: - // otherwise reject as invalid input - return DISPATCH_BAD_INPUT; - } - } - - switch (type->ke.filter) { - case EVFILT_SIGNAL: - if (handle >= NSIG) { - return DISPATCH_BAD_INPUT; - } - break; - case EVFILT_FS: -#if DISPATCH_USE_VM_PRESSURE - case EVFILT_VM: -#endif -#if DISPATCH_USE_MEMORYSTATUS - case EVFILT_MEMORYSTATUS: -#endif - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - if (handle) { - return DISPATCH_BAD_INPUT; - } - break; - case DISPATCH_EVFILT_TIMER: - if ((handle == 0) != (type->ke.ident == 0)) { - return DISPATCH_BAD_INPUT; - } - break; - default: - break; - } ds = _dispatch_alloc(DISPATCH_VTABLE(source), sizeof(struct dispatch_source_s)); // Initialize as a queue first, then override some settings below. - _dispatch_queue_init(ds->_as_dq, DQF_NONE, 1, true); + _dispatch_queue_init(ds->_as_dq, DQF_LEGACY, 1, true); ds->dq_label = "source"; ds->do_ref_cnt++; // the reference the manager queue holds - - switch (type->ke.filter) { - case DISPATCH_EVFILT_CUSTOM_OR: - dk = DISPATCH_KEV_CUSTOM_OR; - break; - case DISPATCH_EVFILT_CUSTOM_ADD: - dk = DISPATCH_KEV_CUSTOM_ADD; - break; - default: - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = *proto_kev; - dk->dk_kevent.ident = handle; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags |= (uint32_t)mask; - dk->dk_kevent.udata = (_dispatch_kevent_qos_udata_t)dk; - TAILQ_INIT(&dk->dk_sources); - - ds->ds_pending_data_mask = dk->dk_kevent.fflags; - ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; - if (EV_UDATA_SPECIFIC & proto_kev->flags) { - dk->dk_kevent.flags |= EV_DISPATCH; - ds->ds_is_direct_kevent = true; - ds->ds_needs_rearm = true; - } - break; - } - ds->ds_dkev = dk; - - if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { - ds->ds_needs_rearm = true; - } else if (!(EV_CLEAR & proto_kev->flags)) { - // we cheat and use EV_CLEAR to mean a "flag thingy" - ds->ds_is_adder = true; - } - // Some sources require special processing - if (type->init != NULL) { - type->init(ds, type, handle, mask, dq); - } - dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); - if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) { - // see _dispatch_source_merge_kevent - dispatch_assert(!(dk->dk_kevent.flags & EV_ONESHOT)); - dispatch_assert(dk->dk_kevent.flags & EV_DISPATCH); - dispatch_assert(dk->dk_kevent.flags & EV_UDATA_SPECIFIC); - } - - if (fastpath(!ds->ds_refs)) { - ds->ds_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_source_refs_s)); - } - ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds); + ds->ds_refs = dr; + dr->du_owner_wref = _dispatch_ptr2wref(ds); if (slowpath(!dq)) { - dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); + dq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); } else { - _dispatch_retain(dq); + _dispatch_retain((dispatch_queue_t _Nonnull)dq); } ds->do_targetq = dq; + if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_INTERVAL)) { + _dispatch_source_set_interval(ds, handle); + } _dispatch_object_debug(ds, "%s", __func__); return ds; } @@ -240,13 +77,19 @@ _dispatch_source_dispose(dispatch_source_t ds) _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER); 
_dispatch_source_handler_free(ds, DS_EVENT_HANDLER); _dispatch_source_handler_free(ds, DS_CANCEL_HANDLER); - free(ds->ds_refs); + _dispatch_unote_dispose(ds->ds_refs); + ds->ds_refs = NULL; _dispatch_queue_destroy(ds->_as_dq); } void _dispatch_source_xref_dispose(dispatch_source_t ds) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (unlikely(!(dqf & (DQF_LEGACY|DSF_CANCELED)))) { + DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been " + "cancelled, but has a mandatory cancel handler"); + } dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); } @@ -259,78 +102,121 @@ dispatch_source_testcancel(dispatch_source_t ds) unsigned long dispatch_source_get_mask(dispatch_source_t ds) { - unsigned long mask = ds->ds_pending_data_mask; - if (ds->ds_vmpressure_override) { - mask = NOTE_VM_PRESSURE; + dispatch_source_refs_t dr = ds->ds_refs; + if (ds->dq_atomic_flags & DSF_CANCELED) { + return 0; + } +#if DISPATCH_USE_MEMORYSTATUS + if (dr->du_vmpressure_override) { + return NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorypressure_override) { - mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; + if (dr->du_memorypressure_override) { + return NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif - return mask; +#endif // DISPATCH_USE_MEMORYSTATUS + return dr->du_fflags; } uintptr_t dispatch_source_get_handle(dispatch_source_t ds) { - unsigned int handle = (unsigned int)ds->ds_ident_hack; + dispatch_source_refs_t dr = ds->ds_refs; #if TARGET_IPHONE_SIMULATOR - if (ds->ds_memorypressure_override) { - handle = 0; + if (dr->du_memorypressure_override) { + return 0; } #endif - return handle; + return dr->du_ident; } unsigned long dispatch_source_get_data(dispatch_source_t ds) { - unsigned long data = ds->ds_data; - if (ds->ds_vmpressure_override) { - data = NOTE_VM_PRESSURE; +#if DISPATCH_USE_MEMORYSTATUS + dispatch_source_refs_t dr = ds->ds_refs; + if (dr->du_vmpressure_override) { + return NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorypressure_override) { - data = NOTE_MEMORYSTATUS_PRESSURE_WARN; + if (dr->du_memorypressure_override) { + return NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif - return data; +#endif // DISPATCH_USE_MEMORYSTATUS + uint64_t value = os_atomic_load2o(ds, ds_data, relaxed); + return (unsigned long)( + ds->ds_refs->du_data_action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET + ? 
DISPATCH_SOURCE_GET_DATA(value) : value); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_source_merge_data2(dispatch_source_t ds, - pthread_priority_t pp, unsigned long val) -{ - _dispatch_kevent_qos_s kev = { - .fflags = (typeof(kev.fflags))val, - .data = (typeof(kev.data))val, -#if DISPATCH_USE_KEVENT_QOS - .qos = (_dispatch_kevent_priority_t)pp, -#endif - }; -#if !DISPATCH_USE_KEVENT_QOS - (void)pp; -#endif - - dispatch_assert(ds->ds_dkev == DISPATCH_KEV_CUSTOM_OR || - ds->ds_dkev == DISPATCH_KEV_CUSTOM_ADD); - _dispatch_kevent_debug("synthetic data", &kev); - _dispatch_source_merge_kevent(ds, &kev); +size_t +dispatch_source_get_extended_data(dispatch_source_t ds, + dispatch_source_extended_data_t edata, size_t size) +{ + size_t target_size = MIN(size, + sizeof(struct dispatch_source_extended_data_s)); + if (size > 0) { + unsigned long data, status = 0; + if (ds->ds_refs->du_data_action + == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { + uint64_t combined = os_atomic_load(&ds->ds_data, relaxed); + data = DISPATCH_SOURCE_GET_DATA(combined); + status = DISPATCH_SOURCE_GET_STATUS(combined); + } else { + data = dispatch_source_get_data(ds); + } + if (size >= offsetof(struct dispatch_source_extended_data_s, data) + + sizeof(edata->data)) { + edata->data = data; + } + if (size >= offsetof(struct dispatch_source_extended_data_s, status) + + sizeof(edata->status)) { + edata->status = status; + } + if (size > sizeof(struct dispatch_source_extended_data_s)) { + memset( + (char *)edata + sizeof(struct dispatch_source_extended_data_s), + 0, size - sizeof(struct dispatch_source_extended_data_s)); + } + } + return target_size; } +DISPATCH_NOINLINE void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, + unsigned long val) { - _dispatch_source_merge_data2(ds, 0, val); + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + int filter = ds->ds_refs->du_filter; + + if (unlikely(dqf & (DSF_CANCELED | DSF_DELETED))) { + return; + } + + switch (filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + os_atomic_add2o(ds, ds_pending_data, val, relaxed); + break; + case DISPATCH_EVFILT_CUSTOM_OR: + os_atomic_or2o(ds, ds_pending_data, val, relaxed); + break; + case DISPATCH_EVFILT_CUSTOM_REPLACE: + os_atomic_store2o(ds, ds_pending_data, val, relaxed); + break; + default: + DISPATCH_CLIENT_CRASH(filter, "Invalid source type"); + } + + dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_FLUSH); } void -_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, - unsigned long val) +dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) { - _dispatch_source_merge_data2(ds, pp, val); + _dispatch_source_merge_data(ds, 0, val); } #pragma mark - @@ -450,6 +336,10 @@ _dispatch_source_set_handler(dispatch_source_t ds, long kind, _dispatch_source_handler_replace(ds, kind, dc); return dx_vtable(ds)->do_resume(ds, false); } + if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { + DISPATCH_CLIENT_CRASH(kind, "Cannot change a handler of this source " + "after it has been activated"); + } _dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds); if (kind == DS_REGISTN_HANDLER) { _dispatch_bug_deprecated("Setting registration handler after " @@ -480,27 +370,40 @@ dispatch_source_set_event_handler_f(dispatch_source_t ds, _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } -void -_dispatch_source_set_event_handler_continuation(dispatch_source_t ds, - dispatch_continuation_t 
dc) +#ifdef __BLOCKS__ +DISPATCH_NOINLINE +static void +_dispatch_source_set_cancel_handler(dispatch_source_t ds, + dispatch_block_t handler) { - _dispatch_trace_continuation_push(ds->_as_dq, dc); - _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); + _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } -#ifdef __BLOCKS__ void dispatch_source_set_cancel_handler(dispatch_source_t ds, dispatch_block_t handler) { - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); - _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); + if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { + DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on " + "this source"); + } + return _dispatch_source_set_cancel_handler(ds, handler); } -#endif /* __BLOCKS__ */ void -dispatch_source_set_cancel_handler_f(dispatch_source_t ds, +dispatch_source_set_mandatory_cancel_handler(dispatch_source_t ds, + dispatch_block_t handler) +{ + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY); + return _dispatch_source_set_cancel_handler(ds, handler); +} +#endif /* __BLOCKS__ */ + +DISPATCH_NOINLINE +static void +_dispatch_source_set_cancel_handler_f(dispatch_source_t ds, dispatch_function_t handler) { dispatch_continuation_t dc; @@ -508,6 +411,25 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t ds, _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } +void +dispatch_source_set_cancel_handler_f(dispatch_source_t ds, + dispatch_function_t handler) +{ + if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { + DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on " + "this source"); + } + return _dispatch_source_set_cancel_handler_f(ds, handler); +} + +void +dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t ds, + dispatch_function_t handler) +{ + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY); + return _dispatch_source_set_cancel_handler_f(ds, handler); +} + #ifdef __BLOCKS__ void dispatch_source_set_registration_handler(dispatch_source_t ds, @@ -545,7 +467,7 @@ _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc, cq, flags); + _dispatch_continuation_pop(dc, NULL, flags, cq); } static void @@ -555,7 +477,6 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, dispatch_continuation_t dc; dc = _dispatch_source_handler_take(ds, DS_CANCEL_HANDLER); - ds->ds_pending_data_mask = 0; ds->ds_pending_data = 0; ds->ds_data = 0; _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); @@ -569,104 +490,76 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc, cq, flags); + _dispatch_continuation_pop(dc, NULL, flags, cq); } static void _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, dispatch_invoke_flags_t flags) { - unsigned long prev; - dispatch_source_refs_t dr = ds->ds_refs; dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER); - prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); - if (ds->ds_is_level) { + uint64_t prev; + + if (dr->du_is_timer && !(dr->du_fflags & DISPATCH_TIMER_AFTER)) { + prev = _dispatch_source_timer_data(ds, dr); + } else { + prev = 
os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); + } + if (dr->du_data_action == DISPATCH_UNOTE_ACTION_DATA_SET) { ds->ds_data = ~prev; - } else if (ds->ds_is_timer && ds_timer(dr).target && prev) { - ds->ds_data = _dispatch_source_timer_data(dr, prev); } else { ds->ds_data = prev; } - if (!dispatch_assume(prev) || !dc) { + if (!dispatch_assume(prev != 0) || !dc) { return; } - _dispatch_continuation_pop(dc, cq, flags); - if (ds->ds_is_timer && (ds_timer(dr).flags & DISPATCH_TIMER_AFTER)) { + _dispatch_continuation_pop(dc, NULL, flags, cq); + if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER)) { _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); dispatch_release(ds); // dispatch_after sources are one-shot } } -static void -_dispatch_source_kevent_unregister(dispatch_source_t ds) +void +_dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options) { _dispatch_object_debug(ds, "%s", __func__); - uint32_t flags = (uint32_t)ds->ds_pending_data_mask; - dispatch_kevent_t dk = ds->ds_dkev; dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if (ds->ds_is_custom_source) { - ds->ds_dkev = NULL; - goto done; - } + dispatch_source_refs_t dr = ds->ds_refs; - if (ds->ds_is_direct_kevent && - ((dqf & DSF_DELETED) || !(ds->ds_is_installed))) { - dk->dk_kevent.flags |= EV_DELETE; // already deleted - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); - } - if (dk->dk_kevent.filter == DISPATCH_EVFILT_TIMER) { - ds->ds_dkev = NULL; - if (ds->ds_is_installed) { - _dispatch_timers_unregister(ds, dk); + if (dr->du_is_timer) { + // Because of the optimization to unregister fired oneshot timers + // from the target queue, we can't trust _dispatch_unote_registered() + // to tell the truth, it may not have happened yet + if (dqf & DSF_ARMED) { + _dispatch_timers_unregister(ds->ds_timer_refs); } - } else if (!ds->ds_is_direct_kevent) { - ds->ds_dkev = NULL; - dispatch_assert((bool)ds->ds_is_installed); - TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); - _dispatch_kevent_unregister(dk, flags, 0); + dr->du_ident = DISPATCH_TIMER_IDENT_CANCELED; } else { - unsigned int dkev_dispose_options = 0; - if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { - dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; - } else if (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { - if (!ds->ds_is_direct_kevent) { - dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; - } + if (_dispatch_unote_needs_rearm(dr) && !(dqf & DSF_ARMED)) { + options |= DU_UNREGISTER_IMMEDIATE_DELETE; } - long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options); - if (r == EINPROGRESS) { + if (!_dispatch_unote_unregister(dr, options)) { _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]", - ds, dk); + ds, dr); _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); return; // deferred unregistration -#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS - } else if (r == ENOENT) { - _dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]", - ds, dk); - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); - return; // potential concurrent EV_DELETE delivery rdar://22047283 -#endif - } else { - dispatch_assume_zero(r); } - ds->ds_dkev = NULL; - _TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list); } -done: + dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); if (dqf & DSF_CANCEL_WAITER) { _dispatch_wake_by_address(&ds->dq_atomic_flags); } ds->ds_is_installed = true; - ds->ds_needs_rearm = 
false; // re-arm is pointless and bad now - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dk); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); _dispatch_release(ds); // the retain is done at creation time } DISPATCH_ALWAYS_INLINE -static bool +static inline bool _dispatch_source_tryarm(dispatch_source_t ds) { dispatch_queue_flags_t oqf, nqf; @@ -680,58 +573,52 @@ _dispatch_source_tryarm(dispatch_source_t ds) }); } -static bool -_dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_source_refs_resume(dispatch_source_t ds) { - switch (ds->ds_dkev->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - _dispatch_timers_update(ds); - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, - ds->ds_dkev); + dispatch_source_refs_t dr = ds->ds_refs; + if (dr->du_is_timer) { + _dispatch_timers_update(dr); return true; -#if HAVE_MACH - case EVFILT_MACHPORT: - if ((ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) && - !ds->ds_is_direct_kevent) { - new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH - } - break; -#endif } if (unlikely(!_dispatch_source_tryarm(ds))) { return false; } - if (unlikely(_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0))) { - _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED, - DSF_ARMED); - return false; - } - _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + _dispatch_unote_resume(dr); + _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dr); return true; } -static void -_dispatch_source_kevent_register(dispatch_source_t ds, pthread_priority_t pp) +void +_dispatch_source_refs_register(dispatch_source_t ds, dispatch_priority_t pri) { - dispatch_assert_zero((bool)ds->ds_is_installed); - switch (ds->ds_dkev->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - _dispatch_timers_update(ds); - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + dispatch_source_refs_t dr = ds->ds_refs; + + dispatch_assert(!ds->ds_is_installed); + + if (dr->du_is_timer) { + dispatch_priority_t kbp = _dispatch_source_compute_kevent_priority(ds); + // aggressively coalesce background/maintenance QoS timers + // + if (_dispatch_qos_is_background(_dispatch_priority_qos(kbp))) { + if (dr->du_fflags & DISPATCH_TIMER_STRICT) { + _dispatch_ktrace1(DISPATCH_PERF_strict_bg_timer, ds); + } else { + dr->du_fflags |= DISPATCH_TIMER_BACKGROUND; + dr->du_ident = _dispatch_source_timer_idx(dr); + } + } + _dispatch_timers_update(dr); return; } - uint32_t flags; - bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, pp, &flags); - TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list); - ds->ds_is_installed = true; - if (do_resume || ds->ds_needs_rearm) { - if (unlikely(!_dispatch_source_kevent_resume(ds, flags))) { - _dispatch_source_kevent_unregister(ds); - } + + if (unlikely(!_dispatch_source_tryarm(ds) || + !_dispatch_unote_register(dr, ds->dq_wlh, pri))) { + _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED, + DSF_ARMED | DSF_DEFERRED_DELETE); } else { - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); + _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, dr); } _dispatch_object_debug(ds, "%s", __func__); } @@ -747,19 +634,19 @@ _dispatch_source_set_event_handler_context(void *ctxt) } } -static pthread_priority_t +dispatch_priority_t 
_dispatch_source_compute_kevent_priority(dispatch_source_t ds) { - pthread_priority_t p = ds->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + dispatch_priority_t p = ds->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; dispatch_queue_t tq = ds->do_targetq; - pthread_priority_t tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + dispatch_priority_t tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; - while (unlikely(tq->do_targetq)) { + while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { if (unlikely(tq == &_dispatch_mgr_q)) { - return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + return DISPATCH_PRIORITY_FLAG_MANAGER; } if (unlikely(_dispatch_queue_is_thread_bound(tq))) { - // thread bound hierarchies are weird, we need to install + // thread-bound hierarchies are weird, we need to install // from the context of the thread this hierarchy is bound to return 0; } @@ -769,18 +656,18 @@ _dispatch_source_compute_kevent_priority(dispatch_source_t ds) _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); return 0; } - if (unlikely(!_dispatch_queue_has_immutable_target(tq))) { + if (unlikely(_dispatch_queue_is_legacy(tq))) { if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) { // we're not allowed to dereference tq->do_targetq _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); return 0; } } - if (!(tq->dq_priority & _PTHREAD_PRIORITY_INHERIT_FLAG)) { + if (!(tq->dq_priority & DISPATCH_PRIORITY_FLAG_INHERIT)) { if (p < tqp) p = tqp; } tq = tq->do_targetq; - tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; } if (unlikely(!tqp)) { @@ -790,22 +677,34 @@ _dispatch_source_compute_kevent_priority(dispatch_source_t ds) return _dispatch_priority_inherit_from_root_queue(p, tq); } +static void +_dispatch_source_install(dispatch_source_t ds, dispatch_priority_t pri, + dispatch_wlh_t wlh) +{ + if (!ds->dq_wlh && wlh) { + _dispatch_queue_class_record_wlh_hierarchy(ds, wlh); + } + _dispatch_source_refs_register(ds, pri); + ds->ds_is_installed = true; +} + void _dispatch_source_finalize_activation(dispatch_source_t ds) { dispatch_continuation_t dc; + dispatch_source_refs_t dr = ds->ds_refs; - if (unlikely(ds->ds_is_direct_kevent && + if (unlikely(dr->du_is_direct && (_dispatch_queue_atomic_flags(ds->_as_dq) & DSF_CANCELED))) { - return _dispatch_source_kevent_unregister(ds); + return _dispatch_source_refs_unregister(ds, 0); } - dc = _dispatch_source_get_event_handler(ds->ds_refs); + dc = _dispatch_source_get_event_handler(dr); if (dc) { if (_dispatch_object_is_barrier(dc)) { _dispatch_queue_atomic_flags_set(ds->_as_dq, DQF_BARRIER_BIT); } - ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + ds->dq_priority = _dispatch_priority_from_pp_strip_flags(dc->dc_priority); if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { _dispatch_barrier_async_detached_f(ds->_as_dq, ds, _dispatch_source_set_event_handler_context); @@ -815,26 +714,34 @@ _dispatch_source_finalize_activation(dispatch_source_t ds) // call "super" _dispatch_queue_finalize_activation(ds->_as_dq); - if (ds->ds_is_direct_kevent && !ds->ds_is_installed) { - pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); - if (pp) _dispatch_source_kevent_register(ds, pp); + if (dr->du_is_direct && !ds->ds_is_installed) { + dispatch_priority_t pri = _dispatch_source_compute_kevent_priority(ds); + if (pri) { + dispatch_wlh_t wlh = ds->dq_wlh; + if (!wlh) wlh = _dispatch_queue_class_compute_wlh(ds); + _dispatch_source_install(ds, pri, wlh); + } } } 
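
The new `_dispatch_source_merge_data()` above folds user-supplied values into `ds_pending_data` with add, or, or replace semantics depending on the custom filter. Below is a minimal standalone sketch of what this looks like from the public API side (not part of this patch; the queue label and the decision to `exit()` from the handler are illustration choices): several `dispatch_source_merge_data()` calls on a `DISPATCH_SOURCE_TYPE_DATA_ADD` source are typically coalesced into a single handler invocation whose `dispatch_source_get_data()` value is the sum.

```c
#include <dispatch/dispatch.h>
#include <stdio.h>
#include <stdlib.h>

static void on_data(void *ctxt)
{
	dispatch_source_t ds = ctxt;
	// For a DATA_ADD source this is the sum of every value merged since
	// the last time the handler ran.
	printf("coalesced data: %lu\n", dispatch_source_get_data(ds));
	exit(0);
}

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("example.merge", NULL);
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, q);
	dispatch_set_context(ds, ds);
	dispatch_source_set_event_handler_f(ds, on_data);
	dispatch_activate(ds);

	// Several merges before the handler runs are usually delivered as one
	// callout with the values added together; a DATA_OR source or's them,
	// and the DISPATCH_EVFILT_CUSTOM_REPLACE filter added by this change
	// simply overwrites the pending value.
	for (int i = 0; i < 5; i++) {
		dispatch_source_merge_data(ds, 1);
	}
	dispatch_main();
}
```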
DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, - uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) +static inline dispatch_queue_wakeup_target_t +_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) { dispatch_source_t ds = dou._ds; - dispatch_queue_t retq = NULL; + dispatch_queue_wakeup_target_t retq = DISPATCH_QUEUE_WAKEUP_NONE; dispatch_queue_t dq = _dispatch_queue_get_current(); + flags |= DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS; + if (_dispatch_queue_class_probe(ds)) { // Intentionally always drain even when on the manager queue // and not the source's regular target queue: we need to be able // to drain timer setting and the like there. - retq = _dispatch_queue_serial_drain(ds->_as_dq, flags, owned, NULL); + dispatch_with_disabled_narrowing(dic, { + retq = _dispatch_queue_serial_drain(ds->_as_dq, dic, flags, owned); + }); } // This function performs all source actions. Each action is responsible @@ -846,17 +753,32 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, dispatch_source_refs_t dr = ds->ds_refs; dispatch_queue_t dkq = &_dispatch_mgr_q; + dispatch_queue_flags_t dqf; + bool prevent_starvation = false; - if (ds->ds_is_direct_kevent) { + if (dr->du_is_direct) { dkq = ds->do_targetq; } + if (dr->du_is_timer && + os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (!(dqf & (DSF_CANCELED | DQF_RELEASED))) { + // timer has to be configured on the kevent queue + if (dq != dkq) { + return dkq; + } + _dispatch_source_timer_configure(ds); + } + } + if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. if (dq != dkq) { return dkq; } - _dispatch_source_kevent_register(ds, _dispatch_get_defaultpriority()); + _dispatch_source_install(ds, _dispatch_get_basepri(), + _dispatch_get_wlh()); } if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { @@ -874,22 +796,20 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, _dispatch_source_registration_callout(ds, dq, flags); } - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - bool prevent_starvation = false; - - if ((dqf & DSF_DEFERRED_DELETE) && - ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) { unregister_event: // DSF_DELETE: Pending source kevent unregistration has been completed // !DSF_ARMED: event was delivered and can safely be unregistered if (dq != dkq) { return dkq; } - _dispatch_source_kevent_unregister(ds); + _dispatch_source_refs_unregister(ds, DU_UNREGISTER_IMMEDIATE_DELETE); dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && + os_atomic_load2o(ds, ds_pending_data, relaxed)) { // The source has pending data to deliver via the event handler callback // on the target queue. Some sources need to be rearmed on the kevent // queue after event delivery. @@ -901,12 +821,13 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, // re-queue to give other things already queued on the target queue // a chance to run. 
// - // however, if the source is directly targetting an overcommit root + // however, if the source is directly targeting an overcommit root // queue, this would requeue the source and ask for a new overcommit // thread right away. prevent_starvation = dq->do_targetq || - !(dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - if (prevent_starvation && ds->ds_pending_data) { + !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + if (prevent_starvation && + os_atomic_load2o(ds, ds_pending_data, relaxed)) { retq = ds->do_targetq; } } else { @@ -921,17 +842,21 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, // kevent queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. if (!(dqf & DSF_DELETED)) { - if (dq != dkq) { + if (dr->du_is_timer && !(dqf & DSF_ARMED)) { + // timers can cheat if not armed because there's nothing left + // to do on the manager queue and unregistration can happen + // on the regular target queue + } else if (dq != dkq) { return dkq; } - _dispatch_source_kevent_unregister(ds); + _dispatch_source_refs_unregister(ds, 0); dqf = _dispatch_queue_atomic_flags(ds->_as_dq); if (unlikely(dqf & DSF_DEFERRED_DELETE)) { if (!(dqf & DSF_ARMED)) { goto unregister_event; } // we need to wait for the EV_DELETE - return retq; + return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; } } if (dq != ds->do_targetq && (_dispatch_source_get_event_handler(dr) || @@ -945,7 +870,8 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, prevent_starvation = false; } - if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { + if (_dispatch_unote_needs_rearm(dr) && + !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) { // The source needs to be rearmed on the kevent queue. if (dq != dkq) { return dkq; @@ -954,20 +880,29 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, // no need for resume when we can directly unregister the kevent goto unregister_event; } - if (prevent_starvation) { + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { + // do not try to rearm the kevent if the source is suspended + // from the source handler + return ds->do_targetq; + } + if (prevent_starvation && dr->du_wlh == DISPATCH_WLH_GLOBAL) { // keep the old behavior to force re-enqueue to our target queue - // for the rearm. It is inefficient though and we should - // improve this . + // for the rearm. // // if the handler didn't run, or this is a pending delete // or our target queue is a global queue, then starvation is // not a concern and we can rearm right away. 
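
For descriptor-backed sources, the pending-data path above is what ultimately invokes the event handler with a byte-count estimate before the source is rearmed. A small standalone sketch using only the public API (not part of this patch; the pipe, queue label and `exit()` are illustration choices):

```c
#include <dispatch/dispatch.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int read_fd;

static void on_readable(void *ctxt)
{
	dispatch_source_t ds = ctxt;
	char buf[64];
	// Estimated number of bytes currently readable on the descriptor,
	// delivered through the pending-data machinery.
	unsigned long avail = dispatch_source_get_data(ds);
	ssize_t n = read(read_fd, buf, sizeof(buf) - 1);
	if (n < 0) n = 0;
	buf[n] = '\0';
	printf("readable: ~%lu bytes, got \"%s\"\n", avail, buf);
	exit(0);
}

int main(void)
{
	int fds[2];
	if (pipe(fds)) return 1;
	read_fd = fds[0];

	dispatch_queue_t q = dispatch_queue_create("example.read", NULL);
	dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
			(uintptr_t)read_fd, 0, q);
	dispatch_set_context(ds, ds);
	dispatch_source_set_event_handler_f(ds, on_readable);
	dispatch_activate(ds);

	write(fds[1], "hello", 5);
	dispatch_main();
}
```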
return ds->do_targetq; } - if (unlikely(!_dispatch_source_kevent_resume(ds, 0))) { - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (unlikely(!_dispatch_source_refs_resume(ds))) { goto unregister_event; } + if (!prevent_starvation && dr->du_wlh != DISPATCH_WLH_GLOBAL) { + // try to redrive the drain from under the lock for sources + // targeting an overcommit root queue to avoid parking + // when the next event has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } } return retq; @@ -975,13 +910,14 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, DISPATCH_NOINLINE void -_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags) +_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(ds->_as_dq, flags, _dispatch_source_invoke2); + _dispatch_queue_class_invoke(ds, dic, flags, _dispatch_source_invoke2); } void -_dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, +_dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { // This function determines whether the source needs to be invoked. @@ -993,21 +929,26 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); bool deferred_delete = (dqf & DSF_DEFERRED_DELETE); - if (ds->ds_is_direct_kevent) { + if (dr->du_is_direct) { dkq = DISPATCH_QUEUE_WAKEUP_TARGET; } - if (!ds->ds_is_installed) { + if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && dr->du_is_timer && + os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) { + // timer has to be configured on the kevent queue + tq = dkq; + } else if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. tq = dkq; } else if (_dispatch_source_get_registration_handler(dr)) { // The registration handler needs to be delivered to the target queue. tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } else if (deferred_delete && ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { + } else if (deferred_delete && !(dqf & DSF_ARMED)) { // Pending source kevent unregistration has been completed // or EV_ONESHOT event can be acknowledged tq = dkq; - } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && + os_atomic_load2o(ds, ds_pending_data, relaxed)) { // The source has pending data to deliver to the target queue. tq = DISPATCH_QUEUE_WAKEUP_TARGET; } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !deferred_delete) { @@ -1015,13 +956,21 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. if (!(dqf & DSF_DELETED)) { - tq = dkq; + if (dr->du_is_timer && !(dqf & DSF_ARMED)) { + // timers can cheat if not armed because there's nothing left + // to do on the manager queue and unregistration can happen + // on the regular target queue + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else { + tq = dkq; + } } else if (_dispatch_source_get_event_handler(dr) || _dispatch_source_get_cancel_handler(dr) || _dispatch_source_get_registration_handler(dr)) { tq = DISPATCH_QUEUE_WAKEUP_TARGET; } - } else if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { + } else if (_dispatch_unote_needs_rearm(dr) && + !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) { // The source needs to be rearmed on the kevent queue. 
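
Cancellation, as handled above, first unregisters the unote on the kevent queue and only then delivers the cancel handler on the target queue, which is why the cancel handler is the one safe place to close a monitored descriptor. A standalone sketch of that pattern with the public API (not part of this patch; names are illustration choices). A source configured with the new `dispatch_source_set_mandatory_cancel_handler()` additionally crashes in `_dispatch_source_xref_dispose()` if it is released without ever being cancelled.

```c
#include <dispatch/dispatch.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

static int fd_to_watch;

static void on_readable(void *ctxt)
{
	(void)ctxt;
	char buf[256];
	(void)read(fd_to_watch, buf, sizeof(buf));
}

static void on_cancel(void *ctxt)
{
	(void)ctxt;
	close(fd_to_watch); // safe: the kevent has already been unregistered
	exit(0);
}

int main(void)
{
	int fds[2];
	if (pipe(fds)) return 1;
	fd_to_watch = fds[0];

	dispatch_queue_t q = dispatch_queue_create("example.cancel", NULL);
	dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
			(uintptr_t)fd_to_watch, 0, q);
	dispatch_source_set_event_handler_f(ds, on_readable);
	dispatch_source_set_cancel_handler_f(ds, on_cancel);
	dispatch_activate(ds);

	// Stop monitoring: the cancel handler runs on `q` once the source has
	// been torn down on the kevent queue.
	dispatch_source_cancel(ds);
	dispatch_main();
}
```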
tq = dkq; } @@ -1030,9 +979,9 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, } if (tq) { - return _dispatch_queue_class_wakeup(ds->_as_dq, pp, flags, tq); - } else if (pp) { - return _dispatch_queue_class_override_drainer(ds->_as_dq, pp, flags); + return _dispatch_queue_class_wakeup(ds->_as_dq, qos, flags, tq); + } else if (qos) { + return _dispatch_queue_class_override_drainer(ds->_as_dq, qos, flags); } else if (flags & DISPATCH_WAKEUP_CONSUME) { return _dispatch_release_tailcall(ds); } @@ -1060,9 +1009,9 @@ void dispatch_source_cancel_and_wait(dispatch_source_t ds) { dispatch_queue_flags_t old_dqf, dqf, new_dqf; - pthread_priority_t pp; + dispatch_source_refs_t dr = ds->ds_refs; - if (unlikely(_dispatch_source_get_cancel_handler(ds->ds_refs))) { + if (unlikely(_dispatch_source_get_cancel_handler(dr))) { DISPATCH_CLIENT_CRASH(ds, "Source has a cancel handler"); } @@ -1074,7 +1023,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) } if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { // just add DSF_CANCELED - } else if ((old_dqf & DSF_DEFERRED_DELETE) || !ds->ds_is_direct_kevent){ + } else if ((old_dqf & DSF_DEFERRED_DELETE) || !dr->du_is_direct) { new_dqf |= DSF_CANCEL_WAITER; } }); @@ -1126,7 +1075,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) // same thing _dispatch_source_invoke2() does when handling cancellation dqf = _dispatch_queue_atomic_flags(ds->_as_dq); if (!(dqf & (DSF_DEFERRED_DELETE | DSF_DELETED))) { - _dispatch_source_kevent_unregister(ds); + _dispatch_source_refs_unregister(ds, 0); dqf = _dispatch_queue_atomic_flags(ds->_as_dq); if (likely((dqf & DSF_STATE_MASK) == DSF_DELETED)) { _dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE); @@ -1137,16 +1086,17 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) DISPATCH_CLIENT_CRASH(ds, "dispatch_source_cancel_and_wait " "called from a source handler"); } else { + dispatch_qos_t qos; override: - pp = _dispatch_get_priority() & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (pp) dx_wakeup(ds, pp, DISPATCH_WAKEUP_OVERRIDING); + qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + dx_wakeup(ds, qos, DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_FLUSH); dispatch_activate(ds); } dqf = _dispatch_queue_atomic_flags(ds->_as_dq); while (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { if (unlikely(!(dqf & DSF_CANCEL_WAITER))) { - if (!os_atomic_cmpxchgvw2o(ds, dq_atomic_flags, + if (!os_atomic_cmpxchgv2o(ds, dq_atomic_flags, dqf, dqf | DSF_CANCEL_WAITER, &dqf, relaxed)) { continue; } @@ -1157,46 +1107,44 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) } } -static void -_dispatch_source_merge_kevent(dispatch_source_t ds, - const _dispatch_kevent_qos_s *ke) +void +_dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, + uintptr_t status, pthread_priority_t pp) { - _dispatch_object_debug(ds, "%s", __func__); - dispatch_wakeup_flags_t flags = 0; + dispatch_source_refs_t dr = du._dr; + dispatch_source_t ds = _dispatch_source_from_refs(dr); + dispatch_wakeup_flags_t wflags = 0; dispatch_queue_flags_t dqf; - pthread_priority_t pp = 0; - if (ds->ds_needs_rearm || (ke->flags & (EV_DELETE | EV_ONESHOT))) { + if (_dispatch_unote_needs_rearm(dr) || (flags & (EV_DELETE | EV_ONESHOT))) { // once we modify the queue atomic flags below, it will allow concurrent // threads running _dispatch_source_invoke2 to dispose of the source, - // so we can't safely borrow the reference we get from the knote udata + // so we can't safely borrow the reference we get from the 
muxnote udata // anymore, and need our own - flags = DISPATCH_WAKEUP_CONSUME; + wflags = DISPATCH_WAKEUP_CONSUME; _dispatch_retain(ds); // rdar://20382435 } - if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && - !(ke->flags & EV_DELETE)) { + if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && + !(flags & EV_DELETE)) { dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DEFERRED_DELETE, DSF_ARMED); - if (ke->flags & EV_VANISHED) { - _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + if (flags & EV_VANISHED) { + _dispatch_bug_kevent_client("kevent", dr->du_type->dst_kind, "monitored resource vanished before the source " "cancel handler was invoked", 0); } _dispatch_debug("kevent-source[%p]: %s kevent[%p]", ds, - (ke->flags & EV_VANISHED) ? "vanished" : - "deferred delete oneshot", (void*)ke->udata); - } else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) { - dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, - DSF_DELETED, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: delete kevent[%p]", - ds, (void*)ke->udata); - if (ke->flags & EV_DELETE) goto done; - } else if (ds->ds_needs_rearm) { + (flags & EV_VANISHED) ? "vanished" : + "deferred delete oneshot", dr); + } else if (flags & (EV_DELETE | EV_ONESHOT)) { + _dispatch_source_refs_unregister(ds, DU_UNREGISTER_ALREADY_DELETED); + _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", ds, dr); + if (flags & EV_DELETE) goto done; + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + } else if (_dispatch_unote_needs_rearm(dr)) { dqf = _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ", - ds, (void*)ke->udata); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); } else { dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } @@ -1204,16 +1152,10 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, if (dqf & (DSF_CANCELED | DQF_RELEASED)) { goto done; // rdar://20204025 } -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && - dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { - DISPATCH_INTERNAL_CRASH(ke->flags,"Unexpected kevent for mach channel"); - } -#endif - unsigned long data; - if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && - (ke->flags & EV_VANISHED)) { + dispatch_unote_action_t action = dr->du_data_action; + if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && + (flags & EV_VANISHED)) { // if the resource behind the ident vanished, the event handler can't // do anything useful anymore, so do not try to call it at all // @@ -1223,1199 +1165,1055 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, // if we get both bits it was a real EV_VANISHED delivery os_atomic_store2o(ds, ds_pending_data, 0, relaxed); #if HAVE_MACH - } else if (ke->filter == EVFILT_MACHPORT) { - data = DISPATCH_MACH_RECV_MESSAGE; + } else if (dr->du_filter == EVFILT_MACHPORT) { os_atomic_store2o(ds, ds_pending_data, data, relaxed); #endif - } else if (ds->ds_is_level) { - // ke->data is signed and "negative available data" makes no sense - // zero bytes happens when EV_EOF is set - dispatch_assert(ke->data >= 0l); - data = ~(unsigned long)ke->data; + } else if (action == DISPATCH_UNOTE_ACTION_DATA_SET) { os_atomic_store2o(ds, ds_pending_data, data, relaxed); - } else if (ds->ds_is_adder) { - data = (unsigned long)ke->data; + } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) { os_atomic_add2o(ds, ds_pending_data, data, relaxed); - } else if (ke->fflags & ds->ds_pending_data_mask) { - data = 
ke->fflags & ds->ds_pending_data_mask; + } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR) { os_atomic_or2o(ds, ds_pending_data, data, relaxed); + } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { + // We combine the data and status into a single 64-bit value. + uint64_t odata, ndata; + uint64_t value = DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status); + os_atomic_rmw_loop2o(ds, ds_pending_data, odata, ndata, relaxed, { + ndata = DISPATCH_SOURCE_GET_DATA(odata) | value; + }); + } else if (data) { + DISPATCH_INTERNAL_CRASH(action, "Unexpected source action value"); } + _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, dr); done: -#if DISPATCH_USE_KEVENT_QOS - pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; -#endif - dx_wakeup(ds, pp, flags | DISPATCH_WAKEUP_FLUSH); + _dispatch_object_debug(ds, "%s", __func__); + dx_wakeup(ds, _dispatch_qos_from_pp(pp), wflags | DISPATCH_WAKEUP_FLUSH); } #pragma mark - -#pragma mark dispatch_kevent_t +#pragma mark dispatch_source_timer + +#if DISPATCH_USE_DTRACE +static dispatch_timer_source_refs_t + _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]; +#define _dispatch_trace_next_timer_set(x, q) \ + _dispatch_trace_next_timer[(q)] = (x) +#define _dispatch_trace_next_timer_program(d, q) \ + _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d)) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mgr_trace_timers_wakes(void) +{ + uint32_t qos; -#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD -static void _dispatch_kevent_guard(dispatch_kevent_t dk); -static void _dispatch_kevent_unguard(dispatch_kevent_t dk); + if (_dispatch_timers_will_wake) { + if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { + for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { + if (_dispatch_timers_will_wake & (1 << qos)) { + _dispatch_trace_timer_wake(_dispatch_trace_next_timer[qos]); + } + } + } + _dispatch_timers_will_wake = 0; + } +} #else -static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; } -static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; } +#define _dispatch_trace_next_timer_set(x, q) +#define _dispatch_trace_next_timer_program(d, q) +#define _dispatch_mgr_trace_timers_wakes() #endif -#if !DISPATCH_USE_EV_UDATA_SPECIFIC -static struct dispatch_kevent_s _dispatch_kevent_data_or = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), -}; -static struct dispatch_kevent_s _dispatch_kevent_data_add = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_ADD, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), -}; -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC - -#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) - -DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE]; +#define _dispatch_source_timer_telemetry_enabled() false +DISPATCH_NOINLINE static void -_dispatch_kevent_init() +_dispatch_source_timer_telemetry_slow(dispatch_source_t ds, + dispatch_clock_t clock, struct dispatch_timer_source_s *values) { - unsigned int i; - for (i = 0; i < DSL_HASH_SIZE; i++) { - TAILQ_INIT(&_dispatch_sources[i]); - } - -#if !DISPATCH_USE_EV_UDATA_SPECIFIC - TAILQ_INSERT_TAIL(&_dispatch_sources[0], - &_dispatch_kevent_data_or, dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[0], - &_dispatch_kevent_data_add, dk_list); - _dispatch_kevent_data_or.dk_kevent.udata = - 
(_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_or; - _dispatch_kevent_data_add.dk_kevent.udata = - (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_add; -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC + if (_dispatch_trace_timer_configure_enabled()) { + _dispatch_trace_timer_configure(ds, clock, values); + } } -static inline uintptr_t -_dispatch_kevent_hash(uint64_t ident, short filter) -{ - uint64_t value; -#if HAVE_MACH - value = (filter == EVFILT_MACHPORT || - filter == DISPATCH_EVFILT_MACH_NOTIFICATION ? - MACH_PORT_INDEX(ident) : ident); -#else - value = ident; - (void)filter; -#endif - return DSL_HASH((uintptr_t)value); -} - -static dispatch_kevent_t -_dispatch_kevent_find(uint64_t ident, short filter) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock, + struct dispatch_timer_source_s *values) { - uintptr_t hash = _dispatch_kevent_hash(ident, filter); - dispatch_kevent_t dki; - - TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) { - if (dki->dk_kevent.ident == ident && dki->dk_kevent.filter == filter) { - break; - } + if (_dispatch_trace_timer_configure_enabled() || + _dispatch_source_timer_telemetry_enabled()) { + _dispatch_source_timer_telemetry_slow(ds, clock, values); + asm(""); // prevent tailcall } - return dki; } +DISPATCH_NOINLINE static void -_dispatch_kevent_insert(dispatch_kevent_t dk) +_dispatch_source_timer_configure(dispatch_source_t ds) { - if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return; - _dispatch_kevent_guard(dk); - uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, - dk->dk_kevent.filter); - TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); -} + dispatch_timer_source_refs_t dt = ds->ds_timer_refs; + dispatch_timer_config_t dtc; -// Find existing kevents, and merge any new flags if necessary -static bool -_dispatch_kevent_register(dispatch_kevent_t *dkp, pthread_priority_t pp, - uint32_t *flgp) -{ - dispatch_kevent_t dk = NULL, ds_dkev = *dkp; - uint32_t new_flags; - bool do_resume = false; - - if (!(ds_dkev->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident, - ds_dkev->dk_kevent.filter); - } - if (dk) { - // If an existing dispatch kevent is found, check to see if new flags - // need to be added to the existing kevent - new_flags = ~dk->dk_kevent.fflags & ds_dkev->dk_kevent.fflags; - dk->dk_kevent.fflags |= ds_dkev->dk_kevent.fflags; - free(ds_dkev); - *dkp = dk; - do_resume = new_flags; + dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency); + if (dtc->dtc_clock == DISPATCH_CLOCK_MACH) { + dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH; } else { - dk = ds_dkev; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (!_dispatch_kevent_workqueue_enabled) { - // do nothing - } else if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - dk->dk_kevent.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } else { - pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | - _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - if (!pp) pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - _dispatch_assert_is_valid_qos_class(pp); - dk->dk_kevent.qos = (_dispatch_kevent_priority_t)pp; - } -#else - (void)pp; -#endif - _dispatch_kevent_insert(dk); - new_flags = dk->dk_kevent.fflags; - do_resume = true; + dt->du_fflags &= ~(uint32_t)DISPATCH_TIMER_CLOCK_MACH; } - // Re-register the kevent with the kernel if new flags were added - // by the dispatch kevent - if (do_resume) { - dk->dk_kevent.flags |= EV_ADD; + dt->dt_timer = dtc->dtc_timer; + free(dtc); + if (ds->ds_is_installed) { + // Clear 
any pending data that might have accumulated on + // older timer params + os_atomic_store2o(ds, ds_pending_data, 0, relaxed); + _dispatch_timers_update(dt); } - *flgp = new_flags; - return do_resume; } -static long -_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) +static dispatch_timer_config_t +_dispatch_source_timer_config_create(dispatch_time_t start, + uint64_t interval, uint64_t leeway) { - long r; - bool oneshot; - if (dk->dk_kevent.flags & EV_DELETE) { - return 0; - } - switch (dk->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - // these types not registered with kevent - return 0; -#if HAVE_MACH - case DISPATCH_EVFILT_MACH_NOTIFICATION: - return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags); -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - case EVFILT_MACHPORT: - if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); + dispatch_timer_config_t dtc; + dtc = _dispatch_calloc(1ul, sizeof(struct dispatch_timer_config_s)); + if (unlikely(interval == 0)) { + if (start != DISPATCH_TIME_FOREVER) { + _dispatch_bug_deprecated("Setting timer interval to 0 requests " + "a 1ns timer, did you mean FOREVER (a one-shot timer)?"); } - // fall through -#endif -#endif // HAVE_MACH - default: - // oneshot dk may be freed by the time we return from - // _dispatch_kq_immediate_update if the event was delivered (and then - // unregistered) concurrently. - oneshot = (dk->dk_kevent.flags & EV_ONESHOT); - r = _dispatch_kq_immediate_update(&dk->dk_kevent); - if (r && (dk->dk_kevent.flags & EV_ADD) && - (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); - } else if (!oneshot && (dk->dk_kevent.flags & EV_DISPATCH)) { - // we can safely skip doing this for ONESHOT events because - // the next kq update we will do is _dispatch_kevent_dispose() - // which also clears EV_ADD. - dk->dk_kevent.flags &= ~(EV_ADD|EV_VANISHED); - } - return r; + interval = 1; + } else if ((int64_t)interval < 0) { + // 6866347 - make sure nanoseconds won't overflow + interval = INT64_MAX; + } + if ((int64_t)leeway < 0) { + leeway = INT64_MAX; + } + if (start == DISPATCH_TIME_NOW) { + start = _dispatch_absolute_time(); + } else if (start == DISPATCH_TIME_FOREVER) { + start = INT64_MAX; } - (void)new_flags; (void)del_flags; -} -static long -_dispatch_kevent_dispose(dispatch_kevent_t dk, unsigned int options) -{ - long r = 0; - switch (dk->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) { - free(dk); - } else { - // these sources live on statically allocated lists + if ((int64_t)start < 0) { + // wall clock + start = (dispatch_time_t)-((int64_t)start); + dtc->dtc_clock = DISPATCH_CLOCK_WALL; + } else { + // absolute clock + interval = _dispatch_time_nano2mach(interval); + if (interval < 1) { + // rdar://problem/7287561 interval must be at least one in + // in order to avoid later division by zero when calculating + // the missed interval count. 
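
The `(int64_t)start < 0` test used by this configuration code is how wall-clock start times are told apart from Mach absolute ones. A tiny sketch of that encoding (not part of this patch, and assuming the usual libdispatch convention that `dispatch_walltime()` values reinterpret as negative `int64_t`, which is what the branch here relies on):

```c
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	// Absolute (Mach) start time: five seconds from now.
	dispatch_time_t mach_start = dispatch_time(DISPATCH_TIME_NOW,
			5 * NSEC_PER_SEC);
	// Wall-clock start time: "now" on the calendar clock.
	dispatch_time_t wall_start = dispatch_walltime(NULL, 0);

	printf("mach-based start: %" PRId64 "\n", (int64_t)mach_start);
	printf("wall-based start: %" PRId64 "\n", (int64_t)wall_start);

	// The timer configuration picks the wall clock for negative values
	// and the Mach clock otherwise.
	printf("wall clock? %s\n", ((int64_t)wall_start < 0) ? "yes" : "no");
	return 0;
}
```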
(NOTE: the wall clock's + // interval is already "fixed" to be 1 or more) + interval = 1; } - return r; + leeway = _dispatch_time_nano2mach(leeway); + dtc->dtc_clock = DISPATCH_CLOCK_MACH; } - if (!(dk->dk_kevent.flags & EV_DELETE)) { - dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); - if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags |= EV_ENABLE; - } - switch (dk->dk_kevent.filter) { -#if HAVE_MACH - case DISPATCH_EVFILT_MACH_NOTIFICATION: - r = _dispatch_kevent_mach_notify_resume(dk, 0,dk->dk_kevent.fflags); - break; -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - case EVFILT_MACHPORT: - if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - r = _dispatch_kevent_machport_resume(dk,0,dk->dk_kevent.fflags); - break; - } - // fall through -#endif -#endif - default: - if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - _dispatch_kq_deferred_update(&dk->dk_kevent); - } else { - r = _dispatch_kq_immediate_update(&dk->dk_kevent); - } - break; - } - if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags &= ~EV_ENABLE; - } + if (interval < INT64_MAX && leeway > interval / 2) { + leeway = interval / 2; } - if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) { - bool deferred_delete = (r == EINPROGRESS); -#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS - if (r == ENOENT) deferred_delete = true; -#endif - if (deferred_delete) { - // deferred EV_DELETE or concurrent concurrent EV_DELETE delivery - dk->dk_kevent.flags &= ~EV_DELETE; - dk->dk_kevent.flags |= EV_ENABLE; - return r; - } + + dtc->dtc_timer.target = start; + dtc->dtc_timer.interval = interval; + if (start + leeway < INT64_MAX) { + dtc->dtc_timer.deadline = start + leeway; } else { - uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, - dk->dk_kevent.filter); - TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); + dtc->dtc_timer.deadline = INT64_MAX; } - _dispatch_kevent_unguard(dk); - free(dk); - return r; + return dtc; } -static long -_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, - unsigned int options) +DISPATCH_NOINLINE +void +dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, + uint64_t interval, uint64_t leeway) { - dispatch_source_refs_t dri; - uint32_t del_flags, fflags = 0; - long r = 0; + dispatch_timer_source_refs_t dt = ds->ds_timer_refs; + dispatch_timer_config_t dtc; - if (TAILQ_EMPTY(&dk->dk_sources) || - (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - r = _dispatch_kevent_dispose(dk, options); - } else { - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - dispatch_source_t dsi = _dispatch_source_from_refs(dri); - uint32_t mask = (uint32_t)dsi->ds_pending_data_mask; - fflags |= mask; - } - del_flags = flg & ~fflags; - if (del_flags) { - dk->dk_kevent.flags |= EV_ADD; - dk->dk_kevent.fflags &= ~del_flags; - r = _dispatch_kevent_resume(dk, 0, del_flags); - } + if (unlikely(!dt->du_is_timer || (dt->du_fflags&DISPATCH_TIMER_INTERVAL))) { + DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source"); } - return r; -} -DISPATCH_NOINLINE -static void -_dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke) -{ - // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie - // . As a workaround, we simulate an exit event for - // any EVFILT_PROC with an invalid pid . 
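
`dispatch_source_set_timer()` above only sanitizes its arguments into a `dispatch_timer_config_t` and wakes the source so the new configuration is applied later on the kevent queue. For reference, a standalone sketch of the public call it serves (not part of this patch; the queue label and the 1s/100ms numbers are arbitrary):

```c
#include <dispatch/dispatch.h>
#include <stdio.h>

static void on_tick(void *ctxt)
{
	(void)ctxt;
	printf("tick\n");
}

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("example.timer", NULL);
	dispatch_source_t timer = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_TIMER, 0, 0, q);
	dispatch_source_set_event_handler_f(timer, on_tick);

	// Fire one second from now, then every second, allowing delivery to be
	// deferred by up to 100ms so wakeups can be coalesced.
	dispatch_source_set_timer(timer,
			dispatch_time(DISPATCH_TIME_NOW, 1 * NSEC_PER_SEC),
			1 * NSEC_PER_SEC, 100 * NSEC_PER_MSEC);
	dispatch_activate(timer);
	dispatch_main();
}
```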
- _dispatch_kevent_qos_s fake; - fake = *ke; - fake.flags &= ~EV_ERROR; - fake.flags |= EV_ONESHOT; - fake.fflags = NOTE_EXIT; - fake.data = 0; - _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); - _dispatch_kevent_merge(&fake); + dtc = _dispatch_source_timer_config_create(start, interval, leeway); + _dispatch_source_timer_telemetry(ds, dtc->dtc_clock, &dtc->dtc_timer); + dtc = os_atomic_xchg2o(dt, dt_pending_config, dtc, release); + if (dtc) free(dtc); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); } -DISPATCH_NOINLINE static void -_dispatch_kevent_error(_dispatch_kevent_qos_s *ke) +_dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) { - _dispatch_kevent_qos_s *kev = NULL; - - if (ke->flags & EV_DELETE) { - if (ke->flags & EV_UDATA_SPECIFIC) { - if (ke->data == EINPROGRESS) { - // deferred EV_DELETE - return; - } -#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS - if (ke->data == ENOENT) { - // deferred EV_DELETE - return; - } -#endif - } - // for EV_DELETE if the update was deferred we may have reclaimed - // our dispatch_kevent_t, and it is unsafe to dereference it now. - } else if (ke->udata) { - kev = &((dispatch_kevent_t)ke->udata)->dk_kevent; - ke->flags |= kev->flags; - } - -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP && - (ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled && - kev && (kev->fflags & MACH_RCV_MSG)) { - DISPATCH_INTERNAL_CRASH(ke->ident, - "Missing EVFILT_MACHPORT support for ports"); - } -#endif +#define NSEC_PER_FRAME (NSEC_PER_SEC/60) +// approx 1 year (60s * 60m * 24h * 365d) +#define FOREVER_NSEC 31536000000000000ull - if (ke->data) { - // log the unexpected error - _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), - !ke->udata ? NULL : - ke->flags & EV_DELETE ? "delete" : - ke->flags & EV_ADD ? "add" : - ke->flags & EV_ENABLE ? "enable" : "monitor", - (int)ke->data); + dispatch_timer_source_refs_t dr = ds->ds_timer_refs; + const bool animation = dr->du_fflags & DISPATCH_INTERVAL_UI_ANIMATION; + if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME : + FOREVER_NSEC/NSEC_PER_MSEC))) { + interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC; + } else { + interval = FOREVER_NSEC; } + interval = _dispatch_time_nano2mach(interval); + uint64_t target = _dispatch_absolute_time() + interval; + target -= (target % interval); + const uint64_t leeway = animation ? 
+ _dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2; + dr->dt_timer.target = target; + dr->dt_timer.deadline = target + leeway; + dr->dt_timer.interval = interval; + _dispatch_source_timer_telemetry(ds, DISPATCH_CLOCK_MACH, &dr->dt_timer); } -static void -_dispatch_kevent_drain(_dispatch_kevent_qos_s *ke) +#pragma mark - +#pragma mark dispatch_after + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + void *ctxt, void *handler, bool block) { + dispatch_timer_source_refs_t dt; + dispatch_source_t ds; + uint64_t leeway, delta; + + if (when == DISPATCH_TIME_FOREVER) { #if DISPATCH_DEBUG - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); + DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity"); #endif - if (ke->filter == EVFILT_USER) { - _dispatch_kevent_mgr_debug(ke); return; } - if (slowpath(ke->flags & EV_ERROR)) { - if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - _dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: " - "generating fake NOTE_EXIT", (unsigned long long)ke->udata); - return _dispatch_kevent_proc_exit(ke); + + delta = _dispatch_timeout(when); + if (delta == 0) { + if (block) { + return dispatch_async(queue, handler); } - _dispatch_debug("kevent[0x%llx]: handling error", - (unsigned long long)ke->udata); - return _dispatch_kevent_error(ke); + return dispatch_async_f(queue, ctxt, handler); } - if (ke->filter == EVFILT_TIMER) { - _dispatch_debug("kevent[0x%llx]: handling timer", - (unsigned long long)ke->udata); - return _dispatch_timers_kevent(ke); + leeway = delta / 10; // + + if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; + if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; + + // this function can and should be optimized to not use a dispatch source + ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue); + dt = ds->ds_timer_refs; + + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + if (block) { + _dispatch_continuation_init(dc, ds, handler, 0, 0, 0); + } else { + _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0); } -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT) { - _dispatch_debug("kevent[0x%llx]: handling mach port", - (unsigned long long)ke->udata); - return _dispatch_mach_kevent_merge(ke); + // reference `ds` so that it doesn't show up as a leak + dc->dc_data = ds; + _dispatch_trace_continuation_push(ds->_as_dq, dc); + os_atomic_store2o(dt, ds_handler[DS_EVENT_HANDLER], dc, relaxed); + + if ((int64_t)when < 0) { + // wall clock + when = (dispatch_time_t)-((int64_t)when); + } else { + // absolute clock + dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH; + leeway = _dispatch_time_nano2mach(leeway); } -#endif - return _dispatch_kevent_merge(ke); + dt->dt_timer.target = when; + dt->dt_timer.interval = UINT64_MAX; + dt->dt_timer.deadline = when + leeway; + dispatch_activate(ds); } DISPATCH_NOINLINE -static void -_dispatch_kevent_merge(_dispatch_kevent_qos_s *ke) +void +dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, + dispatch_function_t func) { - dispatch_kevent_t dk = (void*)ke->udata; - dispatch_source_refs_t dri, dr_next; + _dispatch_after(when, queue, ctxt, func, false); +} - TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); - } +#ifdef __BLOCKS__ +void +dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + dispatch_block_t work) +{ + _dispatch_after(when, queue, NULL, 
work, true); } +#endif -#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD -static void -_dispatch_kevent_guard(dispatch_kevent_t dk) +#pragma mark - +#pragma mark dispatch_timers + +/* + * The dispatch_timer_heap_t structure is a double min-heap of timers, + * interleaving the by-target min-heap in the even slots, and the by-deadline + * in the odd ones. + * + * The min element of these is held inline in the dispatch_timer_heap_t + * structure, and further entries are held in segments. + * + * dth_segments is the number of allocated segments. + * + * Segment 0 has a size of `DISPATCH_HEAP_INIT_SEGMENT_CAPACITY` pointers + * Segment k has a size of (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (k - 1)) + * + * Segment n (dth_segments - 1) is the last segment and points its final n + * entries to previous segments. Its address is held in the `dth_heap` field. + * + * segment n [ regular timer pointers | n-1 | k | 0 ] + * | | | + * segment n-1 <---------------------------' | | + * segment k <--------------------------------' | + * segment 0 <------------------------------------' + */ +#define DISPATCH_HEAP_INIT_SEGMENT_CAPACITY 8u + +/* + * There are two min-heaps stored interleaved in a single array, + * even indices are for the by-target min-heap, and odd indices for + * the by-deadline one. + */ +#define DTH_HEAP_ID_MASK (DTH_ID_COUNT - 1) +#define DTH_HEAP_ID(idx) ((idx) & DTH_HEAP_ID_MASK) +#define DTH_IDX_FOR_HEAP_ID(idx, heap_id) \ + (((idx) & ~DTH_HEAP_ID_MASK) | (heap_id)) + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_capacity(uint32_t segments) { - guardid_t guard; - const unsigned int guard_flags = GUARD_CLOSE; - int r, fd_flags = 0; - switch (dk->dk_kevent.filter) { - case EVFILT_READ: - case EVFILT_WRITE: - case EVFILT_VNODE: - guard = &dk->dk_kevent; - r = change_fdguard_np((int)dk->dk_kevent.ident, NULL, 0, - &guard, guard_flags, &fd_flags); - if (slowpath(r == -1)) { - int err = errno; - if (err != EPERM) { - (void)dispatch_assume_zero(err); - } - return; - } - dk->dk_kevent.ext[0] = guard_flags; - dk->dk_kevent.ext[1] = fd_flags; - break; - } + if (segments == 0) return 2; + uint32_t seg_no = segments - 1; + // for C = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY, + // 2 + C + SUM(C << (i-1), i = 1..seg_no) - seg_no + return 2 + (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << seg_no) - seg_no; } +DISPATCH_NOINLINE static void -_dispatch_kevent_unguard(dispatch_kevent_t dk) +_dispatch_timer_heap_grow(dispatch_timer_heap_t dth) { - guardid_t guard; - unsigned int guard_flags; - int r, fd_flags; - switch (dk->dk_kevent.filter) { - case EVFILT_READ: - case EVFILT_WRITE: - case EVFILT_VNODE: - guard_flags = (unsigned int)dk->dk_kevent.ext[0]; - if (!guard_flags) { - return; - } - guard = &dk->dk_kevent; - fd_flags = (int)dk->dk_kevent.ext[1]; - r = change_fdguard_np((int)dk->dk_kevent.ident, &guard, - guard_flags, NULL, 0, &fd_flags); - if (slowpath(r == -1)) { - (void)dispatch_assume_zero(errno); - return; - } - dk->dk_kevent.ext[0] = 0; - break; + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + uint32_t seg_no = dth->dth_segments++; + void **heap, **heap_prev = dth->dth_heap; + + if (seg_no > 0) { + seg_capacity <<= (seg_no - 1); + } + heap = _dispatch_calloc(seg_capacity, sizeof(void *)); + if (seg_no > 1) { + uint32_t prev_seg_no = seg_no - 1; + uint32_t prev_seg_capacity = seg_capacity >> 1; + memcpy(&heap[seg_capacity - prev_seg_no], + &heap_prev[prev_seg_capacity - prev_seg_no], + prev_seg_no * sizeof(void *)); + } + if (seg_no > 0) { + heap[seg_capacity - 
seg_no] = heap_prev; } + dth->dth_heap = heap; } -#endif // DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD - -#pragma mark - -#pragma mark dispatch_source_timer - -#if DISPATCH_USE_DTRACE -static dispatch_source_refs_t - _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]; -#define _dispatch_trace_next_timer_set(x, q) \ - _dispatch_trace_next_timer[(q)] = (x) -#define _dispatch_trace_next_timer_program(d, q) \ - _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d)) -#define _dispatch_trace_next_timer_wake(q) \ - _dispatch_trace_timer_wake(_dispatch_trace_next_timer[(q)]) -#else -#define _dispatch_trace_next_timer_set(x, q) -#define _dispatch_trace_next_timer_program(d, q) -#define _dispatch_trace_next_timer_wake(q) -#endif - -#define _dispatch_source_timer_telemetry_enabled() false DISPATCH_NOINLINE static void -_dispatch_source_timer_telemetry_slow(dispatch_source_t ds, - uintptr_t ident, struct dispatch_timer_source_s *values) +_dispatch_timer_heap_shrink(dispatch_timer_heap_t dth) { - if (_dispatch_trace_timer_configure_enabled()) { - _dispatch_trace_timer_configure(ds, ident, values); + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + uint32_t seg_no = --dth->dth_segments; + void **heap = dth->dth_heap, **heap_prev = NULL; + + if (seg_no > 0) { + seg_capacity <<= (seg_no - 1); + heap_prev = heap[seg_capacity - seg_no]; + } + if (seg_no > 1) { + uint32_t prev_seg_no = seg_no - 1; + uint32_t prev_seg_capacity = seg_capacity >> 1; + memcpy(&heap_prev[prev_seg_capacity - prev_seg_no], + &heap[seg_capacity - prev_seg_no], + prev_seg_no * sizeof(void *)); } + dth->dth_heap = heap_prev; + free(heap); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_source_timer_telemetry(dispatch_source_t ds, uintptr_t ident, - struct dispatch_timer_source_s *values) -{ - if (_dispatch_trace_timer_configure_enabled() || - _dispatch_source_timer_telemetry_enabled()) { - _dispatch_source_timer_telemetry_slow(ds, ident, values); - asm(""); // prevent tailcall +static inline dispatch_timer_source_refs_t * +_dispatch_timer_heap_get_slot(dispatch_timer_heap_t dth, uint32_t idx) +{ + uint32_t seg_no, segments = dth->dth_segments; + void **segment; + + if (idx < DTH_ID_COUNT) { + return &dth->dth_min[idx]; + } + idx -= DTH_ID_COUNT; + + // Derive the segment number from the index. Naming + // DISPATCH_HEAP_INIT_SEGMENT_CAPACITY `C`, the segments index ranges are: + // 0: 0 .. (C - 1) + // 1: C .. 2 * C - 1 + // k: 2^(k-1) * C .. 
2^k * C - 1 + // so `k` can be derived from the first bit set in `idx` + seg_no = (uint32_t)(__builtin_clz(DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1) - + __builtin_clz(idx | (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1))); + if (seg_no + 1 == segments) { + segment = dth->dth_heap; + } else { + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + seg_capacity <<= (segments - 2); + segment = dth->dth_heap[seg_capacity - seg_no - 1]; + } + if (seg_no) { + idx -= DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (seg_no - 1); } + return (dispatch_timer_source_refs_t *)(segment + idx); } -// approx 1 year (60s * 60m * 24h * 365d) -#define FOREVER_NSEC 31536000000000000ull - DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_source_timer_now(uint64_t nows[], unsigned int tidx) +static inline void +_dispatch_timer_heap_set(dispatch_timer_source_refs_t *slot, + dispatch_timer_source_refs_t dt, uint32_t idx) { - unsigned int tk = DISPATCH_TIMER_KIND(tidx); - if (nows && fastpath(nows[tk] != 0)) { - return nows[tk]; - } - uint64_t now; - switch (tk) { - case DISPATCH_TIMER_KIND_MACH: - now = _dispatch_absolute_time(); - break; - case DISPATCH_TIMER_KIND_WALL: - now = _dispatch_get_nanoseconds(); - break; - } - if (nows) { - nows[tk] = now; - } - return now; + *slot = dt; + dt->dt_heap_entry[DTH_HEAP_ID(idx)] = idx; } -static inline unsigned long -_dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_parent(uint32_t idx) { - // calculate the number of intervals since last fire - unsigned long data, missed; - uint64_t now; - now = _dispatch_source_timer_now(NULL, _dispatch_source_timer_idx(dr)); - missed = (unsigned long)((now - ds_timer(dr).last_fire) / - ds_timer(dr).interval); - // correct for missed intervals already delivered last time - data = prev - ds_timer(dr).missed + missed; - ds_timer(dr).missed = missed; - return data; + uint32_t heap_id = DTH_HEAP_ID(idx); + idx = (idx - DTH_ID_COUNT) / 2; // go to the parent + return DTH_IDX_FOR_HEAP_ID(idx, heap_id); } -struct dispatch_set_timer_params { - dispatch_source_t ds; - uintptr_t ident; - struct dispatch_timer_source_s values; -}; +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_left_child(uint32_t idx) +{ + uint32_t heap_id = DTH_HEAP_ID(idx); + // 2 * (idx - heap_id) + DTH_ID_COUNT + heap_id + return 2 * idx + DTH_ID_COUNT - heap_id; +} -static void -_dispatch_source_set_timer3(void *context) +#if DISPATCH_HAVE_TIMER_COALESCING +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_walk_skip(uint32_t idx, uint32_t count) { - // Called on the _dispatch_mgr_q - struct dispatch_set_timer_params *params = context; - dispatch_source_t ds = params->ds; - ds->ds_ident_hack = params->ident; - ds_timer(ds->ds_refs) = params->values; - // Clear any pending data that might have accumulated on - // older timer params - ds->ds_pending_data = 0; - // Re-arm in case we got disarmed because of pending set_timer suspension - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); - dispatch_resume(ds); - // Must happen after resume to avoid getting disarmed due to suspension - _dispatch_timers_update(ds); - dispatch_release(ds); - if (params->values.flags & DISPATCH_TIMER_WALL_CLOCK) { - _dispatch_mach_host_calendar_change_register(); - } - free(params); + uint32_t heap_id = DTH_HEAP_ID(idx); + + idx -= heap_id; + if (unlikely(idx + DTH_ID_COUNT == count)) 
{ + // reaching `count` doesn't mean we're done, but there is a weird + // corner case if the last item of the heap is a left child: + // + // /\ + // / \ + // / __\ + // /__/ + // ^ + // + // The formula below would return the sibling of `idx` which is + // out of bounds. Fortunately, the correct answer is the same + // as for idx's parent + idx = _dispatch_timer_heap_parent(idx); + } + + // + // When considering the index in a non interleaved, 1-based array + // representation of a heap, hence looking at (idx / DTH_ID_COUNT + 1) + // for a given idx in our dual-heaps, that index is in one of two forms: + // + // (a) 1xxxx011111 or (b) 111111111 + // d i 0 d 0 + // + // The first bit set is the row of the binary tree node (0-based). + // The following digits from most to least significant represent the path + // to that node, where `0` is a left turn and `1` a right turn. + // + // For example 0b0101 (5) is a node on row 2 accessed going left then right: + // + // row 0 1 + // / . + // row 1 2 3 + // . \ . . + // row 2 4 5 6 7 + // : : : : : : : : + // + // Skipping a sub-tree in walk order means going to the sibling of the last + // node reached after we turned left. If the node was of the form (a), + // this node is 1xxxx1, which for the above example is 0b0011 (3). + // If the node was of the form (b) then we never took a left, meaning + // we reached the last element in traversal order. + // + + // + // we want to find + // - the least significant bit set to 0 in (idx / DTH_ID_COUNT + 1) + // - which is offset by log_2(DTH_ID_COUNT) from the position of the least + // significant 0 in (idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) + // since idx is a multiple of DTH_ID_COUNT and DTH_ID_COUNT a power of 2. + // - which in turn is the same as the position of the least significant 1 in + // ~(idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) + // + dispatch_static_assert(powerof2(DTH_ID_COUNT)); + idx += DTH_ID_COUNT + DTH_ID_COUNT - 1; + idx >>= __builtin_ctz(~idx); + + // + // `idx` is now either: + // - 0 if it was the (b) case above, in which case the walk is done + // - 1xxxx0 as the position in a 0 based array representation of a non + // interleaved heap, so we just have to compute the interleaved index. + // + return likely(idx) ? DTH_ID_COUNT * idx + heap_id : UINT32_MAX; } -static void -_dispatch_source_set_timer2(void *context) +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_walk_next(uint32_t idx, uint32_t count) { - // Called on the source queue - struct dispatch_set_timer_params *params = context; - dispatch_suspend(params->ds); - _dispatch_barrier_async_detached_f(&_dispatch_mgr_q, params, - _dispatch_source_set_timer3); + // + // Goes to the next element in heap walk order, which is the prefix ordered + // walk of the tree. + // + // From a given node, the next item to return is the left child if it + // exists, else the first right sibling we find by walking our parent chain, + // which is exactly what _dispatch_timer_heap_walk_skip() returns. 
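For reference, the walk-order arithmetic described in the comment above can be exercised on an ordinary non-interleaved, 0-based array heap. The sketch below is illustrative only — `heap_walk_skip` and `heap_walk_next` are made-up names, not part of this patch — and assumes the usual layout where the left child of `i` is `2*i+1`:

```c
#include <stdint.h>

// Prefix-order walk helpers for a plain 0-based array heap. heap_walk_skip()
// jumps over the whole subtree rooted at idx; both helpers return UINT32_MAX
// once the walk is finished.
static uint32_t
heap_walk_skip(uint32_t idx, uint32_t count)
{
	if (idx == 0) {
		return UINT32_MAX;      // skipping the root skips everything
	}
	if (idx + 1 == count) {
		// The last element has no right sibling: skipping it is the same
		// as skipping its parent (the corner case described above).
		idx = (idx - 1) / 2;
	}
	uint32_t n = idx + 1;       // 1-based index: its bits encode the path
	n >>= __builtin_ctz(~n);    // strip the trailing run of right turns
	// n == 0 means we never turned left and the walk is over; otherwise n,
	// read back as a 0-based index, is the sibling of the last left turn.
	return n ? n : UINT32_MAX;
}

static uint32_t
heap_walk_next(uint32_t idx, uint32_t count)
{
	uint32_t lchild = 2 * idx + 1;
	return lchild < count ? lchild : heap_walk_skip(idx, count);
}
```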
+ // + uint32_t lchild = _dispatch_timer_heap_left_child(idx); + if (lchild < count) { + return lchild; + } + return _dispatch_timer_heap_walk_skip(idx, count); } DISPATCH_NOINLINE -static struct dispatch_set_timer_params * -_dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, - uint64_t interval, uint64_t leeway) -{ - struct dispatch_set_timer_params *params; - params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params)); - params->ds = ds; - params->values.flags = ds_timer(ds->ds_refs).flags; +static uint64_t +_dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit) +{ + dispatch_timer_source_refs_t dri; + uint32_t idx = _dispatch_timer_heap_left_child(DTH_TARGET_ID); + uint32_t count = dth->dth_count; + uint64_t tmp, target = dth->dth_min[DTH_TARGET_ID]->dt_timer.target; + + while (idx < count) { + dri = *_dispatch_timer_heap_get_slot(dth, idx); + tmp = dri->dt_timer.target; + if (tmp > limit) { + // skip subtree since none of the targets below can be before limit + idx = _dispatch_timer_heap_walk_skip(idx, count); + } else { + target = tmp; + idx = _dispatch_timer_heap_walk_next(idx, count); + } + } + return target; +} +#endif // DISPATCH_HAVE_TIMER_COALESCING - if (interval == 0) { - // we use zero internally to mean disabled - interval = 1; - } else if ((int64_t)interval < 0) { - // 6866347 - make sure nanoseconds won't overflow - interval = INT64_MAX; +DISPATCH_NOINLINE +static void +_dispatch_timer_heap_resift(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt, uint32_t idx) +{ + dispatch_static_assert(offsetof(struct dispatch_timer_source_s, target) == + offsetof(struct dispatch_timer_source_s, heap_key[DTH_TARGET_ID])); + dispatch_static_assert(offsetof(struct dispatch_timer_source_s, deadline) == + offsetof(struct dispatch_timer_source_s, heap_key[DTH_DEADLINE_ID])); +#define dth_cmp(hid, dt1, op, dt2) \ + (((dt1)->dt_timer.heap_key)[hid] op ((dt2)->dt_timer.heap_key)[hid]) + + dispatch_timer_source_refs_t *pslot, pdt; + dispatch_timer_source_refs_t *cslot, cdt; + dispatch_timer_source_refs_t *rslot, rdt; + uint32_t cidx, dth_count = dth->dth_count; + dispatch_timer_source_refs_t *slot; + int heap_id = DTH_HEAP_ID(idx); + bool sifted_up = false; + + // try to sift up + + slot = _dispatch_timer_heap_get_slot(dth, idx); + while (idx >= DTH_ID_COUNT) { + uint32_t pidx = _dispatch_timer_heap_parent(idx); + pslot = _dispatch_timer_heap_get_slot(dth, pidx); + pdt = *pslot; + if (dth_cmp(heap_id, pdt, <=, dt)) { + break; + } + _dispatch_timer_heap_set(slot, pdt, idx); + slot = pslot; + idx = pidx; + sifted_up = true; } - if ((int64_t)leeway < 0) { - leeway = INT64_MAX; + if (sifted_up) { + goto done; } - if (start == DISPATCH_TIME_NOW) { - start = _dispatch_absolute_time(); - } else if (start == DISPATCH_TIME_FOREVER) { - start = INT64_MAX; + + // try to sift down + + while ((cidx = _dispatch_timer_heap_left_child(idx)) < dth_count) { + uint32_t ridx = cidx + DTH_ID_COUNT; + cslot = _dispatch_timer_heap_get_slot(dth, cidx); + cdt = *cslot; + if (ridx < dth_count) { + rslot = _dispatch_timer_heap_get_slot(dth, ridx); + rdt = *rslot; + if (dth_cmp(heap_id, cdt, >, rdt)) { + cidx = ridx; + cdt = rdt; + cslot = rslot; + } + } + if (dth_cmp(heap_id, dt, <=, cdt)) { + break; + } + _dispatch_timer_heap_set(slot, cdt, idx); + slot = cslot; + idx = cidx; } - if ((int64_t)start < 0) { - // wall clock - start = (dispatch_time_t)-((int64_t)start); - params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; - } else { - // absolute clock 
- interval = _dispatch_time_nano2mach(interval); - if (interval < 1) { - // rdar://problem/7287561 interval must be at least one in - // in order to avoid later division by zero when calculating - // the missed interval count. (NOTE: the wall clock's - // interval is already "fixed" to be 1 or more) - interval = 1; - } - leeway = _dispatch_time_nano2mach(leeway); - params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK; - } - params->ident = DISPATCH_TIMER_IDENT(params->values.flags); - params->values.target = start; - params->values.deadline = (start < UINT64_MAX - leeway) ? - start + leeway : UINT64_MAX; - params->values.interval = interval; - params->values.leeway = (interval == INT64_MAX || leeway < interval / 2) ? - leeway : interval / 2; - return params; +done: + _dispatch_timer_heap_set(slot, dt, idx); +#undef dth_cmp } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, - uint64_t interval, uint64_t leeway, bool source_sync) +static void +_dispatch_timer_heap_insert(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) { - if (slowpath(!ds->ds_is_timer) || - slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) { - DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source"); - } + uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT; - struct dispatch_set_timer_params *params; - params = _dispatch_source_timer_params(ds, start, interval, leeway); + if (idx == 0) { + dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID; + dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_DEADLINE_ID; + dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = dt; + return; + } - _dispatch_source_timer_telemetry(ds, params->ident, ¶ms->values); - // Suspend the source so that it doesn't fire with pending changes - // The use of suspend/resume requires the external retain/release - dispatch_retain(ds); - if (source_sync) { - return _dispatch_barrier_trysync_or_async_f(ds->_as_dq, params, - _dispatch_source_set_timer2); - } else { - return _dispatch_source_set_timer2(params); + if (unlikely(idx + DTH_ID_COUNT > + _dispatch_timer_heap_capacity(dth->dth_segments))) { + _dispatch_timer_heap_grow(dth); } + _dispatch_timer_heap_resift(dth, dt, idx + DTH_TARGET_ID); + _dispatch_timer_heap_resift(dth, dt, idx + DTH_DEADLINE_ID); } -void -dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, - uint64_t interval, uint64_t leeway) +DISPATCH_NOINLINE +static void +_dispatch_timer_heap_remove(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t removed_dt) { - _dispatch_source_set_timer(ds, start, interval, leeway, true); -} + uint32_t idx = (dth->dth_count -= DTH_ID_COUNT); -void -_dispatch_source_set_runloop_timer_4CF(dispatch_source_t ds, - dispatch_time_t start, uint64_t interval, uint64_t leeway) -{ - // Don't serialize through the source queue for CF timers - _dispatch_source_set_timer(ds, start, interval, leeway, false); -} + if (idx == 0) { + dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL; + return; + } -void -_dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) -{ - dispatch_source_refs_t dr = ds->ds_refs; - #define NSEC_PER_FRAME (NSEC_PER_SEC/60) - const bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION; - if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME : - FOREVER_NSEC/NSEC_PER_MSEC))) { - interval *= animation ? 
NSEC_PER_FRAME : NSEC_PER_MSEC; - } else { - interval = FOREVER_NSEC; + for (uint32_t heap_id = 0; heap_id < DTH_ID_COUNT; heap_id++) { + dispatch_timer_source_refs_t *slot, dt; + slot = _dispatch_timer_heap_get_slot(dth, idx + heap_id); + dt = *slot; *slot = NULL; + if (dt != removed_dt) { + uint32_t removed_idx = removed_dt->dt_heap_entry[heap_id]; + _dispatch_timer_heap_resift(dth, dt, removed_idx); + } + } + if (unlikely(idx <= _dispatch_timer_heap_capacity(dth->dth_segments - 1))) { + _dispatch_timer_heap_shrink(dth); } - interval = _dispatch_time_nano2mach(interval); - uint64_t target = _dispatch_absolute_time() + interval; - target = (target / interval) * interval; - const uint64_t leeway = animation ? - _dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2; - ds_timer(dr).target = target; - ds_timer(dr).deadline = target + leeway; - ds_timer(dr).interval = interval; - ds_timer(dr).leeway = leeway; - _dispatch_source_timer_telemetry(ds, ds->ds_ident_hack, &ds_timer(dr)); } -#pragma mark - -#pragma mark dispatch_timers +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_timer_heap_update(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) +{ + _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_TARGET_ID]); + _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]); +} -#define DISPATCH_TIMER_STRUCT(refs) \ - uint64_t target, deadline; \ - TAILQ_HEAD(, refs) dt_sources - -typedef struct dispatch_timer_s { - DISPATCH_TIMER_STRUCT(dispatch_timer_source_refs_s); -} *dispatch_timer_t; - -#define DISPATCH_TIMER_INITIALIZER(tidx) \ - [tidx] = { \ - .target = UINT64_MAX, \ - .deadline = UINT64_MAX, \ - .dt_sources = TAILQ_HEAD_INITIALIZER( \ - _dispatch_timer[tidx].dt_sources), \ - } -#define DISPATCH_TIMER_INIT(kind, qos) \ - DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) - -struct dispatch_timer_s _dispatch_timer[] = { - DISPATCH_TIMER_INIT(WALL, NORMAL), - DISPATCH_TIMER_INIT(WALL, CRITICAL), - DISPATCH_TIMER_INIT(WALL, BACKGROUND), - DISPATCH_TIMER_INIT(MACH, NORMAL), - DISPATCH_TIMER_INIT(MACH, CRITICAL), - DISPATCH_TIMER_INIT(MACH, BACKGROUND), -}; -#define DISPATCH_TIMER_COUNT \ - ((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0]))) +DISPATCH_ALWAYS_INLINE +static bool +_dispatch_timer_heap_has_new_min(dispatch_timer_heap_t dth, + uint32_t count, uint32_t mask) +{ + dispatch_timer_source_refs_t dt; + bool changed = false; + uint64_t tmp; + uint32_t tidx; -#if __linux__ -#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ - (void*)&_dispatch_kevent_timer[tidx] -#else -#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ - (uintptr_t)&_dispatch_kevent_timer[tidx] -#endif -#ifdef __LP64__ -#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ - .udata = DISPATCH_KEVENT_TIMER_UDATA(tidx) -#else // __LP64__ -// dynamic initialization in _dispatch_timers_init() -#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ - .udata = 0 -#endif // __LP64__ -#define DISPATCH_KEVENT_TIMER_INITIALIZER(tidx) \ - [tidx] = { \ - .dk_kevent = { \ - .ident = tidx, \ - .filter = DISPATCH_EVFILT_TIMER, \ - DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx), \ - }, \ - .dk_sources = TAILQ_HEAD_INITIALIZER( \ - _dispatch_kevent_timer[tidx].dk_sources), \ - } -#define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \ - DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) - -struct dispatch_kevent_s _dispatch_kevent_timer[] = { - DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL), - 
DISPATCH_KEVENT_TIMER_INIT(WALL, CRITICAL), - DISPATCH_KEVENT_TIMER_INIT(WALL, BACKGROUND), - DISPATCH_KEVENT_TIMER_INIT(MACH, NORMAL), - DISPATCH_KEVENT_TIMER_INIT(MACH, CRITICAL), - DISPATCH_KEVENT_TIMER_INIT(MACH, BACKGROUND), - DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX_DISARM), -}; -#define DISPATCH_KEVENT_TIMER_COUNT \ - ((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0]))) - -#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8) -#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(tidx, note) \ - [tidx] = { \ - .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(tidx), \ - .filter = EVFILT_TIMER, \ - .flags = EV_ONESHOT, \ - .fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \ - } -#define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \ - DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos), note) - -_dispatch_kevent_qos_s _dispatch_kevent_timeout[] = { - DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME), - DISPATCH_KEVENT_TIMEOUT_INIT(WALL, CRITICAL, NOTE_MACH_CONTINUOUS_TIME | NOTE_CRITICAL), - DISPATCH_KEVENT_TIMEOUT_INIT(WALL, BACKGROUND, NOTE_MACH_CONTINUOUS_TIME | NOTE_BACKGROUND), - DISPATCH_KEVENT_TIMEOUT_INIT(MACH, NORMAL, 0), - DISPATCH_KEVENT_TIMEOUT_INIT(MACH, CRITICAL, NOTE_CRITICAL), - DISPATCH_KEVENT_TIMEOUT_INIT(MACH, BACKGROUND, NOTE_BACKGROUND), -}; -#define DISPATCH_KEVENT_TIMEOUT_COUNT \ - ((sizeof(_dispatch_kevent_timeout) / sizeof(_dispatch_kevent_timeout[0]))) -#if __has_feature(c_static_assert) -_Static_assert(DISPATCH_KEVENT_TIMEOUT_COUNT == DISPATCH_TIMER_INDEX_COUNT - 1, - "should have a kevent for everything but disarm (ddt assumes this)"); -#endif + for (tidx = 0; tidx < count; tidx++) { + if (!(mask & (1u << tidx))) { + continue; + } -#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ - [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC + dt = dth[tidx].dth_min[DTH_TARGET_ID]; + tmp = dt ? dt->dt_timer.target : UINT64_MAX; + if (dth[tidx].dth_target != tmp) { + dth[tidx].dth_target = tmp; + changed = true; + } + dt = dth[tidx].dth_min[DTH_DEADLINE_ID]; + tmp = dt ? 
dt->dt_timer.deadline : UINT64_MAX; + if (dth[tidx].dth_deadline != tmp) { + dth[tidx].dth_deadline = tmp; + changed = true; + } + } + return changed; +} -static const uint64_t _dispatch_kevent_coalescing_window[] = { - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), -}; +static inline void +_dispatch_timers_unregister(dispatch_timer_source_refs_t dt) +{ + uint32_t tidx = dt->du_ident; + dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; -#define _dispatch_timers_insert(tidx, dra, dr, dr_list, dta, dt, dt_list) ({ \ - typeof(dr) dri = NULL; typeof(dt) dti; \ - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ - TAILQ_FOREACH(dri, &dra[tidx].dk_sources, dr_list) { \ - if (ds_timer(dr).target < ds_timer(dri).target) { \ - break; \ - } \ - } \ - TAILQ_FOREACH(dti, &dta[tidx].dt_sources, dt_list) { \ - if (ds_timer(dt).deadline < ds_timer(dti).deadline) { \ - break; \ - } \ - } \ - if (dti) { \ - TAILQ_INSERT_BEFORE(dti, dt, dt_list); \ - } else { \ - TAILQ_INSERT_TAIL(&dta[tidx].dt_sources, dt, dt_list); \ - } \ - } \ - if (dri) { \ - TAILQ_INSERT_BEFORE(dri, dr, dr_list); \ - } else { \ - TAILQ_INSERT_TAIL(&dra[tidx].dk_sources, dr, dr_list); \ - } \ - }) - -#define _dispatch_timers_remove(tidx, dk, dra, dr, dr_list, dta, dt, dt_list) \ - ({ \ - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ - TAILQ_REMOVE(&dta[tidx].dt_sources, dt, dt_list); \ - } \ - TAILQ_REMOVE(dk ? &(*(dk)).dk_sources : &dra[tidx].dk_sources, dr, \ - dr_list); }) - -#define _dispatch_timers_check(dra, dta) ({ \ - unsigned int timerm = _dispatch_timers_mask; \ - bool update = false; \ - unsigned int tidx; \ - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \ - if (!(timerm & (1 << tidx))){ \ - continue; \ - } \ - dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \ - TAILQ_FIRST(&dra[tidx].dk_sources); \ - dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t) \ - TAILQ_FIRST(&dta[tidx].dt_sources); \ - uint64_t target = dr ? ds_timer(dr).target : UINT64_MAX; \ - uint64_t deadline = dr ? 
ds_timer(dt).deadline : UINT64_MAX; \ - if (target != dta[tidx].target) { \ - dta[tidx].target = target; \ - update = true; \ - } \ - if (deadline != dta[tidx].deadline) { \ - dta[tidx].deadline = deadline; \ - update = true; \ - } \ - } \ - update; }) - -static bool _dispatch_timers_reconfigure, _dispatch_timer_expired; -static unsigned int _dispatch_timers_mask; -static bool _dispatch_timers_force_max_leeway; + _dispatch_timer_heap_remove(heap, dt); + _dispatch_timers_reconfigure = true; + _dispatch_timers_processing_mask |= 1 << tidx; + dt->du_wlh = NULL; +} -static void -_dispatch_timers_init(void) +static inline void +_dispatch_timers_register(dispatch_timer_source_refs_t dt, uint32_t tidx) { -#ifndef __LP64__ - unsigned int tidx; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - _dispatch_kevent_timer[tidx].dk_kevent.udata = - DISPATCH_KEVENT_TIMER_UDATA(tidx); - } -#endif // __LP64__ - if (slowpath(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) { - _dispatch_timers_force_max_leeway = true; + dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; + if (_dispatch_unote_registered(dt)) { + dispatch_assert(dt->du_ident == tidx); + _dispatch_timer_heap_update(heap, dt); + } else { + dt->du_ident = tidx; + _dispatch_timer_heap_insert(heap, dt); } + _dispatch_timers_reconfigure = true; + _dispatch_timers_processing_mask |= 1 << tidx; + dt->du_wlh = DISPATCH_WLH_GLOBAL; } -static inline void -_dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_source_timer_tryarm(dispatch_source_t ds) { - dispatch_source_refs_t dr = ds->ds_refs; - unsigned int tidx = (unsigned int)dk->dk_kevent.ident; - - if (slowpath(ds_timer_aggregate(ds))) { - _dispatch_timer_aggregates_unregister(ds, tidx); - } - _dispatch_timers_remove(tidx, dk, _dispatch_kevent_timer, dr, dr_list, - _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - _dispatch_timers_reconfigure = true; - _dispatch_timers_mask |= 1 << tidx; - } + dispatch_queue_flags_t oqf, nqf; + return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { + if (oqf & (DSF_CANCELED | DQF_RELEASED)) { + // do not install a cancelled timer + os_atomic_rmw_loop_give_up(break); + } + nqf = oqf | DSF_ARMED; + }); } // Updates the ordered list of timers based on next fire date for changes to ds. // Should only be called from the context of _dispatch_mgr_q. static void -_dispatch_timers_update(dispatch_source_t ds) +_dispatch_timers_update(dispatch_unote_t du) { - dispatch_kevent_t dk = ds->ds_dkev; - dispatch_source_refs_t dr = ds->ds_refs; - unsigned int tidx; + dispatch_timer_source_refs_t dr = du._dt; + dispatch_source_t ds = _dispatch_source_from_refs(dr); + const char *verb = "updated"; + bool will_register, disarm = false; DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - // Do not reschedule timers unregistered with _dispatch_kevent_unregister() - if (slowpath(!dk)) { + if (unlikely(dr->du_ident == DISPATCH_TIMER_IDENT_CANCELED)) { return; } - // Move timers that are disabled, suspended or have missed intervals to the - // disarmed list, rearm after resume resp. 
source invoke will reenable them - if (!ds_timer(dr).target || DISPATCH_QUEUE_IS_SUSPENDED(ds) || - ds->ds_pending_data) { - tidx = DISPATCH_TIMER_INDEX_DISARM; - _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, - ds->ds_dkev); - } else { - tidx = _dispatch_source_timer_idx(dr); + + // Unregister timers that are unconfigured, disabled, suspended or have + // missed intervals. Rearm after dispatch_set_timer(), resume or source + // invoke will reenable them + will_register = dr->dt_timer.target < INT64_MAX && + !os_atomic_load2o(ds, ds_pending_data, relaxed) && + !DISPATCH_QUEUE_IS_SUSPENDED(ds) && + !os_atomic_load2o(dr, dt_pending_config, relaxed); + if (!_dispatch_unote_registered(dr) && will_register) { + if (unlikely(!_dispatch_source_timer_tryarm(ds))) { + return; + } + verb = "armed"; + } else if (unlikely(_dispatch_unote_registered(dr) && !will_register)) { + disarm = true; + verb = "disarmed"; } - if (slowpath(ds_timer_aggregate(ds))) { - _dispatch_timer_aggregates_register(ds); + + uint32_t tidx = _dispatch_source_timer_idx(dr); + if (unlikely(_dispatch_unote_registered(dr) && + (!will_register || dr->du_ident != tidx))) { + _dispatch_timers_unregister(dr); } - if (slowpath(!ds->ds_is_installed)) { - ds->ds_is_installed = true; - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, - ds->ds_dkev); - } - _dispatch_object_debug(ds, "%s", __func__); - ds->ds_dkev = NULL; - free(dk); - } else { - _dispatch_timers_unregister(ds, dk); + if (likely(will_register)) { + _dispatch_timers_register(dr, tidx); } - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - _dispatch_timers_reconfigure = true; - _dispatch_timers_mask |= 1 << tidx; + + if (disarm) { + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); } - if (dk != &_dispatch_kevent_timer[tidx]){ - ds->ds_dkev = &_dispatch_kevent_timer[tidx]; + _dispatch_debug("kevent-source[%p]: %s timer[%p]", ds, verb, dr); + _dispatch_object_debug(ds, "%s", __func__); +} + +#define DISPATCH_TIMER_MISSED_MARKER 1ul + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +_dispatch_source_timer_compute_missed(dispatch_timer_source_refs_t dt, + uint64_t now, unsigned long prev) +{ + uint64_t missed = (now - dt->dt_timer.target) / dt->dt_timer.interval; + if (++missed + prev > LONG_MAX) { + missed = LONG_MAX - prev; } - _dispatch_timers_insert(tidx, _dispatch_kevent_timer, dr, dr_list, - _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); - if (slowpath(ds_timer_aggregate(ds))) { - _dispatch_timer_aggregates_update(ds, tidx); + if (dt->dt_timer.interval < INT64_MAX) { + uint64_t push_by = missed * dt->dt_timer.interval; + dt->dt_timer.target += push_by; + dt->dt_timer.deadline += push_by; + } else { + dt->dt_timer.target = UINT64_MAX; + dt->dt_timer.deadline = UINT64_MAX; } + prev += missed; + return prev; +} + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +_dispatch_source_timer_data(dispatch_source_t ds, dispatch_unote_t du) +{ + dispatch_timer_source_refs_t dr = du._dt; + unsigned long data, prev, clear_prev = 0; + + os_atomic_rmw_loop2o(ds, ds_pending_data, prev, clear_prev, relaxed, { + data = prev >> 1; + if (unlikely(prev & DISPATCH_TIMER_MISSED_MARKER)) { + os_atomic_rmw_loop_give_up(goto handle_missed_intervals); + } + }); + return data; + +handle_missed_intervals: + // The timer may be in _dispatch_source_invoke2() already for other + // reasons 
such as running the registration handler when ds_pending_data + // is changed by _dispatch_timers_run2() without holding the drain lock. + // + // We hence need dependency ordering to pair with the release barrier + // done by _dispatch_timers_run2() when setting the MISSED_MARKER bit. + os_atomic_thread_fence(dependency); + dr = os_atomic_force_dependency_on(dr, data); + + uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident)); + if (now >= dr->dt_timer.target) { + OS_COMPILER_CAN_ASSUME(dr->dt_timer.interval < INT64_MAX); + data = _dispatch_source_timer_compute_missed(dr, now, data); + } + + // When we see the MISSED_MARKER the manager has given up on this timer + // and expects the handler to call "resume". + // + // However, it may not have reflected this into the atomic flags yet + // so make sure _dispatch_source_invoke2() sees the timer is disarmed + // + // The subsequent _dispatch_source_refs_resume() will enqueue the source + // on the manager and make the changes to `ds_timer` above visible. + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); + os_atomic_store2o(ds, ds_pending_data, 0, relaxed); + return data; } static inline void -_dispatch_timers_run2(uint64_t nows[], unsigned int tidx) +_dispatch_timers_run2(dispatch_clock_now_cache_t nows, uint32_t tidx) { - dispatch_source_refs_t dr; + dispatch_timer_source_refs_t dr; dispatch_source_t ds; - uint64_t now, missed; + uint64_t data, pending_data; + uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); - now = _dispatch_source_timer_now(nows, tidx); - while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) { + while ((dr = _dispatch_timers_heap[tidx].dth_min[DTH_TARGET_ID])) { + dispatch_assert(tidx == dr->du_ident && dr->dt_timer.target); ds = _dispatch_source_from_refs(dr); - // We may find timers on the wrong list due to a pending update from - // dispatch_source_set_timer. Force an update of the list in that case. - if (tidx != ds->ds_ident_hack) { - _dispatch_timers_update(ds); - continue; - } - if (!ds_timer(dr).target) { - // No configured timers on the list - break; - } - if (ds_timer(dr).target > now) { + if (dr->dt_timer.target > now) { // Done running timers for now. break; } - // Remove timers that are suspended or have missed intervals from the - // list, rearm after resume resp. source invoke will reenable them - if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) { - _dispatch_timers_update(ds); + if (dr->du_fflags & DISPATCH_TIMER_AFTER) { + _dispatch_trace_timer_fire(dr, 1, 1); + _dispatch_source_merge_evt(dr, EV_ONESHOT, 1, 0, 0); + _dispatch_debug("kevent-source[%p]: fired after timer[%p]", ds, dr); + _dispatch_object_debug(ds, "%s", __func__); continue; } - // Calculate number of missed intervals. 
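The missed-interval bookkeeping introduced by `_dispatch_source_timer_compute_missed()` boils down to the arithmetic below. This is a self-contained sketch under stated assumptions — the struct and function names are hypothetical and only the finite-interval case is shown:

```c
#include <stdint.h>
#include <limits.h>

// A repeating timer with a finite interval; all values use the same clock.
struct periodic_timer {
	uint64_t target;    // next fire date
	uint64_t deadline;  // target plus leeway
	uint64_t interval;  // repeat period, 0 < interval < INT64_MAX
};

// Caller guarantees now >= t->target (the timer has fired at least once).
// Count the elapsed intervals, fold them into the previously accumulated
// count `prev` (saturating at LONG_MAX), and push target/deadline forward
// so that the next fire date is strictly in the future.
static unsigned long
timer_compute_missed(struct periodic_timer *t, uint64_t now, unsigned long prev)
{
	uint64_t missed = (now - t->target) / t->interval + 1;
	if (missed + prev > LONG_MAX) {
		missed = LONG_MAX - prev;
	}
	uint64_t push_by = missed * t->interval;
	t->target += push_by;
	t->deadline += push_by;
	return prev + (unsigned long)missed;
}
```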
- missed = (now - ds_timer(dr).target) / ds_timer(dr).interval; - if (++missed > INT_MAX) { - missed = INT_MAX; + + _dispatch_retain(ds); + data = os_atomic_load2o(ds, ds_pending_data, relaxed); + if (unlikely(data)) { + // the release barrier is required to make the changes + // to `ds_timer` visible to _dispatch_source_timer_data() + if (os_atomic_cmpxchg2o(ds, ds_pending_data, data, + data | DISPATCH_TIMER_MISSED_MARKER, release)) { + _dispatch_timers_update(dr); + _dispatch_release(ds); + continue; + } } - if (ds_timer(dr).interval < INT64_MAX) { - ds_timer(dr).target += missed * ds_timer(dr).interval; - ds_timer(dr).deadline = ds_timer(dr).target + ds_timer(dr).leeway; + + data = _dispatch_source_timer_compute_missed(dr, now, 0); + _dispatch_timers_update(dr); + pending_data = data << 1; + if (!_dispatch_unote_registered(dr) && dr->dt_timer.target < INT64_MAX){ + // if we unregistered because of suspension we have to fake we + // missed events. + pending_data |= DISPATCH_TIMER_MISSED_MARKER; + os_atomic_store2o(ds, ds_pending_data, pending_data, release); } else { - ds_timer(dr).target = UINT64_MAX; - ds_timer(dr).deadline = UINT64_MAX; - } - _dispatch_timers_update(ds); - ds_timer(dr).last_fire = now; - - unsigned long data; - data = os_atomic_add2o(ds, ds_pending_data, - (unsigned long)missed, relaxed); - _dispatch_trace_timer_fire(dr, data, (unsigned long)missed); - dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); - if (ds_timer(dr).flags & DISPATCH_TIMER_AFTER) { - _dispatch_source_kevent_unregister(ds); + os_atomic_store2o(ds, ds_pending_data, pending_data, relaxed); } + _dispatch_trace_timer_fire(dr, data, data); + _dispatch_debug("kevent-source[%p]: fired timer[%p]", ds, dr); + _dispatch_object_debug(ds, "%s", __func__); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME); } } DISPATCH_NOINLINE static void -_dispatch_timers_run(uint64_t nows[]) +_dispatch_timers_run(dispatch_clock_now_cache_t nows) { - unsigned int tidx; + uint32_t tidx; for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - if (!TAILQ_EMPTY(&_dispatch_kevent_timer[tidx].dk_sources)) { + if (_dispatch_timers_heap[tidx].dth_count) { _dispatch_timers_run2(nows, tidx); } } } -static inline unsigned int -_dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], - uint64_t *delay, uint64_t *leeway, int qos, int kind) +#if DISPATCH_HAVE_TIMER_COALESCING +#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ + [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC + +static const uint64_t _dispatch_kevent_coalescing_window[] = { + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), +#if DISPATCH_HAVE_TIMER_QOS + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), +#endif +}; +#endif // DISPATCH_HAVE_TIMER_COALESCING + +static inline dispatch_timer_delay_s +_dispatch_timers_get_delay(dispatch_timer_heap_t dth, dispatch_clock_t clock, + uint32_t qos, dispatch_clock_now_cache_t nows) { - unsigned int tidx, ridx = DISPATCH_TIMER_COUNT; - uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX; + uint64_t target = dth->dth_target, deadline = dth->dth_deadline; + uint64_t delta = INT64_MAX, dldelta = INT64_MAX; + dispatch_timer_delay_s rc; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){ - continue; - } - if (kind >= 0 && kind != DISPATCH_TIMER_KIND(tidx)){ - continue; - } - uint64_t target = timer[tidx].target; - if (target == UINT64_MAX) { - continue; - } - uint64_t 
deadline = timer[tidx].deadline; - if (qos >= 0) { - // Timer pre-coalescing - uint64_t window = _dispatch_kevent_coalescing_window[qos]; - uint64_t latest = deadline > window ? deadline - window : 0; - dispatch_source_refs_t dri; - TAILQ_FOREACH(dri, &_dispatch_kevent_timer[tidx].dk_sources, - dr_list) { - tmp = ds_timer(dri).target; - if (tmp > latest) break; - target = tmp; - } - } - uint64_t now = _dispatch_source_timer_now(nows, tidx); - if (target <= now) { - delta = 0; - break; - } - tmp = target - now; - if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { - tmp = _dispatch_time_mach2nano(tmp); - } - if (tmp < INT64_MAX && tmp < delta) { - ridx = tidx; - delta = tmp; - } - dispatch_assert(target <= deadline); - tmp = deadline - now; - if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { - tmp = _dispatch_time_mach2nano(tmp); - } - if (tmp < INT64_MAX && tmp < dldelta) { - dldelta = tmp; - } + dispatch_assert(target <= deadline); + if (delta == 0 || target >= INT64_MAX) { + goto done; } - *delay = delta; - *leeway = delta && delta < UINT64_MAX ? dldelta - delta : UINT64_MAX; - return ridx; -} + if (qos < DISPATCH_TIMER_QOS_COUNT && dth->dth_count > 2) { +#if DISPATCH_HAVE_TIMER_COALESCING + // Timer pre-coalescing + // When we have several timers with this target/deadline bracket: + // + // Target window Deadline + // V <-------V + // t1: [...........|.................] + // t2: [......|.......] + // t3: [..|..........] + // t4: | [.............] + // ^ + // Optimal Target + // + // Coalescing works better if the Target is delayed to "Optimal", by + // picking the latest target that isn't too close to the deadline. + uint64_t window = _dispatch_kevent_coalescing_window[qos]; + if (target + window < deadline) { + uint64_t latest = deadline - window; + target = _dispatch_timer_heap_max_target_before(dth, latest); + } +#endif + } -#ifdef __linux__ -// in linux we map the _dispatch_kevent_qos_s to struct kevent instead -// of struct kevent64. We loose the kevent.ext[] members and the time -// out is based on relavite msec based time vs. absolute nsec based time. -// For now we make the adjustments right here until the solution -// to either extend libkqueue with a proper kevent64 API or removing kevent -// all together and move to a lower API (e.g. epoll or kernel_module. -// Also leeway is ignored. 
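The pre-coalescing rule used by the new `_dispatch_timers_get_delay()` — delay the earliest target to the latest pending target that still leaves the full coalescing window before the earliest deadline — can be illustrated with a standalone helper. This is a linear-scan sketch with hypothetical names, not the bounded heap walk the patch actually performs:

```c
#include <stdint.h>
#include <stddef.h>

// Given the earliest target and deadline among the pending timers and the
// coalescing window for this QoS class, return the delayed ("optimal")
// target: the latest pending target that is still at least `window` ahead
// of the earliest deadline.
static uint64_t
coalesced_target(uint64_t earliest_target, uint64_t earliest_deadline,
		uint64_t window, const uint64_t *targets, size_t count)
{
	uint64_t best = earliest_target;
	if (earliest_target + window >= earliest_deadline) {
		return best;              // no room to slide the timer at all
	}
	uint64_t latest = earliest_deadline - window;
	for (size_t i = 0; i < count; i++) {
		if (targets[i] <= latest && targets[i] > best) {
			best = targets[i];    // latest target that is not too late
		}
	}
	return best;
}
```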
+ uint64_t now = _dispatch_time_now_cached(clock, nows); + if (target <= now) { + delta = 0; + dldelta = 0; + goto done; + } -static void -_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, - uint64_t leeway, uint64_t nows[]) -{ - // call to update nows[] - _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); -#ifdef KEVENT_NSEC_NOT_SUPPORTED - // adjust nsec based delay to msec based and ignore leeway - delay /= 1000000L; - if ((int64_t)(delay) <= 0) { - delay = 1; // if value <= 0 the dispatch will stop + uint64_t tmp = target - now; + if (clock != DISPATCH_CLOCK_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < delta) { + delta = tmp; } -#else - ke->fflags |= NOTE_NSECONDS; -#endif - ke->data = (int64_t)delay; -} -#else -static void -_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, - uint64_t leeway, uint64_t nows[]) -{ - delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); - if (slowpath(_dispatch_timers_force_max_leeway)) { - ke->data = (int64_t)(delay + leeway); - ke->ext[1] = 0; - } else { - ke->data = (int64_t)delay; - ke->ext[1] = leeway; + tmp = deadline - now; + if (clock != DISPATCH_CLOCK_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < dldelta) { + dldelta = tmp; } + +done: + rc.delay = delta; + rc.leeway = delta < INT64_MAX ? dldelta - delta : INT64_MAX; + return rc; } -#endif // __linux__ static bool -_dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, - unsigned int tidx) +_dispatch_timers_program2(dispatch_clock_now_cache_t nows, uint32_t tidx) { - bool poll; - uint64_t delay, leeway; - - _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, - (int)DISPATCH_TIMER_QOS(tidx), (int)DISPATCH_TIMER_KIND(tidx)); - poll = (delay == 0); - if (poll || delay == UINT64_MAX) { - _dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx)); - if (!ke->data) { - return poll; - } - ke->data = 0; - ke->flags |= EV_DELETE; - ke->flags &= ~(EV_ADD|EV_ENABLE); - } else { - _dispatch_trace_next_timer_set( - TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), DISPATCH_TIMER_QOS(tidx)); - _dispatch_trace_next_timer_program(delay, DISPATCH_TIMER_QOS(tidx)); - _dispatch_kevent_timer_set_delay(ke, delay, leeway, nows); - ke->flags |= EV_ADD|EV_ENABLE; - ke->flags &= ~EV_DELETE; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + uint32_t qos = DISPATCH_TIMER_QOS(tidx); + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); + dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; + dispatch_timer_delay_s range; + + range = _dispatch_timers_get_delay(heap, clock, qos, nows); + if (range.delay == 0 || range.delay >= INT64_MAX) { + _dispatch_trace_next_timer_set(NULL, qos); + if (heap->dth_flags & DTH_ARMED) { + _dispatch_event_loop_timer_delete(tidx); } -#endif + return range.delay == 0; } - _dispatch_kq_deferred_update(ke); - return poll; + + _dispatch_trace_next_timer_set(heap->dth_min[DTH_TARGET_ID], qos); + _dispatch_trace_next_timer_program(range.delay, qos); + _dispatch_event_loop_timer_arm(tidx, range, nows); + return false; } DISPATCH_NOINLINE static bool -_dispatch_timers_program(uint64_t nows[]) +_dispatch_timers_program(dispatch_clock_now_cache_t nows) { bool poll = false; - unsigned int tidx, timerm = _dispatch_timers_mask; - for (tidx = 0; tidx < DISPATCH_KEVENT_TIMEOUT_COUNT; tidx++) { - if (!(timerm & 1 << tidx)){ - continue; + uint32_t tidx, timerm = 
_dispatch_timers_processing_mask; + + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + if (timerm & (1 << tidx)) { + poll |= _dispatch_timers_program2(nows, tidx); } - poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[tidx], - tidx); } return poll; } @@ -2424,581 +2222,92 @@ DISPATCH_NOINLINE static bool _dispatch_timers_configure(void) { - _dispatch_timer_aggregates_check(); // Find out if there is a new target/deadline on the timer lists - return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer); -} - -#if HAVE_MACH -static void -_dispatch_timers_calendar_change(void) -{ - unsigned int qos; - - // calendar change may have gone past the wallclock deadline - _dispatch_timer_expired = true; - for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { - _dispatch_timers_mask |= - 1 << DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_WALL, qos); - } -} -#endif - -static void -_dispatch_timers_kevent(_dispatch_kevent_qos_s *ke) -{ - dispatch_assert(ke->data > 0); - dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == - DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); - unsigned int tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; - dispatch_assert(tidx < DISPATCH_KEVENT_TIMEOUT_COUNT); - dispatch_assert(_dispatch_kevent_timeout[tidx].data != 0); - _dispatch_kevent_timeout[tidx].data = 0; // kevent deleted via EV_ONESHOT - _dispatch_timer_expired = true; - _dispatch_timers_mask |= 1 << tidx; - _dispatch_trace_next_timer_wake(DISPATCH_TIMER_QOS(tidx)); + return _dispatch_timer_heap_has_new_min(_dispatch_timers_heap, + countof(_dispatch_timers_heap), _dispatch_timers_processing_mask); } static inline bool _dispatch_mgr_timers(void) { - uint64_t nows[DISPATCH_TIMER_KIND_COUNT] = {}; - bool expired = slowpath(_dispatch_timer_expired); - if (expired) { - _dispatch_timers_run(nows); + dispatch_clock_now_cache_s nows = { }; + bool expired = _dispatch_timers_expired; + if (unlikely(expired)) { + _dispatch_timers_run(&nows); } - bool reconfigure = slowpath(_dispatch_timers_reconfigure); - if (reconfigure || expired) { + _dispatch_mgr_trace_timers_wakes(); + bool reconfigure = _dispatch_timers_reconfigure; + if (unlikely(reconfigure || expired)) { if (reconfigure) { reconfigure = _dispatch_timers_configure(); _dispatch_timers_reconfigure = false; } if (reconfigure || expired) { - expired = _dispatch_timer_expired = _dispatch_timers_program(nows); - expired = expired || _dispatch_mgr_q.dq_items_tail; + expired = _dispatch_timers_expired = _dispatch_timers_program(&nows); } - _dispatch_timers_mask = 0; + _dispatch_timers_processing_mask = 0; } return expired; } #pragma mark - -#pragma mark dispatch_timer_aggregate - -typedef struct { - TAILQ_HEAD(, dispatch_timer_source_aggregate_refs_s) dk_sources; -} dispatch_timer_aggregate_refs_s; - -typedef struct dispatch_timer_aggregate_s { - DISPATCH_QUEUE_HEADER(queue); - TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list; - dispatch_timer_aggregate_refs_s - dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT]; - struct { - DISPATCH_TIMER_STRUCT(dispatch_timer_source_aggregate_refs_s); - } dta_timer[DISPATCH_TIMER_COUNT]; - struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT]; - unsigned int dta_refcount; -} DISPATCH_QUEUE_ALIGN dispatch_timer_aggregate_s; - -typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s; -static dispatch_timer_aggregates_s _dispatch_timer_aggregates = - TAILQ_HEAD_INITIALIZER(_dispatch_timer_aggregates); - -dispatch_timer_aggregate_t -dispatch_timer_aggregate_create(void) -{ - 
unsigned int tidx; - dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue), - sizeof(struct dispatch_timer_aggregate_s)); - _dispatch_queue_init(dta->_as_dq, DQF_NONE, - DISPATCH_QUEUE_WIDTH_MAX, false); - dta->do_targetq = _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_USER_INITIATED, true); - //FIXME: aggregates need custom vtable - //dta->dq_label = "timer-aggregate"; - for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) { - TAILQ_INIT(&dta->dta_kevent_timer[tidx].dk_sources); - } - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - TAILQ_INIT(&dta->dta_timer[tidx].dt_sources); - dta->dta_timer[tidx].target = UINT64_MAX; - dta->dta_timer[tidx].deadline = UINT64_MAX; - dta->dta_timer_data[tidx].target = UINT64_MAX; - dta->dta_timer_data[tidx].deadline = UINT64_MAX; - } - return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create( - dta->_as_dq); -} - -typedef struct dispatch_timer_delay_s { - dispatch_timer_t timer; - uint64_t delay, leeway; -} *dispatch_timer_delay_t; - -static void -_dispatch_timer_aggregate_get_delay(void *ctxt) -{ - dispatch_timer_delay_t dtd = ctxt; - struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {}; - _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway, - -1, -1); -} - -uint64_t -dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta, - uint64_t *leeway_ptr) -{ - struct dispatch_timer_delay_s dtd = { - .timer = dta->dta_timer_data, - }; - dispatch_sync_f(dta->_as_dq, &dtd, _dispatch_timer_aggregate_get_delay); - if (leeway_ptr) { - *leeway_ptr = dtd.leeway; - } - return dtd.delay; -} +#pragma mark dispatch_mgr -static void -_dispatch_timer_aggregate_update(void *ctxt) +void +_dispatch_mgr_queue_wakeup(dispatch_queue_t dq, + dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { - dispatch_timer_aggregate_t dta = (void*)_dispatch_queue_get_current(); - dispatch_timer_t dtau = ctxt; - unsigned int tidx; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - dta->dta_timer_data[tidx].target = dtau[tidx].target; - dta->dta_timer_data[tidx].deadline = dtau[tidx].deadline; + if (flags & DISPATCH_WAKEUP_FLUSH) { + os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); } - free(dtau); -} -DISPATCH_NOINLINE -static void -_dispatch_timer_aggregates_configure(void) -{ - dispatch_timer_aggregate_t dta; - dispatch_timer_t dtau; - TAILQ_FOREACH(dta, &_dispatch_timer_aggregates, dta_list) { - if (!_dispatch_timers_check(dta->dta_kevent_timer, dta->dta_timer)) { - continue; - } - dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau)); - memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer)); - _dispatch_barrier_async_detached_f(dta->_as_dq, dtau, - _dispatch_timer_aggregate_update); + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + return; } -} -static inline void -_dispatch_timer_aggregates_check(void) -{ - if (fastpath(TAILQ_EMPTY(&_dispatch_timer_aggregates))) { + if (!_dispatch_queue_class_probe(&_dispatch_mgr_q)) { return; } - _dispatch_timer_aggregates_configure(); + + _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, qos, 0); } +#if DISPATCH_USE_MGR_THREAD +DISPATCH_NOINLINE static void -_dispatch_timer_aggregates_register(dispatch_source_t ds) +_dispatch_mgr_init(void) { - dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); - if (!dta->dta_refcount++) { - TAILQ_INSERT_TAIL(&_dispatch_timer_aggregates, dta, dta_list); + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + _dispatch_queue_set_current(&_dispatch_mgr_q); + if 
(_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, + DISPATCH_INVOKE_STEALING, NULL) != owned) { + DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); } + _dispatch_mgr_priority_init(); + _dispatch_event_loop_init(); } -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_NORETURN static void -_dispatch_timer_aggregates_update(dispatch_source_t ds, unsigned int tidx) -{ - dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); - dispatch_timer_source_aggregate_refs_t dr; - dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs; - _dispatch_timers_insert(tidx, dta->dta_kevent_timer, dr, dra_list, - dta->dta_timer, dr, dta_list); -} - -DISPATCH_NOINLINE -static void -_dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx) -{ - dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); - dispatch_timer_source_aggregate_refs_t dr; - dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs; - _dispatch_timers_remove(tidx, (dispatch_timer_aggregate_refs_s*)NULL, - dta->dta_kevent_timer, dr, dra_list, dta->dta_timer, dr, dta_list); - if (!--dta->dta_refcount) { - TAILQ_REMOVE(&_dispatch_timer_aggregates, dta, dta_list); - } -} - -#pragma mark - -#pragma mark dispatch_kqueue - -static int _dispatch_kq; - -#if DISPATCH_DEBUG_QOS && DISPATCH_USE_KEVENT_WORKQUEUE -#define _dispatch_kevent_assert_valid_qos(ke) ({ \ - if (_dispatch_kevent_workqueue_enabled) { \ - const _dispatch_kevent_qos_s *_ke = (ke); \ - if (_ke->flags & (EV_ADD|EV_ENABLE)) { \ - _dispatch_assert_is_valid_qos_class(\ - (pthread_priority_t)_ke->qos); \ - dispatch_assert(_ke->qos); \ - } \ - } \ - }) -#else -#define _dispatch_kevent_assert_valid_qos(ke) ((void)ke) -#endif - - -static void -_dispatch_kq_init(void *context DISPATCH_UNUSED) -{ - _dispatch_fork_becomes_unsafe(); -#if DISPATCH_USE_KEVENT_WORKQUEUE - _dispatch_kevent_workqueue_init(); - if (_dispatch_kevent_workqueue_enabled) { - int r; - const _dispatch_kevent_qos_s kev[] = { - [0] = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, - }, - [1] = { - .ident = 1, - .filter = EVFILT_USER, - .fflags = NOTE_TRIGGER, - }, - }; - _dispatch_kq = -1; -retry: - r = kevent_qos(-1, kev, 2, NULL, 0, NULL, NULL, - KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - default: - DISPATCH_CLIENT_CRASH(err, - "Failed to initalize workqueue kevent"); - break; - } - } - return; - } -#endif // DISPATCH_USE_KEVENT_WORKQUEUE -#if DISPATCH_USE_MGR_THREAD - static const _dispatch_kevent_qos_s kev = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - }; - - _dispatch_fork_becomes_unsafe(); -#if DISPATCH_USE_GUARDED_FD - guardid_t guard = (uintptr_t)&kev; - _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); -#else - _dispatch_kq = kqueue(); -#endif - if (_dispatch_kq == -1) { - int err = errno; - switch (err) { - case EMFILE: - DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " - "process is out of file descriptors"); - break; - case ENFILE: - DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " - "system is out of file descriptors"); - break; - case ENOMEM: - DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " - "kernel is out of memory"); - break; - default: - DISPATCH_INTERNAL_CRASH(err, "kqueue() failure"); - break; - } - } - (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL, - NULL, 0)); - _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); -#endif // 
DISPATCH_USE_MGR_THREAD -} - -DISPATCH_NOINLINE -static long -_dispatch_kq_update(const _dispatch_kevent_qos_s *ke, int n) -{ - int i, r; - _dispatch_kevent_qos_s kev_error[n]; - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_kq_init); - - for (i = 0; i < n; i++) { - if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { - _dispatch_kevent_debug_n("updating", ke + i, i, n); - } - } - - unsigned int flags = KEVENT_FLAG_ERROR_EVENTS; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - flags |= KEVENT_FLAG_WORKQ; - } -#endif - -retry: - r = kevent_qos(_dispatch_kq, ke, n, kev_error, n, NULL, NULL, flags); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; - } - return err; - } - for (i = 0, n = r; i < n; i++) { - if (kev_error[i].flags & EV_ERROR) { - _dispatch_kevent_debug("returned error", &kev_error[i]); - _dispatch_kevent_drain(&kev_error[i]); - r = (int)kev_error[i].data; - } else { - _dispatch_kevent_mgr_debug(&kev_error[i]); - r = 0; - } - } - return r; -} - -DISPATCH_ALWAYS_INLINE -static void -_dispatch_kq_update_all(const _dispatch_kevent_qos_s *kev, int n) -{ - (void)_dispatch_kq_update(kev, n); -} - -DISPATCH_ALWAYS_INLINE -static long -_dispatch_kq_update_one(const _dispatch_kevent_qos_s *kev) -{ - return _dispatch_kq_update(kev, 1); -} - -static inline bool -_dispatch_kevent_maps_to_same_knote(const _dispatch_kevent_qos_s *e1, - const _dispatch_kevent_qos_s *e2) -{ - return e1->filter == e2->filter && - e1->ident == e2->ident && - e1->udata == e2->udata; -} - -static inline int -_dispatch_deferred_event_find_slot(dispatch_deferred_items_t ddi, - const _dispatch_kevent_qos_s *ke) -{ - _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; - int i; - - for (i = 0; i < ddi->ddi_nevents; i++) { - if (_dispatch_kevent_maps_to_same_knote(&events[i], ke)) { - break; - } - } - return i; -} - -static void -_dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke) -{ - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - int slot; - - _dispatch_kevent_assert_valid_qos(ke); - if (ddi) { - if (unlikely(ddi->ddi_nevents == ddi->ddi_maxevents)) { - _dispatch_deferred_items_set(NULL); - _dispatch_kq_update_all(ddi->ddi_eventlist, ddi->ddi_nevents); - ddi->ddi_nevents = 0; - _dispatch_deferred_items_set(ddi); - } - if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { - _dispatch_kevent_debug("deferred", ke); - } - bool needs_enable = false; - slot = _dispatch_deferred_event_find_slot(ddi, ke); - if (slot == ddi->ddi_nevents) { - ddi->ddi_nevents++; - } else if (ke->flags & EV_DELETE) { - // when deleting and an enable is pending, - // we must merge EV_ENABLE to do an immediate deletion - needs_enable = (ddi->ddi_eventlist[slot].flags & EV_ENABLE); - } - ddi->ddi_eventlist[slot] = *ke; - if (needs_enable) { - ddi->ddi_eventlist[slot].flags |= EV_ENABLE; - } - } else { - _dispatch_kq_update_one(ke); - } -} - -static long -_dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke) -{ - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - int slot, last; - - _dispatch_kevent_assert_valid_qos(ke); - if (ddi) { - _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; - slot = _dispatch_deferred_event_find_slot(ddi, ke); - if (slot < ddi->ddi_nevents) { - // when deleting and an enable is pending, - // we must merge 
EV_ENABLE to do an immediate deletion - if ((ke->flags & EV_DELETE) && (events[slot].flags & EV_ENABLE)) { - ke->flags |= EV_ENABLE; - } - last = --ddi->ddi_nevents; - if (slot != last) { - events[slot] = events[last]; - } - } - } - return _dispatch_kq_update_one(ke); -} - -#pragma mark - -#pragma mark dispatch_mgr - -DISPATCH_NOINLINE -static void -_dispatch_mgr_queue_poke(dispatch_queue_t dq DISPATCH_UNUSED, - pthread_priority_t pp DISPATCH_UNUSED) -{ - static const _dispatch_kevent_qos_s kev = { - .ident = 1, - .filter = EVFILT_USER, - .fflags = NOTE_TRIGGER, - }; - -#if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG - _dispatch_debug("waking up the dispatch manager queue: %p", dq); -#endif - _dispatch_kq_deferred_update(&kev); -} - -void -_dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, - dispatch_wakeup_flags_t flags) -{ - if (flags & DISPATCH_WAKEUP_FLUSH) { - os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); - } - - if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { - return; - } - - if (!_dispatch_queue_class_probe(&_dispatch_mgr_q)) { - return; - } - - _dispatch_mgr_queue_poke(dq, pp); -} - -DISPATCH_NOINLINE -static void -_dispatch_event_init(void) -{ - _dispatch_kevent_init(); - _dispatch_timers_init(); -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - _dispatch_mach_recv_msg_buf_init(); -#endif - _dispatch_memorypressure_init(); - _voucher_activity_debug_channel_init(); -} - -#if DISPATCH_USE_MGR_THREAD -DISPATCH_NOINLINE -static void -_dispatch_mgr_init(void) -{ - uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - _dispatch_queue_set_current(&_dispatch_mgr_q); - if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, - DISPATCH_INVOKE_STEALING, NULL) != owned) { - DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); - } - _dispatch_mgr_priority_init(); - _dispatch_event_init(); -} - -DISPATCH_NOINLINE -static bool -_dispatch_mgr_wait_for_event(dispatch_deferred_items_t ddi, bool poll) -{ - int r; - dispatch_assert((size_t)ddi->ddi_maxevents < countof(ddi->ddi_eventlist)); - -retry: - r = kevent_qos(_dispatch_kq, ddi->ddi_eventlist, ddi->ddi_nevents, - ddi->ddi_eventlist + ddi->ddi_maxevents, 1, NULL, NULL, - poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; - } - } - ddi->ddi_nevents = 0; - return r > 0; -} - -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_mgr_invoke(void) +_dispatch_mgr_invoke(void) { dispatch_deferred_items_s ddi; bool poll; - ddi.ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; - ddi.ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + ddi.ddi_stashed_pri = DISPATCH_PRIORITY_NOSTASH; + ddi.ddi_stashed_dq = NULL; + ddi.ddi_stashed_rq = NULL; +#if DISPATCH_EVENT_BACKEND_KEVENT ddi.ddi_nevents = 0; - ddi.ddi_maxevents = 1; - +#endif + dispatch_assert(_dispatch_get_wlh() == DISPATCH_WLH_GLOBAL); _dispatch_deferred_items_set(&ddi); for (;;) { _dispatch_mgr_queue_drain(); poll = _dispatch_mgr_timers(); poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q); - if (_dispatch_mgr_wait_for_event(&ddi, poll)) { - _dispatch_kevent_qos_s *ke = ddi.ddi_eventlist + ddi.ddi_maxevents; - _dispatch_kevent_debug("received", ke); - _dispatch_kevent_drain(ke); - } + _dispatch_event_loop_drain(poll ? 
KEVENT_FLAG_IMMEDIATE : 0); } } #endif // DISPATCH_USE_MGR_THREAD @@ -3006,6 +2315,7 @@ _dispatch_mgr_invoke(void) DISPATCH_NORETURN void _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_invoke_context_t dic DISPATCH_UNUSED, dispatch_invoke_flags_t flags DISPATCH_UNUSED) { #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -3024,18 +2334,25 @@ _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, #if DISPATCH_USE_KEVENT_WORKQUEUE -#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((pthread_priority_t)(~0ul)) +#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((dispatch_priority_t)~0u) + +_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= + DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + "our list should not be longer than the kernel's"); DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) +static inline dispatch_priority_t +_dispatch_wlh_worker_thread_init(dispatch_wlh_t wlh, + dispatch_deferred_items_t ddi) { + dispatch_assert(wlh); uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + dispatch_priority_t old_dbp; - ddi->ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; + ddi->ddi_stashed_pri = DISPATCH_PRIORITY_NOSTASH; + ddi->ddi_stashed_dq = NULL; + ddi->ddi_stashed_rq = NULL; ddi->ddi_nevents = 0; - ddi->ddi_maxevents = countof(ddi->ddi_eventlist); - ddi->ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; pthread_priority_t pp = _dispatch_get_priority(); if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { @@ -3046,10 +2363,19 @@ _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) // Also add the NEEDS_UNBIND flag so that // _dispatch_priority_compute_update knows it has to unbind pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; - pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + if (wlh == DISPATCH_WLH_GLOBAL) { + pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + } else { + // pthread sets the flag when it is an event delivery thread + // so we need to explicitly clear it + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + } _dispatch_thread_setspecific(dispatch_priority_key, - (void *)(uintptr_t)pp); - ddi->ddi_stashed_pp = 0; + (void *)(uintptr_t)pp); + ddi->ddi_stashed_pri = 0; + if (wlh != DISPATCH_WLH_GLOBAL) { + _dispatch_debug("wlh[%p]: handling events", wlh); + } return DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER; } @@ -3076,8 +2402,7 @@ _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); // ensure kevents registered from this thread are registered at manager QoS - pthread_priority_t old_dp = _dispatch_set_defaultpriority( - (pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, NULL); + old_dbp = _dispatch_set_basepri(DISPATCH_PRIORITY_FLAG_MANAGER); _dispatch_queue_set_current(&_dispatch_mgr_q); if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, DISPATCH_INVOKE_STEALING, NULL) != owned) { @@ -3086,3756 +2411,142 @@ _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) static int event_thread_init; if (!event_thread_init) { event_thread_init = 1; - _dispatch_event_init(); + _dispatch_event_loop_init(); } - return old_dp; + return old_dbp; } DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_kevent_worker_thread_reset(pthread_priority_t old_dp) +_dispatch_wlh_worker_thread_reset(dispatch_priority_t old_dbp) { dispatch_queue_t dq = &_dispatch_mgr_q; - uint64_t orig_dq_state; + uint64_t orig_dq_state = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - 
_dispatch_queue_drain_unlock(dq, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED, - &orig_dq_state); - _dispatch_reset_defaultpriority(old_dp); + orig_dq_state = _dispatch_queue_drain_unlock(dq, orig_dq_state); + _dispatch_reset_basepri(old_dbp); _dispatch_queue_set_current(NULL); return _dq_state_is_dirty(orig_dq_state); } -DISPATCH_NOINLINE -void -_dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, int *nevents) +DISPATCH_ALWAYS_INLINE +static void +_dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t *events, + int *nevents) { _dispatch_introspection_thread_add(); - if (!events && !nevents) { - // events for worker thread request have already been delivered earlier - return; - } - - _dispatch_kevent_qos_s *ke = *events; + dispatch_kevent_t ke = *events; + DISPATCH_PERF_MON_VAR int n = *nevents; if (!dispatch_assume(n) || !dispatch_assume(*events)) return; dispatch_deferred_items_s ddi; - pthread_priority_t old_dp = _dispatch_kevent_worker_thread_init(&ddi); - - _dispatch_deferred_items_set(&ddi); - for (int i = 0; i < n; i++) { - _dispatch_kevent_debug("received", ke); - _dispatch_kevent_drain(ke++); + dispatch_priority_t old_dbp = _dispatch_wlh_worker_thread_init(wlh, &ddi); + if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { + _dispatch_perfmon_start_impl(true); + } else { + dispatch_assert(wlh == DISPATCH_WLH_GLOBAL); + wlh = DISPATCH_WLH_GLOBAL; } + _dispatch_set_wlh(wlh); + _dispatch_deferred_items_set(&ddi); + _dispatch_event_loop_merge(ke, n); - if (old_dp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { + if (old_dbp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { _dispatch_mgr_queue_drain(); bool poll = _dispatch_mgr_timers(); - if (_dispatch_kevent_worker_thread_reset(old_dp)) { + if (_dispatch_wlh_worker_thread_reset(old_dbp)) { poll = true; } - if (poll) _dispatch_mgr_queue_poke(&_dispatch_mgr_q, 0); - } - _dispatch_deferred_items_set(NULL); - - if (ddi.ddi_stashed_pp & _PTHREAD_PRIORITY_PRIORITY_MASK) { - *nevents = 0; - if (ddi.ddi_nevents) { - _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents); + if (poll) _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); + } else if (ddi.ddi_stashed_dq) { + if (wlh == DISPATCH_WLH_GLOBAL) { + if (ddi.ddi_nevents) _dispatch_event_loop_update(); + _dispatch_deferred_items_set(NULL); + } else { + ddi.ddi_stashed_pri = DISPATCH_PRIORITY_NOSTASH; } - ddi.ddi_stashed_pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return _dispatch_root_queue_drain_deferred_item(ddi.ddi_stashed_dq, - ddi.ddi_stashed_dou, ddi.ddi_stashed_pp); -#ifndef WORKQ_KEVENT_EVENT_BUFFER_LEN - } else if (ddi.ddi_nevents > *nevents) { - *nevents = 0; - _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents); -#endif - } else { - *nevents = ddi.ddi_nevents; - dispatch_static_assert(__builtin_types_compatible_p(typeof(**events), - typeof(*ddi.ddi_eventlist))); - memcpy(*events, ddi.ddi_eventlist, - (size_t)ddi.ddi_nevents * sizeof(*ddi.ddi_eventlist)); - } -} -#endif // DISPATCH_USE_KEVENT_WORKQUEUE -#pragma mark - -#pragma mark dispatch_memorypressure - -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE -#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYPRESSURE -#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \ - DISPATCH_MEMORYPRESSURE_NORMAL | \ - DISPATCH_MEMORYPRESSURE_WARN | \ - DISPATCH_MEMORYPRESSURE_CRITICAL | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) -#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \ - DISPATCH_MEMORYPRESSURE_WARN | \ - DISPATCH_MEMORYPRESSURE_CRITICAL | \ - 
DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) -#elif DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM -#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK DISPATCH_VM_PRESSURE -#endif + _dispatch_debug("wlh[%p]: draining deferred item %p", wlh, + ddi.ddi_stashed_dq); + _dispatch_root_queue_drain_deferred_item(ddi.ddi_stashed_rq, + ddi.ddi_stashed_dq DISPATCH_PERF_MON_ARGS); + } -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE -static dispatch_source_t _dispatch_memorypressure_source; + _dispatch_deferred_items_set(NULL); + _dispatch_reset_wlh(); -static void -_dispatch_memorypressure_handler(void *context DISPATCH_UNUSED) -{ -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE - unsigned long memorypressure; - memorypressure = dispatch_source_get_data(_dispatch_memorypressure_source); - - if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) { - _dispatch_memory_warn = false; - _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; -#if VOUCHER_USE_MACH_VOUCHER - if (_firehose_task_buffer) { - firehose_buffer_clear_bank_flags(_firehose_task_buffer, - FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); - } -#endif - } - if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) { - _dispatch_memory_warn = true; - _dispatch_continuation_cache_limit = - DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN; -#if VOUCHER_USE_MACH_VOUCHER - if (_firehose_task_buffer) { - firehose_buffer_set_bank_flags(_firehose_task_buffer, - FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); - } -#endif + if (ddi.ddi_nevents) { + _dispatch_debug("flushing %d deferred kevents", ddi.ddi_nevents); } - if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) { - malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK); + *nevents = ddi.ddi_nevents; + dispatch_static_assert(__builtin_types_compatible_p(typeof(**events), + typeof(*ddi.ddi_eventlist))); + memcpy(*events, ddi.ddi_eventlist, + (size_t)ddi.ddi_nevents * sizeof(*ddi.ddi_eventlist)); + if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER && !ddi.ddi_stashed_dq) { + _dispatch_perfmon_end(perfmon_thread_event_no_steal); } -#elif DISPATCH_USE_VM_PRESSURE_SOURCE - // we must have gotten DISPATCH_VM_PRESSURE - malloc_zone_pressure_relief(0,0); -#endif } -static void -_dispatch_memorypressure_init(void) +DISPATCH_NOINLINE +void +_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) { - _dispatch_memorypressure_source = dispatch_source_create( - DISPATCH_MEMORYPRESSURE_SOURCE_TYPE, 0, - DISPATCH_MEMORYPRESSURE_SOURCE_MASK, - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true)); - dispatch_source_set_event_handler_f(_dispatch_memorypressure_source, - _dispatch_memorypressure_handler); - dispatch_activate(_dispatch_memorypressure_source); + if (!events && !nevents) { + // events for worker thread request have already been delivered earlier + return; + } + return _dispatch_wlh_worker_thread(DISPATCH_WLH_GLOBAL, events, nevents); } -#else -static inline void _dispatch_memorypressure_init(void) {} -#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE - -#pragma mark - -#pragma mark dispatch_mach - -#if HAVE_MACH - -#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG -#define _dispatch_debug_machport(name) \ - dispatch_debug_machport((name), __func__) -#else -#define _dispatch_debug_machport(name) ((void)(name)) -#endif -// Flags for all notifications that are registered/unregistered when a -// 
send-possible notification is requested/delivered -#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ - DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) -#define _DISPATCH_MACH_RECV_FLAGS (DISPATCH_MACH_RECV_MESSAGE| \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) -#define _DISPATCH_MACH_RECV_DIRECT_FLAGS ( \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) - -#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) -#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? \ - (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) - -#define _DISPATCH_MACHPORT_HASH_SIZE 32 -#define _DISPATCH_MACHPORT_HASH(x) \ - _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) - -#ifndef MACH_RCV_VOUCHER -#define MACH_RCV_VOUCHER 0x00000800 -#endif -#define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX -#define DISPATCH_MACH_RCV_OPTIONS ( \ - MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ - MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ - MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \ - MACH_RCV_VOUCHER - -#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->dk_kevent.ext[0]) - -static void _dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr); -static void _dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr); -static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, - dispatch_source_refs_t dr, dispatch_kevent_t dk, - _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, - mach_msg_size_t siz); -static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags, uint32_t mask, - mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); -static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr); -static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, unsigned int options); -static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); -static void _dispatch_mach_msg_recv(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, _dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr, mach_msg_size_t siz); -static void _dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, - const _dispatch_kevent_qos_s *ke); -static inline mach_msg_option_t _dispatch_mach_checkin_options(void); - -static const size_t _dispatch_mach_recv_msg_size = - DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE; -static const size_t dispatch_mach_trailer_size = - sizeof(dispatch_mach_trailer_t); -static mach_port_t _dispatch_mach_notify_port; -static dispatch_source_t _dispatch_mach_notify_source; - -static inline void* -_dispatch_kevent_mach_msg_buf(_dispatch_kevent_qos_s *ke) -{ - return (void*)ke->ext[0]; -} -static inline mach_msg_size_t -_dispatch_kevent_mach_msg_size(_dispatch_kevent_qos_s *ke) -{ - // buffer size in the successful receive case, but message size (like - // msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size. 
- return (mach_msg_size_t)ke->ext[1]; -} +#endif // DISPATCH_USE_KEVENT_WORKQUEUE +#pragma mark - +#pragma mark dispatch_source_debug -static void -_dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) +static size_t +_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { - ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - if (_dispatch_evfilt_machport_direct_enabled) return; - ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; - ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - ds->ds_is_direct_kevent = false; -#endif + dispatch_queue_t target = ds->do_targetq; + dispatch_source_refs_t dr = ds->ds_refs; + return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%x, " + "mask = 0x%x, pending_data = 0x%llx, registered = %d, " + "armed = %d, deleted = %d%s, canceled = %d, ", + target && target->dq_label ? target->dq_label : "", target, + dr->du_ident, dr->du_fflags, ds->ds_pending_data, + ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED), + (bool)(ds->dq_atomic_flags & DSF_DELETED), + (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "", + (bool)(ds->dq_atomic_flags & DSF_CANCELED)); } -static const -struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { - .ke = { - .filter = EVFILT_MACHPORT, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - .fflags = DISPATCH_MACH_RCV_OPTIONS, - }, - .init = _dispatch_source_type_mach_recv_direct_init, -}; - -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; -static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = { - .filter = EVFILT_MACHPORT, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .fflags = DISPATCH_MACH_RCV_OPTIONS, -}; - -static void -_dispatch_mach_recv_msg_buf_init(void) +static size_t +_dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { - if (_dispatch_evfilt_machport_direct_enabled) return; - mach_vm_size_t vm_size = mach_vm_round_page( - _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); - mach_vm_address_t vm_addr = vm_page_size; - kern_return_t kr; - - while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, - VM_FLAGS_ANYWHERE))) { - if (kr != KERN_NO_SPACE) { - DISPATCH_CLIENT_CRASH(kr, - "Could not allocate mach msg receive buffer"); - } - _dispatch_temporary_resource_shortage(); - vm_addr = vm_page_size; - } - _dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr; - _dispatch_mach_recv_kevent.ext[1] = vm_size; + dispatch_timer_source_refs_t dr = ds->ds_timer_refs; + return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx" + ", interval = 0x%llx, flags = 0x%x }, ", + (unsigned long long)dr->dt_timer.target, + (unsigned long long)dr->dt_timer.deadline, + (unsigned long long)dr->dt_timer.interval, dr->du_fflags); } -#endif -DISPATCH_NOINLINE -static void -_dispatch_source_merge_mach_msg_direct(dispatch_source_t ds, - _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr) +size_t +_dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz) { - dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs); - dispatch_queue_t cq = _dispatch_queue_get_current(); - - // see firehose_client_push_notify_async - _dispatch_queue_set_current(ds->_as_dq); - 
dc->dc_func(hdr); - _dispatch_queue_set_current(cq); - if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { - free(hdr); + dispatch_source_refs_t dr = ds->ds_refs; + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(ds), ds); + offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); + offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); + if (dr->du_is_timer) { + offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } + offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " + "filter = %s }", dr, dr->du_is_direct ? " (direct)" : "", + dr->du_type->dst_kind); + return offset; } - -dispatch_source_t -_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, - const struct dispatch_continuation_s *dc) -{ - dispatch_source_t ds; - ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct, - recvp, 0, &_dispatch_mgr_q); - os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER], - (dispatch_continuation_t)dc, relaxed); - return ds; -} - -static void -_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) -{ - kern_return_t kr; -#if HAVE_MACH_PORT_CONSTRUCT - mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT }; -#ifdef __LP64__ - const mach_port_context_t guard = 0xfeed09071f1ca7edull; -#else - const mach_port_context_t guard = 0xff1ca7edull; -#endif - kr = mach_port_construct(mach_task_self(), &opts, guard, - &_dispatch_mach_notify_port); -#else - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &_dispatch_mach_notify_port); -#endif - DISPATCH_VERIFY_MIG(kr); - if (slowpath(kr)) { - DISPATCH_CLIENT_CRASH(kr, - "mach_port_construct() failed: cannot create receive right"); - } - - static const struct dispatch_continuation_s dc = { - .dc_func = (void*)_dispatch_mach_notify_source_invoke, - }; - _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv( - _dispatch_mach_notify_port, &dc); - dispatch_assert(_dispatch_mach_notify_source); - dispatch_activate(_dispatch_mach_notify_source); -} - -static mach_port_t -_dispatch_get_mach_notify_port(void) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init); - return _dispatch_mach_notify_port; -} - -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -static void -_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) -{ - kern_return_t kr; - - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, - &_dispatch_mach_recv_portset); - DISPATCH_VERIFY_MIG(kr); - if (slowpath(kr)) { - DISPATCH_CLIENT_CRASH(kr, - "mach_port_allocate() failed: cannot create port set"); - } - _dispatch_kevent_qos_s *ke = &_dispatch_mach_recv_kevent; - dispatch_assert(_dispatch_kevent_mach_msg_buf(ke)); - dispatch_assert(dispatch_mach_trailer_size == - REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS( - DISPATCH_MACH_RCV_TRAILER))); - ke->ident = _dispatch_mach_recv_portset; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } -#endif - _dispatch_kq_immediate_update(&_dispatch_mach_recv_kevent); -} - -static mach_port_t -_dispatch_get_mach_recv_portset(void) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init); - return _dispatch_mach_recv_portset; -} - -static void -_dispatch_mach_portset_init(void *context DISPATCH_UNUSED) -{ - _dispatch_kevent_qos_s kev = { - .filter = EVFILT_MACHPORT, - 
.flags = EV_ADD, - }; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } -#endif - - kern_return_t kr; - - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, - &_dispatch_mach_portset); - DISPATCH_VERIFY_MIG(kr); - if (slowpath(kr)) { - DISPATCH_CLIENT_CRASH(kr, - "mach_port_allocate() failed: cannot create port set"); - } - kev.ident = _dispatch_mach_portset; - _dispatch_kq_immediate_update(&kev); -} - -static mach_port_t -_dispatch_get_mach_portset(void) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init); - return _dispatch_mach_portset; -} - -static kern_return_t -_dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps) -{ - mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; - kern_return_t kr; - - _dispatch_debug_machport(mp); - kr = mach_port_move_member(mach_task_self(), mp, mps); - if (slowpath(kr)) { - DISPATCH_VERIFY_MIG(kr); - switch (kr) { - case KERN_INVALID_RIGHT: - if (mps) { - _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " - "mach_port_move_member() failed ", kr); - break; - } - //fall through - case KERN_INVALID_NAME: -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed " - "prematurely", mp); -#endif - break; - default: - (void)dispatch_assume_zero(kr); - break; - } - } - return mps ? kr : 0; -} - -static kern_return_t -_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) -{ - kern_return_t kr = 0; - dispatch_assert_zero(new_flags & del_flags); - if ((new_flags & _DISPATCH_MACH_RECV_FLAGS) || - (del_flags & _DISPATCH_MACH_RECV_FLAGS)) { - mach_port_t mps; - if (new_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { - mps = _dispatch_get_mach_recv_portset(); - } else if ((new_flags & DISPATCH_MACH_RECV_MESSAGE) || - ((del_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) && - (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE))) { - mps = _dispatch_get_mach_portset(); - } else { - mps = MACH_PORT_NULL; - } - kr = _dispatch_mach_portset_update(dk, mps); - } - return kr; -} -#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - -static kern_return_t -_dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) -{ - kern_return_t kr = 0; - dispatch_assert_zero(new_flags & del_flags); - if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || - (del_flags & _DISPATCH_MACH_SP_FLAGS)) { - // Requesting a (delayed) non-sync send-possible notification - // registers for both immediate dead-name notification and delayed-arm - // send-possible notification for the port. - // The send-possible notification is armed when a mach_msg() with the - // the MACH_SEND_NOTIFY to the port times out. - // If send-possible is unavailable, fall back to immediate dead-name - // registration rdar://problem/2527840&9008724 - kr = _dispatch_mach_notify_update(dk, new_flags, del_flags, - _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE, - MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME ? 
1 : 0); - } - return kr; -} - -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -DISPATCH_NOINLINE -static void -_dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke) -{ - mach_port_t name = (mach_port_name_t)ke->data; - dispatch_kevent_t dk; - - _dispatch_debug_machport(name); - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dispatch_assume(dk)) { - return; - } - _dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH - - _dispatch_kevent_qos_s kev = { - .ident = name, - .filter = EVFILT_MACHPORT, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .fflags = DISPATCH_MACH_RECV_MESSAGE, - .udata = (uintptr_t)dk, - }; - _dispatch_kevent_debug("synthetic", &kev); - _dispatch_kevent_merge(&kev); -} -#endif - -DISPATCH_NOINLINE -static void -_dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke) -{ - mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); - mach_msg_size_t siz; - mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - - if (!fastpath(hdr)) { - DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); - } - if (fastpath(!kr)) { - _dispatch_kevent_mach_msg_recv(ke, hdr); - goto out; - } else if (kr != MACH_RCV_TOO_LARGE) { - goto out; - } else if (!ke->data) { - DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); - } - if (slowpath(ke->ext[1] > (UINT_MAX - dispatch_mach_trailer_size))) { - DISPATCH_INTERNAL_CRASH(ke->ext[1], - "EVFILT_MACHPORT with overlarge message"); - } - siz = _dispatch_kevent_mach_msg_size(ke) + dispatch_mach_trailer_size; - hdr = malloc(siz); - if (!dispatch_assume(hdr)) { - // Kernel will discard message too large to fit - hdr = NULL; - siz = 0; - } - mach_port_t name = (mach_port_name_t)ke->data; - const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - if (fastpath(!kr)) { - _dispatch_kevent_mach_msg_recv(ke, hdr); - goto out; - } else if (kr == MACH_RCV_TOO_LARGE) { - _dispatch_log("BUG in libdispatch client: " - "_dispatch_kevent_mach_msg_drain: dropped message too " - "large to fit in memory: id = 0x%x, size = %u", - hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke)); - kr = MACH_MSG_SUCCESS; - } - if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { - free(hdr); - } -out: - if (slowpath(kr)) { - _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: " - "message reception failed", kr); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke) -{ - if (unlikely(!(ke->flags & EV_UDATA_SPECIFIC))) { -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - if (ke->ident == _dispatch_mach_recv_portset) { - _dispatch_kevent_mach_msg_drain(ke); - return _dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent); - } else if (ke->ident == _dispatch_mach_portset) { - return _dispatch_kevent_machport_drain(ke); - } -#endif - return _dispatch_kevent_error(ke); - } - - dispatch_kevent_t dk = (dispatch_kevent_t)ke->udata; - dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources); - bool is_reply = (dk->dk_kevent.flags & EV_ONESHOT); - dispatch_source_t ds = _dispatch_source_from_refs(dr); - - if (_dispatch_kevent_mach_msg_size(ke)) { - _dispatch_kevent_mach_msg_drain(ke); - if (is_reply) { - // _dispatch_kevent_mach_msg_drain() should have deleted this event - dispatch_assert(ke->flags & EV_DELETE); - return; - } - - if (!(ds->dq_atomic_flags & DSF_CANCELED)) { - // re-arm the mach channel - ke->fflags = DISPATCH_MACH_RCV_OPTIONS; - ke->data 
= 0; - ke->ext[0] = 0; - ke->ext[1] = 0; - return _dispatch_kq_deferred_update(ke); - } - } else if (is_reply) { - DISPATCH_INTERNAL_CRASH(ke->flags, "Unexpected EVFILT_MACHPORT event"); - } - if (unlikely((ke->flags & EV_VANISHED) && - (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE))) { - DISPATCH_CLIENT_CRASH(ke->flags, - "Unexpected EV_VANISHED (do not destroy random mach ports)"); - } - return _dispatch_kevent_merge(ke); -} - -static void -_dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr) -{ - dispatch_source_refs_t dri; - dispatch_kevent_t dk; - mach_port_t name = hdr->msgh_local_port; - mach_msg_size_t siz = hdr->msgh_size + dispatch_mach_trailer_size; - - if (!dispatch_assume(hdr->msgh_size <= UINT_MAX - - dispatch_mach_trailer_size)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received overlarge message"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - if (!dispatch_assume(name)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with MACH_PORT_NULL port"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - _dispatch_debug_machport(name); - if (ke->flags & EV_UDATA_SPECIFIC) { - dk = (void*)ke->udata; - } else { - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - } - if (!dispatch_assume(dk)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with unknown kevent"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - dispatch_source_t dsi = _dispatch_source_from_refs(dri); - if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { - return _dispatch_source_merge_mach_msg(dsi, dri, dk, ke, hdr, siz); - } - } - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with no listeners"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); -} - -static void -_dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr) -{ - if (hdr) { - mach_msg_destroy(hdr); - if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { - free(hdr); - } - } -} - -static void -_dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, - dispatch_kevent_t dk, _dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr, mach_msg_size_t siz) -{ - if (dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE) { - return _dispatch_source_merge_mach_msg_direct(ds, ke, hdr); - } - dispatch_mach_reply_refs_t dmr = NULL; - if (dk->dk_kevent.flags & EV_ONESHOT) { - dmr = (dispatch_mach_reply_refs_t)dr; - } - return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, ke, hdr, siz); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) -{ - dispatch_source_refs_t dri, dr_next; - dispatch_kevent_t dk; - bool unreg; - - dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); - if (!dk) { - return; - } - - // Update notification registration state. 
- dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS; - _dispatch_kevent_qos_s kev = { - .ident = name, - .filter = DISPATCH_EVFILT_MACH_NOTIFICATION, - .flags = EV_ADD|EV_ENABLE, - .fflags = flag, - .udata = (uintptr_t)dk, - }; - if (final) { - // This can never happen again - unreg = true; - } else { - // Re-register for notification before delivery - unreg = _dispatch_kevent_resume(dk, flag, 0); - } - DISPATCH_MACH_NOTIFICATION_ARMED(dk) = 0; - TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { - dispatch_source_t dsi = _dispatch_source_from_refs(dri); - if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) { - dispatch_mach_t dm = (dispatch_mach_t)dsi; - _dispatch_mach_merge_notification_kevent(dm, &kev); - if (unreg && dm->dm_dkev) { - _dispatch_mach_notification_kevent_unregister(dm); - } - } else { - _dispatch_source_merge_kevent(dsi, &kev); - if (unreg) { - _dispatch_source_kevent_unregister(dsi); - } - } - if (!dr_next || DISPATCH_MACH_NOTIFICATION_ARMED(dk)) { - // current merge is last in list (dk might have been freed) - // or it re-armed the notification - return; - } - } -} - -static kern_return_t -_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, - mach_port_mscount_t notify_sync) -{ - mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; - typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data; - kern_return_t kr, krr = 0; - - // Update notification registration state. - dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask; - dk->dk_kevent.data &= ~(del_flags & mask); - - _dispatch_debug_machport(port); - if ((dk->dk_kevent.data & mask) && !(prev & mask)) { - _dispatch_debug("machport[0x%08x]: registering for send-possible " - "notification", port); - previous = MACH_PORT_NULL; - krr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, _dispatch_get_mach_notify_port(), - MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(krr); - - switch(krr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - // Suppress errors & clear registration state - dk->dk_kevent.data &= ~mask; - break; - default: - // Else, we don't expect any errors from mach. Log any errors - if (dispatch_assume_zero(krr)) { - // log the error & clear registration state - dk->dk_kevent.data &= ~mask; - } else if (dispatch_assume_zero(previous)) { - // Another subsystem has beat libdispatch to requesting the - // specified Mach notification on this port. We should - // technically cache the previous port and message it when the - // kernel messages our port. Or we can just say screw those - // subsystems and deallocate the previous port. 
- // They should adopt libdispatch :-P - kr = mach_port_deallocate(mach_task_self(), previous); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - previous = MACH_PORT_NULL; - } - } - } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) { - _dispatch_debug("machport[0x%08x]: unregistering for send-possible " - "notification", port); - previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, MACH_PORT_NULL, - MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - - switch (kr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - case KERN_INVALID_ARGUMENT: - break; - default: - if (dispatch_assume_zero(kr)) { - // log the error - } - } - } else { - return 0; - } - if (slowpath(previous)) { - // the kernel has not consumed the send-once right yet - (void)dispatch_assume_zero( - _dispatch_send_consume_send_once_right(previous)); - } - return krr; -} - -static void -_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) -{ - static int notify_type = HOST_NOTIFY_CALENDAR_SET; - kern_return_t kr; - _dispatch_debug("registering for calendar-change notification"); -retry: - kr = host_request_notification(_dispatch_get_mach_host_port(), - notify_type, _dispatch_get_mach_notify_port()); - // Fallback when missing support for newer _SET variant, fires strictly more. - if (kr == KERN_INVALID_ARGUMENT && - notify_type != HOST_NOTIFY_CALENDAR_CHANGE){ - notify_type = HOST_NOTIFY_CALENDAR_CHANGE; - goto retry; - } - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); -} - -static void -_dispatch_mach_host_calendar_change_register(void) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update); -} - -static void -_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) -{ - mig_reply_error_t reply; - dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union - __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); - dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size); - boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); - if (!success && reply.RetCode == MIG_BAD_ID && - (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID || - hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) { - _dispatch_debug("calendar-change notification"); - _dispatch_timers_calendar_change(); - _dispatch_mach_host_notify_update(NULL); - success = TRUE; - reply.RetCode = KERN_SUCCESS; - } - if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) { - (void)dispatch_assume_zero(reply.RetCode); - } - if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) { - mach_msg_destroy(hdr); - } -} - -kern_return_t -_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " - "deleted prematurely", name); -#endif - - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true); - - return KERN_SUCCESS; -} - -kern_return_t -_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ - kern_return_t kr; - - _dispatch_debug("machport[0x%08x]: dead-name notification", name); - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true); - - // the act of receiving a dead name notification allocates a dead-name - // right that must be deallocated - kr = 
mach_port_deallocate(mach_task_self(), name); - DISPATCH_VERIFY_MIG(kr); - //(void)dispatch_assume_zero(kr); - - return KERN_SUCCESS; -} - -kern_return_t -_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ - _dispatch_debug("machport[0x%08x]: send-possible notification", name); - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false); - - return KERN_SUCCESS; -} - -#pragma mark - -#pragma mark dispatch_mach_t - -#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1 -#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 -#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4 -#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8 -#define DISPATCH_MACH_OPTIONS_MASK 0xffff - -#define DM_SEND_STATUS_SUCCESS 0x1 -#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2 - -DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, - DM_SEND_INVOKE_NONE = 0x0, - DM_SEND_INVOKE_FLUSH = 0x1, - DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, - DM_SEND_INVOKE_CANCEL = 0x4, - DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8, - DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10, -); -#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \ - ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND) - -static inline pthread_priority_t _dispatch_mach_priority_propagate( - mach_msg_option_t options); -static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); -static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou); -static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, - mach_port_t local_port, mach_port_t remote_port); -static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t local_port); -static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( - dispatch_object_t dou, dispatch_mach_reply_refs_t dmr); -static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, - dispatch_object_t dou); -static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( - dispatch_mach_msg_t dmsg); -static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, - pthread_priority_t pp); - -static dispatch_mach_t -_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, - dispatch_mach_handler_function_t handler, bool handler_is_block) -{ - dispatch_mach_t dm; - dispatch_mach_refs_t dr; - - dm = _dispatch_alloc(DISPATCH_VTABLE(mach), - sizeof(struct dispatch_mach_s)); - _dispatch_queue_init(dm->_as_dq, DQF_NONE, 1, true); - - dm->dq_label = label; - dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds - - dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s)); - dr->dr_source_wref = _dispatch_ptr2wref(dm); - dr->dm_handler_func = handler; - dr->dm_handler_ctxt = context; - dm->ds_refs = dr; - dm->dm_handler_is_block = handler_is_block; - - dm->dm_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_mach_send_refs_s)); - dm->dm_refs->dr_source_wref = _dispatch_ptr2wref(dm); - dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; - TAILQ_INIT(&dm->dm_refs->dm_replies); - - if (slowpath(!q)) { - q = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); - } else { - _dispatch_retain(q); - } - dm->do_targetq = q; - _dispatch_object_debug(dm, "%s", __func__); - return dm; -} - -dispatch_mach_t -dispatch_mach_create(const char *label, dispatch_queue_t q, - dispatch_mach_handler_t handler) -{ - dispatch_block_t bb = _dispatch_Block_copy((void*)handler); - return _dispatch_mach_create(label, q, bb, - 
(dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true); -} - -dispatch_mach_t -dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, - dispatch_mach_handler_function_t handler) -{ - return _dispatch_mach_create(label, q, context, handler, false); -} - -void -_dispatch_mach_dispose(dispatch_mach_t dm) -{ - _dispatch_object_debug(dm, "%s", __func__); - dispatch_mach_refs_t dr = dm->ds_refs; - if (dm->dm_handler_is_block && dr->dm_handler_ctxt) { - Block_release(dr->dm_handler_ctxt); - } - free(dr); - free(dm->dm_refs); - _dispatch_queue_destroy(dm->_as_dq); -} - -void -dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, - mach_port_t send, dispatch_mach_msg_t checkin) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_kevent_t dk; - uint32_t disconnect_cnt; - dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; - - dm->ds_is_direct_kevent = (bool)_dispatch_evfilt_machport_direct_enabled; - if (MACH_PORT_VALID(receive)) { - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = type->ke; - dk->dk_kevent.ident = receive; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_VANISHED; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); - dm->ds_dkev = dk; - dm->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; - dm->ds_needs_rearm = dm->ds_is_direct_kevent; - if (!dm->ds_is_direct_kevent) { - dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; - dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - } - _dispatch_retain(dm); // the reference the manager queue holds - } - dr->dm_send = send; - if (MACH_PORT_VALID(send)) { - if (checkin) { - dispatch_retain(checkin); - checkin->dmsg_options = _dispatch_mach_checkin_options(); - dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); - } - dr->dm_checkin = checkin; - } - // monitor message reply ports - dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 == - DISPATCH_MACH_NEVER_INSTALLED); - disconnect_cnt = os_atomic_dec2o(dr, dm_disconnect_cnt, release); - if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) { - DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); - } - _dispatch_object_debug(dm, "%s", __func__); - return dispatch_activate(dm); -} - -// assumes low bit of mach port names is always set -#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u - -static inline void -_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr) -{ - dmr->dmr_reply &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED; -} - -static inline bool -_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr) -{ - mach_port_t reply_port = dmr->dmr_reply; - return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false; -} - -static inline mach_port_t -_dispatch_mach_reply_get_reply_port(dispatch_mach_reply_refs_t dmr) -{ - mach_port_t reply_port = dmr->dmr_reply; - return reply_port ? 
(reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0; -} - -static inline bool -_dispatch_mach_reply_tryremove(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr) -{ - bool removed; - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - } - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - return removed; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, unsigned int options) -{ - dispatch_mach_msg_t dmsgr = NULL; - bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); - if (options & DKEV_UNREGISTER_REPLY_REMOVE) { - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); - } - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - } - if (disconnected) { - dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); - } else if (dmr->dmr_voucher) { - _voucher_release(dmr->dmr_voucher); - dmr->dmr_voucher = NULL; - } - _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p", - _dispatch_mach_reply_get_reply_port(dmr), - disconnected ? " (disconnected)" : "", dmr->dmr_ctxt); - if (dmsgr) { - return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); - } - dispatch_assert(!(options & DKEV_UNREGISTER_WAKEUP)); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, unsigned int options) -{ - dispatch_mach_msg_t dmsgr = NULL; - bool replies_empty = false; - bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); - if (options & DKEV_UNREGISTER_REPLY_REMOVE) { - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); - } - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - replies_empty = TAILQ_EMPTY(&dm->dm_refs->dm_replies); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - } - if (disconnected) { - dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); - } else if (dmr->dmr_voucher) { - _voucher_release(dmr->dmr_voucher); - dmr->dmr_voucher = NULL; - } - uint32_t flags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - dispatch_kevent_t dk = dmr->dmr_dkev; - _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p", - (mach_port_t)dk->dk_kevent.ident, - disconnected ? " (disconnected)" : "", dmr->dmr_ctxt); - if (!dm->ds_is_direct_kevent) { - dmr->dmr_dkev = NULL; - TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); - _dispatch_kevent_unregister(dk, flags, 0); - } else { - long r = _dispatch_kevent_unregister(dk, flags, options); - if (r == EINPROGRESS) { - _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]", - (mach_port_t)dk->dk_kevent.ident, dk); - dispatch_assert(options == DKEV_UNREGISTER_DISCONNECTED); - // dmr must be put back so that the event delivery finds it, the - // replies lock is held by the caller. 
- TAILQ_INSERT_HEAD(&dm->dm_refs->dm_replies, dmr, dmr_list); - if (dmsgr) { - dmr->dmr_voucher = dmsgr->dmsg_voucher; - dmsgr->dmsg_voucher = NULL; - dispatch_release(dmsgr); - } - return; // deferred unregistration - } - dispatch_assume_zero(r); - dmr->dmr_dkev = NULL; - _TAILQ_TRASH_ENTRY(dmr, dr_list); - } - free(dmr); - if (dmsgr) { - return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); - } - if ((options & DKEV_UNREGISTER_WAKEUP) && replies_empty && - (dm->dm_refs->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED))) { - dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_waiter_register(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, - dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts) -{ - dmr->dr_source_wref = _dispatch_ptr2wref(dm); - dmr->dmr_dkev = NULL; - dmr->dmr_reply = reply_port; - if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { - _dispatch_mach_reply_mark_reply_port_owned(dmr); - } else { - if (dmsg->dmsg_voucher) { - dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); - } - dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; - // make reply context visible to leaks rdar://11777199 - dmr->dmr_ctxt = dmsg->do_ctxt; - } - - _dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p", - reply_port, dmsg->do_ctxt); - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); - } - TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, - dispatch_mach_msg_t dmsg) -{ - dispatch_kevent_t dk; - dispatch_mach_reply_refs_t dmr; - dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; - pthread_priority_t mp, pp; - - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = type->ke; - dk->dk_kevent.ident = reply_port; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_ONESHOT; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); - if (!dm->ds_is_direct_kevent) { - dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - } - - dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s)); - dmr->dr_source_wref = _dispatch_ptr2wref(dm); - dmr->dmr_dkev = dk; - dmr->dmr_reply = reply_port; - if (dmsg->dmsg_voucher) { - dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); - } - dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; - // make reply context visible to leaks rdar://11777199 - dmr->dmr_ctxt = dmsg->do_ctxt; - - pp = dm->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp && dm->ds_is_direct_kevent) { - mp = dmsg->dmsg_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp < mp) pp = mp; - pp |= dm->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - } else { - pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } - - _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", - reply_port, dmsg->do_ctxt); - uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, pp, &flags); - TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr, - dr_list); - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { 
- DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); - } - TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) { - return _dispatch_mach_reply_kevent_unregister(dm, dmr, - DKEV_UNREGISTER_DISCONNECTED|DKEV_UNREGISTER_REPLY_REMOVE); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm) -{ - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - dispatch_kevent_t dk = dm->dm_dkev; - dm->dm_dkev = NULL; - TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, - dr_list); - dm->ds_pending_data_mask &= ~(unsigned long) - (DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); - _dispatch_kevent_unregister(dk, - DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD, 0); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send) -{ - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - dispatch_kevent_t dk; - - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = _dispatch_source_type_mach_send.ke; - dk->dk_kevent.ident = send; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags = DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); - - dm->ds_pending_data_mask |= dk->dk_kevent.fflags; - - uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dk, - _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, &flags); - TAILQ_INSERT_TAIL(&dk->dk_sources, - (dispatch_source_refs_t)dm->dm_refs, dr_list); - dm->dm_dkev = dk; - if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) { - _dispatch_mach_notification_kevent_unregister(dm); - } -} - -static mach_port_t -_dispatch_get_thread_reply_port(void) -{ - mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port(); - if (mrp) { - reply_port = mrp; - _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", - reply_port); - } else { - reply_port = mach_reply_port(); - _dispatch_set_thread_mig_reply_port(reply_port); - _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", - reply_port); - } - _dispatch_debug_machport(reply_port); - return reply_port; -} - -static void -_dispatch_clear_thread_reply_port(mach_port_t reply_port) -{ - mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); - if (reply_port != mrp) { - if (mrp) { - _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " - "port (found 0x%08x)", reply_port, mrp); - } - return; - } - _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); - _dispatch_debug_machport(reply_port); - _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", - reply_port); -} - -static void -_dispatch_set_thread_reply_port(mach_port_t reply_port) -{ - _dispatch_debug_machport(reply_port); - mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); - if (mrp) { - kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, - MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - _dispatch_debug("machport[0x%08x]: deallocated sync reply port " - "(found 0x%08x)", reply_port, mrp); - } else { - _dispatch_set_thread_mig_reply_port(reply_port); - _dispatch_debug("machport[0x%08x]: restored thread sync reply port", - reply_port); - } -} - -static inline mach_port_t -_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) -{ - mach_msg_header_t *hdr = 
_dispatch_mach_msg_get_msg(dou._dmsg); - mach_port_t remote = hdr->msgh_remote_port; - return remote; -} - -static inline mach_port_t -_dispatch_mach_msg_get_reply_port(dispatch_object_t dou) -{ - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); - mach_port_t local = hdr->msgh_local_port; - if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) != - MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL; - return local; -} - -static inline void -_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err, - unsigned long reason) -{ - dispatch_assert_zero(reason & ~(unsigned long)code_emask); - dmsg->dmsg_error = ((err || !reason) ? err : - err_local|err_sub(0x3e0)|(mach_error_t)reason); -} - -static inline unsigned long -_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) -{ - mach_error_t err = dmsg->dmsg_error; - - dmsg->dmsg_error = 0; - if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { - *err_ptr = 0; - return err_get_code(err); - } - *err_ptr = err; - return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT; -} - -static void -_dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, - _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, mach_msg_size_t siz) -{ - _dispatch_debug_machport(hdr->msgh_remote_port); - _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", - hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); - bool canceled = (dm->dq_atomic_flags & DSF_CANCELED); - if (!dmr && canceled) { - // message received after cancellation, _dispatch_mach_kevent_merge is - // responsible for mach channel source state (e.g. deferred deletion) - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - dispatch_mach_msg_t dmsg; - voucher_t voucher; - pthread_priority_t priority; - void *ctxt = NULL; - if (dmr) { - _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher - voucher = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - priority = dmr->dmr_priority; - ctxt = dmr->dmr_ctxt; - unsigned int options = DKEV_DISPOSE_IMMEDIATE_DELETE; - options |= DKEV_UNREGISTER_REPLY_REMOVE; - options |= DKEV_UNREGISTER_WAKEUP; - if (canceled) options |= DKEV_UNREGISTER_DISCONNECTED; - _dispatch_mach_reply_kevent_unregister(dm, dmr, options); - ke->flags |= EV_DELETE; // remember that unregister deleted the event - if (canceled) return; - } else { - voucher = voucher_create_with_mach_msg(hdr); - priority = _voucher_get_priority(voucher); - } - dispatch_mach_msg_destructor_t destructor; - destructor = (hdr == _dispatch_kevent_mach_msg_buf(ke)) ? 
- DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : - DISPATCH_MACH_MSG_DESTRUCTOR_FREE; - dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); - if (hdr == _dispatch_kevent_mach_msg_buf(ke)) { - _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr, (uint64_t)dmsg->dmsg_buf); - } - dmsg->dmsg_voucher = voucher; - dmsg->dmsg_priority = priority; - dmsg->do_ctxt = ctxt; - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); - _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg); - _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_mach_msg_t -_dispatch_mach_msg_reply_recv(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t reply_port) -{ - if (slowpath(!MACH_PORT_VALID(reply_port))) { - DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port"); - } - void *ctxt = dmr->dmr_ctxt; - mach_msg_header_t *hdr, *hdr2 = NULL; - void *hdr_copyout_addr; - mach_msg_size_t siz, msgsiz = 0; - mach_msg_return_t kr; - mach_msg_option_t options; - siz = mach_vm_round_page(_dispatch_mach_recv_msg_size + - dispatch_mach_trailer_size); - hdr = alloca(siz); - for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size); - p < (mach_vm_address_t)hdr + siz; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } - options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER); -retry: - _dispatch_debug_machport(reply_port); - _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port, - (options & MACH_RCV_TIMEOUT) ? "poll" : "wait"); - kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - hdr_copyout_addr = hdr; - _dispatch_debug_machport(reply_port); - _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) " - "returned: %s - 0x%x", reply_port, siz, options, - mach_error_string(kr), kr); - switch (kr) { - case MACH_RCV_TOO_LARGE: - if (!fastpath(hdr->msgh_size <= UINT_MAX - - dispatch_mach_trailer_size)) { - DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message"); - } - if (options & MACH_RCV_LARGE) { - msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; - hdr2 = malloc(msgsiz); - if (dispatch_assume(hdr2)) { - hdr = hdr2; - siz = msgsiz; - } - options |= MACH_RCV_TIMEOUT; - options &= ~MACH_RCV_LARGE; - goto retry; - } - _dispatch_log("BUG in libdispatch client: " - "dispatch_mach_send_and_wait_for_reply: dropped message too " - "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id, - hdr->msgh_size); - break; - case MACH_RCV_INVALID_NAME: // rdar://problem/21963848 - case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327 - case MACH_RCV_PORT_DIED: - // channel was disconnected/canceled and reply port destroyed - _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: " - "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr); - goto out; - case MACH_MSG_SUCCESS: - if (hdr->msgh_remote_port) { - _dispatch_debug_machport(hdr->msgh_remote_port); - } - _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, " - "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, - hdr->msgh_size, hdr->msgh_remote_port); - siz = hdr->msgh_size + dispatch_mach_trailer_size; - if (hdr2 && siz < msgsiz) { - void *shrink = realloc(hdr2, msgsiz); - if (shrink) hdr = hdr2 = shrink; - } - break; - default: - dispatch_assume_zero(kr); - break; - } - _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port); - hdr->msgh_local_port = 
MACH_PORT_NULL; - if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { - if (!kr) mach_msg_destroy(hdr); - goto out; - } - dispatch_mach_msg_t dmsg; - dispatch_mach_msg_destructor_t destructor = (!hdr2) ? - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : - DISPATCH_MACH_MSG_DESTRUCTOR_FREE; - dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); - if (!hdr2 || hdr != hdr_copyout_addr) { - _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr_copyout_addr, (uint64_t)_dispatch_mach_msg_get_msg(dmsg)); - } - dmsg->do_ctxt = ctxt; - return dmsg; -out: - free(hdr2); - return NULL; -} - -static inline void -_dispatch_mach_msg_reply_received(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t local_port) -{ - bool removed = _dispatch_mach_reply_tryremove(dm, dmr); - if (!MACH_PORT_VALID(local_port) || !removed) { - // port moved/destroyed during receive, or reply waiter was never - // registered or already removed (disconnected) - return; - } - mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(dmr); - _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", - reply_port, dmr->dmr_ctxt); - if (_dispatch_mach_reply_is_reply_port_owned(dmr)) { - _dispatch_set_thread_reply_port(reply_port); - if (local_port != reply_port) { - DISPATCH_CLIENT_CRASH(local_port, - "Reply received on unexpected port"); - } - return; - } - mach_msg_header_t *hdr; - dispatch_mach_msg_t dmsg; - dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); - hdr->msgh_local_port = local_port; - dmsg->dmsg_voucher = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - dmsg->dmsg_priority = dmr->dmr_priority; - dmsg->do_ctxt = dmr->dmr_ctxt; - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED); - return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); -} - -static inline void -_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, - mach_port_t remote_port) -{ - mach_msg_header_t *hdr; - dispatch_mach_msg_t dmsg; - dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); - if (local_port) hdr->msgh_local_port = local_port; - if (remote_port) hdr->msgh_remote_port = remote_port; - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); - _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ? - local_port : remote_port, local_port ? "receive" : "send"); - return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); -} - -static inline dispatch_mach_msg_t -_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, - dispatch_mach_reply_refs_t dmr) -{ - dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; - mach_port_t reply_port = dmsg ? 
dmsg->dmsg_reply : - _dispatch_mach_reply_get_reply_port(dmr); - voucher_t v; - - if (!reply_port) { - if (!dmsg) { - v = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - if (v) _voucher_release(v); - } - return NULL; - } - - if (dmsg) { - v = dmsg->dmsg_voucher; - if (v) _voucher_retain(v); - } else { - v = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - } - - if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) && - (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) || - (dmr && !dmr->dmr_dkev && - _dispatch_mach_reply_is_reply_port_owned(dmr))) { - if (v) _voucher_release(v); - // deallocate owned reply port to break _dispatch_mach_msg_reply_recv - // out of waiting in mach_msg(MACH_RCV_MSG) - kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, - MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - return NULL; - } - - mach_msg_header_t *hdr; - dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); - dmsgr->dmsg_voucher = v; - hdr->msgh_local_port = reply_port; - if (dmsg) { - dmsgr->dmsg_priority = dmsg->dmsg_priority; - dmsgr->do_ctxt = dmsg->do_ctxt; - } else { - dmsgr->dmsg_priority = dmr->dmr_priority; - dmsgr->do_ctxt = dmr->dmr_ctxt; - } - _dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED); - _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p", - hdr->msgh_local_port, dmsgr->do_ctxt); - return dmsgr; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) -{ - dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - mach_msg_option_t msg_opts = dmsg->dmsg_options; - _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, " - "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x", - msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, - msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply); - unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? 
- 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; - dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); - _dispatch_mach_msg_set_reason(dmsg, 0, reason); - _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); - if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); -} - -DISPATCH_NOINLINE -static uint32_t -_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, - dispatch_mach_reply_refs_t dmr, pthread_priority_t pp, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; - voucher_t voucher = dmsg->dmsg_voucher; - mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; - uint32_t send_status = 0; - bool clear_voucher = false, kvoucher_move_send = false; - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == - MACH_MSG_TYPE_MOVE_SEND_ONCE); - mach_port_t reply_port = dmsg->dmsg_reply; - if (!is_reply) { - dr->dm_needs_mgr = 0; - if (unlikely(dr->dm_checkin && dmsg != dr->dm_checkin)) { - // send initial checkin message - if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != - &_dispatch_mgr_q)) { - // send kevent must be uninstalled on the manager queue - dr->dm_needs_mgr = 1; - goto out; - } - if (unlikely(!_dispatch_mach_msg_send(dm, - dr->dm_checkin, NULL, pp, DM_SEND_INVOKE_NONE))) { - goto out; - } - dr->dm_checkin = NULL; - } - } - mach_msg_return_t kr = 0; - mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options; - if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { - mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED; - opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK); - if (!is_reply) { - if (dmsg != dr->dm_checkin) { - msg->msgh_remote_port = dr->dm_send; - } - if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { - if (slowpath(!dm->dm_dkev)) { - _dispatch_mach_notification_kevent_register(dm, - msg->msgh_remote_port); - } - if (fastpath(dm->dm_dkev)) { - if (DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) { - goto out; - } - opts |= MACH_SEND_NOTIFY; - } - } - opts |= MACH_SEND_TIMEOUT; - if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { - ipc_kvoucher = _voucher_create_mach_voucher_with_priority( - voucher, dmsg->dmsg_priority); - } - _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); - if (ipc_kvoucher) { - kvoucher_move_send = true; - clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, - ipc_kvoucher, kvoucher_move_send); - } else { - clear_voucher = _voucher_mach_msg_set(msg, voucher); - } - if (pp && _dispatch_evfilt_machport_direct_enabled) { - opts |= MACH_SEND_OVERRIDE; - msg_priority = (mach_msg_priority_t)pp; - } - } - _dispatch_debug_machport(msg->msgh_remote_port); - if (reply_port) _dispatch_debug_machport(reply_port); - if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { - if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { - _dispatch_clear_thread_reply_port(reply_port); - } - _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg, - msg_opts); - } - kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, - msg_priority); - _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " - "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: " - "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, - opts, msg_opts, msg->msgh_voucher_port, reply_port, - mach_error_string(kr), kr); - if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) { - 
_dispatch_mach_reply_waiter_unregister(dm, dmr, - DKEV_UNREGISTER_REPLY_REMOVE); - } - if (clear_voucher) { - if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { - DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption"); - } - mach_voucher_t kv; - kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); - if (kvoucher_move_send) ipc_kvoucher = kv; - } - } - if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { - if (opts & MACH_SEND_NOTIFY) { - _dispatch_debug("machport[0x%08x]: send-possible notification " - "armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident); - DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) = 1; - } else { - // send kevent must be installed on the manager queue - dr->dm_needs_mgr = 1; - } - if (ipc_kvoucher) { - _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); - voucher_t ipc_voucher; - ipc_voucher = _voucher_create_with_priority_and_mach_voucher( - voucher, dmsg->dmsg_priority, ipc_kvoucher); - _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]", - ipc_voucher, dmsg, voucher); - if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); - dmsg->dmsg_voucher = ipc_voucher; - } - goto out; - } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { - _voucher_dealloc_mach_voucher(ipc_kvoucher); - } - if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port && - !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply_port)) { - if (!dm->ds_is_direct_kevent && - _dispatch_queue_get_current() != &_dispatch_mgr_q) { - // reply receive kevent must be installed on the manager queue - dr->dm_needs_mgr = 1; - dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY; - goto out; - } - _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg); - } - if (unlikely(!is_reply && dmsg == dr->dm_checkin && dm->dm_dkev)) { - _dispatch_mach_notification_kevent_unregister(dm); - } - if (slowpath(kr)) { - // Send failed, so reply was never registered - dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); - } - _dispatch_mach_msg_set_reason(dmsg, kr, 0); - if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) && - (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) { - // Return sent message synchronously - send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT; - } else { - _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); - } - if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); - send_status |= DM_SEND_STATUS_SUCCESS; -out: - return send_status; -} - -#pragma mark - -#pragma mark dispatch_mach_send_refs_t - -static void _dispatch_mach_cancel(dispatch_mach_t dm); -static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, - pthread_priority_t pp); - -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dm_state_get_override(uint64_t dm_state) -{ - dm_state &= DISPATCH_MACH_STATE_OVERRIDE_MASK; - return (pthread_priority_t)(dm_state >> 32); -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dm_state_override_from_priority(pthread_priority_t pp) -{ - uint64_t pp_state = pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return pp_state << 32; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dm_state_needs_override(uint64_t dm_state, uint64_t pp_state) -{ - return (pp_state > (dm_state & DISPATCH_MACH_STATE_OVERRIDE_MASK)); -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dm_state_merge_override(uint64_t dm_state, uint64_t pp_state) -{ - if (_dm_state_needs_override(dm_state, pp_state)) { - dm_state &= ~DISPATCH_MACH_STATE_OVERRIDE_MASK; - dm_state |= pp_state; - 
dm_state |= DISPATCH_MACH_STATE_DIRTY; - dm_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - } - return dm_state; -} - -#define _dispatch_mach_send_push_update_tail(dr, tail) \ - os_mpsc_push_update_tail(dr, dm, tail, do_next) -#define _dispatch_mach_send_push_update_head(dr, head) \ - os_mpsc_push_update_head(dr, dm, head) -#define _dispatch_mach_send_get_head(dr) \ - os_mpsc_get_head(dr, dm) -#define _dispatch_mach_send_unpop_head(dr, dc, dc_next) \ - os_mpsc_undo_pop_head(dr, dm, dc, dc_next, do_next) -#define _dispatch_mach_send_pop_head(dr, head) \ - os_mpsc_pop_head(dr, dm, head, do_next) - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dr, - dispatch_object_t dou) -{ - if (_dispatch_mach_send_push_update_tail(dr, dou._do)) { - _dispatch_mach_send_push_update_head(dr, dou._do); - return true; - } - return false; -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_mach_reply_refs_t dmr; - dispatch_mach_msg_t dmsg; - struct dispatch_object_s *dc = NULL, *next_dc = NULL; - pthread_priority_t pp = _dm_state_get_override(dr->dm_state); - uint64_t old_state, new_state; - uint32_t send_status; - bool needs_mgr, disconnecting, returning_send_result = false; - -again: - needs_mgr = false; disconnecting = false; - while (dr->dm_tail) { - dc = _dispatch_mach_send_get_head(dr); - do { - dispatch_mach_send_invoke_flags_t sf = send_flags; - // Only request immediate send result for the first message - send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; - next_dc = _dispatch_mach_send_pop_head(dr, dc); - if (_dispatch_object_has_type(dc, - DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { - if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { - goto partial_drain; - } - _dispatch_continuation_pop(dc, dm->_as_dq, flags); - continue; - } - if (_dispatch_object_is_slow_item(dc)) { - dmsg = ((dispatch_continuation_t)dc)->dc_data; - dmr = ((dispatch_continuation_t)dc)->dc_other; - } else if (_dispatch_object_has_vtable(dc)) { - dmsg = (dispatch_mach_msg_t)dc; - dmr = NULL; - } else { - if ((dm->dm_dkev || !dm->ds_is_direct_kevent) && - (_dispatch_queue_get_current() != &_dispatch_mgr_q)) { - // send kevent must be uninstalled on the manager queue - needs_mgr = true; - goto partial_drain; - } - if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) { - disconnecting = true; - goto partial_drain; - } - continue; - } - _dispatch_voucher_ktrace_dmsg_pop(dmsg); - if (unlikely(dr->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED))) { - _dispatch_mach_msg_not_sent(dm, dmsg); - continue; - } - send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, pp, sf); - if (unlikely(!send_status)) { - goto partial_drain; - } - if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) { - returning_send_result = true; - } - } while ((dc = next_dc)); - } - - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - if (old_state & DISPATCH_MACH_STATE_DIRTY) { - new_state = old_state; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } else { - // unlock - new_state = 0; - } - }); - goto out; - -partial_drain: - // if this is not a complete drain, we must undo some things - _dispatch_mach_send_unpop_head(dr, dc, next_dc); - - if (_dispatch_object_has_type(dc, - 
DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = old_state; - new_state |= DISPATCH_MACH_STATE_DIRTY; - new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER; - new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; - }); - } else { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = old_state; - if (old_state & (DISPATCH_MACH_STATE_DIRTY | - DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) { - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } else { - new_state |= DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; - } - }); - } - -out: - if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) { - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); - } - - if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { - os_atomic_thread_fence(acquire); - pp = _dm_state_get_override(new_state); - goto again; - } - - if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - pp = _dm_state_get_override(new_state); - _dispatch_mach_send_barrier_drain_push(dm, pp); - } else { - if (needs_mgr) { - pp = _dm_state_get_override(new_state); - } else { - pp = 0; - } - if (!disconnecting) dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); - } - return returning_send_result; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_send_invoke(dispatch_mach_t dm, - dispatch_invoke_flags_t flags, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_lock_owner tid_self = _dispatch_tid_self(); - uint64_t old_state, new_state; - pthread_priority_t pp_floor; - - uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK; - uint64_t canlock_state = 0; - - if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) { - canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; - canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER; - } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { - canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; - } - - if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) { - pp_floor = 0; - } else { - // _dispatch_queue_class_invoke will have applied the queue override - // (if any) before we get here. Else use the default base priority - // as an estimation of the priority we already asked for. - pp_floor = dm->_as_dq->dq_override; - if (!pp_floor) { - pp_floor = _dispatch_get_defaultpriority(); - pp_floor &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - } - } - -retry: - os_atomic_rmw_loop2o(dm->dm_refs, dm_state, old_state, new_state, acquire, { - new_state = old_state; - if (unlikely((old_state & canlock_mask) != canlock_state)) { - if (!(send_flags & DM_SEND_INVOKE_FLUSH)) { - os_atomic_rmw_loop_give_up(break); - } - new_state |= DISPATCH_MACH_STATE_DIRTY; - } else { - if (likely(pp_floor)) { - pthread_priority_t pp = _dm_state_get_override(old_state); - if (unlikely(pp > pp_floor)) { - os_atomic_rmw_loop_give_up({ - _dispatch_wqthread_override_start(tid_self, pp); - // Ensure that the root queue sees - // that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); - pp_floor = pp; - goto retry; - }); - } - } - new_state |= tid_self; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } - }); - - if (unlikely((old_state & canlock_mask) != canlock_state)) { - return; - } - if (send_flags & DM_SEND_INVOKE_CANCEL) { - _dispatch_mach_cancel(dm); - } - _dispatch_mach_send_drain(dm, flags, send_flags); -} - -DISPATCH_NOINLINE -void -_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) -{ - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - dispatch_thread_frame_s dtf; - - DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY); - DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER); - // hide the mach channel (see _dispatch_mach_barrier_invoke comment) - _dispatch_thread_frame_stash(&dtf); - _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{ - _dispatch_mach_send_invoke(dm, flags, - DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER); - }); - _dispatch_thread_frame_unstash(&dtf); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, - pthread_priority_t pp) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - - dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN); - dc->dc_func = NULL; - dc->dc_ctxt = NULL; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - return _dispatch_queue_push(dm->_as_dq, dc, pp); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, - pthread_priority_t pp) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - uint64_t pp_state, old_state, new_state, state_flags = 0; - dispatch_lock_owner owner; - bool wakeup; - - // when pushing a send barrier that destroys - // the last reference to this channel, and the send queue is already - // draining on another thread, the send barrier may run as soon as - // _dispatch_mach_send_push_inline() returns. 
- _dispatch_retain(dm); - pp_state = _dm_state_override_from_priority(pp); - - wakeup = _dispatch_mach_send_push_inline(dr, dc); - if (wakeup) { - state_flags = DISPATCH_MACH_STATE_DIRTY; - if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) { - state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; - } - } - - if (state_flags) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = _dm_state_merge_override(old_state, pp_state); - new_state |= state_flags; - }); - } else { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, relaxed, { - new_state = _dm_state_merge_override(old_state, pp_state); - if (old_state == new_state) { - os_atomic_rmw_loop_give_up(break); - } - }); - } - - pp = _dm_state_get_override(new_state); - owner = _dispatch_lock_owner((dispatch_lock)old_state); - if (owner) { - if (_dm_state_needs_override(old_state, pp_state)) { - _dispatch_wqthread_override_start_check_owner(owner, pp, - &dr->dm_state_lock.dul_lock); - } - return _dispatch_release_tailcall(dm); - } - - dispatch_wakeup_flags_t wflags = 0; - if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { - _dispatch_mach_send_barrier_drain_push(dm, pp); - } else if (wakeup || dr->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED)) { - wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME; - } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME; - } - if (wflags) { - return dx_wakeup(dm, pp, wflags); - } - return _dispatch_release_tailcall(dm); -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, - dispatch_object_t dou, pthread_priority_t pp, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_lock_owner tid_self = _dispatch_tid_self(); - uint64_t pp_state, old_state, new_state, canlock_mask, state_flags = 0; - dispatch_lock_owner owner; - - pp_state = _dm_state_override_from_priority(pp); - bool wakeup = _dispatch_mach_send_push_inline(dr, dou); - if (wakeup) { - state_flags = DISPATCH_MACH_STATE_DIRTY; - } - - if (unlikely(dr->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED))) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = _dm_state_merge_override(old_state, pp_state); - new_state |= state_flags; - }); - dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); - return false; - } - - canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK | - DISPATCH_MACH_STATE_PENDING_BARRIER; - if (state_flags) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, seq_cst, { - new_state = _dm_state_merge_override(old_state, pp_state); - new_state |= state_flags; - if (likely((old_state & canlock_mask) == 0)) { - new_state |= tid_self; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } - }); - } else { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, acquire, { - new_state = _dm_state_merge_override(old_state, pp_state); - if (new_state == old_state) { - os_atomic_rmw_loop_give_up(return false); - } - if (likely((old_state & canlock_mask) == 0)) { - new_state |= tid_self; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } - }); - } - - owner = _dispatch_lock_owner((dispatch_lock)old_state); - if (owner) { - if (_dm_state_needs_override(old_state, pp_state)) { - 
_dispatch_wqthread_override_start_check_owner(owner, pp, - &dr->dm_state_lock.dul_lock); - } - return false; - } - - if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - dx_wakeup(dm, pp, DISPATCH_WAKEUP_OVERRIDING); - return false; - } - - // Ensure our message is still at the head of the queue and has not already - // been dequeued by another thread that raced us to the send queue lock. - // A plain load of the head and comparison against our object pointer is - // sufficient. - if (unlikely(!(wakeup && dou._do == dr->dm_head))) { - // Don't request immediate send result for messages we don't own - send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; - } - return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags); -} - -static void -_dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, - const _dispatch_kevent_qos_s *ke) -{ - if (!(ke->fflags & dm->ds_pending_data_mask)) { - return; - } - _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN, - DM_SEND_INVOKE_FLUSH); -} - -#pragma mark - -#pragma mark dispatch_mach_t - -static inline mach_msg_option_t -_dispatch_mach_checkin_options(void) -{ - mach_msg_option_t options = 0; -#if DISPATCH_USE_CHECKIN_NOIMPORTANCE - options = MACH_SEND_NOIMPORTANCE; // -#endif - return options; -} - - -static inline mach_msg_option_t -_dispatch_mach_send_options(void) -{ - mach_msg_option_t options = 0; - return options; -} - -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_mach_priority_propagate(mach_msg_option_t options) -{ -#if DISPATCH_USE_NOIMPORTANCE_QOS - if (options & MACH_SEND_NOIMPORTANCE) return 0; -#else - (void)options; -#endif - return _dispatch_priority_propagate(); -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - dispatch_continuation_t dc_wait, mach_msg_option_t options) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { - DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued"); - } - dispatch_retain(dmsg); - pthread_priority_t priority = _dispatch_mach_priority_propagate(options); - options |= _dispatch_mach_send_options(); - dmsg->dmsg_options = options; - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg); - bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == - MACH_MSG_TYPE_MOVE_SEND_ONCE); - dmsg->dmsg_priority = priority; - dmsg->dmsg_voucher = _voucher_copy(); - _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); - - uint32_t send_status; - bool returning_send_result = false; - dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; - if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) { - send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND; - } - if (is_reply && !dmsg->dmsg_reply && !dr->dm_disconnect_cnt && - !(dm->dq_atomic_flags & DSF_CANCELED)) { - // replies are sent to a send-once right and don't need the send queue - dispatch_assert(!dc_wait); - send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags); - dispatch_assert(send_status); - returning_send_result = !!(send_status & - DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT); - } else { - _dispatch_voucher_ktrace_dmsg_push(dmsg); - priority &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - dispatch_object_t dou = { ._dmsg = dmsg }; - if (dc_wait) dou._dc = dc_wait; - returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou, - priority, send_flags); - } - if (returning_send_result) { - 
_dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg); - if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); - dmsg->dmsg_voucher = NULL; - dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - dispatch_release(dmsg); - } - return returning_send_result; -} - -DISPATCH_NOINLINE -void -dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - mach_msg_option_t options) -{ - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); - dispatch_assert(!returned_send_result); -} - -DISPATCH_NOINLINE -void -dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - mach_msg_option_t options, dispatch_mach_send_flags_t send_flags, - dispatch_mach_reason_t *send_result, mach_error_t *send_error) -{ - if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { - DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); - } - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; - bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); - unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; - mach_error_t err = 0; - if (returned_send_result) { - reason = _dispatch_mach_msg_get_reason(dmsg, &err); - } - *send_result = reason; - *send_error = err; -} - -static inline -dispatch_mach_msg_t -_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg, mach_msg_option_t options, - bool *returned_send_result) -{ - mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); - if (!reply_port) { - // use per-thread mach reply port - reply_port = _dispatch_get_thread_reply_port(); - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); - dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == - MACH_MSG_TYPE_MAKE_SEND_ONCE); - hdr->msgh_local_port = reply_port; - options |= DISPATCH_MACH_OWNED_REPLY_PORT; - } - - dispatch_mach_reply_refs_t dmr; -#if DISPATCH_DEBUG - dmr = _dispatch_calloc(1, sizeof(*dmr)); -#else - struct dispatch_mach_reply_refs_s dmr_buf = { }; - dmr = &dmr_buf; -#endif - struct dispatch_continuation_s dc_wait = { - .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, - .dc_data = dmsg, - .dc_other = dmr, - .dc_priority = DISPATCH_NO_PRIORITY, - .dc_voucher = DISPATCH_NO_VOUCHER, - }; - dmr->dmr_ctxt = dmsg->do_ctxt; - *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); - if (options & DISPATCH_MACH_OWNED_REPLY_PORT) { - _dispatch_clear_thread_reply_port(reply_port); - } - dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port); -#if DISPATCH_DEBUG - free(dmr); -#endif - return dmsg; -} - -DISPATCH_NOINLINE -dispatch_mach_msg_t -dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg, mach_msg_option_t options) -{ - bool returned_send_result; - dispatch_mach_msg_t reply; - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - options |= DISPATCH_MACH_WAIT_FOR_REPLY; - reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, - &returned_send_result); - dispatch_assert(!returned_send_result); - return reply; -} - -DISPATCH_NOINLINE -dispatch_mach_msg_t -dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg, mach_msg_option_t options, - dispatch_mach_send_flags_t send_flags, - dispatch_mach_reason_t *send_result, 
mach_error_t *send_error) -{ - if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { - DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); - } - bool returned_send_result; - dispatch_mach_msg_t reply; - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - options |= DISPATCH_MACH_WAIT_FOR_REPLY; - options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; - reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, - &returned_send_result); - unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; - mach_error_t err = 0; - if (returned_send_result) { - reason = _dispatch_mach_msg_get_reason(dmsg, &err); - } - *send_result = reason; - *send_error = err; - return reply; -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_disconnect(dispatch_mach_t dm) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - bool disconnected; - if (dm->dm_dkev) { - _dispatch_mach_notification_kevent_unregister(dm); - } - if (MACH_PORT_VALID(dr->dm_send)) { - _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dr->dm_send); - } - dr->dm_send = MACH_PORT_NULL; - if (dr->dm_checkin) { - _dispatch_mach_msg_not_sent(dm, dr->dm_checkin); - dr->dm_checkin = NULL; - } - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - dispatch_mach_reply_refs_t dmr, tmp; - TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp) { - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - if (dmr->dmr_dkev) { - _dispatch_mach_reply_kevent_unregister(dm, dmr, - DKEV_UNREGISTER_DISCONNECTED); - } else { - _dispatch_mach_reply_waiter_unregister(dm, dmr, - DKEV_UNREGISTER_DISCONNECTED); - } - } - disconnected = TAILQ_EMPTY(&dm->dm_refs->dm_replies); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - return disconnected; -} - -static void -_dispatch_mach_cancel(dispatch_mach_t dm) -{ - _dispatch_object_debug(dm, "%s", __func__); - if (!_dispatch_mach_disconnect(dm)) return; - if (dm->ds_dkev) { - mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident; - _dispatch_source_kevent_unregister(dm->_as_ds); - if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) { - _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); - } - } else { - _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED, - DSF_ARMED | DSF_DEFERRED_DELETE); - } -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) -{ - if (!_dispatch_mach_disconnect(dm)) return false; - dispatch_mach_send_refs_t dr = dm->dm_refs; - dr->dm_checkin = dou._dc->dc_data; - dr->dm_send = (mach_port_t)dou._dc->dc_other; - _dispatch_continuation_free(dou._dc); - (void)os_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); - _dispatch_object_debug(dm, "%s", __func__); - _dispatch_release(dm); // - return true; -} - -DISPATCH_NOINLINE -void -dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, - dispatch_mach_msg_t checkin) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - (void)os_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); - if (MACH_PORT_VALID(send) && checkin) { - dispatch_retain(checkin); - checkin->dmsg_options = _dispatch_mach_checkin_options(); - dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); - } else { - checkin = NULL; - dr->dm_checkin_port = MACH_PORT_NULL; - } - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT; - // actually called manually in _dispatch_mach_send_drain - 
dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; - dc->dc_ctxt = dc; - dc->dc_data = checkin; - dc->dc_other = (void*)(uintptr_t)send; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_retain(dm); // - return _dispatch_mach_send_push(dm, dc, 0); -} - -DISPATCH_NOINLINE -mach_port_t -dispatch_mach_get_checkin_port(dispatch_mach_t dm) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) { - return MACH_PORT_DEAD; - } - return dr->dm_checkin_port; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_connect_invoke(dispatch_mach_t dm) -{ - dispatch_mach_refs_t dr = dm->ds_refs; - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_CONNECTED, NULL, 0, dr->dm_handler_func); - dm->dm_connect_handler_called = 1; -} - -DISPATCH_NOINLINE -void -_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, - dispatch_invoke_flags_t flags) -{ - dispatch_thread_frame_s dtf; - dispatch_mach_refs_t dr; - dispatch_mach_t dm; - mach_error_t err; - unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); - _dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| - DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; - - // hide mach channel - dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf); - dr = dm->ds_refs; - dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - _dispatch_voucher_ktrace_dmsg_pop(dmsg); - _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg); - (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority, - dmsg->dmsg_voucher, adopt_flags); - dmsg->dmsg_voucher = NULL; - dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, - dr->dm_handler_func); - }); - _dispatch_thread_frame_unstash(&dtf); - _dispatch_introspection_queue_item_complete(dmsg); - dispatch_release(dmsg); -} - -DISPATCH_NOINLINE -void -_dispatch_mach_barrier_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) -{ - dispatch_thread_frame_s dtf; - dispatch_mach_t dm = dc->dc_other; - dispatch_mach_refs_t dr; - uintptr_t dc_flags = (uintptr_t)dc->dc_data; - unsigned long type = dc_type(dc); - - // hide mach channel from clients - if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { - // on the send queue, the mach channel isn't the current queue - // its target queue is the current one already - _dispatch_thread_frame_stash(&dtf); - } - dr = dm->ds_refs; - DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT); - _dispatch_continuation_pop_forwarded(dc, dm->dq_override_voucher, dc_flags,{ - dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, - dr->dm_handler_func); - }); - }); - if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { - _dispatch_thread_frame_unstash(&dtf); - } -} - -DISPATCH_NOINLINE -void -dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t func) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - pthread_priority_t pp; - - _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - 
dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); - _dispatch_trace_continuation_push(dm->_as_dq, dc); - pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); - return _dispatch_mach_send_push(dm, dc, pp); -} - -DISPATCH_NOINLINE -void -dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - pthread_priority_t pp; - - _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); - _dispatch_trace_continuation_push(dm->_as_dq, dc); - pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); - return _dispatch_mach_send_push(dm, dc, pp); -} - -DISPATCH_NOINLINE -void -dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t func) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - - _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); - return _dispatch_continuation_async(dm->_as_dq, dc); -} - -DISPATCH_NOINLINE -void -dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - - _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); - return _dispatch_continuation_async(dm->_as_dq, dc); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) -{ - dispatch_mach_refs_t dr = dm->ds_refs; - - dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); - }); - dm->dm_cancel_handler_called = 1; - _dispatch_release(dm); // the retain is done at creation time -} - -DISPATCH_NOINLINE -void -dispatch_mach_cancel(dispatch_mach_t dm) -{ - dispatch_source_cancel(dm->_as_ds); -} - -static void -_dispatch_mach_install(dispatch_mach_t dm, pthread_priority_t pp) -{ - uint32_t disconnect_cnt; - - if (dm->ds_dkev) { - _dispatch_source_kevent_register(dm->_as_ds, pp); - } - if (dm->ds_is_direct_kevent) { - pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | - _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG | - _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - // _dispatch_mach_reply_kevent_register assumes this has been done - // which is unlike regular sources or queues, the DEFAULTQUEUE flag - // is used so that the priority of that channel doesn't act as a floor - // QoS for incoming messages (26761457) - dm->dq_priority = (dispatch_priority_t)pp; - } - dm->ds_is_installed = true; - if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_refs, dm_disconnect_cnt, - DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) { - DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed"); - } -} - -void -_dispatch_mach_finalize_activation(dispatch_mach_t dm) -{ - if (dm->ds_is_direct_kevent && !dm->ds_is_installed) { - dispatch_source_t ds = dm->_as_ds; - pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); - if (pp) _dispatch_mach_install(dm, pp); - } - - // call "super" - 
_dispatch_queue_finalize_activation(dm->_as_dq); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_mach_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, - uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) -{ - dispatch_mach_t dm = dou._dm; - dispatch_queue_t retq = NULL; - dispatch_queue_t dq = _dispatch_queue_get_current(); - - // This function performs all mach channel actions. Each action is - // responsible for verifying that it takes place on the appropriate queue. - // If the current queue is not the correct queue for this action, the - // correct queue will be returned and the invoke will be re-driven on that - // queue. - - // The order of tests here in invoke and in wakeup should be consistent. - - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_queue_t dkq = &_dispatch_mgr_q; - - if (dm->ds_is_direct_kevent) { - dkq = dm->do_targetq; - } - - if (slowpath(!dm->ds_is_installed)) { - // The channel needs to be installed on the kevent queue. - if (dq != dkq) { - return dkq; - } - _dispatch_mach_install(dm, _dispatch_get_defaultpriority()); - } - - if (_dispatch_queue_class_probe(dm)) { - if (dq == dm->do_targetq) { - retq = _dispatch_queue_serial_drain(dm->_as_dq, flags, owned, NULL); - } else { - retq = dm->do_targetq; - } - } - - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - - if (dr->dm_tail) { - bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && - (dm->dm_dkev || !dm->ds_is_direct_kevent)); - if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || - (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { - // The channel has pending messages to send. - if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) { - return retq ? retq : &_dispatch_mgr_q; - } - dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; - if (dq != &_dispatch_mgr_q) { - send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER; - } - _dispatch_mach_send_invoke(dm, flags, send_flags); - } - } else if (dqf & DSF_CANCELED) { - // The channel has been cancelled and needs to be uninstalled from the - // manager queue. After uninstallation, the cancellation handler needs - // to be delivered to the target queue. - if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { - // waiting for the delivery of a deferred delete event - return retq; - } - if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { - if (dq != &_dispatch_mgr_q) { - return retq ? retq : &_dispatch_mgr_q; - } - _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL); - dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - if (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { - // waiting for the delivery of a deferred delete event - // or deletion didn't happen because send_invoke couldn't - // acquire the send lock - return retq; - } - } - if (!dm->dm_cancel_handler_called) { - if (dq != dm->do_targetq) { - return retq ? retq : dm->do_targetq; - } - _dispatch_mach_cancel_invoke(dm, flags); - } - } - - return retq; -} - -DISPATCH_NOINLINE -void -_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) -{ - _dispatch_queue_class_invoke(dm, flags, _dispatch_mach_invoke2); -} - -void -_dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, - dispatch_wakeup_flags_t flags) -{ - // This function determines whether the mach channel needs to be invoked. - // The order of tests here in probe and in invoke should be consistent. 
- - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; - dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - - if (dm->ds_is_direct_kevent) { - dkq = DISPATCH_QUEUE_WAKEUP_TARGET; - } - - if (!dm->ds_is_installed) { - // The channel needs to be installed on the kevent queue. - tq = dkq; - goto done; - } - - if (_dispatch_queue_class_probe(dm)) { - tq = DISPATCH_QUEUE_WAKEUP_TARGET; - goto done; - } - - if (_dispatch_lock_is_locked(dr->dm_state_lock.dul_lock)) { - // Sending and uninstallation below require the send lock, the channel - // will be woken up when the lock is dropped - _dispatch_queue_reinstate_override_priority(dm, (dispatch_priority_t)pp); - goto done; - } - - if (dr->dm_tail) { - bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && - (dm->dm_dkev || !dm->ds_is_direct_kevent)); - if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || - (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { - if (unlikely(requires_mgr)) { - tq = DISPATCH_QUEUE_WAKEUP_MGR; - } else { - tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } - } else { - // can happen when we can't send because the port is full - // but we should not lose the override - _dispatch_queue_reinstate_override_priority(dm, - (dispatch_priority_t)pp); - } - } else if (dqf & DSF_CANCELED) { - if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { - // waiting for the delivery of a deferred delete event - } else if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { - // The channel needs to be uninstalled from the manager queue - tq = DISPATCH_QUEUE_WAKEUP_MGR; - } else if (!dm->dm_cancel_handler_called) { - // the cancellation handler needs to be delivered to the target - // queue. - tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } - } - -done: - if (tq) { - return _dispatch_queue_class_wakeup(dm->_as_dq, pp, flags, tq); - } else if (pp) { - return _dispatch_queue_class_override_drainer(dm->_as_dq, pp, flags); - } else if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dm); - } -} - -#pragma mark - -#pragma mark dispatch_mach_msg_t - -dispatch_mach_msg_t -dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, - dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr) -{ - if (slowpath(size < sizeof(mach_msg_header_t)) || - slowpath(destructor && !msg)) { - DISPATCH_CLIENT_CRASH(size, "Empty message"); - } - dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), - sizeof(struct dispatch_mach_msg_s) + - (destructor ? 
0 : size - sizeof(dmsg->dmsg_buf))); - if (destructor) { - dmsg->dmsg_msg = msg; - } else if (msg) { - memcpy(dmsg->dmsg_buf, msg, size); - } - dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - dmsg->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); - dmsg->dmsg_destructor = destructor; - dmsg->dmsg_size = size; - if (msg_ptr) { - *msg_ptr = _dispatch_mach_msg_get_msg(dmsg); - } - return dmsg; -} - -void -_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg) -{ - if (dmsg->dmsg_voucher) { - _voucher_release(dmsg->dmsg_voucher); - dmsg->dmsg_voucher = NULL; - } - switch (dmsg->dmsg_destructor) { - case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT: - break; - case DISPATCH_MACH_MSG_DESTRUCTOR_FREE: - free(dmsg->dmsg_msg); - break; - case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: { - mach_vm_size_t vm_size = dmsg->dmsg_size; - mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg; - (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(), - vm_addr, vm_size)); - break; - }} -} - -static inline mach_msg_header_t* -_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg) -{ - return dmsg->dmsg_destructor ? dmsg->dmsg_msg : - (mach_msg_header_t*)dmsg->dmsg_buf; -} - -mach_msg_header_t* -dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr) -{ - if (size_ptr) { - *size_ptr = dmsg->dmsg_size; - } - return _dispatch_mach_msg_get_msg(dmsg); -} - -size_t -_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(dmsg), dmsg); - offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, " - "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1); - offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " - "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf); - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); - if (hdr->msgh_id) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", - hdr->msgh_id); - } - if (hdr->msgh_size) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ", - hdr->msgh_size); - } - if (hdr->msgh_bits) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u", - MACH_MSGH_BITS_LOCAL(hdr->msgh_bits), - MACH_MSGH_BITS_REMOTE(hdr->msgh_bits)); - if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x", - MACH_MSGH_BITS_OTHER(hdr->msgh_bits)); - } - offset += dsnprintf(&buf[offset], bufsiz - offset, ">, "); - } - if (hdr->msgh_local_port && hdr->msgh_remote_port) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, " - "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port); - } else if (hdr->msgh_local_port) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x", - hdr->msgh_local_port); - } else if (hdr->msgh_remote_port) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x", - hdr->msgh_remote_port); - } else { - offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports"); - } - offset += dsnprintf(&buf[offset], bufsiz - offset, " } }"); - return offset; -} - -#pragma mark - -#pragma mark dispatch_mig_server - -mach_msg_return_t -dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, - dispatch_mig_callback_t callback) -{ - mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT - | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) - | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; - mach_msg_options_t tmp_options; - mig_reply_error_t *bufTemp, *bufRequest,
*bufReply; - mach_msg_return_t kr = 0; - uint64_t assertion_token = 0; - unsigned int cnt = 1000; // do not stall out serial queues - boolean_t demux_success; - bool received = false; - size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; - - bufRequest = alloca(rcv_size); - bufRequest->RetCode = 0; - for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size); - p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } - - bufReply = alloca(rcv_size); - bufReply->Head.msgh_size = 0; - for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size); - p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } - -#if DISPATCH_DEBUG - options |= MACH_RCV_LARGE; // rdar://problem/8422992 -#endif - tmp_options = options; - // XXX FIXME -- change this to not starve out the target queue - for (;;) { - if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { - options &= ~MACH_RCV_MSG; - tmp_options &= ~MACH_RCV_MSG; - - if (!(tmp_options & MACH_SEND_MSG)) { - goto out; - } - } - kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size, - (mach_msg_size_t)rcv_size, (mach_port_t)ds->ds_ident_hack, 0,0); - - tmp_options = options; - - if (slowpath(kr)) { - switch (kr) { - case MACH_SEND_INVALID_DEST: - case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } - break; - case MACH_RCV_TIMED_OUT: - // Don't return an error if a message was sent this time or - // a message was successfully received previously - // rdar://problems/7363620&7791738 - if(bufReply->Head.msgh_remote_port || received) { - kr = MACH_MSG_SUCCESS; - } - break; - case MACH_RCV_INVALID_NAME: - break; -#if DISPATCH_DEBUG - case MACH_RCV_TOO_LARGE: - // receive messages that are too large and log their id and size - // rdar://problem/8422992 - tmp_options &= ~MACH_RCV_LARGE; - size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; - void *large_buf = malloc(large_size); - if (large_buf) { - rcv_size = large_size; - bufReply = large_buf; - } - if (!mach_msg(&bufReply->Head, tmp_options, 0, - (mach_msg_size_t)rcv_size, - (mach_port_t)ds->ds_ident_hack, 0, 0)) { - _dispatch_log("BUG in libdispatch client: " - "dispatch_mig_server received message larger than " - "requested size %zd: id = 0x%x, size = %d", - maxmsgsz, bufReply->Head.msgh_id, - bufReply->Head.msgh_size); - } - if (large_buf) { - free(large_buf); - } - // fall through -#endif - default: - _dispatch_bug_mach_client( - "dispatch_mig_server: mach_msg() failed", kr); - break; - } - goto out; - } - - if (!(tmp_options & MACH_RCV_MSG)) { - goto out; - } - - if (assertion_token) { -#if DISPATCH_USE_IMPORTANCE_ASSERTION - int r = proc_importance_assertion_complete(assertion_token); - (void)dispatch_assume_zero(r); -#endif - assertion_token = 0; - } - received = true; - - bufTemp = bufRequest; - bufRequest = bufReply; - bufReply = bufTemp; - -#if DISPATCH_USE_IMPORTANCE_ASSERTION -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wdeprecated-declarations" - int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head, - NULL, &assertion_token); - if (r && slowpath(r != EIO)) { - (void)dispatch_assume_zero(r); - } -#pragma clang diagnostic pop -#endif - _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head)); - demux_success = callback(&bufRequest->Head, &bufReply->Head); - - if 
(!demux_success) { - // destroy the request - but not the reply port - bufRequest->Head.msgh_remote_port = 0; - mach_msg_destroy(&bufRequest->Head); - } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode - // is present - if (slowpath(bufReply->RetCode)) { - if (bufReply->RetCode == MIG_NO_REPLY) { - continue; - } - - // destroy the request - but not the reply port - bufRequest->Head.msgh_remote_port = 0; - mach_msg_destroy(&bufRequest->Head); - } - } - - if (bufReply->Head.msgh_remote_port) { - tmp_options |= MACH_SEND_MSG; - if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != - MACH_MSG_TYPE_MOVE_SEND_ONCE) { - tmp_options |= MACH_SEND_TIMEOUT; - } - } - } - -out: - if (assertion_token) { -#if DISPATCH_USE_IMPORTANCE_ASSERTION - int r = proc_importance_assertion_complete(assertion_token); - (void)dispatch_assume_zero(r); -#endif - } - - return kr; -} - -#endif /* HAVE_MACH */ - -#pragma mark - -#pragma mark dispatch_source_debug - -DISPATCH_NOINLINE -static const char * -_evfiltstr(short filt) -{ - switch (filt) { -#define _evfilt2(f) case (f): return #f - _evfilt2(EVFILT_READ); - _evfilt2(EVFILT_WRITE); - _evfilt2(EVFILT_AIO); - _evfilt2(EVFILT_VNODE); - _evfilt2(EVFILT_PROC); - _evfilt2(EVFILT_SIGNAL); - _evfilt2(EVFILT_TIMER); -#if HAVE_MACH - _evfilt2(EVFILT_MACHPORT); - _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION); -#endif - _evfilt2(EVFILT_FS); - _evfilt2(EVFILT_USER); -#ifdef EVFILT_VM - _evfilt2(EVFILT_VM); -#endif -#ifdef EVFILT_SOCK - _evfilt2(EVFILT_SOCK); -#endif -#ifdef EVFILT_MEMORYSTATUS - _evfilt2(EVFILT_MEMORYSTATUS); -#endif - - _evfilt2(DISPATCH_EVFILT_TIMER); - _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); - _evfilt2(DISPATCH_EVFILT_CUSTOM_OR); - default: - return "EVFILT_missing"; - } -} - -#if DISPATCH_DEBUG -static const char * -_evflagstr2(uint16_t *flagsp) -{ -#define _evflag2(f) \ - if ((*flagsp & (f)) == (f) && (f)) { \ - *flagsp &= ~(f); \ - return #f "|"; \ - } - _evflag2(EV_ADD); - _evflag2(EV_DELETE); - _evflag2(EV_ENABLE); - _evflag2(EV_DISABLE); - _evflag2(EV_ONESHOT); - _evflag2(EV_CLEAR); - _evflag2(EV_RECEIPT); - _evflag2(EV_DISPATCH); - _evflag2(EV_UDATA_SPECIFIC); -#ifdef EV_POLL - _evflag2(EV_POLL); -#endif -#ifdef EV_OOBAND - _evflag2(EV_OOBAND); -#endif - _evflag2(EV_ERROR); - _evflag2(EV_EOF); - _evflag2(EV_VANISHED); - *flagsp = 0; - return "EV_UNKNOWN "; -} - -DISPATCH_NOINLINE -static const char * -_evflagstr(uint16_t flags, char *str, size_t strsize) -{ - str[0] = 0; - while (flags) { - strlcat(str, _evflagstr2(&flags), strsize); - } - size_t sz = strlen(str); - if (sz) str[sz-1] = 0; - return str; -} -#endif - -static size_t -_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - dispatch_queue_t target = ds->do_targetq; - return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, " - "mask = 0x%lx, pending_data = 0x%lx, registered = %d, " - "armed = %d, deleted = %d%s, canceled = %d, ", - target && target->dq_label ? target->dq_label : "", target, - ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data, - ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED), - (bool)(ds->dq_atomic_flags & DSF_DELETED), - (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? 
" (pending)" : "", - (bool)(ds->dq_atomic_flags & DSF_CANCELED)); -} - -static size_t -_dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - dispatch_source_refs_t dr = ds->ds_refs; - return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx" - ", last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", - (unsigned long long)ds_timer(dr).target, - (unsigned long long)ds_timer(dr).deadline, - (unsigned long long)ds_timer(dr).last_fire, - (unsigned long long)ds_timer(dr).interval, ds_timer(dr).flags); -} - -size_t -_dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(ds), ds); - offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); - offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); - if (ds->ds_is_timer) { - offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); - } - const char *filter; - if (!ds->ds_dkev) { - filter = "????"; - } else if (ds->ds_is_custom_source) { - filter = _evfiltstr((int16_t)(uintptr_t)ds->ds_dkev); - } else { - filter = _evfiltstr(ds->ds_dkev->dk_kevent.filter); - } - offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " - "filter = %s }", ds->ds_dkev, ds->ds_is_direct_kevent ? " (direct)" - : "", filter); - return offset; -} - -#if HAVE_MACH -static size_t -_dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz) -{ - dispatch_queue_t target = dm->do_targetq; - return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " - "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " - "send state = %016llx, disconnected = %d, canceled = %d ", - target && target->dq_label ? target->dq_label : "", target, - dm->ds_dkev ?(mach_port_t)dm->ds_dkev->dk_kevent.ident:0, - dm->dm_refs->dm_send, - dm->dm_dkev ?(mach_port_t)dm->dm_dkev->dk_kevent.ident:0, - dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) ? - " (armed)" : "", dm->dm_refs->dm_checkin_port, - dm->dm_refs->dm_checkin ? " (pending)" : "", - dm->dm_refs->dm_state, dm->dm_refs->dm_disconnect_cnt, - (bool)(dm->dq_atomic_flags & DSF_CANCELED)); -} - -size_t -_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dm->dq_label && !dm->dm_cancel_handler_called ? 
dm->dq_label : - dx_kind(dm), dm); - offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset); - offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset); - offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); - return offset; -} -#endif // HAVE_MACH - -#if DISPATCH_DEBUG -DISPATCH_NOINLINE -static void -dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, - int i, int n, const char *function, unsigned int line) -{ - char flagstr[256]; - char i_n[31]; - - if (n > 1) { - snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n); - } else { - i_n[0] = '\0'; - } -#if DISPATCH_USE_KEVENT_QOS - _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " - "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " - "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, " - "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident, - _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, - sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, - kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3], - function, line); -#else - _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " - "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " - "ext[0] = 0x%llx, ext[1] = 0x%llx }: %s #%u", verb, kev, i_n, - kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, - sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, -#ifndef IGNORE_KEVENT64_EXT - kev->ext[0], kev->ext[1], -#else - 0ull, 0ull, -#endif - function, line); -#endif -} - -static void -_dispatch_kevent_debugger2(void *context) -{ - struct sockaddr sa; - socklen_t sa_len = sizeof(sa); - int c, fd = (int)(long)context; - unsigned int i; - dispatch_kevent_t dk; - dispatch_source_t ds; - dispatch_source_refs_t dr; - FILE *debug_stream; - - c = accept(fd, &sa, &sa_len); - if (c == -1) { - if (errno != EAGAIN) { - (void)dispatch_assume_zero(errno); - } - return; - } -#if 0 - int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO - if (r == -1) { - (void)dispatch_assume_zero(errno); - } -#endif - debug_stream = fdopen(c, "a"); - if (!dispatch_assume(debug_stream)) { - close(c); - return; - } - - fprintf(debug_stream, "HTTP/1.0 200 OK\r\n"); - fprintf(debug_stream, "Content-type: text/html\r\n"); - fprintf(debug_stream, "Pragma: nocache\r\n"); - fprintf(debug_stream, "\r\n"); - fprintf(debug_stream, "\n"); - fprintf(debug_stream, "PID %u\n", getpid()); - fprintf(debug_stream, "\n
    \n"); - - for (i = 0; i < DSL_HASH_SIZE; i++) { - if (TAILQ_EMPTY(&_dispatch_sources[i])) { - continue; - } - TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) { - fprintf(debug_stream, "\t
  • DK %p ident %lu filter %s flags " - "0x%hx fflags 0x%x data 0x%lx udata %p\n", - dk, (unsigned long)dk->dk_kevent.ident, - _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, - dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data, - (void*)dk->dk_kevent.udata); - fprintf(debug_stream, "\t\t
      \n"); - TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { - ds = _dispatch_source_from_refs(dr); - fprintf(debug_stream, "\t\t\t
    • DS %p refcnt 0x%x state " - "0x%llx data 0x%lx mask 0x%lx flags 0x%x
    • \n", - ds, ds->do_ref_cnt + 1, ds->dq_state, - ds->ds_pending_data, ds->ds_pending_data_mask, - ds->dq_atomic_flags); - if (_dq_state_is_enqueued(ds->dq_state)) { - dispatch_queue_t dq = ds->do_targetq; - fprintf(debug_stream, "\t\t
      DQ: %p refcnt 0x%x state " - "0x%llx label: %s\n", dq, dq->do_ref_cnt + 1, - dq->dq_state, dq->dq_label ?: ""); - } - } - fprintf(debug_stream, "\t\t
    \n"); - fprintf(debug_stream, "\t
  • \n"); - } - } - fprintf(debug_stream, "
\n\n\n"); - fflush(debug_stream); - fclose(debug_stream); -} - -static void -_dispatch_kevent_debugger2_cancel(void *context) -{ - int ret, fd = (int)(long)context; - - ret = close(fd); - if (ret != -1) { - (void)dispatch_assume_zero(errno); - } -} - -static void -_dispatch_kevent_debugger(void *context DISPATCH_UNUSED) -{ - union { - struct sockaddr_in sa_in; - struct sockaddr sa; - } sa_u = { - .sa_in = { - .sin_family = AF_INET, - .sin_addr = { htonl(INADDR_LOOPBACK), }, - }, - }; - dispatch_source_t ds; - const char *valstr; - int val, r, fd, sock_opt = 1; - socklen_t slen = sizeof(sa_u); - -#ifndef __linux__ - if (issetugid()) { - return; - } -#endif - valstr = getenv("LIBDISPATCH_DEBUGGER"); - if (!valstr) { - return; - } - val = atoi(valstr); - if (val == 2) { - sa_u.sa_in.sin_addr.s_addr = 0; - } - fd = socket(PF_INET, SOCK_STREAM, 0); - if (fd == -1) { - (void)dispatch_assume_zero(errno); - return; - } - r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt, - (socklen_t) sizeof sock_opt); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } -#if 0 - r = fcntl(fd, F_SETFL, O_NONBLOCK); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } -#endif - r = bind(fd, &sa_u.sa, sizeof(sa_u)); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - r = listen(fd, SOMAXCONN); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - r = getsockname(fd, &sa_u.sa, &slen); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, - &_dispatch_mgr_q); - if (dispatch_assume(ds)) { - _dispatch_log("LIBDISPATCH: debug port: %hu", - (in_port_t)ntohs(sa_u.sa_in.sin_port)); - - /* ownership of fd transfers to ds */ - dispatch_set_context(ds, (void *)(long)fd); - dispatch_source_set_event_handler_f(ds, _dispatch_kevent_debugger2); - dispatch_source_set_cancel_handler_f(ds, - _dispatch_kevent_debugger2_cancel); - dispatch_resume(ds); - - return; - } -out_bad: - close(fd); -} - -#if HAVE_MACH - -#ifndef MACH_PORT_TYPE_SPREQUEST -#define MACH_PORT_TYPE_SPREQUEST 0x40000000 -#endif - -DISPATCH_NOINLINE -void -dispatch_debug_machport(mach_port_t name, const char* str) -{ - mach_port_type_t type; - mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0; - unsigned int dnreqs = 0, dnrsiz; - kern_return_t kr = mach_port_type(mach_task_self(), name, &type); - if (kr) { - _dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name, - kr, mach_error_string(kr), str); - return; - } - if (type & MACH_PORT_TYPE_SEND) { - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_SEND, &ns)); - } - if (type & MACH_PORT_TYPE_SEND_ONCE) { - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_SEND_ONCE, &nso)); - } - if (type & MACH_PORT_TYPE_DEAD_NAME) { - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_DEAD_NAME, &nd)); - } - if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) { - kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs); - if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr); - } - if (type & MACH_PORT_TYPE_RECEIVE) { - mach_port_status_t status = { .mps_pset = 0, }; - mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT; - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_RECEIVE, &nr)); - 
(void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(), - name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt)); - _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " - "dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) " - "sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) " - "seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs, - type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", - status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N", - status.mps_srights ? "Y":"N", status.mps_sorights, - status.mps_qlimit, status.mps_msgcount, status.mps_mscount, - status.mps_seqno, str); - } else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE| - MACH_PORT_TYPE_DEAD_NAME)) { - _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " - "dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs, - type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str); - } else { - _dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type, - str); - } -} - -#endif // HAVE_MACH - -#endif // DISPATCH_DEBUG diff --git a/src/source_internal.h b/src/source_internal.h index 41b6d11a0..208227456 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -32,168 +32,45 @@ #include // for HeaderDoc #endif -#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) -#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) -#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) -#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 4) -#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 4) - -#if HAVE_MACH -// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t -// bit values must not overlap as they share the same kevent fflags ! - -/*! - * @enum dispatch_source_mach_send_flags_t - * - * @constant DISPATCH_MACH_SEND_DELETED - * Port-deleted notification. Disabled for source registration. - */ -enum { - DISPATCH_MACH_SEND_DELETED = 0x4, -}; -/*! - * @enum dispatch_source_mach_recv_flags_t - * - * @constant DISPATCH_MACH_RECV_MESSAGE - * Receive right has pending messages - * - * @constant DISPATCH_MACH_RECV_MESSAGE_DIRECT - * Receive messages from receive right directly via kevent64() - * - * @constant DISPATCH_MACH_RECV_NO_SENDERS - * Receive right has no more senders. 
TODO - */ -enum { - DISPATCH_MACH_RECV_MESSAGE = 0x2, - DISPATCH_MACH_RECV_MESSAGE_DIRECT = 0x10, - DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20, - DISPATCH_MACH_RECV_NO_SENDERS = 0x40, -}; -#endif // HAVE_MACH - enum { /* DISPATCH_TIMER_STRICT 0x1 */ /* DISPATCH_TIMER_BACKGROUND = 0x2, */ - DISPATCH_TIMER_WALL_CLOCK = 0x4, + DISPATCH_TIMER_CLOCK_MACH = 0x4, DISPATCH_TIMER_INTERVAL = 0x8, - DISPATCH_TIMER_WITH_AGGREGATE = 0x10, + DISPATCH_TIMER_AFTER = 0x10, /* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */ - DISPATCH_TIMER_AFTER = 0x40, }; -#define DISPATCH_TIMER_QOS_NORMAL 0u -#define DISPATCH_TIMER_QOS_CRITICAL 1u -#define DISPATCH_TIMER_QOS_BACKGROUND 2u -#define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1) -#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul) - -#define DISPATCH_TIMER_KIND_WALL 0u -#define DISPATCH_TIMER_KIND_MACH 1u -#define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1) -#define DISPATCH_TIMER_KIND(tidx) ((uintptr_t)(tidx) & 0x1ul) - -#define DISPATCH_TIMER_INDEX(kind, qos) ((qos) << 1 | (kind)) -#define DISPATCH_TIMER_INDEX_DISARM \ - DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT) -#define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1) -#define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \ - DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \ - DISPATCH_TIMER_KIND_WALL : DISPATCH_TIMER_KIND_MACH, \ - f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \ - f & DISPATCH_TIMER_BACKGROUND ? DISPATCH_TIMER_QOS_BACKGROUND : \ - DISPATCH_TIMER_QOS_NORMAL); }) - -struct dispatch_kevent_s { - TAILQ_ENTRY(dispatch_kevent_s) dk_list; - TAILQ_HEAD(, dispatch_source_refs_s) dk_sources; - _dispatch_kevent_qos_s dk_kevent; -}; - -typedef struct dispatch_kevent_s *dispatch_kevent_t; - -typedef typeof(((dispatch_kevent_t)NULL)->dk_kevent.udata) _dispatch_kevent_qos_udata_t; - -#define DISPATCH_KEV_CUSTOM_ADD ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_ADD) -#define DISPATCH_KEV_CUSTOM_OR ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_OR) - -struct dispatch_source_type_s { - _dispatch_kevent_qos_s ke; - uint64_t mask; - void (*init)(dispatch_source_t ds, dispatch_source_type_t type, - uintptr_t handle, unsigned long mask, dispatch_queue_t q); -}; - -struct dispatch_timer_source_s { - uint64_t target; - uint64_t deadline; - uint64_t last_fire; - uint64_t interval; - uint64_t leeway; - unsigned long flags; // dispatch_timer_flags_t - unsigned long missed; -}; - -enum { - DS_EVENT_HANDLER = 0, - DS_CANCEL_HANDLER, - DS_REGISTN_HANDLER, -}; - -// Source state which may contain references to the source object -// Separately allocated so that 'leaks' can see sources -typedef struct dispatch_source_refs_s { - TAILQ_ENTRY(dispatch_source_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t - dispatch_continuation_t volatile ds_handler[3]; -} *dispatch_source_refs_t; - -typedef struct dispatch_timer_source_refs_s { - struct dispatch_source_refs_s _ds_refs; - struct dispatch_timer_source_s _ds_timer; - TAILQ_ENTRY(dispatch_timer_source_refs_s) dt_list; -} *dispatch_timer_source_refs_t; - -typedef struct dispatch_timer_source_aggregate_refs_s { - struct dispatch_timer_source_refs_s _dsa_refs; - TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dra_list; - TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dta_list; -} *dispatch_timer_source_aggregate_refs_t; - -#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr)) -#define _dispatch_wref2ptr(ref) ((void*)~(ref)) -#define 
_dispatch_source_from_refs(dr) \ - ((dispatch_source_t)_dispatch_wref2ptr((dr)->dr_source_wref)) -#define ds_timer(dr) \ - (((dispatch_timer_source_refs_t)(dr))->_ds_timer) -#define ds_timer_aggregate(ds) \ - ((dispatch_timer_aggregate_t)((ds)->dq_specific_q)) - DISPATCH_ALWAYS_INLINE static inline unsigned int -_dispatch_source_timer_idx(dispatch_source_refs_t dr) +_dispatch_source_timer_idx(dispatch_unote_t du) { - return DISPATCH_TIMER_IDENT(ds_timer(dr).flags); + uint32_t clock, qos = 0, fflags = du._dt->du_fflags; + + dispatch_assert(DISPATCH_CLOCK_MACH == 1); + dispatch_assert(DISPATCH_CLOCK_WALL == 0); + clock = (fflags & DISPATCH_TIMER_CLOCK_MACH) / DISPATCH_TIMER_CLOCK_MACH; + +#if DISPATCH_HAVE_TIMER_QOS + dispatch_assert(DISPATCH_TIMER_STRICT == DISPATCH_TIMER_QOS_CRITICAL); + dispatch_assert(DISPATCH_TIMER_BACKGROUND == DISPATCH_TIMER_QOS_BACKGROUND); + qos = fflags & (DISPATCH_TIMER_STRICT | DISPATCH_TIMER_BACKGROUND); + // flags are normalized so this should never happen + dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); +#endif + + return DISPATCH_TIMER_INDEX(clock, qos); } #define _DISPATCH_SOURCE_HEADER(refs) \ DISPATCH_QUEUE_HEADER(refs); \ - /* LP64: fills 32bit hole in QUEUE_HEADER */ \ unsigned int \ - ds_is_level:1, \ - ds_is_adder:1, \ ds_is_installed:1, \ - ds_is_direct_kevent:1, \ - ds_is_custom_source:1, \ - ds_needs_rearm:1, \ - ds_is_timer:1, \ - ds_vmpressure_override:1, \ - ds_memorypressure_override:1, \ - dm_handler_is_block:1, \ + dm_needs_mgr:1, \ dm_connect_handler_called:1, \ - dm_cancel_handler_called:1; \ - dispatch_kevent_t ds_dkev; \ - dispatch_##refs##_refs_t ds_refs; \ - unsigned long ds_pending_data_mask; + dm_uninstalled:1, \ + dm_cancel_handler_called:1, \ + dm_is_xpc:1 #define DISPATCH_SOURCE_HEADER(refs) \ struct dispatch_source_s _as_ds[0]; \ @@ -202,150 +79,51 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr) DISPATCH_CLASS_DECL_BARE(source); _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object); -#if DISPATCH_PURE_C +#ifndef __cplusplus struct dispatch_source_s { _DISPATCH_SOURCE_HEADER(source); - unsigned long ds_ident_hack; - unsigned long ds_data; - unsigned long ds_pending_data; -} DISPATCH_QUEUE_ALIGN; -#endif - -#if HAVE_MACH -// Mach channel state which may contain references to the channel object -// layout must match dispatch_source_refs_s -struct dispatch_mach_refs_s { - TAILQ_ENTRY(dispatch_mach_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t - dispatch_mach_handler_function_t dm_handler_func; - void *dm_handler_ctxt; -}; -typedef struct dispatch_mach_refs_s *dispatch_mach_refs_t; - -struct dispatch_mach_reply_refs_s { - TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t - dispatch_kevent_t dmr_dkev; - void *dmr_ctxt; - mach_port_t dmr_reply; - dispatch_priority_t dmr_priority; - voucher_t dmr_voucher; - TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; -}; -typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; - -#define _DISPATCH_MACH_STATE_UNUSED_MASK_2 0xff00000000000000ull -#define DISPATCH_MACH_STATE_OVERRIDE_MASK 0x00ffff0000000000ull -#define _DISPATCH_MACH_STATE_UNUSED_MASK_1 0x000000f000000000ull -#define DISPATCH_MACH_STATE_DIRTY 0x0000000800000000ull -#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE 0x0000000400000000ull -#define _DISPATCH_MACH_STATE_UNUSED_MASK_0 0x0000000200000000ull -#define DISPATCH_MACH_STATE_PENDING_BARRIER 0x0000000100000000ull -#define 
DISPATCH_MACH_STATE_UNLOCK_MASK 0x00000000ffffffffull + uint64_t ds_data DISPATCH_ATOMIC64_ALIGN; + uint64_t ds_pending_data DISPATCH_ATOMIC64_ALIGN; +} DISPATCH_ATOMIC64_ALIGN; -struct dispatch_mach_send_refs_s { - TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t - dispatch_mach_msg_t dm_checkin; - TAILQ_HEAD(, dispatch_mach_reply_refs_s) dm_replies; - dispatch_unfair_lock_s dm_replies_lock; -#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000) -#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0) -#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1) - uint32_t volatile dm_disconnect_cnt; - union { - uint64_t volatile dm_state; - DISPATCH_STRUCT_LITTLE_ENDIAN_2( - dispatch_unfair_lock_s dm_state_lock, - uint32_t dm_state_bits - ); - }; - unsigned int dm_needs_mgr:1; - struct dispatch_object_s *volatile dm_tail; - struct dispatch_object_s *volatile dm_head; - mach_port_t dm_send, dm_checkin_port; -}; -typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; +// Extracts source data from the ds_data field +#define DISPATCH_SOURCE_GET_DATA(d) ((d) & 0xFFFFFFFF) -DISPATCH_CLASS_DECL(mach); -#if DISPATCH_PURE_C -struct dispatch_mach_s { - DISPATCH_SOURCE_HEADER(mach); - dispatch_kevent_t dm_dkev; - dispatch_mach_send_refs_t dm_refs; -} DISPATCH_QUEUE_ALIGN; -#endif +// Extracts status from the ds_data field +#define DISPATCH_SOURCE_GET_STATUS(d) ((d) >> 32) -DISPATCH_CLASS_DECL(mach_msg); -struct dispatch_mach_msg_s { - DISPATCH_OBJECT_HEADER(mach_msg); - union { - mach_msg_option_t dmsg_options; - mach_error_t dmsg_error; - }; - mach_port_t dmsg_reply; - pthread_priority_t dmsg_priority; - voucher_t dmsg_voucher; - dispatch_mach_msg_destructor_t dmsg_destructor; - size_t dmsg_size; - union { - mach_msg_header_t *dmsg_msg; - char dmsg_buf[0]; - }; -}; -#endif // HAVE_MACH +// Combine data and status for the ds_data field +#define DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status) \ + ((((uint64_t)(status)) << 32) | (data)) -extern const struct dispatch_source_type_s _dispatch_source_type_after; +#endif // __cplusplus -#if TARGET_OS_EMBEDDED -#define DSL_HASH_SIZE 64u // must be a power of two -#else -#define DSL_HASH_SIZE 256u // must be a power of two -#endif - -dispatch_source_t -_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, - const struct dispatch_continuation_s *dc); +dispatch_priority_t +_dispatch_source_compute_kevent_priority(dispatch_source_t ds); +void _dispatch_source_refs_register(dispatch_source_t ds, dispatch_priority_t bp); +void _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options); void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds); void _dispatch_source_finalize_activation(dispatch_source_t ds); -void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags); -void _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, +void _dispatch_source_invoke(dispatch_source_t ds, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); +void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, + uintptr_t data, uintptr_t status, pthread_priority_t pp); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); -void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t 
interval); -void _dispatch_source_set_event_handler_continuation(dispatch_source_t ds, - dispatch_continuation_t dc); + DISPATCH_EXPORT // for firehose server void _dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, unsigned long val); -#if HAVE_MACH -void _dispatch_mach_dispose(dispatch_mach_t dm); -void _dispatch_mach_finalize_activation(dispatch_mach_t dm); -void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags); -void _dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, +void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); - -void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); -void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, +void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); -size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, - size_t bufsiz); - -void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); -void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); -#endif // HAVE_MACH - -void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, - dispatch_wakeup_flags_t flags); -void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_flags_t flags); #if DISPATCH_USE_KEVENT_WORKQUEUE -void _dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, +void _dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents); -#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/swift/Block.swift b/src/swift/Block.swift index adddc530f..d4cae3c60 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -66,10 +66,10 @@ public class DispatchWorkItem { } public func notify( - qos: DispatchQoS = .unspecified, - flags: DispatchWorkItemFlags = [], - queue: DispatchQueue, - execute: @escaping @convention(block) () -> ()) + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + queue: DispatchQueue, + execute: @escaping @convention(block) () -> ()) { if qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: execute) diff --git a/src/swift/Data.swift b/src/swift/Data.swift index 72f53d619..e0b7263c9 100644 --- a/src/swift/Data.swift +++ b/src/swift/Data.swift @@ -92,7 +92,7 @@ public struct DispatchData : RandomAccessCollection { } public func enumerateBytes( - block: (_ buffer: UnsafeBufferPointer, _ byteIndex: Int, _ stop: inout Bool) -> Void) + block: (_ buffer: UnsafeBufferPointer, _ byteIndex: Int, _ stop: inout Bool) -> Void) { // we know that capturing block in the closure being created/passed to dispatch_data_apply // does not cause block to escape because dispatch_data_apply does not allow its diff --git a/src/swift/Private.swift b/src/swift/Private.swift index 3861b770b..df6a7b336 100644 --- a/src/swift/Private.swift +++ b/src/swift/Private.swift @@ -117,7 +117,7 @@ public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, } @available(*, unavailable, renamed: "DispatchGroup.notify(self:qos:flags:queue:execute:)") -public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void) +public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } 
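
For reference, a minimal sketch of the replacement spellings that the availability annotations in this hunk point to (DispatchGroup.notify and DispatchQueue.async(group:)); the queue label and closure bodies below are illustrative only, not part of the patch:

import Dispatch

let group = DispatchGroup()
let worker = DispatchQueue(label: "com.example.worker")

// dispatch_group_async(group, queue, block) becomes queue.async(group:execute:)
worker.async(group: group) {
    // tracked work
}

// dispatch_group_notify(group, queue, block) becomes group.notify(queue:execute:)
group.notify(queue: .main) {
    print("all group work finished")
}
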
@@ -141,7 +141,7 @@ public func dispatch_io_set_interval(_ channel: DispatchIO, _ interval: UInt64, } @available(*, unavailable, renamed:"DispatchQueue.concurrentPerform(iterations:execute:)") -public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: (Int) -> Void) +public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: (Int) -> Void) { fatalError() } @@ -159,7 +159,7 @@ public func dispatch_get_global_queue(_ identifier: Int, _ flags: UInt) -> Dispa } @available(*, unavailable, renamed: "getter:DispatchQueue.main()") -public func dispatch_get_main_queue() -> DispatchQueue +public func dispatch_get_main_queue() -> DispatchQueue { fatalError() } diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 155be8aff..b7628c9cf 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -87,7 +87,7 @@ public extension DispatchQueue { internal func _attr(attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t? { if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { switch self { - case .inherit: + case .inherit: // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0)) case .workItem: @@ -141,8 +141,8 @@ public extension DispatchQueue { target: DispatchQueue? = nil) { var attr = attributes._attr() - if autoreleaseFrequency != .inherit { - attr = autoreleaseFrequency._attr(attr: attr) + if autoreleaseFrequency != .inherit { + attr = autoreleaseFrequency._attr(attr: attr) } if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified { attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority)) @@ -176,10 +176,10 @@ public extension DispatchQueue { } public func async( - group: DispatchGroup? = nil, - qos: DispatchQoS = .unspecified, - flags: DispatchWorkItemFlags = [], - execute work: @escaping @convention(block) () -> Void) + group: DispatchGroup? = nil, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) { if group == nil && qos == .unspecified { // Fast-path route for the most common API usage @@ -210,8 +210,8 @@ public extension DispatchQueue { } private func _syncHelper( - fn: (() -> ()) -> (), - execute work: () throws -> T, + fn: (() -> ()) -> (), + execute work: () throws -> T, rescue: ((Swift.Error) throws -> (T))) rethrows -> T { var result: T? 
@@ -232,7 +232,7 @@ public extension DispatchQueue { @available(OSX 10.10, iOS 8.0, *) private func _syncHelper( - fn: (DispatchWorkItem) -> (), + fn: (DispatchWorkItem) -> (), flags: DispatchWorkItemFlags, execute work: () throws -> T, rescue: @escaping ((Swift.Error) throws -> (T))) rethrows -> T @@ -243,7 +243,7 @@ public extension DispatchQueue { do { result = try work() } catch let e { - error = e + error = e } }) fn(workItem) @@ -269,10 +269,10 @@ public extension DispatchQueue { } public func asyncAfter( - deadline: DispatchTime, - qos: DispatchQoS = .unspecified, - flags: DispatchWorkItemFlags = [], - execute work: @escaping @convention(block) () -> Void) + deadline: DispatchTime, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) @@ -284,9 +284,9 @@ public extension DispatchQueue { public func asyncAfter( wallDeadline: DispatchWallTime, - qos: DispatchQoS = .unspecified, - flags: DispatchWorkItemFlags = [], - execute work: @escaping @convention(block) () -> Void) + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) diff --git a/src/swift/Time.swift b/src/swift/Time.swift index af31f6c12..0b07742e6 100644 --- a/src/swift/Time.swift +++ b/src/swift/Time.swift @@ -26,7 +26,7 @@ public struct DispatchTime : Comparable { public static let distantFuture = DispatchTime(rawValue: ~0) - fileprivate init(rawValue: dispatch_time_t) { + fileprivate init(rawValue: dispatch_time_t) { self.rawValue = rawValue } diff --git a/src/time.c b/src/time.c index 6d008319b..6db48806a 100644 --- a/src/time.c +++ b/src/time.c @@ -20,28 +20,6 @@ #include "internal.h" -uint64_t -_dispatch_get_nanoseconds(void) -{ -#if !TARGET_OS_WIN32 - struct timeval now; - int r = gettimeofday(&now, NULL); - dispatch_assert_zero(r); - dispatch_assert(sizeof(NSEC_PER_SEC) == 8); - dispatch_assert(sizeof(NSEC_PER_USEC) == 8); - return (uint64_t)now.tv_sec * NSEC_PER_SEC + - (uint64_t)now.tv_usec * NSEC_PER_USEC; -#else /* TARGET_OS_WIN32 */ - // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). 
- FILETIME ft; - ULARGE_INTEGER li; - GetSystemTimeAsFileTime(&ft); - li.LowPart = ft.dwLowDateTime; - li.HighPart = ft.dwHighDateTime; - return li.QuadPart * 100ull; -#endif /* TARGET_OS_WIN32 */ -} - #if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \ || TARGET_OS_WIN32 DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = { @@ -115,7 +93,7 @@ dispatch_walltime(const struct timespec *inval, int64_t delta) { int64_t nsec; if (inval) { - nsec = inval->tv_sec * 1000000000ll + inval->tv_nsec; + nsec = (int64_t)_dispatch_timespec_to_nano(*inval); } else { nsec = (int64_t)_dispatch_get_nanoseconds(); } diff --git a/src/trace.h b/src/trace.h index d73ff3fb3..35722043b 100644 --- a/src/trace.h +++ b/src/trace.h @@ -108,7 +108,7 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } else { \ _dc = (void*)_do; \ _ctxt = _dc->dc_ctxt; \ - if (_dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ + if (_dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT) { \ _kind = "semaphore"; \ _func = (dispatch_function_t)dispatch_semaphore_signal; \ } else if (_dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { \ @@ -131,8 +131,8 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) #if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) +_dispatch_trace_root_queue_push_list(dispatch_queue_t dq, + dispatch_object_t _head, dispatch_object_t _tail, unsigned int n) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _head._do; @@ -141,20 +141,20 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, } while (dou != _tail._do && (dou = dou->do_next)); } _dispatch_introspection_queue_push_list(dq, _head, _tail); - _dispatch_queue_push_list(dq, _head, _tail, pp, n); + _dispatch_root_queue_push_inline(dq, _head, _tail, n); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) + dispatch_qos_t qos) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push_inline(dq, _tail, pp, flags); + _dispatch_queue_push_inline(dq, _tail, qos); } DISPATCH_ALWAYS_INLINE @@ -168,7 +168,7 @@ _dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail) _dispatch_introspection_queue_push(dq, _tail); } -#define _dispatch_queue_push_list _dispatch_trace_queue_push_list +#define _dispatch_root_queue_push_inline _dispatch_trace_root_queue_push_list #define _dispatch_queue_push_inline _dispatch_trace_queue_push_inline DISPATCH_ALWAYS_INLINE @@ -189,7 +189,7 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou) #if DISPATCH_USE_DTRACE static inline dispatch_function_t -_dispatch_trace_timer_function(dispatch_source_refs_t dr) +_dispatch_trace_timer_function(dispatch_timer_source_refs_t dr) { dispatch_continuation_t dc; dc = os_atomic_load(&dr->ds_handler[DS_EVENT_HANDLER], relaxed); @@ -198,12 +198,12 @@ _dispatch_trace_timer_function(dispatch_source_refs_t dr) DISPATCH_ALWAYS_INLINE static inline dispatch_trace_timer_params_t -_dispatch_trace_timer_params(uintptr_t ident, 
+_dispatch_trace_timer_params(dispatch_clock_t clock, struct dispatch_timer_source_s *values, uint64_t deadline, dispatch_trace_timer_params_t params) { - #define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \ - == DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t)) + #define _dispatch_trace_time2nano3(t) \ + (clock == DISPATCH_CLOCK_MACH ? _dispatch_time_mach2nano(t) : (t)) #define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \ (v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);}) #define _dispatch_trace_time2nano(v) ({ uint64_t _t; \ @@ -212,14 +212,13 @@ _dispatch_trace_timer_params(uintptr_t ident, if (deadline) { params->deadline = (int64_t)deadline; } else { - uint64_t now = (DISPATCH_TIMER_KIND(ident) == - DISPATCH_TIMER_KIND_MACH ? _dispatch_absolute_time() : - _dispatch_get_nanoseconds()); + uint64_t now = _dispatch_time_now(clock); params->deadline = _dispatch_trace_time2nano2(values->target, values->target < now ? 0 : values->target - now); } + uint64_t leeway = values->deadline - values->target; params->interval = _dispatch_trace_time2nano(values->interval); - params->leeway = _dispatch_trace_time2nano(values->leeway); + params->leeway = _dispatch_trace_time2nano(leeway); return params; } @@ -232,33 +231,34 @@ _dispatch_trace_timer_configure_enabled(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident, +_dispatch_trace_timer_configure(dispatch_source_t ds, dispatch_clock_t clock, struct dispatch_timer_source_s *values) { + dispatch_timer_source_refs_t dr = ds->ds_timer_refs; struct dispatch_trace_timer_params_s params; - DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs), - _dispatch_trace_timer_params(ident, values, 0, - ¶ms)); + DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(dr), + _dispatch_trace_timer_params(clock, values, 0, ¶ms)); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline) +_dispatch_trace_timer_program(dispatch_timer_source_refs_t dr, uint64_t deadline) { if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) { if (deadline && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(dr->du_ident); struct dispatch_trace_timer_params_s params; DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr), - _dispatch_trace_timer_params(ds->ds_ident_hack, - &ds_timer(dr), deadline, ¶ms)); + _dispatch_trace_timer_params(clock, &dr->dt_timer, + deadline, ¶ms)); } } } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_wake(dispatch_source_refs_t dr) +_dispatch_trace_timer_wake(dispatch_timer_source_refs_t dr) { if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { if (dr) { @@ -270,8 +270,8 @@ _dispatch_trace_timer_wake(dispatch_source_refs_t dr) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, - unsigned long missed) +_dispatch_trace_timer_fire(dispatch_timer_source_refs_t dr, uint64_t data, + uint64_t missed) { if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) { if (!(data - missed) && dr) { @@ -284,8 +284,8 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, #else #define _dispatch_trace_timer_configure_enabled() false -#define _dispatch_trace_timer_configure(ds, ident, values) \ - do { (void)(ds); (void)(ident); (void)(values); } while(0) +#define _dispatch_trace_timer_configure(ds, clock, values) \ + do { 
(void)(ds); (void)(clock); (void)(values); } while(0) #define _dispatch_trace_timer_program(dr, deadline) \ do { (void)(dr); (void)(deadline); } while(0) #define _dispatch_trace_timer_wake(dr) \ diff --git a/src/voucher.c b/src/voucher.c index ee04e3b19..9f97b7a67 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -165,24 +165,69 @@ _voucher_thread_cleanup(void *voucher) _voucher_release(voucher); } +#pragma mark - +#pragma mark voucher_hash + DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE]; -#define _vouchers_head(kv) (&_vouchers[VL_HASH((kv))]) -static dispatch_unfair_lock_s _vouchers_lock; -#define _vouchers_lock_lock() _dispatch_unfair_lock_lock(&_vouchers_lock) -#define _vouchers_lock_unlock() _dispatch_unfair_lock_unlock(&_vouchers_lock) +static voucher_hash_head_s _voucher_hash[VL_HASH_SIZE]; + +#define _voucher_hash_head(kv) (&_voucher_hash[VL_HASH((kv))]) +static dispatch_unfair_lock_s _voucher_hash_lock; +#define _voucher_hash_lock_lock() \ + _dispatch_unfair_lock_lock(&_voucher_hash_lock) +#define _voucher_hash_lock_unlock() \ + _dispatch_unfair_lock_unlock(&_voucher_hash_lock) + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_head_init(voucher_hash_head_s *head) +{ + _voucher_hash_set_next(&head->vhh_first, VOUCHER_NULL); + _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &head->vhh_first); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_enqueue(mach_voucher_t kv, voucher_t v) +{ + // same as TAILQ_INSERT_TAIL + voucher_hash_head_s *head = _voucher_hash_head(kv); + uintptr_t prev_ptr = head->vhh_last_ptr; + _voucher_hash_set_next(&v->v_list.vhe_next, VOUCHER_NULL); + v->v_list.vhe_prev_ptr = prev_ptr; + _voucher_hash_store_to_prev_ptr(prev_ptr, v); + _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &v->v_list.vhe_next); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_remove(mach_voucher_t kv, voucher_t v) +{ + // same as TAILQ_REMOVE + voucher_hash_head_s *head = _voucher_hash_head(kv); + voucher_t next = _voucher_hash_get_next(v->v_list.vhe_next); + uintptr_t prev_ptr = v->v_list.vhe_prev_ptr; + if (next) { + next->v_list.vhe_prev_ptr = prev_ptr; + } else { + head->vhh_last_ptr = prev_ptr; + } + _voucher_hash_store_to_prev_ptr(prev_ptr, next); + _voucher_hash_mark_not_enqueued(v); +} static voucher_t _voucher_find_and_retain(mach_voucher_t kv) { - voucher_t v; if (!kv) return NULL; - _vouchers_lock_lock(); - TAILQ_FOREACH(v, _vouchers_head(kv), v_list) { + _voucher_hash_lock_lock(); + voucher_hash_head_s *head = _voucher_hash_head(kv); + voucher_t v = _voucher_hash_get_next(head->vhh_first); + while (v) { if (v->v_ipc_kvoucher == kv) { int xref_cnt = os_atomic_inc2o(v, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1); - if (slowpath(xref_cnt < 0)) { + if (unlikely(xref_cnt < 0)) { _dispatch_voucher_debug("over-release", v); _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } @@ -192,8 +237,9 @@ _voucher_find_and_retain(mach_voucher_t kv) } break; } + v = _voucher_hash_get_next(v->v_list.vhe_next); } - _vouchers_lock_unlock(); + _voucher_hash_lock_unlock(); return v; } @@ -202,35 +248,35 @@ _voucher_insert(voucher_t v) { mach_voucher_t kv = v->v_ipc_kvoucher; if (!kv) return; - _vouchers_lock_lock(); - if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) { + _voucher_hash_lock_lock(); + if (unlikely(_voucher_hash_is_enqueued(v))) { _dispatch_voucher_debug("corruption", v); - DISPATCH_CLIENT_CRASH(v->v_list.tqe_prev, "Voucher corruption"); + DISPATCH_CLIENT_CRASH(0, "Voucher 
corruption"); } - TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list); - _vouchers_lock_unlock(); + _voucher_hash_enqueue(kv, v); + _voucher_hash_lock_unlock(); } static void _voucher_remove(voucher_t v) { mach_voucher_t kv = v->v_ipc_kvoucher; - if (!_TAILQ_IS_ENQUEUED(v, v_list)) return; - _vouchers_lock_lock(); - if (slowpath(!kv)) { + if (!_voucher_hash_is_enqueued(v)) return; + _voucher_hash_lock_lock(); + if (unlikely(!kv)) { _dispatch_voucher_debug("corruption", v); DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } // check for resurrection race with _voucher_find_and_retain - if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0 && - _TAILQ_IS_ENQUEUED(v, v_list)) { - TAILQ_REMOVE(_vouchers_head(kv), v, v_list); - _TAILQ_MARK_NOT_ENQUEUED(v, v_list); - v->v_list.tqe_next = (void*)~0ull; + if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0) { + if (_voucher_hash_is_enqueued(v)) _voucher_hash_remove(kv, v); } - _vouchers_lock_unlock(); + _voucher_hash_lock_unlock(); } +#pragma mark - +#pragma mark mach_voucher_t + void _voucher_dealloc_mach_voucher(mach_voucher_t kv) { @@ -419,7 +465,7 @@ _voucher_get_mach_voucher(voucher_t voucher) size = _voucher_mach_recipe_init(mvar, voucher, kvb, voucher->v_priority); kr = _voucher_create_mach_voucher(mvar, size, &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { return MACH_VOUCHER_NULL; } if (!os_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, @@ -453,7 +499,7 @@ _voucher_create_mach_voucher_with_priority(voucher_t voucher, size = _voucher_mach_recipe_init(mvar, voucher, kvb, priority); kr = _voucher_create_mach_voucher(mvar, size, &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { return MACH_VOUCHER_NULL; } _dispatch_kvoucher_debug("create with priority from voucher[%p]", kv, @@ -635,7 +681,7 @@ _voucher_create_without_importance(voucher_t ov) }; kr = _voucher_create_mach_voucher(importance_remove_recipe, sizeof(importance_remove_recipe), &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { if (ov->v_ipc_kvoucher) return NULL; kv = MACH_VOUCHER_NULL; } @@ -684,7 +730,7 @@ _voucher_create_accounting_voucher(voucher_t ov) }; kr = _voucher_create_mach_voucher(&accounting_copy_recipe, sizeof(accounting_copy_recipe), &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { return NULL; } voucher_t v = _voucher_find_and_retain(kv); @@ -764,11 +810,11 @@ void _voucher_dispose(voucher_t voucher) { _dispatch_voucher_debug("dispose", voucher); - if (slowpath(_TAILQ_IS_ENQUEUED(voucher, v_list))) { + if (slowpath(_voucher_hash_is_enqueued(voucher))) { _dispatch_voucher_debug("corruption", voucher); - DISPATCH_CLIENT_CRASH(voucher->v_list.tqe_prev, "Voucher corruption"); + DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } - voucher->v_list.tqe_next = DISPATCH_OBJECT_LISTLESS; + _voucher_hash_mark_not_enqueued(voucher); if (voucher->v_ipc_kvoucher) { if (voucher->v_ipc_kvoucher != voucher->v_kvoucher) { _voucher_dealloc_mach_voucher(voucher->v_ipc_kvoucher); @@ -806,10 +852,9 @@ _voucher_activity_debug_channel_init(void) { dispatch_mach_handler_function_t handler = NULL; - if (_voucher_libtrace_hooks && _voucher_libtrace_hooks->vah_version >= 2) { + if (_voucher_libtrace_hooks) { handler = _voucher_libtrace_hooks->vah_debug_channel_handler; } - if (!handler) return; dispatch_mach_t dm; @@ -989,6 +1034,9 @@ _voucher_libkernel_init(void) void voucher_activity_initialize_4libtrace(voucher_activity_hooks_t 
hooks) { + if (hooks->vah_version < 3) { + DISPATCH_CLIENT_CRASH(hooks->vah_version, "unsupported vah_version"); + } if (!os_atomic_cmpxchg(&_voucher_libtrace_hooks, NULL, hooks, relaxed)) { DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks, @@ -1002,7 +1050,7 @@ _voucher_init(void) _voucher_libkernel_init(); unsigned int i; for (i = 0; i < VL_HASH_SIZE; i++) { - TAILQ_INIT(&_vouchers[i]); + _voucher_hash_head_init(&_voucher_hash[i]); } } @@ -1051,6 +1099,12 @@ _voucher_activity_id_allocate(firehose_activity_flags_t flags) return FIREHOSE_ACTIVITY_ID_MAKE(aid, flags); } +firehose_activity_id_t +voucher_activity_id_allocate(firehose_activity_flags_t flags) +{ + return _voucher_activity_id_allocate(flags); +} + #define _voucher_activity_tracepoint_reserve(stamp, stream, pub, priv, privbuf) \ firehose_buffer_tracepoint_reserve(_firehose_task_buffer, stamp, \ stream, pub, priv, privbuf) @@ -1094,6 +1148,13 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED) // firehose_buffer_create always consumes the send-right _firehose_task_buffer = firehose_buffer_create(logd_port, _voucher_unique_pid, flags); + if (_voucher_libtrace_hooks->vah_version >= 4 && + _voucher_libtrace_hooks->vah_metadata_init) { + firehose_buffer_t fb = _firehose_task_buffer; + size_t meta_sz = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE; + void *meta = (void *)((uintptr_t)(&fb->fb_header + 1) - meta_sz); + _voucher_libtrace_hooks->vah_metadata_init(meta, meta_sz); + } } } @@ -1126,30 +1187,23 @@ voucher_activity_get_metadata_buffer(size_t *length) } voucher_t -voucher_activity_create(firehose_tracepoint_id_t trace_id, - voucher_t base, firehose_activity_flags_t flags, uint64_t location) -{ - return voucher_activity_create_with_location(&trace_id, base, flags, location); -} - -voucher_t -voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, - voucher_t base, firehose_activity_flags_t flags, uint64_t location) +voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, + const void *pubdata, size_t publen) { firehose_activity_id_t va_id = 0, current_id = 0, parent_id = 0; firehose_tracepoint_id_u ftid = { .ftid_value = *trace_id }; - uint16_t pubsize = sizeof(va_id) + sizeof(location); uint64_t creator_id = 0; + uint16_t pubsize; voucher_t ov = _voucher_get(); voucher_t v; + if (os_add_overflow(sizeof(va_id), publen, &pubsize) || pubsize > 128) { + DISPATCH_CLIENT_CRASH(pubsize, "Absurd publen"); + } if (base == VOUCHER_CURRENT) { base = ov; } - if (_voucher_activity_disabled()) { - *trace_id = 0; - return base ? 
_voucher_retain(base) : VOUCHER_NULL; - } FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid); if (ov && (current_id = ov->v_activity)) { @@ -1179,6 +1233,10 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, v->v_activity_creator = _voucher_unique_pid; v->v_parent_activity = parent_id; + if (_voucher_activity_disabled()) { + goto done; + } + static const firehose_stream_t streams[2] = { firehose_stream_metadata, firehose_stream_persist, @@ -1202,13 +1260,23 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, pubptr = _dispatch_memappend(pubptr, &parent_id); } pubptr = _dispatch_memappend(pubptr, &va_id); - pubptr = _dispatch_memappend(pubptr, &location); + pubptr = _dispatch_mempcpy(pubptr, pubdata, publen); _voucher_activity_tracepoint_flush(ft, ftid); } +done: *trace_id = ftid.ftid_value; return v; } +voucher_t +voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t loc) +{ + return voucher_activity_create_with_data(trace_id, base, flags, + &loc, sizeof(loc)); +} + +#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, firehose_activity_id_t new_id) @@ -1245,6 +1313,7 @@ _voucher_activity_swap(firehose_activity_id_t old_id, if (new_id) pubptr = _dispatch_memappend(pubptr, &new_id); _voucher_activity_tracepoint_flush(ft, ftid); } +#endif firehose_activity_id_t voucher_get_activity_id_and_creator(voucher_t v, uint64_t *creator_pid, @@ -1276,22 +1345,22 @@ voucher_activity_flush(firehose_stream_t stream) firehose_buffer_stream_flush(_firehose_task_buffer, stream); } -DISPATCH_ALWAYS_INLINE -static inline firehose_tracepoint_id_t -_voucher_activity_trace(firehose_stream_t stream, - firehose_tracepoint_id_u ftid, uint64_t stamp, - const void *pubdata, size_t publen, - const void *privdata, size_t privlen) +DISPATCH_NOINLINE +firehose_tracepoint_id_t +voucher_activity_trace_v(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t stamp, + const struct iovec *iov, size_t publen, size_t privlen) { + firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); const size_t _firehose_chunk_payload_size = - sizeof(((struct firehose_buffer_chunk_s *)0)->fbc_data); + sizeof(((struct firehose_chunk_s *)0)->fc_data); if (_voucher_activity_disabled()) return 0; firehose_tracepoint_t ft; firehose_activity_id_t va_id = 0; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; uint8_t *privptr, *pubptr; size_t pubsize = publen; voucher_t ov = _voucher_get(); @@ -1331,38 +1400,52 @@ _voucher_activity_trace(firehose_stream_t stream, pubptr = _dispatch_memappend(pubptr, &creator_pid); } if (privlen) { - fbc = firehose_buffer_chunk_for_address(ft); + fc = firehose_buffer_chunk_for_address(ft); struct firehose_buffer_range_s range = { - .fbr_offset = (uint16_t)(privptr - fbc->fbc_start), + .fbr_offset = (uint16_t)(privptr - fc->fc_start), .fbr_length = (uint16_t)privlen, }; pubptr = _dispatch_memappend(pubptr, &range); - _dispatch_mempcpy(privptr, privdata, privlen); } - _dispatch_mempcpy(pubptr, pubdata, publen); + while (publen > 0) { + pubptr = _dispatch_mempcpy(pubptr, iov->iov_base, iov->iov_len); + if (unlikely(os_sub_overflow(publen, iov->iov_len, &publen))) { + DISPATCH_CLIENT_CRASH(0, "Invalid arguments"); + } + iov++; + } + while (privlen > 0) { + privptr = _dispatch_mempcpy(privptr, iov->iov_base, iov->iov_len); + if 
(unlikely(os_sub_overflow(privlen, iov->iov_len, &privlen))) { + DISPATCH_CLIENT_CRASH(0, "Invalid arguments"); + } + iov++; + } _voucher_activity_tracepoint_flush(ft, ftid); return ftid.ftid_value; } firehose_tracepoint_id_t voucher_activity_trace(firehose_stream_t stream, - firehose_tracepoint_id_t trace_id, uint64_t timestamp, + firehose_tracepoint_id_t trace_id, uint64_t stamp, const void *pubdata, size_t publen) { - firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; - return _voucher_activity_trace(stream, ftid, timestamp, pubdata, publen, - NULL, 0); + struct iovec iov = { (void *)pubdata, publen }; + return voucher_activity_trace_v(stream, trace_id, stamp, &iov, publen, 0); } firehose_tracepoint_id_t voucher_activity_trace_with_private_strings(firehose_stream_t stream, - firehose_tracepoint_id_t trace_id, uint64_t timestamp, + firehose_tracepoint_id_t trace_id, uint64_t stamp, const void *pubdata, size_t publen, const void *privdata, size_t privlen) { - firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; - return _voucher_activity_trace(stream, ftid, timestamp, - pubdata, publen, privdata, privlen); + struct iovec iov[2] = { + { (void *)pubdata, publen }, + { (void *)privdata, privlen }, + }; + return voucher_activity_trace_v(stream, trace_id, stamp, + iov, publen, privlen); } #pragma mark - @@ -1410,7 +1493,7 @@ voucher_create(voucher_recipe_t recipe) (void)recipe; return NULL; } -#endif +#endif // VOUCHER_ENABLE_RECIPE_OBJECTS voucher_t voucher_adopt(voucher_t voucher) @@ -1509,7 +1592,7 @@ voucher_get_mach_voucher(voucher_t voucher) (void)voucher; return 0; } -#endif +#endif // VOUCHER_ENABLE_GET_MACH_VOUCHER void _voucher_xref_dispose(voucher_t voucher) @@ -1543,7 +1626,7 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf (void)persona_info; return -1; } -#endif +#endif // VOUCHER_EXPORT_PERSONA_SPI void _voucher_activity_debug_channel_init(void) @@ -1621,6 +1704,16 @@ voucher_activity_trace_with_private_strings(firehose_stream_t stream, return 0; } +firehose_tracepoint_id_t +voucher_activity_trace_v(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const struct iovec *iov, size_t publen, size_t privlen) +{ + (void)stream; (void)trace_id; (void)timestamp; + (void)iov; (void)publen; (void)privlen; + return 0; +} + void voucher_activity_flush(firehose_stream_t stream) { diff --git a/src/voucher_internal.h b/src/voucher_internal.h index b34ad4643..d16fc8a2b 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -54,7 +54,7 @@ OS_OBJECT_DECL_CLASS(voucher_recipe); * @result * The newly created voucher object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t voucher_create(voucher_recipe_t recipe); @@ -78,7 +78,7 @@ voucher_create(voucher_recipe_t recipe); * @result * A mach voucher port. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW mach_voucher_t voucher_get_mach_voucher(voucher_t voucher); @@ -90,7 +90,7 @@ voucher_get_mach_voucher(voucher_t voucher); void _voucher_init(void); void _voucher_atfork_child(void); void _voucher_activity_debug_channel_init(void); -#if OS_VOUCHER_ACTIVITY_SPI +#if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, firehose_activity_id_t new_id); #endif @@ -160,7 +160,10 @@ typedef struct voucher_s { struct voucher_vtable_s *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); - TAILQ_ENTRY(voucher_s) v_list; + struct voucher_hash_entry_s { + uintptr_t vhe_next; + uintptr_t vhe_prev_ptr; + } v_list; mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference firehose_activity_id_t v_activity; @@ -174,6 +177,54 @@ typedef struct voucher_s { #endif } voucher_s; +typedef struct voucher_hash_head_s { + uintptr_t vhh_first; + uintptr_t vhh_last_ptr; +} voucher_hash_head_s; + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_hash_is_enqueued(const struct voucher_s *v) +{ + return v->v_list.vhe_prev_ptr != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_mark_not_enqueued(struct voucher_s *v) +{ + v->v_list.vhe_prev_ptr = 0; + v->v_list.vhe_next = (uintptr_t)DISPATCH_OBJECT_LISTLESS; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_set_next(uintptr_t *next, struct voucher_s *v) +{ + *next = ~(uintptr_t)v; +} + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_hash_get_next(uintptr_t next) +{ + return (voucher_t)~next; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_set_prev_ptr(uintptr_t *prev_ptr, uintptr_t *addr) +{ + *prev_ptr = ~(uintptr_t)addr; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_store_to_prev_ptr(uintptr_t prev_ptr, struct voucher_s *v) +{ + *(uintptr_t *)~prev_ptr = ~(uintptr_t)v; +} + #if VOUCHER_ENABLE_RECIPE_OBJECTS #define _voucher_extra_size(v) ((v)->v_recipe_extra_size) #define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset) @@ -206,12 +257,7 @@ typedef struct voucher_recipe_s { _dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__) #define _dispatch_kvoucher_debug(msg, kv, ...) \ _dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__) -#if DISPATCH_MACHPORT_DEBUG -#define _dispatch_voucher_debug_machport(name) \ - dispatch_debug_machport((name), __func__) -#else -#define _dispatch_voucher_debug_machport(name) ((void)(name)) -#endif +#define _dispatch_voucher_debug_machport(name) _dispatch_debug_machport(name) #else #define _dispatch_voucher_debug(msg, v, ...) #define _dispatch_kvoucher_debug(msg, kv, ...) @@ -323,9 +369,11 @@ _voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher) _dispatch_thread_setspecific(dispatch_voucher_key, voucher); mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL; +#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS firehose_activity_id_t aid = voucher ? voucher->v_activity : 0; firehose_activity_id_t oaid = ov ? ov->v_activity : 0; if (aid != oaid) _voucher_activity_swap(aid, oaid); +#endif return (kv != okv) ? 
kv : VOUCHER_NO_MACH_VOUCHER; } @@ -500,21 +548,13 @@ _dispatch_continuation_voucher_set(dispatch_continuation_t dc, { voucher_t v = NULL; + (void)dqu; // _dispatch_continuation_voucher_set is never called for blocks with // private data or with the DISPATCH_BLOCK_HAS_VOUCHER flag set. // only _dispatch_continuation_init_slow handles this bit. dispatch_assert(!(flags & DISPATCH_BLOCK_HAS_VOUCHER)); - if (dqu._oq->oq_override_voucher != DISPATCH_NO_VOUCHER) { - // if the queue has an override voucher, we should not capture anything - // - // if the continuation is enqueued before the queue is activated, then - // this optimization fails and we do capture whatever is current - // - // _dispatch_continuation_voucher_adopt() would do the right thing - // but using DISPATCH_NO_VOUCHER here is more efficient. - v = DISPATCH_NO_VOUCHER; - } else if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) { + if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) { v = _voucher_copy(); } dc->dc_voucher = v; @@ -530,7 +570,7 @@ _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, voucher_t ov, uintptr_t dc_flags) { voucher_t v = dc->dc_voucher; - _dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT); + dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT); dispatch_assert(DISPATCH_OBJ_CONSUME_BIT == DISPATCH_VOUCHER_CONSUME); if (consume) { @@ -542,7 +582,7 @@ _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, if (likely(!(dc_flags & DISPATCH_OBJ_ENFORCE_VOUCHER))) { if (unlikely(ov != DISPATCH_NO_VOUCHER && v != ov)) { - if (consume) _voucher_release(v); + if (consume && v) _voucher_release(v); consume = 0; v = ov; } diff --git a/tests/Makefile.am b/tests/Makefile.am index 1688fc95c..a47613097 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -75,7 +75,7 @@ TESTS= \ $(ADDITIONAL_TESTS) -dispatch_c99_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) -std=c99 +dispatch_c99_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) -std=c99 dispatch_plusplus_SOURCES=dispatch_plusplus.cpp dispatch_priority2_SOURCES=dispatch_priority.c dispatch_priority2_CPPFLAGS=$(AM_CPPFLAGS) -DUSE_SET_TARGET_QUEUE=1 @@ -83,16 +83,11 @@ dispatch_priority2_CPPFLAGS=$(AM_CPPFLAGS) -DUSE_SET_TARGET_QUEUE=1 AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) DISPATCH_TESTS_CFLAGS=-Wall -Wno-deprecated-declarations $(MARCH_FLAGS) -AM_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) $(BSD_OVERLAY_CFLAGS) +AM_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) $(BSD_OVERLAY_CFLAGS) AM_OBJCFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) AM_CXXFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CXXBLOCKS_FLAGS) $(BSD_OVERLAY_CFLAGS) AM_OBJCXXFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CXXBLOCKS_FLAGS) -if BUILD_OWN_KQUEUES - KQUEUE_LIBS+=$(top_builddir)/libkqueue/libkqueue.la - KQUEUE_CFLAGS+=-I$(top_srcdir)/libkqueue/include -endif - if !BUILD_OWN_PTHREAD_WORKQUEUES if HAVE_PTHREAD_WORKQUEUES PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue @@ -109,7 +104,7 @@ if HAVE_SWIFT AM_LDFLAGS=-rpath $(SWIFT_LIBDIR) endif -LDADD=libbsdtests.la $(top_builddir)/src/libdispatch.la $(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(SWIFT_LIBS) +LDADD=libbsdtests.la $(top_builddir)/src/libdispatch.la $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(SWIFT_LIBS) libbsdtests_la_LDFLAGS=-avoid-version bsdtestsummarize_LDADD=-lm $(BSD_OVERLAY_LIBS) diff --git a/tests/dispatch_test.c b/tests/dispatch_test.c index ce92ae00c..52c61cfaf 100644 --- a/tests/dispatch_test.c +++ b/tests/dispatch_test.c @@ -28,7 
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 1688fc95c..a47613097 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -75,7 +75,7 @@ TESTS= \
 	$(ADDITIONAL_TESTS)
 
-dispatch_c99_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) -std=c99
+dispatch_c99_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) -std=c99
 dispatch_plusplus_SOURCES=dispatch_plusplus.cpp
 dispatch_priority2_SOURCES=dispatch_priority.c
 dispatch_priority2_CPPFLAGS=$(AM_CPPFLAGS) -DUSE_SET_TARGET_QUEUE=1
@@ -83,16 +83,11 @@ dispatch_priority2_CPPFLAGS=$(AM_CPPFLAGS) -DUSE_SET_TARGET_QUEUE=1
 
 AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir)
 DISPATCH_TESTS_CFLAGS=-Wall -Wno-deprecated-declarations $(MARCH_FLAGS)
-AM_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) $(BSD_OVERLAY_CFLAGS)
+AM_CFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS) $(BSD_OVERLAY_CFLAGS)
 AM_OBJCFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CBLOCKS_FLAGS)
 AM_CXXFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CXXBLOCKS_FLAGS) $(BSD_OVERLAY_CFLAGS)
 AM_OBJCXXFLAGS=$(DISPATCH_TESTS_CFLAGS) $(CXXBLOCKS_FLAGS)
 
-if BUILD_OWN_KQUEUES
-  KQUEUE_LIBS+=$(top_builddir)/libkqueue/libkqueue.la
-  KQUEUE_CFLAGS+=-I$(top_srcdir)/libkqueue/include
-endif
-
 if !BUILD_OWN_PTHREAD_WORKQUEUES
 if HAVE_PTHREAD_WORKQUEUES
   PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue
@@ -109,7 +104,7 @@ if HAVE_SWIFT
   AM_LDFLAGS=-rpath $(SWIFT_LIBDIR)
 endif
 
-LDADD=libbsdtests.la $(top_builddir)/src/libdispatch.la $(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(SWIFT_LIBS)
+LDADD=libbsdtests.la $(top_builddir)/src/libdispatch.la $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(SWIFT_LIBS)
 libbsdtests_la_LDFLAGS=-avoid-version
 
 bsdtestsummarize_LDADD=-lm $(BSD_OVERLAY_LIBS)
diff --git a/tests/dispatch_test.c b/tests/dispatch_test.c
index ce92ae00c..52c61cfaf 100644
--- a/tests/dispatch_test.c
+++ b/tests/dispatch_test.c
@@ -28,7 +28,10 @@
 #include
 #include
 #include
+#if __has_include(<sys/event.h>)
+#define HAS_SYS_EVENT_H 1
 #include <sys/event.h>
+#endif
 #include
 #include
@@ -47,6 +50,7 @@ dispatch_test_start(const char* desc)
 bool
 dispatch_test_check_evfilt_read_for_fd(int fd)
 {
+#if HAS_SYS_EVENT_H
 	int kq = kqueue();
 	assert(kq != -1);
 	struct kevent ke = {
@@ -60,6 +64,10 @@ dispatch_test_check_evfilt_read_for_fd(int fd)
 	int r = kevent(kq, &ke, 1, &ke, 1, &t);
 	close(kq);
 	return r > 0;
+#else
+	// TODO: Need to write a real check for epoll-backend here
+	return true;
+#endif
 }
 
 void
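Aside (not part of the patch): the new #else branch above simply returns true and leaves a TODO for an epoll-backed check. One plausible shape for that check on Linux is sketched below; dispatch_test_check_epollin_for_fd is a hypothetical name, and a real replacement would still need to special-case regular files, which epoll_ctl() rejects with EPERM even though kqueue's EVFILT_READ accepts them.

// Sketch of an epoll-based readability probe, analogous to the kqueue
// check above: register the fd for EPOLLIN and poll once with a zero
// timeout so the call never blocks.
#include <assert.h>
#include <stdbool.h>
#include <sys/epoll.h>
#include <unistd.h>

bool
dispatch_test_check_epollin_for_fd(int fd)
{
	int efd = epoll_create1(0);
	assert(efd != -1);
	struct epoll_event ev = { .events = EPOLLIN, .data = { .fd = fd } };
	int r = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
	assert(r == 0);
	struct epoll_event out;
	r = epoll_wait(efd, &out, 1, 0); // zero timeout: poll once, don't block
	close(efd);
	return r > 0;
}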
diff --git a/xcodeconfig/libdispatch-resolver_iphoneos.order b/xcodeconfig/libdispatch-resolver_iphoneos.order
deleted file mode 100644
index eea98459d..000000000
--- a/xcodeconfig/libdispatch-resolver_iphoneos.order
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (c) 2013 Apple Inc. All rights reserved.
-#
-# @APPLE_APACHE_LICENSE_HEADER_START@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# @APPLE_APACHE_LICENSE_HEADER_END@
-#
-
diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases
index 65dfd04f9..d8a5113a2 100644
--- a/xcodeconfig/libdispatch.aliases
+++ b/xcodeconfig/libdispatch.aliases
@@ -19,8 +19,9 @@
 #
 
 __dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap
-__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus
 __dispatch_queue_attrs __dispatch_queue_attr_concurrent
+__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus
 _dispatch_assert_queue$V2 _dispatch_assert_queue
 _dispatch_assert_queue_not$V2 _dispatch_assert_queue_not
 _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target
+_dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF
diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig
index d5b08d6dd..a2ea6d96d 100644
--- a/xcodeconfig/libdispatch.xcconfig
+++ b/xcodeconfig/libdispatch.xcconfig
@@ -40,6 +40,7 @@ ONLY_ACTIVE_ARCH = NO
 CLANG_LINK_OBJC_RUNTIME = NO
 GCC_C_LANGUAGE_STANDARD = gnu11
 CLANG_CXX_LANGUAGE_STANDARD = gnu++11
+ENABLE_STRICT_OBJC_MSGSEND = YES
 GCC_ENABLE_CPP_EXCEPTIONS = NO
 GCC_STRICT_ALIASING = YES
 GCC_SYMBOLS_PRIVATE_EXTERN = YES
@@ -49,20 +50,33 @@ GCC_WARN_64_TO_32_BIT_CONVERSION = YES
 GCC_WARN_ABOUT_RETURN_TYPE = YES
 GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES
 GCC_WARN_ABOUT_MISSING_NEWLINE = YES
-GCC_WARN_UNUSED_VARIABLE = YES
-GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
 GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES
+GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
 GCC_WARN_SIGN_COMPARE = YES
+GCC_WARN_STRICT_SELECTOR_MATCH = YES
+GCC_WARN_UNDECLARED_SELECTOR = YES
 GCC_WARN_UNINITIALIZED_AUTOS = YES
+GCC_WARN_UNKNOWN_PRAGMAS = YES
+GCC_WARN_UNUSED_FUNCTION = YES
+GCC_WARN_UNUSED_LABEL = YES
+GCC_WARN_UNUSED_PARAMETER = YES
+GCC_WARN_UNUSED_VARIABLE = YES
+CLANG_WARN_ASSIGN_ENUM = YES
+CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES
+CLANG_WARN_DOCUMENTATION_COMMENTS = YES
+CLANG_WARN__DUPLICATE_METHOD_MATCH = YES
 CLANG_WARN_EMPTY_BODY = YES
 CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES
+CLANG_WARN_INFINITE_RECURSION = YES
+CLANG_WARN_OBJC_IMPLICIT_ATOMIC_PROPERTIES = YES
+CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES
 CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES
-CLANG_WARN_DOCUMENTATION_COMMENTS = YES
 GCC_TREAT_WARNINGS_AS_ERRORS = YES
 GCC_OPTIMIZATION_LEVEL = s
 GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS)
 GCC_NO_COMMON_BLOCKS = YES
-WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-packed -Wno-unknown-warning-option
+WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wimplicit-fallthrough -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunguarded-availability -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS)
+NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla
 OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
 OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions
 OTHER_CFLAGS_normal = -momit-leaf-frame-pointer
diff --git a/xcodeconfig/libdispatch_iphoneos.order b/xcodeconfig/libdispatch_iphoneos.order
deleted file mode 100644
index eea98459d..000000000
--- a/xcodeconfig/libdispatch_iphoneos.order
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright (c) 2013 Apple Inc. All rights reserved.
-#
-# @APPLE_APACHE_LICENSE_HEADER_START@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# @APPLE_APACHE_LICENSE_HEADER_END@
-#
-
diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh
index d9e28af6c..db13163e8 100755
--- a/xcodescripts/install-manpages.sh
+++ b/xcodescripts/install-manpages.sh
@@ -64,7 +64,7 @@ for m in dispatch_group_enter dispatch_group_leave dispatch_group_wait \
 	ln -f dispatch_group_create.3 ${m}.3
 done
 
-for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume \
+for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume dispatch_activate \
 	dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do
 	ln -f dispatch_object.3 ${m}.3
 done