From b45bc2fbfab501c0fe585b02e2061dc7c65d5ef2 Mon Sep 17 00:00:00 2001 From: David Grove Date: Wed, 15 Jun 2016 10:16:37 -0400 Subject: [PATCH] Linux port of libdispatch-685 merge Minimal set of changes to get sources merged from libdispatch-685 to compile and run on Linux. --- configure.ac | 3 - dispatch/dispatch.h | 31 ++++++++ os/linux_base.h | 128 +++++++++++-------------------- os/voucher_private.h | 2 +- src/firehose/firehose_internal.h | 4 + src/init.c | 4 + src/inline_internal.h | 20 ++--- src/internal.h | 20 +++-- src/object.c | 2 +- src/object_internal.h | 2 +- src/queue.c | 32 ++++++-- src/semaphore.c | 47 +----------- src/shims.h | 8 +- src/shims/linux_stubs.h | 83 +++++++++++++++++++- src/shims/lock.c | 35 +++++---- src/shims/lock.h | 19 ++++- src/source.c | 12 ++- src/voucher_internal.h | 5 +- 18 files changed, 274 insertions(+), 183 deletions(-) diff --git a/configure.ac b/configure.ac index ed32be47e..87346354a 100644 --- a/configure.ac +++ b/configure.ac @@ -295,9 +295,6 @@ AC_MSG_CHECKING([what semaphore type to use]); AS_IF([test "x$have_mach" = "xtrue"], [AC_DEFINE(USE_MACH_SEM, 1, [Define to use Mach semaphores]) AC_MSG_RESULT([Mach semaphores])], - [test "x$have_futex" = "xtrue"], - [AC_DEFINE(USE_FUTEX_SEM, 1, [Define to use Futex semaphores]) - AC_MSG_RESULT([Futex semaphores])], [test "x$have_sem_init" = "xtrue"], [AC_DEFINE(USE_POSIX_SEM, 1, [Define to use POSIX semaphores]) AC_MSG_RESULT([POSIX semaphores])], diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 8d854412c..349dd5983 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -41,6 +41,37 @@ #ifndef __OSX_AVAILABLE_STARTING #define __OSX_AVAILABLE_STARTING(x, y) #endif +#ifndef __OSX_AVAILABLE_BUT_DEPRECATED +#define __OSX_AVAILABLE_BUT_DEPRECATED(...) +#endif +#ifndef __OSX_AVAILABLE_BUT_DEPRECATED_MSG +#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(...) +#endif + +#ifndef __OSX_AVAILABLE +#define __OSX_AVAILABLE(...) +#endif +#ifndef __IOS_AVAILABLE +#define __IOS_AVAILABLE(...) +#endif +#ifndef __TVOS_AVAILABLE +#define __TVOS_AVAILABLE(...) +#endif +#ifndef __WATCHOS_AVAILABLE +#define __WATCHOS_AVAILABLE(...) +#endif +#ifndef __OSX_DEPRECATED +#define __OSX_DEPRECATED(...) +#endif +#ifndef __IOS_DEPRECATED +#define __IOS_DEPRECATED(...) +#endif +#ifndef __TVOS_DEPRECATED +#define __TVOS_DEPRECATED(...) +#endif +#ifndef __WATCHOS_DEPRECATED +#define __WATCHOS_DEPRECATED(...) 
+#endif #define DISPATCH_API_VERSION 20160612 diff --git a/os/linux_base.h b/os/linux_base.h index 3d54fa29a..96a3c825b 100644 --- a/os/linux_base.h +++ b/os/linux_base.h @@ -13,92 +13,60 @@ #ifndef __OS_LINUX_BASE__ #define __OS_LINUX_BASE__ -// #include #include +#include -// marker for hacks we have made to make progress -#define __LINUX_PORT_HDD__ 1 - -/* - * Stub out defines for some mach types and related macros - */ - -typedef uint32_t mach_port_t; - -#define MACH_PORT_NULL (0) -#define MACH_PORT_DEAD (-1) - -#define EVFILT_MACHPORT (-8) - -typedef uint32_t mach_error_t; - -typedef uint32_t mach_vm_size_t; - -typedef uint32_t mach_msg_return_t; - -typedef uintptr_t mach_vm_address_t; - -typedef uint32_t dispatch_mach_msg_t; - -typedef uint32_t dispatch_mach_t; - -typedef uint32_t dispatch_mach_reason_t; - -typedef uint32_t voucher_activity_mode_t; - -typedef uint32_t voucher_activity_trace_id_t; - -typedef uint32_t voucher_activity_id_t; - -typedef uint32_t _voucher_activity_buffer_hook_t;; - -typedef uint32_t voucher_activity_flag_t; - -typedef struct -{ -} mach_msg_header_t; - - -typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t, - dispatch_mach_msg_t, mach_error_t); - -typedef void (*dispatch_mach_msg_destructor_t)(void*); - -// Print a warning when an unported code path executes. -#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); } while (0) - -/* - * Stub out defines for other missing types - */ - -#if __linux__ -// we fall back to use kevent -#define kevent64_s kevent -#define kevent64(kq,cl,nc,el,ne,f,to) kevent(kq,cl,nc,el,ne,to) +#if __GNUC__ +#define OS_EXPECT(x, v) __builtin_expect((x), (v)) +#else +#define OS_EXPECT(x, v) (x) #endif -// SIZE_T_MAX should not be hardcoded like this here. -#define SIZE_T_MAX (0x7fffffff) +#ifndef os_likely +#define os_likely(x) OS_EXPECT(!!(x), 1) +#endif +#ifndef os_unlikely +#define os_unlikely(x) OS_EXPECT(!!(x), 0) +#endif -// Define to 0 the NOTE_ values that are not present on Linux. -// Revisit this...would it be better to ifdef out the uses instead?? +#if __has_feature(assume_nonnull) +#define OS_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_ASSUME_NONNULL_BEGIN +#define OS_ASSUME_NONNULL_END +#endif -// The following values are passed as part of the EVFILT_TIMER requests +#if __has_builtin(__builtin_assume) +#define OS_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr) +#else +#define OS_COMPILER_CAN_ASSUME(expr) ((void)(expr)) +#endif -#define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */ +#if __has_feature(attribute_availability_swift) +// equivalent to __SWIFT_UNAVAILABLE from Availability.h +#define OS_SWIFT_UNAVAILABLE(_msg) \ + __attribute__((__availability__(swift, unavailable, message=_msg))) +#else +#define OS_SWIFT_UNAVAILABLE(_msg) +#endif -#define NOTE_SECONDS 0x01 -#define NOTE_USECONDS 0x02 -#define NOTE_NSECONDS 0x04 -#define NOTE_ABSOLUTE 0x08 -#define NOTE_CRITICAL 0x10 -#define NOTE_BACKGROUND 0x20 -#define NOTE_LEEWAY 0x40 +#if __has_attribute(swift_private) +# define OS_REFINED_FOR_SWIFT __attribute__((__swift_private__)) +#else +# define OS_REFINED_FOR_SWIFT +#endif -// need to catch the following usage if it happens .. 
-// we simply return '0' as a value probably not correct +#if __has_attribute(swift_name) +# define OS_SWIFT_NAME(_name) __attribute__((__swift_name__(#_name))) +#else +# define OS_SWIFT_NAME(_name) +#endif -#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;}) +#define __OS_STRINGIFY(s) #s +#define OS_STRINGIFY(s) __OS_STRINGIFY(s) +#define __OS_CONCAT(x, y) x ## y +#define OS_CONCAT(x, y) __OS_CONCAT(x, y) /* * Stub out misc linking and compilation attributes @@ -123,12 +91,4 @@ typedef void (*dispatch_mach_msg_destructor_t)(void*); #endif #define OS_NOTHROW - -// These and similar macros come from Availabilty.h on OS X -// Need a better way to do this long term. -#define __OSX_AVAILABLE_BUT_DEPRECATED(a,b,c,d) // -#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(a,b,c,d,msg) // - - - #endif /* __OS_LINUX_BASE__ */ diff --git a/os/voucher_private.h b/os/voucher_private.h index 3ecddfb40..9d97b885e 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -416,6 +416,7 @@ dispatch_queue_create_with_accounting_override_voucher( dispatch_queue_attr_t _Nullable attr, voucher_t _Nullable voucher); +#ifdef __APPLE__ /*! * @group Voucher Mach SPI * SPI intended for clients that need to interact with mach messages or mach @@ -444,7 +445,6 @@ OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); -#ifdef __APPLE__ /*! * @group Voucher Persona SPI * SPI intended for clients that need to interact with personas. diff --git a/src/firehose/firehose_internal.h b/src/firehose/firehose_internal.h index 0f84b8efa..29d1ad240 100644 --- a/src/firehose/firehose_internal.h +++ b/src/firehose/firehose_internal.h @@ -21,6 +21,8 @@ #ifndef __FIREHOSE_INTERNAL__ #define __FIREHOSE_INTERNAL__ +#if OS_FIREHOSE_SPI + // make sure this is defined so that we get MIG_SERVER_DIED when a send once // notification is sent back because of a crashed server #ifndef __MigTypeCheck @@ -44,4 +46,6 @@ #endif #include "firehose_inline_internal.h" +#endif // OS_FIREHOSE_SPI + #endif // __FIREHOSE_INTERNAL__ diff --git a/src/init.c b/src/init.c index 29ae2398b..e204e7d10 100644 --- a/src/init.c +++ b/src/init.c @@ -486,6 +486,7 @@ const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(ASYNC_REDIRECT, .do_kind = "dc-redirect", .do_invoke = _dispatch_async_redirect_invoke), +#if HAVE_MACH DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, .do_kind = "dc-mach-send-drain", .do_invoke = _dispatch_mach_send_barrier_drain_invoke), @@ -495,6 +496,7 @@ const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(MACH_RECV_BARRIER, .do_kind = "dc-mach-recv-barrier", .do_invoke = _dispatch_mach_barrier_invoke), +#endif #if HAVE_PTHREAD_WORKQUEUE_QOS DC_VTABLE_ENTRY(OVERRIDE_STEALING, .do_kind = "dc-override-stealing", @@ -1195,7 +1197,9 @@ dispatch_source_type_readwrite_init(dispatch_source_t ds, { ds->ds_is_level = true; // bypass kernel check for device kqueue support rdar://19004921 +#ifdef NOTE_LOWAT ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT; +#endif ds->ds_dkev->dk_kevent.data = 1; } diff --git a/src/inline_internal.h b/src/inline_internal.h index f7cc3371a..f4e44a183 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -977,7 +977,7 @@ _dispatch_queue_drain_try_lock(dispatch_queue_t dq, uint64_t pending_barrier_width = (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; uint64_t xor_owner_and_set_full_width = - _dispatch_thread_port() | DISPATCH_QUEUE_WIDTH_FULL_BIT; + 
_dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT; uint64_t clear_enqueued_bit, old_state, new_state; if (flags & DISPATCH_INVOKE_STEALING) { @@ -1041,7 +1041,7 @@ static inline bool _dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq) { uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; - value |= _dispatch_thread_port(); + value |= _dispatch_tid_self(); return os_atomic_cmpxchg2o(dq, dq_state, DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire); @@ -1577,7 +1577,7 @@ _dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di, if (!pp) pp = di->old_pri; if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) > (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_thread_port(), pp); + _dispatch_wqthread_override_start(_dispatch_tid_self(), pp); // Ensure that the root queue sees that this thread was overridden. _dispatch_set_defaultpriority_override(); } @@ -1630,7 +1630,7 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, drain_pending_barrier: if (overriding) { _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", - _dispatch_thread_port(), _dispatch_get_defaultpriority()); + _dispatch_tid_self(), _dispatch_get_defaultpriority()); _dispatch_root_queue_identity_assume(&di, 0, 0); } @@ -1640,7 +1640,7 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); op = dq->dq_override; if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_thread_port(), op); + _dispatch_wqthread_override_start(_dispatch_tid_self(), op); // Ensure that the root queue sees that this thread was overridden. _dispatch_set_defaultpriority_override(); } @@ -1825,7 +1825,7 @@ _dispatch_queue_set_bound_thread(dispatch_queue_t dq) { // Tag thread-bound queues with the owning thread dispatch_assert(_dispatch_queue_is_thread_bound(dq)); - mach_port_t old_owner, self = _dispatch_thread_port(); + mach_port_t old_owner, self = _dispatch_tid_self(); uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed); if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) { DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice"); @@ -1888,7 +1888,7 @@ _dispatch_reset_defaultpriority(pthread_priority_t pp) pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG; _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); #else - (void)priority; + (void)pp; #endif } @@ -1994,7 +1994,7 @@ _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) return defaultpri; } #else - (void)priority; (void)flags; + (void)pp; (void)flags; return 0; #endif } @@ -2037,6 +2037,7 @@ static inline pthread_priority_t _dispatch_priority_compute_update(pthread_priority_t pp, _dispatch_thread_set_self_t flags) { +#if HAVE_PTHREAD_WORKQUEUE_QOS dispatch_assert(pp != DISPATCH_NO_PRIORITY); if (!_dispatch_set_qos_class_enabled) return 0; // the priority in _dispatch_get_priority() only tracks manager-ness @@ -2047,7 +2048,6 @@ _dispatch_priority_compute_update(pthread_priority_t pp, // the manager bit is invalid input, but we keep it to get meaningful // assertions in _dispatch_set_priority_and_voucher_slow() pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; -#if HAVE_PTHREAD_WORKQUEUE_QOS pthread_priority_t cur_priority = _dispatch_get_priority(); pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; @@ -2064,6 +2064,8 @@ 
_dispatch_priority_compute_update(pthread_priority_t pp, cur_priority &= ~overcommit; } if (unlikely(pp != cur_priority)) return pp; +#else + (void)pp; (void)flags; #endif return 0; } diff --git a/src/internal.h b/src/internal.h index 49543ce83..f745113cd 100644 --- a/src/internal.h +++ b/src/internal.h @@ -144,7 +144,11 @@ #include #endif -#define DISPATCH_PURE_C (!defined(__OBJC__) && !defined(__cplusplus)) +#if defined(__OBJC__) || defined(__cplusplus) +#define DISPATCH_PURE_C 0 +#else +#define DISPATCH_PURE_C 1 +#endif /* private.h must be included last to avoid picking up installed headers. */ #include "os/object_private.h" @@ -256,10 +260,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #endif #include #include -#if USE_FUTEX_SEM -#include -#include -#endif #if USE_POSIX_SEM #include #endif @@ -939,6 +939,16 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define VOUCHER_USE_BANK_AUTOREDEEM 1 #endif +#if OS_FIREHOSE_SPI +#include +#else +typedef uint64_t firehose_activity_id_t; +typedef uint64_t firehose_tracepoint_id_t; +typedef unsigned long firehose_activity_flags_t; +typedef uint8_t firehose_stream_t; +typedef void * voucher_activity_hooks_t; +#endif + #if !VOUCHER_USE_MACH_VOUCHER || \ !__has_include() || \ !DISPATCH_HOST_SUPPORTS_OSX(101200) diff --git a/src/object.c b/src/object.c index 56f6dc88a..1928df53f 100644 --- a/src/object.c +++ b/src/object.c @@ -240,7 +240,7 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) } else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && !slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { if (slowpath(!tq)) { - tq = _dispatch_get_root_queue(QOS_CLASS_DEFAULT, false); + tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); } _dispatch_object_set_target_queue_inline(dou._do, tq); } diff --git a/src/object_internal.h b/src/object_internal.h index 1ae8ae6ef..80bb10251 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -490,7 +490,7 @@ void _dispatch_last_resort_autorelease_pool_pop(void *context); }) #else #define dispatch_invoke_with_autoreleasepool(flags, ...) \ - do { __VA_ARGS__; } while (0) + do { (void)flags; __VA_ARGS__; } while (0) #endif diff --git a/src/queue.c b/src/queue.c index ca8fa5aab..1670783d3 100644 --- a/src/queue.c +++ b/src/queue.c @@ -794,8 +794,6 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, /* XXXRW: POSIX semaphores don't support LIFO? 
*/ int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0); (void)dispatch_assume_zero(ret); -#elif USE_FUTEX_SEM - pqc->dpq_thread_mediator.dsema_futex = DISPATCH_FUTEX_INIT; #endif } #endif // DISPATCH_USE_PTHREAD_POOL @@ -909,10 +907,12 @@ libdispatch_init(void) #if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); #endif +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { _dispatch_thread_key_create(&dispatch_sema4_key, _dispatch_thread_semaphore_dispose); } +#endif #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 _dispatch_main_q.do_targetq = &_dispatch_root_queues[ @@ -1076,7 +1076,7 @@ _dispatch_get_queue_attr(qos_class_t qos, int prio, dispatch_queue_attr_t _dispatch_get_default_queue_attr(void) { - return _dispatch_get_queue_attr(QOS_CLASS_UNSPECIFIED, 0, + return _dispatch_get_queue_attr(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, _dispatch_queue_attr_overcommit_unspecified, DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false); } @@ -1238,10 +1238,15 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, } } if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { - qos = _pthread_qos_class_decode(tq->dq_priority, NULL, NULL); - // force going through _dispatch_queue_priority_inherit_from_target - tq = _dispatch_get_root_queue(qos, overcommit == - _dispatch_queue_attr_overcommit_enabled); + if (overcommit == _dispatch_queue_attr_overcommit_enabled) { + if (!(tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG)) { + tq++; + } + } else { + if (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { + tq--; + } + } } else { tq = NULL; } @@ -2346,7 +2351,7 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) offset += dsnprintf(&buf[offset], bufsiz - offset, "target = %s[%p], width = 0x%x, state = 0x%016llx", target && target->dq_label ? 
target->dq_label : "", target, - dq->dq_width, dq_state); + dq->dq_width, (unsigned long long)dq_state); if (_dq_state_is_suspended(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d", _dq_state_suspend_cnt(dq_state)); @@ -2653,6 +2658,7 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) { flags = _dispatch_block_normalize_flags(flags); +#if HAVE_PTHREAD_WORKQUEUE_QOS bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { @@ -2666,6 +2672,7 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, pri = _dispatch_priority_propagate(); flags |= DISPATCH_BLOCK_HAS_PRIORITY; } +#endif dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); #if DISPATCH_DEBUG dispatch_assert(_dispatch_block_get_data(db)); @@ -3988,8 +3995,10 @@ _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, } if (target) { return _dispatch_queue_class_wakeup(dq, pp, flags, target); +#if HAVE_PTHREAD_WORKQUEUE_QOS } else if (pp) { return _dispatch_queue_class_override_drainer(dq, pp, flags); +#endif } else if (flags & DISPATCH_WAKEUP_CONSUME) { return _dispatch_release_tailcall(dq); } @@ -4023,6 +4032,13 @@ _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, return _dispatch_release_tailcall(dq); } } +#else +void +_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) +{ + LINUX_PORT_ERROR(); +} #endif void diff --git a/src/semaphore.c b/src/semaphore.c index 6a1006b82..09d68108f 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -85,8 +85,6 @@ _dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau) #if USE_POSIX_SEM int ret = sem_init(&dsema->dsema_sem, 0, 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_FUTEX_SEM - dsema->dsema_futex = DISPATCH_FUTEX_INIT; #endif } @@ -230,9 +228,6 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) #elif USE_POSIX_SEM int ret = sem_post(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_FUTEX_SEM - bool ret = _dispatch_futex_signal(&dsema->dsema_futex); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); #elif USE_WIN32_SEM _dispatch_semaphore_create_handle(&dsema->dsema_handle); int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); @@ -268,9 +263,6 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, #elif USE_POSIX_SEM struct timespec _timeout; int ret; -#elif USE_FUTEX_SEM - struct timespec _timeout; - bool ret; #elif USE_WIN32_SEM uint64_t nsec; DWORD msec; @@ -329,7 +321,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, &orig, relaxed)) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_FUTEX_SEM || USE_WIN32_SEM +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return -1; #endif @@ -348,12 +340,6 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, ret = sem_wait(&dsema->dsema_sem); } while (ret != 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_FUTEX_SEM - do { - pthread_workqueue_signal_np(); - ret = _dispatch_futex_wait(&dsema->dsema_futex, NULL); - } while (ret == false && errno == EINTR); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); #elif USE_WIN32_SEM WaitForSingleObject(dsema->dsema_handle, INFINITE); #endif @@ -441,11 +427,6 @@ _dispatch_group_wake(dispatch_group_t dg, bool needs_release) int ret = sem_post(&dg->dg_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); } 
while (--rval); -#elif USE_FUTEX_SEM - do { - bool ret = _dispatch_futex_signal(&dsema->dsema_futex); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - } while (--rval); #elif USE_WIN32_SEM _dispatch_semaphore_create_handle(&dg->dg_handle); int ret; @@ -528,9 +509,6 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) #elif USE_POSIX_SEM // KVV struct timespec _timeout; int ret; -#elif USE_FUTEX_SEM - struct timespec _timeout; - bool ret; #elif USE_WIN32_SEM // KVV uint64_t nsec; DWORD msec; @@ -587,19 +565,6 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) DISPATCH_SEMAPHORE_VERIFY_RET(ret); break; } -#elif USE_FUTEX_SEM - do { - uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - pthread_workqueue_signal_np(); - ret = slowpath(_dispatch_futex_wait(&dsema->dsema_futex, &_timeout)); - } while (ret == false && errno == EINTR); - - if (!(ret == false && errno == ETIMEDOUT)) { - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - break; - } #elif USE_WIN32_SEM nsec = _dispatch_timeout(timeout); msec = (DWORD)(nsec / (uint64_t)1000000); @@ -619,7 +584,7 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) orig_waiters - 1, &orig_waiters, relaxed)) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_FUTEX_SEM || USE_WIN32_SEM +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return -1; #endif @@ -638,12 +603,6 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) ret = sem_wait(&dg->dg_sem); } while (ret == -1 && errno == EINTR); DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_FUTEX_SEM - do { - pthread_workqueue_signal_np(); - ret = _dispatch_futex_wait(&dsema->dsema_futex, NULL); - } while (ret == false && errno == EINTR); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); #elif USE_WIN32_SEM WaitForSingleObject(dg->dg_handle, INFINITE); #endif @@ -661,7 +620,7 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) if (timeout == 0) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_FUTEX_SEM || USE_WIN32_SEM +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return (-1); #endif diff --git a/src/shims.h b/src/shims.h index b26884396..db288225e 100644 --- a/src/shims.h +++ b/src/shims.h @@ -93,6 +93,10 @@ typedef unsigned long pthread_priority_t; #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #endif // HAVE_PTHREAD_QOS_H +#ifdef __linux__ +#include "shims/linux_stubs.h" +#endif + typedef uint32_t dispatch_priority_t; #define DISPATCH_SATURATED_OVERRIDE ((dispatch_priority_t)UINT32_MAX) @@ -220,10 +224,6 @@ void __builtin_trap(void); #endif -#ifdef __linux__ -#include "shims/linux_stubs.h" -#endif - #ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" #endif diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h index b5946a4c9..7726e9073 100644 --- a/src/shims/linux_stubs.h +++ b/src/shims/linux_stubs.h @@ -16,11 +16,88 @@ #ifndef __DISPATCH__STUBS__INTERNAL #define __DISPATCH__STUBS__INTERNAL -mach_port_t pthread_mach_thread_np(); +// marker for hacks we have made to make progress +#define __LINUX_PORT_HDD__ 1 + +/* + * Stub out defines for some mach types and related macros + */ + +typedef uint32_t mach_port_t; + +#define MACH_PORT_NULL (0) +#define MACH_PORT_DEAD (-1) + +typedef uint32_t mach_error_t; + +typedef uint32_t mach_vm_size_t; + +typedef uint32_t mach_msg_return_t; + +typedef uint32_t 
mach_msg_bits_t; + +typedef uintptr_t mach_vm_address_t; + +typedef uint32_t dispatch_mach_msg_t; + +typedef uint32_t dispatch_mach_t; + +typedef uint32_t dispatch_mach_reason_t; + +typedef uint32_t voucher_activity_mode_t; + +typedef uint32_t voucher_activity_trace_id_t; + +typedef uint32_t voucher_activity_id_t; + +typedef uint32_t _voucher_activity_buffer_hook_t;; + +typedef uint32_t voucher_activity_flag_t; + +typedef struct { } mach_msg_header_t; + + +typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t, + dispatch_mach_msg_t, mach_error_t); + +typedef void (*dispatch_mach_msg_destructor_t)(void*); mach_port_t mach_task_self(); -void mach_vm_deallocate(mach_port_t, mach_vm_address_t, mach_vm_size_t); +// Print a warning when an unported code path executes. +#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); } while (0) + +/* + * Stub out defines for other missing types + */ + +#if __linux__ +// we fall back to use kevent +#define kevent64_s kevent +#define kevent64(kq,cl,nc,el,ne,f,to) kevent(kq,cl,nc,el,ne,to) +#endif + +// SIZE_T_MAX should not be hardcoded like this here. +#define SIZE_T_MAX (0x7fffffff) + +// Define to 0 the NOTE_ values that are not present on Linux. +// Revisit this...would it be better to ifdef out the uses instead?? + +// The following values are passed as part of the EVFILT_TIMER requests + +#define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */ + +#define NOTE_SECONDS 0x01 +#define NOTE_USECONDS 0x02 +#define NOTE_NSECONDS 0x04 +#define NOTE_ABSOLUTE 0x08 +#define NOTE_CRITICAL 0x10 +#define NOTE_BACKGROUND 0x20 +#define NOTE_LEEWAY 0x40 + +// need to catch the following usage if it happens .. 
+// we simply return '0' as a value probably not correct + +#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;}) -char* mach_error_string(mach_msg_return_t); #endif diff --git a/src/shims/lock.c b/src/shims/lock.c index dfca11ba8..2fab69107 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -117,7 +117,6 @@ _dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags) #pragma mark - futex wrappers #if HAVE_FUTEX #include -#include #include DISPATCH_ALWAYS_INLINE @@ -140,7 +139,7 @@ _dispatch_futex_wait(uint32_t *uaddr, uint32_t val, ); } -static int +static void _dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags) { int rc; @@ -157,7 +156,7 @@ _dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect, { _dlock_syscall_switch(err, _dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout, - NULL, 0, opflags); + NULL, 0, opflags), case 0: return; default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed"); ); @@ -167,7 +166,7 @@ static void _dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags) { _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags); + _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags), case 0: return; default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed"); ); @@ -183,7 +182,7 @@ _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, #if HAVE_UL_COMPARE_AND_WAIT _dispatch_ulock_wait((uint32_t *)address, value, 0, flags); #elif HAVE_FUTEX - _dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_FLAG_PRIVATE); + _dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG); #else mach_msg_timeout_t timeout = 1; while (os_atomic_load(address, relaxed) == value) { @@ -199,7 +198,7 @@ _dispatch_wake_by_address(uint32_t volatile *address) #if HAVE_UL_COMPARE_AND_WAIT _dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL); #elif HAVE_FUTEX - _dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_FLAG_PRIVATE); + _dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG); #else (void)address; #endif @@ -207,6 +206,7 @@ _dispatch_wake_by_address(uint32_t volatile *address) #pragma mark - thread event +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK semaphore_t _dispatch_thread_semaphore_create(void) { @@ -228,19 +228,22 @@ _dispatch_thread_semaphore_dispose(void *ctxt) DISPATCH_VERIFY_MIG(kr); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } +#endif void _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { kern_return_t kr = semaphore_signal(dte->dte_semaphore); DISPATCH_SEMAPHORE_VERIFY_KR(kr); return; } +#endif #if HAVE_UL_COMPARE_AND_WAIT _dispatch_ulock_wake(&dte->dte_value, 0); #elif HAVE_FUTEX - _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_FLAG_PRIVATE); + _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG); #elif USE_POSIX_SEM int rc = sem_post(&dte->dte_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); @@ -250,6 +253,7 @@ _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) void _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { kern_return_t kr; do { @@ -258,6 +262,7 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) DISPATCH_SEMAPHORE_VERIFY_KR(kr); return; } +#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX for (;;) { uint32_t value = os_atomic_load(&dte->dte_value, acquire); @@ -269,8 +274,8 @@ 
_dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0); dispatch_assert(rc == 0 || rc == EFAULT); #elif HAVE_FUTEX - _dispatch_futex_wait(&dgl->dte_value, UINT32_MAX, - NULL, FUTEX_FLAG_PRIVATE); + _dispatch_futex_wait(&dte->dte_value, UINT32_MAX, + NULL, FUTEX_PRIVATE_FLAG); #endif } #elif USE_POSIX_SEM @@ -318,9 +323,11 @@ _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, } #elif HAVE_FUTEX void -_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul) +_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, + dispatch_lock_options_t flags) { - _dispatch_futex_lock_pi(&dul->dul_lock, 1, NULL, FUTEX_FLAG_PRIVATE); + (void)flags; + _dispatch_futex_lock_pi(&dul->dul_lock, NULL, 1, FUTEX_PRIVATE_FLAG); } #else void @@ -355,7 +362,7 @@ _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, } #elif HAVE_FUTEX // futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS - _dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_FLAG_PRIVATE); + _dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_PRIVATE_FLAG); #else (void)dul; #endif @@ -388,7 +395,7 @@ _dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value, #if HAVE_UL_UNFAIR_LOCK _dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 0, flags); #elif HAVE_FUTEX - _dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_FLAG_PRIVATE); + _dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG); #else _dispatch_thread_switch(tid_new, flags, timeout++); #endif @@ -407,7 +414,7 @@ _dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur) #if HAVE_UL_UNFAIR_LOCK _dispatch_unfair_lock_wake(&dgl->dgl_lock, ULF_WAKE_ALL); #elif HAVE_FUTEX - _dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_FLAG_PRIVATE); + _dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_PRIVATE_FLAG); #else (void)dgl; #endif diff --git a/src/shims/lock.h b/src/shims/lock.h index 52a705cd5..0786d9947 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -87,6 +87,9 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) } #elif defined(__linux__) +#include +#include +#include /* For SYS_xxx definitions */ typedef uint32_t dispatch_lock; typedef pid_t dispatch_lock_owner; @@ -95,7 +98,7 @@ typedef pid_t dispatch_lock_owner; #define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK) #define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS) #define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED) -#define _dispatch_tid_self() /* FIXME cached in TSD in the swift port */ +#define _dispatch_tid_self() syscall(SYS_gettid) /* FIXME: should be cached in TSD instead of doing syscall each time */ DISPATCH_ALWAYS_INLINE static inline bool @@ -157,7 +160,7 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif // HAVE_UL_UNFAIR_LOCK #ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT) +#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX) #endif #ifndef HAVE_FUTEX @@ -235,6 +238,7 @@ typedef struct dispatch_thread_event_s { #endif } dispatch_thread_event_s, *dispatch_thread_event_t; +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK semaphore_t _dispatch_thread_semaphore_create(void); void _dispatch_thread_semaphore_dispose(void *); @@ -262,6 +266,7 @@ _dispatch_put_thread_semaphore(semaphore_t sema) return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema); } } +#endif DISPATCH_NOT_TAIL_CALLED void 
_dispatch_thread_event_wait_slow(dispatch_thread_event_t); @@ -271,10 +276,12 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_init(dispatch_thread_event_t dte) { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { dte->dte_semaphore = _dispatch_get_thread_semaphore(); return; } +#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX dte->dte_value = 0; #elif USE_POSIX_SEM @@ -287,10 +294,12 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_signal(dispatch_thread_event_t dte) { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { _dispatch_thread_event_signal_slow(dte); return; } +#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX if (os_atomic_inc_orig(&dte->dte_value, release) == 0) { // 0 -> 1 transition doesn't need a signal @@ -309,10 +318,12 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_wait(dispatch_thread_event_t dte) { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { _dispatch_thread_event_wait_slow(dte); return; } +#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX if (os_atomic_dec(&dte->dte_value, acquire) == 0) { // 1 -> 0 is always a valid transition, so we can return @@ -329,10 +340,12 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_destroy(dispatch_thread_event_t dte) { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { _dispatch_put_thread_semaphore(dte->dte_semaphore); return; } +#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX // nothing to do dispatch_assert(dte->dte_value == 0); @@ -428,7 +441,7 @@ _dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l) #if HAVE_FUTEX if (likely(os_atomic_cmpxchgv(&l->dul_lock, tid_self, DLOCK_OWNER_NULL, &tid_cur, release))) { - return; + return false; } #else tid_cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release); diff --git a/src/source.c b/src/source.c index dd26814df..e5743e2f9 100644 --- a/src/source.c +++ b/src/source.c @@ -622,7 +622,7 @@ _dispatch_source_kevent_unregister(dispatch_source_t ds) } } else if (!ds->ds_is_direct_kevent) { ds->ds_dkev = NULL; - dispatch_assert(ds->ds_is_installed); + dispatch_assert((bool)ds->ds_is_installed); TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); _dispatch_kevent_unregister(dk, flags, 0); } else { @@ -1031,8 +1031,10 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, if (tq) { return _dispatch_queue_class_wakeup(ds->_as_dq, pp, flags, tq); +#if HAVE_PTHREAD_WORKQUEUE_QOS } else if (pp) { return _dispatch_queue_class_override_drainer(ds->_as_dq, pp, flags); +#endif } else if (flags & DISPATCH_WAKEUP_CONSUME) { return _dispatch_release_tailcall(ds); } @@ -1204,10 +1206,12 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, if (dqf & (DSF_CANCELED | DQF_RELEASED)) { goto done; // rdar://20204025 } +#if HAVE_MACH if (ke->filter == EVFILT_MACHPORT && dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { DISPATCH_INTERNAL_CRASH(ke->flags,"Unexpected kevent for mach channel"); } +#endif unsigned long data; if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && @@ -1220,9 +1224,11 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, // Since we never ask for both EV_ONESHOT and EV_VANISHED for sources, // if we get both bits it was a real EV_VANISHED delivery os_atomic_store2o(ds, ds_pending_data, 0, relaxed); +#if HAVE_MACH } else if (ke->filter == EVFILT_MACHPORT) { data = DISPATCH_MACH_RECV_MESSAGE; os_atomic_store2o(ds, 
ds_pending_data, data, relaxed); +#endif } else if (ds->ds_is_level) { // ke->data is signed and "negative available data" makes no sense // zero bytes happens when EV_EOF is set @@ -1571,12 +1577,14 @@ _dispatch_kevent_error(_dispatch_kevent_qos_s *ke) ke->flags |= kev->flags; } +#if HAVE_MACH if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP && (ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled && kev && (kev->fflags & MACH_RCV_MSG)) { DISPATCH_INTERNAL_CRASH(ke->ident, "Missing EVFILT_MACHPORT support for ports"); } +#endif if (ke->data) { // log the unexpected error @@ -6078,8 +6086,10 @@ _dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, done: if (tq) { return _dispatch_queue_class_wakeup(dm->_as_dq, pp, flags, tq); +#if HAVE_PTHREAD_WORKQUEUE_QOS } else if (pp) { return _dispatch_queue_class_override_drainer(dm->_as_dq, pp, flags); +#endif } else if (flags & DISPATCH_WAKEUP_CONSUME) { return _dispatch_release_tailcall(dm); } diff --git a/src/voucher_internal.h b/src/voucher_internal.h index 461cdfddd..3aa1a6579 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -692,9 +692,10 @@ _dispatch_continuation_voucher_set(dispatch_continuation_t dc, DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, long dc_vtable) +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, voucher_t ov, + uintptr_t dc_flags) { - (void)dc; (void)dc_vtable; + (void)dc; (void)ov; (void)dc_flags; } #endif // VOUCHER_USE_MACH_VOUCHER
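
---

Editor's note on the semaphore backend: with the futex-based semaphore removed from configure.ac and src/semaphore.c above, Linux falls back to POSIX semaphores. The sketch below is an illustrative, standalone reduction of the sem_timedwait() pattern that _dispatch_semaphore_wait_slow() relies on (absolute CLOCK_REALTIME deadline, retry on EINTR, ETIMEDOUT on expiry); it is not part of the patch, and the function and variable names are demo-only.

```c
// Illustrative only: the POSIX-semaphore wait-with-deadline pattern used by
// the Linux fallback path once USE_FUTEX_SEM is gone.
#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static int wait_with_timeout(sem_t *sem, unsigned seconds)
{
	struct timespec deadline;
	clock_gettime(CLOCK_REALTIME, &deadline); // sem_timedwait takes an absolute CLOCK_REALTIME deadline
	deadline.tv_sec += seconds;

	int ret;
	do {
		ret = sem_timedwait(sem, &deadline);
	} while (ret == -1 && errno == EINTR); // retry if interrupted by a signal
	return ret; // 0 on success, -1 with errno == ETIMEDOUT on timeout
}

int main(void)
{
	sem_t sem;
	sem_init(&sem, 0, 0);   // process-private, initially unavailable
	sem_post(&sem);         // make one unit available

	printf("first wait:  %d\n", wait_with_timeout(&sem, 1)); // 0: consumes the posted unit
	printf("second wait: %d\n", wait_with_timeout(&sem, 1)); // -1: blocks ~1s, then ETIMEDOUT

	sem_destroy(&sem);
	return 0;
}
```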
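Editor's note on the futex wrappers: src/shims/lock.c above fixes the flag name to FUTEX_PRIVATE_FLAG and routes _dispatch_wait_on_address()/_dispatch_wake_by_address() through _dispatch_futex_wait()/_dispatch_futex_wake(). The following minimal sketch shows the raw futex(2) interface those wrappers build on; it is not code from the patch, the helper names are invented for the demo, and the sleep() is only a crude way to let the waiter block first.

```c
// Minimal illustration of FUTEX_WAIT/FUTEX_WAKE on a shared 32-bit word:
// the waiter sleeps only while the word still holds the expected value, so a
// wake that races with the wait is never lost.
#include <errno.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t ready; // futex word, 0 = not ready

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	// futex(2) has no glibc wrapper; timeout/uaddr2/val3 are unused here.
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
	(void)arg;
	while (__atomic_load_n(&ready, __ATOMIC_ACQUIRE) == 0) {
		// The kernel re-checks *uaddr == 0 before sleeping.
		long rc = futex(&ready, FUTEX_WAIT_PRIVATE, 0);
		if (rc == -1 && errno != EAGAIN && errno != EINTR) break;
	}
	puts("waiter: signaled");
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, waiter, NULL);
	sleep(1); // demo only: give the waiter time to block
	__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
	futex(&ready, FUTEX_WAKE_PRIVATE, 1); // wake up to one waiter
	pthread_join(t, NULL);
	return 0;
}
```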
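Editor's note on _dispatch_tid_self(): src/shims/lock.h above defines it as syscall(SYS_gettid) and leaves a FIXME to cache the result in TSD. The fragment below is a hypothetical sketch of that caching, under the assumption that a per-thread slot (here a plain __thread variable and a demo-only function name) is acceptable; a real implementation would also need to invalidate the cache after fork().

```c
// Hypothetical sketch of the TSD caching the lock.h FIXME alludes to:
// resolve gettid() once per thread so lock fast paths avoid a syscall
// on every acquisition.
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static __thread pid_t _cached_tid; // 0 until first use on this thread

static inline pid_t _dispatch_tid_self_cached(void)
{
	if (_cached_tid == 0) {
		_cached_tid = (pid_t)syscall(SYS_gettid);
		// Note: a production version would reset this in a fork handler,
		// since the child gets a new tid.
	}
	return _cached_tid;
}
```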