From 442fbc9d237f5ec73b89e46275796350834b042e Mon Sep 17 00:00:00 2001 From: Apple Date: Wed, 18 Nov 2020 23:16:54 +0000 Subject: [PATCH] libplatform-254.40.4.tar.gz --- include/libkern/OSAtomicDeprecated.h | 113 ++++++++- include/libkern/OSAtomicQueue.h | 78 +------ include/os/base.h | 319 -------------------------- include/string_x86.h | 56 +++++ include/ucontext.h | 22 +- internal/os/internal.h | 25 +- private/os/base_private.h | 39 ---- private/os/crashlog_private.h | 230 +++++++++++++++++++ private/os/internal/atomic.h | 236 ------------------- private/os/internal/crashlog.h | 48 ---- private/os/internal/internal_shared.h | 59 ----- private/os/lock_private.h | 129 ++++++----- src/atomics/OSAtomicFifo.h | 35 +++ src/atomics/arm64/OSAtomic.c | 66 ++++++ src/atomics/init.c | 22 +- src/atomics/x86_64/OSAtomic.s | 14 +- src/atomics/x86_64/OSAtomicFifo.c | 65 ++++++ src/cachecontrol/arm64/cache.s | 4 +- src/introspection/introspection.c | 4 +- src/os/atomic.c | 46 ++-- src/os/lock.c | 162 ++++++++----- src/setjmp/arm/_setjmp.h | 6 +- src/setjmp/arm/longjmp.s | 7 +- src/setjmp/arm/setjmp.s | 27 ++- src/setjmp/arm64/setjmp.s | 43 +++- src/setjmp/generic/sigtramp.c | 40 +++- src/setjmp/x86_64/_setjmp.s | 1 + src/setjmp/x86_64/setjmp.s | 2 +- src/simple/asl.c | 4 +- src/simple/string_io.c | 45 +++- src/ucontext/arm64/_ctx_start.s | 78 +++++++ src/ucontext/arm64/_setcontext.s | 151 ++++++++++++ src/ucontext/arm64/asm_help.h | 76 ++++++ src/ucontext/arm64/getcontext.s | 182 +++++++++++++++ src/ucontext/arm64/mcontext.c | 58 +++++ src/ucontext/generic/getmcontext.c | 144 +++++++++++- src/ucontext/generic/makecontext.c | 223 +++++++++++++++++- src/ucontext/generic/setcontext.c | 49 +++- src/ucontext/generic/swapcontext.c | 35 ++- src/ucontext/x86_64/_ctx_start.s | 6 +- src/ucontext/x86_64/_setcontext.s | 6 +- src/ucontext/x86_64/getcontext.s | 6 +- xcodeconfig/libplatform.aliases | 2 - xcodeconfig/libplatform.xcconfig | 22 +- xcodeconfig/os.xcconfig | 2 +- xcodeconfig/perarch.xcconfig | 5 + xcodeconfig/static.xcconfig | 1 + 47 files changed, 1927 insertions(+), 1066 deletions(-) delete mode 100644 include/os/base.h create mode 100644 include/string_x86.h delete mode 100644 private/os/base_private.h create mode 100644 private/os/crashlog_private.h delete mode 100644 private/os/internal/atomic.h delete mode 100644 private/os/internal/crashlog.h delete mode 100644 private/os/internal/internal_shared.h create mode 100644 src/atomics/OSAtomicFifo.h create mode 100644 src/atomics/arm64/OSAtomic.c create mode 100644 src/atomics/x86_64/OSAtomicFifo.c create mode 100644 src/ucontext/arm64/_ctx_start.s create mode 100644 src/ucontext/arm64/_setcontext.s create mode 100644 src/ucontext/arm64/asm_help.h create mode 100644 src/ucontext/arm64/getcontext.s create mode 100644 src/ucontext/arm64/mcontext.c diff --git a/include/libkern/OSAtomicDeprecated.h b/include/libkern/OSAtomicDeprecated.h index 1b0ef91..3a56eae 100644 --- a/include/libkern/OSAtomicDeprecated.h +++ b/include/libkern/OSAtomicDeprecated.h @@ -35,13 +35,14 @@ * is preferred. 
*/ +#include + #if !(defined(OSATOMIC_USE_INLINED) && OSATOMIC_USE_INLINED) #include #include #include #include -#include #ifndef OSATOMIC_DEPRECATED #define OSATOMIC_DEPRECATED 1 @@ -161,7 +162,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0) int32_t OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue ); -#if __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_10 || __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_1 +#if __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_10 || __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_1 || TARGET_OS_DRIVERKIT /*! @abstract Atomically increments a 32-bit value. @result Returns the new value. @@ -248,7 +249,7 @@ int64_t OSAtomicAdd64Barrier( int64_t __theAmount, volatile OSAtomic_int64_aligned64_t *__theValue ); -#if __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_10 || __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_1 +#if __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_10 || __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_1 || TARGET_OS_DRIVERKIT /*! @abstract Atomically increments a 64-bit value. @result Returns the new value. @@ -361,7 +362,7 @@ int32_t OSAtomicOr32Orig( uint32_t __theMask, volatile uint32_t *__theValue ); This function performs the bitwise OR of the value given by __theMask with the value in the memory location referenced by __theValue, storing the result back to that memory location atomically. - + This function is equivalent to {@link OSAtomicOr32Orig} except that it also introduces a barrier. @result Returns the original value referenced by __theValue. @@ -481,7 +482,7 @@ int32_t OSAtomicXor32Orig( uint32_t __theMask, volatile uint32_t *__theValue ); OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_xor) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2) int32_t OSAtomicXor32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue ); - + /*! @group Compare and swap * Functions in this group return true if the swap occured. There are several versions, @@ -587,7 +588,7 @@ bool OSAtomicCompareAndSwapIntBarrier( int __oldValue, int __newValue, volatile match, this function stores the value from __newValue into that memory location atomically. - This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, + This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, or {@link OSAtomicCompareAndSwap64} on 64-bit architectures. @result Returns TRUE on a match, FALSE otherwise. */ @@ -606,7 +607,7 @@ bool OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long This function is equivalent to {@link OSAtomicCompareAndSwapLong} except that it also introduces a barrier. - This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, + This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, or {@link OSAtomicCompareAndSwap64} on 64-bit architectures. @result Returns TRUE on a match, FALSE otherwise. */ @@ -706,7 +707,7 @@ bool OSAtomicTestAndSetBarrier( uint32_t __n, volatile void *__theAddress ); For example, if __theAddress points to a 64-bit value, to compare the value of the most significant bit, you would specify 56 for __n. - + @result Returns the original value of the bit being tested. */ @@ -719,15 +720,15 @@ bool OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress ); @discussion This function tests a bit in the value referenced by __theAddress and if it is not cleared, clears it. 
- + The bit is chosen by the value of __n such that the operation will be performed on bit (0x80 >> (__n & 7)) of byte ((char *)__theAddress + (n >> 3)). - + For example, if __theAddress points to a 64-bit value, to compare the value of the most significant bit, you would specify 56 for __n. - + This function is equivalent to {@link OSAtomicTestAndSet} except that it also introduces a barrier. @result @@ -736,7 +737,7 @@ bool OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress ); OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_and) __OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0) bool OSAtomicTestAndClearBarrier( uint32_t __n, volatile void *__theAddress ); - + /*! @group Memory barriers */ @@ -1174,4 +1175,92 @@ __END_DECLS #endif // defined(OSATOMIC_USE_INLINED) && OSATOMIC_USE_INLINED +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT + +__BEGIN_DECLS + +/*! @group Lockless atomic fifo enqueue and dequeue + * These routines manipulate singly-linked FIFO lists. + * + * This API is deprecated and no longer recommended + */ + +/*! @abstract The data structure for a fifo queue head. + @discussion + You should always initialize a fifo queue head structure with the + initialization vector {@link OS_ATOMIC_FIFO_QUEUE_INIT} before use. + */ +#if defined(__LP64__) + +typedef volatile struct { + void *opaque1; + void *opaque2; + int opaque3; +} __attribute__ ((aligned (16))) OSFifoQueueHead; + +#else + +typedef volatile struct { + void *opaque1; + void *opaque2; + int opaque3; +} OSFifoQueueHead; + +#endif +/*! @abstract The initialization vector for a fifo queue head. */ +#define OS_ATOMIC_FIFO_QUEUE_INIT { NULL, NULL, 0 } + +/*! @abstract Enqueue an element onto a list. + @discussion + Memory barriers are incorporated as needed to permit thread-safe access + to the queue element. + @param __list + The list on which you want to enqueue the element. + @param __new + The element to add. + @param __offset + The "offset" parameter is the offset (in bytes) of the link field + from the beginning of the data structure being queued (__new). + The link field should be a pointer type. + The __offset value needs to be same for all enqueuing and + dequeuing operations on the same list, even if different structure types + are enqueued on that list. The use of offsetset(), defined in + stddef.h is the common way to specify the __offset + value. + + @note + This API is deprecated and no longer recommended + */ +__API_DEPRECATED("No longer supported", macos(10.7, 10.16)) +void OSAtomicFifoEnqueue( OSFifoQueueHead *__list, void *__new, size_t __offset); + +/*! @abstract Dequeue an element from a list. + @discussion + Memory barriers are incorporated as needed to permit thread-safe access + to the queue element. + @param __list + The list from which you want to dequeue an element. + @param __offset + The "offset" parameter is the offset (in bytes) of the link field + from the beginning of the data structure being dequeued (__new). + The link field should be a pointer type. + The __offset value needs to be same for all enqueuing and + dequeuing operations on the same list, even if different structure types + are enqueued on that list. The use of offsetset(), defined in + stddef.h is the common way to specify the __offset + value. + @result + Returns the oldest enqueued element, or NULL if the + list is empty. 
+ + @note + This API is deprecated and no longer recommended + */ +__API_DEPRECATED("No longer supported", macos(10.7, 10.16)) +void* OSAtomicFifoDequeue( OSFifoQueueHead *__list, size_t __offset); + +__END_DECLS + +#endif /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ + #endif /* _OSATOMIC_DEPRECATED_H_ */ diff --git a/include/libkern/OSAtomicQueue.h b/include/libkern/OSAtomicQueue.h index 103f1e8..0ca841c 100644 --- a/include/libkern/OSAtomicQueue.h +++ b/include/libkern/OSAtomicQueue.h @@ -28,6 +28,7 @@ #include #include #include +#include "OSAtomicDeprecated.h" #include @@ -109,83 +110,6 @@ void OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset); __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_4_0) void* OSAtomicDequeue( OSQueueHead *__list, size_t __offset); -#if defined(__x86_64__) || defined(__i386__) - -/*! @group Lockless atomic fifo enqueue and dequeue - * These routines manipulate singly-linked FIFO lists. - */ - -/*! @abstract The data structure for a fifo queue head. - @discussion - You should always initialize a fifo queue head structure with the - initialization vector {@link OS_ATOMIC_FIFO_QUEUE_INIT} before use. - */ -#if defined(__x86_64__) - -typedef volatile struct { - void *opaque1; - void *opaque2; - int opaque3; -} __attribute__ ((aligned (16))) OSFifoQueueHead; - -#else - -typedef volatile struct { - void *opaque1; - void *opaque2; - int opaque3; -} OSFifoQueueHead; - -#endif - -/*! @abstract The initialization vector for a fifo queue head. */ -#define OS_ATOMIC_FIFO_QUEUE_INIT { NULL, NULL, 0 } - -/*! @abstract Enqueue an element onto a list. - @discussion - Memory barriers are incorporated as needed to permit thread-safe access - to the queue element. - @param __list - The list on which you want to enqueue the element. - @param __new - The element to add. - @param __offset - The "offset" parameter is the offset (in bytes) of the link field - from the beginning of the data structure being queued (__new). - The link field should be a pointer type. - The __offset value needs to be same for all enqueuing and - dequeuing operations on the same list, even if different structure types - are enqueued on that list. The use of offsetset(), defined in - stddef.h is the common way to specify the __offset - value. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_NA) -void OSAtomicFifoEnqueue( OSFifoQueueHead *__list, void *__new, size_t __offset); - -/*! @abstract Dequeue an element from a list. - @discussion - Memory barriers are incorporated as needed to permit thread-safe access - to the queue element. - @param __list - The list from which you want to dequeue an element. - @param __offset - The "offset" parameter is the offset (in bytes) of the link field - from the beginning of the data structure being dequeued (__new). - The link field should be a pointer type. - The __offset value needs to be same for all enqueuing and - dequeuing operations on the same list, even if different structure types - are enqueued on that list. The use of offsetset(), defined in - stddef.h is the common way to specify the __offset - value. - @result - Returns the oldest enqueued element, or NULL if the - list is empty. 
- */ -__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_NA) -void* OSAtomicFifoDequeue( OSFifoQueueHead *__list, size_t __offset); - -#endif /* __i386__ || __x86_64__ */ - __END_DECLS #endif /* _OSATOMICQUEUE_H_ */ diff --git a/include/os/base.h b/include/os/base.h deleted file mode 100644 index b187dce..0000000 --- a/include/os/base.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __OS_BASE__ -#define __OS_BASE__ - -#include - -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif -#ifndef __has_include -#define __has_include(x) 0 -#endif -#ifndef __has_feature -#define __has_feature(x) 0 -#endif -#ifndef __has_attribute -#define __has_attribute(x) 0 -#endif -#ifndef __has_extension -#define __has_extension(x) 0 -#endif - -#undef OS_INLINE // -#if __GNUC__ -#define OS_NORETURN __attribute__((__noreturn__)) -#define OS_NOTHROW __attribute__((__nothrow__)) -#define OS_NONNULL1 __attribute__((__nonnull__(1))) -#define OS_NONNULL2 __attribute__((__nonnull__(2))) -#define OS_NONNULL3 __attribute__((__nonnull__(3))) -#define OS_NONNULL4 __attribute__((__nonnull__(4))) -#define OS_NONNULL5 __attribute__((__nonnull__(5))) -#define OS_NONNULL6 __attribute__((__nonnull__(6))) -#define OS_NONNULL7 __attribute__((__nonnull__(7))) -#define OS_NONNULL8 __attribute__((__nonnull__(8))) -#define OS_NONNULL9 __attribute__((__nonnull__(9))) -#define OS_NONNULL10 __attribute__((__nonnull__(10))) -#define OS_NONNULL11 __attribute__((__nonnull__(11))) -#define OS_NONNULL12 __attribute__((__nonnull__(12))) -#define OS_NONNULL13 __attribute__((__nonnull__(13))) -#define OS_NONNULL14 __attribute__((__nonnull__(14))) -#define OS_NONNULL15 __attribute__((__nonnull__(15))) -#define OS_NONNULL_ALL __attribute__((__nonnull__)) -#define OS_SENTINEL __attribute__((__sentinel__)) -#define OS_PURE __attribute__((__pure__)) -#define OS_CONST __attribute__((__const__)) -#define OS_WARN_RESULT __attribute__((__warn_unused_result__)) -#define OS_MALLOC __attribute__((__malloc__)) -#define OS_USED __attribute__((__used__)) -#define OS_UNUSED __attribute__((__unused__)) -#define OS_COLD __attribute__((__cold__)) -#define OS_WEAK __attribute__((__weak__)) -#define OS_WEAK_IMPORT __attribute__((__weak_import__)) -#define OS_NOINLINE __attribute__((__noinline__)) -#define OS_ALWAYS_INLINE __attribute__((__always_inline__)) -#define OS_TRANSPARENT_UNION __attribute__((__transparent_union__)) -#define OS_ALIGNED(n) __attribute__((__aligned__((n)))) -#define OS_FORMAT_PRINTF(x,y) __attribute__((__format__(printf,x,y))) -#define OS_EXPORT extern __attribute__((__visibility__("default"))) -#define OS_INLINE static __inline__ -#define OS_EXPECT(x, v) __builtin_expect((x), (v)) -#else -#define OS_NORETURN -#define OS_NOTHROW -#define OS_NONNULL1 -#define OS_NONNULL2 -#define OS_NONNULL3 -#define OS_NONNULL4 -#define OS_NONNULL5 
-#define OS_NONNULL6 -#define OS_NONNULL7 -#define OS_NONNULL8 -#define OS_NONNULL9 -#define OS_NONNULL10 -#define OS_NONNULL11 -#define OS_NONNULL12 -#define OS_NONNULL13 -#define OS_NONNULL14 -#define OS_NONNULL15 -#define OS_NONNULL_ALL -#define OS_SENTINEL -#define OS_PURE -#define OS_CONST -#define OS_WARN_RESULT -#define OS_MALLOC -#define OS_USED -#define OS_UNUSED -#define OS_COLD -#define OS_WEAK -#define OS_WEAK_IMPORT -#define OS_NOINLINE -#define OS_ALWAYS_INLINE -#define OS_TRANSPARENT_UNION -#define OS_ALIGNED(n) -#define OS_FORMAT_PRINTF(x,y) -#define OS_EXPORT extern -#define OS_INLINE static inline -#define OS_EXPECT(x, v) (x) -#endif - -#if __has_attribute(noescape) -#define OS_NOESCAPE __attribute__((__noescape__)) -#else -#define OS_NOESCAPE -#endif - -#if defined(__cplusplus) && defined(__clang__) -#define OS_FALLTHROUGH [[clang::fallthrough]] -#else -#define OS_FALLTHROUGH -#endif - -#if __has_feature(assume_nonnull) -#define OS_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") -#define OS_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") -#else -#define OS_ASSUME_NONNULL_BEGIN -#define OS_ASSUME_NONNULL_END -#endif - -#if __has_builtin(__builtin_assume) -#define OS_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr) -#else -#define OS_COMPILER_CAN_ASSUME(expr) ((void)(expr)) -#endif - -#if __has_extension(attribute_overloadable) -#define OS_OVERLOADABLE __attribute__((__overloadable__)) -#else -#define OS_OVERLOADABLE -#endif - -#if __has_attribute(enum_extensibility) -#define __OS_ENUM_ATTR __attribute__((enum_extensibility(open))) -#define __OS_ENUM_ATTR_CLOSED __attribute__((enum_extensibility(closed))) -#else -#define __OS_ENUM_ATTR -#define __OS_ENUM_ATTR_CLOSED -#endif // __has_attribute(enum_extensibility) - -#if __has_attribute(flag_enum) -/*! - * Compile with -Wflag-enum and -Wassign-enum to enforce at definition and - * assignment, respectively, i.e. -Wflag-enum prevents you from creating new - * enumeration values from illegal values within the enum definition, and - * -Wassign-enum prevents you from assigning illegal values to a variable of the - * enum type. - */ -#define __OS_OPTIONS_ATTR __attribute__((flag_enum)) -#else -#define __OS_OPTIONS_ATTR -#endif // __has_attribute(flag_enum) - -#if __has_feature(objc_fixed_enum) || __has_extension(cxx_fixed_enum) || \ - __has_extension(cxx_strong_enums) -#define OS_ENUM(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } _name##_t -#define OS_CLOSED_ENUM(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } \ - __OS_ENUM_ATTR_CLOSED _name##_t -#define OS_OPTIONS(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } \ - __OS_ENUM_ATTR __OS_OPTIONS_ATTR _name##_t -#define OS_CLOSED_OPTIONS(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } \ - __OS_ENUM_ATTR_CLOSED __OS_OPTIONS_ATTR _name##_t -#else -/*! - * There is unfortunately no good way in plain C to have both fixed-type enums - * and enforcement for clang's enum_extensibility extensions. The primary goal - * of these macros is to allow you to define an enum and specify its width in a - * single statement, and for plain C that is accomplished by defining an - * anonymous enum and then separately typedef'ing the requested type name to the - * requested underlying integer type. 
So the type emitted actually has no - * relationship at all to the enum, and therefore while the compiler could - * enforce enum extensibility if you used the enum type, it cannot do so if you - * use the "_t" type resulting from this expression. - * - * But we still define a named enum type and decorate it appropriately for you, - * so if you really want the enum extensibility enforcement, you can use the - * enum type yourself, i.e. when compiling with a C compiler: - * - * OS_CLOSED_ENUM(my_type, uint64_t, - * FOO, - * BAR, - * BAZ, - * ); - * - * my_type_t mt = 98; // legal - * enum my_type emt = 98; // illegal - * - * But be aware that the underlying enum type's width is subject only to the C - * language's guarantees -- namely that it will be compatible with int, char, - * and unsigned char. It is not safe to rely on the size of this type. - * - * When compiling in ObjC or C++, both of the above assignments are illegal. - */ -#define __OS_ENUM_C_FALLBACK(_name, _type, ...) \ - typedef _type _name##_t; enum _name { __VA_ARGS__ } - -#define OS_ENUM(_name, _type, ...) \ - typedef _type _name##_t; enum { __VA_ARGS__ } -#define OS_CLOSED_ENUM(_name, _type, ...) \ - __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ - __OS_ENUM_ATTR_CLOSED -#define OS_OPTIONS(_name, _type, ...) \ - __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ - __OS_ENUM_ATTR __OS_OPTIONS_ATTR -#define OS_CLOSED_OPTIONS(_name, _type, ...) \ - __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ - __OS_ENUM_ATTR_CLOSED __OS_OPTIONS_ATTR -#endif // __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) - -#if __has_feature(attribute_availability_swift) -// equivalent to __SWIFT_UNAVAILABLE from Availability.h -#define OS_SWIFT_UNAVAILABLE(_msg) \ - __attribute__((__availability__(swift, unavailable, message=_msg))) -#else -#define OS_SWIFT_UNAVAILABLE(_msg) -#endif - -#if __has_attribute(swift_private) -# define OS_REFINED_FOR_SWIFT __attribute__((__swift_private__)) -#else -# define OS_REFINED_FOR_SWIFT -#endif - -#if __has_attribute(swift_name) -# define OS_SWIFT_NAME(_name) __attribute__((__swift_name__(#_name))) -#else -# define OS_SWIFT_NAME(_name) -#endif - -#define __OS_STRINGIFY(s) #s -#define OS_STRINGIFY(s) __OS_STRINGIFY(s) -#define __OS_CONCAT(x, y) x ## y -#define OS_CONCAT(x, y) __OS_CONCAT(x, y) - -#ifdef __GNUC__ -#define os_prevent_tail_call_optimization() __asm__("") -#define os_is_compile_time_constant(expr) __builtin_constant_p(expr) -#define os_compiler_barrier() __asm__ __volatile__("" ::: "memory") -#else -#define os_prevent_tail_call_optimization() do { } while (0) -#define os_is_compile_time_constant(expr) 0 -#define os_compiler_barrier() do { } while (0) -#endif - -#if __has_attribute(not_tail_called) -#define OS_NOT_TAIL_CALLED __attribute__((__not_tail_called__)) -#else -#define OS_NOT_TAIL_CALLED -#endif - -typedef void (*os_function_t)(void *_Nullable); - -#ifdef __BLOCKS__ -/*! - * @typedef os_block_t - * - * @abstract - * Generic type for a block taking no arguments and returning no value. - * - * @discussion - * When not building with Objective-C ARC, a block object allocated on or - * copied to the heap must be released with a -[release] message or the - * Block_release() function. - * - * The declaration of a block literal allocates storage on the stack. - * Therefore, this is an invalid construct: - * - * os_block_t block; - * if (x) { - * block = ^{ printf("true\n"); }; - * } else { - * block = ^{ printf("false\n"); }; - * } - * block(); // unsafe!!! 
- * - * - * What is happening behind the scenes: - * - * if (x) { - * struct Block __tmp_1 = ...; // setup details - * block = &__tmp_1; - * } else { - * struct Block __tmp_2 = ...; // setup details - * block = &__tmp_2; - * } - * - * - * As the example demonstrates, the address of a stack variable is escaping the - * scope in which it is allocated. That is a classic C bug. - * - * Instead, the block literal must be copied to the heap with the Block_copy() - * function or by sending it a -[copy] message. - */ -typedef void (^os_block_t)(void); -#endif - -#endif // __OS_BASE__ diff --git a/include/string_x86.h b/include/string_x86.h new file mode 100644 index 0000000..7e9cfae --- /dev/null +++ b/include/string_x86.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2020 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _STRING_X86_H +#define _STRING_X86_H + +#include + +#if defined(__x86_64__) + +__BEGIN_DECLS +/* These SSE variants have the same behavior as their original functions. + * SSE instructions are used in these variants instead of best possible + * implementation. + */ +__OSX_AVAILABLE(10.16) __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +void *memmove_sse_np(void *__dst, const void *__src, size_t __len); + +__OSX_AVAILABLE(10.16) __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +void *memset_sse_np(void *__b, int __c, size_t __len); + +__OSX_AVAILABLE(10.16) __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +void bzero_sse_np(void *, size_t); + +__OSX_AVAILABLE(10.16) __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +void memset_pattern4_sse_np(void *__b, const void *__pattern4, size_t __len); + +__OSX_AVAILABLE(10.16) __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +void memset_pattern8_sse_np(void *__b, const void *__pattern8, size_t __len); + +__OSX_AVAILABLE(10.16) __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +void memset_pattern16_sse_np(void *__b, const void *__pattern16, size_t __len); +__END_DECLS + +#endif /* __x86_64__ */ + +#endif /* _STRING_X86_H */ diff --git a/include/ucontext.h b/include/ucontext.h index db570b2..27b4111 100644 --- a/include/ucontext.h +++ b/include/ucontext.h @@ -2,14 +2,14 @@ * Copyright (c) 2002, 2008, 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. 
Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -34,10 +34,18 @@ #include __BEGIN_DECLS -int getcontext(ucontext_t *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED; -void makecontext(ucontext_t *, void (*)(), int, ...) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED; -int setcontext(const ucontext_t *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED; -int swapcontext(ucontext_t * __restrict, const ucontext_t * __restrict) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED; +__API_DEPRECATED("No longer supported", macos(10.5, 10.6)) +int getcontext(ucontext_t *); + +__API_DEPRECATED("No longer supported", macos(10.5, 10.6)) +void makecontext(ucontext_t *, void (*)(), int, ...); + +__API_DEPRECATED("No longer supported", macos(10.5, 10.6)) +int setcontext(const ucontext_t *); + +__API_DEPRECATED("No longer supported", macos(10.5, 10.6)) +int swapcontext(ucontext_t * __restrict, const ucontext_t * __restrict); + __END_DECLS #else /* !_XOPEN_SOURCE */ #error The deprecated ucontext routines require _XOPEN_SOURCE to be defined diff --git a/internal/os/internal.h b/internal/os/internal.h index 1784216..bad563f 100644 --- a/internal/os/internal.h +++ b/internal/os/internal.h @@ -26,9 +26,6 @@ #include #include -#include "os/base_private.h" -#include "os/semaphore_private.h" - #include #include #include @@ -38,23 +35,21 @@ #endif #include + +#include + +#include "os/base_private.h" +#include "os/semaphore_private.h" +#include "os/crashlog_private.h" +#include "yield.h" + #define likely(x) os_likely(x) #define unlikely(x) os_unlikely(x) -#define __OS_CRASH__(rc, msg) ({ \ - _os_set_crash_log_cause_and_message(rc, msg); \ - os_prevent_tail_call_optimization(); \ - __builtin_trap(); \ - }) - #define __LIBPLATFORM_CLIENT_CRASH__(rc, msg) \ - __OS_CRASH__(rc, "BUG IN CLIENT OF LIBPLATFORM: " msg) + OS_BUG_CLIENT(rc, "LIBPLATFORM", msg) #define __LIBPLATFORM_INTERNAL_CRASH__(rc, msg) \ - __OS_CRASH__(rc, "BUG IN LIBPLATFORM: " msg) - -#define __OS_EXPOSE_INTERNALS__ 1 -#include "os/internal/internal_shared.h" -#include "yield.h" + OS_BUG_INTERNAL(rc, "LIBPLATFORM", msg) #define OS_NOEXPORT extern __attribute__((__visibility__("hidden"))) diff --git a/private/os/base_private.h b/private/os/base_private.h deleted file mode 100644 index 2d38266..0000000 --- a/private/os/base_private.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __OS_BASE_PRIVATE__ -#define __OS_BASE_PRIVATE__ - -#include - -#ifndef os_fastpath -#define os_fastpath(x) ((__typeof__(x))OS_EXPECT((long)(x), ~0l)) -#endif -#ifndef os_slowpath -#define os_slowpath(x) ((__typeof__(x))OS_EXPECT((long)(x), 0l)) -#endif -#ifndef os_likely -#define os_likely(x) OS_EXPECT(!!(x), 1) -#endif -#ifndef os_unlikely -#define os_unlikely(x) OS_EXPECT(!!(x), 0) -#endif - -#endif // __OS_BASE_PRIVATE__ diff --git a/private/os/crashlog_private.h b/private/os/crashlog_private.h new file mode 100644 index 0000000..8368c11 --- /dev/null +++ b/private/os/crashlog_private.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_CRASHLOG_PRIVATE__ +#define __OS_CRASHLOG_PRIVATE__ + +#include + +#if __has_include() +#include + +#if defined(__x86_64__) + +#define __os_set_crash_log_cause_and_message(ac, msg) \ + ({ long _ac = (long)(ac); __asm__ ( \ + "mov %[_msg], %[_cr_msg]\n\t" \ + "mov %[_ac], %[_cr_ac]" \ + : [_ac] "+&a" (_ac), \ + [_cr_msg] "=m" (gCRAnnotations.message), \ + [_cr_ac] "=m" (gCRAnnotations.abort_cause) \ + : [_msg] "r" (("" msg)) \ + ); }) +#define _os_set_crash_log_message(msg) \ + ({ long _clbr; __asm__ ( \ + "mov %[_msg], %[_cr_msg]" \ + : "=&a" (_clbr), \ + [_cr_msg] "=m" (gCRAnnotations.message) \ + : [_msg] "r" (("" msg)) \ + ); }) + +#elif defined(__arm__) + +#define __os_set_crash_log_cause_and_message_impl(msg, ac_expr, set_cause, ...) 
\ + ({ ac_expr; __asm__( \ + "push {r9, r10}\n\t" \ + \ + "movw r9, :lower16:(%[_msg] - 1f - 4)\n\t" \ + "movt r9, :upper16:(%[_msg] - 1f - 4)\n" \ + "1:\n\t" \ + "add r9, pc\n\t" \ + \ + "movw r10, :lower16:(3f - 2f - 4)\n\t" \ + "movt r10, :upper16:(3f - 2f - 4)\n" \ + "2:\n\t" \ + "add r10, pc\n\t" \ + "ldr r10, [r10]\n\t" \ + \ + "str r9, [r10, %[_msgo]]\n\t" \ + "mov r9, #0\n\t" \ + "str r9, [r10, %[_msgo] + 4]\n\t" \ + set_cause \ + "pop {r9, r10}\n\t" \ + \ + ".non_lazy_symbol_pointer\n" \ + "3:\n\t" \ + ".indirect_symbol _gCRAnnotations\n\t" \ + ".long 0\n\t" \ + ".previous" \ + :: [_msgo] "i" (__builtin_offsetof(typeof(gCRAnnotations), message)), \ + [_msg] "i" (("" msg)), \ + ## __VA_ARGS__); }) + +#define __os_set_crash_log_cause_and_message(ac, msg) \ + __os_set_crash_log_cause_and_message_impl(msg, \ + register long _ac asm("r8") = (long)(ac), \ + "strd %[_ac], r9, [r10, %[_aco]]\n\t", \ + [_aco] "i" (__builtin_offsetof(typeof(gCRAnnotations), abort_cause)), \ + [_ac] "r" (_ac)) +#define _os_set_crash_log_message(msg) \ + __os_set_crash_log_cause_and_message_impl(msg, (void)0, "") + +#elif defined(__arm64__) + +#define __os_set_crash_log_cause_and_message_impl(msg, ac_expr, set_cause, ...) \ + ({ ac_expr; __asm__( \ + "stp x20, x21, [sp, #-16]!\n\t" \ + "adrp x20, %[_msg]@PAGE\n\t" \ + "add x20, x20, %[_msg]@PAGEOFF\n\t" \ + "adrp x21, %[_cr]@PAGE\n\t" \ + "add x21, x21, %[_cr]@PAGEOFF\n\t" \ + "str x20, [x21, %[_msgo]]\n\t" \ + set_cause \ + "ldp x20, x21, [sp], #16" \ + :: [_cr] "i" (&gCRAnnotations), \ + [_msgo] "i" (__builtin_offsetof(typeof(gCRAnnotations), message)), \ + [_msg] "i" (("" msg)), \ + ## __VA_ARGS__); }) + +#define __os_set_crash_log_cast_ac(ac) \ + _Generic(ac, \ + const void *: (uint64_t)(uintptr_t)(ac), \ + void *: (uint64_t)(uintptr_t)(ac), \ + default: (uint64_t)(ac)) + +#define __os_set_crash_log_cause_and_message(ac, msg) \ + __os_set_crash_log_cause_and_message_impl(msg, \ + register uint64_t _ac asm("x8") = __os_set_crash_log_cast_ac(ac), \ + "str %[_ac], [x21, %[_aco]]\n\t", \ + [_aco] "i" (__builtin_offsetof(typeof(gCRAnnotations), abort_cause)), \ + [_ac] "r" (_ac)) +#define _os_set_crash_log_message(msg) \ + __os_set_crash_log_cause_and_message_impl(msg, (void)0, "") + +#else +#define __os_set_crash_log_cause_and_message(ac, msg) ({ \ + gCRAnnotations.abort_cause = (uint64_t)(int64_t)(ac); \ + CRSetCrashLogMessage(msg); \ + }) +#define _os_set_crash_log_message(msg) CRSetCrashLogMessage(msg) +#endif + +/*! + * @macro _os_set_crash_log_cause_and_message + * + * @brief + * Set an abort cause and message before likely crash. + * + * @discussion + * This macro is really meant to minimize register clobbering making sure that + * the context is minimally touched. + * + * - On Intel, %rax is used to store the abort cause + * - On arm and arm64, r8/x8 is used to store the abort cause, other registers + * are left untouched. 
+ * + * An excellent way to use this macro is for example using a wrapper such + * as below: + * + * + * OS_NOINLINE OS_NORETURN OS_COLD + * static void + * _my_type_corruption_abort(my_type_t object OS_UNUSED, + * my_other_type_t other OS_UNUSED, long code) + * { + * _os_set_crash_log_cause_and_message(code, "object is corrupt"); + * __builtin_trap(); + * } + * + * + * That wrapper when used: + * - is understood as being unlikely and never inlined (OS_COLD OS_NOINLINE) + * - captures the address of @a object as well as the one of the companion + * object @a other in registers that are easy to introspect in crash traces + * - captures the abort cause / error code + * + * @param ac + * The abort cause to set. If it is statically provably 0, then it's ignored. + * If the argument type is narrower than long, then it is sign-extended to long. + * + * @param msg + * The static string message to set + */ +#define _os_set_crash_log_cause_and_message(ac, msg) \ + __builtin_choose_expr(os_is_compile_time_constant(!(ac)), ({ \ + if (ac) { \ + __os_set_crash_log_cause_and_message(ac, msg); \ + } else { \ + _os_set_crash_log_message(msg); \ + } }), __os_set_crash_log_cause_and_message(ac, msg)) + +#define _os_set_crash_log_message_dynamic(msg) CRSetCrashLogMessage(msg) + +#else + +#define _os_set_crash_log_cause_and_message(ac, msg) ((void)(ac), (void)(msg)) +#define _os_set_crash_log_message(msg) ((void)(msg)) +#define _os_set_crash_log_message_dynamic(msg) ((void)(msg)) + +#endif // __has_include() + +/*! + * @macro OS_BUG_INTERNAL + * + * @brief + * Perform a register-preserving crash due to invalid library invariants. + * + * @param ac + * The abort cause to set (see _os_set_crash_log_cause_and_message). + * + * @param lib + * The name of the library. + * + * @param msg + * The static string message to append. + */ +#define OS_BUG_INTERNAL(ac, lib, msg) ({ \ + _os_set_crash_log_cause_and_message(ac, "BUG IN " lib ": " msg); \ + os_prevent_tail_call_optimization(); \ + __builtin_trap(); \ +}) + +/*! + * @macro OS_BUG_CLIENT + * + * @brief + * Perform a register-preserving crash due to an API misuse by a library client. + * + * @param ac + * The abort cause to set (see _os_set_crash_log_cause_and_message). + * + * @param lib + * The name of the library. + * + * @param msg + * The static string message to append. + */ +#define OS_BUG_CLIENT(ac, lib, msg) ({ \ + _os_set_crash_log_cause_and_message(ac, "BUG IN CLIENT OF " lib ": " msg); \ + os_prevent_tail_call_optimization(); \ + __builtin_trap(); \ +}) + +#endif // __OS_CRASHLOG_PRIVATE__ diff --git a/private/os/internal/atomic.h b/private/os/internal/atomic.h deleted file mode 100644 index f4fc18a..0000000 --- a/private/os/internal/atomic.h +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __OS_INTERNAL_ATOMIC__ -#define __OS_INTERNAL_ATOMIC__ - -#ifndef __OS_EXPOSE_INTERNALS_INDIRECT__ -/* - * Use c11 or c++11 std::atomic from instead - * - * XXX /!\ WARNING /!\ XXX - * - * This header file describes INTERNAL interfaces to libplatform used by other - * libsystem targets, which are subject to change in future releases of OS X - * and iOS. Any applications relying on these interfaces WILL break. - * - * If you are not a libsystem target, you should NOT EVER use these headers. - * Not even a little. - * - * XXX /!\ WARNING /!\ XXX - */ -#error "Please #include instead of this file directly." -#else - -// generate error during codegen -#define _os_atomic_unimplemented() \ - ({ __asm__(".err unimplemented"); }) - -#pragma mark - -#pragma mark memory_order - -typedef enum _os_atomic_memory_order { - _os_atomic_memory_order_relaxed, - _os_atomic_memory_order_consume, - _os_atomic_memory_order_acquire, - _os_atomic_memory_order_release, - _os_atomic_memory_order_acq_rel, - _os_atomic_memory_order_seq_cst, - _os_atomic_memory_order_ordered, - _os_atomic_memory_order_dependency, -} _os_atomic_memory_order; - -#if !OS_ATOMIC_UP - -#define os_atomic_memory_order_relaxed _os_atomic_memory_order_relaxed -#define os_atomic_memory_order_acquire _os_atomic_memory_order_acquire -#define os_atomic_memory_order_release _os_atomic_memory_order_release -#define os_atomic_memory_order_acq_rel _os_atomic_memory_order_acq_rel -#define os_atomic_memory_order_seq_cst _os_atomic_memory_order_seq_cst -#define os_atomic_memory_order_ordered _os_atomic_memory_order_seq_cst -#define os_atomic_memory_order_dependency _os_atomic_memory_order_acquire - -#else // OS_ATOMIC_UP - -#define os_atomic_memory_order_relaxed _os_atomic_memory_order_relaxed -#define os_atomic_memory_order_acquire _os_atomic_memory_order_relaxed -#define os_atomic_memory_order_release _os_atomic_memory_order_relaxed -#define os_atomic_memory_order_acq_rel _os_atomic_memory_order_relaxed -#define os_atomic_memory_order_seq_cst _os_atomic_memory_order_relaxed -#define os_atomic_memory_order_ordered _os_atomic_memory_order_relaxed -#define os_atomic_memory_order_dependency _os_atomic_memory_order_relaxed - -#endif // OS_ATOMIC_UP - -#pragma mark - -#pragma mark c11 - -#if !__has_extension(c_atomic) -#error "Please use a C11 compiler" -#endif - -#define os_atomic(type) type _Atomic - -#define _os_atomic_c11_atomic(p) \ - ((typeof(*(p)) _Atomic *)(p)) - -// This removes the _Atomic and volatile qualifiers on the type of *p -#define _os_atomic_basetypeof(p) \ - typeof(__c11_atomic_load(_os_atomic_c11_atomic(p), \ - _os_atomic_memory_order_relaxed)) - -#define _os_atomic_baseptr(p) \ - ((_os_atomic_basetypeof(p) *)(p)) - -#define _os_atomic_barrier(m) \ - __c11_atomic_thread_fence(os_atomic_memory_order_##m) -#define os_atomic_load(p, m) \ - __c11_atomic_load(_os_atomic_c11_atomic(p), os_atomic_memory_order_##m) -#define os_atomic_store(p, v, m) \ - __c11_atomic_store(_os_atomic_c11_atomic(p), v, \ - os_atomic_memory_order_##m) -#define os_atomic_xchg(p, v, m) \ - __c11_atomic_exchange(_os_atomic_c11_atomic(p), v, \ - os_atomic_memory_order_##m) -#define os_atomic_cmpxchg(p, e, v, m) \ - ({ _os_atomic_basetypeof(p) _r = (e); \ - __c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \ - &_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); }) -#define os_atomic_cmpxchgv(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \ - 
__c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \ - &_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \ - *(g) = _r; _b; }) -#define os_atomic_cmpxchgvw(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \ - __c11_atomic_compare_exchange_weak(_os_atomic_c11_atomic(p), \ - &_r, v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \ - *(g) = _r; _b; }) -#define _os_atomic_c11_op(p, v, m, o, op) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = \ - __c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \ - os_atomic_memory_order_##m); (typeof(_r))(_r op _v); }) -#define _os_atomic_c11_op_orig(p, v, m, o, op) \ - __c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), v, \ - os_atomic_memory_order_##m) - -#define os_atomic_add(p, v, m) \ - _os_atomic_c11_op((p), (v), m, add, +) -#define os_atomic_add_orig(p, v, m) \ - _os_atomic_c11_op_orig((p), (v), m, add, +) -#define os_atomic_sub(p, v, m) \ - _os_atomic_c11_op((p), (v), m, sub, -) -#define os_atomic_sub_orig(p, v, m) \ - _os_atomic_c11_op_orig((p), (v), m, sub, -) -#define os_atomic_and(p, v, m) \ - _os_atomic_c11_op((p), (v), m, and, &) -#define os_atomic_and_orig(p, v, m) \ - _os_atomic_c11_op_orig((p), (v), m, and, &) -#define os_atomic_or(p, v, m) \ - _os_atomic_c11_op((p), (v), m, or, |) -#define os_atomic_or_orig(p, v, m) \ - _os_atomic_c11_op_orig((p), (v), m, or, |) -#define os_atomic_xor(p, v, m) \ - _os_atomic_c11_op((p), (v), m, xor, ^) -#define os_atomic_xor_orig(p, v, m) \ - _os_atomic_c11_op_orig((p), (v), m, xor, ^) - -#define os_atomic_force_dependency_on(p, e) (p) -#define os_atomic_load_with_dependency_on(p, e) \ - os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) -#define os_atomic_load_with_dependency_on2o(p, f, e) \ - os_atomic_load_with_dependency_on(&(p)->f, e) - -#pragma mark - -#pragma mark generic - -#define os_atomic_thread_fence(m) _os_atomic_barrier(m) - -#define os_atomic_load2o(p, f, m) \ - os_atomic_load(&(p)->f, m) -#define os_atomic_store2o(p, f, v, m) \ - os_atomic_store(&(p)->f, (v), m) -#define os_atomic_xchg2o(p, f, v, m) \ - os_atomic_xchg(&(p)->f, (v), m) -#define os_atomic_cmpxchg2o(p, f, e, v, m) \ - os_atomic_cmpxchg(&(p)->f, (e), (v), m) -#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \ - os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) -#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ - os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) -#define os_atomic_add2o(p, f, v, m) \ - os_atomic_add(&(p)->f, (v), m) -#define os_atomic_add_orig2o(p, f, v, m) \ - os_atomic_add_orig(&(p)->f, (v), m) -#define os_atomic_sub2o(p, f, v, m) \ - os_atomic_sub(&(p)->f, (v), m) -#define os_atomic_sub_orig2o(p, f, v, m) \ - os_atomic_sub_orig(&(p)->f, (v), m) -#define os_atomic_and2o(p, f, v, m) \ - os_atomic_and(&(p)->f, (v), m) -#define os_atomic_and_orig2o(p, f, v, m) \ - os_atomic_and_orig(&(p)->f, (v), m) -#define os_atomic_or2o(p, f, v, m) \ - os_atomic_or(&(p)->f, (v), m) -#define os_atomic_or_orig2o(p, f, v, m) \ - os_atomic_or_orig(&(p)->f, (v), m) -#define os_atomic_xor2o(p, f, v, m) \ - os_atomic_xor(&(p)->f, (v), m) -#define os_atomic_xor_orig2o(p, f, v, m) \ - os_atomic_xor_orig(&(p)->f, (v), m) - -#define os_atomic_inc(p, m) \ - os_atomic_add((p), 1, m) -#define os_atomic_inc_orig(p, m) \ - os_atomic_add_orig((p), 1, m) -#define os_atomic_inc2o(p, f, m) \ - os_atomic_add2o(p, f, 1, m) -#define os_atomic_inc_orig2o(p, f, m) \ - os_atomic_add_orig2o(p, f, 1, m) -#define os_atomic_dec(p, m) \ - os_atomic_sub((p), 1, m) -#define os_atomic_dec_orig(p, 
m) \ - os_atomic_sub_orig((p), 1, m) -#define os_atomic_dec2o(p, f, m) \ - os_atomic_sub2o(p, f, 1, m) -#define os_atomic_dec_orig2o(p, f, m) \ - os_atomic_sub_orig2o(p, f, 1, m) - -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - bool _result = false; \ - typeof(p) _p = (p); \ - ov = os_atomic_load(_p, relaxed); \ - do { \ - __VA_ARGS__; \ - _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (os_unlikely(!_result)); \ - _result; \ - }) -#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \ - os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) -#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ - ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) -#define os_atomic_rmw_loop_give_up(expr) \ - os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) - - -#endif // __OS_EXPOSE_INTERNALS_INDIRECT__ - -#endif // __OS_ATOMIC__ diff --git a/private/os/internal/crashlog.h b/private/os/internal/crashlog.h deleted file mode 100644 index 41417ab..0000000 --- a/private/os/internal/crashlog.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __OS_INTERNAL_CRASHLOG__ -#define __OS_INTERNAL_CRASHLOG__ - -#ifndef __OS_EXPOSE_INTERNALS_INDIRECT__ -/* - * XXX /!\ WARNING /!\ XXX - * - * This header file describes INTERNAL interfaces to libplatform used by other - * libsystem targets, which are subject to change in future releases of OS X - * and iOS. Any applications relying on these interfaces WILL break. - * - * If you are not a libsystem target, you should NOT EVER use these headers. - * Not even a little. - * - * XXX /!\ WARNING /!\ XXX - */ -#error "Please #include instead of this file directly." -#else - - -#define _os_set_crash_log_cause_and_message(ac, msg) ((void)(ac), (void)(msg)) -#define _os_set_crash_log_message(msg) ((void)(msg)) -#define _os_set_crash_log_message_dynamic(msg) ((void)(msg)) - - -#endif // __OS_EXPOSE_INTERNALS_INDIRECT__ - -#endif // __OS_INTERNAL_CRASHLOG__ diff --git a/private/os/internal/internal_shared.h b/private/os/internal/internal_shared.h deleted file mode 100644 index c0e4e81..0000000 --- a/private/os/internal/internal_shared.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2015 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __OS_INTERNAL_SHARED__ -#define __OS_INTERNAL_SHARED__ - -#ifndef __OS_EXPOSE_INTERNALS__ -/* - * XXX /!\ WARNING /!\ XXX - * - * This header file describes INTERNAL interfaces to libplatform used by other - * libsystem targets, which are subject to change in future releases of Mac - * OS X and iOS. Any applications relying on these interfaces WILL break. - * - * If you are not a libsystem target, you should NOT EVER use these headers. - * Not even a little. - * - * XXX /!\ WARNING /!\ XXX - */ -#error "these internals are not for general use outside of libsystem" -#else - -#ifndef __OS_EXPOSE_INTERNALS_INDIRECT__ -#define __OS_EXPOSE_INTERNALS_INDIRECT__ -#endif - -#include -#include -#include -#if defined(__arm__) || defined(__arm64__) -#include -#endif - -#include -#include -#include -#include - - -#endif // __OS_EXPOSE_INTERNALS__ - -#endif // __OS_INTERNAL_SHARED__ diff --git a/private/os/lock_private.h b/private/os/lock_private.h index 92abefe..ffc1991 100644 --- a/private/os/lock_private.h +++ b/private/os/lock_private.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -36,7 +37,7 @@ OS_ASSUME_NONNULL_BEGIN * Low-level lock SPI */ -#define OS_LOCK_SPI_VERSION 20171006 +#define OS_LOCK_SPI_VERSION 20190424 /*! * @typedef os_lock_t @@ -314,24 +315,27 @@ OS_EXPORT OS_NOTHROW OS_NONNULL_ALL void os_unfair_lock_lock_with_options(os_unfair_lock_t lock, os_unfair_lock_options_t options); -/*! @group os_unfair_lock no-TSD interfaces +/*! + * @group os_unfair_lock no-TSD interfaces * * Like the above, but don't require being on a thread with valid TSD, so they - * can be called from injected mach-threads. The normal routines use the TSD - * value for mach_thread_self(), these routines use MACH_PORT_DEAD for the - * locked value instead. As a result, they will be unable to resolve priority - * inversions. + * can be called from injected mach-threads. * - * This should only be used by libpthread. + * The normal routines use the TSD value for mach_thread_self(), but mach + * threads do not have TSDs. Instead these functions require the value for + * mach_thread_self() to be passed explicitly. * + * This should only be used directly by libpthread. */ -OS_UNFAIR_LOCK_AVAILABILITY +__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_EXPORT OS_NOTHROW OS_NONNULL_ALL -void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock); +void os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock, + os_unfair_lock_options_t options, mach_port_t mts); -OS_UNFAIR_LOCK_AVAILABILITY +__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_EXPORT OS_NOTHROW OS_NONNULL_ALL -void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock); +void os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, mach_port_t mts); + /*! @group os_unfair_recursive_lock SPI * @@ -604,6 +608,57 @@ OS_ASSUME_NONNULL_BEGIN #define OS_UNFAIR_LOCK_UNLOCKED {0} #endif +/*! + * @function os_unfair_lock_lock_no_tsd_inline + * + * @abstract + * Locks an os_unfair_lock, without requiring valid TSD. + * + * This should only be used directly by libpthread. + * + * @param lock + * Pointer to an os_unfair_lock. 
+ */ +__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL +void +os_unfair_lock_lock_no_tsd_inline(os_unfair_lock_t lock, + os_unfair_lock_options_t options, mach_port_t mts) +{ + os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts }; + if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)( + (_os_atomic_unfair_lock*)lock, &unlocked, locked, + OSLOCK_STD(memory_order_acquire), + OSLOCK_STD(memory_order_relaxed))) { + return os_unfair_lock_lock_no_tsd(lock, options, mts); + } +} + +/*! + * @function os_unfair_lock_unlock_no_tsd_inline + * + * @abstract + * Unlocks an os_unfair_lock, without requiring valid TSD. + * + * This should only be used directly by libpthread. + * + * @param lock + * Pointer to an os_unfair_lock. + */ +__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL +void +os_unfair_lock_unlock_no_tsd_inline(os_unfair_lock_t lock, mach_port_t mts) +{ + os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts }; + if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)( + (_os_atomic_unfair_lock*)lock, &locked, unlocked, + OSLOCK_STD(memory_order_release), + OSLOCK_STD(memory_order_relaxed))) { + return os_unfair_lock_unlock_no_tsd(lock, mts); + } +} + /*! * @function os_unfair_lock_lock_inline * @@ -720,58 +775,6 @@ os_unfair_lock_unlock_inline(os_unfair_lock_t lock) } } -/*! - * @function os_unfair_lock_lock_inline_no_tsd_4libpthread - * - * @abstract - * Locks an os_unfair_lock, without requiring valid TSD. - * - * This should only be used by libpthread. - * - * @param lock - * Pointer to an os_unfair_lock. - */ -OS_UNFAIR_LOCK_AVAILABILITY -OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL -void -os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock) -{ - uint32_t mts = MACH_PORT_DEAD; - os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts }; - if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)( - (_os_atomic_unfair_lock*)lock, &unlocked, locked, - OSLOCK_STD(memory_order_acquire), - OSLOCK_STD(memory_order_relaxed))) { - return os_unfair_lock_lock_no_tsd_4libpthread(lock); - } -} - -/*! - * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread - * - * @abstract - * Unlocks an os_unfair_lock, without requiring valid TSD. - * - * This should only be used by libpthread. - * - * @param lock - * Pointer to an os_unfair_lock. - */ -OS_UNFAIR_LOCK_AVAILABILITY -OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL -void -os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock) -{ - uint32_t mts = MACH_PORT_DEAD; - os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts }; - if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)( - (_os_atomic_unfair_lock*)lock, &locked, unlocked, - OSLOCK_STD(memory_order_release), - OSLOCK_STD(memory_order_relaxed))) { - return os_unfair_lock_unlock_no_tsd_4libpthread(lock); - } -} - OS_ASSUME_NONNULL_END #undef OSLOCK_STD diff --git a/src/atomics/OSAtomicFifo.h b/src/atomics/OSAtomicFifo.h new file mode 100644 index 0000000..02d6f1a --- /dev/null +++ b/src/atomics/OSAtomicFifo.h @@ -0,0 +1,35 @@ +// +// OSAtomicFifo.h +// libatomics +// +// Created by Rokhini Prabhu on 4/7/20. 
+// + +#ifndef _OS_ATOMIC_FIFO_QUEUE_ +#define _OS_ATOMIC_FIFO_QUEUE_ + +#if defined(__arm64e__) && __has_feature(ptrauth_calls) +#include + +#define COMMPAGE_PFZ_BASE_AUTH_KEY ptrauth_key_process_independent_code +#define COMMPAGE_PFZ_FN_AUTH_KEY ptrauth_key_function_pointer +#define COMMPAGE_PFZ_BASE_DISCRIMINATOR ptrauth_string_discriminator("pfz") + +#define COMMPAGE_PFZ_BASE_PTR __ptrauth(COMMPAGE_PFZ_BASE_AUTH_KEY, 1, COMMPAGE_PFZ_BASE_DISCRIMINATOR) + +#define SIGN_PFZ_FUNCTION_PTR(ptr) ptrauth_sign_unauthenticated(ptr, COMMPAGE_PFZ_FN_AUTH_KEY, 0) + +#else /* defined(__arm64e__) && __has_feature(ptrauth_calls) */ + +#define COMMPAGE_PFZ_BASE_AUTH_KEY 0 +#define COMMPAGE_PFZ_FN_AUTH_KEY 0 +#define COMMPAGE_PFZ_BASE_DISCRIMINATOR 0 + +#define COMMPAGE_PFZ_BASE_PTR + +#define SIGN_PFZ_FUNCTION_PTR(ptr) ptr +#endif /* defined(__arm64e__) && __has_feature(ptrauth_calls) */ + +extern void *COMMPAGE_PFZ_BASE_PTR commpage_pfz_base; + +#endif /* _OS_ATOMIC_FIFO_QUEUE_ */ diff --git a/src/atomics/arm64/OSAtomic.c b/src/atomics/arm64/OSAtomic.c new file mode 100644 index 0000000..9a730fa --- /dev/null +++ b/src/atomics/arm64/OSAtomic.c @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT + +/* + * This file implements the following functions for the arm64 architecture. 
+ * + * void OSAtomicFifoEnqueue( OSFifoQueueHead *__list, void *__new, + * size_t __offset); + * void* OSAtomicFifoDequeue( OSFifoQueueHead *__list, size_t __offset); + * + */ + +#include +#include + +#include "libkern/OSAtomic.h" +#include "../OSAtomicFifo.h" + +typedef void (OSAtomicFifoEnqueue_t)(OSFifoQueueHead *, void *, size_t); +typedef void *(OSAtomicFifoDequeue_t)(OSFifoQueueHead *, size_t); + +void OSAtomicFifoEnqueue(OSFifoQueueHead *__list, void *__new, size_t __offset) +{ + void *addr = commpage_pfz_base; + addr += _COMM_PAGE_TEXT_ATOMIC_ENQUEUE; + + OSAtomicFifoEnqueue_t *OSAtomicFifoEnqueueInternal = SIGN_PFZ_FUNCTION_PTR(addr); + + return OSAtomicFifoEnqueueInternal(__list, __new, __offset); +} + +void * OSAtomicFifoDequeue( OSFifoQueueHead *__list, size_t __offset) +{ + void *addr = commpage_pfz_base; + addr += _COMM_PAGE_TEXT_ATOMIC_DEQUEUE; + + OSAtomicFifoDequeue_t *OSAtomicFifoDequeueInternal = SIGN_PFZ_FUNCTION_PTR(addr); + + return OSAtomicFifoDequeueInternal(__list, __offset); +} + +#endif diff --git a/src/atomics/init.c b/src/atomics/init.c index 58e9ed5..7bc1300 100644 --- a/src/atomics/init.c +++ b/src/atomics/init.c @@ -31,14 +31,17 @@ #include #include +#include "OSAtomicFifo.h" + __attribute__ ((visibility ("hidden"))) -uintptr_t commpage_pfz_base=0; +void *COMMPAGE_PFZ_BASE_PTR commpage_pfz_base = 0; __attribute__ ((visibility ("hidden"))) void __pfz_setup(const char *apple[]) { const char *p = _simple_getenv(apple, "pfz"); + uintptr_t base = 0; if (p != NULL) { const char *q; @@ -48,16 +51,16 @@ __pfz_setup(const char *apple[]) } for (q = p + 2; *q; q++) { - commpage_pfz_base <<= 4; // *= 16 + base <<= 4; // *= 16 if ('0' <= *q && *q <= '9') { - commpage_pfz_base += *q - '0'; + base += *q - '0'; } else if ('a' <= *q && *q <= 'f') { - commpage_pfz_base += *q - 'a' + 10; + base += *q - 'a' + 10; } else if ('A' <= *q && *q <= 'F') { - commpage_pfz_base += *q - 'A' + 10; + base += *q - 'A' + 10; } else { - commpage_pfz_base=0; + base=0; goto __pfz_setup_clear; } } @@ -66,7 +69,8 @@ __pfz_setup_clear: bzero((void *)((uintptr_t)p - 4), strlen(p) + 4); } - if (commpage_pfz_base == 0) { - commpage_pfz_base = _COMM_PAGE_TEXT_START; - } + if (base != 0) { + commpage_pfz_base = base; + } } + diff --git a/src/atomics/x86_64/OSAtomic.s b/src/atomics/x86_64/OSAtomic.s index 543aacb..033194e 100644 --- a/src/atomics/x86_64/OSAtomic.s +++ b/src/atomics/x86_64/OSAtomic.s @@ -3,14 +3,14 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,7 +18,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
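For reference, a caller-side sketch of the FIFO interface implemented above, assuming the usual <libkern/OSAtomic.h> declarations (OSFifoQueueHead, OS_ATOMIC_FIFO_QUEUE_INIT) and ignoring deprecation warnings. The queue routines never inspect the element type; they only need the byte offset of the link pointer inside each node, which is why offsetof() is the natural way to supply the offset argument.

#include <stddef.h>
#include <stdio.h>
#include <libkern/OSAtomic.h>

struct job {
	int	 id;
	void	*link;			/* used by the queue to chain elements */
};

int
main(void)
{
	OSFifoQueueHead q = OS_ATOMIC_FIFO_QUEUE_INIT;
	struct job a = { .id = 1, .link = NULL };
	struct job b = { .id = 2, .link = NULL };

	OSAtomicFifoEnqueue(&q, &a, offsetof(struct job, link));
	OSAtomicFifoEnqueue(&q, &b, offsetof(struct job, link));

	struct job *j;
	while ((j = OSAtomicFifoDequeue(&q, offsetof(struct job, link))) != NULL) {
		printf("dequeued job %d\n", j->id);	/* 1 then 2: FIFO order */
	}
	return 0;
}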
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -91,7 +91,7 @@ OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Orig, 2) OS_ATOMIC_FUNCTION_START(OSAtomicAnd32OrigBarrier, 2) ATOMIC_ARITHMETIC(andl, ATOMIC_RET_ORIG) ret - + // uint32_t OSAtomicOr32Orig( uint32_t mask, uint32_t *value); OS_ATOMIC_FUNCTION_START(OSAtomicOr32Orig, 2) OS_ATOMIC_FUNCTION_START(OSAtomicOr32OrigBarrier, 2) @@ -251,7 +251,7 @@ OS_ATOMIC_FUNCTION_START(OSAtomicDequeue, 2) * * void OSAtomicFifoEnqueue( OSFifoQueueHead *list, void *new, size_t offset); */ -OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue, 2) +OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue$VARIANT$PFZ, 2) pushq %rbx xorl %ebx,%ebx // clear "preemption pending" flag movq _commpage_pfz_base(%rip),%rcx @@ -266,7 +266,7 @@ OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue, 2) /* void* OSAtomicFifoDequeue( OSFifoQueueHead *list, size_t offset); */ -OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue, 2) +OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue$VARIANT$PFZ, 2) pushq %rbx xorl %ebx,%ebx // clear "preemption pending" flag movq _commpage_pfz_base(%rip), %rcx @@ -276,7 +276,7 @@ OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue, 2) testl %ebx,%ebx // pending preemption? jz 1f call _preempt // call into the kernel to pfz_exit -1: +1: popq %rbx ret // ptr to 1st element in Q in %rax diff --git a/src/atomics/x86_64/OSAtomicFifo.c b/src/atomics/x86_64/OSAtomicFifo.c new file mode 100644 index 0000000..fc65588 --- /dev/null +++ b/src/atomics/x86_64/OSAtomicFifo.c @@ -0,0 +1,65 @@ +#include +#include + +#define OS_UNFAIR_LOCK_INLINE 1 +#include "os/lock_private.h" + +typedef volatile struct { + void *first; + void *last; + os_unfair_lock lock; +} __attribute__ ((aligned (16))) UnfairFifoQueueHead; + +#define set_next(element, offset, new) \ + *((void**)(((uintptr_t)element) + offset)) = new; +#define get_next(element, offset) \ + *((void**)(((uintptr_t)element) + offset)); + +// This is a naive implementation using unfair locks to support translated +// x86_64 apps only. 
Native x86_64 and arm64 apps will use the +// PFZ implementations +void OSAtomicFifoEnqueue$VARIANT$UnfairLock(UnfairFifoQueueHead *list, void *new, size_t offset) { + set_next(new, offset, NULL); + + os_unfair_lock_lock_inline(&list->lock); + if (list->last == NULL) { + list->first = new; + } else { + set_next(list->last, offset, new); + } + list->last = new; + os_unfair_lock_unlock_inline(&list->lock); +} + +void* OSAtomicFifoDequeue$VARIANT$UnfairLock(UnfairFifoQueueHead *list, size_t offset) { + os_unfair_lock_lock_inline(&list->lock); + void *element = list->first; + if (element != NULL) { + void *next = get_next(element, offset); + if (next == NULL) { + list->last = NULL; + } + list->first = next; + } + os_unfair_lock_unlock_inline(&list->lock); + + return element; +} + +#define MakeResolver(name) \ + void * name ## Resolver(void) __asm__("_" #name); \ + void * name ## Resolver(void) { \ + __asm__(".symbol_resolver _" #name); \ + uint64_t capabilities = *(uint64_t*)_COMM_PAGE_CPU_CAPABILITIES64; \ + if (capabilities & kIsTranslated) { \ + return name ## $VARIANT$UnfairLock; \ + } else { \ + return name ## $VARIANT$PFZ; \ + } \ + } + +void OSAtomicFifoEnqueue$VARIANT$PFZ(OSFifoQueueHead *, void *, size_t); +void* OSAtomicFifoDequeue$VARIANT$PFZ(OSFifoQueueHead *, size_t); + +MakeResolver(OSAtomicFifoEnqueue) +MakeResolver(OSAtomicFifoDequeue) diff --git a/src/cachecontrol/arm64/cache.s b/src/cachecontrol/arm64/cache.s index 0774e34..d18f19a 100644 --- a/src/cachecontrol/arm64/cache.s +++ b/src/cachecontrol/arm64/cache.s @@ -25,6 +25,7 @@ #include #include + #define MMU_I_CLINE 6 // cache line size as 1< 0 ? - MOV64 x8, _COMM_PAGE_CPU_CAPABILITIES - ldr w8, [x8] and x9, x0, #~((1<act, flavor, state, &count); *size = count * sizeof(natural_t); return ret; @@ -445,7 +445,7 @@ platform_thread_set_state(platform_thread_t thread, size_t size) { kern_return_t ret; - mach_msg_type_number_t count = (int)size / (int)sizeof(natural_t); + mach_msg_type_number_t count = (int)(size / sizeof(natural_t)); ret = thread_set_state(thread->act, flavor, (thread_state_t)state, count); return ret; } diff --git a/src/os/atomic.c b/src/os/atomic.c index 778611a..a64f7ca 100644 --- a/src/os/atomic.c +++ b/src/os/atomic.c @@ -22,7 +22,7 @@ #include "resolver.h" #include "libkern/OSAtomic.h" -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR +#if defined(__arm__) || defined(__arm64__) OS_ATOMIC_EXPORT int32_t OSAtomicAdd32(int32_t v, volatile int32_t *p); @@ -389,43 +389,63 @@ typedef struct { long gencount; } _OSQueueHead; - -void -OSAtomicEnqueue(OSQueueHead *list, void *new, size_t offset) +OS_ALWAYS_INLINE +static inline void +_OSAtomicEnqueue_llsc(OSQueueHead *list, void *new, size_t offset) { void * volatile *headptr = &(((_OSQueueHead*)list)->item); void * volatile *nextptr = (void*)((char*)new + offset); - void *head, *next; + void *head, *tmp, *next; head = os_atomic_load(headptr, relaxed); next = new; do { - *nextptr = head; - } while (!os_atomic_cmpxchgvw(headptr, head, next, &head, release)); + *nextptr = tmp = head; + head = os_atomic_load_exclusive(headptr, relaxed); + } while (tmp != head || !os_atomic_store_exclusive(headptr, next, release)); } -void* -OSAtomicDequeue(OSQueueHead *list, size_t offset) +OS_ALWAYS_INLINE +static inline void * +_OSAtomicDequeue_llsc(OSQueueHead *list, size_t offset) { void * volatile *headptr = &(((_OSQueueHead*)list)->item); void * volatile *nextptr; void *head, *next; - os_atomic_rmw_loop(headptr, head, next, acquire, { - if (!head) os_atomic_rmw_loop_give_up(break); + 
do { + head = os_atomic_load_exclusive(headptr, acquire); + if (!head) { + os_atomic_clear_exclusive(); + break; + } nextptr = (void*)((char*)head + offset); next = *nextptr; - }); + } while (unlikely(!os_atomic_store_exclusive(headptr, next, relaxed))); + return head; } +void +OSAtomicEnqueue(OSQueueHead *list, void *new, size_t offset) +{ + return _OSAtomicEnqueue_llsc(list, new, offset); +} + +void* +OSAtomicDequeue(OSQueueHead *list, size_t offset) +{ + return _OSAtomicDequeue_llsc(list, offset); +} + + void OSMemoryBarrier(void) { os_atomic_thread_fence(seq_cst); } -#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR +#endif // defined(__arm__) || defined(__arm64__) struct _os_empty_files_are_not_c_files; diff --git a/src/os/lock.c b/src/os/lock.c index 6d120be..a01eb67 100644 --- a/src/os/lock.c +++ b/src/os/lock.c @@ -72,8 +72,7 @@ _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value) #pragma mark - #pragma mark OSSpinLock -OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l); - +OS_ATOMIC_EXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l); OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l); OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l); OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l); @@ -85,10 +84,11 @@ static const OSSpinLock _OSSpinLockLocked = 1; static const OSSpinLock _OSSpinLockLocked = -1; #endif - - #if OS_ATOMIC_UP // Don't spin on UP +#elif defined(__arm__) || defined(__arm64__) +#define OS_LOCK_SPIN_SPIN_TRIES 100 +#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe() #else #define OS_LOCK_SPIN_SPIN_TRIES 1000 #define OS_LOCK_SPIN_PAUSE() os_hardware_pause() @@ -98,13 +98,19 @@ OS_ALWAYS_INLINE static uint64_t _os_lock_yield_deadline(mach_msg_timeout_t timeout) { - uint64_t abstime = timeout * NSEC_PER_MSEC; -#if !(defined(__i386__) || defined(__x86_64__)) + uint64_t abstime = timeout; +#if defined(__arm__) + // some armv7 targets do not have div, like the armv7 arch + // so hardcode the most typical clock resolution it has + // as we don't really need accuracy here anyway + abstime *= NSEC_PER_MSEC * 128 / 3; +#elif defined(__i386__) || defined(__x86_64__) + // abstime is in nanoseconds +#else mach_timebase_info_data_t tbi; kern_return_t kr = mach_timebase_info(&tbi); if (kr) return UINT64_MAX; - abstime *= tbi.denom; - abstime /= tbi.numer; + abstime *= (NSEC_PER_MSEC * tbi.denom / tbi.numer); #endif return mach_absolute_time() + abstime; } @@ -147,6 +153,50 @@ _OSSpinLockLockSlow(volatile OSSpinLock *l) { return _OSSpinLockLockYield(l); // Don't spin on UP } +#elif defined(__arm64__) +// Exclusive monitor must be held during WFE +#if defined(__ARM_ARCH_8_2__) +void +_OSSpinLockLockSlow(volatile OSSpinLock *l) +{ + uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES; + OSSpinLock lock; +_spin: + while (unlikely(lock = os_atomic_load_exclusive(l, relaxed))) { + if (unlikely(lock != _OSSpinLockLocked)) { + os_atomic_clear_exclusive(); + return _os_lock_corruption_abort((void *)l, (uintptr_t)lock); + } + if (unlikely(!tries--)) { + os_atomic_clear_exclusive(); + return _OSSpinLockLockYield(l); + } + OS_LOCK_SPIN_PAUSE(); + } + os_atomic_clear_exclusive(); + bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire); + if (likely(r)) return; + goto _spin; +} +#else // !__ARM_ARCH_8_2__ +void +_OSSpinLockLockSlow(volatile OSSpinLock *l) +{ + uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES; + OSSpinLock lock; + os_atomic_rmw_loop(l, lock, _OSSpinLockLocked, acquire, if (unlikely(lock)){ + if (unlikely(lock 
!= _OSSpinLockLocked)) { + os_atomic_rmw_loop_give_up(return + _os_lock_corruption_abort((void *)l, (uintptr_t)lock)); + } + if (unlikely(!tries--)) { + os_atomic_rmw_loop_give_up(return _OSSpinLockLockYield(l)); + } + OS_LOCK_SPIN_PAUSE(); + continue; + }); +} +#endif // !__ARM_ARCH_8_2__ #else // !OS_ATOMIC_UP void _OSSpinLockLockSlow(volatile OSSpinLock *l) @@ -168,7 +218,6 @@ _spin: #endif // !OS_ATOMIC_UP - #if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR typedef struct _os_nospin_lock_s *_os_nospin_lock_t; @@ -342,7 +391,7 @@ _handoff: // Redrive the handoff every 1ms until switching to wait if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++; } - bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner, + bool r = os_atomic_cmpxchgv(&l->osl_owner, MACH_PORT_NULL, self, &owner, acquire); if (likely(r)) return; goto _handoff; @@ -352,7 +401,7 @@ void _os_lock_handoff_lock(_os_lock_handoff_t l) { os_lock_owner_t self = _os_lock_owner_get_self(); - bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire); + bool r = os_atomic_cmpxchg(&l->osl_owner, MACH_PORT_NULL, self, acquire); if (likely(r)) return; return _os_lock_handoff_lock_slow(l); } @@ -361,14 +410,14 @@ bool _os_lock_handoff_trylock(_os_lock_handoff_t l) { os_lock_owner_t self = _os_lock_owner_get_self(); - bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire); + bool r = os_atomic_cmpxchg(&l->osl_owner, MACH_PORT_NULL, self, acquire); return r; } void _os_lock_handoff_unlock(_os_lock_handoff_t l) { - os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release); + os_atomic_store(&l->osl_owner, MACH_PORT_NULL, release); } @@ -409,13 +458,10 @@ OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock, OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock); OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock); -OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread( - os_unfair_lock_t lock); -OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread( - os_unfair_lock_t lock); -OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc( - os_unfair_lock_t lock, os_unfair_lock_options_t options); -OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock); +OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock, + os_unfair_lock_options_t options, mach_port_t mts); +OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, + mach_port_t mts); OS_NOINLINE OS_NORETURN OS_COLD void _os_unfair_lock_recursive_abort(os_lock_owner_t owner); @@ -462,8 +508,8 @@ _os_unfair_lock_corruption_abort(os_ulock_value_t current) OS_NOINLINE static void -_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self, - os_unfair_lock_options_t options) +_os_unfair_lock_lock_slow(_os_unfair_lock_t l, + os_unfair_lock_options_t options, os_lock_owner_t self) { os_unfair_lock_options_t allow_anonymous_owner = options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER; @@ -472,7 +518,7 @@ _os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self, __LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options"); } os_ulock_value_t current, new, waiters_mask = 0; - while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) != + while (unlikely((current = os_atomic_load(&l->oul_value, relaxed)) != OS_LOCK_NO_OWNER)) { _retry: if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) { @@ -481,7 +527,7 @@ _retry: new = current & ~OS_ULOCK_NOWAITERS_BIT; if (current != new) { // 
Clear nowaiters bit in lock value before waiting - if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, ¤t, + if (!os_atomic_cmpxchgv(&l->oul_value, current, new, ¤t, relaxed)){ continue; } @@ -507,15 +553,15 @@ _retry: } } new = self & ~waiters_mask; - bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new, + bool r = os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, new, ¤t, acquire); if (unlikely(!r)) goto _retry; } OS_NOINLINE static void -_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current, - os_lock_owner_t self, os_unfair_lock_options_t options) +_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_lock_owner_t self, + os_ulock_value_t current, os_unfair_lock_options_t options) { os_unfair_lock_options_t allow_anonymous_owner = options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER; @@ -547,9 +593,9 @@ os_unfair_lock_lock(os_unfair_lock_t lock) { _os_unfair_lock_t l = (_os_unfair_lock_t)lock; os_lock_owner_t self = _os_lock_owner_get_self(); - bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire); + bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire); if (likely(r)) return; - return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE); + return _os_unfair_lock_lock_slow(l, OS_UNFAIR_LOCK_NONE, self); } void @@ -558,9 +604,9 @@ os_unfair_lock_lock_with_options(os_unfair_lock_t lock, { _os_unfair_lock_t l = (_os_unfair_lock_t)lock; os_lock_owner_t self = _os_lock_owner_get_self(); - bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire); + bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire); if (likely(r)) return; - return _os_unfair_lock_lock_slow(l, self, options); + return _os_unfair_lock_lock_slow(l, options, self); } bool @@ -568,7 +614,7 @@ os_unfair_lock_trylock(os_unfair_lock_t lock) { _os_unfair_lock_t l = (_os_unfair_lock_t)lock; os_lock_owner_t self = _os_lock_owner_get_self(); - bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire); + bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire); return r; } @@ -578,33 +624,29 @@ os_unfair_lock_unlock(os_unfair_lock_t lock) _os_unfair_lock_t l = (_os_unfair_lock_t)lock; os_lock_owner_t self = _os_lock_owner_get_self(); os_ulock_value_t current; - current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release); + current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release); if (likely(current == self)) return; - return _os_unfair_lock_unlock_slow(l, current, self, 0); + return _os_unfair_lock_unlock_slow(l, self, current, 0); } void -os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock) +os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock, + os_unfair_lock_options_t options, mach_port_t self) { _os_unfair_lock_t l = (_os_unfair_lock_t)lock; - os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER; - bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire); + bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire); if (likely(r)) return; - return _os_unfair_lock_lock_slow(l, self, - OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION| - OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER); + return _os_unfair_lock_lock_slow(l, options, self); } void -os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock) +os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, mach_port_t self) { _os_unfair_lock_t l = (_os_unfair_lock_t)lock; - os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER; os_ulock_value_t current; - current = os_atomic_xchg2o(l, oul_value, 
OS_LOCK_NO_OWNER, release); + current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release); if (likely(current == self)) return; - return _os_unfair_lock_unlock_slow(l, current, self, - OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER); + return _os_unfair_lock_unlock_slow(l, self, current, 0); } @@ -613,7 +655,7 @@ os_unfair_lock_assert_owner(os_unfair_lock_t lock) { _os_unfair_lock_t l = (_os_unfair_lock_t)lock; os_lock_owner_t self = _os_lock_owner_get_self(); - os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed); + os_ulock_value_t current = os_atomic_load(&l->oul_value, relaxed); if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) { __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: " "Lock unexpectedly not owned by current thread"); @@ -625,7 +667,7 @@ os_unfair_lock_assert_not_owner(os_unfair_lock_t lock) { _os_unfair_lock_t l = (_os_unfair_lock_t)lock; os_lock_owner_t self = _os_lock_owner_get_self(); - os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed); + os_ulock_value_t current = os_atomic_load(&l->oul_value, relaxed); if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) { __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: " "Lock unexpectedly owned by current thread"); @@ -649,9 +691,6 @@ void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock); OS_ATOMIC_EXPORT bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock); -OS_ATOMIC_EXPORT -void os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock); - static inline os_lock_owner_t _os_unfair_lock_owner(os_unfair_lock_t lock) @@ -676,7 +715,7 @@ os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock, os_lock_owner_t cur, self = _os_lock_owner_get_self(); _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock; - if (likely(os_atomic_cmpxchgv2o(l, oul_value, + if (likely(os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, self, &cur, acquire))) { return; } @@ -686,7 +725,7 @@ os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock, return; } - return _os_unfair_lock_lock_slow(l, self, options); + return _os_unfair_lock_lock_slow(l, options, self); } bool @@ -695,7 +734,7 @@ os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock) os_lock_owner_t cur, self = _os_lock_owner_get_self(); _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock; - if (likely(os_atomic_cmpxchgv2o(l, oul_value, + if (likely(os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, self, &cur, acquire))) { return true; } @@ -725,9 +764,9 @@ _os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock, _os_unfair_lock_t l = (_os_unfair_lock_t)lock; os_ulock_value_t current; - current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release); + current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release); if (likely(current == self)) return; - return _os_unfair_lock_unlock_slow(l, current, self, 0); + return _os_unfair_lock_unlock_slow(l, self, current, 0); } void @@ -749,6 +788,7 @@ os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock) return false; } + void os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock) { @@ -817,7 +857,7 @@ _os_nospin_lock_lock_slow(_os_nospin_lock_t l) os_lock_owner_t self = _os_lock_owner_get_self(); os_ulock_value_t current, new, waiters_mask = 0; uint32_t timeout = 1; - while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) != + while (unlikely((current = os_atomic_load(&l->oul_value, relaxed)) != OS_LOCK_NO_OWNER)) { 
_retry: new = current & ~OS_ULOCK_NOWAITERS_BIT; @@ -825,7 +865,7 @@ _retry: // be 1, check that new didn't become 0 (unlocked) by clearing this bit if (current != new && new) { // Clear nowaiters bit in lock value before waiting - if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, ¤t, + if (!os_atomic_cmpxchgv(&l->oul_value, current, new, ¤t, relaxed)){ continue; } @@ -851,7 +891,7 @@ _retry: } } new = self & ~waiters_mask; - bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new, + bool r = os_atomic_cmpxchgv(&l->oul_value, OS_LOCK_NO_OWNER, new, ¤t, acquire); if (unlikely(!r)) goto _retry; } @@ -887,7 +927,7 @@ void _os_nospin_lock_lock(_os_nospin_lock_t l) { os_lock_owner_t self = _os_lock_owner_get_self(); - bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire); + bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire); if (likely(r)) return; return _os_nospin_lock_lock_slow(l); } @@ -896,7 +936,7 @@ bool _os_nospin_lock_trylock(_os_nospin_lock_t l) { os_lock_owner_t self = _os_lock_owner_get_self(); - bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire); + bool r = os_atomic_cmpxchg(&l->oul_value, OS_LOCK_NO_OWNER, self, acquire); return r; } @@ -905,7 +945,7 @@ _os_nospin_lock_unlock(_os_nospin_lock_t l) { os_lock_owner_t self = _os_lock_owner_get_self(); os_ulock_value_t current; - current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release); + current = os_atomic_xchg(&l->oul_value, OS_LOCK_NO_OWNER, release); if (likely(current == self)) return; return _os_nospin_lock_unlock_slow(l, current); } diff --git a/src/setjmp/arm/_setjmp.h b/src/setjmp/arm/_setjmp.h index 2c53d1a..05b9a40 100644 --- a/src/setjmp/arm/_setjmp.h +++ b/src/setjmp/arm/_setjmp.h @@ -43,7 +43,11 @@ #define JMP_VFP 0x24 -#define JMP_sig 0x68 +#define JMP_sigmask 0x68 +#define JMP_sigonstack 0x6C + +#define STACK_SSFLAGS 8 // offsetof(stack_t, ss_flags) + #define JMP_SIGFLAG 0x70 diff --git a/src/setjmp/arm/longjmp.s b/src/setjmp/arm/longjmp.s index 36bab5c..51b76be 100644 --- a/src/setjmp/arm/longjmp.s +++ b/src/setjmp/arm/longjmp.s @@ -54,12 +54,17 @@ ENTRY_POINT(_longjmp) #endif mov r6, r0 // preserve args across _sigprocmask mov r8, r1 - ldr r0, [ r6, #JMP_sig ] // restore the signal mask + ldr r0, [ r6, #JMP_sigmask ] // restore the signal mask mov r1, sp // set str r0, [sp] movs r0, #3 // SIG_SETMASK movs r2, #0 // oset CALL_EXTERNAL(_sigprocmask) + + // Restore the sigaltstack status + ldr r0, [r6, JMP_sigonstack] // r0 = saved sigonstack info + CALL_EXTERNAL(__sigunaltstack) + mov r0, r6 mov r1, r8 #ifdef __ARM_ARCH_7K__ diff --git a/src/setjmp/arm/setjmp.s b/src/setjmp/arm/setjmp.s index 92399ec..a7e0ffd 100644 --- a/src/setjmp/arm/setjmp.s +++ b/src/setjmp/arm/setjmp.s @@ -49,13 +49,26 @@ ENTRY_POINT(_sigsetjmp) ENTRY_POINT(_setjmp) str lr, [ r0, #JMP_lr ] str r8, [ r0, #JMP_r8 ] - mov r8, r0 - mov r0, #1 // get the previous signal mask - mov r1, #0 // - add r2, r8, #JMP_sig // get address where previous mask needs to be - CALL_EXTERNAL(_sigprocmask) // make a syscall to get mask + mov r8, r0 // r8 = jmp_buf + + // Get previous sigmask + mov r0, #1 // r0 = SIG_BLOCK + mov r1, #0 // r1 = NULL + add r2, r8, #JMP_sigmask // r2 = address to put the sigmask in + CALL_EXTERNAL(_sigprocmask) // sigprocmask(SIGBLOCK, NULL, &old_mask); + + // Get altstack status + sub sp, sp, #32 // Put a stack_t on the stack + mov r0, #0 // r0 = ss = NULL + mov r1, sp // r1 = oss = the place on the stack where stack_t is located + 
CALL_EXTERNAL(___sigaltstack) // sigaltstack(NULL, oss) + ldr r0, [sp, STACK_SSFLAGS] // r0 = ss flags from stack_t + str r0, [r8, JMP_sigonstack] // *(r8 + JMP_sigonstack) = r0 + add sp, sp, #32 // reset sp + + // Do the remaining register stuff mov r0, r8 // restore jmp_buf ptr - ldr r8, [ r0, #JMP_r8 ] - ldr lr, [ r0, #JMP_lr ] + ldr r8, [ r0, #JMP_r8 ] + ldr lr, [ r0, #JMP_lr ] L__exit: BRANCH_EXTERNAL(__setjmp) diff --git a/src/setjmp/arm64/setjmp.s b/src/setjmp/arm64/setjmp.s index 430b7a9..02b79bd 100644 --- a/src/setjmp/arm64/setjmp.s +++ b/src/setjmp/arm64/setjmp.s @@ -2,14 +2,14 @@ * Copyright (c) 2011-2018 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -32,8 +32,11 @@ #define JMP_d10_d11 #0x80 #define JMP_d12_d13 #0x90 #define JMP_d14_d15 #0xA0 -#define JMP_sig #0xB0 +#define JMP_sigmask #0xB0 #define JMP_sigflag #0xB8 +#define JMP_sigonstack #0xBC /* whether the thread is on sigaltstack or not */ + +#define STACK_SSFLAGS 16 // offsetof(stack_t, ss_flags) #include #include @@ -77,6 +80,7 @@ ENTRY_POINT(__longjmp) _OS_PTR_UNMUNGE(fp, x10, x16) _OS_PTR_UNMUNGE(lr, x11, x16) _OS_PTR_UNMUNGE(x12, x12, x16) + ldrb w16, [sp] /* probe to detect absolutely corrupt stack pointers */ mov sp, x12 cmp w1, #0 csinc w0, w1, wzr, ne @@ -92,15 +96,25 @@ ENTRY_POINT(_sigsetjmp) /* int setjmp(jmp_buf env); */ ENTRY_POINT(_setjmp) - stp x21, lr, [x0] - mov x21, x0 + stp x21, lr, [x0] // Store x21 and lr in jmpbuf (for now) + mov x21, x0 // x21 = x0 - orr w0, wzr, #0x1 - mov x1, #0 - add x2, x21, JMP_sig + // Save the sigmask + orr w0, wzr, #0x1 // x0 = how = SIG_BLOCK + mov x1, #0 // x1 = set = 0 + add x2, x21, JMP_sigmask // x2 = oset = (x21 + JMP_sigmask) CALL_EXTERNAL(_sigprocmask) - mov x0, x21 + // Get current sigaltstack status + sub sp, sp, #32 // 24 bytes for a stack_t on the stack, +8 for alignment of stack + mov x0, xzr // x0 = ss = NULL + mov x1, sp // x1 = oss = the place on the stack where the stack_t is located + CALL_EXTERNAL(___sigaltstack) // sigaltstack(NULL, oss) + ldr w0, [sp, STACK_SSFLAGS] // w0 = ss flags from stack_t + str w0, [x21, JMP_sigonstack] // *(x21 + JMP_sigonstack) = w0 + add sp, sp, #32 // Reset sp + + mov x0, x21 // x0 = x21 ldp x21, lr, [x0] b __setjmp @@ -118,12 +132,19 @@ ENTRY_POINT(_longjmp) sub sp, sp, #16 mov x21, x0 // x21/x22 will be restored by __longjmp mov x22, x1 - ldr x8, [x21, JMP_sig] // restore the signal mask + + // Restore the signal mask + ldr x8, [x21, JMP_sigmask] // restore the signal mask str x8, [sp, #8] orr w0, wzr, #0x3 // SIG_SETMASK add x1, sp, #8 // set mov x2, #0 // oset CALL_EXTERNAL(_sigprocmask) + + // Restore the sigaltstack status + ldr x0, [x21, JMP_sigonstack] // x0 = saved sigonstack info + 
CALL_EXTERNAL(__sigunaltstack) + mov x0, x21 mov x1, x22 add sp, sp, #16 diff --git a/src/setjmp/generic/sigtramp.c b/src/setjmp/generic/sigtramp.c index 32e31db..ce9f505 100644 --- a/src/setjmp/generic/sigtramp.c +++ b/src/setjmp/generic/sigtramp.c @@ -44,9 +44,9 @@ extern int __sigreturn(ucontext_t *, int, uintptr_t); * Note that the kernel saves/restores all of our register state. */ -/* On i386, i386/sys/_sigtramp.s defines this. There is no in_sigtramp on arm */ -#if defined(__DYNAMIC__) && defined(__x86_64__) -OS_NOEXPORT +/* On i386, i386/sys/_sigtramp.s defines this. */ +#if defined(__DYNAMIC__) && !defined(__i386__) +OS_NOEXPORT int __in_sigtramp; int __in_sigtramp = 0; #endif @@ -72,7 +72,7 @@ _sigunaltstack(int set) { /* sigreturn(uctx, ctxstyle); */ /* syscall (SYS_SIGRETURN, uctx, ctxstyle); */ - __sigreturn (NULL, (set == SS_ONSTACK) ? UC_SET_ALT_STACK : UC_RESET_ALT_STACK, 0); + __sigreturn (NULL, (set & SS_ONSTACK) ? UC_SET_ALT_STACK : UC_RESET_ALT_STACK, 0); } /* On these architectures, _sigtramp is implemented in assembly to @@ -88,21 +88,37 @@ _sigtramp( ucontext_t *uctx, uintptr_t token ) { + __in_sigtramp = sig; int ctxstyle = UC_FLAVOR; - if (sigstyle == UC_TRAD) - sa_handler(sig); - else { + /* Some variants are not supposed to get the last 2 parameters but it's + * easier to pass them along - especially on arm64 whereby the extra fields + * are probably in caller save registers anyways, thereby making no + * difference to callee if we populate them or not. + * + * + * Moreover, sigaction(2)'s man page implies that the following behavior + * should be supported: + * + * If the SA_SIGINFO flag is not set, the handler function should match + * either the ANSI C or traditional BSD prototype and be pointed to by + * the sa_handler member of struct sigaction. In practice, FreeBSD + * always sends the three arguments of the latter and since the ANSI C + * prototype is a subset, both will work. 
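The prototype note above is ordinary sigaction(2) behavior; a minimal user-level example of the three-argument form that _sigtramp forwards to (plain POSIX, not libplatform code):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void
on_usr1(int sig, siginfo_t *info, void *uctx)
{
	/* Only async-signal-safe calls in here. */
	static const char msg[] = "three-argument handler ran\n";
	(void)sig; (void)info; (void)uctx;
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_usr1;	/* (int, siginfo_t *, void *) form */
	sa.sa_flags = SA_SIGINFO;	/* ask for siginfo and ucontext */
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) != 0)
		return 1;

	raise(SIGUSR1);		/* handler sees sig, siginfo and the interrupted ucontext */
	return 0;
}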
+ * + * See bad siginfo struct sent to SIGCHILD signal + * handler in arm64 process + */ #if TARGET_OS_WATCH - // - sa_sigaction(sig, sinfo, NULL); + // + sa_sigaction(sig, sinfo, NULL); #else - sa_sigaction(sig, sinfo, uctx); + sa_sigaction(sig, sinfo, uctx); #endif - } - /* sigreturn(uctx, ctxstyle); */ + /* sigreturn(uctx, ctxstyle); */ /* syscall (SYS_SIGRETURN, uctx, ctxstyle); */ + __in_sigtramp = 0; __sigreturn (uctx, ctxstyle, token); __builtin_trap(); /* __sigreturn returning is a fatal error */ } diff --git a/src/setjmp/x86_64/_setjmp.s b/src/setjmp/x86_64/_setjmp.s index cd3ad9e..7d42131 100644 --- a/src/setjmp/x86_64/_setjmp.s +++ b/src/setjmp/x86_64/_setjmp.s @@ -104,6 +104,7 @@ LEAF(__longjmp, 0) movq %rsi, %rbp movq JB_RSP(%rdi), %rsi _OS_PTR_UNMUNGE(%rsi) + movsbq (%rsi), %r12 // probe to detect absolutely corrupt stack pointers movq %rsi, %rsp movq JB_R12(%rdi), %r12 movq JB_R13(%rdi), %r13 diff --git a/src/setjmp/x86_64/setjmp.s b/src/setjmp/x86_64/setjmp.s index cfee74a..9a69366 100644 --- a/src/setjmp/x86_64/setjmp.s +++ b/src/setjmp/x86_64/setjmp.s @@ -109,7 +109,7 @@ LEAF(_longjmp, 0) movq %rsp, %rsi // set = address where we stored the mask xorq %rdx, %rdx // oset = NULL CALL_EXTERN_AGAIN(_sigprocmask) - + // Restore sigaltstack status movq 16(%rsp), %rdi // Grab jmpbuf but leave it on the stack movl JB_ONSTACK(%rdi), %edi // Pass old state to _sigunaltstack() diff --git a/src/simple/asl.c b/src/simple/asl.c index 4928a6a..6e58e09 100644 --- a/src/simple/asl.c +++ b/src/simple/asl.c @@ -69,7 +69,7 @@ struct asl_context { bool asl_enabled; const char *progname; int asl_fd; -#if TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC +#if TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST const char *sim_log_path; os_unfair_lock sim_connect_lock; #else @@ -146,7 +146,7 @@ _simple_asl_get_fd(void) return -1; } -#if TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC +#if TARGET_OS_SIMULATOR && !TARGET_OS_MACCATALYST os_unfair_lock_lock_with_options(&ctx->sim_connect_lock, OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION); if (ctx->sim_log_path) { diff --git a/src/simple/string_io.c b/src/simple/string_io.c index 0c8c637..2f7a225 100644 --- a/src/simple/string_io.c +++ b/src/simple/string_io.c @@ -147,6 +147,41 @@ put_n(BUF *b, _esc_func esc, const char *str, ssize_t n) put_c(b, esc, *str++); } +#if __LP64__ || defined(__arm64__) +static unsigned long long +udiv10(unsigned long long a, unsigned long long *rem) +{ + *rem = a % 10; + return a / 10; +} +#else +unsigned long long +udiv10(unsigned long long a, unsigned long long *rem_out) +{ + if (a <= UINT_MAX) { + *rem_out = (unsigned long long)((unsigned int)a % 10); + return (unsigned long long)((unsigned int)a / 10); + } + + // The biggest multiple of 10 that dividend might contain + unsigned long long divisor = 0xa000000000000000; + unsigned long long dividend = a; + unsigned long long quotient = 0; + + while (divisor >= 0xa) { + quotient = quotient << 1; + if (dividend >= divisor) { + dividend -= divisor; + quotient += 1; + } + divisor = divisor >> 1; + } + + *rem_out = dividend; + return quotient; +} +#endif + /* * Output the signed decimal string representing the number in "in". 
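The 32-bit fallback in udiv10() above is a textbook restoring (shift-and-subtract) division by 10; a small standalone self-test of the same loop against the compiler's native 64-bit division (illustrative harness, not part of the patch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
soft_udiv10(uint64_t a, uint64_t *rem_out)
{
	/* 0xa << 60: the largest multiple of 10 of the form 10 * 2^k that
	 * still fits in 64 bits, exactly as in udiv10() above. */
	uint64_t divisor = 0xa000000000000000ull;
	uint64_t quotient = 0;

	while (divisor >= 0xa) {
		quotient <<= 1;
		if (a >= divisor) {
			a -= divisor;
			quotient += 1;
		}
		divisor >>= 1;
	}
	*rem_out = a;			/* whatever is left over is the remainder */
	return quotient;
}

int
main(void)
{
	uint64_t samples[] = { 0, 7, 10, 99, 123456789, UINT64_MAX };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t rem, q = soft_udiv10(samples[i], &rem);
		assert(q == samples[i] / 10 && rem == samples[i] % 10);
	}
	puts("soft_udiv10 matches native division");
	return 0;
}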
"width" is * the minimum field width, and "zero" is a boolean value, true for zero padding @@ -160,6 +195,7 @@ dec(BUF *b, _esc_func esc, long long in, int width, int zero) ssize_t pad; int neg = 0; unsigned long long n = (unsigned long long)in; + unsigned long long rem; if(in < 0) { neg++; @@ -169,8 +205,8 @@ dec(BUF *b, _esc_func esc, long long in, int width, int zero) *--cp = 0; if(n) { while(n) { - *--cp = (n % 10) + '0'; - n /= 10; + n = udiv10(n, &rem); + *--cp = rem + '0'; } } else *--cp = '0'; @@ -268,13 +304,14 @@ udec(BUF *b, _esc_func esc, unsigned long long n, int width, int zero) { char buf[32]; char *cp = buf + sizeof(buf); + unsigned long long rem; ssize_t pad; *--cp = 0; if(n) { while(n) { - *--cp = (n % 10) + '0'; - n /= 10; + n = udiv10(n, &rem); + *--cp = rem + '0'; } } else *--cp = '0'; diff --git a/src/ucontext/arm64/_ctx_start.s b/src/ucontext/arm64/_ctx_start.s new file mode 100644 index 0000000..44fec4b --- /dev/null +++ b/src/ucontext/arm64/_ctx_start.s @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "asm_help.h" +#include +#include + +.text + +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT + +/* Helper macro for unmunging pointers in place */ +.macro PTR_UNMUNGE addr +#if defined(__LP64__) + _OS_PTR_MUNGE_TOKEN(x16, x16) +#else + _OS_PTR_MUNGE_TOKEN(x16, w16) +#endif + _OS_PTR_UNMUNGE(\addr, \addr, x16) +.endmacro + +.macro CALL_USER_FUNC func + // Populate the first 8 arguments in registers from the stack. Coordinated + // with makecontext which populates the arguments on the stack + ldp w0, w1, [sp], #32 + ldp w2, w3, [sp, #-24] + ldp w4, w5, [sp, #-16] + ldp w6, w7, [sp, #-8] + + PTR_UNMUNGE \func + +#if defined(__arm64e__) + blraaz \func +#else + blr \func +#endif +.endmacro + +.private_extern __ctx_start +.align 2 +__ctx_start: + /* x20 = munged signed user func, + * x19 = uctx, + * fp = top of stack, + * sp = where args end */ + CALL_USER_FUNC x20 + + /* user function returned, set up stack for _ctx_done */ + + /* Reset to top of stack */ + mov sp, fp + + mov x0, x19 /* x0 = uctx */ + bl __ctx_done + + brk #666 /* Should not get here */ + +#endif diff --git a/src/ucontext/arm64/_setcontext.s b/src/ucontext/arm64/_setcontext.s new file mode 100644 index 0000000..0d1b384 --- /dev/null +++ b/src/ucontext/arm64/_setcontext.s @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "asm_help.h" +#include +#include +/* + * void setcontext(ucontext_t *ucp); + * + * _STRUCT_UCONTEXT { + * int uc_onstack; + * __darwin_sigset_t uc_sigmask; // signal mask used by this context + * _STRUCT_SIGALTSTACK uc_stack; // stack used by this context + * _STRUCT_UCONTEXT *uc_link; // pointer to resuming context + * __darwin_size_t uc_mcsize; // size of the machine context passed in + * _STRUCT_MCONTEXT *uc_mcontext; // pointer to machine specific context + * #ifdef _XOPEN_SOURCE + * _STRUCT_MCONTEXT __mcontext_data; + * #endif + * }; + * + * From the standard: + * The setcontext() function shall restore the user context pointed to by + * ucp. A successful call to setcontext() shall not return; program execution + * resumes at the point specified by the ucp argument passed to setcontext(). + * The ucp argument should be created either by a prior call to getcontext() + * or makecontext(), or by being passed as an argument to a signal handler. + * If the ucp argument was created with getcontext(), program execution continues + * as if the corresponding call of getcontext() had just returned. + * + * setcontext restores the following fields (with the help of a helper function): + * uc_sigmask + * machine data pointed by uc_mcontext + * + * The ASM below mainly handles restoring the machine context data - note that + * in coordination with getcontext, only the arm64 callee save registers are + * being restored. + */ + +.text + +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT +/* Helper macro for authenticating fp, sp and lr and moves the auth-ed values to + * the right registers + * + * Uses x9 + * Modifies input registers, fp, sp and lr + */ +.macro PTR_AUTH_FP_SP_LR fp, sp, lr, flags +#if defined(__arm64e__) + // Auth sp with constant discriminator + mov x9, #52205 // x9 = ptrauth_string_discriminator("sp") + autda \sp, x9 + ldr xzr, [\sp] // Probe the new stack pointer to catch a corrupt stack + mov sp, \sp + + // Auth fp with constant discriminator + mov x9, #17687 // x9 = ptrauth_string_discriminator("fp") + autda \fp, x9 + mov fp, \fp + + // Check to see how the lr is signed. 
If it is signed with B key, nothing to + // do + mov lr, \lr + tbnz \flags, LR_SIGNED_WITH_IB_BIT, 2f + + // Auth the input LR per the scheme in the thread state + mov x16, \lr + mov x17, x16 // x16 = x17 = lr + + mov x9, #30675 // x9 = ptrauth_string_discriminator("lr") + autia x16, x9 + xpaci x17 + cmp x16, x17 + b.eq 1f + brk #666 + +1: + // Auth succeeded - resign the lr with the sp, auth will happen again on + // return + mov lr, x16 + pacibsp +2: +#else + mov sp, \sp + mov fp, \fp + mov lr, \lr +#endif +.endmacro + +.private_extern __setcontext +.align 2 +__setcontext: + // x0 = mcontext + + // Restore x19-x28 + ldp x19, x20, [x0, MCONTEXT_OFFSET_X19_X20] + ldp x21, x22, [x0, MCONTEXT_OFFSET_X21_X22] + ldp x23, x24, [x0, MCONTEXT_OFFSET_X23_X24] + ldp x25, x26, [x0, MCONTEXT_OFFSET_X25_X26] + ldp x27, x28, [x0, MCONTEXT_OFFSET_X27_X28] + + // Restore NEON registers + ldr d8, [x0, MCONTEXT_OFFSET_D8] + ldr d9, [x0, MCONTEXT_OFFSET_D9] + ldr d10, [x0, MCONTEXT_OFFSET_D10] + ldr d11, [x0, MCONTEXT_OFFSET_D11] + ldr d12, [x0, MCONTEXT_OFFSET_D12] + ldr d13, [x0, MCONTEXT_OFFSET_D13] + ldr d14, [x0, MCONTEXT_OFFSET_D14] + ldr d15, [x0, MCONTEXT_OFFSET_D15] + + // Restore sp, fp, lr. + ldp x10, x12, [x0, MCONTEXT_OFFSET_FP_LR] + ldr x11, [x0, MCONTEXT_OFFSET_SP] + ldr w13, [x0, MCONTEXT_OFFSET_FLAGS] + + // x10 = signed fp + // x11 = signed sp + // x12 = signed lr + // x13 = flags + + // Auth the ptrs and move them to the right registers + PTR_AUTH_FP_SP_LR x10, x11, x12, w13 + + // Restore return value + mov x0, xzr + + ARM64_STACK_EPILOG + +#endif /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ diff --git a/src/ucontext/arm64/asm_help.h b/src/ucontext/arm64/asm_help.h new file mode 100644 index 0000000..1a29099 --- /dev/null +++ b/src/ucontext/arm64/asm_help.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* ASM Macro helpers */ +#if defined(__ASSEMBLER__) + +.macro ARM64_STACK_PROLOG +#if __has_feature(ptrauth_returns) + pacibsp +#endif +.endmacro + +.macro ARM64_STACK_EPILOG +#if __has_feature(ptrauth_returns) + retab +#else + ret +#endif +.endmacro + +#define PUSH_FRAME \ + stp fp, lr, [sp, #-16]! 
%% \ + mov fp, sp %% + +#define POP_FRAME \ + mov sp, fp %% \ + ldp fp, lr, [sp], #16 %% +#endif /* ASSEMBLER */ + +/* Offsets of the various register states inside of the mcontext data */ +#define MCONTEXT_OFFSET_X0 16 + +#define MCONTEXT_OFFSET_X19_X20 168 +#define MCONTEXT_OFFSET_X21_X22 184 +#define MCONTEXT_OFFSET_X23_X24 200 + +#define MCONTEXT_OFFSET_X25_X26 216 +#define MCONTEXT_OFFSET_X27_X28 232 + +#define MCONTEXT_OFFSET_FP_LR 248 +#define MCONTEXT_OFFSET_SP 264 +#define MCONTEXT_OFFSET_FLAGS 284 + +#define MCONTEXT_OFFSET_D8 424 +#define MCONTEXT_OFFSET_D9 440 +#define MCONTEXT_OFFSET_D10 456 +#define MCONTEXT_OFFSET_D11 472 +#define MCONTEXT_OFFSET_D12 488 +#define MCONTEXT_OFFSET_D13 504 +#define MCONTEXT_OFFSET_D14 520 +#define MCONTEXT_OFFSET_D15 536 + +#if __has_feature(ptrauth_calls) +#define LR_SIGNED_WITH_IB 0x2 /* Copied from __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR */ +#define LR_SIGNED_WITH_IB_BIT 0x1 +#endif diff --git a/src/ucontext/arm64/getcontext.s b/src/ucontext/arm64/getcontext.s new file mode 100644 index 0000000..ac7cd59 --- /dev/null +++ b/src/ucontext/arm64/getcontext.s @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "asm_help.h" +#include +#include + +/* + * int getcontext(ucontext_t *ucp); + * + * _STRUCT_UCONTEXT { + * int uc_onstack; + * __darwin_sigset_t uc_sigmask; // signal mask used by this context + * _STRUCT_SIGALTSTACK uc_stack; // stack used by this context + * _STRUCT_UCONTEXT *uc_link; // pointer to resuming context + * __darwin_size_t uc_mcsize; // size of the machine context passed in + * _STRUCT_MCONTEXT *uc_mcontext; // pointer to machine specific context + * #ifdef _XOPEN_SOURCE + * _STRUCT_MCONTEXT __mcontext_data; + * #endif + * }; + * + * _STRUCT_MCONTEXT64 + * { + * _STRUCT_ARM_EXCEPTION_STATE64 __es; + * _STRUCT_ARM_THREAD_STATE64 __ss; + * _STRUCT_ARM_NEON_STATE64 __ns; + * }; + * + * From the standard: + * The getcontext(3) function shall initialize the structure pointed to by + * ucp to the current user context of the calling thread. The ucontext_t + * type that ucp points to defines the user context and includes the + * contents of the calling thread's machine registers, the signal mask, and + * the current execution stack. + * + * getcontext populates the following fields (with the help of a helper function): + * uc_sigmask + * uc_mcontext + * uc_mcsize + * __mcontext_data + * uc_stack + * + * The ASM below mainly handles populating the machine context. 
Per the + * standard, getcontext should populate the machine context such that if + * setcontext is called with "ucp argument which was created with getcontext(), + * program execution continues as if the corresponding call of getcontext() had + * just returned". + * + * As such, the mcontext is saved such that: + * - sp and fp are saved to be that of the caller. + * - pc is not saved, lr is saved. We'll return from setcontext to the + * caller (the current lr) via a ret. + * - only callee save registers are saved in the machine context, caller + * will restore the caller save registers. + * - For neon registers, we save d8-d15. Per the standard: + * Registers v8-v15 must be preserved by a callee across subroutine + * calls; the remaining registers (v0-v7, v16-v31) do not need to be + * preserved (or should be preserved by the caller). Additionally, + * only the bottom 64 bits of each value stored in v8-v15 need to be + * preserved; it is the responsibility of the caller to preserve larger + * values. + * - we don't need to save the arm exception state + */ + +.text + +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT + +/* Pointer auths fp, sp and lr and puts them in the final locations specified by + * input arguments + * + * Modifies: lr + * Uses: x9 + */ +.macro PTR_SIGN_FP_SP_LR fp, sp, lr, flags +#if defined(__arm64e__) + // Sign fp with fp constant discriminator + mov \fp, fp + mov x9, #17687 // x9 = ptrauth_string_discriminator("fp") + pacda \fp, x9 + + // Sign sp with sp constant discriminator + mov \sp, sp + mov x9, #52205 // x9 = ptrauth_string_discriminator("sp") + pacda \sp, x9 + + // lr is signed with sp and b key, just set a flag marking so and don't + // change the signature + mov \lr, lr + mov \flags, LR_SIGNED_WITH_IB +#else + mov \fp, fp + mov \sp, sp + mov \lr, lr +#endif +.endmacro + +.align 2 +.globl _getcontext +_getcontext: + ARM64_STACK_PROLOG + + // Note that we're pushing and popping a frame around the subroutine call so + // that we have the lr, fp, and sp saved + PUSH_FRAME + // We don't need to caller save x9 - x15 since we're not going to + // save them in the mcontext later anyways and since they are caller save + // registers, the caller of getcontext will restore them if needed. 
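The contract being implemented here, resuming "as if the corresponding call of getcontext() had just returned", is easiest to see from the caller's side. A small user-level illustration (the ucontext routines are deprecated on Darwin and require _XOPEN_SOURCE, hence the define; the volatile counter is what carries state across the second return):

#define _XOPEN_SOURCE 600L
#include <stdio.h>
#include <ucontext.h>

int
main(void)
{
	ucontext_t ctx;
	volatile int passes = 0;	/* volatile: must survive the "second return" */

	getcontext(&ctx);		/* execution resumes here after each setcontext() */
	passes++;
	printf("getcontext returned, pass %d\n", passes);
	if (passes < 3) {
		setcontext(&ctx);	/* continue as if getcontext() had just returned */
	}
	return 0;
}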
+ + // x0 = ucp pointer + // x1 = sp + mov x1, sp + bl _populate_signal_stack_context + POP_FRAME // Restore lr, fp and sp + + // x0 = mcontext pointer + + // Pointer sign fp, sp, lr and mark flags as needed + PTR_SIGN_FP_SP_LR x10, x11, x12, x13 + + // x10 = signed fp + // x11 = signed sp + // x12 = signed lr + // x13 = mcontext flags + + // Save frame pointer and lr + stp x10, x12, [x0, MCONTEXT_OFFSET_FP_LR] + + // Save stack pointer + str x11, [x0, MCONTEXT_OFFSET_SP] + +#if defined(__arm64e__) + // Save the flags + str w13, [x0, MCONTEXT_OFFSET_FLAGS] +#endif + + // Save x19 - x28 + stp x19, x20, [x0, MCONTEXT_OFFSET_X19_X20] + stp x21, x22, [x0, MCONTEXT_OFFSET_X21_X22] + stp x23, x24, [x0, MCONTEXT_OFFSET_X23_X24] + stp x25, x26, [x0, MCONTEXT_OFFSET_X25_X26] + stp x27, x28, [x0, MCONTEXT_OFFSET_X27_X28] + + // Save return value + str xzr, [x0, MCONTEXT_OFFSET_X0] + + // Save NEON registers + str d8, [x0, MCONTEXT_OFFSET_D8] + str d9, [x0, MCONTEXT_OFFSET_D9] + str d10, [x0, MCONTEXT_OFFSET_D10] + str d11, [x0, MCONTEXT_OFFSET_D11] + str d12, [x0, MCONTEXT_OFFSET_D12] + str d13, [x0, MCONTEXT_OFFSET_D13] + str d14, [x0, MCONTEXT_OFFSET_D14] + str d15, [x0, MCONTEXT_OFFSET_D15] + + mov x0, xzr /* Return value from getcontext */ + + ARM64_STACK_EPILOG + +#endif diff --git a/src/ucontext/arm64/mcontext.c b/src/ucontext/arm64/mcontext.c new file mode 100644 index 0000000..a322308 --- /dev/null +++ b/src/ucontext/arm64/mcontext.c @@ -0,0 +1,58 @@ +#include "asm_help.h" + +#define _XOPEN_SOURCE 600L +#include +#include + +#include + +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__x[0]) == MCONTEXT_OFFSET_X0, + "MCONTEXT_OFFSET_X0"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__x[19]) == MCONTEXT_OFFSET_X19_X20, + "MCONTEXT_OFFSET_X19_X20"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__x[21]) == MCONTEXT_OFFSET_X21_X22, + "MCONTEXT_OFFSET_X21_X22"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__x[23]) == MCONTEXT_OFFSET_X23_X24, + "MCONTEXT_OFFSET_X23_X24"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__x[25]) == MCONTEXT_OFFSET_X25_X26, + "MCONTEXT_OFFSET_X25_X26"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__x[27]) == MCONTEXT_OFFSET_X27_X28, + "MCONTEXT_OFFSET_X27_X28"); + +#if __has_feature(ptrauth_calls) +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__opaque_fp) == MCONTEXT_OFFSET_FP_LR, + "MCONTEXT_OFFSET_FP_LR"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__opaque_sp) == MCONTEXT_OFFSET_SP, + "MCONTEXT_OFFSET_SP"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__opaque_flags) == MCONTEXT_OFFSET_FLAGS, + "MCONTEXT_OFFSET_FLAGS"); +#else +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__fp) == MCONTEXT_OFFSET_FP_LR, + "MCONTEXT_OFFSET_FP_LR"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ss.__sp) == MCONTEXT_OFFSET_SP, + "MCONTEXT_OFFSET_SP"); +#endif + + +// Neon registers are 128 bits wide. d suffix refers to the last 64 bits of the +// 128 bit register. Hence the -8 offset. 
+_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[8]) == (MCONTEXT_OFFSET_D8 - 8), + "MCONTEXT_OFFSET_D8"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[9]) == (MCONTEXT_OFFSET_D9 - 8), + "MCONTEXT_OFFSET_D9"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[10]) == (MCONTEXT_OFFSET_D10 - 8), + "MCONTEXT_OFFSET_D10"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[11]) == (MCONTEXT_OFFSET_D11 - 8), + "MCONTEXT_OFFSET_D11"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[12]) == (MCONTEXT_OFFSET_D12 - 8), + "MCONTEXT_OFFSET_D12"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[13]) == (MCONTEXT_OFFSET_D13 - 8), + "MCONTEXT_OFFSET_D13"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[14]) == (MCONTEXT_OFFSET_D14 - 8), + "MCONTEXT_OFFSET_D14"); +_Static_assert(offsetof(struct __darwin_mcontext64, __ns.__v[15]) == (MCONTEXT_OFFSET_D15 - 8), + "MCONTEXT_OFFSET_D15"); + +#if __has_feature(ptrauth_calls) +_Static_assert((1 << LR_SIGNED_WITH_IB_BIT) == LR_SIGNED_WITH_IB, "LR_SIGNED_WITH_IB_BIT"); +#endif diff --git a/src/ucontext/generic/getmcontext.c b/src/ucontext/generic/getmcontext.c index 255628f..13be5f0 100644 --- a/src/ucontext/generic/getmcontext.c +++ b/src/ucontext/generic/getmcontext.c @@ -2,14 +2,14 @@ * Copyright (c) 2007, 2008, 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,13 +17,27 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
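The offsetof()-based _Static_asserts in mcontext.c above are a generally useful pattern: any structure layout that assembly hard-codes can be pinned so that a header change breaks the build instead of silently desynchronizing the asm. A generic sketch with a made-up structure and offsets (LP64 assumed):

#include <stddef.h>

#if defined(__LP64__)
struct toy_thread_state {
	unsigned long	x[29];
	unsigned long	fp;
	unsigned long	lr;
	unsigned long	sp;
};

/* Offsets that a hypothetical .s file would hard-code. */
#define TOY_OFFSET_FP	232
#define TOY_OFFSET_LR	240
#define TOY_OFFSET_SP	248

_Static_assert(offsetof(struct toy_thread_state, fp) == TOY_OFFSET_FP, "TOY_OFFSET_FP");
_Static_assert(offsetof(struct toy_thread_state, lr) == TOY_OFFSET_LR, "TOY_OFFSET_LR");
_Static_assert(offsetof(struct toy_thread_state, sp) == TOY_OFFSET_SP, "TOY_OFFSET_SP");
#endif /* __LP64__ */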
- * + * * @APPLE_LICENSE_HEADER_END@ */ #define _XOPEN_SOURCE 600L #include #include +#include +#include +#include + +/* This is a macro to capture all the code added in here that is purely to make + * conformance tests pass and seems to have no functional reason nor is it + * required by the standard */ +#define CONFORMANCE_SPECIFIC_HACK 1 + +#ifdef __DYNAMIC__ +extern int __in_sigtramp; +#endif /* __DYNAMIC_ */ + +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT #if defined(__x86_64__) || defined(__i386__) @@ -32,6 +46,9 @@ #include #include +#include +#include + extern int __sigaltstack(const stack_t * __restrict, stack_t * __restrict); #ifdef __DYNAMIC__ @@ -46,6 +63,7 @@ getmcontext(ucontext_t *uctx, void *sp) size_t stacksize = 0; stack_t stack; +#if CONFORMANCE_SPECIFIC_HACK uctx->uc_stack.ss_sp = sp; uctx->uc_stack.ss_flags = 0; @@ -63,29 +81,135 @@ getmcontext(ucontext_t *uctx, void *sp) } uctx->uc_stack.ss_size = stacksize; +#endif + + uctx->uc_mcontext = mctx; + uctx->uc_mcsize = sizeof(*mctx); - if (uctx->uc_mcontext != mctx) { - uctx->uc_mcontext = mctx; +#if CONFORMANCE_SPECIFIC_HACK #ifdef __DYNAMIC__ - uctx->uc_link = (ucontext_t*)(uintptr_t)__in_sigtramp; /* non-zero if in signal handler */ + uctx->uc_link = (ucontext_t*)(uintptr_t)__in_sigtramp; /* non-zero if in signal handler */ #else /* !__DYNAMIC__ */ - uctx->uc_link = 0; + uctx->uc_link = NULL; #endif /* __DYNAMIC__ */ - } +#endif /* CONFORMANCE_SPECIFIC_HACK */ sigprocmask(0, NULL, &uctx->uc_sigmask); return mctx; } +#elif defined(__arm64__) + +#include +#include +#include + +#include +#include + +extern int __sigaltstack(const stack_t * __restrict, stack_t * __restrict); + +/* @function populate_signal_stack_context + * + * @note + * + * _STRUCT_UCONTEXT { + * int uc_onstack; + * __darwin_sigset_t uc_sigmask; // signal mask used by this context + * _STRUCT_SIGALTSTACK uc_stack; // stack used by this context + * _STRUCT_UCONTEXT *uc_link; // pointer to resuming context + * __darwin_size_t uc_mcsize; // size of the machine context passed in + * _STRUCT_MCONTEXT *uc_mcontext; // pointer to machine specific context + * #ifdef _XOPEN_SOURCE + * _STRUCT_MCONTEXT __mcontext_data; + * #endif + * }; + * + * populate_signal_stack_context unconditionally populates the following fields: + * uc_sigmask + * uc_mcontext + * uc_mcsize + * __mcontext_data + * uc_link + * + * The standard specifies this about uc_stack: + * + * Before a call is made to makecontext(), the application shall ensure + * that the context being modified has a stack allocated for it. + * + * ie. the client is generally responsible for managing the stack on on which + * their context runs and initializing it properly. + */ +__attribute__((visibility("hidden"))) +mcontext_t +populate_signal_stack_context(ucontext_t *ucp, void *sp) +{ +#if CONFORMANCE_SPECIFIC_HACK + /* The conformance tests seems to require that we populate the uc_stack in + * getcontext even though the standard requires - as stated above - that the + * clients manage the stack that their code runs on. This makes no + * functional sense but is put in here to make conformance tests work */ + stack_t stack; + + if (0 == __sigaltstack(NULL, &stack) && (stack.ss_flags & SA_ONSTACK)) { + } else { + stack.ss_sp = sp; + + // This stacksize is the wrong number - it provides the stack size of + // the main thread and not the current thread. 
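The __sigaltstack(NULL, &stack) query used above has a direct public counterpart; a minimal example that installs an alternate signal stack with POSIX sigaltstack(2) and then reports its state (SS_ONSTACK only reads as set while a SA_ONSTACK handler is actually running on that stack):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	stack_t ss, oss;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) != 0) {
		perror("sigaltstack");
		return 1;
	}

	/* Passing NULL for the new stack just reports the current state. */
	sigaltstack(NULL, &oss);
	printf("alternate stack installed: ss_size=%zu ss_flags=%d (SS_ONSTACK %s)\n",
	    oss.ss_size, oss.ss_flags, (oss.ss_flags & SS_ONSTACK) ? "set" : "clear");

	/* Disable it again before freeing the memory. */
	ss.ss_flags = SS_DISABLE;
	sigaltstack(&ss, NULL);
	free(ss.ss_sp);
	return 0;
}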
We can't know the + // stacksize of the current thread without jumping through some crazy + // hoops and it seems like per the standard, this field should not be + // required anyways since the client should be allocating and managing + // stacks themselves for makecontext. + struct rlimit rlim; + if (0 == getrlimit(RLIMIT_STACK, &rlim)) + stack.ss_size = rlim.rlim_cur; + } + ucp->uc_stack = stack; +#endif + + /* Populate signal information */ + sigprocmask(SIG_UNBLOCK, NULL, &ucp->uc_sigmask); + + /* Always use the mcontext that is embedded in the struct */ + mcontext_t mctx = (mcontext_t) &ucp->__mcontext_data; + ucp->uc_mcontext = mctx; + ucp->uc_mcsize = sizeof(*mctx); + +#if CONFORMANCE_SPECIFIC_HACK + /* The conformance tests for getcontext requires that: + * uc_link = 0 if we're in the "main context" + * uc_link = non-0 if we're on signal context while calling getcontext + * + * It seems like it doesn't require uc_link to a valid pointer in the 2nd + * case, just not 0. It also seems to require that the uc_link is + * diversified if we have multiple contexts populated from the signal stack. + * So we have it be the address of the in_signal_handler value. + * + * AFAICT, there seems to be no reason to require populating uc_link at all + * but it is what the tests expects. + */ +#ifdef __DYNAMIC__ + ucp->uc_link = (ucontext_t*)(uintptr_t)__in_sigtramp; /* non-zero if in signal handler */ +#else /* !__DYNAMIC__ */ + ucp->uc_link = NULL; +#endif /* __DYNAMIC__ */ + +#endif + + return mctx; +} + +#endif /* arm64 || x86_64 || i386 */ + #else int -getcontext(ucontext_t *u) +getcontext(ucontext_t *uctx) { errno = ENOTSUP; return -1; } - #endif diff --git a/src/ucontext/generic/makecontext.c b/src/ucontext/generic/makecontext.c index 4f8a39d..ef7d2ff 100644 --- a/src/ucontext/generic/makecontext.c +++ b/src/ucontext/generic/makecontext.c @@ -48,16 +48,28 @@ */ #define _XOPEN_SOURCE 600L +#define _DARWIN_C_SOURCE #include #include +#include +#include +#include +#include + +#include + +/* This is a macro to capture all the code added in here that is purely to make + * conformance tests pass and seems to have no functional reason nor is it + * required by the standard */ +#define CONFORMANCE_SPECIFIC_HACK 1 + +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT + #if defined(__x86_64__) || defined(__i386__) #pragma clang diagnostic ignored "-Wdeprecated-declarations" -#include #include -#include -#include /* Prototypes */ extern void _ctx_start(ucontext_t *, int argc, ...); @@ -93,8 +105,7 @@ makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) if (ucp == NULL) return; - else if ((ucp->uc_stack.ss_sp == NULL) || - (ucp->uc_stack.ss_size < MINSIGSTKSZ)) { + else if (ucp->uc_stack.ss_sp == NULL) { /* * This should really return -1 with errno set to ENOMEM * or something, but the spec says that makecontext is @@ -112,6 +123,8 @@ makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) /* * Arrange the stack as follows: * + * Bottom of the stack + * * _ctx_start() - context start wrapper * start() - user start routine * arg1 - first argument, aligned(16) @@ -119,6 +132,8 @@ makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) * argn * ucp - this context, %rbp/%ebp points here * + * stack top + * * When the context is started, control will return to * the context start wrapper which will pop the user * start routine from the top of the stack. After that, @@ -130,8 +145,6 @@ makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) 
* will then call _ctx_done() to swap in the next context * (uc_link != 0) or exit the program (uc_link == 0). */ - mcontext_t mc; - stack_top = (char *)(ucp->uc_stack.ss_sp + ucp->uc_stack.ss_size - sizeof(intptr_t)); @@ -188,6 +201,19 @@ makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) /* The ucontext is placed at the bottom of the stack. */ *argp = (intptr_t)ucp; +#if CONFORMANCE_SPECIFIC_HACK + // There is a conformance test which initialized a ucontext A by memcpy-ing + // a ucontext B that was previously initialized with getcontext. + // getcontext(B) modified B such that B.uc_mcontext = &B.__mcontext_data; + // But by doing the memcpy of B to A, A.uc_mcontext = &B.__mcontext_data + // when that's not necessarily what we want. We therefore have to + // unfortunately reassign A.uc_mccontext = &A.__mcontext_data even though we + // don't know if A.__mcontext_data was properly initialized before we use + // it. This is really because the conformance test doesn't initialize + // properly with multiple getcontexts and instead copies contexts around. + ucp->uc_mcontext = (mcontext_t) &ucp->__mcontext_data; +#endif + /* * Set the machine context to point to the top of the * stack and the program counter to the context start @@ -197,7 +223,7 @@ makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) * %r12/%esi to point to the base of the stack where ucp * is stored. */ - mc = ucp->uc_mcontext; + mcontext_t mc = ucp->uc_mcontext; #if defined(__x86_64__) /* Use callee-save and match _ctx_start implementation */ mc->__ss.__r12 = (intptr_t)argp; @@ -213,11 +239,188 @@ makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) } } -#else +#elif defined(__arm64__) + +/* + * _STRUCT_UCONTEXT { + * int uc_onstack; + * __darwin_sigset_t uc_sigmask; // signal mask used by this context + * _STRUCT_SIGALTSTACK uc_stack; // stack used by this context + * _STRUCT_UCONTEXT *uc_link; // pointer to resuming context + * __darwin_size_t uc_mcsize; // size of the machine context passed in + * _STRUCT_MCONTEXT *uc_mcontext; // pointer to machine specific context + * #ifdef _XOPEN_SOURCE + * _STRUCT_MCONTEXT __mcontext_data; + * #endif + * }; + * + * From the standard: + * The makecontext() function shall modify the context specified by uctx, which + * has been initialized using getcontext(). When this context is resumed using + * swapcontext() or setcontext(), program execution shall continue by calling + * func, passing it the arguments that follow argc in the makecontext() call. + * + * Before a call is made to makecontext(), the application shall ensure that the + * context being modified has a stack allocated for it. The application shall + * ensure that the value of argc matches the number of arguments of type int + * passed to func; otherwise, the behavior is undefined. + * + * makecontext will set up the uc_stack such that when setcontext or swapcontext + * is called on the ucontext, it will first execute a helper function _ctx_start() + * which will call the client specified function and then call a second + * helper _ctx_done() (which will either follow the ctxt specified by uc_link or + * exit.) + * + * void _ctx_start((void *func)(int arg1, ...), ...) + * void _ctx_done(ucontext_t *uctx); + * + * makecontext modifies the uc_stack as specified: + * + * High addresses + * __________________ <---- fp in context + * | arg n-1, arg n | + * | ... 
| + * | arg1, arg2 | + * | _______________ | <----- sp in mcontext + * | | + * | | + * | | + * | | + * | | + * | | + * Low addresses + * + * The mcontext is also modified such that: + * - sp points to the end of the arguments on the stack + * - fp points to the stack top + * - lr points to _ctx_start. + * - x19 = uctx + * - x20 = user func + * Note: It is fine to modify register state since we'll never go back to + * the state we getcontext-ed from. We modify callee save registers so that + * they are a) actually set by setcontext b) still present when we return + * from user_func in _ctx_start + * + * The first thing which _ctx_start will do is pop the first 8 arguments off the + * stack and then branch to user_func. This works because it leaves the + * remaining arguments after the first 8 from the stack. Once the client + * function returns in _ctx_start, we'll be back to the current state as + * specified above in the diagram. + * + * We can then set up the stack for calling _ctx_done + * a) Set sp = fp. + * b) Move x19 (which is callee save and therefore restored if used by user_func), to x0 + * c) Call _ctx_done() + */ + +#include +#include +#include +#include +#include + +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + +extern void _ctx_start(void (*user_func)()); void -makecontext(ucontext_t *u, void (*f)(void), int a, ...) +_ctx_done(ucontext_t *uctx) { + if (uctx->uc_link == NULL) { + _exit(0); + } else { + uctx->uc_mcsize = 0; /* To make sure that this is not called again without reinitializing */ + setcontext((ucontext_t *) uctx->uc_link); + __builtin_trap(); /* should never get here */ + } } +#define ALIGN_TO_16_BYTES(addr) (addr & ~0xf) +#define ARM64_REGISTER_ARGS 8 + +void +makecontext(ucontext_t *uctx, void (*func)(void), int argc, ...) +{ + if (uctx == NULL) { + return; + } + + if (uctx->uc_stack.ss_sp == NULL) { + goto error; + } + + if (argc < 0 || argc > NCARGS) { + goto error; + } + +#if CONFORMANCE_SPECIFIC_HACK + // There is a conformance test which initialized a ucontext A by memcpy-ing + // a ucontext B that was previously initialized with getcontext. + // getcontext(B) modified B such that B.uc_mcontext = &B.__mcontext_data; + // But by doing the memcpy of B to A, A.uc_mcontext = &B.__mcontext_data + // when that's not necessarily what we want. We therefore have to + // unfortunately reassign A.uc_mccontext = &A.__mcontext_data even though we + // don't know if A.__mcontext_data was properly initialized before we use + // it. This is really because the conformance test doesn't initialize + // properly with multiple getcontexts and instead copies contexts around. + uctx->uc_mcontext = (mcontext_t) &uctx->__mcontext_data; +#endif + + bzero(uctx->uc_stack.ss_sp, uctx->uc_stack.ss_size); + + uintptr_t fp = (char *) uctx->uc_stack.ss_sp + uctx->uc_stack.ss_size; + fp = ALIGN_TO_16_BYTES(fp); + + // All args are set up on the stack. We also make sure that we also have at + // least 8 args on the stack (and populate with 0 if the input argc < 8). + // This way _ctx_start will always have 8 args to pop out from the stack + // before it calls the client function. + int padded_argc = (argc < ARM64_REGISTER_ARGS) ? ARM64_REGISTER_ARGS : argc; + + uintptr_t sp = fp - (sizeof(int) * padded_argc); + sp = ALIGN_TO_16_BYTES(sp); + + // Populate the stack with all the args. Per arm64 calling convention ABI, we + // do not need to pad and make sure that the arguments are aligned in any + // manner. 
+ int *current_arg_addr = (int *) sp; + + va_list argv; + va_start(argv, argc); + for (int i = 0; i < argc; i++) { + *current_arg_addr = va_arg(argv, int); + current_arg_addr++; + } + va_end(argv); + + mcontext_t mctx = uctx->uc_mcontext; + +#if defined(__arm64e__) + // The set macros below read from the opaque_flags to decide how to set the + // fields (including whether to sign them) and so we need to make sure that + // we require signing always. + mctx->__ss.__opaque_flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH; #endif + + arm_thread_state64_set_fp(mctx->__ss, fp); + arm_thread_state64_set_sp(mctx->__ss, sp); + arm_thread_state64_set_lr_fptr(mctx->__ss, (void *) _ctx_start); + + mctx->__ss.__x[19] = uctx; + mctx->__ss.__x[20] = _OS_PTR_MUNGE(func); + return; +error: + uctx->uc_mcsize = 0; + return; +} + +#endif /* arm64 || x86_64 || i386 */ + +#else /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ + +void +makecontext(ucontext_t *u, void (*f)(void), int argc, ...) +{ +} + +#endif /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ diff --git a/src/ucontext/generic/setcontext.c b/src/ucontext/generic/setcontext.c index ac2faac..f037264 100644 --- a/src/ucontext/generic/setcontext.c +++ b/src/ucontext/generic/setcontext.c @@ -2,14 +2,14 @@ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,39 +17,64 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ - #define _XOPEN_SOURCE 600L #include #include +#include -#if defined(__x86_64__) || defined(__i386__) +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT #include #include extern int _setcontext(const void *); +/* This is a macro to capture all the code added in here that is purely to make + * conformance tests pass and seems to have no functional reason nor is it + * required by the standard */ +#define CONFORMANCE_SPECIFIC_HACK 1 + int setcontext(const ucontext_t *uctx) { - mcontext_t mctx = (mcontext_t)&uctx->__mcontext_data; - ucontext_t *_uctx = (ucontext_t *)uctx; - if (mctx != _uctx->uc_mcontext) - _uctx->uc_mcontext = mctx; + if (uctx->uc_mcsize == 0) { /* Invalid context */ + errno = EINVAL; + return -1; + } + sigprocmask(SIG_SETMASK, &uctx->uc_sigmask, NULL); -#if defined(__x86_64__) + mcontext_t mctx = uctx->uc_mcontext; +#if CONFORMANCE_SPECIFIC_HACK + // There is a conformance test which initialized a ucontext A by memcpy-ing + // a ucontext B that was previously initialized with getcontext. + // getcontext(B) modified B such that B.uc_mcontext = &B.__mcontext_data; + // But by doing the memcpy of B to A, A.uc_mcontext = &B.__mcontext_data + // when that's not necessarily what we want. We therefore have to + // unfortunately ignore A.uc_mccontext and use &A.__mcontext_data even though we + // don't know if A.__mcontext_data was properly initialized. 
This is really + // because the conformance test doesn't initialize properly with multiple + // getcontexts and instead copies contexts around. + // + // + // Note that this hack, is causing us to fail when restoring a ucontext from + // a signal. See Restoring context from signal + // fails on intel and arm64 platforms + mctx = (mcontext_t) &uctx->__mcontext_data; +#endif + +#if defined(__x86_64__) || defined(__arm64__) return _setcontext(mctx); #else return _setcontext(uctx); #endif } -#else +#else /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ int setcontext(const ucontext_t *uctx) @@ -58,4 +83,4 @@ setcontext(const ucontext_t *uctx) return -1; } -#endif +#endif /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ diff --git a/src/ucontext/generic/swapcontext.c b/src/ucontext/generic/swapcontext.c index 2195700..6750b5d 100644 --- a/src/ucontext/generic/swapcontext.c +++ b/src/ucontext/generic/swapcontext.c @@ -2,14 +2,14 @@ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -50,8 +50,15 @@ #define _XOPEN_SOURCE 600L #include #include +#include + +/* This is a macro to capture all the code added in here that is purely to make + * conformance tests pass and seems to have no functional reason nor is it + * required by the standard */ +#define CONFORMANCE_SPECIFIC_HACK 1 + +#if TARGET_OS_OSX || TARGET_OS_DRIVERKIT -#if defined(__x86_64__) || defined(__i386__) #pragma clang diagnostic ignored "-Wdeprecated-declarations" #include @@ -65,22 +72,34 @@ int swapcontext(ucontext_t *oucp, const ucontext_t *ucp) { int ret; - if ((oucp == NULL) || (ucp == NULL)) { errno = EINVAL; - return (-1); + return -1; } + oucp->uc_flags &= ~UCF_SWAPPED; + +#if CONFORMANCE_SPECIFIC_HACK + // getcontext overwrites uc_link so we save it and restore it + ucontext_t *next_context = oucp->uc_link; ret = getcontext(oucp); + oucp->uc_link = next_context; +#endif + if ((ret == 0) && !(oucp->uc_flags & UCF_SWAPPED)) { oucp->uc_flags |= UCF_SWAPPED; + /* In the future, when someone calls setcontext(oucp), that will return + * us to the getcontext call above with ret = 0. However, because we + * just flipped the UCF_SWAPPED bit, we will not call setcontext again + * and will return. 
*/ ret = setcontext(ucp); } + asm(""); // Prevent tailcall return (ret); } -#else +#else /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ int swapcontext(ucontext_t *oucp, const ucontext_t *ucp) @@ -89,4 +108,4 @@ swapcontext(ucontext_t *oucp, const ucontext_t *ucp) return -1; } -#endif +#endif /* TARGET_OS_OSX || TARGET_OS_DRIVERKIT */ diff --git a/src/ucontext/x86_64/_ctx_start.s b/src/ucontext/x86_64/_ctx_start.s index 5ab64a2..961240f 100644 --- a/src/ucontext/x86_64/_ctx_start.s +++ b/src/ucontext/x86_64/_ctx_start.s @@ -79,10 +79,8 @@ LABEL(__ctx_start) popq %r9 callq *%rax /* call start function */ - movq %r12, %rsp /* - * setup stack for completion routine; - * ucp is now at top of stack - */ + movq %r12, %rsp /* setup stack for completion routine; + ucp is now at top of stack. r12 is calleee save */ movq (%rsp), %rdi CALL_EXTERN(__ctx_done) /* should never return */ int $5 /* trap */ diff --git a/src/ucontext/x86_64/_setcontext.s b/src/ucontext/x86_64/_setcontext.s index 7688add..1dfe33c 100644 --- a/src/ucontext/x86_64/_setcontext.s +++ b/src/ucontext/x86_64/_setcontext.s @@ -2,14 +2,14 @@ * Copyright (c) 2007,2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/src/ucontext/x86_64/getcontext.s b/src/ucontext/x86_64/getcontext.s index e6a303c..9e729dc 100644 --- a/src/ucontext/x86_64/getcontext.s +++ b/src/ucontext/x86_64/getcontext.s @@ -2,14 +2,14 @@ * Copyright (c) 2007,2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/xcodeconfig/libplatform.aliases b/xcodeconfig/libplatform.aliases index bba8d5a..b000112 100644 --- a/xcodeconfig/libplatform.aliases +++ b/xcodeconfig/libplatform.aliases @@ -1,5 +1,3 @@ __platform_bzero ___bzero __os_lock_type_spin __os_lock_type_eliding __os_lock_type_spin __os_lock_type_transactional -_os_unfair_lock_lock_with_options _os_unfair_lock_lock_with_options_4Libc -_os_unfair_lock_unlock _os_unfair_lock_unlock_4Libc diff --git a/xcodeconfig/libplatform.xcconfig b/xcodeconfig/libplatform.xcconfig index 048f98b..033e549 100644 --- a/xcodeconfig/libplatform.xcconfig +++ b/xcodeconfig/libplatform.xcconfig @@ -62,7 +62,7 @@ ATOMICS_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libatomics_i386_$(CURRENT_VARIANT CACHECONTROL_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libcachecontrol_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libcachecontrol_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libcachecontrol_arm_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libcachecontrol_arm64_$(CURRENT_VARIANT).a $(EXTRA_CACHECONTROL_LIBRARIES) SETJMP_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libsetjmp_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libsetjmp_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libsetjmp_arm_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libsetjmp_arm64_$(CURRENT_VARIANT).a $(EXTRA_SETJMP_LIBRARIES) STRING_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libstring_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libstring_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libstring_arm_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libstring_arm64_$(CURRENT_VARIANT).a $(EXTRA_STRING_LIBRARIES) -UCONTEXT_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libucontext_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libucontext_x86_64_$(CURRENT_VARIANT).a +UCONTEXT_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libucontext_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libucontext_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libucontext_arm64_$(CURRENT_VARIANT).a IS_ZIPPERED = YES @@ -70,7 +70,7 @@ SIMULATOR_LDFLAGS = SIMULATOR_LDFLAGS[sdk=macosx*] = -Wl,-simulator_support OTHER_LDFLAGS = $(OTHER_LDFLAGS_$(TARGET_NAME)) $(CR_LDFLAGS) -OTHER_LDFLAGS_libsystem_platform = -all_load $(PLATFORM_LIBRARIES) -umbrella System -L$(SDK_INSTALL_ROOT)/usr/lib/system -ldyld $(lcompiler_rt) $(lsystem_kernel) -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libplatform.aliases,$(DIRTY_DATA_LDFLAGS) $(SIMULATOR_LDFLAGS) +OTHER_LDFLAGS_libsystem_platform = -all_load $(PLATFORM_LIBRARIES) -umbrella System -L$(SDK_INSTALL_ROOT)/usr/lib/system $(ldyld) $(lsystem_kernel) -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libplatform.aliases,$(DIRTY_DATA_LDFLAGS) $(SIMULATOR_LDFLAGS) OTHER_LIBTOOLFLAGS = $(OTHER_LIBTOOLFLAGS_$(TARGET_NAME)) OTHER_LIBTOOLFLAGS_libplatform_simple_dyld = $(CONFIGURATION_BUILD_DIR)/libsimple_$(CURRENT_VARIANT).a @@ -87,6 +87,20 @@ OTHER_LIBTOOLFLAGS_libucontext = $(UCONTEXT_LIBRARIES) lsystem_kernel = -lsystem_kernel lsystem_kernel[sdk=iphonesimulator*] = -lsystem_sim_kernel -lcompiler_rt = -lcompiler_rt -lcompiler_rt[sdk=driverkit*] = + +// rdar://problem/46882983&54282933 +// On macOS, to support the i386 watchOS Simulator, we will continue building +// libplatform with an i386 slice for the foreseeable future, even though the +// rest of the OS has dropped i386. (This also applies to libpthread and +// libsyscall). Normally, dylibs with any dependency on another dylib need +// to link libdyld for lazy stub binding. 
libdyld has many dependencies, so +// that would create a dependency cycle that leads to the whole libSystem +// umbrella keeping an i386 slice. Instead, ld64 has changed so that the +// i386 simulator_support slice of libplatform doesn't use lazy binding and so +// doesn't need -ldyld. +// So, to break the dependency cycle, macOS libplatform will not link libdyld. +// All other platforms (including DriverKit on macOS) will continue to link +// libdyld. +ldyld = -ldyld +ldyld[sdk=macos*] = diff --git a/xcodeconfig/os.xcconfig b/xcodeconfig/os.xcconfig index fb56700..5136988 100644 --- a/xcodeconfig/os.xcconfig +++ b/xcodeconfig/os.xcconfig @@ -14,7 +14,7 @@ CLANG_WARN_EMPTY_BODY = YES CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES -WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-unknown-warning-option +WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-unknown-warning-option -Wno-atomic-implicit-seq-cst COMPILER_CFLAGS = -momit-leaf-frame-pointer OTHER_CFLAGS_debug = diff --git a/xcodeconfig/perarch.xcconfig b/xcodeconfig/perarch.xcconfig index 8ec4910..187196a 100644 --- a/xcodeconfig/perarch.xcconfig +++ b/xcodeconfig/perarch.xcconfig @@ -7,6 +7,7 @@ ARCH_FAMILY_armv7s = arm ARCH_FAMILY_armv7f = arm ARCH_FAMILY_armv7k = arm ARCH_FAMILY_arm64 = arm64 +ARCH_FAMILY_undefined_arch = undefined_arch EXCLUDED_SOURCE_FILE_NAMES = * @@ -21,6 +22,10 @@ INCLUDED_SOURCE_FILE_NAMES_i386_i386 = * INCLUDED_SOURCE_FILE_NAMES__i386 = * INCLUDED_SOURCE_FILE_NAMES_x86_64_x86_64 = * INCLUDED_SOURCE_FILE_NAMES__x86_64 = * +INCLUDED_SOURCE_FILE_NAMES_undefined_arch_undefined_arch = * +INCLUDED_SOURCE_FILE_NAMES__undefined_arch = * +INCLUDED_SOURCE_FILE_NAMES_undefined_arch_ = * +INCLUDED_SOURCE_FILE_NAMES__ = * // To force fallback to generic C implementations for dyld_Sim INCLUDED_SOURCE_FILE_NAMES_x86_64_x86_64[sdk=iphonesimulator*] = diff --git a/xcodeconfig/static.xcconfig b/xcodeconfig/static.xcconfig index 92ec0e3..9cf69a0 100644 --- a/xcodeconfig/static.xcconfig +++ b/xcodeconfig/static.xcconfig @@ -3,6 +3,7 @@ // pick _static in libplatform.xcconfig BUILD_VARIANTS = static +INSTALLHDRS_SCRIPT_PHASE = NO EXECUTABLE_PREFIX = lib PRODUCT_NAME = platform INSTALL_PATH = /usr/local/lib/system -- 2.45.2
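
Editor's note (not part of the patch): the hunks above rework getcontext/makecontext/setcontext/swapcontext so that the machine state lives in the ucontext's embedded __mcontext_data and so that makecontext arranges the caller-supplied stack for _ctx_start/_ctx_done. A minimal usage sketch of that flow follows, under these assumptions: _XOPEN_SOURCE is defined so ucontext_t carries __mcontext_data, the application allocates and owns the coroutine stack (as the patch comments require), and the stack size, function names, and argument values are illustrative only. Because these interfaces are deprecated on Darwin, building the sketch would likely need -Wno-deprecated-declarations, mirroring the pragma used in the patch itself.

/*
 * Sketch: run a function on a private stack via makecontext, then resume
 * the caller through uc_link. Names and sizes here are illustrative.
 */
#define _XOPEN_SOURCE 600L
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define CORO_STACK_SIZE (64 * 1024)   /* arbitrary; caller owns this stack */

static ucontext_t main_ctx, coro_ctx;

/* makecontext only guarantees int-sized arguments. */
static void
coro(int a, int b)
{
    printf("coroutine: a=%d b=%d\n", a, b);
    /* Returning lands in _ctx_done, which follows uc_link (main_ctx). */
}

int
main(void)
{
    char *stack = malloc(CORO_STACK_SIZE);
    if (stack == NULL)
        return 1;

    /* Initialize the context, then point it at our stack and successor. */
    if (getcontext(&coro_ctx) != 0)
        return 1;
    coro_ctx.uc_stack.ss_sp = stack;
    coro_ctx.uc_stack.ss_size = CORO_STACK_SIZE;
    coro_ctx.uc_link = &main_ctx;     /* resume here when coro() returns */

    makecontext(&coro_ctx, (void (*)(void))coro, 2, 1, 2);

    /* Save the current context in main_ctx and switch to the coroutine. */
    if (swapcontext(&main_ctx, &coro_ctx) != 0)
        return 1;

    printf("back in main\n");
    free(stack);
    return 0;
}

In terms of the patch, swapcontext() above is the call whose getcontext/UCF_SWAPPED dance prevents re-running setcontext when main_ctx is later resumed, and coro() returning with a non-NULL uc_link is the path through _ctx_done that setcontext-s back into main_ctx rather than calling _exit(0).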