OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock(os_unfair_lock_t lock);
+/*!
+ * @function os_unfair_lock_assert_owner
+ *
+ * @abstract
+ * Asserts that the calling thread is the current owner of the specified
+ * unfair lock.
+ *
+ * @discussion
+ * If the lock is currently owned by the calling thread, this function returns.
+ *
+ * If the lock is unlocked or owned by a different thread, this function
+ * asserts and terminates the process.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_assert_owner(os_unfair_lock_t lock);
+
+/*!
+ * @function os_unfair_lock_assert_not_owner
+ *
+ * @abstract
+ * Asserts that the calling thread is not the current owner of the specified
+ * unfair lock.
+ *
+ * @discussion
+ * If the lock is unlocked or owned by a different thread, this function
+ * returns.
+ *
+ * If the lock is currently owned by the calling thread, this function asserts
+ * and terminates the process.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_assert_not_owner(os_unfair_lock_t lock);
+
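A hedged usage sketch of the two assertions declared above (illustrative only; the lock, counter, and helper names are hypothetical, the assertion APIs are the ones added to <os/lock.h> in this diff):

// Sketch only: g_lock, g_counter and the helpers are made-up names.
#include <os/lock.h>

static os_unfair_lock g_lock = OS_UNFAIR_LOCK_INIT;
static int g_counter;

static void
_update_counter_locked(void)
{
	// Document (and enforce) the locking contract: caller must hold g_lock.
	os_unfair_lock_assert_owner(&g_lock);
	g_counter++;
}

void
increment_counter(void)
{
	// Catches accidental recursive locking, which would otherwise deadlock.
	os_unfair_lock_assert_not_owner(&g_lock);
	os_unfair_lock_lock(&g_lock);
	_update_counter_locked();
	os_unfair_lock_unlock(&g_lock);
}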
__END_DECLS
OS_ASSUME_NONNULL_END
_spins++; \
_os_preemption_yield(_spins); \
} } while (0)
-#elif TARGET_OS_EMBEDDED
+#else
// <rdar://problem/15508918>
#ifndef OS_WAIT_SPINS
#define OS_WAIT_SPINS 1024
os_hardware_pause(); \
} \
} } while (0)
-#else
-#define _os_wait_until(c) do { \
- while (!(c)) { \
- os_hardware_pause(); \
- } } while (0)
#endif
#pragma mark -
#pragma mark -
#pragma mark _os_preemption_yield
-#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) && !(TARGET_IPHONE_SIMULATOR && \
- IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090)
-#define OS_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS
-#else
-#define OS_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS
-#endif
#define _os_preemption_yield(n) thread_switch(MACH_PORT_NULL, \
- OS_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n))
+ SWITCH_OPTION_OSLOCK_DEPRESS, (mach_msg_timeout_t)(n))
#endif // __OS_YIELD__
* Low-level lock SPI
*/
-#define OS_LOCK_SPI_VERSION 20160406
+#define OS_LOCK_SPI_VERSION 20171006
/*!
* @typedef os_lock_t
#define OS_LOCK_DECL(type, size) \
typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
private: \
- OS_LOCK_TYPE_STRUCT(type) * const osl_type OS_UNUSED; \
+ OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
public: \
constexpr OS_LOCK(type)() : \
#define OS_LOCK_DECL(type, size) \
typedef OS_LOCK_STRUCT(type) { \
- OS_LOCK_TYPE_STRUCT(type) * const osl_type; \
+ OS_LOCK_TYPE_STRUCT(type) * osl_type; \
uintptr_t _osl_##type##_opaque[size-1]; \
} OS_LOCK(type)
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
os_unfair_lock_options_t options);
+/*! @group os_unfair_lock no-TSD interfaces
+ *
+ * Like the above, but don't require being on a thread with valid TSD, so they
+ * can be called from injected mach-threads. The normal routines use the TSD
+ * value for mach_thread_self(); these routines use MACH_PORT_DEAD for the
+ * locked value instead. As a result, they will be unable to resolve priority
+ * inversions.
+ *
+ * This should only be used by libpthread.
+ *
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);
+
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
+
+/*! @group os_unfair_recursive_lock SPI
+ *
+ * @abstract
+ * Similar to os_unfair_lock, but recursive.
+ *
+ * @discussion
+ * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
+ */
+
+#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
+ __OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
+ __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
+ ((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
+#elif defined(__cplusplus) && __cplusplus >= 201103L
+#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
+ (os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
+#elif defined(__cplusplus)
+#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
+ (os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
+#else
+#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
+ {OS_UNFAIR_LOCK_INIT, 0}
+#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT
+
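A minimal sketch of how the initializer above pairs with the recursive-lock SPI declared below in this header (the lock variable and helper names are hypothetical):

// Sketch only; os_unfair_recursive_lock and its routines are SPI from this header.
static os_unfair_recursive_lock g_rlock = OS_UNFAIR_RECURSIVE_LOCK_INIT;

static void
_helper(void)
{
	// Re-acquiring from the owning thread just increments ourl_count.
	os_unfair_recursive_lock_lock(&g_rlock);
	/* ... */
	os_unfair_recursive_lock_unlock(&g_rlock);
}

void
do_work(void)
{
	os_unfair_recursive_lock_lock(&g_rlock);
	_helper();                                 // nested acquire is allowed
	os_unfair_recursive_lock_unlock(&g_rlock); // outermost unlock releases the lock
}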
/*!
- * @function os_unfair_lock_assert_owner
+ * @typedef os_unfair_recursive_lock
*
* @abstract
- * Asserts that the calling thread is the current owner of the specified
- * unfair lock.
+ * Low-level lock that allows waiters to block efficiently on contention.
*
* @discussion
- * If the lock is currently owned by the calling thread, this function returns.
+ * See os_unfair_lock.
*
- * If the lock is unlocked or owned by a different thread, this function
- * asserts and terminates the process.
+ */
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
+typedef struct os_unfair_recursive_lock_s {
+ os_unfair_lock ourl_lock;
+ uint32_t ourl_count;
+} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
+
+/*!
+ * @function os_unfair_recursive_lock_lock_with_options
*
- * @param lock
- * Pointer to an os_unfair_lock.
+ * @abstract
+ * See os_unfair_lock_lock_with_options
*/
-OS_UNFAIR_LOCK_AVAILABILITY
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
-void os_unfair_lock_assert_owner(os_unfair_lock_t lock);
+void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
+ os_unfair_lock_options_t options);
/*!
- * @function os_unfair_lock_assert_not_owner
+ * @function os_unfair_recursive_lock_lock
*
* @abstract
- * Asserts that the calling thread is not the current owner of the specified
- * unfair lock.
+ * See os_unfair_lock_lock
+ */
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
+void
+os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
+{
+ os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
+}
+
+/*!
+ * @function os_unfair_recursive_lock_trylock
*
- * @discussion
- * If the lock is unlocked or owned by a different thread, this function
- * returns.
+ * @abstract
+ * See os_unfair_lock_trylock
+ */
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
+bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
+
+/*!
+ * @function os_unfair_recursive_lock_unlock
*
- * If the lock is currently owned by the current thread, this function asserts
- * and terminates the process.
+ * @abstract
+ * See os_unfair_lock_unlock
+ */
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
+
+/*!
+ * @function os_unfair_recursive_lock_tryunlock4objc
*
- * @param lock
- * Pointer to an os_unfair_lock.
+ * @abstract
+ * See os_unfair_lock_unlock
*/
-OS_UNFAIR_LOCK_AVAILABILITY
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
-void os_unfair_lock_assert_not_owner(os_unfair_lock_t lock);
+bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
-/*! @group os_unfair_lock no-TSD interfaces
+/*!
+ * @function os_unfair_recursive_lock_assert_owner
*
- * Like the above, but don't require being on a thread with valid TSD, so they
- * can be called from injected mach-threads. The normal routines use the TSD
- * value for mach_thread_self(), these routines use MACH_PORT_DEAD for the
- * locked value instead. As a result, they will be unable to resolve priority
- * inversions.
+ * @abstract
+ * See os_unfair_lock_assert_owner
+ */
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
+void
+os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
+{
+ os_unfair_lock_assert_owner(&lock->ourl_lock);
+}
+
+/*!
+ * @function os_unfair_recursive_lock_assert_not_owner
*
- * This should only be used by libpthread.
+ * @abstract
+ * See os_unfair_lock_assert_not_owner
+ */
+OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
+void
+os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
+{
+ os_unfair_lock_assert_not_owner(&lock->ourl_lock);
+}
+
+#if __has_attribute(cleanup)
+
+/*!
+ * @function os_unfair_lock_scoped_guard_unlock
*
+ * @abstract
+ * Used by os_unfair_lock_lock_scoped_guard
*/
OS_UNFAIR_LOCK_AVAILABILITY
-OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
-void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);
+OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
+void
+os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
+{
+ os_unfair_lock_unlock(*lock);
+}
-OS_UNFAIR_LOCK_AVAILABILITY
-OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
-void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
+/*!
+ * @function os_unfair_lock_lock_scoped_guard
+ *
+ * @abstract
+ * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
+ * automatically called when the enclosing C scope ends.
+ *
+ * @param name
+ * Name for the variable holding the guard.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ *
+ * @see os_unfair_lock_lock
+ * @see os_unfair_lock_unlock
+ */
+#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
+ os_unfair_lock_t \
+ __attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
+ guard_name = lock; \
+ os_unfair_lock_lock(guard_name)
+
+#endif // __has_attribute(cleanup)
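A short sketch of the guard macro in use (illustrative; the lock and counter are hypothetical, and a compiler supporting __attribute__((cleanup)) is assumed):

// Sketch only: g_lock and g_count are made-up names.
static os_unfair_lock g_lock = OS_UNFAIR_LOCK_INIT;
static int g_count;

int
next_value(void)
{
	os_unfair_lock_lock_scoped_guard(guard, &g_lock);
	return ++g_count;
	// os_unfair_lock_unlock(guard) runs automatically when the scope ends,
	// including on return paths, via the cleanup attribute.
}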
__END_DECLS
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
- uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+ uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
if (!_pthread_has_direct_tsd()) {
return os_unfair_lock_lock_with_options(lock, options);
}
- uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+ uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
- uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+ uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
- uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+ uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
void
os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
- uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
+ uint32_t mts = MACH_PORT_DEAD;
os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
(_os_atomic_unfair_lock*)lock, &unlocked, locked,
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
- uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
+ uint32_t mts = MACH_PORT_DEAD;
os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
(_os_atomic_unfair_lock*)lock, &locked, unlocked,
#include <platform/string.h>
-__BEGIN_DECLS
-
-/* Helpers for other common non-primitive routines */
-
-__header_always_inline
-size_t
-_platform_strlen(const char *s) {
- const char *t = _platform_memchr(s, '\0', SIZE_MAX);
- return (uintptr_t)t - (uintptr_t)s;
-}
-
-__header_always_inline
-size_t
-_platform_strlcpy(char * restrict dst, const char * restrict src, size_t maxlen) {
- const size_t srclen = _platform_strlen(src);
- if (srclen < maxlen) {
- _platform_memmove(dst, src, srclen+1);
- } else if (maxlen != 0) {
- _platform_memmove(dst, src, maxlen-1);
- dst[maxlen-1] = '\0';
- }
- return srclen;
-}
-
-__END_DECLS
-
/* Compat macros for primitives */
#define bzero _platform_bzero
#define memchr _platform_memchr
#define memset_pattern16 _platform_memset_pattern16
#define strchr _platform_strchr
#define strcmp _platform_strcmp
-#define strncmp _platform_strncmp
-
-/* Compat macros for non-primitive helpers */
+#define strcpy _platform_strcpy
+#define strlcat _platform_strlcat
#define strlcpy _platform_strlcpy
#define strlen _platform_strlen
+#define strncmp _platform_strncmp
+#define strncpy _platform_strncpy
+#define strnlen _platform_strnlen
+#define strstr _platform_strstr
#endif /* _PLATFORM_COMPAT_H_ */
#ifndef _PLATFORM_STRING_H_
#define _PLATFORM_STRING_H_
+#include <_types.h>
#include <sys/cdefs.h>
#include <Availability.h>
#include <TargetConditionals.h>
+#include <sys/_types/_size_t.h>
+#include <sys/_types/_null.h>
#include <stdint.h>
#include <sys/types.h>
#define _PLATFORM_OPTIMIZED_MEMSET_PATTERN16 0
#define _PLATFORM_OPTIMIZED_STRCHR 0
#define _PLATFORM_OPTIMIZED_STRCMP 0
+#define _PLATFORM_OPTIMIZED_STRCPY 0
+#define _PLATFORM_OPTIMIZED_STRLCAT 0
+#define _PLATFORM_OPTIMIZED_STRLCPY 0
+#define _PLATFORM_OPTIMIZED_STRLEN 0
#define _PLATFORM_OPTIMIZED_STRNCMP 0
+#define _PLATFORM_OPTIMIZED_STRNCPY 0
+#define _PLATFORM_OPTIMIZED_STRNLEN 0
+#define _PLATFORM_OPTIMIZED_STRSTR 0
/* Primitives used to implement C memory and string routines */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
void *
-_platform_memccpy(void *restrict dst, const void *restrict src, int c, size_t n);
+_platform_memccpy(void *__restrict dst, const void *__restrict src, int c, size_t n);
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
void *
int
_platform_strcmp(const char *s1, const char *s2);
+__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+char *
+_platform_strcpy(char * __restrict dst, const char * __restrict src);
+
+__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+size_t
+_platform_strlcat(char * __restrict dst, const char * __restrict src, size_t maxlen);
+
+__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+size_t
+_platform_strlcpy(char * __restrict dst, const char * __restrict src, size_t maxlen);
+
+__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+size_t
+_platform_strlen(const char *str);
+
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
int
_platform_strncmp(const char *s1, const char *s2, size_t n);
+__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+char *
+_platform_strncpy(char * __restrict dst, const char * __restrict src, size_t maxlen);
+
+__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+size_t
+_platform_strnlen(const char *s, size_t maxlen);
+
+__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
+char *
+_platform_strstr(const char *s, const char *find);
+
__END_DECLS
#endif /* _PLATFORM_STRING_H_ */
#include <TargetConditionals.h>
#include <stdlib.h>
+#include <platform/string.h>
+#include <_libkernel_init.h>
struct ProgramVars; /* forward reference */
extern void _simple_asl_init(const char *envp[], const struct ProgramVars *vars);
extern void __pfz_setup(const char *apple[]);
+#if !VARIANT_STATIC
+static const struct _libkernel_string_functions _platform_string_functions = {
+ .version = 1,
+ .bzero = _platform_bzero,
+ .memchr = _platform_memchr,
+ .memcmp = _platform_memcmp,
+ .memmove = _platform_memmove,
+ .memccpy = _platform_memccpy,
+ .memset = _platform_memset,
+ .strchr = _platform_strchr,
+ .strcmp = _platform_strcmp,
+ .strcpy = _platform_strcpy,
+ .strlcat = _platform_strlcat,
+ .strlcpy = _platform_strlcpy,
+ .strlen = _platform_strlen,
+ .strncmp = _platform_strncmp,
+ .strncpy = _platform_strncpy,
+ .strnlen = _platform_strnlen,
+ .strstr = _platform_strstr,
+};
+#endif
+
void
__libplatform_init(void *future_use __unused, const char *envp[],
const char *apple[], const struct ProgramVars *vars)
{
/* In the Simulator, we just provide _simple for dyld */
-#if !TARGET_IPHONE_SIMULATOR
+#if !TARGET_OS_SIMULATOR
__pfz_setup(apple);
#endif
_simple_asl_init(envp, vars);
+
+#if !VARIANT_STATIC
+ __libkernel_platform_init(&_platform_string_functions);
+#endif
}
#include "resolver.h"
#include "libkern/OSAtomic.h"
-#if TARGET_OS_EMBEDDED
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
OS_ATOMIC_EXPORT
int32_t OSAtomicAdd32(int32_t v, volatile int32_t *p);
os_atomic_thread_fence(seq_cst);
}
-#endif // TARGET_OS_EMBEDDED
+#endif // TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
struct _os_empty_files_are_not_c_files;
* @APPLE_APACHE_LICENSE_HEADER_END@
*/
+#define OS_UNFAIR_LOCK_INLINE 1
#include "lock_internal.h"
#include "os/internal.h"
#include "resolver.h"
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);
-static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;
+#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
+static const OSSpinLock _OSSpinLockLocked = 1;
+#else
+static const OSSpinLock _OSSpinLockLocked = -1;
+#endif
+
#if OS_ATOMIC_UP
#define OS_LOCK_NO_OWNER MACH_PORT_NULL
-OS_ALWAYS_INLINE
+OS_ALWAYS_INLINE OS_CONST
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
}
+#pragma mark -
+#pragma mark os_unfair_recursive_lock
+
+OS_ATOMIC_EXPORT
+void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
+ os_unfair_lock_options_t options);
+
+OS_ATOMIC_EXPORT
+bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
+
+OS_ATOMIC_EXPORT
+void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
+
+OS_ATOMIC_EXPORT
+bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
+
+
+static inline os_lock_owner_t
+_os_unfair_lock_owner(os_unfair_lock_t lock)
+{
+ _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+ return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
+}
+
+void
+os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
+ os_unfair_lock_options_t options)
+{
+ os_lock_owner_t cur, self = _os_lock_owner_get_self();
+ _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;
+
+ if (likely(os_atomic_cmpxchgv2o(l, oul_value,
+ OS_LOCK_NO_OWNER, self, &cur, acquire))) {
+ return;
+ }
+
+ if (OS_ULOCK_OWNER(cur) == self) {
+ lock->ourl_count++;
+ return;
+ }
+
+ return _os_unfair_lock_lock_slow(l, self, options);
+}
+
+bool
+os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
+{
+ os_lock_owner_t cur, self = _os_lock_owner_get_self();
+ _os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;
+
+ if (likely(os_atomic_cmpxchgv2o(l, oul_value,
+ OS_LOCK_NO_OWNER, self, &cur, acquire))) {
+ return true;
+ }
+
+ if (likely(OS_ULOCK_OWNER(cur) == self)) {
+ lock->ourl_count++;
+ return true;
+ }
+
+ return false;
+}
+
+
+OS_ALWAYS_INLINE
+static inline void
+_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
+ os_lock_owner_t self)
+{
+ if (unlikely(lock->ourl_count)) {
+ os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
+ if (unlikely(cur != self)) {
+ _os_unfair_lock_unowned_abort(cur);
+ }
+ lock->ourl_count--;
+ return;
+ }
+
+ _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+ os_ulock_value_t current;
+ current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
+ if (likely(current == self)) return;
+ return _os_unfair_lock_unlock_slow(l, current, self, 0);
+}
+
+void
+os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
+{
+ os_lock_owner_t self = _os_lock_owner_get_self();
+ _os_unfair_recursive_lock_unlock(lock, self);
+}
+
+bool
+os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
+{
+ os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
+ os_lock_owner_t self = _os_lock_owner_get_self();
+ if (likely(cur == self)) {
+ _os_unfair_recursive_lock_unlock(lock, self);
+ return true;
+ }
+ return false;
+}
+
+
#pragma mark -
#pragma mark _os_lock_unfair_t
typedef struct os_once_gate_s {
union {
os_ulock_value_t ogo_lock;
- os_once_t ogo_once;
+ uintptr_t ogo_once;
};
} os_once_gate_s, *os_once_gate_t;
-#define OS_ONCE_INIT ((os_once_t)0l)
-#define OS_ONCE_DONE (~(os_once_t)0l)
+#define OS_ONCE_INIT ((uintptr_t)0l)
+#define OS_ONCE_DONE (~(uintptr_t)0l)
+
+#if defined(__i386__) || defined(__x86_64__)
+#define OS_ONCE_USE_QUIESCENT_COUNTER 0
+#else
+#define OS_ONCE_USE_QUIESCENT_COUNTER 1
+#endif
OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);
}
-OS_NOINLINE
-static void
-_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+#define OS_ONCE_MAKE_GEN(gen) (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
+#define OS_ONCE_IS_GEN(gen) (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)
+
+// the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
+// all CPUs have performed a context switch.
+//
+// To make sure all CPUs context switched at least once since `gen`,
+// we need to observe 4 increments, see libdispatch/src/shims/lock.h
+#define OS_ONCE_GEN_SAFE_DELTA (4 << 2)
+
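A small standalone check of the generation arithmetic used below (illustrative only; the low-bit value is an assumption and cancels out of the subtraction anyway):

#include <assert.h>
#include <stdint.h>

// Assumed stand-ins for OS_ULOCK_NOWAITERS_BIT / OS_ONCE_MAKE_GEN above.
#define NOWAITERS_BIT 1u
#define MAKE_GEN(g)   (((uintptr_t)(g) << 2) + NOWAITERS_BIT)

int
main(void)
{
	uintptr_t gen = MAKE_GEN(10);            // captured when the counter read 10
	// Four counter increments later the delta reaches OS_ONCE_GEN_SAFE_DELTA:
	assert(MAKE_GEN(14) - gen == (4 << 2));
	assert(MAKE_GEN(13) - gen <  (4 << 2));  // not yet safe to publish DONE
	return 0;
}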
+OS_ALWAYS_INLINE
+static inline uintptr_t
+_os_once_generation(void)
+{
+ uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
+ return OS_ONCE_MAKE_GEN(value);
+}
+
+OS_ALWAYS_INLINE
+static inline uintptr_t
+_os_once_mark_quiescing(os_once_gate_t og)
{
- os_ulock_value_t tid_old, tid_new;
+ return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
+}
- for (;;) {
- os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
- switch (tid_old) {
- case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
- case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
- os_atomic_rmw_loop_give_up(return);
- }
- tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
- if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
- });
- if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self, 0))) {
- return _os_once_gate_recursive_abort(self);
- }
- int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
- gate, tid_new, 0);
- if (unlikely(ret < 0)) {
- switch (-ret) {
- case EINTR:
- case EFAULT:
- continue;
- case EOWNERDEAD:
- _os_once_gate_corruption_abort(tid_old);
- break;
- default:
- __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
- }
- }
+OS_ALWAYS_INLINE
+static void
+_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
+{
+ if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
+ os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
}
}
+#else
+OS_ALWAYS_INLINE
+static inline uintptr_t
+_os_once_mark_done(os_once_gate_t og)
+{
+ return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
+}
+#endif
OS_NOINLINE
static void
-_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
+_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
os_lock_owner_t self)
{
if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
}
for (;;) {
int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
- gate, 0);
+ &og->ogo_lock, 0);
if (unlikely(ret < 0)) {
switch (-ret) {
case EINTR:
}
}
-OS_ALWAYS_INLINE
+OS_NOINLINE
static void
-_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
- os_once_t value)
+_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
+ os_lock_owner_t self)
{
- os_ulock_value_t current;
-#if defined(__i386__) || defined(__x86_64__)
- // On Intel, any load is a load-acquire, so we don't need to be fancy
- current = (os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, release);
+ uintptr_t v;
+
+ func(ctxt);
+
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+ v = _os_once_mark_quiescing(og);
#else
-# error os_once algorithm not available for this architecture
+ v = _os_once_mark_done(og);
#endif
- if (likely(current == self)) return;
- _os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
+ if (likely((os_ulock_value_t)v == self)) return;
+ _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
+}
+
+OS_NOINLINE
+static void
+_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
+ os_lock_owner_t self)
+{
+ uintptr_t old, new;
+
+ for (;;) {
+ os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
+ if (old == OS_ONCE_DONE) {
+ os_atomic_rmw_loop_give_up(return);
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+ } else if (OS_ONCE_IS_GEN(old)) {
+ os_atomic_rmw_loop_give_up({
+ os_atomic_thread_fence(acquire);
+ return _os_once_mark_done_if_quiesced(og, old);
+ });
+#endif
+ } else if (old == OS_ONCE_INIT) {
+ // __os_once_reset was used, try to become the new initializer
+ new = (uintptr_t)self;
+ } else {
+ new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
+ if (new == old) os_atomic_rmw_loop_give_up(break);
+ }
+ });
+ if (old == OS_ONCE_INIT) {
+ // see comment in _os_once, pairs with the release barrier
+ // in __os_once_reset()
+ os_atomic_thread_fence(acquire);
+ return _os_once_callout(og, ctxt, func, self);
+ }
+ if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
+ return _os_once_gate_recursive_abort(self);
+ }
+ int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
+ &og->ogo_lock, (os_ulock_value_t)new, 0);
+ if (unlikely(ret < 0)) {
+ switch (-ret) {
+ case EINTR:
+ case EFAULT:
+ continue;
+ case EOWNERDEAD:
+ _os_once_gate_corruption_abort((os_lock_owner_t)old);
+ break;
+ default:
+ __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
+ }
+ }
+ }
}
// Atomically resets the once value to zero and then signals all
-// pending waiters to return from their _os_once_gate_wait_slow()
+// pending waiters to return from their __ulock_wait()
void
__os_once_reset(os_once_t *val)
{
os_once_gate_t og = (os_once_gate_t)val;
os_lock_owner_t self = _os_lock_owner_get_self();
- _os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
+ uintptr_t v;
+
+ v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
+ if (likely((os_ulock_value_t)v == self)) return;
+ return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}
void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
os_once_gate_t og = (os_once_gate_t)val;
- os_lock_owner_t self = _os_lock_owner_get_self();
- os_once_t v = (os_once_t)self;
+ os_lock_owner_t self;
+ uintptr_t v;
+
+#if OS_ONCE_USE_QUIESCENT_COUNTER
+ v = os_atomic_load(&og->ogo_once, acquire);
+ if (likely(OS_ONCE_IS_GEN(v))) {
+ return _os_once_mark_done_if_quiesced(og, v);
+ }
+#endif
+
+ self = _os_lock_owner_get_self();
+ v = (uintptr_t)self;
- if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
- func(ctxt);
- _os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
- } else {
- _os_once_gate_wait_slow(&og->ogo_lock, self);
+ // The acquire barrier pairs with the release in __os_once_reset()
+ // for cases when a previous initializer failed.
+ if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
+ return _os_once_callout(og, ctxt, func, self);
}
+ return _os_once_gate_wait(og, ctxt, func, self);
}
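An illustrative caller of the exported _os_once() above (the token, context, and helper names are hypothetical; clients normally reach this path through an inline os_once() wrapper rather than calling _os_once() directly):

// Sketch only: init_token, config_value and the helpers are made-up names.
static os_once_t init_token;
static int config_value;

static void
_init_config(void *ctxt)
{
	// Runs exactly once; concurrent callers block in _os_once_gate_wait()
	// until the once value is published as done (or as a generation).
	config_value = *(int *)ctxt;
}

int
get_config(void)
{
	int fallback = 42;
	_os_once(&init_token, &fallback, _init_config);
	return config_value;
}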
#define OS_LOCK_STRUCT_DECL_INTERNAL(type, ...) \
typedef struct OS_LOCK_S_INTERNAL(type) { \
- OS_LOCK_TYPE_STRUCT(type) * const osl_type; \
+ OS_LOCK_TYPE_STRUCT(type) * osl_type; \
__VA_ARGS__ \
} OS_LOCK_S_INTERNAL(type); \
typedef OS_LOCK_STRUCT_INTERNAL(type) *OS_LOCK_T_INTERNAL(type)
/*
- * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2018 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <architecture/arm/asm_help.h>
#include "_setjmp.h"
#include <arm/arch.h>
+#include <os/tsd.h>
/* int _longjmp(jmp_buf env, int val); */
ENTRY_POINT(__longjmp)
- ldmia r0!, { r4-r8, r10-r11, sp, lr }
+ movs r12, r1
+ ldmia r0!, { r1-r6, r8, r10-r11 }
vldmia r0, { d8-d15 }
- movs r0, r1
+ movs r0, r12
moveq r0, #1
+ _OS_PTR_MUNGE_TOKEN(r12, r12)
+ _OS_PTR_UNMUNGE(r7, r1, r12) // fp
+ _OS_PTR_UNMUNGE(lr, r2, r12)
+ _OS_PTR_UNMUNGE(sp, r3, r12)
bx lr
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2018 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <architecture/arm/asm_help.h>
#include "_setjmp.h"
#include <arm/arch.h>
+#include <os/tsd.h>
ENTRY_POINT(__setjmp)
- stmia r0!, { r4-r8, r10-r11, sp, lr }
+ _OS_PTR_MUNGE_TOKEN(r12, r12)
+ _OS_PTR_MUNGE(r1, r7, r12) // fp
+ _OS_PTR_MUNGE(r2, lr, r12)
+ _OS_PTR_MUNGE(r3, sp, r12)
+ stmia r0!, { r1-r6, r8, r10-r11 }
vstmia r0, { d8-d15 }
- mov r0, #0
- bx lr
+ mov r0, #0
+ bx lr
/*
- * Copyright (c) 2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#define JMP_r23_24 #0x20
#define JMP_r25_26 #0x30
#define JMP_r27_28 #0x40
-#define JMP_r29_lr #0x50
-#define JMP_fp_sp #0x60
-
+#define JMP_fp_lr #0x50
+#define JMP_sp_rsvd #0x60 /* second field is reserved/unused */
#define JMP_d8_d9 #0x70
#define JMP_d10_d11 #0x80
#define JMP_d12_d13 #0x90
#define JMP_sigflag #0xB8
#include <architecture/arm/asm_help.h>
+#include <os/tsd.h>
+
/* int _setjmp(jmp_buf env); */
ENTRY_POINT(__setjmp)
- add x1, sp, #0 /* can't STP from sp */
+ _OS_PTR_MUNGE_TOKEN(x16, x16)
+ _OS_PTR_MUNGE(x10, fp, x16)
+ _OS_PTR_MUNGE(x11, lr, x16)
+ mov x12, sp
+ _OS_PTR_MUNGE(x12, x12, x16)
stp x19, x20, [x0, JMP_r19_20]
stp x21, x22, [x0, JMP_r21_22]
stp x23, x24, [x0, JMP_r23_24]
stp x25, x26, [x0, JMP_r25_26]
stp x27, x28, [x0, JMP_r27_28]
- stp x29, lr, [x0, JMP_r29_lr]
- stp fp, x1, [x0, JMP_fp_sp]
+ stp x10, x11, [x0, JMP_fp_lr]
+ str x12, [x0, JMP_sp_rsvd]
stp d8, d9, [x0, JMP_d8_d9]
stp d10, d11, [x0, JMP_d10_d11]
stp d12, d13, [x0, JMP_d12_d13]
stp d14, d15, [x0, JMP_d14_d15]
- mov x0, #0
+ mov w0, #0
ret
/* void _longjmp(jmp_buf env, int val); */
ENTRY_POINT(__longjmp)
+ _OS_PTR_MUNGE_TOKEN(x16, x16)
ldp x19, x20, [x0, JMP_r19_20]
ldp x21, x22, [x0, JMP_r21_22]
ldp x23, x24, [x0, JMP_r23_24]
ldp x25, x26, [x0, JMP_r25_26]
ldp x27, x28, [x0, JMP_r27_28]
- ldp x29, lr, [x0, JMP_r29_lr]
- ldp fp, x2, [x0, JMP_fp_sp]
+ ldp x10, x11, [x0, JMP_fp_lr]
+ ldr x12, [x0, JMP_sp_rsvd]
ldp d8, d9, [x0, JMP_d8_d9]
ldp d10, d11, [x0, JMP_d10_d11]
ldp d12, d13, [x0, JMP_d12_d13]
ldp d14, d15, [x0, JMP_d14_d15]
- add sp, x2, #0
- mov x0, x1
- cmp x0, #0 /* longjmp returns 1 if val is 0 */
- b.ne 1f
- add x0, x0, #1
-1: ret
+ _OS_PTR_UNMUNGE(fp, x10, x16)
+ _OS_PTR_UNMUNGE(lr, x11, x16)
+ _OS_PTR_UNMUNGE(x12, x12, x16)
+ mov sp, x12
+ cmp w1, #0
+ csinc w0, w1, wzr, ne
+ ret
/* int sigsetjmp(sigjmp_buf env, int savemask); */
ENTRY_POINT(_sigsetjmp)
- str x1, [x0, JMP_sigflag]
- cmp x1, #0
- b.ne 1f
+ str w1, [x0, JMP_sigflag]
+ cbnz w1, 1f
b __setjmp
1:
/* else, fall through */
ENTRY_POINT(_setjmp)
stp x21, lr, [x0]
mov x21, x0
-
- mov x0, #1
+
+ orr w0, wzr, #0x1
mov x1, #0
add x2, x21, JMP_sig
CALL_EXTERNAL(_sigprocmask)
ldp x21, lr, [x0]
b __setjmp
+
/* void siglongjmp(sigjmp_buf env, int val); */
ENTRY_POINT(_siglongjmp)
- ldr x2, [x0, JMP_sigflag]
- cmp x2, #0
- b.ne 1f
+ ldr w8, [x0, JMP_sigflag]
+ cbnz w8, 1f
b __longjmp
1:
/* else, fall through */
sub sp, sp, #16
mov x21, x0 // x21/x22 will be restored by __longjmp
mov x22, x1
- ldr x0, [x21, JMP_sig] // restore the signal mask
- str x0, [sp, #8]
- add x1, sp, #8 // set
+ ldr x8, [x21, JMP_sig] // restore the signal mask
+ str x8, [sp, #8]
orr w0, wzr, #0x3 // SIG_SETMASK
- movz x2, #0 // oset
+ add x1, sp, #8 // set
+ mov x2, #0 // oset
CALL_EXTERNAL(_sigprocmask)
mov x0, x21
mov x1, x22
--- /dev/null
+/*
+ * Copyright (c) 1999-2017 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
+ *
+ * @(#)sigaction.c 1.0
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/signal.h>
+#include <errno.h>
+
+// keep in sync with BSD_KERNEL_PRIVATE value in xnu/bsd/sys/signal.h
+#define SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP 0x0400
+
+/*
+ * Intercept the sigaction syscall and use our signal trampoline
+ * as the signal handler instead. The code here is derived
+ * from sigvec in sys/kern_sig.c.
+ */
+extern int __sigaction (int, struct __sigaction * __restrict,
+ struct sigaction * __restrict);
+
+int
+__platform_sigaction (int sig, const struct sigaction * __restrict nsv,
+ struct sigaction * __restrict osv)
+{
+ extern void _sigtramp();
+ struct __sigaction sa;
+ struct __sigaction *sap;
+ int ret;
+
+ if (sig <= 0 || sig >= NSIG || sig == SIGKILL || sig == SIGSTOP) {
+ errno = EINVAL;
+ return (-1);
+ }
+ sap = (struct __sigaction *)0;
+ if (nsv) {
+ sa.sa_handler = nsv->sa_handler;
+ sa.sa_tramp = _sigtramp;
+ sa.sa_mask = nsv->sa_mask;
+ sa.sa_flags = nsv->sa_flags | SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP;
+ sap = &sa;
+ }
+ ret = __sigaction(sig, sap, osv);
+ return ret;
+}
#import <ucontext.h>
#import <mach/thread_status.h>
#include <TargetConditionals.h>
+#import <os/internal.h>
-extern int __sigreturn(ucontext_t *, int);
+extern int __sigreturn(ucontext_t *, int, uintptr_t);
/*
* sigvec registers _sigtramp as the handler for any signal requiring
/* On i386, i386/sys/_sigtramp.s defines this. There is no in_sigtramp on arm */
#if defined(__DYNAMIC__) && defined(__x86_64__)
-__attribute__((visibility("hidden")))
+OS_NOEXPORT
int __in_sigtramp = 0;
#endif
/* These defn should match the kernel one */
#define UC_TRAD 1
#define UC_FLAVOR 30
-#if defined(__ppc__) || defined(__ppc64__)
-#define UC_TRAD64 20
-#define UC_TRAD64_VEC 25
-#define UC_FLAVOR_VEC 35
-#define UC_FLAVOR64 40
-#define UC_FLAVOR64_VEC 45
-#define UC_DUAL 50
-#define UC_DUAL_VEC 55
-
- /* The following are valid mcontext sizes */
-#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
-
-#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
-
-#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
-
-#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
-#endif
#define UC_SET_ALT_STACK 0x40000000
#define UC_RESET_ALT_STACK 0x80000000
* to having more than one set of registers, etc., for the various 32/64 etc.
* contexts)..
*/
+OS_NOEXPORT
void
_sigunaltstack(int set)
{
/* sigreturn(uctx, ctxstyle); */
/* syscall (SYS_SIGRETURN, uctx, ctxstyle); */
- __sigreturn (NULL, (set == SS_ONSTACK) ? UC_SET_ALT_STACK : UC_RESET_ALT_STACK);
+ __sigreturn (NULL, (set == SS_ONSTACK) ? UC_SET_ALT_STACK : UC_RESET_ALT_STACK, 0);
}
/* On these architectures, _sigtramp is implemented in assembly to
ensure it matches its DWARF unwind information. */
#if !defined (__i386__) && !defined (__x86_64__)
-
+OS_NOEXPORT
void
_sigtramp(
union __sigaction_u __sigaction_u,
int sigstyle,
int sig,
siginfo_t *sinfo,
- ucontext_t *uctx
+ ucontext_t *uctx,
+ uintptr_t token
) {
int ctxstyle = UC_FLAVOR;
/* sigreturn(uctx, ctxstyle); */
/* syscall (SYS_SIGRETURN, uctx, ctxstyle); */
- __sigreturn (uctx, ctxstyle);
+ __sigreturn (uctx, ctxstyle, token);
+ __builtin_trap(); /* __sigreturn returning is a fatal error */
}
#endif /* not ppc nor ppc64 nor i386 nor x86_64 */
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2018 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*/
#include <architecture/i386/asm_help.h>
+#include <os/tsd.h>
// The FP control word is actually two bytes, but there's no harm in
// using four bytes for it and keeping the struct aligned.
#define JB_GS 68
LEAF(__setjmp, 0)
- movl 4(%esp), %ecx // jmp_buf (struct sigcontext *)
+ movl 4(%esp), %ecx // jmp_buf (struct sigcontext *)
- // Build the jmp_buf
- fnstcw JB_FPCW(%ecx) // Save the FP control word
- stmxcsr JB_MXCSR(%ecx) // Save the MXCSR
- movl %ebx, JB_EBX(%ecx)
- movl %edi, JB_EDI(%ecx)
- movl %esi, JB_ESI(%ecx)
- movl %ebp, JB_EBP(%ecx)
+ // Build the jmp_buf
+ fnstcw JB_FPCW(%ecx) // Save the FP control word
+ stmxcsr JB_MXCSR(%ecx) // Save the MXCSR
+ movl %ebx, JB_EBX(%ecx)
+ movl %edi, JB_EDI(%ecx)
+ movl %esi, JB_ESI(%ecx)
+ movl %ebp, %eax
+ _OS_PTR_MUNGE(%eax)
+ movl %eax, JB_EBP(%ecx)
- // EIP is set to the frame return address value
- movl (%esp), %eax
- movl %eax, JB_EIP(%ecx)
- // ESP is set to the frame return address plus 4
- leal 4(%esp), %eax
- movl %eax, JB_ESP(%ecx)
+ // EIP is set to the frame return address value
+ movl (%esp), %eax
+ _OS_PTR_MUNGE(%eax)
+ movl %eax, JB_EIP(%ecx)
+ // ESP is set to the frame return address plus 4
+ leal 4(%esp), %eax
+ _OS_PTR_MUNGE(%eax)
+ movl %eax, JB_ESP(%ecx)
- // return 0
- xorl %eax, %eax
- ret
+ // return 0
+ xorl %eax, %eax
+ ret
LEAF(__longjmp, 0)
- fninit // Clear all FP exceptions
- movl 4(%esp), %ecx // jmp_buf (struct sigcontext *)
- movl 8(%esp), %eax // return value
- testl %eax, %eax
- jnz 1f
- incl %eax
+ fninit // Clear all FP exceptions
+ movl 4(%esp), %ecx // jmp_buf (struct sigcontext *)
+ movl 8(%esp), %edx // return value
+ xorl %eax, %eax
+ incl %eax
+ testl %edx, %edx
+ cmovnel %edx, %eax
// general registers
-1: movl JB_EBX(%ecx), %ebx
+ movl JB_EBX(%ecx), %ebx
movl JB_ESI(%ecx), %esi
movl JB_EDI(%ecx), %edi
- movl JB_EBP(%ecx), %ebp
- movl JB_ESP(%ecx), %esp
+ movl JB_EBP(%ecx), %edx
+ _OS_PTR_UNMUNGE(%edx)
+ movl %edx, %ebp
+ movl JB_ESP(%ecx), %edx
+ _OS_PTR_UNMUNGE(%edx)
+ movl %edx, %esp
+ movl JB_EIP(%ecx), %edx
+ _OS_PTR_UNMUNGE(%edx)
fldcw JB_FPCW(%ecx) // Restore FP control word
ldmxcsr JB_MXCSR(%ecx) // Restore the MXCSR
- cld // Make sure DF is reset
- jmp *JB_EIP(%ecx)
+ cld // Make sure DF is reset
+ jmp *%edx
#include <sys/syscall.h>
#if defined(__DYNAMIC__)
-#if IGNORE_RDAR_13625839
.private_extern ___in_sigtramp
-#endif
.globl ___in_sigtramp
.data
.align 2
%ebp frame pointer
%ebx Address of "L00000000001$pb"
%esi uctx
-
+ %edi token
+
void
_sigtramp(
union __sigaction_u __sigaction_u,
)
*/
+#if RDAR_35834092
+ .private_extern __sigtramp
+#endif
.globl __sigtramp
.text
.align 4,0x90
movl 16(%ebp), %edx # get 'sig'
movl 20(%ebp), %eax # get 'sinfo'
movl 24(%ebp), %esi # get 'uctx'
+ movl 28(%ebp), %edi # get 'token'
/* Call the signal handler.
Some variants are not supposed to get the last two parameters,
but the test to prevent this is more expensive than just passing
#endif
movl %esi, 4(%esp)
movl $ UC_FLAVOR, 8(%esp)
+ movl %edi, 12(%esp)
movl $ SYS_sigreturn, %eax
int $0x80
+ ud2 /* __sigreturn returning is a fatal error */
Lend:
/* DWARF unwind table #defines. */
/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2018 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*/
#include <architecture/i386/asm_help.h>
+#include <os/tsd.h>
#define JB_RBX 0
#define JB_RBP 8
// now build sigcontext
movq %rbx, JB_RBX(%rdi)
- movq %rbp, JB_RBP(%rdi)
+ movq %rbp, %rax
+ _OS_PTR_MUNGE(%rax)
+ movq %rax, JB_RBP(%rdi)
movq %r12, JB_R12(%rdi)
movq %r13, JB_R13(%rdi)
movq %r14, JB_R14(%rdi)
// RIP is set to the frame return address value
movq (%rsp), %rax
+ _OS_PTR_MUNGE(%rax)
movq %rax, JB_RIP(%rdi)
// RSP is set to the frame return address plus 8
leaq 8(%rsp), %rax
+ _OS_PTR_MUNGE(%rax)
movq %rax, JB_RSP(%rdi)
// save fp control word
fninit // Clear all FP exceptions
// %rdi is a jmp_buf (struct sigcontext *)
// %esi is the return value
- movl %esi, %eax
testl %esi, %esi
- jnz 1f
- incl %eax
+ movl $1, %eax
+ cmovnel %esi, %eax
// general registers
-1:
movq JB_RBX(%rdi), %rbx
- movq JB_RBP(%rdi), %rbp
- movq JB_RSP(%rdi), %rsp
+ movq JB_RBP(%rdi), %rsi
+ _OS_PTR_UNMUNGE(%rsi)
+ movq %rsi, %rbp
+ movq JB_RSP(%rdi), %rsi
+ _OS_PTR_UNMUNGE(%rsi)
+ movq %rsi, %rsp
movq JB_R12(%rdi), %r12
movq JB_R13(%rdi), %r13
movq JB_R14(%rdi), %r14
movq JB_R15(%rdi), %r15
+ movq JB_RIP(%rdi), %rsi
+ _OS_PTR_UNMUNGE(%rsi)
// restore FP control word
fldcw JB_FPCONTROL(%rdi)
// restore MXCSR
ldmxcsr JB_MXCSR(%rdi)
-
// Make sure DF is reset
cld
- jmp *JB_RIP(%rdi)
+ jmp *%rsi
/* register use:
%rbx uctx
-
+ %r12 token
+
void
_sigtramp(
union __sigaction_u __sigaction_u, %rdi
int sig, %rdx
siginfo_t *sinfo, %rcx
ucontext_t *uctx %r8
+	uintptr_t token				%r9
)
*/
+#if RDAR_35834092
+ .private_extern __sigtramp
+#endif
.globl __sigtramp
.text
.align 4,0x90
#endif
/* Save uctx in %rbx. */
movq %r8, %rbx
+ /* Save token in %r12. */
+ movq %r9, %r12
/* Call the signal handler.
Some variants are not supposed to get the last two parameters,
but the test to prevent this is more expensive than just passing
#endif
movq %rbx, %rdi
movl $ UC_FLAVOR, %esi
+ movq %r12, %rdx
callq ___sigreturn
+ ud2 /* __sigreturn returning is a fatal error */
ret
Lend:
bool asl_enabled;
const char *progname;
int asl_fd;
-#if TARGET_IPHONE_SIMULATOR
+#if TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC
const char *sim_log_path;
os_unfair_lock sim_connect_lock;
#else
ctx->asl_enabled = true;
if (vars && vars->__prognamePtr) {
ctx->progname = *(vars->__prognamePtr);
-#if TARGET_IPHONE_SIMULATOR
+#if TARGET_OS_SIMULATOR
} else {
const char * progname = *_NSGetProgname();
if (progname)
return -1;
}
-#if TARGET_IPHONE_SIMULATOR
+#if TARGET_OS_SIMULATOR && !TARGET_OS_IOSMAC
os_unfair_lock_lock_with_options(&ctx->sim_connect_lock,
OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
if (ctx->sim_log_path) {
#endif
}
-#if VARIANT_DYLD && TARGET_IPHONE_SIMULATOR
+#if VARIANT_DYLD && TARGET_OS_SIMULATOR
int
ffsl(long mask)
{
#endif
}
-#if VARIANT_DYLD && TARGET_IPHONE_SIMULATOR
+#if VARIANT_DYLD && TARGET_OS_SIMULATOR
int
flsl(long mask)
{
--- /dev/null
+/*
+ * Copyright (c) 2011 Apple, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRCPY
+
+char *
+_platform_strcpy(char * restrict dst, const char * restrict src) {
+ const size_t length = _platform_strlen(src);
+ // The stpcpy() and strcpy() functions copy the string src to dst
+ // (including the terminating '\0' character).
+ _platform_memmove(dst, src, length+1);
+ // The strcpy() and strncpy() functions return dst.
+ return dst;
+}
+
+#if VARIANT_STATIC
+char *
+strcpy(char * restrict dst, const char * restrict src) {
+ return _platform_strcpy(dst, src);
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2011 Apple, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRLCAT
+
+size_t
+_platform_strlcat(char * restrict dst, const char * restrict src, size_t maxlen) {
+ const size_t srclen = _platform_strlen(src);
+ const size_t dstlen = _platform_strnlen(dst, maxlen);
+ if (dstlen == maxlen) return maxlen+srclen;
+ if (srclen < maxlen-dstlen) {
+ _platform_memmove(dst+dstlen, src, srclen+1);
+ } else {
+ _platform_memmove(dst+dstlen, src, maxlen-dstlen-1);
+ dst[maxlen-1] = '\0';
+ }
+ return dstlen + srclen;
+}
+
+#if VARIANT_STATIC
+size_t
+strlcat(char * restrict dst, const char * restrict src, size_t maxlen) {
+ return _platform_strlcat(dst, src, maxlen);
+}
+#endif
+
+#endif
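A usage sketch relying on the standard strlcat() return-value convention implemented above (the suffix and helper name are made up):

#include <platform/string.h>

// Returns 0 on success, -1 if the concatenation would not fit; on truncation
// dst is still NUL-terminated.
static int
append_suffix(char *dst, size_t dstsize)
{
	if (_platform_strlcat(dst, ".txt", dstsize) >= dstsize) {
		return -1;
	}
	return 0;
}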
--- /dev/null
+/*
+ * Copyright (c) 2011 Apple, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRLCPY
+
+size_t
+_platform_strlcpy(char * restrict dst, const char * restrict src, size_t maxlen) {
+ const size_t srclen = _platform_strlen(src);
+ if (srclen < maxlen) {
+ _platform_memmove(dst, src, srclen+1);
+ } else if (maxlen != 0) {
+ _platform_memmove(dst, src, maxlen-1);
+ dst[maxlen-1] = '\0';
+ }
+ return srclen;
+}
+
+#if VARIANT_STATIC
+size_t
+strlcpy(char * restrict dst, const char * restrict src, size_t maxlen) {
+ return _platform_strlcpy(dst, src, maxlen);
+}
+#endif
+
+#endif
--- /dev/null
+/*-
+ * Copyright (c) 2009 Xin LI <delphij@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/string/strlen.c,v 1.7 2009/01/26 07:31:28 delphij Exp $");
+
+#include <limits.h>
+#include <sys/types.h>
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRLEN
+
+/*
+ * Portable strlen() for 32-bit and 64-bit systems.
+ *
+ * Rationale: it is generally much more efficient to do word length
+ * operations and avoid branches on modern computer systems, as
+ * compared to byte-length operations with a lot of branches.
+ *
+ * The expression:
+ *
+ * ((x - 0x01....01) & ~x & 0x80....80)
+ *
+ * would evaluate to a non-zero value iff any of the bytes in the
+ * original word is zero. However, we can further reduce ~1/3 of the
+ * time if we consider that strlen() usually operates on 7-bit ASCII,
+ * by employing the following expression, which allows false positives
+ * when a byte has its high bit set; the byte-wise tail check then
+ * catches those cases:
+ *
+ * ((x - 0x01....01) & 0x80....80)
+ *
+ * This is more than 5.2 times as fast as the raw implementation on
+ * Intel T7300 under long mode for strings longer than word length.
+ */
+
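A self-contained worked example of the reduced test above, using the 32-bit masks (the word values are arbitrary illustrations):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t has_zero = 0x64630061;  // one byte is 0x00
	uint32_t no_zero  = 0x64636261;  // plain ASCII, no zero byte
	uint32_t high_bit = 0x64638161;  // contains 0x81: no zero, but high bit set

	// (x - 0x01..01) & 0x80..80 flags the word containing the zero byte...
	assert(((has_zero - 0x01010101u) & 0x80808080u) == 0x00008000u);
	assert(((no_zero  - 0x01010101u) & 0x80808080u) == 0);
	// ...but can false-positive on high-bit bytes, hence the testbyte() recheck.
	assert(((high_bit - 0x01010101u) & 0x80808080u) != 0);
	return 0;
}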
+/* Magic numbers for the algorithm */
+#if LONG_BIT == 32
+static const unsigned long mask01 = 0x01010101;
+static const unsigned long mask80 = 0x80808080;
+#elif LONG_BIT == 64
+static const unsigned long mask01 = 0x0101010101010101;
+static const unsigned long mask80 = 0x8080808080808080;
+#else
+#error Unsupported word size
+#endif
+
+#define LONGPTR_MASK (sizeof(long) - 1)
+
+/*
+ * Helper macro to return string length if we caught the zero
+ * byte.
+ */
+#define testbyte(x) \
+ do { \
+ if (p[x] == '\0') \
+ return (p - str + x); \
+ } while (0)
+
+size_t
+_platform_strlen(const char *str)
+{
+ const char *p;
+ const unsigned long *lp;
+
+ /* Skip the first few bytes until we have an aligned p */
+ for (p = str; (uintptr_t)p & LONGPTR_MASK; p++)
+ if (*p == '\0')
+ return (p - str);
+
+ /* Scan the rest of the string using word sized operation */
+ for (lp = (const unsigned long *)p; ; lp++)
+ if ((*lp - mask01) & mask80) {
+ p = (const char *)(lp);
+ testbyte(0);
+ testbyte(1);
+ testbyte(2);
+ testbyte(3);
+#if (LONG_BIT >= 64)
+ testbyte(4);
+ testbyte(5);
+ testbyte(6);
+ testbyte(7);
+#endif
+ }
+
+ /* NOTREACHED */
+ return (0);
+}
+
+#if VARIANT_STATIC
+size_t
+strlen(const char *str)
+{
+ return _platform_strlen(str);
+}
+#endif
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (c) 2011 Apple, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRNCPY
+
+char *
+_platform_strncpy(char * restrict dst, const char * restrict src, size_t maxlen) {
+ const size_t srclen = _platform_strnlen(src, maxlen);
+ if (srclen < maxlen) {
+ // The stpncpy() and strncpy() functions copy at most maxlen
+ // characters from src into dst.
+ _platform_memmove(dst, src, srclen);
+ // If src is less than maxlen characters long, the remainder
+ // of dst is filled with '\0' characters.
+ _platform_memset(dst+srclen, 0, maxlen-srclen);
+ } else {
+ // Otherwise, dst is not terminated.
+ _platform_memmove(dst, src, maxlen);
+ }
+ // The strcpy() and strncpy() functions return dst.
+ return dst;
+}
+
+#if VARIANT_STATIC
+char *
+strncpy(char * restrict dst, const char * restrict src, size_t maxlen) {
+ return _platform_strncpy(dst, src, maxlen);
+}
+#endif
+
+#endif
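A short sketch of the edge case the comments above call out: when src fills the whole buffer, strncpy() leaves dst unterminated (the buffer and label are illustrative):

#include <platform/string.h>

static void
copy_label(char dst[8])
{
	_platform_strncpy(dst, "exactly8", 8);  // fills all 8 bytes, no terminator
	dst[7] = '\0';                          // callers must terminate explicitly
}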
--- /dev/null
+/*-
+ * Copyright (c) 2009 David Schultz <das@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/string/strnlen.c,v 1.1 2009/02/28 06:00:58 das Exp $");
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRNLEN
+
+size_t
+_platform_strnlen(const char *s, size_t maxlen)
+{
+ size_t len;
+
+ for (len = 0; len < maxlen; len++, s++) {
+ if (!*s)
+ break;
+ }
+ return (len);
+}
+
+#if VARIANT_STATIC
+size_t
+strnlen(const char *s, size_t maxlen)
+{
+ return _platform_strnlen(s, maxlen);
+}
+#endif
+
+#endif
--- /dev/null
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)strstr.c 8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/string/strstr.c,v 1.6 2009/02/03 17:58:20 danger Exp $");
+
+#include <sys/_types/_null.h>
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRSTR
+
+/*
+ * Find the first occurrence of find in s.
+ */
+char *
+_platform_strstr(const char *s, const char *find)
+{
+ char c, sc;
+ size_t len;
+
+ if ((c = *find++) != '\0') {
+ len = _platform_strlen(find);
+ do {
+ do {
+ if ((sc = *s++) == '\0')
+ return (NULL);
+ } while (sc != c);
+ } while (_platform_strncmp(s, find, len) != 0);
+ s--;
+ }
+ return ((char *)s);
+}
+
+#if VARIANT_STATIC
+char *
+strstr(const char *s, const char *find)
+{
+ return _platform_strstr(s, find);
+}
+#endif
+
+#endif