]> git.saurik.com Git - apple/libplatform.git/commitdiff
libplatform-126.1.2.tar.gz macos-1012 macos-10121 macos-10122 macos-10123 os-x-1012 v126.1.2
authorApple <opensource@apple.com>
Thu, 3 Nov 2016 01:19:12 +0000 (01:19 +0000)
committerApple <opensource@apple.com>
Thu, 3 Nov 2016 01:19:12 +0000 (01:19 +0000)
107 files changed:
.gitignore [new file with mode: 0644]
LICENSE [new file with mode: 0644]
include/_simple.h [new file with mode: 0644]
include/libkern/OSAtomic.h [new file with mode: 0644]
include/libkern/OSAtomicDeprecated.h [new file with mode: 0644]
include/libkern/OSAtomicQueue.h [new file with mode: 0644]
include/libkern/OSCacheControl.h [new file with mode: 0644]
include/libkern/OSSpinLockDeprecated.h [new file with mode: 0644]
include/os/alloc_once_impl.h [new file with mode: 0644]
include/os/base.h [new file with mode: 0644]
include/os/base_private.h [new file with mode: 0644]
include/os/internal/atomic.h [new file with mode: 0644]
include/os/internal/crashlog.h [new file with mode: 0644]
include/os/internal/internal_shared.h [new file with mode: 0644]
include/os/lock.h [new file with mode: 0644]
include/os/lock_private.h [new file with mode: 0644]
include/os/once_private.h [new file with mode: 0644]
include/os/semaphore_private.h [new file with mode: 0644]
include/platform/compat.h [new file with mode: 0644]
include/platform/introspection_private.h [new file with mode: 0644]
include/platform/string.h [new file with mode: 0644]
include/setjmp.h [new file with mode: 0644]
include/ucontext.h [new file with mode: 0644]
internal/os/internal.h [new file with mode: 0644]
internal/os/internal_asm.h [new file with mode: 0644]
internal/os/yield.h [new file with mode: 0644]
man/atomic.3 [new file with mode: 0644]
man/atomic_deprecated.3 [new file with mode: 0644]
man/cache.3 [new file with mode: 0644]
man/ffs.3 [new file with mode: 0644]
man/getcontext.3 [new file with mode: 0644]
man/makecontext.3 [new file with mode: 0644]
man/manpages.lst [new file with mode: 0644]
man/setjmp.3 [new file with mode: 0644]
man/spinlock_deprecated.3 [new file with mode: 0644]
man/ucontext.3 [new file with mode: 0644]
private/libkern/OSAtomic.h [new file with mode: 0644]
private/libkern/module.modulemap [new file with mode: 0644]
src/atomics/common/MKGetTimeBaseInfo.c [new file with mode: 0644]
src/atomics/i386/OSAtomic.s [new file with mode: 0644]
src/atomics/i386/pfz.s [new file with mode: 0644]
src/atomics/init.c [new file with mode: 0644]
src/atomics/x86_64/OSAtomic.s [new file with mode: 0644]
src/atomics/x86_64/pfz.s [new file with mode: 0644]
src/cachecontrol/arm/cache.s [new file with mode: 0644]
src/cachecontrol/arm64/cache.s [new file with mode: 0644]
src/cachecontrol/generic/cache.c [new file with mode: 0644]
src/cachecontrol/i386/cache.s [new file with mode: 0644]
src/cachecontrol/x86_64/cache.s [new file with mode: 0644]
src/force_libplatform_to_build.c [new file with mode: 0644]
src/init.c [new file with mode: 0644]
src/introspection/introspection.c [new file with mode: 0644]
src/introspection/introspection_internal.h [new file with mode: 0644]
src/os/alloc_once.c [new file with mode: 0644]
src/os/atomic.c [new file with mode: 0644]
src/os/atomic_up.c [new file with mode: 0644]
src/os/lock.c [new file with mode: 0644]
src/os/lock_internal.h [new file with mode: 0644]
src/os/lock_up.c [new file with mode: 0644]
src/os/lock_wfe.c [new file with mode: 0644]
src/os/resolver.c [new file with mode: 0644]
src/os/resolver.h [new file with mode: 0644]
src/os/semaphore.c [new file with mode: 0644]
src/setjmp/arm/_longjmp.s [new file with mode: 0644]
src/setjmp/arm/_setjmp.h [new file with mode: 0644]
src/setjmp/arm/_setjmp.s [new file with mode: 0644]
src/setjmp/arm/longjmp.s [new file with mode: 0644]
src/setjmp/arm/setjmp.s [new file with mode: 0644]
src/setjmp/arm64/setjmp.s [new file with mode: 0644]
src/setjmp/generic/setjmperr.c [new file with mode: 0644]
src/setjmp/generic/sigtramp.c [new file with mode: 0644]
src/setjmp/i386/_setjmp.s [new file with mode: 0644]
src/setjmp/i386/_sigtramp.s [new file with mode: 0644]
src/setjmp/i386/setjmp.s [new file with mode: 0644]
src/setjmp/x86_64/_setjmp.s [new file with mode: 0644]
src/setjmp/x86_64/_sigtramp.s [new file with mode: 0644]
src/setjmp/x86_64/setjmp.s [new file with mode: 0644]
src/simple/asl.c [new file with mode: 0644]
src/simple/getenv.c [new file with mode: 0644]
src/simple/string_io.c [new file with mode: 0644]
src/string/generic/bzero.c [new file with mode: 0644]
src/string/generic/ffsll.c [new file with mode: 0644]
src/string/generic/flsll.c [new file with mode: 0644]
src/string/generic/memccpy.c [new file with mode: 0644]
src/string/generic/memchr.c [new file with mode: 0644]
src/string/generic/memcmp.c [new file with mode: 0644]
src/string/generic/memmove.c [new file with mode: 0644]
src/string/generic/memset_pattern.c [new file with mode: 0644]
src/string/generic/strchr.c [new file with mode: 0644]
src/string/generic/strcmp.c [new file with mode: 0644]
src/string/generic/strncmp.c [new file with mode: 0644]
src/ucontext/generic/getmcontext.c [new file with mode: 0644]
src/ucontext/generic/makecontext.c [new file with mode: 0644]
src/ucontext/generic/setcontext.c [new file with mode: 0644]
src/ucontext/generic/swapcontext.c [new file with mode: 0644]
src/ucontext/i386/_ctx_start.s [new file with mode: 0644]
src/ucontext/i386/_setcontext.s [new file with mode: 0644]
src/ucontext/i386/getcontext.s [new file with mode: 0644]
src/ucontext/x86_64/_ctx_start.s [new file with mode: 0644]
src/ucontext/x86_64/_setcontext.s [new file with mode: 0644]
src/ucontext/x86_64/getcontext.s [new file with mode: 0644]
xcodeconfig/atomics.xcconfig [new file with mode: 0644]
xcodeconfig/libplatform.aliases [new file with mode: 0644]
xcodeconfig/libplatform.xcconfig [new file with mode: 0644]
xcodeconfig/os.xcconfig [new file with mode: 0644]
xcodeconfig/perarch.xcconfig [new file with mode: 0644]
xcodeconfig/static.xcconfig [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..b45520f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/libplatform.xcodeproj/project.xcworkspace/
+/libplatform.xcodeproj/xcuserdata/
diff --git a/LICENSE b/LICENSE
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/include/_simple.h b/include/_simple.h
new file mode 100644 (file)
index 0000000..b3bd924
--- /dev/null
+++ b/include/_simple.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2006, 2010, 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _SYSTEM_SIMPLE_H_
+#define _SYSTEM_SIMPLE_H_
+
+#include <sys/cdefs.h>
+#include <stdarg.h>
+
+#include <Availability.h>
+
+typedef void *_SIMPLE_STRING;
+typedef const char *_esc_func(unsigned char);
+
+__BEGIN_DECLS
+/*
+ * A simplified vfprintf variant.  The format string is interpreted with
+ * arguments from the va_list, and the results are written to the given
+ * file descriptor.
+ */
+void _simple_vdprintf(int __fd, const char *__fmt, va_list __ap);
+
+/*
+ * A simplified fprintf variant.  The format string is interpreted with
+ * arguments from the variable argument list, and the results are written
+ * to the given file descriptor.
+ */
+void _simple_dprintf(int __fd, const char *__fmt, ...);
+
+/*
+ * A simplified string allocate routine.  Pass the opaque pointer to structure
+ * to _simple_*sprintf() routines.  Use _simple_string() to retrieve the
+ * current string (the string is guaranteed to be null terminated only on
+ * the call to _simple_string()).  Use _simple_sfree() to free the structure
+ * and string memory.
+ */
+_SIMPLE_STRING _simple_salloc(void);
+
+/*
+ * The format string is interpreted with arguments from the va_list, and the
+ * results are appended to the string maintained by the opaque structure, as
+ * returned by a previous call to _simple_salloc().
+ * Always returns 0 on OS X >= 10.12 and iOS >= 10.0
+ */
+int _simple_vsprintf(_SIMPLE_STRING __b, const char *__fmt, va_list __ap);
+
+/*
+ * The format string is interpreted with arguments from the variable argument
+ * list, and the results are appended to the string maintained by the opaque
+ * structure, as returned by a previous call to _simple_salloc().
+ * Always returns 0 on OS X >= 10.12 and iOS >= 10.0
+ */
+int _simple_sprintf(_SIMPLE_STRING __b, const char *__fmt, ...);
+
+/*
+ * Like _simple_vsprintf(), except __esc is a function to call on each
+ * character; the function returns NULL if the character should be passed
+ * as is, otherwise, the returned character string is used instead.
+ */
+int _simple_vesprintf(_SIMPLE_STRING __b, _esc_func __esc, const char *__fmt, va_list __ap);
+
+/*
+ * Like _simple_sprintf(), except __esc is a function to call on each
+ * character; the function returns NULL if the character should be passed
+ * as is, otherwise, the returned character string is used instead.
+ */
+int _simple_esprintf(_SIMPLE_STRING __b, _esc_func __esc, const char *__fmt, ...);
+
+/*
+ * Return the null terminated string from the opaque structure, as returned
+ * by a previous call to _simple_salloc().
+ */
+char *_simple_string(_SIMPLE_STRING __b);
+
+/*
+ * Reposition the pointer to the first null in the buffer.  After a call to
+ * _simple_string, the buffer can be modified, and shrunk.
+ */
+void _simple_sresize(_SIMPLE_STRING __b);
+
+/*
+ * Append the null-terminated string to the string associated with the opaque
+ * structure. Always returns 0 on OS X >= 10.12 and iOS >= 10.0
+ */
+int _simple_sappend(_SIMPLE_STRING __b, const char *__str);
+
+/*
+ * Like _simple_sappend(), except __esc is a function to call on each
+ * character; the function returns NULL if the character should be passed
+ * as is, otherwise, the returned character string is used instead.
+ */
+int _simple_esappend(_SIMPLE_STRING __b, _esc_func __esc, const char *__str);
+
+/*
+ * Write the string associated with the opaque structure to the file descriptor.
+ */
+void _simple_put(_SIMPLE_STRING __b, int __fd);
+
+/*
+ * Write the string associated with the opaque structure and a trailing newline,
+ * to the file descriptor.
+ */
+void _simple_putline(_SIMPLE_STRING __b, int __fd);
+
+/*
+ * Free the opaque structure, and the associated string.
+ */
+void _simple_sfree(_SIMPLE_STRING __b);
+
+/*
+ * Simplified ASL log interface; does not use malloc.  Unfortunately, this
+ * requires knowledge of the format used by ASL.
+ */
+#ifndef ASL_LEVEL_DEBUG
+#define ASL_LEVEL_EMERG   0
+#define ASL_LEVEL_ALERT   1
+#define ASL_LEVEL_CRIT    2
+#define ASL_LEVEL_ERR     3
+#define ASL_LEVEL_WARNING 4
+#define ASL_LEVEL_NOTICE  5
+#define ASL_LEVEL_INFO    6
+#define ASL_LEVEL_DEBUG   7
+#endif
+
+void _simple_asl_log(int __level, const char *__facility, const char *__message);
+void _simple_asl_log_prog(int level, const char *facility, const char *message, const char *progname);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+_SIMPLE_STRING _simple_asl_msg_new(void);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void _simple_asl_msg_set(_SIMPLE_STRING __b, const char *__key, const char *__val);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void _simple_asl_send(_SIMPLE_STRING __b);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+const char *_simple_getenv(const char *envp[], const char *var);
+
+__END_DECLS
+
+#endif /* _SYSTEM_SIMPLE_H_ */
diff --git a/include/libkern/OSAtomic.h b/include/libkern/OSAtomic.h
new file mode 100644 (file)
index 0000000..ac38f94
--- /dev/null
+++ b/include/libkern/OSAtomic.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OSATOMIC_H_
+#define _OSATOMIC_H_
+
+/*! @header
+ * These are deprecated legacy interfaces for atomic and synchronization
+ * operations.
+ *
+ * Define OSATOMIC_USE_INLINED=1 to get inline implementations of the
+ * OSAtomic interfaces in terms of the <stdatomic.h> primitives.
+ *
+ * Define OSSPINLOCK_USE_INLINED=1 to get inline implementations of the
+ * OSSpinLock interfaces in terms of the <os/lock.h> primitives.
+ *
+ * These are intended as a transition convenience, direct use of those
+ * primitives should be preferred.
+ */
+
+#include <sys/cdefs.h>
+
+#include "OSAtomicDeprecated.h"
+#include "OSSpinLockDeprecated.h"
+#include "OSAtomicQueue.h"
+
+#endif /* _OSATOMIC_H_ */
diff --git a/include/libkern/OSAtomicDeprecated.h b/include/libkern/OSAtomicDeprecated.h
new file mode 100644 (file)
index 0000000..278e04e
--- /dev/null
+++ b/include/libkern/OSAtomicDeprecated.h
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OSATOMIC_DEPRECATED_H_
+#define _OSATOMIC_DEPRECATED_H_
+
+/*! @header
+ * These are deprecated legacy interfaces for atomic operations.
+ * The C11 interfaces in <stdatomic.h> resp. C++11 interfaces in <atomic>
+ * should be used instead.
+ *
+ * Define OSATOMIC_USE_INLINED=1 to get inline implementations of these
+ * interfaces in terms of the <stdatomic.h> resp. <atomic> primitives.
+ * This is intended as a transition convenience, direct use of those primitives
+ * is preferred.
+ */
+
+#if !(defined(OSATOMIC_USE_INLINED) && OSATOMIC_USE_INLINED)
+
+#include    <sys/cdefs.h>
+#include    <stddef.h>
+#include    <stdint.h>
+#include    <stdbool.h>
+#include    <Availability.h>
+
+#ifndef OSATOMIC_DEPRECATED
+#define OSATOMIC_DEPRECATED 1
+#ifndef __cplusplus
+#define OSATOMIC_BARRIER_DEPRECATED_MSG(_r) \
+               "Use " #_r "() from <stdatomic.h> instead"
+#define OSATOMIC_DEPRECATED_MSG(_r) \
+               "Use " #_r "_explicit(memory_order_relaxed) from <stdatomic.h> instead"
+#else
+#define OSATOMIC_BARRIER_DEPRECATED_MSG(_r) \
+               "Use std::" #_r "() from <atomic> instead"
+#define OSATOMIC_DEPRECATED_MSG(_r) \
+               "Use std::" #_r "_explicit(std::memory_order_relaxed) from <atomic> instead"
+#endif
+#define OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(_r) \
+       __OS_AVAILABILITY_MSG(macosx, deprecated=10.12, OSATOMIC_BARRIER_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(ios, deprecated=10.0, OSATOMIC_BARRIER_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(tvos, deprecated=10.0, OSATOMIC_BARRIER_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(watchos, deprecated=3.0, OSATOMIC_BARRIER_DEPRECATED_MSG(_r))
+#define OSATOMIC_DEPRECATED_REPLACE_WITH(_r) \
+       __OS_AVAILABILITY_MSG(macosx, deprecated=10.12, OSATOMIC_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(ios, deprecated=10.0, OSATOMIC_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(tvos, deprecated=10.0, OSATOMIC_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(watchos, deprecated=3.0, OSATOMIC_DEPRECATED_MSG(_r))
+#else
+#undef OSATOMIC_DEPRECATED
+#define OSATOMIC_DEPRECATED 0
+#define OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(_r)
+#define OSATOMIC_DEPRECATED_REPLACE_WITH(_r)
+#endif
+
+/*
+ * WARNING: all addresses passed to these functions must be "naturally aligned",
+ * i.e. <code>int32_t</code> pointers must be 32-bit aligned (low 2 bits of
+ * address are zeroes), and <code>int64_t</code> pointers must be 64-bit
+ * aligned (low 3 bits of address are zeroes).
+ * Note that this is not the default alignment of the <code>int64_t</code> type
+ * in the iOS ARMv7 ABI, see
+ * {@link //apple_ref/doc/uid/TP40009021-SW8 iPhoneOSABIReference}
+ *
+ * Note that some versions of the atomic functions incorporate memory barriers
+ * and some do not.  Barriers strictly order memory access on weakly-ordered
+ * architectures such as ARM.  All loads and stores that appear (in sequential
+ * program order) before the barrier are guaranteed to complete before any
+ * load or store that appears after the barrier.
+ *
+ * The barrier operation is typically a no-op on uniprocessor systems and
+ * fully enabled on multiprocessor systems. On some platforms, such as ARM,
+ * the barrier can be quite expensive.
+ *
+ * Most code should use the barrier functions to ensure that memory shared
+ * between threads is properly synchronized.  For example, if you want to
+ * initialize a shared data structure and then atomically increment a variable
+ * to indicate that the initialization is complete, you must use
+ * {@link OSAtomicIncrement32Barrier} to ensure that the stores to your data
+ * structure complete before the atomic increment.
+ *
+ * Likewise, the consumer of that data structure must use
+ * {@link OSAtomicDecrement32Barrier},
+ * in order to ensure that their loads of the structure are not executed before
+ * the atomic decrement.  On the other hand, if you are simply incrementing a
+ * global counter, then it is safe and potentially faster to use
+ * {@link OSAtomicIncrement32}.
+ *
+ * If you are unsure which version to use, prefer the barrier variants as they
+ * are safer.
+ *
+ * For the kernel-space version of this header, see
+ * {@link //apple_ref/doc/header/OSAtomic.h OSAtomic.h (Kernel Framework)}
+ *
+ * @apiuid //apple_ref/doc/header/user_space_OSAtomic.h
+ */
+
+__BEGIN_DECLS
+
+/*! @typedef OSAtomic_int64_aligned64_t
+ * 64-bit aligned <code>int64_t</code> type.
+ * Use for variables whose addresses are passed to OSAtomic*64() functions to
+ * get the compiler to generate the required alignment.
+ */
+
+#if __has_attribute(aligned)
+typedef int64_t __attribute__((__aligned__((sizeof(int64_t)))))
+               OSAtomic_int64_aligned64_t;
+#else
+typedef int64_t OSAtomic_int64_aligned64_t;
+#endif
+
+/*! @group Arithmetic functions
+    All functions in this group return the new value.
+ */
+
+/*! @abstract Atomically adds two 32-bit values.
+    @discussion
+       This function adds the value given by <code>__theAmount</code> to the
+       value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicAdd32( int32_t __theAmount, volatile int32_t *__theValue );
+
+
+/*! @abstract Atomically adds two 32-bit values.
+    @discussion
+       This function adds the value given by <code>__theAmount</code> to the
+       value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicAdd32}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicAdd32Barrier( int32_t __theAmount, volatile int32_t *__theValue );
+
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_10 || __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_1
+
+/*! @abstract Atomically increments a 32-bit value.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int32_t        OSAtomicIncrement32( volatile int32_t *__theValue );
+
+
+/*! @abstract Atomically increments a 32-bit value with a barrier.
+    @discussion
+       This function is equivalent to {@link OSAtomicIncrement32}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int32_t        OSAtomicIncrement32Barrier( volatile int32_t *__theValue );
+
+
+/*! @abstract Atomically decrements a 32-bit value.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_sub)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int32_t        OSAtomicDecrement32( volatile int32_t *__theValue );
+
+
+/*! @abstract Atomically decrements a 32-bit value with a barrier.
+    @discussion
+       This function is equivalent to {@link OSAtomicDecrement32}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_sub)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int32_t        OSAtomicDecrement32Barrier( volatile int32_t *__theValue );
+
+#else
+__inline static
+int32_t        OSAtomicIncrement32( volatile int32_t *__theValue )
+            { return OSAtomicAdd32(  1, __theValue); }
+
+__inline static
+int32_t        OSAtomicIncrement32Barrier( volatile int32_t *__theValue )
+            { return OSAtomicAdd32Barrier(  1, __theValue); }
+
+__inline static
+int32_t        OSAtomicDecrement32( volatile int32_t *__theValue )
+            { return OSAtomicAdd32( -1, __theValue); }
+
+__inline static
+int32_t        OSAtomicDecrement32Barrier( volatile int32_t *__theValue )
+            { return OSAtomicAdd32Barrier( -1, __theValue); }
+#endif
+
+
+/*! @abstract Atomically adds two 64-bit values.
+    @discussion
+       This function adds the value given by <code>__theAmount</code> to the
+       value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int64_t        OSAtomicAdd64( int64_t __theAmount,
+               volatile OSAtomic_int64_aligned64_t *__theValue );
+
+
+/*! @abstract Atomically adds two 64-bit values with a barrier.
+    @discussion
+       This function adds the value given by <code>__theAmount</code> to the
+       value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicAdd64}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_3_2)
+int64_t        OSAtomicAdd64Barrier( int64_t __theAmount,
+               volatile OSAtomic_int64_aligned64_t *__theValue );
+
+
+#if __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_10 || __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_1
+
+/*! @abstract Atomically increments a 64-bit value.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int64_t        OSAtomicIncrement64( volatile OSAtomic_int64_aligned64_t *__theValue );
+
+
+/*! @abstract Atomically increments a 64-bit value with a barrier.
+    @discussion
+       This function is equivalent to {@link OSAtomicIncrement64}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_add)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int64_t        OSAtomicIncrement64Barrier( volatile OSAtomic_int64_aligned64_t *__theValue );
+
+
+/*! @abstract Atomically decrements a 64-bit value.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_sub)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int64_t        OSAtomicDecrement64( volatile OSAtomic_int64_aligned64_t *__theValue );
+
+
+/*! @abstract Atomically decrements a 64-bit value with a barrier.
+    @discussion
+       This function is equivalent to {@link OSAtomicDecrement64}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_sub)
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_7_1)
+int64_t        OSAtomicDecrement64Barrier( volatile OSAtomic_int64_aligned64_t *__theValue );
+
+#else
+__inline static
+int64_t        OSAtomicIncrement64( volatile OSAtomic_int64_aligned64_t *__theValue )
+            { return OSAtomicAdd64(  1, __theValue); }
+
+__inline static
+int64_t        OSAtomicIncrement64Barrier( volatile OSAtomic_int64_aligned64_t *__theValue )
+            { return OSAtomicAdd64Barrier(  1, __theValue); }
+
+__inline static
+int64_t        OSAtomicDecrement64( volatile OSAtomic_int64_aligned64_t *__theValue )
+            { return OSAtomicAdd64( -1, __theValue); }
+
+__inline static
+int64_t        OSAtomicDecrement64Barrier( volatile OSAtomic_int64_aligned64_t *__theValue )
+            { return OSAtomicAdd64Barrier( -1, __theValue); }
+#endif
+
+
+/*! @group Boolean functions (AND, OR, XOR)
+ *
+ * @discussion Functions in this group come in four variants for each operation:
+ * with and without barriers, and functions that return the original value or
+ * the result value of the operation.
+ *
+ * The "Orig" versions return the original value (before the operation); the non-Orig
+ * versions return the value after the operation.  All are layered on top of
+ * {@link OSAtomicCompareAndSwap32} and similar.
+ */
+
+/*! @abstract Atomic bitwise OR of two 32-bit values.
+    @discussion
+       This function performs the bitwise OR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_or)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicOr32( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise OR of two 32-bit values with barrier.
+    @discussion
+       This function performs the bitwise OR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicOr32}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_or)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicOr32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise OR of two 32-bit values returning original.
+    @discussion
+       This function performs the bitwise OR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the original value referenced by <code>__theValue</code>.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_or)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2)
+int32_t        OSAtomicOr32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise OR of two 32-bit values returning original with barrier.
+    @discussion
+       This function performs the bitwise OR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+       This function is equivalent to {@link OSAtomicOr32Orig}
+       except that it also introduces a barrier.
+    @result Returns the original value referenced by <code>__theValue</code>.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_or)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2)
+int32_t        OSAtomicOr32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+
+
+/*! @abstract Atomic bitwise AND of two 32-bit values.
+    @discussion
+       This function performs the bitwise AND of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_and)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicAnd32( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise AND of two 32-bit values with barrier.
+    @discussion
+       This function performs the bitwise AND of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicAnd32}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_and)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicAnd32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise AND of two 32-bit values returning original.
+    @discussion
+       This function performs the bitwise AND of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the original value referenced by <code>__theValue</code>.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_and)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2)
+int32_t        OSAtomicAnd32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise AND of two 32-bit values returning original with barrier.
+    @discussion
+       This function performs the bitwise AND of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicAnd32Orig}
+       except that it also introduces a barrier.
+    @result Returns the original value referenced by <code>__theValue</code>.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_and)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2)
+int32_t        OSAtomicAnd32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+
+
+/*! @abstract Atomic bitwise XOR of two 32-bit values.
+    @discussion
+       This function performs the bitwise XOR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the new value.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_xor)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicXor32( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise XOR of two 32-bit values with barrier.
+    @discussion
+       This function performs the bitwise XOR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicXor32}
+       except that it also introduces a barrier.
+    @result Returns the new value.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_xor)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+int32_t        OSAtomicXor32Barrier( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise XOR of two 32-bit values returning original.
+    @discussion
+       This function performs the bitwise XOR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+    @result Returns the original value referenced by <code>__theValue</code>.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_xor)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2)
+int32_t        OSAtomicXor32Orig( uint32_t __theMask, volatile uint32_t *__theValue );
+
+
+/*! @abstract Atomic bitwise XOR of two 32-bit values returning original with barrier.
+    @discussion
+       This function performs the bitwise XOR of the value given by <code>__theMask</code>
+       with the value in the memory location referenced by <code>__theValue</code>,
+       storing the result back to that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicXor32Orig}
+       except that it also introduces a barrier.
+    @result Returns the original value referenced by <code>__theValue</code>.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_xor)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_3_2)
+int32_t        OSAtomicXor32OrigBarrier( uint32_t __theMask, volatile uint32_t *__theValue );
+
+/*! @group Compare and swap
+ * Functions in this group return true if the swap occurred.  There are several versions,
+ * depending on data type and on whether or not a barrier is used.
+ */
+
+
+/*! @abstract Compare and swap for 32-bit values.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSAtomicCompareAndSwap32( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
+
+
+/*! @abstract Compare and swap for 32-bit values with barrier.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwap32}
+       except that it also introduces a barrier.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSAtomicCompareAndSwap32Barrier( int32_t __oldValue, int32_t __newValue, volatile int32_t *__theValue );
+
+
+/*! @abstract Compare and swap pointers.
+    @discussion
+       This function compares the pointer stored in <code>__oldValue</code> to the pointer
+       in the memory location referenced by <code>__theValue</code>.  If the pointers
+       match, this function stores the pointer from <code>__newValue</code> into
+       that memory location atomically.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+bool   OSAtomicCompareAndSwapPtr( void *__oldValue, void *__newValue, void * volatile *__theValue );
+
+
+/*! @abstract Compare and swap pointers with barrier.
+    @discussion
+       This function compares the pointer stored in <code>__oldValue</code> to the pointer
+       in the memory location referenced by <code>__theValue</code>.  If the pointers
+       match, this function stores the pointer from <code>__newValue</code> into
+       that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwapPtr}
+       except that it also introduces a barrier.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+bool   OSAtomicCompareAndSwapPtrBarrier( void *__oldValue, void *__newValue, void * volatile *__theValue );
+
+
+/*! @abstract Compare and swap for <code>int</code> values.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwap32}.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+bool   OSAtomicCompareAndSwapInt( int __oldValue, int __newValue, volatile int *__theValue );
+
+
+/*! @abstract Compare and swap for <code>int</code> values.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwapInt}
+       except that it also introduces a barrier.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwap32Barrier}.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+bool   OSAtomicCompareAndSwapIntBarrier( int __oldValue, int __newValue, volatile int *__theValue );
+
+
+/*! @abstract Compare and swap for <code>long</code> values.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, 
+       or {@link OSAtomicCompareAndSwap64} on 64-bit architectures.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+bool   OSAtomicCompareAndSwapLong( long __oldValue, long __newValue, volatile long *__theValue );
+
+
+/*! @abstract Compare and swap for <code>long</code> values.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwapLong}
+       except that it also introduces a barrier.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwap32} on 32-bit architectures, 
+       or {@link OSAtomicCompareAndSwap64} on 64-bit architectures.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+bool   OSAtomicCompareAndSwapLongBarrier( long __oldValue, long __newValue, volatile long *__theValue );
+
+
+/*! @abstract Compare and swap for <code>int64_t</code> values.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSAtomicCompareAndSwap64( int64_t __oldValue, int64_t __newValue,
+               volatile OSAtomic_int64_aligned64_t *__theValue );
+
+
+/*! @abstract Compare and swap for <code>int64_t</code> values with barrier.
+    @discussion
+       This function compares the value in <code>__oldValue</code> to the value
+       in the memory location referenced by <code>__theValue</code>.  If the values
+       match, this function stores the value from <code>__newValue</code> into
+       that memory location atomically.
+
+       This function is equivalent to {@link OSAtomicCompareAndSwap64}
+       except that it also introduces a barrier.
+    @result Returns TRUE on a match, FALSE otherwise.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_compare_exchange_strong)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_3_2)
+bool    OSAtomicCompareAndSwap64Barrier( int64_t __oldValue, int64_t __newValue,
+               volatile OSAtomic_int64_aligned64_t *__theValue );
+
+
+/* Test and set.
+ * They return the original value of the bit, and operate on bit (0x80>>(n&7))
+ * in byte ((char*)theAddress + (n>>3)).
+ */
+/*! @abstract Atomic test and set
+    @discussion
+       This function tests a bit in the value referenced by
+       <code>__theAddress</code> and if it is not set, sets it.
+
+       The bit is chosen by the value of <code>__n</code> such that the
+       operation will be performed on bit <code>(0x80 >> (__n & 7))</code>
+       of byte <code>((char *)__theAddress + (__n >> 3))</code>.
+
+       For example, if <code>__theAddress</code> points to a 64-bit value,
+       to compare the value of the most significant bit, you would specify
+       <code>56</code> for <code>__n</code>.
+    @result
+       Returns the original value of the bit being tested.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_or)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSAtomicTestAndSet( uint32_t __n, volatile void *__theAddress );
+
+
+/*! @abstract Atomic test and set with barrier
+    @discussion
+       This function tests a bit in the value referenced by <code>__theAddress</code>
+       and if it is not set, sets it.
+
+       The bit is chosen by the value of <code>__n</code> such that the
+       operation will be performed on bit <code>(0x80 >> (__n & 7))</code>
+       of byte <code>((char *)__theAddress + (__n >> 3))</code>.
+
+       For example, if <code>__theAddress</code> points to a 64-bit value,
+       to compare the value of the most significant bit, you would specify
+       <code>56</code> for <code>__n</code>.
+
+       This function is equivalent to {@link OSAtomicTestAndSet}
+       except that it also introduces a barrier.
+    @result
+       Returns the original value of the bit being tested.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_or)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSAtomicTestAndSetBarrier( uint32_t __n, volatile void *__theAddress );
+
+
+
+/*! @abstract Atomic test and clear
+    @discussion
+       This function tests a bit in the value referenced by <code>__theAddress</code>
+       and if it is not cleared, clears it.
+
+       The bit is chosen by the value of <code>__n</code> such that the
+       operation will be performed on bit <code>(0x80 >> (__n & 7))</code>
+       of byte <code>((char *)__theAddress + (__n >> 3))</code>.
+
+       For example, if <code>__theAddress</code> points to a 64-bit value,
+       to compare the value of the most significant bit, you would specify
+       <code>56</code> for <code>__n</code>.
+    @result
+       Returns the original value of the bit being tested.
+ */
+OSATOMIC_DEPRECATED_REPLACE_WITH(atomic_fetch_and)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSAtomicTestAndClear( uint32_t __n, volatile void *__theAddress );
+
+
+/*! @abstract Atomic test and clear with barrier
+    @discussion
+       This function tests a bit in the value referenced by <code>__theAddress</code>
+       and if it is not cleared, clears it.
+       The bit is chosen by the value of <code>__n</code> such that the
+       operation will be performed on bit <code>(0x80 >> (__n & 7))</code>
+       of byte <code>((char *)__theAddress + (n >> 3))</code>.
+       For example, if <code>__theAddress</code> points to a 64-bit value,
+       to compare the value of the most significant bit, you would specify
+       <code>56</code> for <code>__n</code>.
+       This function is equivalent to {@link OSAtomicTestAndSet}
+       except that it also introduces a barrier.
+    @result
+       Returns the original value of the bit being tested.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_fetch_and)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSAtomicTestAndClearBarrier( uint32_t __n, volatile void *__theAddress );
+
+/*! @group Memory barriers */
+
+/*! @abstract Memory barrier.
+    @discussion
+       This function serves as both a read and write barrier.
+ */
+OSATOMIC_BARRIER_DEPRECATED_REPLACE_WITH(atomic_thread_fence)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+void    OSMemoryBarrier( void );
+
+__END_DECLS
+
+#else // defined(OSATOMIC_USE_INLINED) && OSATOMIC_USE_INLINED
+
+/*
+ * Inline implementations of the legacy OSAtomic interfaces in terms of
+ * C11 <stdatomic.h> resp. C++11 <atomic> primitives.
+ * Direct use of those primitives is preferred.
+ */
+
+#include <sys/cdefs.h>
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+/*
+ * Select the underlying atomic primitives: C++11 std::atomic<> when
+ * compiled as C++ (inside extern "C++" so the later __BEGIN_DECLS still
+ * gives the wrappers C linkage), C11 _Atomic() otherwise.
+ * OSATOMIC_STD() qualifies an atomic name with std:: for C++ and leaves
+ * it bare for C, so the function bodies below compile in both modes.
+ */
+#ifdef __cplusplus
+extern "C++" {
+#if !(__has_include(<atomic>) && __has_feature(cxx_atomic))
+#error Cannot use inlined OSAtomic without <atomic> and C++11 atomics
+#endif
+#include <atomic>
+typedef std::atomic<uint8_t> _OSAtomic_uint8_t;
+typedef std::atomic<int32_t> _OSAtomic_int32_t;
+typedef std::atomic<uint32_t> _OSAtomic_uint32_t;
+typedef std::atomic<int64_t> _OSAtomic_int64_t;
+typedef std::atomic<void*> _OSAtomic_void_ptr_t;
+#define OSATOMIC_STD(_a) std::_a
+__BEGIN_DECLS
+#else
+#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
+#error Cannot use inlined OSAtomic without <stdatomic.h> and C11 atomics
+#endif
+#include <stdatomic.h>
+typedef _Atomic(uint8_t) _OSAtomic_uint8_t;
+typedef _Atomic(int32_t) _OSAtomic_int32_t;
+typedef _Atomic(uint32_t) _OSAtomic_uint32_t;
+typedef _Atomic(int64_t) _OSAtomic_int64_t;
+typedef _Atomic(void*) _OSAtomic_void_ptr_t;
+#define OSATOMIC_STD(_a) _a
+#endif
+
+/*
+ * int64_t typedef carrying the alignment the underlying atomic type needs:
+ * prefer _Alignof of the atomic type, fall back to its sizeof as an
+ * alignment estimate, and finally to a plain int64_t when the compiler
+ * supports neither the extension nor the attribute.
+ */
+#if __has_extension(c_alignof) && __has_attribute(aligned)
+typedef int64_t __attribute__((__aligned__(_Alignof(_OSAtomic_int64_t))))
+               OSAtomic_int64_aligned64_t;
+#elif __has_attribute(aligned)
+typedef int64_t __attribute__((__aligned__((sizeof(_OSAtomic_int64_t)))))
+               OSAtomic_int64_aligned64_t;
+#else
+typedef int64_t OSAtomic_int64_aligned64_t;
+#endif
+
+/*
+ * Force inlining of the legacy wrappers when the compiler supports the
+ * always_inline attribute; otherwise fall back to plain static __inline.
+ * (The two branches were previously swapped, which applied the attribute
+ * only on compilers that do not recognize it.)
+ */
+#if __has_attribute(always_inline)
+#define OSATOMIC_INLINE static __inline __attribute__((__always_inline__))
+#else
+#define OSATOMIC_INLINE static __inline
+#endif
+
+/* Atomically adds __theAmount to *__theValue; returns the NEW value
+ * (fetch_add result plus the amount), matching the legacy contract.
+ * Relaxed ordering: no barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicAdd32(int32_t __theAmount, volatile int32_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_fetch_add_explicit)(
+                       (volatile _OSAtomic_int32_t*) __theValue, __theAmount,
+                       OSATOMIC_STD(memory_order_relaxed)) + __theAmount);
+}
+
+/* As OSAtomicAdd32, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicAdd32Barrier(int32_t __theAmount, volatile int32_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_fetch_add_explicit)(
+                       (volatile _OSAtomic_int32_t*) __theValue, __theAmount,
+                       OSATOMIC_STD(memory_order_seq_cst)) + __theAmount);
+}
+
+/* Atomically increments *__theValue; returns the new value. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicIncrement32(volatile int32_t *__theValue)
+{
+       return OSAtomicAdd32(1, __theValue);
+}
+
+/* Barrier variant of OSAtomicIncrement32 (seq_cst via Add32Barrier). */
+OSATOMIC_INLINE
+int32_t
+OSAtomicIncrement32Barrier(volatile int32_t *__theValue)
+{
+       return OSAtomicAdd32Barrier(1, __theValue);
+}
+
+/* Atomically decrements *__theValue; returns the new value. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicDecrement32(volatile int32_t *__theValue)
+{
+       return OSAtomicAdd32(-1, __theValue);
+}
+
+/* Barrier variant of OSAtomicDecrement32 (seq_cst via Add32Barrier). */
+OSATOMIC_INLINE
+int32_t
+OSAtomicDecrement32Barrier(volatile int32_t *__theValue)
+{
+       return OSAtomicAdd32Barrier(-1, __theValue);
+}
+
+/* 64-bit analogue of OSAtomicAdd32: atomically adds __theAmount and
+ * returns the NEW value; relaxed ordering. The pointer type requires the
+ * caller-visible alignment of OSAtomic_int64_aligned64_t. */
+OSATOMIC_INLINE
+int64_t
+OSAtomicAdd64(int64_t __theAmount,
+               volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_fetch_add_explicit)(
+                       (volatile _OSAtomic_int64_t*) __theValue, __theAmount,
+                       OSATOMIC_STD(memory_order_relaxed)) + __theAmount);
+}
+
+/* As OSAtomicAdd64, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int64_t
+OSAtomicAdd64Barrier(int64_t __theAmount,
+               volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_fetch_add_explicit)(
+                       (volatile _OSAtomic_int64_t*) __theValue, __theAmount,
+                       OSATOMIC_STD(memory_order_seq_cst)) + __theAmount);
+}
+
+/* Atomically increments *__theValue; returns the new value. */
+OSATOMIC_INLINE
+int64_t
+OSAtomicIncrement64(volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return OSAtomicAdd64(1, __theValue);
+}
+
+/* Barrier variant of OSAtomicIncrement64 (seq_cst via Add64Barrier). */
+OSATOMIC_INLINE
+int64_t
+OSAtomicIncrement64Barrier(volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return OSAtomicAdd64Barrier(1, __theValue);
+}
+
+/* Atomically decrements *__theValue; returns the new value. */
+OSATOMIC_INLINE
+int64_t
+OSAtomicDecrement64(volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return OSAtomicAdd64(-1, __theValue);
+}
+
+/* Barrier variant of OSAtomicDecrement64 (seq_cst via Add64Barrier). */
+OSATOMIC_INLINE
+int64_t
+OSAtomicDecrement64Barrier(volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return OSAtomicAdd64Barrier(-1, __theValue);
+}
+
+/* Atomically ORs __theMask into *__theValue; returns the NEW value
+ * (old | mask). Relaxed ordering. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicOr32(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_or_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_relaxed)) | __theMask);
+}
+
+/* As OSAtomicOr32, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicOr32Barrier(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_or_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_seq_cst)) | __theMask);
+}
+
+/* "Orig" variant: atomic OR that returns the ORIGINAL (pre-OR) value. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicOr32Orig(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_or_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* As OSAtomicOr32Orig, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicOr32OrigBarrier(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_or_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_seq_cst)));
+}
+
+/* Atomically ANDs __theMask into *__theValue; returns the NEW value
+ * (old & mask). Relaxed ordering. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicAnd32(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_and_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_relaxed)) & __theMask);
+}
+
+/* As OSAtomicAnd32, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicAnd32Barrier(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_and_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_seq_cst)) & __theMask);
+}
+
+/* "Orig" variant: atomic AND that returns the ORIGINAL (pre-AND) value. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicAnd32Orig(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_and_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* As OSAtomicAnd32Orig, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicAnd32OrigBarrier(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_and_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_seq_cst)));
+}
+
+/* Atomically XORs __theMask into *__theValue; returns the NEW value
+ * (old ^ mask). Relaxed ordering. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicXor32(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_xor_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_relaxed)) ^ __theMask);
+}
+
+/* As OSAtomicXor32, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicXor32Barrier(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_xor_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_seq_cst)) ^ __theMask);
+}
+
+/* "Orig" variant: atomic XOR that returns the ORIGINAL (pre-XOR) value. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicXor32Orig(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_xor_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* As OSAtomicXor32Orig, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+int32_t
+OSAtomicXor32OrigBarrier(uint32_t __theMask, volatile uint32_t *__theValue)
+{
+       return (int32_t)(OSATOMIC_STD(atomic_fetch_xor_explicit)(
+                       (volatile _OSAtomic_uint32_t*)__theValue, __theMask,
+                       OSATOMIC_STD(memory_order_seq_cst)));
+}
+
+/* Strong compare-and-swap: if *__theValue == __oldValue, stores __newValue
+ * and returns true; otherwise returns false. __oldValue is a by-value
+ * copy, so the value observed on failure is discarded (the legacy API
+ * reports only the bool). Relaxed ordering. */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwap32(int32_t __oldValue, int32_t __newValue,
+               volatile int32_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile _OSAtomic_int32_t*)__theValue, &__oldValue, __newValue,
+                       OSATOMIC_STD(memory_order_relaxed),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* As OSAtomicCompareAndSwap32, with seq_cst ordering on success
+ * (failure ordering stays relaxed). */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwap32Barrier(int32_t __oldValue, int32_t __newValue,
+               volatile int32_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile _OSAtomic_int32_t*)__theValue, &__oldValue, __newValue,
+                       OSATOMIC_STD(memory_order_seq_cst),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* Pointer-width compare-and-swap; relaxed ordering. */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwapPtr(void *__oldValue, void *__newValue,
+               void * volatile *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile _OSAtomic_void_ptr_t*)__theValue, &__oldValue, __newValue,
+                       OSATOMIC_STD(memory_order_relaxed),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* Barrier variant of OSAtomicCompareAndSwapPtr (seq_cst on success). */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwapPtrBarrier(void *__oldValue, void *__newValue,
+               void * volatile *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile _OSAtomic_void_ptr_t*)__theValue, &__oldValue, __newValue,
+                       OSATOMIC_STD(memory_order_seq_cst),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* int-typed compare-and-swap (uses atomic_int); relaxed ordering. */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwapInt(int __oldValue, int __newValue,
+               volatile int *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile OSATOMIC_STD(atomic_int)*)__theValue, &__oldValue,
+                       __newValue, OSATOMIC_STD(memory_order_relaxed),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* Barrier variant of OSAtomicCompareAndSwapInt (seq_cst on success). */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwapIntBarrier(int __oldValue, int __newValue,
+               volatile int *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile OSATOMIC_STD(atomic_int)*)__theValue, &__oldValue,
+                       __newValue, OSATOMIC_STD(memory_order_seq_cst),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* long-typed compare-and-swap (uses atomic_long); relaxed ordering. */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwapLong(long __oldValue, long __newValue,
+               volatile long *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile OSATOMIC_STD(atomic_long)*)__theValue, &__oldValue,
+                       __newValue, OSATOMIC_STD(memory_order_relaxed),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* Barrier variant of OSAtomicCompareAndSwapLong (seq_cst on success). */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwapLongBarrier(long __oldValue, long __newValue,
+               volatile long *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile OSATOMIC_STD(atomic_long)*)__theValue, &__oldValue,
+                       __newValue, OSATOMIC_STD(memory_order_seq_cst),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* 64-bit compare-and-swap; relaxed ordering. Pointer must carry the
+ * OSAtomic_int64_aligned64_t alignment. */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwap64(int64_t __oldValue, int64_t __newValue,
+               volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile _OSAtomic_int64_t*)__theValue, &__oldValue, __newValue,
+                       OSATOMIC_STD(memory_order_relaxed),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* Barrier variant of OSAtomicCompareAndSwap64 (seq_cst on success). */
+OSATOMIC_INLINE
+bool
+OSAtomicCompareAndSwap64Barrier(int64_t __oldValue, int64_t __newValue,
+               volatile OSAtomic_int64_aligned64_t *__theValue)
+{
+       return (OSATOMIC_STD(atomic_compare_exchange_strong_explicit)(
+                       (volatile _OSAtomic_int64_t*)__theValue, &__oldValue, __newValue,
+                       OSATOMIC_STD(memory_order_seq_cst),
+                       OSATOMIC_STD(memory_order_relaxed)));
+}
+
+/* Atomically sets bit (0x80 >> (__n & 7)) of byte ((char *)__theAddress +
+ * (__n >> 3)); returns true iff the bit was already set. Relaxed order. */
+OSATOMIC_INLINE
+bool
+OSAtomicTestAndSet(uint32_t __n, volatile void *__theAddress)
+{
+       uintptr_t a = (uintptr_t)__theAddress + (__n >> 3);
+       uint8_t v = (0x80u >> (__n & 7));
+       return (OSATOMIC_STD(atomic_fetch_or_explicit)((_OSAtomic_uint8_t*)a, v,
+                       OSATOMIC_STD(memory_order_relaxed)) & v);
+}
+
+/* As OSAtomicTestAndSet, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+bool
+OSAtomicTestAndSetBarrier(uint32_t __n, volatile void *__theAddress)
+{
+       uintptr_t a = (uintptr_t)__theAddress + (__n >> 3);
+       uint8_t v = (0x80u >> (__n & 7));
+       return (OSATOMIC_STD(atomic_fetch_or_explicit)((_OSAtomic_uint8_t*)a, v,
+                       OSATOMIC_STD(memory_order_seq_cst)) & v);
+}
+
+/* Atomically clears the addressed bit (fetch_and with ~mask); returns
+ * true iff the bit was previously set. Relaxed ordering. */
+OSATOMIC_INLINE
+bool
+OSAtomicTestAndClear(uint32_t __n, volatile void *__theAddress)
+{
+       uintptr_t a = (uintptr_t)__theAddress + (__n >> 3);
+       uint8_t v = (0x80u >> (__n & 7));
+       return (OSATOMIC_STD(atomic_fetch_and_explicit)((_OSAtomic_uint8_t*)a,
+                       (uint8_t)~v, OSATOMIC_STD(memory_order_relaxed)) & v);
+}
+
+/* As OSAtomicTestAndClear, but with a full (seq_cst) barrier. */
+OSATOMIC_INLINE
+bool
+OSAtomicTestAndClearBarrier(uint32_t __n, volatile void *__theAddress)
+{
+       uintptr_t a = (uintptr_t)__theAddress + (__n >> 3);
+       uint8_t v = (0x80u >> (__n & 7));
+       return (OSATOMIC_STD(atomic_fetch_and_explicit)((_OSAtomic_uint8_t*)a,
+                       (uint8_t)~v, OSATOMIC_STD(memory_order_seq_cst)) & v);
+}
+
+/* Full read/write memory barrier: a sequentially-consistent fence. */
+OSATOMIC_INLINE
+void
+OSMemoryBarrier(void)
+{
+       OSATOMIC_STD(atomic_thread_fence)(OSATOMIC_STD(memory_order_seq_cst));
+}
+
+#undef OSATOMIC_INLINE
+#undef OSATOMIC_STD
+#ifdef __cplusplus
+__END_DECLS
+} // extern "C++"
+#endif
+
+#endif // defined(OSATOMIC_USE_INLINED) && OSATOMIC_USE_INLINED
+
+#endif /* _OSATOMIC_DEPRECATED_H_ */
diff --git a/include/libkern/OSAtomicQueue.h b/include/libkern/OSAtomicQueue.h
new file mode 100644 (file)
index 0000000..8ffa4be
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OSATOMICQUEUE_H_
+#define _OSATOMICQUEUE_H_
+
+#include    <stddef.h>
+#include    <sys/cdefs.h>
+#include    <stdint.h>
+#include    <stdbool.h>
+
+#include    <Availability.h>
+
+/*! @header Lockless atomic enqueue and dequeue
+ * These routines manipulate singly-linked LIFO lists.
+ */
+
+__BEGIN_DECLS
+
+/*! @abstract The data structure for a queue head.
+    @discussion
+       You should always initialize a queue head structure with the
+       initialization vector {@link OS_ATOMIC_QUEUE_INIT} before use.
+ */
+#if defined(__x86_64__)
+
+typedef volatile struct {
+       void    *opaque1;
+       long     opaque2;
+} __attribute__ ((aligned (16))) OSQueueHead;
+
+#else
+
+typedef volatile struct {
+       void    *opaque1;
+       long     opaque2;
+} OSQueueHead;
+
+#endif
+
+/*! @abstract The initialization vector for a queue head. */
+#define        OS_ATOMIC_QUEUE_INIT    { NULL, 0 }
+
+/*! @abstract Enqueue an element onto a list.
+    @discussion
+       Memory barriers are incorporated as needed to permit thread-safe access
+       to the queue element.
+    @param __list
+       The list on which you want to enqueue the element.
+    @param __new
+       The element to add.
+    @param __offset
+       The "offset" parameter is the offset (in bytes) of the link field
+       from the beginning of the data structure being queued (<code>__new</code>).
+       The link field should be a pointer type.
+       The <code>__offset</code> value needs to be same for all enqueuing and
+       dequeuing operations on the same list, even if different structure types
+       are enqueued on that list.  The use of <code>offsetof()</code>, defined in
+       <code>stddef.h</code> is the common way to specify the <code>__offset</code>
+       value.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_4_0)
+void  OSAtomicEnqueue( OSQueueHead *__list, void *__new, size_t __offset);
+
+
+/*! @abstract Dequeue an element from a list.
+    @discussion
+       Memory barriers are incorporated as needed to permit thread-safe access
+       to the queue element.
+    @param __list
+       The list from which you want to dequeue an element.
+    @param __offset
+       The "offset" parameter is the offset (in bytes) of the link field
+       from the beginning of the data structure being dequeued.
+       The link field should be a pointer type.
+       The <code>__offset</code> value needs to be same for all enqueuing and
+       dequeuing operations on the same list, even if different structure types
+       are enqueued on that list.  The use of <code>offsetof()</code>, defined in
+       <code>stddef.h</code> is the common way to specify the <code>__offset</code>
+       value.
+       IMPORTANT: the memory backing the link field of a queue element must not be
+       unmapped after OSAtomicDequeue() returns until all concurrent calls to
+       OSAtomicDequeue() for the same list on other threads have also returned,
+       as they may still be accessing that memory location.
+    @result
+       Returns the most recently enqueued element, or <code>NULL</code> if the
+       list is empty.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_4_0)
+void* OSAtomicDequeue( OSQueueHead *__list, size_t __offset);
+
+#if defined(__x86_64__) || defined(__i386__)
+
+/*! @group Lockless atomic fifo enqueue and dequeue
+ * These routines manipulate singly-linked FIFO lists.
+ */
+
+/*! @abstract The data structure for a fifo queue head.
+    @discussion
+       You should always initialize a fifo queue head structure with the
+       initialization vector {@link OS_ATOMIC_FIFO_QUEUE_INIT} before use.
+ */
+#if defined(__x86_64__)
+
+typedef        volatile struct {
+       void    *opaque1;
+       void    *opaque2;
+       int      opaque3;
+} __attribute__ ((aligned (16))) OSFifoQueueHead;
+
+#else
+
+typedef        volatile struct {
+       void    *opaque1;
+       void    *opaque2;
+       int      opaque3;
+} OSFifoQueueHead;
+
+#endif
+
+/*! @abstract The initialization vector for a fifo queue head. */
+#define OS_ATOMIC_FIFO_QUEUE_INIT   { NULL, NULL, 0 }
+
+/*! @abstract Enqueue an element onto a list.
+    @discussion
+       Memory barriers are incorporated as needed to permit thread-safe access
+       to the queue element.
+    @param __list
+       The list on which you want to enqueue the element.
+    @param __new
+       The element to add.
+    @param __offset
+       The "offset" parameter is the offset (in bytes) of the link field
+       from the beginning of the data structure being queued (<code>__new</code>).
+       The link field should be a pointer type.
+       The <code>__offset</code> value needs to be same for all enqueuing and
+       dequeuing operations on the same list, even if different structure types
+       are enqueued on that list.  The use of <code>offsetof()</code>, defined in
+       <code>stddef.h</code> is the common way to specify the <code>__offset</code>
+       value.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_NA)
+void  OSAtomicFifoEnqueue( OSFifoQueueHead *__list, void *__new, size_t __offset);
+
+/*! @abstract Dequeue an element from a list.
+    @discussion
+       Memory barriers are incorporated as needed to permit thread-safe access
+       to the queue element.
+    @param __list
+       The list from which you want to dequeue an element.
+    @param __offset
+       The "offset" parameter is the offset (in bytes) of the link field
+       from the beginning of the data structure being dequeued.
+       The link field should be a pointer type.
+       The <code>__offset</code> value needs to be same for all enqueuing and
+       dequeuing operations on the same list, even if different structure types
+       are enqueued on that list.  The use of <code>offsetof()</code>, defined in
+       <code>stddef.h</code> is the common way to specify the <code>__offset</code>
+       value.
+    @result
+       Returns the oldest enqueued element, or <code>NULL</code> if the
+       list is empty.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_NA)
+void* OSAtomicFifoDequeue( OSFifoQueueHead *__list, size_t __offset);
+
+#endif /* __i386__ || __x86_64__ */
+
+__END_DECLS
+
+#endif /* _OSATOMICQUEUE_H_ */
diff --git a/include/libkern/OSCacheControl.h b/include/libkern/OSCacheControl.h
new file mode 100644 (file)
index 0000000..4464477
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OS_CACHE_CONTROL_H_
+#define _OS_CACHE_CONTROL_H_
+
+#include    <stddef.h>
+#include    <sys/cdefs.h>
+#include    <stdint.h>
+#include    <Availability.h>
+
+__BEGIN_DECLS
+
+
+/* Functions performed by sys_cache_control(): */
+
+/* Prepare memory for execution.  This should be called
+ * after writing machine instructions to memory, before
+ * executing them.  It syncs the dcache and icache.
+ * On IA32 processors this function is a NOP, because
+ * no synchronization is required.
+ */
+#define        kCacheFunctionPrepareForExecution       1
+
+/* Flush data cache(s).  This ensures that cached data 
+ * makes it all the way out to DRAM, and then removes
+ * copies of the data from all processor caches.
+ * It can be useful when dealing with cache incoherent
+ * devices or DMA.
+ */
+#define        kCacheFunctionFlushDcache       2
+
+
+/* perform one of the above cache functions: */
+int    sys_cache_control( int function, void *start, size_t len) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+/* equivalent to sys_cache_control(kCacheFunctionPrepareForExecution): */
+void   sys_icache_invalidate( void *start, size_t len) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+/* equivalent to sys_cache_control(kCacheFunctionFlushDcache): */
+void   sys_dcache_flush( void *start, size_t len) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0);
+
+
+__END_DECLS
+
+#endif /* _OS_CACHE_CONTROL_H_ */
diff --git a/include/libkern/OSSpinLockDeprecated.h b/include/libkern/OSSpinLockDeprecated.h
new file mode 100644 (file)
index 0000000..68b64cd
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OSSPINLOCK_DEPRECATED_H_
+#define _OSSPINLOCK_DEPRECATED_H_
+
+/*! @header
+ * These are deprecated legacy interfaces for userspace spinlocks.
+ *
+ * These interfaces should no longer be used, particularly in situations where
+ * threads of differing priorities may contend on the same spinlock.
+ *
+ * The interfaces in <os/lock.h> should be used instead in cases where a very
+ * low-level lock primitive is required. In general however, using higher level
+ * synchronization primitives such as those provided by the pthread or dispatch
+ * subsystems should be preferred.
+ *
+ * Define OSSPINLOCK_USE_INLINED=1 to get inline implementations of these
+ * interfaces in terms of the <os/lock.h> primitives. This is intended as a
+ * transition convenience, direct use of those primitives is preferred.
+ */
+
+#ifndef OSSPINLOCK_DEPRECATED
+#define OSSPINLOCK_DEPRECATED 1
+#define OSSPINLOCK_DEPRECATED_MSG(_r) "Use " #_r "() from <os/lock.h> instead"
+#define OSSPINLOCK_DEPRECATED_REPLACE_WITH(_r) \
+       __OS_AVAILABILITY_MSG(macosx, deprecated=10.12, OSSPINLOCK_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(ios, deprecated=10.0, OSSPINLOCK_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(tvos, deprecated=10.0, OSSPINLOCK_DEPRECATED_MSG(_r)) \
+       __OS_AVAILABILITY_MSG(watchos, deprecated=3.0, OSSPINLOCK_DEPRECATED_MSG(_r))
+#else
+#undef OSSPINLOCK_DEPRECATED
+#define OSSPINLOCK_DEPRECATED 0
+#define OSSPINLOCK_DEPRECATED_REPLACE_WITH(_r)
+#endif
+
+#if !(defined(OSSPINLOCK_USE_INLINED) && OSSPINLOCK_USE_INLINED)
+
+#include    <sys/cdefs.h>
+#include    <stddef.h>
+#include    <stdint.h>
+#include    <stdbool.h>
+#include    <Availability.h>
+
+__BEGIN_DECLS
+
+/*! @abstract The default value for an <code>OSSpinLock</code>.
+    @discussion
+       The convention is that unlocked is zero, locked is nonzero.
+ */
+#define        OS_SPINLOCK_INIT    0
+
+
+/*! @abstract Data type for a spinlock.
+    @discussion
+       You should always initialize a spinlock to {@link OS_SPINLOCK_INIT} before
+       using it.
+ */
+typedef int32_t OSSpinLock OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock);
+
+
+/*! @abstract Locks a spinlock if it would not block
+    @result
+       Returns <code>false</code> if the lock was already held by another thread,
+       <code>true</code> if it took the lock successfully.
+ */
+OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock_trylock)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+bool    OSSpinLockTry( volatile OSSpinLock *__lock );
+
+
+/*! @abstract Locks a spinlock
+    @discussion
+       Although the lock operation spins, it employs various strategies to back
+       off if the lock is held.
+ */
+OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock_lock)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+void    OSSpinLockLock( volatile OSSpinLock *__lock );
+
+
+/*! @abstract Unlocks a spinlock */
+OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock_unlock)
+__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+void    OSSpinLockUnlock( volatile OSSpinLock *__lock );
+
+__END_DECLS
+
+#else /* OSSPINLOCK_USE_INLINED */
+
+/*
+ * Inline implementations of the legacy OSSpinLock interfaces in terms of the
+ * of the <os/lock.h> primitives. Direct use of those primitives is preferred.
+ *
+ * NOTE: the locked value of os_unfair_lock is implementation defined and
+ * subject to change, code that relies on the specific locked value used by the
+ * legacy OSSpinLock interface WILL break when using these inline
+ * implementations in terms of os_unfair_lock.
+ */
+
+#if !OSSPINLOCK_USE_INLINED_TRANSPARENT
+
+#include <os/lock.h>
+
+__BEGIN_DECLS
+
+/*
+ * Force inlining of the legacy spinlock wrappers when the compiler
+ * supports always_inline; otherwise fall back to plain static __inline.
+ * (The two branches were previously swapped, which applied the attribute
+ * only on compilers that do not recognize it.)
+ */
+#if __has_attribute(always_inline)
+#define OSSPINLOCK_INLINE static __inline __attribute__((__always_inline__))
+#else
+#define OSSPINLOCK_INLINE static __inline
+#endif
+
+#define OS_SPINLOCK_INIT 0
+typedef int32_t OSSpinLock;
+
+#if  __has_extension(c_static_assert)
+_Static_assert(sizeof(OSSpinLock) == sizeof(os_unfair_lock),
+               "Incompatible os_unfair_lock type");
+#endif
+
+/* Casts the OSSpinLock storage to os_unfair_lock (same size, checked by
+ * the _Static_assert above) and forwards to os_unfair_lock_lock. */
+OSSPINLOCK_INLINE
+void
+OSSpinLockLock(volatile OSSpinLock *__lock)
+{
+       os_unfair_lock_t lock = (os_unfair_lock_t)__lock;
+       return os_unfair_lock_lock(lock);
+}
+
+/* Try-lock forwarder: returns true iff os_unfair_lock_trylock acquired
+ * the lock without blocking. */
+OSSPINLOCK_INLINE
+bool
+OSSpinLockTry(volatile OSSpinLock *__lock)
+{
+       os_unfair_lock_t lock = (os_unfair_lock_t)__lock;
+       return os_unfair_lock_trylock(lock);
+}
+
+/* Unlock forwarder to os_unfair_lock_unlock. */
+OSSPINLOCK_INLINE
+void
+OSSpinLockUnlock(volatile OSSpinLock *__lock)
+{
+       os_unfair_lock_t lock = (os_unfair_lock_t)__lock;
+       return os_unfair_lock_unlock(lock);
+}
+
+#undef OSSPINLOCK_INLINE
+
+__END_DECLS
+
+#else /* OSSPINLOCK_USE_INLINED_TRANSPARENT */
+
+#include    <sys/cdefs.h>
+#include    <stddef.h>
+#include    <stdint.h>
+#include    <stdbool.h>
+#include    <Availability.h>
+
+#define OS_NOSPIN_LOCK_AVAILABILITY \
+               __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) \
+               __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+
+__BEGIN_DECLS
+
+#define OS_SPINLOCK_INIT 0
+typedef int32_t OSSpinLock OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock);
+typedef volatile OSSpinLock *_os_nospin_lock_t;
+
+OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock_lock)
+OS_NOSPIN_LOCK_AVAILABILITY
+void _os_nospin_lock_lock(_os_nospin_lock_t lock);
+#undef OSSpinLockLock
+#define OSSpinLockLock(lock) _os_nospin_lock_lock(lock)
+
+OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock_trylock)
+OS_NOSPIN_LOCK_AVAILABILITY
+bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
+#undef OSSpinLockTry
+#define OSSpinLockTry(lock) _os_nospin_lock_trylock(lock)
+
+OSSPINLOCK_DEPRECATED_REPLACE_WITH(os_unfair_lock_unlock)
+OS_NOSPIN_LOCK_AVAILABILITY
+void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
+#undef OSSpinLockUnlock
+#define OSSpinLockUnlock(lock) _os_nospin_lock_unlock(lock)
+
+__END_DECLS
+
+#endif /* OSSPINLOCK_USE_INLINED_TRANSPARENT */
+
+#endif /* OSSPINLOCK_USE_INLINED */
+
+#endif /* _OSSPINLOCK_DEPRECATED_H_ */
diff --git a/include/os/alloc_once_impl.h b/include/os/alloc_once_impl.h
new file mode 100644 (file)
index 0000000..d821e18
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_ALLOC_ONCE_IMPL__
+#define __OS_ALLOC_ONCE_IMPL__
+
+#ifndef __OS_ALLOC_INDIRECT__
+#error "Please include <os/alloc_once_private.h> instead of this file directly."
+#endif
+
+#include <Availability.h>
+#include <sys/types.h>
+#include <os/base_private.h>
+#include <os/once_private.h>
+
+__BEGIN_DECLS
+
+#define OS_ALLOC_SPI_VERSION 20120430
+
+#define OS_ALLOC_ONCE_KEY_MAX 100
+
+typedef os_once_t os_alloc_token_t;
+struct _os_alloc_once_s {
+       os_alloc_token_t once;
+       void *ptr;
+};
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+extern struct _os_alloc_once_s _os_alloc_once_table[];
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+OS_EXPORT OS_NONNULL1
+void*
+_os_alloc_once(struct _os_alloc_once_s *slot, size_t sz, os_function_t init);
+
+/* 
+ * The region allocated by os_alloc_once is 0-filled when initially
+ * returned (or handed off to the initializer).
+ */
+OS_WARN_RESULT OS_NOTHROW OS_CONST
+__header_always_inline void*
+os_alloc_once(os_alloc_token_t token, size_t sz, os_function_t init)
+{
+       struct _os_alloc_once_s *slot = &_os_alloc_once_table[token];
+       if (OS_EXPECT(slot->once, ~0l) != ~0l) {
+               void *ptr = _os_alloc_once(slot, sz, init);
+               OS_COMPILER_CAN_ASSUME(slot->once == ~0l);
+               return ptr;
+       }
+       return slot->ptr;
+}
+
+__END_DECLS
+
+#endif // __OS_ALLOC_ONCE_IMPL__
diff --git a/include/os/base.h b/include/os/base.h
new file mode 100644 (file)
index 0000000..77ab213
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_BASE__
+#define __OS_BASE__
+
+#include <sys/cdefs.h>
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+#ifndef __has_include
+#define __has_include(x) 0
+#endif
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+#ifndef __has_attribute
+#define __has_attribute(x) 0
+#endif
+#ifndef __has_extension
+#define __has_extension(x) 0
+#endif
+
+#undef OS_INLINE // <sys/_types/_os_inline.h>
+#if __GNUC__
+#define OS_NORETURN __attribute__((__noreturn__))
+#define OS_NOTHROW __attribute__((__nothrow__))
+#define OS_NONNULL1 __attribute__((__nonnull__(1)))
+#define OS_NONNULL2 __attribute__((__nonnull__(2)))
+#define OS_NONNULL3 __attribute__((__nonnull__(3)))
+#define OS_NONNULL4 __attribute__((__nonnull__(4)))
+#define OS_NONNULL5 __attribute__((__nonnull__(5)))
+#define OS_NONNULL6 __attribute__((__nonnull__(6)))
+#define OS_NONNULL7 __attribute__((__nonnull__(7)))
+#define OS_NONNULL8 __attribute__((__nonnull__(8)))
+#define OS_NONNULL9 __attribute__((__nonnull__(9)))
+#define OS_NONNULL10 __attribute__((__nonnull__(10)))
+#define OS_NONNULL11 __attribute__((__nonnull__(11)))
+#define OS_NONNULL12 __attribute__((__nonnull__(12)))
+#define OS_NONNULL13 __attribute__((__nonnull__(13)))
+#define OS_NONNULL14 __attribute__((__nonnull__(14)))
+#define OS_NONNULL15 __attribute__((__nonnull__(15)))
+#define OS_NONNULL_ALL __attribute__((__nonnull__))
+#define OS_SENTINEL __attribute__((__sentinel__))
+#define OS_PURE __attribute__((__pure__))
+#define OS_CONST __attribute__((__const__))
+#define OS_WARN_RESULT __attribute__((__warn_unused_result__))
+#define OS_MALLOC __attribute__((__malloc__))
+#define OS_USED __attribute__((__used__))
+#define OS_UNUSED __attribute__((__unused__))
+#define OS_COLD __attribute__((__cold__))
+#define OS_WEAK __attribute__((__weak__))
+#define OS_WEAK_IMPORT __attribute__((__weak_import__))
+#define OS_NOINLINE __attribute__((__noinline__))
+#define OS_ALWAYS_INLINE __attribute__((__always_inline__))
+#define OS_TRANSPARENT_UNION __attribute__((__transparent_union__))
+#define OS_ALIGNED(n) __attribute__((__aligned__((n))))
+#define OS_FORMAT_PRINTF(x,y) __attribute__((__format__(printf,x,y)))
+#define OS_EXPORT extern __attribute__((__visibility__("default")))
+#define OS_INLINE static __inline__
+#define OS_EXPECT(x, v) __builtin_expect((x), (v))
+#else
+#define OS_NORETURN
+#define OS_NOTHROW
+#define OS_NONNULL1
+#define OS_NONNULL2
+#define OS_NONNULL3
+#define OS_NONNULL4
+#define OS_NONNULL5
+#define OS_NONNULL6
+#define OS_NONNULL7
+#define OS_NONNULL8
+#define OS_NONNULL9
+#define OS_NONNULL10
+#define OS_NONNULL11
+#define OS_NONNULL12
+#define OS_NONNULL13
+#define OS_NONNULL14
+#define OS_NONNULL15
+#define OS_NONNULL_ALL
+#define OS_SENTINEL
+#define OS_PURE
+#define OS_CONST
+#define OS_WARN_RESULT
+#define OS_MALLOC
+#define OS_USED
+#define OS_UNUSED
+#define OS_COLD
+#define OS_WEAK
+#define OS_WEAK_IMPORT
+#define OS_NOINLINE
+#define OS_ALWAYS_INLINE
+#define OS_TRANSPARENT_UNION
+#define OS_ALIGNED(n)
+#define OS_FORMAT_PRINTF(x,y)
+#define OS_EXPORT extern
+#define OS_INLINE static inline
+#define OS_EXPECT(x, v) (x)
+#endif
+
+#if __has_attribute(noescape)
+#define OS_NOESCAPE __attribute__((__noescape__))
+#else
+#define OS_NOESCAPE
+#endif
+
+#if __has_feature(assume_nonnull)
+#define OS_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin")
+#define OS_ASSUME_NONNULL_END   _Pragma("clang assume_nonnull end")
+#else
+#define OS_ASSUME_NONNULL_BEGIN
+#define OS_ASSUME_NONNULL_END
+#endif
+
+#if __has_builtin(__builtin_assume)
+#define OS_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr)
+#else
+#define OS_COMPILER_CAN_ASSUME(expr) ((void)(expr))
+#endif
+
+#if __has_extension(attribute_overloadable)
+#define OS_OVERLOADABLE __attribute__((__overloadable__))
+#else
+#define OS_OVERLOADABLE
+#endif
+
+#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums)
+#define OS_ENUM(_name, _type, ...) \
+               typedef enum : _type { __VA_ARGS__ } _name##_t
+#else
+#define OS_ENUM(_name, _type, ...) \
+               enum { __VA_ARGS__ }; typedef _type _name##_t
+#endif
+
+#if __has_feature(attribute_availability_swift)
+// equivalent to __SWIFT_UNAVAILABLE from Availability.h
+#define OS_SWIFT_UNAVAILABLE(_msg) \
+               __attribute__((__availability__(swift, unavailable, message=_msg)))
+#else
+#define OS_SWIFT_UNAVAILABLE(_msg)
+#endif
+
+#if __has_attribute(swift_private)
+# define OS_REFINED_FOR_SWIFT __attribute__((__swift_private__))
+#else
+# define OS_REFINED_FOR_SWIFT
+#endif
+
+#if __has_attribute(swift_name)
+# define OS_SWIFT_NAME(_name) __attribute__((__swift_name__(#_name)))
+#else
+# define OS_SWIFT_NAME(_name)
+#endif
+
+#define __OS_STRINGIFY(s) #s
+#define OS_STRINGIFY(s) __OS_STRINGIFY(s)
+#define __OS_CONCAT(x, y) x ## y
+#define OS_CONCAT(x, y) __OS_CONCAT(x, y)
+
+#ifdef __GNUC__
+#define os_prevent_tail_call_optimization()  __asm__("")
+#define os_is_compile_time_constant(expr)  __builtin_constant_p(expr)
+#define os_compiler_barrier()  __asm__ __volatile__("" ::: "memory")
+#else
+#define os_prevent_tail_call_optimization()  do { } while (0)
+#define os_is_compile_time_constant(expr)  0
+#define os_compiler_barrier()  do { } while (0)
+#endif
+
+#if __has_attribute(not_tail_called)
+#define OS_NOT_TAIL_CALLED __attribute__((__not_tail_called__))
+#else
+#define OS_NOT_TAIL_CALLED
+#endif
+
+typedef void (*os_function_t)(void *_Nullable);
+
+#ifdef __BLOCKS__
+/*!
+ * @typedef os_block_t
+ *
+ * @abstract
+ * Generic type for a block taking no arguments and returning no value.
+ *
+ * @discussion
+ * When not building with Objective-C ARC, a block object allocated on or
+ * copied to the heap must be released with a -[release] message or the
+ * Block_release() function.
+ *
+ * The declaration of a block literal allocates storage on the stack.
+ * Therefore, this is an invalid construct:
+ * <code>
+ * os_block_t block;
+ * if (x) {
+ *     block = ^{ printf("true\n"); };
+ * } else {
+ *     block = ^{ printf("false\n"); };
+ * }
+ * block(); // unsafe!!!
+ * </code>
+ *
+ * What is happening behind the scenes:
+ * <code>
+ * if (x) {
+ *     struct Block __tmp_1 = ...; // setup details
+ *     block = &__tmp_1;
+ * } else {
+ *     struct Block __tmp_2 = ...; // setup details
+ *     block = &__tmp_2;
+ * }
+ * </code>
+ *
+ * As the example demonstrates, the address of a stack variable is escaping the
+ * scope in which it is allocated. That is a classic C bug.
+ *
+ * Instead, the block literal must be copied to the heap with the Block_copy()
+ * function or by sending it a -[copy] message.
+ */
+typedef void (^os_block_t)(void);
+#endif
+
+#endif // __OS_BASE__
diff --git a/include/os/base_private.h b/include/os/base_private.h
new file mode 100644 (file)
index 0000000..2d38266
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_BASE_PRIVATE__
+#define __OS_BASE_PRIVATE__
+
+#include <os/base.h>
+
+#ifndef os_fastpath
+#define os_fastpath(x) ((__typeof__(x))OS_EXPECT((long)(x), ~0l))
+#endif
+#ifndef os_slowpath
+#define os_slowpath(x) ((__typeof__(x))OS_EXPECT((long)(x), 0l))
+#endif
+#ifndef os_likely
+#define os_likely(x) OS_EXPECT(!!(x), 1)
+#endif
+#ifndef os_unlikely
+#define os_unlikely(x) OS_EXPECT(!!(x), 0)
+#endif
+
+#endif // __OS_BASE_PRIVATE__
diff --git a/include/os/internal/atomic.h b/include/os/internal/atomic.h
new file mode 100644 (file)
index 0000000..f2af82b
--- /dev/null
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_INTERNAL_ATOMIC__
+#define __OS_INTERNAL_ATOMIC__
+
+#ifndef __OS_EXPOSE_INTERNALS_INDIRECT__
+/*
+ * Use C11 <stdatomic.h> or C++11 std::atomic from <atomic> instead
+ *
+ * XXX                           /!\ WARNING /!\                           XXX
+ *
+ * This header file describes INTERNAL interfaces to libplatform used by other
+ * libsystem targets, which are subject to change in future releases of OS X
+ * and iOS. Any applications relying on these interfaces WILL break.
+ *
+ * If you are not a libsystem target, you should NOT EVER use these headers.
+ * Not even a little.
+ *
+ * XXX                           /!\ WARNING /!\                           XXX
+ */
+#error "Please #include <os/internal/internal_shared.h> instead of this file directly."
+#else
+
+// generate error during codegen
+#define _os_atomic_unimplemented() \
+               ({ __asm__(".err unimplemented"); })
+
+#pragma mark -
+#pragma mark memory_order
+
+typedef enum _os_atomic_memory_order
+{
+       _os_atomic_memory_order_relaxed,
+       _os_atomic_memory_order_consume,
+       _os_atomic_memory_order_acquire,
+       _os_atomic_memory_order_release,
+       _os_atomic_memory_order_acq_rel,
+       _os_atomic_memory_order_seq_cst,
+       _os_atomic_memory_order_ordered,
+} _os_atomic_memory_order;
+
+#if !OS_ATOMIC_UP
+
+#define os_atomic_memory_order_relaxed \
+               _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_acquire \
+               _os_atomic_memory_order_acquire
+#define os_atomic_memory_order_release \
+               _os_atomic_memory_order_release
+#define os_atomic_memory_order_acq_rel \
+               _os_atomic_memory_order_acq_rel
+#define os_atomic_memory_order_seq_cst \
+               _os_atomic_memory_order_seq_cst
+#define os_atomic_memory_order_ordered \
+               _os_atomic_memory_order_seq_cst
+
+#else // OS_ATOMIC_UP
+
+#define os_atomic_memory_order_relaxed \
+               _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_acquire \
+               _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_release \
+               _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_acq_rel \
+               _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_seq_cst \
+               _os_atomic_memory_order_relaxed
+#define os_atomic_memory_order_ordered \
+               _os_atomic_memory_order_relaxed
+
+#endif // OS_ATOMIC_UP
+
+#if __has_extension(c_generic_selections)
+#define _os_atomic_basetypeof(p) \
+               typeof(*_Generic((p), \
+               char*: (char*)(p), \
+               volatile char*: (char*)(p), \
+               signed char*: (signed char*)(p), \
+               volatile signed char*: (signed char*)(p), \
+               unsigned char*: (unsigned char*)(p), \
+               volatile unsigned char*: (unsigned char*)(p), \
+               short*: (short*)(p), \
+               volatile short*: (short*)(p), \
+               unsigned short*: (unsigned short*)(p), \
+               volatile unsigned short*: (unsigned short*)(p), \
+               int*: (int*)(p), \
+               volatile int*: (int*)(p), \
+               unsigned int*: (unsigned int*)(p), \
+               volatile unsigned int*: (unsigned int*)(p), \
+               long*: (long*)(p), \
+               volatile long*: (long*)(p), \
+               unsigned long*: (unsigned long*)(p), \
+               volatile unsigned long*: (unsigned long*)(p), \
+               long long*: (long long*)(p), \
+               volatile long long*: (long long*)(p), \
+               unsigned long long*: (unsigned long long*)(p), \
+               volatile unsigned long long*: (unsigned long long*)(p), \
+               const void**: (const void**)(p), \
+               const void*volatile*: (const void**)(p), \
+               default: (void**)(p)))
+#endif
+
+#if __has_extension(c_atomic) && __has_extension(c_generic_selections)
+#pragma mark -
+#pragma mark c11
+
+#define _os_atomic_c11_atomic(p) \
+               _Generic((p), \
+               char*: (_Atomic(char)*)(p), \
+               volatile char*: (volatile _Atomic(char)*)(p), \
+               signed char*: (_Atomic(signed char)*)(p), \
+               volatile signed char*: (volatile _Atomic(signed char)*)(p), \
+               unsigned char*: (_Atomic(unsigned char)*)(p), \
+               volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \
+               short*: (_Atomic(short)*)(p), \
+               volatile short*: (volatile _Atomic(short)*)(p), \
+               unsigned short*: (_Atomic(unsigned short)*)(p), \
+               volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \
+               int*: (_Atomic(int)*)(p), \
+               volatile int*: (volatile _Atomic(int)*)(p), \
+               unsigned int*: (_Atomic(unsigned int)*)(p), \
+               volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \
+               long*: (_Atomic(long)*)(p), \
+               volatile long*: (volatile _Atomic(long)*)(p), \
+               unsigned long*: (_Atomic(unsigned long)*)(p), \
+               volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \
+               long long*: (_Atomic(long long)*)(p), \
+               volatile long long*: (volatile _Atomic(long long)*)(p), \
+               unsigned long long*: (_Atomic(unsigned long long)*)(p), \
+               volatile unsigned long long*: \
+                               (volatile _Atomic(unsigned long long)*)(p), \
+               const void**: (_Atomic(const void*)*)(p), \
+               const void*volatile*: (volatile _Atomic(const void*)*)(p), \
+               default: (volatile _Atomic(void*)*)(p))
+
+#define _os_atomic_barrier(m) \
+               ({ __c11_atomic_thread_fence(os_atomic_memory_order_##m); })
+#define os_atomic_load(p, m) \
+               ({ _os_atomic_basetypeof(p) _r = \
+               __c11_atomic_load(_os_atomic_c11_atomic(p), \
+               os_atomic_memory_order_##m); (typeof(*(p)))_r; })
+#define os_atomic_store(p, v, m) \
+               ({ _os_atomic_basetypeof(p) _v = (v); \
+               __c11_atomic_store(_os_atomic_c11_atomic(p), _v, \
+               os_atomic_memory_order_##m); })
+#define os_atomic_xchg(p, v, m) \
+               ({ _os_atomic_basetypeof(p) _v = (v), _r = \
+               __c11_atomic_exchange(_os_atomic_c11_atomic(p), _v, \
+               os_atomic_memory_order_##m); (typeof(*(p)))_r; })
+#define os_atomic_cmpxchg(p, e, v, m) \
+               ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \
+               __c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
+               &_r, _v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); })
+#define os_atomic_cmpxchgv(p, e, v, g, m) \
+               ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+               __c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
+               &_r, _v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
+               *(g) = (typeof(*(p)))_r; _b; })
+#define os_atomic_cmpxchgvw(p, e, v, g, m) \
+               ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
+               __c11_atomic_compare_exchange_weak(_os_atomic_c11_atomic(p), \
+               &_r, _v, os_atomic_memory_order_##m, os_atomic_memory_order_relaxed); \
+               *(g) = (typeof(*(p)))_r;  _b; })
+#define _os_atomic_c11_op(p, v, m, o, op) \
+               ({ _os_atomic_basetypeof(p) _v = (v), _r = \
+               __c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
+               os_atomic_memory_order_##m); (typeof(*(p)))(_r op _v); })
+#define _os_atomic_c11_op_orig(p, v, m, o, op) \
+               ({ _os_atomic_basetypeof(p) _v = (v), _r = \
+               __c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
+               os_atomic_memory_order_##m); (typeof(*(p)))_r; })
+
+#define os_atomic_add(p, v, m) \
+               _os_atomic_c11_op((p), (v), m, add, +)
+#define os_atomic_add_orig(p, v, m) \
+               _os_atomic_c11_op_orig((p), (v), m, add, +)
+#define os_atomic_sub(p, v, m) \
+               _os_atomic_c11_op((p), (v), m, sub, -)
+#define os_atomic_sub_orig(p, v, m) \
+               _os_atomic_c11_op_orig((p), (v), m, sub, -)
+#define os_atomic_and(p, v, m) \
+               _os_atomic_c11_op((p), (v), m, and, &)
+#define os_atomic_and_orig(p, v, m) \
+               _os_atomic_c11_op_orig((p), (v), m, and, &)
+#define os_atomic_or(p, v, m) \
+               _os_atomic_c11_op((p), (v), m, or, |)
+#define os_atomic_or_orig(p, v, m) \
+               _os_atomic_c11_op_orig((p), (v), m, or, |)
+#define os_atomic_xor(p, v, m) \
+               _os_atomic_c11_op((p), (v), m, xor, ^)
+#define os_atomic_xor_orig(p, v, m) \
+               _os_atomic_c11_op_orig((p), (v), m, xor, ^)
+
+#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
+#pragma mark -
+#pragma mark gnu99
+
+#define _os_atomic_full_barrier()      \
+               __sync_synchronize()
+#define _os_atomic_barrier(m)  \
+               ({ switch(os_atomic_memory_order_##m) { \
+               case _os_atomic_memory_order_relaxed: \
+                       break; \
+               default: \
+                       _os_atomic_full_barrier(); break; \
+               } })
+#define os_atomic_load(p, m) \
+               ({ typeof(*(p)) _r = *(p); \
+               switch(os_atomic_memory_order_##m) { \
+               case _os_atomic_memory_order_relaxed: \
+               case _os_atomic_memory_order_acquire: \
+               case _os_atomic_memory_order_seq_cst: \
+                       _os_atomic_barrier(m); \
+                       break; \
+               default: \
+                       _os_atomic_unimplemented(); break; \
+               } _r; })
+#define os_atomic_store(p, v, m) \
+               ({ switch(os_atomic_memory_order_##m) { \
+               case _os_atomic_memory_order_relaxed: \
+               case _os_atomic_memory_order_release: \
+               case _os_atomic_memory_order_seq_cst: \
+                       _os_atomic_barrier(m); \
+                       *(p) = (v); break; \
+               default: \
+                       _os_atomic_unimplemented(); break; \
+               } switch(os_atomic_memory_order_##m) { \
+               case _os_atomic_memory_order_seq_cst: \
+                       _os_atomic_barrier(m); break; \
+               default: \
+                       break; \
+               } })
+#if __has_builtin(__sync_swap)
+#define os_atomic_xchg(p, v, m) \
+               ((typeof(*(p)))__sync_swap((p), (v)))
+#else
+#define os_atomic_xchg(p, v, m) \
+               ((typeof(*(p)))__sync_lock_test_and_set((p), (v)))
+#endif
+#define os_atomic_cmpxchg(p, e, v, m) \
+               __sync_bool_compare_and_swap((p), (e), (v))
+#define os_atomic_cmpxchgv(p, e, v, g, m) \
+               ({ typeof(*(g)) _e = (e), _r = \
+               __sync_val_compare_and_swap((p), _e, (v)); \
+               bool _b = (_e == _r); *(g) = _r; _b; })
+#define os_atomic_cmpxchgvw(p, e, v, g, m) \
+               os_atomic_cmpxchgv((p), (e), (v), (g), m)
+
+#define os_atomic_add(p, v, m) \
+               __sync_add_and_fetch((p), (v))
+#define os_atomic_add_orig(p, v, m) \
+               __sync_fetch_and_add((p), (v))
+#define os_atomic_sub(p, v, m) \
+               __sync_sub_and_fetch((p), (v))
+#define os_atomic_sub_orig(p, v, m) \
+               __sync_fetch_and_sub((p), (v))
+#define os_atomic_and(p, v, m) \
+               __sync_and_and_fetch((p), (v))
+#define os_atomic_and_orig(p, v, m) \
+               __sync_fetch_and_and((p), (v))
+#define os_atomic_or(p, v, m) \
+               __sync_or_and_fetch((p), (v))
+#define os_atomic_or_orig(p, v, m) \
+               __sync_fetch_and_or((p), (v))
+#define os_atomic_xor(p, v, m) \
+               __sync_xor_and_fetch((p), (v))
+#define os_atomic_xor_orig(p, v, m) \
+               __sync_fetch_and_xor((p), (v))
+
+#if defined(__x86_64__) || defined(__i386__)
+// GCC emits nothing for __sync_synchronize() on x86_64 & i386
+#undef _os_atomic_full_barrier
+#define _os_atomic_full_barrier() \
+               ({ __asm__ __volatile__( \
+               "mfence" \
+               : : : "memory"); })
+#undef os_atomic_load
+#define os_atomic_load(p, m) \
+               ({ switch(os_atomic_memory_order_##m) { \
+               case _os_atomic_memory_order_relaxed: \
+               case _os_atomic_memory_order_acquire: \
+               case _os_atomic_memory_order_seq_cst: \
+                       break; \
+               default: \
+                       _os_atomic_unimplemented(); break; \
+               } *(p); })
+// xchg is faster than store + mfence
+#undef os_atomic_store
+#define os_atomic_store(p, v, m) \
+               ({ switch(os_atomic_memory_order_##m) { \
+               case _os_atomic_memory_order_relaxed: \
+               case _os_atomic_memory_order_release: \
+                       *(p) = (v); break; \
+               case _os_atomic_memory_order_seq_cst: \
+                       (void)os_atomic_xchg((p), (v), m); break; \
+               default:\
+                       _os_atomic_unimplemented(); break; \
+               } })
+#endif
+
+#else
+#error "Please upgrade to GCC 4.2 or newer."
+#endif
+
+#pragma mark -
+#pragma mark generic
+
+#define os_atomic_thread_fence(m) _os_atomic_barrier(m)
+// see comment in os_once.c
+#define os_atomic_maximally_synchronizing_barrier() \
+               _os_atomic_barrier(seq_cst)
+
+#define os_atomic_load2o(p, f, m) \
+               os_atomic_load(&(p)->f, m)
+#define os_atomic_store2o(p, f, v, m) \
+               os_atomic_store(&(p)->f, (v), m)
+#define os_atomic_xchg2o(p, f, v, m) \
+               os_atomic_xchg(&(p)->f, (v), m)
+#define os_atomic_cmpxchg2o(p, f, e, v, m) \
+               os_atomic_cmpxchg(&(p)->f, (e), (v), m)
+#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \
+               os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m)
+#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \
+               os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m)
+#define os_atomic_add2o(p, f, v, m) \
+               os_atomic_add(&(p)->f, (v), m)
+#define os_atomic_add_orig2o(p, f, v, m) \
+               os_atomic_add_orig(&(p)->f, (v), m)
+#define os_atomic_sub2o(p, f, v, m) \
+               os_atomic_sub(&(p)->f, (v), m)
+#define os_atomic_sub_orig2o(p, f, v, m) \
+               os_atomic_sub_orig(&(p)->f, (v), m)
+#define os_atomic_and2o(p, f, v, m) \
+               os_atomic_and(&(p)->f, (v), m)
+#define os_atomic_and_orig2o(p, f, v, m) \
+               os_atomic_and_orig(&(p)->f, (v), m)
+#define os_atomic_or2o(p, f, v, m) \
+               os_atomic_or(&(p)->f, (v), m)
+#define os_atomic_or_orig2o(p, f, v, m) \
+               os_atomic_or_orig(&(p)->f, (v), m)
+#define os_atomic_xor2o(p, f, v, m) \
+               os_atomic_xor(&(p)->f, (v), m)
+#define os_atomic_xor_orig2o(p, f, v, m) \
+               os_atomic_xor_orig(&(p)->f, (v), m)
+
+#define os_atomic_inc(p, m) \
+               os_atomic_add((p), 1, m)
+#define os_atomic_inc_orig(p, m) \
+               os_atomic_add_orig((p), 1, m)
+#define os_atomic_inc2o(p, f, m) \
+               os_atomic_add2o(p, f, 1, m)
+#define os_atomic_inc_orig2o(p, f, m) \
+               os_atomic_add_orig2o(p, f, 1, m)
+#define os_atomic_dec(p, m) \
+               os_atomic_sub((p), 1, m)
+#define os_atomic_dec_orig(p, m) \
+               os_atomic_sub_orig((p), 1, m)
+#define os_atomic_dec2o(p, f, m) \
+               os_atomic_sub2o(p, f, 1, m)
+#define os_atomic_dec_orig2o(p, f, m) \
+               os_atomic_sub_orig2o(p, f, 1, m)
+
+#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
+               bool _result = false; \
+               typeof(p) _p = (p); \
+               ov = os_atomic_load(_p, relaxed); \
+               do { \
+                       __VA_ARGS__; \
+                       _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \
+               } while (os_unlikely(!_result)); \
+               _result; \
+       })
+#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \
+               os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__)
+#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \
+               ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); })
+#define os_atomic_rmw_loop_give_up(expr) \
+               os_atomic_rmw_loop_give_up_with_fence(relaxed, expr)
+
+#define os_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \
+               os_atomic_cmpxchgv((p), (e), (v), (g), acquire)
+#define os_atomic_tsx_xrel_store(p, v) \
+               os_atomic_store(p, v, release)
+#define os_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \
+               os_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g))
+#define os_atomic_tsx_xrel_store2o(p, f, v) \
+               os_atomic_tsx_xrel_store(&(p)->f, (v))
+
+#if defined(__x86_64__) || defined(__i386__)
+#pragma mark -
+#pragma mark x86
+
+#undef os_atomic_maximally_synchronizing_barrier
+#ifdef __LP64__
+#define os_atomic_maximally_synchronizing_barrier() \
+               ({ unsigned long _clbr; __asm__ __volatile__( \
+               "cpuid" \
+               : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); })
+#else
+#ifdef __llvm__
+#define os_atomic_maximally_synchronizing_barrier() \
+               ({ unsigned long _clbr; __asm__ __volatile__( \
+               "cpuid" \
+               : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); })
+#else // gcc does not allow inline i386 asm to clobber ebx
+#define os_atomic_maximally_synchronizing_barrier() \
+               ({ unsigned long _clbr; __asm__ __volatile__( \
+               "pushl  %%ebx\n\t" \
+               "cpuid\n\t" \
+               "popl   %%ebx" \
+               : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); })
+#endif
+#endif
+
+
+#endif
+
+
+#endif // __OS_EXPOSE_INTERNALS_INDIRECT__
+
+#endif // __OS_INTERNAL_ATOMIC__
diff --git a/include/os/internal/crashlog.h b/include/os/internal/crashlog.h
new file mode 100644 (file)
index 0000000..41417ab
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_INTERNAL_CRASHLOG__
+#define __OS_INTERNAL_CRASHLOG__
+
+#ifndef __OS_EXPOSE_INTERNALS_INDIRECT__
+/*
+ * XXX                           /!\ WARNING /!\                           XXX
+ *
+ * This header file describes INTERNAL interfaces to libplatform used by other
+ * libsystem targets, which are subject to change in future releases of OS X
+ * and iOS. Any applications relying on these interfaces WILL break.
+ *
+ * If you are not a libsystem target, you should NOT EVER use these headers.
+ * Not even a little.
+ *
+ * XXX                           /!\ WARNING /!\                           XXX
+ */
+#error "Please #include <os/internal/internal_shared.h> instead of this file directly."
+#else
+
+
+#define _os_set_crash_log_cause_and_message(ac, msg) ((void)(ac), (void)(msg))
+#define _os_set_crash_log_message(msg) ((void)(msg))
+#define _os_set_crash_log_message_dynamic(msg) ((void)(msg))
+
+
+#endif // __OS_EXPOSE_INTERNALS_INDIRECT__
+
+#endif // __OS_INTERNAL_CRASHLOG__
diff --git a/include/os/internal/internal_shared.h b/include/os/internal/internal_shared.h
new file mode 100644 (file)
index 0000000..732c420
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_INTERNAL_SHARED__
+#define __OS_INTERNAL_SHARED__
+
+#ifndef __OS_EXPOSE_INTERNALS__
+/*
+ * XXX                           /!\ WARNING /!\                           XXX
+ *
+ * This header file describes INTERNAL interfaces to libplatform used by other
+ * libsystem targets, which are subject to change in future releases of Mac
+ * OS X and iOS. Any applications relying on these interfaces WILL break.
+ *
+ * If you are not a libsystem target, you should NOT EVER use these headers.
+ * Not even a little.
+ *
+ * XXX                           /!\ WARNING /!\                           XXX
+ */
+#error "these internals are not for general use outside of libsystem"
+#else
+
+#ifndef __OS_EXPOSE_INTERNALS_INDIRECT__
+#define __OS_EXPOSE_INTERNALS_INDIRECT__
+#endif
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stddef.h>
+#if defined(__arm__) || defined(__arm64__)
+#include <arm/arch.h>
+#endif
+
+#include <os/base.h>
+#include <os/base_private.h>
+#include <os/internal/atomic.h>
+#include <os/internal/crashlog.h>
+
+#endif // __OS_EXPOSE_INTERNALS__
+
+#endif // __OS_INTERNAL_SHARED__
diff --git a/include/os/lock.h b/include/os/lock.h
new file mode 100644 (file)
index 0000000..4af8449
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_LOCK__
+#define __OS_LOCK__
+
+#include <Availability.h>
+#include <sys/cdefs.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <os/base.h>
+
+OS_ASSUME_NONNULL_BEGIN
+
+/*! @header
+ * Low-level lock API.
+ */
+
+#define OS_LOCK_API_VERSION 20160309
+
+__BEGIN_DECLS
+
+#define OS_UNFAIR_LOCK_AVAILABILITY \
+               __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) \
+               __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+
+/*!
+ * @typedef os_unfair_lock
+ *
+ * @abstract
+ * Low-level lock that allows waiters to block efficiently on contention.
+ *
+ * In general, higher level synchronization primitives such as those provided by
+ * the pthread or dispatch subsystems should be preferred.
+ *
+ * The values stored in the lock should be considered opaque and implementation
+ * defined, they contain thread ownership information that the system may use
+ * to attempt to resolve priority inversions.
+ *
+ * This lock must be unlocked from the same thread that locked it, attempts to
+ * unlock from a different thread will cause an assertion aborting the process.
+ *
+ * This lock must not be accessed from multiple processes or threads via shared
+ * or multiply-mapped memory, the lock implementation relies on the address of
+ * the lock value and owning process.
+ *
+ * Must be initialized with OS_UNFAIR_LOCK_INIT
+ *
+ * @discussion
+ * Replacement for the deprecated OSSpinLock. Does not spin on contention but
+ * waits in the kernel to be woken up by an unlock.
+ *
+ * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
+ * unlocker can potentially immediately reacquire the lock before a woken up
+ * waiter gets an opportunity to attempt to acquire the lock. This may be
+ * advantageous for performance reasons, but also makes starvation of waiters a
+ * possibility.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+typedef struct os_unfair_lock_s {
+       uint32_t _os_unfair_lock_opaque;
+} os_unfair_lock, *os_unfair_lock_t;
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#define OS_UNFAIR_LOCK_INIT ((os_unfair_lock){0})
+#elif defined(__cplusplus) && __cplusplus >= 201103L
+#define OS_UNFAIR_LOCK_INIT (os_unfair_lock{})
+#elif defined(__cplusplus)
+#define OS_UNFAIR_LOCK_INIT (os_unfair_lock())
+#else
+#define OS_UNFAIR_LOCK_INIT {0}
+#endif
+
+/*!
+ * @function os_unfair_lock_lock
+ *
+ * @abstract
+ * Locks an os_unfair_lock.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_lock(os_unfair_lock_t lock);
+
+/*!
+ * @function os_unfair_lock_trylock
+ *
+ * @abstract
+ * Locks an os_unfair_lock if it is not already locked.
+ *
+ * @discussion
+ * It is invalid to surround this function with a retry loop, if this function
+ * returns false, the program must be able to proceed without having acquired
+ * the lock, or it must call os_unfair_lock_lock() directly (a retry loop around
+ * os_unfair_lock_trylock() amounts to an inefficient implementation of
+ * os_unfair_lock_lock() that hides the lock waiter from the system and prevents
+ * resolution of priority inversions).
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ *
+ * @result
+ * Returns true if the lock was successfully locked and false if the lock was
+ * already locked.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
+bool os_unfair_lock_trylock(os_unfair_lock_t lock);
+
+/*!
+ * @function os_unfair_lock_unlock
+ *
+ * @abstract
+ * Unlocks an os_unfair_lock.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_unlock(os_unfair_lock_t lock);
+
+__END_DECLS
+
+OS_ASSUME_NONNULL_END
+
+#endif // __OS_LOCK__
diff --git a/include/os/lock_private.h b/include/os/lock_private.h
new file mode 100644 (file)
index 0000000..f1daee2
--- /dev/null
@@ -0,0 +1,650 @@
+/*
+ * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_LOCK_PRIVATE__
+#define __OS_LOCK_PRIVATE__
+
+#include <Availability.h>
+#include <TargetConditionals.h>
+#include <sys/cdefs.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <os/base_private.h>
+#include <os/lock.h>
+
+OS_ASSUME_NONNULL_BEGIN
+
+/*! @header
+ * Low-level lock SPI
+ */
+
+#define OS_LOCK_SPI_VERSION 20160406
+
+/*!
+ * @typedef os_lock_t
+ *
+ * @abstract
+ * Pointer to one of the os_lock variants.
+ */
+
+#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
+#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
+#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)
+
+#define OS_LOCK(type) os_lock_##type##_s
+#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+
+#define OS_LOCK_DECL(type, size) \
+               typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
+                       private: \
+                       OS_LOCK_TYPE_STRUCT(type) * const osl_type OS_UNUSED; \
+                       uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
+                       public: \
+            constexpr OS_LOCK(type)() : \
+                               osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
+               } OS_LOCK(type)
+#define OS_LOCK_INIT(type) {}
+
+typedef OS_LOCK_STRUCT(base) {
+       protected:
+       constexpr OS_LOCK(base)() {}
+} *os_lock_t;
+
+#else
+
+#define OS_LOCK_DECL(type, size) \
+               typedef OS_LOCK_STRUCT(type) { \
+                       OS_LOCK_TYPE_STRUCT(type) * const osl_type; \
+                       uintptr_t _osl_##type##_opaque[size-1]; \
+               } OS_LOCK(type)
+
+#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }
+
+#ifndef OS_LOCK_T_MEMBER
+#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
+#endif
+
+typedef OS_TRANSPARENT_UNION union {
+       OS_LOCK_T_MEMBER(base);
+       OS_LOCK_T_MEMBER(unfair);
+       OS_LOCK_T_MEMBER(nospin);
+       OS_LOCK_T_MEMBER(spin);
+       OS_LOCK_T_MEMBER(handoff);
+       OS_LOCK_T_MEMBER(eliding);
+       OS_LOCK_T_MEMBER(transactional);
+} os_lock_t;
+
+#endif
+
+/*!
+ * @typedef os_lock_unfair_s
+ *
+ * @abstract
+ * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
+ * waits in the kernel to be woken up by an unlock. The lock value contains
+ * ownership information that the system may use to attempt to resolve priority
+ * inversions.
+ *
+ * @discussion
+ * Intended as a replacement for os_lock_spin_s or OSSpinLock. Like with
+ * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
+ * can potentially immediately reacquire the lock before a woken up waiter gets
+ * an opportunity to attempt to acquire the lock, so starvation is possible.
+ *
+ * Must be initialized with OS_LOCK_UNFAIR_INIT
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
+OS_LOCK_DECL(unfair, 2);
+#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
+
+/*!
+ * @typedef os_lock_nospin_s
+ *
+ * @abstract
+ * os_lock variant that does not spin on contention but waits in the kernel to
+ * be woken up by an unlock. No attempt to resolve priority inversions is made
+ * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
+ *
+ * @discussion
+ * Intended as a replacement for os_lock_spin_s or OSSpinLock. Like with
+ * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
+ * can potentially immediately reacquire the lock before a woken up waiter gets
+ * an opportunity to attempt to acquire the lock, so starvation is possible.
+ *
+ * Must be initialized with OS_LOCK_NOSPIN_INIT
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
+OS_LOCK_DECL(nospin, 2);
+#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
+
+/*!
+ * @typedef os_lock_spin_s
+ *
+ * @abstract
+ * Deprecated os_lock variant that on contention starts by spinning trying to
+ * acquire the lock, then depressing the priority of the current thread and
+ * finally blocking the thread waiting for the lock to become available.
+ * Equivalent to OSSpinLock and equally not recommended, see discussion in
+ * libkern/OSAtomic.h headerdoc.
+ *
+ * @discussion
+ * Spinlocks are intended to be held only for very brief periods of time. The
+ * critical section must not make syscalls and should avoid touching areas of
+ * memory that may trigger a page fault, in particular if the critical section
+ * may be executing on threads of widely differing priorities or on a mix of
+ * IO-throttled and unthrottled threads.
+ *
+ * Must be initialized with OS_LOCK_SPIN_INIT
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_LOCK_TYPE_DECL(spin);
+OS_LOCK_DECL(spin, 2);
+#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
+
+/*!
+ * @typedef os_lock_handoff_s
+ *
+ * @abstract
+ * os_lock variant that on contention hands off the current kernel thread to the
+ * lock-owning userspace thread (if it is not running), temporarily overriding
+ * its priority and IO throttle if necessary.
+ *
+ * @discussion
+ * Intended for use in limited circumstances where the critical section might
+ * be executing on threads of widely differing priorities or on a mix of
+ * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
+ * be likely to encounter a priority inversion.
+ *
+ * IMPORTANT: This lock variant is NOT intended as a general replacement for all
+ * uses of os_lock_spin_s or OSSpinLock.
+ *
+ * Must be initialized with OS_LOCK_HANDOFF_INIT
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
+OS_LOCK_DECL(handoff, 2);
+#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
+
+
+#if !TARGET_OS_IPHONE
+/*!
+ * @typedef os_lock_eliding_s
+ *
+ * @abstract
+ * os_lock variant that uses hardware lock elision support if available to allow
+ * multiple processors to concurrently execute a critical section as long as
+ * they don't perform conflicting operations on each other's data. In case of
+ * conflict, the lock reverts to exclusive operation and os_lock_spin_s behavior
+ * on contention (at potential extra cost for the aborted attempt at lock-elided
+ * concurrent execution). If hardware HLE support is not present, this lock
+ * variant behaves like os_lock_spin_s.
+ *
+ * @discussion
+ * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
+ * with HLE support to ensure the data access pattern and length of the critical
+ * section allows lock-elided execution to succeed frequently enough to offset
+ * the cost of any aborted concurrent execution.
+ *
+ * Must be initialized with OS_LOCK_ELIDING_INIT
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
+OS_EXPORT OS_LOCK_TYPE_DECL(eliding);
+OS_LOCK_DECL(eliding, 8) OS_ALIGNED(64);
+#define OS_LOCK_ELIDING_INIT OS_LOCK_INIT(eliding)
+
+/*!
+ * @typedef os_lock_transactional_s
+ *
+ * @abstract
+ * os_lock variant that uses hardware restricted transactional memory support if
+ * available to allow multiple processors to concurrently execute the critical
+ * section as a transactional region. If transactional execution aborts, the
+ * lock reverts to exclusive operation and os_lock_spin_s behavior on contention
+ * (at potential extra cost for the aborted attempt at transactional concurrent
+ * execution). If hardware RTM support is not present, this lock variant behaves
+ * like os_lock_eliding_s.
+ *
+ * @discussion
+ * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
+ * with RTM support to ensure the data access pattern and length of the critical
+ * section allows transactional execution to succeed frequently enough to offset
+ * the cost of any aborted transactions.
+ *
+ * Must be initialized with OS_LOCK_TRANSACTIONAL_INIT
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
+OS_EXPORT OS_LOCK_TYPE_DECL(transactional);
+OS_LOCK_DECL(transactional, 8) OS_ALIGNED(64);
+#define OS_LOCK_TRANSACTIONAL_INIT OS_LOCK_INIT(transactional)
+#endif
+
+__BEGIN_DECLS
+
+/*!
+ * @function os_lock_lock
+ *
+ * @abstract
+ * Locks an os_lock variant.
+ *
+ * @param lock
+ * Pointer to one of the os_lock variants.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_lock_lock(os_lock_t lock);
+
+/*!
+ * @function os_lock_trylock
+ *
+ * @abstract
+ * Locks an os_lock variant if it is not already locked.
+ *
+ * @param lock
+ * Pointer to one of the os_lock variants.
+ *
+ * @result
+ * Returns true if the lock was successfully locked and false if the lock was
+ * already locked.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+bool os_lock_trylock(os_lock_t lock);
+
+/*!
+ * @function os_lock_unlock
+ *
+ * @abstract
+ * Unlocks an os_lock variant.
+ *
+ * @param lock
+ * Pointer to one of the os_lock variants.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_lock_unlock(os_lock_t lock);
+
+/*! @group os_unfair_lock SPI
+ *
+ * @abstract
+ * Replacement for the deprecated OSSpinLock. Does not spin on contention but
+ * waits in the kernel to be woken up by an unlock. The opaque lock value
+ * contains thread ownership information that the system may use to attempt to
+ * resolve priority inversions.
+ *
+ * This lock must be unlocked from the same thread that locked it, attempts to
+ * unlock from a different thread will cause an assertion aborting the process.
+ *
+ * This lock must not be accessed from multiple processes or threads via shared
+ * or multiply-mapped memory, the lock implementation relies on the address of
+ * the lock value and owning process.
+ *
+ * @discussion
+ * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
+ * unlocker can potentially immediately reacquire the lock before a woken up
+ * waiter gets an opportunity to attempt to acquire the lock. This may be
+ * advantageous for performance reasons, but also makes starvation of waiters a
+ * possibility.
+ *
+ * Must be initialized with OS_UNFAIR_LOCK_INIT
+ */
+
+/*!
+ * @typedef os_unfair_lock_options_t
+ *
+ * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
+ * This flag informs the runtime that the specified lock is used for data
+ * synchronization and that the lock owner is always able to make progress
+ * toward releasing the lock without the help of another thread in the same
+ * process. This hint will cause the workqueue subsystem to not create new
+ * threads to offset for threads waiting for the lock.
+ *
+ * When this flag is used, the code running under the critical section should
+ * be well known and under your control (generally it should not call into
+ * framework code).
+ */
+OS_ENUM(os_unfair_lock_options, uint32_t,
+       OS_UNFAIR_LOCK_NONE
+               OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
+       OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
+               OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
+);
+
+/*!
+ * @function os_unfair_lock_lock_with_options
+ *
+ * @abstract
+ * Locks an os_unfair_lock.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ *
+ * @param options
+ * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
+               os_unfair_lock_options_t options);
+
+/*!
+ * @function os_unfair_lock_assert_owner
+ *
+ * @abstract
+ * Asserts that the calling thread is the current owner of the specified
+ * unfair lock.
+ *
+ * @discussion
+ * If the lock is currently owned by the calling thread, this function returns.
+ *
+ * If the lock is unlocked or owned by a different thread, this function
+ * asserts and terminates the process.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_assert_owner(os_unfair_lock_t lock);
+
+/*!
+ * @function os_unfair_lock_assert_not_owner
+ *
+ * @abstract
+ * Asserts that the calling thread is not the current owner of the specified
+ * unfair lock.
+ *
+ * @discussion
+ * If the lock is unlocked or owned by a different thread, this function
+ * returns.
+ *
+ * If the lock is currently owned by the current thread, this function asserts
+ * and terminates the process.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_assert_not_owner(os_unfair_lock_t lock);
+
+/*! @group os_unfair_lock variant for consumption by Libc
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
+               os_unfair_lock_options_t options);
+
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);
+
+__END_DECLS
+
+OS_ASSUME_NONNULL_END
+
+/*! @group Inline os_unfair_lock interfaces
+ *
+ * Inline versions of the os_unfair_lock fastpath.
+ *
+ * Intended exclusively for special highly performance-sensitive cases where the
+ * function calls to the os_unfair_lock API entrypoints add measurable overhead.
+ *
+ * Do not use in frameworks to implement synchronization API primitives that are
+ * exposed to developers, that would lead to false positives for that API from
+ * tools such as ThreadSanitizer.
+ *
+ * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
+ * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
+ *          REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
+ * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
+ *
+ * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
+ * above and still wish to use these interfaces.
+ */
+
+#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE
+
+#include <pthread/tsd_private.h>
+
+#ifdef __cplusplus
+extern "C++" {
+#if !(__has_include(<atomic>) && __has_feature(cxx_atomic))
+#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
+#endif
+#include <atomic>
+typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
+#define OSLOCK_STD(_a) std::_a
+__BEGIN_DECLS
+#else
+#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
+#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
+#endif
+#include <stdatomic.h>
+typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
+#define OSLOCK_STD(_a) _a
+#endif
+
+OS_ASSUME_NONNULL_BEGIN
+
+/*!
+ * @function os_unfair_lock_lock_inline
+ *
+ * @abstract
+ * Locks an os_unfair_lock.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
+void
+os_unfair_lock_lock_inline(os_unfair_lock_t lock)
+{
+       if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
+       uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+                       _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
+       os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
+       if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
+                       (_os_atomic_unfair_lock*)lock, &unlocked, locked,
+                       OSLOCK_STD(memory_order_acquire),
+                       OSLOCK_STD(memory_order_relaxed))) {
+               return os_unfair_lock_lock(lock);
+       }
+}
+
+/*!
+ * @function os_unfair_lock_lock_with_options_inline
+ *
+ * @abstract
+ * Locks an os_unfair_lock.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ *
+ * @param options
+ * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
+void
+os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
+               os_unfair_lock_options_t options)
+{
+       if (!_pthread_has_direct_tsd()) {
+               return os_unfair_lock_lock_with_options(lock, options);
+       }
+       uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+                       _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
+       os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
+       if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
+                       (_os_atomic_unfair_lock*)lock, &unlocked, locked,
+                       OSLOCK_STD(memory_order_acquire),
+                       OSLOCK_STD(memory_order_relaxed))) {
+               return os_unfair_lock_lock_with_options(lock, options);
+       }
+}
+
+/*!
+ * @function os_unfair_lock_trylock_inline
+ *
+ * @abstract
+ * Locks an os_unfair_lock if it is not already locked.
+ *
+ * @discussion
+ * It is invalid to surround this function with a retry loop, if this function
+ * returns false, the program must be able to proceed without having acquired
+ * the lock, or it must call os_unfair_lock_lock_inline() instead.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ *
+ * @result
+ * Returns true if the lock was successfully locked and false if the lock was
+ * already locked.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
+bool
+os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
+{
+       if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
+       uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+                       _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
+       os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
+       return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
+                       (_os_atomic_unfair_lock*)lock, &unlocked, locked,
+                       OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
+}
+
+/*!
+ * @function os_unfair_lock_unlock_inline
+ *
+ * @abstract
+ * Unlocks an os_unfair_lock.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
+void
+os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
+{
+       if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
+       uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
+                       _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
+       os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
+       if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
+                       (_os_atomic_unfair_lock*)lock, &locked, unlocked,
+                       OSLOCK_STD(memory_order_release),
+                       OSLOCK_STD(memory_order_relaxed))) {
+               return os_unfair_lock_unlock(lock);
+       }
+}
+
+/*! @group os_unfair_lock no-TSD interfaces
+ *
+ * Like the above, but don't require being on a thread with valid TSD, so they
+ * can be called from injected mach-threads.  The normal routines use the TSD
+ * value for mach_thread_self(), these routines use MACH_PORT_DEAD for the
+ * locked value instead.  As a result, they will be unable to resolve priority
+ * inversions.
+ *
+ * This should only be used by libpthread.
+ *
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);
+
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
+void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
+
+/*!
+ * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
+ *
+ * @abstract
+ * Locks an os_unfair_lock, without requiring valid TSD.
+ *
+ * This should only be used by libpthread.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
+void
+os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
+{
+       uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
+       os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
+       if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
+                       (_os_atomic_unfair_lock*)lock, &unlocked, locked,
+                       OSLOCK_STD(memory_order_acquire),
+                       OSLOCK_STD(memory_order_relaxed))) {
+               return os_unfair_lock_lock_no_tsd_4libpthread(lock);
+       }
+}
+
+/*!
+ * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
+ *
+ * @abstract
+ * Unlocks an os_unfair_lock, without requiring valid TSD.
+ *
+ * This should only be used by libpthread.
+ *
+ * @param lock
+ * Pointer to an os_unfair_lock.
+ */
+OS_UNFAIR_LOCK_AVAILABILITY
+OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
+void
+os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
+{
+       uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
+       os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
+       if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
+                       (_os_atomic_unfair_lock*)lock, &locked, unlocked,
+                       OSLOCK_STD(memory_order_release),
+                       OSLOCK_STD(memory_order_relaxed))) {
+               return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
+       }
+}
+
+OS_ASSUME_NONNULL_END
+
+#undef OSLOCK_STD
+#ifdef __cplusplus
+__END_DECLS
+} // extern "C++"
+#endif
+
+#endif // OS_UNFAIR_LOCK_INLINE
+
+#endif // __OS_LOCK_PRIVATE__
diff --git a/include/os/once_private.h b/include/os/once_private.h
new file mode 100644 (file)
index 0000000..c93cc14
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_ONCE_PRIVATE__
+#define __OS_ONCE_PRIVATE__
+
+#include <Availability.h>
+#include <os/base_private.h>
+
+OS_ASSUME_NONNULL_BEGIN
+
+__BEGIN_DECLS
+
+#define OS_ONCE_SPI_VERSION 20130313
+
+OS_SWIFT_UNAVAILABLE("Swift has lazy init")
+typedef long os_once_t;
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_NONNULL1 OS_NONNULL3 OS_NOTHROW
+OS_SWIFT_UNAVAILABLE("Swift has lazy init")
+void
+_os_once(os_once_t *predicate, void *_Nullable context, os_function_t function);
+
+OS_NONNULL1 OS_NONNULL3 OS_NOTHROW
+__header_always_inline void
+os_once(os_once_t *predicate, void *_Nullable context, os_function_t function)
+{
+       if (OS_EXPECT(*predicate, ~0l) != ~0l) {
+               _os_once(predicate, context, function);
+               OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
+       } else {
+               os_compiler_barrier();
+       }
+}
+
+/* This SPI is *strictly* for the use of pthread_once only. This is not
+ * safe in general use of os_once.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+OS_EXPORT OS_NONNULL1 OS_NOTHROW
+OS_SWIFT_UNAVAILABLE("Swift has lazy init")
+void
+__os_once_reset(os_once_t *val);
+
+__END_DECLS
+
+OS_ASSUME_NONNULL_END
+
+#endif // __OS_ONCE_PRIVATE__
diff --git a/include/os/semaphore_private.h b/include/os/semaphore_private.h
new file mode 100644 (file)
index 0000000..d6bd728
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_SEMAPHORE_PRIVATE__
+#define __OS_SEMAPHORE_PRIVATE__
+
+#include <Availability.h>
+#include <stdint.h>
+#include <os/base_private.h>
+#include <os/tsd.h>
+
+OS_ASSUME_NONNULL_BEGIN
+
+__BEGIN_DECLS
+
+#define OS_SEMAPHORE_SPI_VERSION 20130313
+
+typedef uintptr_t os_semaphore_t;
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_WARN_RESULT OS_NOTHROW
+os_semaphore_t _os_semaphore_create(void);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_NOTHROW
+void _os_semaphore_dispose(os_semaphore_t);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_NOTHROW
+void _os_semaphore_wait(os_semaphore_t);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+OS_EXPORT OS_NOTHROW
+void _os_semaphore_signal(os_semaphore_t);
+
+/*
+ * Takes the semaphore out of this thread's one-slot TSD cache
+ * (__TSD_SEMAPHORE_CACHE). If the slot is empty, a fresh semaphore is
+ * created instead. On a cache hit the slot is cleared to 0 so the same
+ * semaphore cannot be handed out twice.
+ */
+OS_WARN_RESULT OS_NOTHROW
+__header_always_inline os_semaphore_t
+os_get_cached_semaphore(void)
+{
+       os_semaphore_t sema;
+       sema = (os_semaphore_t)_os_tsd_get_direct(__TSD_SEMAPHORE_CACHE);
+       if (os_unlikely(!sema)) {
+               /* Cache miss: allocate a new semaphore; the slot stays empty. */
+               return _os_semaphore_create();
+       }
+       /* Cache hit: claim the cached semaphore by emptying the slot. */
+       _os_tsd_set_direct(__TSD_SEMAPHORE_CACHE, 0);
+       return sema;
+}
+
+/*
+ * Returns a semaphore to this thread's one-slot TSD cache. The new value
+ * always replaces the slot contents; if the slot was already occupied the
+ * previously cached semaphore is disposed, so the cache never holds more
+ * than one semaphore per thread.
+ */
+OS_NOTHROW
+__header_always_inline void
+os_put_cached_semaphore(os_semaphore_t sema)
+{
+       os_semaphore_t old_sema;
+       old_sema = (os_semaphore_t)_os_tsd_get_direct(__TSD_SEMAPHORE_CACHE);
+       _os_tsd_set_direct(__TSD_SEMAPHORE_CACHE, (void*)sema);
+       if (os_unlikely(old_sema)) {
+               /* Slot was full: the displaced semaphore is released. */
+               return _os_semaphore_dispose(old_sema);
+       }
+}
+
+/* Thin inline wrapper: blocks on sema via the exported _os_semaphore_wait(). */
+OS_NOTHROW
+__header_always_inline void
+os_semaphore_wait(os_semaphore_t sema)
+{
+       return _os_semaphore_wait(sema);
+}
+
+/* Thin inline wrapper: signals sema via the exported _os_semaphore_signal(). */
+OS_NOTHROW
+__header_always_inline void
+os_semaphore_signal(os_semaphore_t sema)
+{
+       return _os_semaphore_signal(sema);
+}
+
+__END_DECLS
+
+OS_ASSUME_NONNULL_END
+
+#endif // __OS_SEMAPHORE_PRIVATE__
diff --git a/include/platform/compat.h b/include/platform/compat.h
new file mode 100644 (file)
index 0000000..a9f59fd
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _PLATFORM_COMPAT_H_
+#define _PLATFORM_COMPAT_H_
+
+#include <platform/string.h>
+
+__BEGIN_DECLS
+
+/* Helpers for other common non-primitive routines */
+
+/*
+ * strlen built on the _platform_memchr primitive: locates the NUL
+ * terminator and returns its distance from s. Requires s to actually be
+ * NUL-terminated — if no NUL exists within SIZE_MAX bytes, memchr would
+ * return NULL and the subtraction below yields a meaningless value.
+ */
+__header_always_inline
+size_t
+_platform_strlen(const char *s) {
+       const char *t = _platform_memchr(s, '\0', SIZE_MAX);
+       return (uintptr_t)t - (uintptr_t)s;
+}
+
+/*
+ * BSD strlcpy semantics built on the _platform_* primitives: copies up to
+ * maxlen-1 bytes of src into dst, always NUL-terminating when maxlen > 0,
+ * and returns strlen(src) so callers can detect truncation
+ * (return >= maxlen means the copy was truncated).
+ */
+__header_always_inline
+size_t
+_platform_strlcpy(char * restrict dst, const char * restrict src, size_t maxlen) {
+       const size_t srclen = _platform_strlen(src);
+       if (srclen < maxlen) {
+               /* Fits entirely: copy the string including its NUL. */
+               _platform_memmove(dst, src, srclen+1);
+       } else if (maxlen != 0) {
+               /* Truncate: copy what fits and terminate explicitly. */
+               _platform_memmove(dst, src, maxlen-1);
+               dst[maxlen-1] = '\0';
+       }
+       return srclen;
+}
+
+__END_DECLS
+
+/* Compat macros for primitives */
+#define bzero            _platform_bzero
+#define memchr           _platform_memchr
+#define memcmp           _platform_memcmp
+#define memmove          _platform_memmove
+#define memccpy          _platform_memccpy
+#define memset           _platform_memset
+#define memset_pattern4  _platform_memset_pattern4
+#define memset_pattern8  _platform_memset_pattern8
+#define memset_pattern16 _platform_memset_pattern16
+#define strchr           _platform_strchr
+#define strcmp           _platform_strcmp
+#define strncmp          _platform_strncmp
+
+/* Compat macros for non-primitive helpers */
+#define strlcpy          _platform_strlcpy
+#define strlen           _platform_strlen
+
+#endif /* _PLATFORM_COMPAT_H_ */
diff --git a/include/platform/introspection_private.h b/include/platform/introspection_private.h
new file mode 100644 (file)
index 0000000..f09ee58
--- /dev/null
@@ -0,0 +1,447 @@
+#ifndef __PLATFORM_INTROSPECTION_H__
+#define __PLATFORM_INTROSPECTION_H__
+
+#include <mach/mach_types.h>
+#include <mach/thread_info.h>
+#include <mach/thread_status.h>
+
+#include <sys/types.h>
+
+#include <stdbool.h>
+
+typedef struct platform_task_s *platform_task_t;
+typedef struct platform_thread_s *platform_thread_t;
+
+/*!
+ * @typedef platform_thread_id_t
+ *
+ * @discussion
+ * The type of the 64-bit system-wide unique thread ID.
+ */
+typedef uint64_t platform_thread_id_t;
+
+/*! @functiongroup Tasks */
+
+/*!
+ * @function platform_task_attach
+ *
+ * @discussion
+ * Attach to a process (specified by its mach task port) for debugging. This
+ * function creates a new task handle which must be disposed by a call to
+ * platform_task_detach().
+ *
+ * @param task
+ * On output, a newly created task handle.
+ *
+ * @param target
+ * The mach task port of the target process.
+ *
+ * @return
+ * KERN_SUCCESS if the process was successfully attached, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_task_attach(platform_task_t *task, task_t target);
+
+/*!
+ * @function platform_task_detach
+ *
+ * @discussion
+ * Detaches from the target task and deallocates all memory associated with
+ * the task handle.
+ *
+ * @param task
+ * The task handle to detach.
+ *
+ * @return
+ * KERN_SUCCESS if the process was successfully detached, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_task_detach(platform_task_t task);
+
+/*!
+ * @function platform_task_is_64_bit
+ *
+ * @discussion
+ * Returns true if the target task is LP64.
+ *
+ * @param task
+ * A handle to the target task.
+ *
+ * @return
+ * true if the target task is LP64, otherwise false.
+ */
+bool
+platform_task_is_64_bit(platform_task_t task);
+
+/*!
+ * @function platform_task_suspend_threads
+ *
+ * @discussion
+ * Suspends all the threads in the target task. This differs from task_suspend
+ * in that the task itself is not suspended, only the individual threads. While
+ * this suspension is in effect, any newly created threads will be created in
+ * a suspended state. The debugger may resume an individual thread for execution
+ * using platform_thread_resume() or evaluate an expression in the context of
+ * the task or a specific thread using platform_task_perform() and
+ * platform_thread_perform(), respectively. All threads in the task may be
+ * resumed with platform_task_resume_threads().
+ *
+ * @param task
+ * A handle to the target task.
+ *
+ * @return
+ * KERN_SUCCESS if the threads were successfully suspended, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_task_suspend_threads(platform_task_t task);
+
+/*!
+ * @function platform_task_resume_threads
+ *
+ * @discussion
+ * Resumes the threads in the target task. See platform_task_suspend_threads().
+ *
+ * @param task
+ * A handle to the target task.
+ *
+ * @return
+ * KERN_SUCCESS if the threads were successfully resumed, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_task_resume_threads(platform_task_t task);
+
+/*!
+ * @function platform_task_perform
+ *
+ * @discussion
+ * Performs the specified function on a newly created thread in the target task.
+ * This newly created thread will execute even if the threads in the task are
+ * suspended as the result of a call to platform_task_suspend_threads().
+ *
+ * The function and context addresses are in the virtual address space of the
+ * target task. It is the responsibility of the debugger to have previously
+ * mapped executable text and data at these addresses in the target task.
+ *
+ * @param task
+ * A handle to the target task.
+ *
+ * @param func_addr
+ * The address (in the virtual address space of the target task) of the
+ * function to perform. The function should be of type (void (*)(void *))
+ * and will be passed the value of the data_addr parameter.
+ *
+ * @param data_addr
+ * The address (in the virtual address space of the target task) of the
+ * data to pass as a parameter to the function to perform.
+ *
+ * @return
+ * KERN_SUCCESS if the function was successfully performed, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_task_perform(platform_task_t task,
+                     mach_vm_address_t func_addr,
+                     mach_vm_address_t data_addr);
+
+/*!
+ * @function platform_task_update_threads
+ *
+ * @discussion
+ * Updates an internal representation of all threads in the target task. The
+ * list of threads may then be iterated using platform_task_copy_next_thread().
+ *
+ * Calling this function resets any iteration currently in progress and a
+ * subsequent call to platform_task_copy_next_thread() will return the first
+ * thread in the list.
+ *
+ * @param task
+ * A handle to the target task.
+ *
+ * @return
+ * KERN_SUCCESS if the threads were successfully updated, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_task_update_threads(platform_task_t task);
+
+/*!
+ * @function platform_task_copy_next_thread
+ *
+ * @discussion
+ * Iterates the list of threads in the task. Returns a copied thread handle
+ * which must subsequently be released using platform_thread_release().
+ *
+ * The platform_task_update_threads() function must be called before this
+ * function will return any thread handles. A NULL pointer is returned to
+ * signify the end of the list.
+ *
+ * @param task
+ * A handle to the target task.
+ *
+ * @return
+ * A thread handle which must be released using platform_thread_release(),
+ * or NULL, signifying the end of the list.
+ */
+platform_thread_t
+platform_task_copy_next_thread(platform_task_t task);
+
+/*! @functiongroup Threads */
+
+/*!
+ * @function platform_thread_get_unique_id
+ *
+ * @discussion
+ * Returns the 64-bit system-wide unique ID of the target thread.
+ *
+ * @param thread
+ * A handle to the target thread.
+ *
+ * @return
+ * The unique ID of the thread.
+ */
+platform_thread_id_t
+platform_thread_get_unique_id(platform_thread_t thread);
+
+/*!
+ * @function platform_thread_release
+ *
+ * @discussion
+ * Releases a thread handle obtained by platform_task_copy_next_thread().
+ *
+ * @param thread
+ * The thread handle to release.
+ */
+void
+platform_thread_release(platform_thread_t thread);
+
+/*!
+ * @function platform_thread_abort_safely
+ *
+ * @discussion
+ * Similar to thread_abort_safely().
+ *
+ * @param thread
+ * A handle to the thread to signal.
+ *
+ * @return
+ * KERN_SUCCESS if the thread was successfully signaled, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_thread_abort_safely(platform_thread_t thread);
+
+/*!
+ * @function platform_thread_suspend
+ *
+ * @discussion
+ * Suspends execution of a thread similar to thread_suspend(). See also
+ * platform_task_suspend_threads().
+ *
+ * @param thread
+ * A handle to the thread to suspend.
+ *
+ * @return
+ * KERN_SUCCESS if the thread was successfully suspended, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_thread_suspend(platform_thread_t thread);
+
+/*!
+ * @function platform_thread_resume
+ *
+ * @discussion
+ * Resumes execution of a thread similar to thread_resume(). See also
+ * platform_task_suspend_threads() and platform_task_resume_threads().
+ *
+ * @param thread
+ * A handle to the thread to resume.
+ *
+ * @return
+ * KERN_SUCCESS if the thread was successfully resumed, otherwise a mach
+ * error code.
+*/
+kern_return_t
+platform_thread_resume(platform_thread_t thread);
+
+/*!
+ * @function platform_thread_info
+ *
+ * @discussion
+ * Similar to thread_info. Supported flavor structures:
+ * - THREAD_BASIC_INFO: struct thread_basic_info
+ * - THREAD_IDENTIFIER_INFO: struct thread_identifier_info
+ *
+ * @param thread
+ * A handle to the target thread.
+ *
+ * @param flavor
+ * The desired thread info structure.
+ *
+ * @param info
+ * A pointer to storage where the thread info structure should be written.
+ *
+ * @param size
+ * On input, the size in bytes of the storage where the thread info structure
+ * is to be written. On output, the size of the thread info structure in bytes.
+ *
+ * @return
+ * KERN_SUCCESS if the function was successfully performed, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_thread_info(platform_thread_t thread,
+                      thread_flavor_t flavor,
+                      void *info,
+                      size_t *size);
+
+/*!
+ * @function platform_thread_get_state
+ *
+ * @discussion
+ * Similar to thread_get_state. Supported flavor structures:
+ * - x86_THREAD_STATE32: struct ...
+ * - x86_FLOAT_STATE32: struct ...
+ * - x86_EXCEPTION_STATE32: struct ...
+ * - x86_DEBUG_STATE32: struct ...
+ * - x86_AVX_STATE32: struct ...
+ * - x86_THREAD_STATE64: struct ...
+ * - x86_FLOAT_STATE64: struct ...
+ * - x86_EXCEPTION_STATE64: struct ...
+ * - x86_DEBUG_STATE64: struct ...
+ * - x86_AVX_STATE64: struct ...
+ * - ARM_THREAD_STATE32: struct ...
+ * - ARM_FLOAT_STATE32: struct ...
+ * - ARM_EXCEPTION_STATE32: struct ...
+ * - ARM_DEBUG_STATE32: struct ...
+ * - ARM_THREAD_STATE64: struct ...
+ * - ARM_FLOAT_STATE64: struct ...
+ * - ARM_EXCEPTION_STATE64: struct ...
+ * - ARM_DEBUG_STATE64: struct ...
+ * - ...
+ *
+ * @param thread
+ * A handle to the target thread.
+ *
+ * @param flavor
+ * The desired thread state structure.
+ *
+ * @param state
+ * A pointer to storage where the thread state structure should be written.
+ *
+ * @param size
+ * On input, the size in bytes of the storage where the thread state structure
+ * is to be written. On output, the size of the thread state structure in bytes.
+ *
+ * @return
+ * KERN_SUCCESS if the function was successfully performed, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_thread_get_state(platform_thread_t thread,
+                         thread_state_flavor_t flavor,
+                         void *state,
+                         size_t *size);
+
+/*!
+ * @function platform_thread_set_state
+ *
+ * @discussion
+ * Similar to thread_set_state. Supported flavor structures:
+ * - x86_THREAD_STATE32: struct ...
+ * - x86_FLOAT_STATE32: struct ...
+ * - x86_EXCEPTION_STATE32: struct ...
+ * - x86_DEBUG_STATE32: struct ...
+ * - x86_AVX_STATE32: struct ...
+ * - x86_THREAD_STATE64: struct ...
+ * - x86_FLOAT_STATE64: struct ...
+ * - x86_EXCEPTION_STATE64: struct ...
+ * - x86_DEBUG_STATE64: struct ...
+ * - x86_AVX_STATE64: struct ...
+ * - ARM_THREAD_STATE32: struct ...
+ * - ARM_FLOAT_STATE32: struct ...
+ * - ARM_EXCEPTION_STATE32: struct ...
+ * - ARM_DEBUG_STATE32: struct ...
+ * - ARM_THREAD_STATE64: struct ...
+ * - ARM_FLOAT_STATE64: struct ...
+ * - ARM_EXCEPTION_STATE64: struct ...
+ * - ARM_DEBUG_STATE64: struct ...
+ * - ...
+ *
+ * @param thread
+ * A handle to the target thread.
+ *
+ * @param flavor
+ * The desired thread state structure.
+ *
+ * @param state
+ * A pointer to storage where the thread state structure should be written.
+ *
+ * @param size
+ * The size of the thread state structure in bytes.
+ *
+ * @return
+ * KERN_SUCCESS if the function was successfully performed, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_thread_set_state(platform_thread_t thread,
+                         thread_state_flavor_t flavor,
+                         const void *state,
+                         size_t size);
+
+/*!
+ * @function platform_thread_perform
+ *
+ * @discussion
+ * Performs the specified function within the context of the specified thread
+ * in the target task. The function will execute in the style of an
+ * asynchronous signal handler even if the thread is suspended as the result
+ * of a call to platform_task_suspend_threads() or platform_thread_suspend().
+ * The original state of the thread will be restored when the function returns.
+ *
+ * The function and context addresses are in the virtual address space of the
+ * target task. It is the responsibility of the debugger to have previously
+ * mapped executable text and data at these addresses in the target task.
+ *
+ * See also platform_task_perform().
+ *
+ * @param thread
+ * A handle to the target thread.
+ *
+ * @param func_addr
+ * The address (in the virtual address space of the target task) of the
+ * function to perform. The function should be of type (void (*)(void *))
+ * and will be passed the value of the data_addr parameter.
+ *
+ * @param data_addr
+ * The address (in the virtual address space of the target task) of the
+ * data to pass as a parameter to the function to perform.
+ *
+ * @return
+ * KERN_SUCCESS if the function was successfully performed, otherwise a mach
+ * error code.
+ */
+kern_return_t
+platform_thread_perform(platform_thread_t thread,
+                       mach_vm_address_t func_addr,
+                       mach_vm_address_t data_addr);
+
+/*!
+ * @function platform_thread_get_pthread
+ *
+ * @discussion
+ * Returns a pointer to mapped memory which represents the pthread_t of the
+ * target process. Any embedded pointers will need to be mapped into the current 
+ * process space on a case-by-case basis.
+ *
+ * @param thread
+ * A handle to the target thread.
+ *
+ * @return
+ * A valid pointer.
+ */
+const void *
+platform_thread_get_pthread(platform_thread_t thread);
+
+#endif // __PLATFORM_INTROSPECTION_H__
diff --git a/include/platform/string.h b/include/platform/string.h
new file mode 100644 (file)
index 0000000..c4d1c3f
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _PLATFORM_STRING_H_
+#define _PLATFORM_STRING_H_
+
+#include <sys/cdefs.h>
+#include <Availability.h>
+#include <TargetConditionals.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#define _PLATFORM_OPTIMIZED_BZERO 0
+#define _PLATFORM_OPTIMIZED_MEMCCPY 0
+#define _PLATFORM_OPTIMIZED_MEMCHR 0
+#define _PLATFORM_OPTIMIZED_MEMCMP 0
+#define _PLATFORM_OPTIMIZED_MEMMOVE 0
+#define _PLATFORM_OPTIMIZED_MEMSET 0
+#define _PLATFORM_OPTIMIZED_MEMSET_PATTERN4 0
+#define _PLATFORM_OPTIMIZED_MEMSET_PATTERN8 0
+#define _PLATFORM_OPTIMIZED_MEMSET_PATTERN16 0
+#define _PLATFORM_OPTIMIZED_STRCHR 0
+#define _PLATFORM_OPTIMIZED_STRCMP 0
+#define _PLATFORM_OPTIMIZED_STRNCMP 0
+
+/* Primitives used to implement C memory and string routines */
+
+__BEGIN_DECLS
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void
+_platform_bzero(void *s, size_t n);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void *
+_platform_memchr(const void *s, int c, size_t n);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+int
+_platform_memcmp(const void *s1, const void *s2, size_t n);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void *
+_platform_memmove(void *dst, const void *src, size_t n);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void *
+_platform_memccpy(void *restrict dst, const void *restrict src, int c, size_t n);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void *
+_platform_memset(void *b, int c, size_t len);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void
+_platform_memset_pattern4(void *b, const void *pattern4, size_t len);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void
+_platform_memset_pattern8(void *b, const void *pattern8, size_t len);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+void
+_platform_memset_pattern16(void *b, const void *pattern16, size_t len);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+char *
+_platform_strchr(const char *s, int c);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+int
+_platform_strcmp(const char *s1, const char *s2);
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+int
+_platform_strncmp(const char *s1, const char *s2, size_t n);
+
+__END_DECLS
+
+#endif /* _PLATFORM_STRING_H_ */
diff --git a/include/setjmp.h b/include/setjmp.h
new file mode 100644 (file)
index 0000000..f54ddef
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#ifndef _BSD_SETJMP_H
+#define _BSD_SETJMP_H
+
+#include <sys/cdefs.h>
+#include <Availability.h>
+
+#if defined(__x86_64__)
+/*
+ * _JBLEN is number of ints required to save the following:
+ * rflags, rip, rbp, rsp, rbx, r12, r13, r14, r15... these are 8 bytes each
+ * mxcsr, fp control word, sigmask... these are 4 bytes each
+ * add 16 ints for future expansion needs...
+ */
+#define _JBLEN ((9 * 2) + 3 + 16)
+typedef int jmp_buf[_JBLEN];
+typedef int sigjmp_buf[_JBLEN + 1];
+
+#elif defined(__i386__)
+
+/*
+ * _JBLEN is number of ints required to save the following:
+ * eax, ebx, ecx, edx, edi, esi, ebp, esp, ss, eflags, eip,
+ * cs, de, es, fs, gs == 16 ints
+ * onstack, mask = 2 ints
+ */
+
+#define _JBLEN (18)
+typedef int jmp_buf[_JBLEN];
+typedef int sigjmp_buf[_JBLEN + 1];
+
+#elif defined(__arm__) && !defined(__ARM_ARCH_7K__)
+
+#include <machine/signal.h>
+
+/*
+ *     _JBLEN is number of ints required to save the following:
+ *     r4-r8, r10, fp, sp, lr, sig  == 10 register_t sized
+ *     s16-s31 == 16 register_t sized + 1 int for FSTMX
+ *     1 extra int for future use
+ */
+#define _JBLEN         (10 + 16 + 2)
+#define _JBLEN_MAX     _JBLEN
+
+typedef int jmp_buf[_JBLEN];
+typedef int sigjmp_buf[_JBLEN + 1];
+
+#elif defined(__arm64__) || defined(__ARM_ARCH_7K__)
+/*
+ * _JBLEN is the number of ints required to save the following:
+ * r21-r29, sp, fp, lr == 12 registers, 8 bytes each. d8-d15
+ * are another 8 registers, each 8 bytes long. (aapcs64 specifies
+ * that only 64-bit versions of FP registers need to be saved).
+ * Finally, two 8-byte fields for signal handling purposes.
+ */
+#define _JBLEN         ((14 + 8 + 2) * 2)
+
+typedef int jmp_buf[_JBLEN];
+typedef int sigjmp_buf[_JBLEN + 1];
+
+#else
+#      error Undefined platform for setjmp
+#endif
+
+__BEGIN_DECLS
+extern int     setjmp(jmp_buf);
+extern void longjmp(jmp_buf, int) __dead2;
+
+#ifndef _ANSI_SOURCE
+int    _setjmp(jmp_buf);
+void   _longjmp(jmp_buf, int) __dead2;
+int    sigsetjmp(sigjmp_buf, int);
+void   siglongjmp(sigjmp_buf, int) __dead2;
+#endif /* _ANSI_SOURCE  */
+
+#if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE))
+void   longjmperror(void);
+#endif /* neither ANSI nor POSIX */
+__END_DECLS
+
+#endif /* _BSD_SETJMP_H */
diff --git a/include/ucontext.h b/include/ucontext.h
new file mode 100644 (file)
index 0000000..db570b2
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2002, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * These routines are DEPRECATED and should not be used.
+ */
+#ifndef _UCONTEXT_H_
+#define _UCONTEXT_H_
+
+#include <sys/cdefs.h>
+
+#ifdef _XOPEN_SOURCE
+#include <sys/ucontext.h>
+#include <Availability.h>
+
+__BEGIN_DECLS
+int  getcontext(ucontext_t *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED;
+void makecontext(ucontext_t *, void (*)(), int, ...) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED;
+int  setcontext(const ucontext_t *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED;
+int  swapcontext(ucontext_t * __restrict, const ucontext_t * __restrict) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_2_0, __IPHONE_2_0) __WATCHOS_PROHIBITED __TVOS_PROHIBITED;
+__END_DECLS
+#else /* !_XOPEN_SOURCE */
+#error The deprecated ucontext routines require _XOPEN_SOURCE to be defined
+#endif /* _XOPEN_SOURCE */
+
+#endif /* _UCONTEXT_H_ */
diff --git a/internal/os/internal.h b/internal/os/internal.h
new file mode 100644 (file)
index 0000000..4d7a083
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_INTERNAL_H__
+#define __OS_INTERNAL_H__
+
+#define __OS_ALLOC_INDIRECT__
+
+#include <TargetConditionals.h>
+#include <machine/cpu_capabilities.h>
+
+#include "os/base_private.h"
+#include "os/semaphore_private.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <limits.h>
+#if defined(__arm__)
+#include <arm/arch.h>
+#endif
+#include <mach/thread_switch.h>
+
+#define likely(x) os_likely(x)
+#define unlikely(x) os_unlikely(x)
+
+#define __OS_CRASH__(rc, msg)  ({ \
+               _os_set_crash_log_cause_and_message(rc, msg); \
+               os_prevent_tail_call_optimization(); \
+               __builtin_trap(); \
+       })
+
+#define __LIBPLATFORM_CLIENT_CRASH__(rc, msg) \
+               __OS_CRASH__(rc, "BUG IN CLIENT OF LIBPLATFORM: " msg)
+#define __LIBPLATFORM_INTERNAL_CRASH__(rc, msg) \
+               __OS_CRASH__(rc, "BUG IN LIBPLATFORM: " msg)
+
+
+#define __OS_EXPOSE_INTERNALS__ 1
+#include "os/internal/internal_shared.h"
+#include "yield.h"
+
+#if !VARIANT_NO_RESOLVERS
+#if defined(_ARM_ARCH_7) && !defined(__ARM_ARCH_7S__)
+#if OS_ATOMIC_UP
+#define OS_VARIANT_SELECTOR up
+#else
+#define OS_VARIANT_SELECTOR mp
+#endif
+#endif
+#if !defined(OS_VARIANT_SELECTOR) && defined(VARIANT_NO_RESOLVERS)
+// forced up variant for no-barrier OSAtomics
+#define OS_ATOMIC_NO_BARRIER_ONLY 1
+#define OS_VARIANT_SELECTOR up
+#endif
+#if (defined(_ARM_ARCH_7) || defined(__arm64__)) && \
+               (!defined(OS_ATOMIC_WFE) && !OS_ATOMIC_UP)
+#define OS_ATOMIC_WFE 0
+#endif
+#ifdef OS_ATOMIC_WFE
+#if OS_ATOMIC_WFE
+#define OS_LOCK_VARIANT_SELECTOR wfe
+#else
+#define OS_LOCK_VARIANT_SELECTOR mp
+#endif
+#endif
+#endif // !VARIANT_NO_RESOLVERS
+
+#define OS_VARIANT(f, v) OS_CONCAT(f, OS_CONCAT($VARIANT$, v))
+
+#ifdef OS_VARIANT_SELECTOR
+#define _OS_ATOMIC_ALIAS_PRIVATE_EXTERN(n) \
+               ".private_extern _" OS_STRINGIFY(n) "\n\t"
+#define OS_ATOMIC_EXPORT
+#else
+#define _OS_ATOMIC_ALIAS_PRIVATE_EXTERN(n)
+#define OS_ATOMIC_EXPORT OS_EXPORT
+#endif
+#define _OS_ATOMIC_ALIAS_GLOBL(n) \
+               ".globl _" OS_STRINGIFY(n) "\n\t"
+#ifdef __thumb__
+#define _OS_ATOMIC_ALIAS_THUMB(n) \
+               ".thumb_func _" OS_STRINGIFY(n) "\n\t"
+#else
+#define _OS_ATOMIC_ALIAS_THUMB(n)
+#endif
+#define _OS_ATOMIC_ALIAS_SET(n, o) \
+               ".set _" OS_STRINGIFY(n) ", _" OS_STRINGIFY(o)
+
+#define OS_ATOMIC_ALIAS(n, o) __asm__( \
+               _OS_ATOMIC_ALIAS_PRIVATE_EXTERN(n) \
+               _OS_ATOMIC_ALIAS_GLOBL(n) \
+               _OS_ATOMIC_ALIAS_THUMB(n) \
+               _OS_ATOMIC_ALIAS_SET(n, o))
+
+#define OS_ATOMIC_EXPORT_ALIAS(n, o) __asm__( \
+               _OS_ATOMIC_ALIAS_GLOBL(n) \
+               _OS_ATOMIC_ALIAS_THUMB(n) \
+               _OS_ATOMIC_ALIAS_SET(n, o))
+
+#define _OS_VARIANT_RESOLVER(s, v, ...) \
+       __attribute__((visibility(OS_STRINGIFY(v)))) extern void* s(void); \
+       void* s(void) { \
+       __asm__(".symbol_resolver _" OS_STRINGIFY(s)); \
+               __VA_ARGS__ \
+       }
+
+#define _OS_VARIANT_UPMP_RESOLVER(s, v) \
+       _OS_VARIANT_RESOLVER(s, v, \
+               uint32_t *_c = (void*)(uintptr_t)_COMM_PAGE_CPU_CAPABILITIES; \
+               if (*_c & kUP) { \
+                       extern void OS_VARIANT(s, up)(void); \
+                       return &OS_VARIANT(s, up); \
+               } else { \
+                       extern void OS_VARIANT(s, mp)(void); \
+                       return &OS_VARIANT(s, mp); \
+               })
+
+#define OS_VARIANT_UPMP_RESOLVER(s) \
+       _OS_VARIANT_UPMP_RESOLVER(s, default)
+
+#define OS_VARIANT_UPMP_RESOLVER_INTERNAL(s) \
+       _OS_VARIANT_UPMP_RESOLVER(s, hidden)
+
+#endif // __OS_INTERNAL_H__
diff --git a/internal/os/internal_asm.h b/internal/os/internal_asm.h
new file mode 100644 (file)
index 0000000..e01272d
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef __INTERNAL_ASM_H__
+#define __INTERNAL_ASM_H__
+
+#include <machine/cpu_capabilities.h>
+
+#define OS_STRINGIFY1(s) #s
+#define OS_STRINGIFY(s) OS_STRINGIFY1(s)
+#define OS_CONCAT1(x, y) x ## y
+#define OS_CONCAT(x, y) OS_CONCAT1(x, y)
+
+#ifdef __ASSEMBLER__
+#define OS_VARIANT(f, v) OS_CONCAT(_, OS_CONCAT(f, OS_CONCAT($VARIANT$, v)))
+#else
+#define OS_VARIANT(f, v) OS_CONCAT(f, OS_CONCAT($VARIANT$, v))
+#endif
+
+#if defined(__ASSEMBLER__)
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#define OS_VARIANT_FUNCTION_START(name, variant, alignment) \
+       .text ; \
+       .align alignment, 0x90 ; \
+       .private_extern OS_VARIANT(name, variant) ; \
+       OS_VARIANT(name, variant) ## :
+
+// GENERIC indicates that this function will be chosen as the generic
+// implementation (at compile time) when building targets which do not
+// support dyld variant resolves.
+#if defined(VARIANT_NO_RESOLVERS) || defined(VARIANT_DYLD)
+#define OS_VARIANT_FUNCTION_START_GENERIC(name, variant, alignment) \
+       OS_VARIANT_FUNCTION_START(name, variant, alignment) \
+       .globl _ ## name ; \
+       _ ## name ## :
+#else
+#define OS_VARIANT_FUNCTION_START_GENERIC OS_VARIANT_FUNCTION_START
+#endif
+
+#define OS_ATOMIC_FUNCTION_START(name, alignment) \
+       .text ; \
+       .align alignment, 0x90 ; \
+       .globl _ ## name ; \
+       _ ## name ## :
+
+#endif // defined(__i386__) || defined(__x86_64__)
+
+#endif // defined(__ASSEMBLER__)
+
+#endif // __INTERNAL_ASM_H__
diff --git a/internal/os/yield.h b/internal/os/yield.h
new file mode 100644 (file)
index 0000000..a094b2d
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libplatform
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __OS_YIELD__
+#define __OS_YIELD__
+
+#include <TargetConditionals.h>
+
+#pragma mark -
+#pragma mark _os_wait_until
+
+#if OS_ATOMIC_UP
+#define _os_wait_until(c) do { \
+               int _spins = 0; \
+               while (unlikely(!(c))) { \
+                       _spins++; \
+                       _os_preemption_yield(_spins); \
+               } } while (0)
+#elif TARGET_OS_EMBEDDED
+// <rdar://problem/15508918>
+#ifndef OS_WAIT_SPINS
+#define OS_WAIT_SPINS 1024
+#endif
+#define _os_wait_until(c) do { \
+               int _spins = -(OS_WAIT_SPINS); \
+               while (unlikely(!(c))) { \
+                       if (unlikely(_spins++ >= 0)) { \
+                               _os_preemption_yield(_spins); \
+                       } else { \
+                               os_hardware_pause(); \
+                       } \
+               } } while (0)
+#else
+#define _os_wait_until(c) do { \
+               while (!(c)) { \
+                       os_hardware_pause(); \
+               } } while (0)
+#endif
+
+#pragma mark -
+#pragma mark os_hardware_pause
+
+#if defined(__x86_64__) || defined(__i386__)
+#define os_hardware_pause() __asm__("pause")
+#elif (defined(__arm__) && defined(_ARM_ARCH_7) && defined(__thumb__)) || \
+               defined(__arm64__)
+#define os_hardware_pause() __asm__("yield")
+#define os_hardware_wfe()   __asm__("wfe")
+#else
+#define os_hardware_pause() __asm__("")
+#endif
+
+#pragma mark -
+#pragma mark _os_preemption_yield
+
+#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) && !(TARGET_IPHONE_SIMULATOR && \
+               IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090)
+#define OS_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS
+#else
+#define OS_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS
+#endif
+#define _os_preemption_yield(n) thread_switch(MACH_PORT_NULL, \
+               OS_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n))
+
+#endif // __OS_YIELD__
diff --git a/man/atomic.3 b/man/atomic.3
new file mode 100644 (file)
index 0000000..eee6e43
--- /dev/null
@@ -0,0 +1,62 @@
+.Dd May 26, 2004
+.Dt ATOMIC 3
+.Os Darwin
+.Sh NAME
+.Nm OSAtomicEnqueue ,
+.Nm OSAtomicDequeue
+.Nd atomic lockless queues
+.Sh SYNOPSIS
+.In libkern/OSAtomic.h
+.Ft void
+.Fn OSAtomicEnqueue "OSQueueHead *list" "void *new" "size_t offset"
+.Ft void*
+.Fn OSAtomicDequeue "OSQueueHead *list" "size_t offset"
+.Sh DESCRIPTION
+The routines
+.Fn OSAtomicEnqueue
+and
+.Fn OSAtomicDequeue
+operate on singly linked LIFO queues.  That is, a dequeue operation will return the
+most recently enqueued element, or NULL if the list is empty.  The operations
+are lockless, and barriers are used as necessary to permit thread-safe access to
+the queue element.
+.Fa offset
+is the offset in bytes to the link field in the queue element.
+.Pp
+.Bf -symbolic
+Important: the memory backing the link field of a queue element must not be
+unmapped after
+.Fn OSAtomicDequeue
+returns until all concurrent calls to
+.Fn OSAtomicDequeue
+for the same list on other threads have also returned, as they may still be
+accessing that memory location.
+.Ef
+.Sh EXAMPLES
+.Bd -literal -offset indent
+       typedef struct elem {
+               long    data1;
+               struct elem *link;
+               int     data2;
+       } elem_t;
+       
+       elem_t fred, mary, *p;
+       
+       OSQueueHead q = OS_ATOMIC_QUEUE_INIT;
+       
+       OSAtomicEnqueue( &q, &fred, offsetof(elem_t,link) );
+       OSAtomicEnqueue( &q, &mary, offsetof(elem_t,link) );
+       
+       p = OSAtomicDequeue( &q, offsetof(elem_t,link) );
+       
+.Ed
+In this example, the call of
+.Fn OSAtomicDequeue
+will return a pointer to mary.
+.Sh RETURN VALUES
+The dequeue operation returns the most recently enqueued element, or NULL if the list is empty.
+.Sh SEE ALSO
+.Xr atomic_deprecated 3 ,
+.Xr spinlock_deprecated 3
+.Sh HISTORY
+These functions first appeared in Mac OS 10.5 (Leopard).
diff --git a/man/atomic_deprecated.3 b/man/atomic_deprecated.3
new file mode 100644 (file)
index 0000000..8db9557
--- /dev/null
@@ -0,0 +1,232 @@
+.Dd May 26, 2004
+.Dt ATOMIC_DEPRECATED 3
+.Os Darwin
+.Sh NAME
+.Nm OSAtomicAdd32 ,
+.Nm OSAtomicAdd32Barrier ,
+.Nm OSAtomicIncrement32 ,
+.Nm OSAtomicIncrement32Barrier ,
+.Nm OSAtomicDecrement32 ,
+.Nm OSAtomicDecrement32Barrier ,
+.Nm OSAtomicOr32 ,
+.Nm OSAtomicOr32Barrier ,
+.Nm OSAtomicOr32Orig ,
+.Nm OSAtomicOr32OrigBarrier ,
+.Nm OSAtomicAnd32 ,
+.Nm OSAtomicAnd32Barrier ,
+.Nm OSAtomicAnd32Orig ,
+.Nm OSAtomicAnd32OrigBarrier ,
+.Nm OSAtomicXor32 ,
+.Nm OSAtomicXor32Barrier ,
+.Nm OSAtomicXor32Orig ,
+.Nm OSAtomicXor32OrigBarrier ,
+.Nm OSAtomicAdd64 ,
+.Nm OSAtomicAdd64Barrier ,
+.Nm OSAtomicIncrement64 ,
+.Nm OSAtomicIncrement64Barrier ,
+.Nm OSAtomicDecrement64 ,
+.Nm OSAtomicDecrement64Barrier ,
+.Nm OSAtomicCompareAndSwapInt ,
+.Nm OSAtomicCompareAndSwapIntBarrier ,
+.Nm OSAtomicCompareAndSwapLong ,
+.Nm OSAtomicCompareAndSwapLongBarrier ,
+.Nm OSAtomicCompareAndSwapPtr ,
+.Nm OSAtomicCompareAndSwapPtrBarrier ,
+.Nm OSAtomicCompareAndSwap32 ,
+.Nm OSAtomicCompareAndSwap32Barrier ,
+.Nm OSAtomicCompareAndSwap64 ,
+.Nm OSAtomicCompareAndSwap64Barrier ,
+.Nm OSAtomicTestAndSet ,
+.Nm OSAtomicTestAndSetBarrier ,
+.Nm OSAtomicTestAndClear ,
+.Nm OSAtomicTestAndClearBarrier ,
+.Nm OSMemoryBarrier
+.Nd deprecated atomic add, increment, decrement, or, and, xor, compare and swap, test and set, test and clear, and memory barrier
+.Sh SYNOPSIS
+.In libkern/OSAtomic.h
+.Ft int32_t
+.Fn OSAtomicAdd32 "int32_t theAmount" "volatile int32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicAdd32Barrier "int32_t theAmount" "volatile int32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicIncrement32 "volatile int32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicIncrement32Barrier "volatile int32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicDecrement32 "volatile int32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicDecrement32Barrier "volatile int32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicOr32 "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicOr32Barrier "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicAnd32 "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicAnd32Barrier "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicXor32 "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicXor32Barrier "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicOr32Orig "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicOr32OrigBarrier "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicAnd32Orig "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicAnd32OrigBarrier "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicXor32Orig "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int32_t
+.Fn OSAtomicXor32OrigBarrier "uint32_t theMask" "volatile uint32_t *theValue"
+.Ft int64_t
+.Fn OSAtomicAdd64 "int64_t theAmount" "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft int64_t
+.Fn OSAtomicAdd64Barrier "int64_t theAmount" "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft int64_t
+.Fn OSAtomicIncrement64 "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft int64_t
+.Fn OSAtomicIncrement64Barrier "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft int64_t
+.Fn OSAtomicDecrement64 "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft int64_t
+.Fn OSAtomicDecrement64Barrier "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwapInt "int oldValue" "int newValue" "volatile int *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwapIntBarrier "int oldValue" "int newValue" "volatile int *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwapLong "long oldValue" "long newValue" "volatile long *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwapLongBarrier "long oldValue" "long newValue" "volatile long *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwapPtr "void* oldValue" "void* newValue" "void* volatile *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwapPtrBarrier "void* oldValue" "void* newValue" "void* volatile *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwap32 "int32_t oldValue" "int32_t newValue" "volatile int32_t *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwap32Barrier "int32_t oldValue" "int32_t newValue" "volatile int32_t *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwap64 "int64_t oldValue" "int64_t newValue" "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft bool
+.Fn OSAtomicCompareAndSwap64Barrier "int64_t oldValue" "int64_t newValue" "volatile OSAtomic_int64_aligned64_t *theValue"
+.Ft bool
+.Fn OSAtomicTestAndSet "uint32_t n" "volatile void *theAddress"
+.Ft bool
+.Fn OSAtomicTestAndSetBarrier "uint32_t n" "volatile void *theAddress"
+.Ft bool
+.Fn OSAtomicTestAndClear "uint32_t n" "volatile void *theAddress"
+.Ft bool
+.Fn OSAtomicTestAndClearBarrier "uint32_t n" "volatile void *theAddress"
+.Ft bool
+.Fn OSAtomicEnqueue "OSQueueHead *list" "void *new" "size_t offset"
+.Ft void*
+.Fn OSAtomicDequeue "OSQueueHead *list" "size_t offset"
+.Ft void
+.Fn OSMemoryBarrier "void"
+.Sh DESCRIPTION
+.Bf -symbolic
+These are deprecated interfaces for atomic and synchronization
+operations, provided for compatibility with legacy code. New code should use
+the C11
+.In stdatomic.h
+interfaces.
+.Ef
+.Pp
+These functions are thread and multiprocessor safe.  For each function, there
+is a version which incorporates a memory barrier and another version which does
+not.
+Barriers strictly order memory access on a weakly-ordered architecture such as
+ARM. All loads and stores executed in sequential program
+order before the barrier will complete before any load or store executed after
+the barrier.
+On some platforms, such as ARM, the barrier operation can be quite expensive.
+.Pp
+Most code will want to use the barrier functions to ensure that memory shared
+between threads is properly synchronized.  For example, if you want to
+initialize a shared data structure and then atomically increment a variable to
+indicate that the initialization is complete, then you must use
+.Fn OSAtomicIncrement32Barrier
+to ensure that the stores to your data structure complete before the atomic add.
+Likewise, the consumer of that data structure must use
+.Fn OSAtomicDecrement32Barrier ,
+in order to ensure that their loads of the structure are not executed before
+the atomic decrement.  On the other hand, if you are simply incrementing a
+global counter, then it is safe and potentially much faster to use
+.Fn OSAtomicIncrement32 .
+If you are unsure which version to use, prefer the barrier variants as they are
+safer.
+.Pp
+The logical (and, or, xor) and bit test operations are layered on top of the
+.Fn OSAtomicCompareAndSwap
+primitives.  There are four versions of each logical operation, depending on
+whether or not there is a barrier, and whether the return value is the result
+of the operation (eg,
+.Fn OSAtomicOr32
+) or the original value before the operation (eg,
+.Fn OSAtomicOr32Orig
+).
+.Pp
+The memory address
+.Fa theValue
+must be "naturally aligned", i.e. 32-bit aligned for 32-bit operations and
+64-bit aligned for 64-bit operations. Note that this is not the default alignment
+of the
+.Vt int64_t
+in the iOS ARMv7 ABI; the
+.Vt OSAtomic_int64_aligned64_t
+type can be used to declare variables with the required alignment.
+.Pp
+The
+.Fn OSAtomicCompareAndSwap
+operations compare
+.Fa oldValue
+to
+.Fa *theValue ,
+and set
+.Fa *theValue
+to
+.Fa newValue
+if the comparison is equal.  The comparison and assignment
+occur as one atomic operation.
+.Pp
+.Fn OSAtomicTestAndSet
+and
+.Fn OSAtomicTestAndClear
+operate on bit (0x80 >> (
+.Fa n
+& 7)) of byte ((char*)
+.Fa theAddress
++ (
+.Fa n
+>> 3)).  They set the named bit to either 1 or 0, respectively.
+.Fa theAddress
+need not be aligned.
+.Pp
+The
+.Fn OSMemoryBarrier
+function strictly orders memory accesses in a weakly ordered memory model such
+as with ARM, by creating a barrier.
+All loads and stores executed in sequential program order before the barrier
+will complete with respect to the memory coherence mechanism, before any load
+or store executed after the barrier. Used with an atomic operation, the barrier
+can be used to create custom synchronization protocols as an alternative to the
+spinlock or queue/dequeue operations. Note that this barrier does not order
+uncached loads and stores. On a uniprocessor, the barrier operation is
+typically optimized into a no-op.
+.Sh RETURN VALUES
+The arithmetic operations return the new value, after the operation has been
+performed. The boolean operations come in two styles, one of which returns the
+new value, and one of which (the "Orig" versions) returns the old.
+The compare-and-swap operations return true if the comparison was equal, ie if
+the swap occurred. The bit test and set/clear operations return the original
+value of the bit.
+.Sh SEE ALSO
+.Xr atomic 3 ,
+.Xr spinlock_deprecated 3
+.Sh HISTORY
+Most of these functions first appeared in Mac OS 10.4 (Tiger).  The "Orig"
+forms of the boolean operations, the "int", "long" and "ptr" forms of
+compare-and-swap first appeared in Mac OS 10.5 (Leopard).
diff --git a/man/cache.3 b/man/cache.3
new file mode 100644 (file)
index 0000000..0dbe77d
--- /dev/null
@@ -0,0 +1,55 @@
+.Dd September 21, 2006
+.Dt CACHE 3
+.Os Darwin
+.Sh NAME
+.Nm sys_cache_control ,
+.Nm sys_icache_invalidate ,
+.Nm sys_dcache_flush
+.Nd cache control
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In libkern/OSCacheControl.h
+.Ft int
+.Fn sys_cache_control "int function" "void *start" "size_t len"
+.Ft void
+.Fn sys_icache_invalidate "void *start" "size_t len"
+.Ft void
+.Fn sys_dcache_flush "void *start" "size_t len"
+.Sh DESCRIPTION
+.Pp
+These functions operate on every cache line containing one of the 
+.Fa len
+bytes of memory pointed to by
+.Fa start .
+Normally the operations apply to every
+processor in the system, but the exact semantics of these
+operations is platform dependent.  They should be used with caution.
+.Pp
+.Fn sys_cache_control
+performs the operation specified by
+.Fa function .
+Refer to the header file for a list of currently supported functions.
+.Pp
+.Fn sys_icache_invalidate
+prepares memory for execution, typically by invalidating the instruction
+cache for the indicated range.  This should be called
+after writing machine instructions to memory, and before
+executing them.  On IA32 processors this function is a NOP, because
+their instruction caches are coherent.
+.Pp
+.Fn sys_dcache_flush
+writes modified data cache lines to main memory,
+and then invalidates all lines in the range being operated on.
+It can be useful when dealing with cache incoherent
+devices or DMA.
+.Sh RETURN VALUES
+.Fn sys_cache_control
+returns zero on success, ENOTSUP if
+.Fa function
+is not valid.
+.Sh SEE ALSO
+.Xr atomic 3 ,
+.Xr barrier 3
+.Sh HISTORY
+These functions first appeared in Mac OS 10.5 (Leopard).
\ No newline at end of file
diff --git a/man/ffs.3 b/man/ffs.3
new file mode 100644 (file)
index 0000000..df37a3d
--- /dev/null
+++ b/man/ffs.3
@@ -0,0 +1,110 @@
+.\" Copyright (c) 1990, 1991, 1993
+.\"    The Regents of the University of California.  All rights reserved.
+.\"
+.\" This code is derived from software contributed to Berkeley by
+.\" Chris Torek.
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\" 4. Neither the name of the University nor the names of its contributors
+.\"    may be used to endorse or promote products derived from this software
+.\"    without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"     @(#)ffs.3      8.2 (Berkeley) 4/19/94
+.\" $FreeBSD: src/lib/libc/string/ffs.3,v 1.15 2012/09/30 03:25:04 eadler Exp $
+.\"
+.Dd September 29, 2012
+.Dt FFS 3
+.Os
+.Sh NAME
+.Nm ffs ,
+.Nm ffsl ,
+.Nm ffsll ,
+.Nm fls ,
+.Nm flsl ,
+.Nm flsll
+.Nd find first or last bit set in a bit string
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In strings.h
+.Ft int
+.Fn ffs "int value"
+.Ft int
+.Fn ffsl "long value"
+.Ft int
+.Fn ffsll "long long value"
+.Ft int
+.Fn fls "int value"
+.Ft int
+.Fn flsl "long value"
+.Ft int
+.Fn flsll "long long value"
+.Sh DESCRIPTION
+The
+.Fn ffs ,
+.Fn ffsl
+and
+.Fn ffsll
+functions find the first (least significant) bit set
+in
+.Fa value
+and return the index of that bit.
+.Pp
+The
+.Fn fls ,
+.Fn flsl
+and
+.Fn flsll
+functions find the last (most significant) bit set in
+.Fa value
+and return the index of that bit.
+.Pp
+Bits are numbered starting at 1, the least significant bit.
+A return value of zero from any of these functions means that the
+argument was zero.
+.Sh SEE ALSO
+.Xr bitstring 3
+.Sh HISTORY
+The
+.Fn ffs
+function appeared in
+.Bx 4.3 .
+Its prototype existed previously in
+.In string.h
+before it was moved to
+.In strings.h
+for
+.St -p1003.1-2001
+compliance.
+.Pp
+The
+.Fn ffsl ,
+.Fn fls
+and
+.Fn flsl
+functions appeared in
+.Fx 5.3 .
+The
+.Fn ffsll
+and
+.Fn flsll
+functions appeared in
+.Fx 7.1 .
diff --git a/man/getcontext.3 b/man/getcontext.3
new file mode 100644 (file)
index 0000000..c2963ad
--- /dev/null
@@ -0,0 +1,121 @@
+.\" Copyright (c) 2002 Packet Design, LLC.
+.\" All rights reserved.
+.\"
+.\" Subject to the following obligations and disclaimer of warranty,
+.\" use and redistribution of this software, in source or object code
+.\" forms, with or without modifications are expressly permitted by
+.\" Packet Design; provided, however, that:
+.\"
+.\"    (i)  Any and all reproductions of the source or object code
+.\"         must include the copyright notice above and the following
+.\"         disclaimer of warranties; and
+.\"    (ii) No rights are granted, in any manner or form, to use
+.\"         Packet Design trademarks, including the mark "PACKET DESIGN"
+.\"         on advertising, endorsements, or otherwise except as such
+.\"         appears in the above copyright notice or in the software.
+.\"
+.\" THIS SOFTWARE IS BEING PROVIDED BY PACKET DESIGN "AS IS", AND
+.\" TO THE MAXIMUM EXTENT PERMITTED BY LAW, PACKET DESIGN MAKES NO
+.\" REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING
+.\" THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED
+.\" WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+.\" OR NON-INFRINGEMENT.  PACKET DESIGN DOES NOT WARRANT, GUARANTEE,
+.\" OR MAKE ANY REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS
+.\" OF THE USE OF THIS SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY,
+.\" RELIABILITY OR OTHERWISE.  IN NO EVENT SHALL PACKET DESIGN BE
+.\" LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING OUT OF ANY USE
+.\" OF THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY DIRECT,
+.\" INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE, OR CONSEQUENTIAL
+.\" DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF
+.\" USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY THEORY OF
+.\" LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+.\" THE USE OF THIS SOFTWARE, EVEN IF PACKET DESIGN IS ADVISED OF
+.\" THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: src/lib/libc/gen/getcontext.3,v 1.3 2004/12/03 14:10:04 rse Exp $
+.\"
+.Dd September 10, 2002
+.Dt GETCONTEXT 3
+.Os
+.Sh NAME
+.Nm getcontext , setcontext
+.Nd get and set user thread context
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In ucontext.h
+.Ft int
+.Fn getcontext "ucontext_t *ucp"
+.Ft int
+.Fn setcontext "const ucontext_t *ucp"
+.Sh DESCRIPTION
+The
+.Fn getcontext
+function
+saves the current thread's execution context in the structure pointed to by
+.Fa ucp .
+This saved context may then later be restored by calling
+.Fn setcontext .
+.Pp
+The
+.Fn setcontext
+function
+makes a previously saved thread context the current thread context, i.e.,
+the current context is lost and
+.Fn setcontext
+does not return.
+Instead, execution continues in the context specified by
+.Fa ucp ,
+which must have been previously initialized by a call to
+.Fn getcontext ,
+.Xr makecontext 3 ,
+or by being passed as an argument to a signal handler (see
+.Xr sigaction 2 ) .
+.Pp
+If
+.Fa ucp
+was initialized by
+.Fn getcontext ,
+then execution continues as if the original
+.Fn getcontext
+call had just returned (again).
+.Pp
+If
+.Fa ucp
+was initialized by
+.Xr makecontext 3 ,
+execution continues with the invocation of the function specified to
+.Xr makecontext 3 .
+When that function returns,
+.Fa "ucp->uc_link"
+determines what happens next:
+if
+.Fa "ucp->uc_link"
+is
+.Dv NULL ,
+the process exits;
+otherwise,
+.Fn setcontext "ucp->uc_link"
+is implicitly invoked.
+.Pp
+If
+.Fa ucp
+was initialized by the invocation of a signal handler, execution continues
+at the point the thread was interrupted by the signal.
+.Sh RETURN VALUES
+If successful,
+.Fn getcontext
+returns zero and
+.Fn setcontext
+does not return; otherwise \-1 is returned.
+.Sh ERRORS
+No errors are defined for
+.Fn getcontext
+or
+.Fn setcontext .
+.Sh SEE ALSO
+.Xr sigaction 2 ,
+.Xr sigaltstack 2 ,
+.Xr makecontext 3 ,
+.Xr ucontext 3
diff --git a/man/makecontext.3 b/man/makecontext.3
new file mode 100644 (file)
index 0000000..d60038b
--- /dev/null
@@ -0,0 +1,122 @@
+.\" Copyright (c) 2002 Packet Design, LLC.
+.\" All rights reserved.
+.\"
+.\" Subject to the following obligations and disclaimer of warranty,
+.\" use and redistribution of this software, in source or object code
+.\" forms, with or without modifications are expressly permitted by
+.\" Packet Design; provided, however, that:
+.\"
+.\"    (i)  Any and all reproductions of the source or object code
+.\"         must include the copyright notice above and the following
+.\"         disclaimer of warranties; and
+.\"    (ii) No rights are granted, in any manner or form, to use
+.\"         Packet Design trademarks, including the mark "PACKET DESIGN"
+.\"         on advertising, endorsements, or otherwise except as such
+.\"         appears in the above copyright notice or in the software.
+.\"
+.\" THIS SOFTWARE IS BEING PROVIDED BY PACKET DESIGN "AS IS", AND
+.\" TO THE MAXIMUM EXTENT PERMITTED BY LAW, PACKET DESIGN MAKES NO
+.\" REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING
+.\" THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED
+.\" WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+.\" OR NON-INFRINGEMENT.  PACKET DESIGN DOES NOT WARRANT, GUARANTEE,
+.\" OR MAKE ANY REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS
+.\" OF THE USE OF THIS SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY,
+.\" RELIABILITY OR OTHERWISE.  IN NO EVENT SHALL PACKET DESIGN BE
+.\" LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING OUT OF ANY USE
+.\" OF THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY DIRECT,
+.\" INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE, OR CONSEQUENTIAL
+.\" DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF
+.\" USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY THEORY OF
+.\" LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+.\" THE USE OF THIS SOFTWARE, EVEN IF PACKET DESIGN IS ADVISED OF
+.\" THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: src/lib/libc/gen/makecontext.3,v 1.4 2002/12/19 09:40:21 ru Exp $
+.\"
+.Dd September 10, 2002
+.Dt MAKECONTEXT 3
+.Os
+.Sh NAME
+.Nm makecontext , swapcontext
+.Nd modify and exchange user thread contexts
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In ucontext.h
+.Ft void
+.Fo makecontext
+.Fa "ucontext_t *ucp"
+.Fa "void \*[lp]*func\*[rp]\*[lp]\*[rp]"
+.Fa "int argc" ...
+.Fc
+.Ft int
+.Fn swapcontext "ucontext_t *oucp" "const ucontext_t *ucp"
+.Sh DESCRIPTION
+The
+.Fn makecontext
+function
+modifies the user thread context pointed to by
+.Fa ucp ,
+which must have previously been initialized by a call to
+.Xr getcontext 3
+and had a stack allocated for it.
+The context is modified so that it will continue execution by invoking
+.Fn func
+with the arguments (of type
+.Ft int )
+provided.
+The
+.Fa argc
+argument
+must be equal to the number of additional arguments provided to
+.Fn makecontext
+and also equal to the number of arguments to
+.Fn func ,
+or else the behavior is undefined.
+.Pp
+The
+.Fa "ucp->uc_link"
+argument
+must be initialized before calling
+.Fn makecontext
+and determines the action to take when
+.Fn func
+returns:
+if equal to
+.Dv NULL ,
+the process exits;
+otherwise,
+.Fn setcontext "ucp->uc_link"
+is implicitly invoked.
+.Pp
+The
+.Fn swapcontext
+function
+saves the current thread context in
+.Fa "*oucp"
+and makes
+.Fa "*ucp"
+the currently active context.
+.Sh RETURN VALUES
+If successful,
+.Fn swapcontext
+returns zero;
+otherwise \-1 is returned and the global variable
+.Va errno
+is set appropriately.
+.Sh ERRORS
+The
+.Fn swapcontext
+function
+will fail if:
+.Bl -tag -width Er
+.It Bq Er ENOMEM
+There is not enough stack space in
+.Fa ucp
+to complete the operation.
+.El
+.Sh SEE ALSO
+.Xr setcontext 3 ,
+.Xr ucontext 3
diff --git a/man/manpages.lst b/man/manpages.lst
new file mode 100644 (file)
index 0000000..898750d
--- /dev/null
@@ -0,0 +1,13 @@
+# manpage tables
+# <source> <dest> [<link> <link> ...]
+
+# man3
+atomic.3 atomic.3 OSAtomicDequeue.3 OSAtomicEnqueue.3
+atomic_deprecated.3 atomic_deprecated.3 OSAtomicAdd32.3 OSAtomicAdd32Barrier.3 OSAtomicAdd64.3 OSAtomicAdd64Barrier.3 OSAtomicAnd32.3 OSAtomicAnd32Barrier.3 OSAtomicAnd32Orig.3 OSAtomicAnd32OrigBarrier.3 OSAtomicCompareAndSwap32.3 OSAtomicCompareAndSwap32Barrier.3 OSAtomicCompareAndSwap64.3 OSAtomicCompareAndSwap64Barrier.3 OSAtomicCompareAndSwapInt.3 OSAtomicCompareAndSwapIntBarrier.3 OSAtomicCompareAndSwapLong.3 OSAtomicCompareAndSwapLongBarrier.3 OSAtomicCompareAndSwapPtr.3 OSAtomicCompareAndSwapPtrBarrier.3 OSAtomicDecrement32.3 OSAtomicDecrement32Barrier.3 OSAtomicDecrement64.3 OSAtomicDecrement64Barrier.3 OSAtomicIncrement32.3 OSAtomicIncrement32Barrier.3 OSAtomicIncrement64.3 OSAtomicIncrement64Barrier.3 OSAtomicOr32.3 OSAtomicOr32Barrier.3 OSAtomicOr32Orig.3 OSAtomicOr32OrigBarrier.3 OSAtomicTestAndClear.3 OSAtomicTestAndClearBarrier.3 OSAtomicTestAndSet.3 OSAtomicTestAndSetBarrier.3 OSAtomicXor32.3 OSAtomicXor32Barrier.3 OSAtomicXor32Orig.3 OSAtomicXor32OrigBarrier.3 OSMemoryBarrier.3
+cache.3 cache.3 sys_cache_control.3 sys_icache_invalidate.3 sys_dcache_flush.3
+ffs.3 ffs.3 ffsl.3 ffsll.3 fls.3 flsl.3 flsll.3
+getcontext.3 getcontext.3 setcontext.3
+makecontext.3 makecontext.3 swapcontext.3
+setjmp.3 setjmp.3 _longjmp.3 _setjmp.3 longjmp.3 longjmperr.3 longjmperror.3 siglongjmp.3 sigsetjmp.3
+spinlock_deprecated.3 spinlock_deprecated.3 OSSpinLockLock.3 OSSpinLockTry.3 OSSpinLockUnlock.3
+ucontext.3 ucontext.3
diff --git a/man/setjmp.3 b/man/setjmp.3
new file mode 100644 (file)
index 0000000..9f37028
--- /dev/null
@@ -0,0 +1,195 @@
+.\" Copyright (c) 1990, 1991, 1993
+.\"    The Regents of the University of California.  All rights reserved.
+.\"
+.\" This code is derived from software contributed to Berkeley by
+.\" the American National Standards Committee X3, on Information
+.\" Processing Systems.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\"    notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\" 3. All advertising materials mentioning features or use of this software
+.\"    must display the following acknowledgement:
+.\"    This product includes software developed by the University of
+.\"    California, Berkeley and its contributors.
+.\" 4. Neither the name of the University nor the names of its contributors
+.\"    may be used to endorse or promote products derived from this software
+.\"    without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"     @(#)setjmp.3   8.1 (Berkeley) 6/4/93
+.\" $FreeBSD: src/lib/libc/gen/setjmp.3,v 1.9 2001/10/01 16:08:51 ru Exp $
+.\"
+.Dd June 4, 1993
+.Dt SETJMP 3
+.Os
+.Sh NAME
+.Nm _longjmp ,
+.Nm _setjmp ,
+.Nm longjmp ,
+.Nm longjmperror ,
+.Nm setjmp ,
+.Nm siglongjmp ,
+.Nm sigsetjmp
+.Nd non-local jumps
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In setjmp.h
+.Ft void
+.Fo _longjmp
+.Fa "jmp_buf env"
+.Fa "int val"
+.Fc
+.Ft int
+.Fo _setjmp
+.Fa "jmp_buf env"
+.Fc
+.Ft void
+.Fo longjmp
+.Fa "jmp_buf env"
+.Fa "int val"
+.Fc
+.Ft void
+.Fo longjmperror
+.Fa void
+.Fc
+.Ft int
+.Fo setjmp
+.Fa "jmp_buf env"
+.Fc
+.Ft void
+.Fo siglongjmp
+.Fa "sigjmp_buf env"
+.Fa "int val"
+.Fc
+.Ft int
+.Fo sigsetjmp
+.Fa "sigjmp_buf env"
+.Fa "int savemask"
+.Fc
+.Sh DESCRIPTION
+The
+.Fn sigsetjmp ,
+.Fn setjmp ,
+and
+.Fn _setjmp
+functions save their calling environment in
+.Fa env .
+Each of these functions returns 0.
+.Pp
+The corresponding
+.Fn longjmp
+functions restore the environment saved by their most recent respective
+invocations
+of the
+.Fn setjmp
+function.
+They then return, so that program execution continues
+as if the corresponding invocation of the
+.Fn setjmp
+call had just returned the value specified by
+.Fa val ,
+instead of 0.
+.Pp
+Pairs of calls may be intermixed
+(i.e., both
+.Fn sigsetjmp
+and
+.Fn siglongjmp
+and
+.Fn setjmp
+and
+.Fn longjmp
+combinations may be used in the same program); however, individual
+calls may not (e.g. the
+.Fa env
+argument to
+.Fn setjmp
+may not be passed to
+.Fn siglongjmp ) .
+.Pp
+The
+.Fn longjmp
+routines may not be called after the routine which called the
+.Fn setjmp
+routines returns.
+.Pp
+All accessible objects have values as of the time
+.Fn longjmp
+routine was called, except that the values of objects of automatic storage
+invocation duration that do not have the
+.Em volatile
+type and have been changed between the
+.Fn setjmp
+invocation and
+.Fn longjmp
+call are indeterminate.
+.Pp
+The
+.Fn setjmp Ns / Ns Fn longjmp
+pairs save and restore the signal mask while
+.Fn _setjmp Ns / Ns Fn _longjmp
+pairs save and restore only the register set and the stack.
+(See
+.Xr sigprocmask 2 . )
+.Pp
+The
+.Fn sigsetjmp Ns / Ns Fn siglongjmp
+function
+pairs save and restore the signal mask if the argument
+.Fa savemask
+is non-zero; otherwise, only the register set and the stack are saved.
+.Sh ERRORS
+If the contents of the
+.Fa env
+are corrupted, or correspond to an environment that has already returned,
+the
+.Fn longjmp
+routine calls the routine
+.Xr longjmperror 3 .
+If
+.Fn longjmperror
+returns, the program is aborted (see
+.Xr abort 3 ) .
+The default version of
+.Fn longjmperror
+prints the message
+.Dq Li longjmp botch
+to standard error and returns.
+User programs wishing to exit more gracefully should write their own
+versions of
+.Fn longjmperror .
+.Sh SEE ALSO
+.Xr sigaction 2 ,
+.Xr sigaltstack 2 ,
+.Xr signal 3
+.Sh STANDARDS
+The
+.Fn setjmp
+and
+.Fn longjmp
+functions conform to
+.St -isoC .
+The
+.Fn sigsetjmp
+and
+.Fn siglongjmp
+functions conform to
+.St -p1003.1-88 .
diff --git a/man/spinlock_deprecated.3 b/man/spinlock_deprecated.3
new file mode 100644 (file)
index 0000000..3305c32
--- /dev/null
@@ -0,0 +1,54 @@
+.Dd May 26, 2004
+.Dt SPINLOCK_DEPRECATED 3
+.Os Darwin
+.Sh NAME
+.Nm OSSpinLockTry ,
+.Nm OSSpinLockLock ,
+.Nm OSSpinLockUnlock
+.Nd deprecated atomic spin lock synchronization primitives
+.Sh SYNOPSIS
+.In libkern/OSAtomic.h
+.Ft bool
+.Fn OSSpinLockTry "OSSpinLock *lock"
+.Ft void
+.Fn OSSpinLockLock "OSSpinLock *lock"
+.Ft void
+.Fn OSSpinLockUnlock "OSSpinLock *lock"
+.Sh DESCRIPTION
+.Bf -symbolic
+These are deprecated interfaces for userspace spinlocks, provided for
+compatibility with legacy code. These interfaces should no longer be used,
+particularly in situations where threads of differing priorities may contend
+on the same spinlock.
+.Pp
+The interfaces in
+.In os/lock.h
+should be used instead in cases where a very low-level lock primitive is
+required. In general however, using higher level synchronization primitives
+such as those provided by the pthread or dispatch subsystems are preferred.
+.Ef
+.Pp
+The OSSpinLock operations use memory barriers to synchronize access to shared
+memory protected by the lock.  Preemption is possible while the lock is held.
+.Pp
+.Ft OSSpinLock
+is an integer type.  The convention is that unlocked is zero, and locked is nonzero.
+Locks must be naturally aligned and cannot be in cache-inhibited memory.
+.Pp
+.Fn OSSpinLockLock
+will spin if the lock is already held, but employs various strategies to back
+off. Because it can spin, it will generally be less cpu and power efficient than
+other synchronization primitives.
+.Pp
+.Fn OSSpinLockTry
+immediately returns false if the lock was held, true if it took the lock.
+It does not spin.
+.Pp
+.Fn OSSpinLockUnlock
+unconditionally unlocks the lock by zeroing it.
+.Sh RETURN VALUES
+.Fn OSSpinLockTry
+returns true if it took the lock, false if the lock was already held.
+.Sh SEE ALSO
+.Xr atomic 3 ,
+.Xr atomic_deprecated 3
diff --git a/man/ucontext.3 b/man/ucontext.3
new file mode 100644 (file)
index 0000000..86210de
--- /dev/null
@@ -0,0 +1,107 @@
+.\" Copyright (c) 2002 Packet Design, LLC.
+.\" All rights reserved.
+.\"
+.\" Subject to the following obligations and disclaimer of warranty,
+.\" use and redistribution of this software, in source or object code
+.\" forms, with or without modifications are expressly permitted by
+.\" Packet Design; provided, however, that:
+.\"
+.\"    (i)  Any and all reproductions of the source or object code
+.\"         must include the copyright notice above and the following
+.\"         disclaimer of warranties; and
+.\"    (ii) No rights are granted, in any manner or form, to use
+.\"         Packet Design trademarks, including the mark "PACKET DESIGN"
+.\"         on advertising, endorsements, or otherwise except as such
+.\"         appears in the above copyright notice or in the software.
+.\"
+.\" THIS SOFTWARE IS BEING PROVIDED BY PACKET DESIGN "AS IS", AND
+.\" TO THE MAXIMUM EXTENT PERMITTED BY LAW, PACKET DESIGN MAKES NO
+.\" REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING
+.\" THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED
+.\" WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
+.\" OR NON-INFRINGEMENT.  PACKET DESIGN DOES NOT WARRANT, GUARANTEE,
+.\" OR MAKE ANY REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS
+.\" OF THE USE OF THIS SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY,
+.\" RELIABILITY OR OTHERWISE.  IN NO EVENT SHALL PACKET DESIGN BE
+.\" LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING OUT OF ANY USE
+.\" OF THIS SOFTWARE, INCLUDING WITHOUT LIMITATION, ANY DIRECT,
+.\" INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE, OR CONSEQUENTIAL
+.\" DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF
+.\" USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY THEORY OF
+.\" LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+.\" THE USE OF THIS SOFTWARE, EVEN IF PACKET DESIGN IS ADVISED OF
+.\" THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" $FreeBSD: src/lib/libc/gen/ucontext.3,v 1.3 2004/07/03 22:30:08 ru Exp $
+.\"
+.Dd September 10, 2002
+.Dt UCONTEXT 3
+.Os
+.Sh NAME
+.Nm ucontext
+.Nd user thread context
+.Sh LIBRARY
+.Lb libc
+.Sh SYNOPSIS
+.In ucontext.h
+.Sh DESCRIPTION
+The
+.Vt ucontext_t
+type is a structure type suitable for holding the context for a user
+thread of execution.
+A thread's context includes its stack, saved registers, and list of
+blocked signals.
+.Pp
+The
+.Vt ucontext_t
+structure contains at least these fields:
+.Pp
+.Bl -tag -width ".Va mcontext_t\ \ uc_mcontext" -offset 3n -compact
+.It Va "ucontext_t *uc_link"
+context to assume when this one returns
+.It Va "sigset_t uc_sigmask"
+signals being blocked
+.It Va "stack_t uc_stack"
+stack area
+.It Va "mcontext_t uc_mcontext"
+saved registers
+.El
+.Pp
+The
+.Va uc_link
+field points to the context to resume when this context's entry point
+function returns.
+If
+.Va uc_link
+is equal to
+.Dv NULL ,
+then the process exits when this context returns.
+.Pp
+The
+.Va uc_mcontext
+field is machine-dependent and should be treated as opaque by
+portable applications.
+.Pp
+The following functions are defined to manipulate
+.Vt ucontext_t
+structures:
+.Pp
+.Bl -item -offset 3n -compact
+.It
+.Ft int
+.Fn getcontext "ucontext_t *" ;
+.It
+.Ft int
+.Fn setcontext "const ucontext_t *" ;
+.It
+.Ft void
+.Fn makecontext "ucontext_t *" "void \*[lp]*\*[rp]\*[lp]void\*[rp]" int ... ;
+.It
+.Ft int
+.Fn swapcontext "ucontext_t *" "const ucontext_t *" ;
+.El
+.Sh SEE ALSO
+.Xr sigaltstack 2 ,
+.Xr getcontext 3 ,
+.Xr makecontext 3
diff --git a/private/libkern/OSAtomic.h b/private/libkern/OSAtomic.h
new file mode 100644 (file)
index 0000000..7b3f296
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OSATOMIC_PRIVATE_H_
+#define _OSATOMIC_PRIVATE_H_
+
+#include <sys/cdefs.h>
+
+#if __has_include(<libkern/OSAtomicDeprecated.h>) && \
+               __has_include(<libkern/OSSpinLockDeprecated.h>) && \
+               __has_include(<libkern/OSAtomicQueue.h>)
+
+
+#include <libkern/OSAtomicDeprecated.h>
+#include <libkern/OSSpinLockDeprecated.h>
+#include <libkern/OSAtomicQueue.h>
+
+#else
+
+#include_next <libkern/OSAtomic.h>
+
+#endif
+
+#endif // _OSATOMIC_PRIVATE_H_
diff --git a/private/libkern/module.modulemap b/private/libkern/module.modulemap
new file mode 100644 (file)
index 0000000..72b650c
--- /dev/null
@@ -0,0 +1,6 @@
+module libkernPrivate [system] [extern_c] {
+       module OSAtomic {
+               header "OSAtomic.h"
+               export Darwin.libkern.OSAtomic
+       }
+}
diff --git a/src/atomics/common/MKGetTimeBaseInfo.c b/src/atomics/common/MKGetTimeBaseInfo.c
new file mode 100644 (file)
index 0000000..2139364
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(i386) || defined(__arm__)
+
+#include <mach/kern_return.h>
+#include <mach/mach_time.h>
+#include <stdint.h>
+
+extern void spin_lock(int *);
+extern void spin_unlock(int *);
+
+/* deprecated function stub */
+kern_return_t
+MKGetTimeBaseInfo(
+    uint32_t *minAbsoluteTimeDelta,
+    uint32_t *theAbsoluteTimeToNanosecondNumerator,
+    uint32_t *theAbsoluteTimeToNanosecondDenominator,
+    uint32_t *theProcessorToAbsoluteTimeNumerator,
+    uint32_t *theProcessorToAbsoluteTimeDenominator
+) {
+    static struct mach_timebase_info mti = {0};
+    static int MKGetTimeBaseInfo_spin_lock = 0;
+
+    if(mti.numer == 0) {
+       kern_return_t err;
+       spin_lock(&MKGetTimeBaseInfo_spin_lock);
+       err = mach_timebase_info(&mti);
+       spin_unlock(&MKGetTimeBaseInfo_spin_lock);
+       if(err != KERN_SUCCESS)
+           return err;
+    }
+    if(theAbsoluteTimeToNanosecondNumerator)
+       *theAbsoluteTimeToNanosecondNumerator = mti.numer;
+    if(theAbsoluteTimeToNanosecondDenominator)
+       *theAbsoluteTimeToNanosecondDenominator = mti.denom;
+    if(minAbsoluteTimeDelta)
+       *minAbsoluteTimeDelta = 1;
+    if(theProcessorToAbsoluteTimeNumerator)
+       *theProcessorToAbsoluteTimeNumerator = 1;
+    if(theProcessorToAbsoluteTimeDenominator)
+       *theProcessorToAbsoluteTimeDenominator = 1;
+    return KERN_SUCCESS;
+}
+
+#endif
diff --git a/src/atomics/i386/OSAtomic.s b/src/atomics/i386/OSAtomic.s
new file mode 100644 (file)
index 0000000..5cef29f
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <architecture/i386/asm_help.h>
+#include "os/internal_asm.h"
+
+#define ATOMIC_RET_ORIG        0
+#define ATOMIC_RET_NEW 1
+
+// compare and exchange 32-bit
+// xchg32 <new> <dst>
+.macro xchg32
+       lock
+       cmpxchgl        $0, ($1)
+.endm
+
+// compare and exchange 64-bit
+// xchg64 <dst>
+.macro xchg64
+       lock
+       cmpxchg8b       ($0)
+.endm
+
+
+// int32_t OSAtomicAdd32(int32_t theAmount, volatile int32_t *theValue);
+#define ATOMIC_ARITHMETIC(instr, orig) \
+       movl    8(%esp), %ecx   /* load 2nd arg ptr into ecx */ ;\
+       movl    (%ecx), %eax    /* load contents of ecx into eax */ ;\
+1:     movl    4(%esp), %edx   /* load 1st arg into edx */ ;\
+       instr   %eax, %edx      /* do the operation */ ;\
+       xchg32  %edx, %ecx      /* old in %eax, new in %edx, exchange into %ecx */ ;\
+       jnz     1b              /* go back if we failed to exchange */ ;\
+       .if orig == ATOMIC_RET_NEW ;\
+       movl    %edx, %eax      /* return new value */ ;\
+       .endif
+
+// bool OSAtomicTestAndSet(uint32_t n, volatile void *theAddress);
+#define ATOMIC_BIT_OP(instr) \
+       movl    4(%esp), %eax   ;\
+       movl    8(%esp), %edx   ;\
+       shldl   $3,%edx,%ecx    /* save top 3 bits of address in %ecx */ ;\
+       shll    $3,%edx         ;\
+       xorl    $7,%eax         /* bit position is numbered big endian so convert to little endian */ ;\
+       addl    %eax,%edx       /* generate bit address */ ;\
+       adcl    $0,%ecx         /* handle carry out of lower half of address */ ;\
+       movl    %edx,%eax       /* copy lower half of bit address */ ;\
+       andl    $31,%eax        /* keep bit offset in range 0..31 */ ;\
+       xorl    %eax,%edx       /* 4-byte align address */ ;\
+       shrdl   $3,%ecx,%edx    /* restore 32-bit byte address in %edx */ ;\
+       lock                    ;\
+       instr   %eax, (%edx)    ;\
+       setc    %al             ;\
+       movzbl  %al,%eax        // widen in case caller assumes we return an int
+
+// int64_t OSAtomicAdd64(int64_t theAmount, volatile int64_t *theValue);
+#define ATOMIC_ADD64() \
+       pushl   %ebx            ;\
+       pushl   %esi            ;\
+       movl    20(%esp), %esi  ;\
+       movl    0(%esi), %eax   ;\
+       movl    4(%esi), %edx   ;\
+1:     movl    12(%esp), %ebx  ;\
+       movl    16(%esp), %ecx  ;\
+       addl    %eax, %ebx      ;\
+       adcl    %edx, %ecx      ;\
+       xchg64  %esi    ;\
+       jnz     1b              ;\
+       movl    %ebx, %eax      ;\
+       movl    %ecx, %edx      ;\
+       popl    %esi            ;\
+       popl    %ebx
+
+// int64_t OSAtomicIncrement64(volatile int64_t *theValue);
+#define ATOMIC_INC64() \
+       pushl   %ebx            ;\
+       pushl   %esi            ;\
+       movl    12(%esp), %esi  ;\
+       movl    0(%esi), %eax   ;\
+       movl    4(%esi), %edx   ;\
+1:     movl    $1, %ebx        ;\
+       xorl    %ecx, %ecx      ;\
+       addl    %eax, %ebx      ;\
+       adcl    %edx, %ecx      ;\
+       xchg64  %esi    ;\
+       jnz     1b              ;\
+       movl    %ebx, %eax      ;\
+       movl    %ecx, %edx      ;\
+       popl    %esi            ;\
+       popl    %ebx
+
+// int64_t OSAtomicDecrement64(volatile int64_t *theValue);
+#define ATOMIC_DEC64() \
+       pushl   %ebx            ;\
+       pushl   %esi            ;\
+       movl    12(%esp), %esi  ;\
+       movl    0(%esi), %eax   ;\
+       movl    4(%esi), %edx   ;\
+1:     movl    $-1, %ebx       ;\
+       movl    $-1, %ecx       ;\
+       addl    %eax, %ebx      ;\
+       adcl    %edx, %ecx      ;\
+       xchg64  %esi    ;\
+       jnz     1b              ;\
+       movl    %ebx, %eax      ;\
+       movl    %ecx, %edx      ;\
+       popl    %esi            ;\
+       popl    %ebx
+
+       .text
+
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Barrier, 2)
+       ATOMIC_ARITHMETIC(andl, ATOMIC_RET_NEW)
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32Barrier, 2)
+       ATOMIC_ARITHMETIC(orl, ATOMIC_RET_NEW)
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32Barrier, 2)
+       ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_NEW)
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Orig, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32OrigBarrier, 2)
+       ATOMIC_ARITHMETIC(andl, ATOMIC_RET_ORIG)
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32Orig, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32OrigBarrier, 2)
+       ATOMIC_ARITHMETIC(orl, ATOMIC_RET_ORIG)
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32Orig, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32OrigBarrier, 2)
+       ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_ORIG)
+       ret
+
+// bool OSAtomicCompareAndSwapInt(int oldValue, int newValue, volatile int *theValue);
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtr, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtrBarrier, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapInt, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapIntBarrier, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLong, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLongBarrier, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32Barrier, 2)
+       movl    4(%esp), %eax
+       movl    8(%esp), %edx
+       movl    12(%esp), %ecx
+       xchg32  %edx, %ecx
+       sete    %al
+       movzbl  %al,%eax        // widen in case caller assumes we return an int
+       ret
+
+// bool OSAtomicCompareAndSwap64(int64_t oldValue, int64_t newValue, volatile int64_t *theValue);
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64Barrier, 2)
+       pushl   %ebx            // push out spare stuff for space
+       pushl   %esi
+       movl    12(%esp), %eax  // load in 1st 64-bit parameter
+       movl    16(%esp), %edx
+       movl    20(%esp), %ebx  // load in 2nd 64-bit parameter
+       movl    24(%esp), %ecx
+       movl    28(%esp), %esi  // load in destination address
+       xchg64  %esi    // compare and swap 64-bit
+       sete    %al
+       movzbl  %al,%eax        // widen in case caller assumes we return an int
+       popl    %esi
+       popl    %ebx
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd32Barrier, 2)
+       movl    4(%esp), %eax
+       movl    8(%esp), %edx
+       movl    %eax, %ecx
+       lock
+       xaddl   %eax, (%edx)
+       addl    %ecx, %eax
+       ret
+
+OS_VARIANT_FUNCTION_START(OSAtomicIncrement32, up, 2)
+OS_VARIANT_FUNCTION_START(OSAtomicIncrement32Barrier, up, 2)
+       movl    4(%esp), %ecx
+       movl    $1, %eax
+       xaddl   %eax, (%ecx)
+       incl    %eax
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32Barrier, 2)
+       movl    4(%esp), %ecx
+       movl    $1, %eax
+       lock
+       xaddl   %eax, (%ecx)
+       incl    %eax
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32Barrier, 2)
+       movl    4(%esp), %ecx
+       movl    $-1, %eax
+       lock
+       xaddl   %eax, (%ecx)
+       decl    %eax
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd64Barrier, 2)
+       ATOMIC_ADD64()
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64Barrier, 2)
+       ATOMIC_INC64()
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64Barrier, 2)
+       ATOMIC_DEC64()
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSet, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSetBarrier, 2)
+       ATOMIC_BIT_OP(btsl)
+       ret
+
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClear, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClearBarrier, 2)
+       ATOMIC_BIT_OP(btrl)
+       ret
+       
+// OSMemoryBarrier()
+// These are used both in 32 and 64-bit mode.  We use a fence even on UP
+// machines, so this function can be used with nontemporal stores.
+
+OS_ATOMIC_FUNCTION_START(OSMemoryBarrier, 4)
+       mfence
+       ret
+
+       /*
+        *      typedef volatile struct {
+        *              void    *opaque1;  <-- ptr to 1st queue element or null
+        *              long     opaque2;  <-- generation count
+        *      } OSQueueHead;
+        *
+        * void  OSAtomicEnqueue( OSQueueHead *list, void *new, size_t offset);
+        */
+OS_ATOMIC_FUNCTION_START(OSAtomicEnqueue, 2)
+       pushl   %edi
+       pushl   %esi
+       pushl   %ebx
+       movl    16(%esp),%edi   // %edi == ptr to list head
+       movl    20(%esp),%ebx   // %ebx == new
+       movl    24(%esp),%esi   // %esi == offset
+       movl    (%edi),%eax     // %eax == ptr to 1st element in Q
+       movl    4(%edi),%edx    // %edx == current generation count
+1:     movl    %eax,(%ebx,%esi)// link to old list head from new element
+       movl    %edx,%ecx
+       incl    %ecx            // increment generation count
+       xchg64  %edi    // ...push on new element
+       jnz     1b
+       popl    %ebx
+       popl    %esi
+       popl    %edi
+       ret
+
+/* void* OSAtomicDequeue( OSQueueHead *list, size_t offset); */
+OS_ATOMIC_FUNCTION_START(OSAtomicDequeue, 2)
+       pushl   %edi
+       pushl   %esi
+       pushl   %ebx
+       movl    16(%esp),%edi   // %edi == ptr to list head
+       movl    20(%esp),%esi   // %esi == offset
+       movl    (%edi),%eax     // %eax == ptr to 1st element in Q
+       movl    4(%edi),%edx    // %edx == current generation count
+1:     testl   %eax,%eax       // list empty?
+       jz      2f              // yes
+       movl    (%eax,%esi),%ebx // point to 2nd in Q
+       movl    %edx,%ecx
+       incl    %ecx            // increment generation count
+       xchg64  %edi    // ...pop off 1st element
+       jnz     1b
+2:     popl    %ebx
+       popl    %esi
+       popl    %edi
+       ret                     // ptr to 1st element in Q still in %eax
+
+/*
+ *     typedef volatile struct {
+ *             void    *opaque1;  <-- ptr to first queue element or null
+ *             void    *opaque2;  <-- ptr to last queue element or null
+ *             int      opaque3;  <-- spinlock
+ *     } OSFifoQueueHead;
+ *
+ * void  OSAtomicFifoEnqueue( OSFifoQueueHead *list, void *new, size_t offset);
+ */
+OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue, 2)
+       pushl   %edi
+       pushl   %esi
+       pushl   %ebx
+       xorl    %ebx,%ebx       // clear "preemption pending" flag
+       movl    16(%esp),%edi   // %edi == ptr to list head
+       movl    20(%esp),%esi   // %esi == new
+       EXTERN_TO_REG(_commpage_pfz_base,%ecx)
+       movl    (%ecx), %ecx
+       addl    $(_COMM_TEXT_PFZ_ENQUEUE_OFFSET), %ecx
+       movl    24(%esp),%edx   // %edx == offset
+       call    *%ecx
+       testl   %ebx,%ebx       // pending preemption?
+       jz      1f
+       call    _preempt
+1:     popl    %ebx
+       popl    %esi
+       popl    %edi
+       ret
+       
+/* void* OSAtomicFifoDequeue( OSFifoQueueHead *list, size_t offset); */
+OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue, 2)
+       pushl   %edi
+       pushl   %esi
+       pushl   %ebx
+       xorl    %ebx,%ebx       // clear "preemption pending" flag
+       movl    16(%esp),%edi   // %edi == ptr to list head
+       PICIFY(_commpage_pfz_base)
+       movl    (%edx),%ecx
+       movl    20(%esp),%edx   // %edx == offset
+       addl    $(_COMM_TEXT_PFZ_DEQUEUE_OFFSET), %ecx
+       call    *%ecx
+       testl   %ebx,%ebx       // pending preemption?
+       jz      1f
+       pushl   %eax            // save return value across sysenter
+       call    _preempt
+       popl    %eax
+1:     popl    %ebx
+       popl    %esi
+       popl    %edi
+       ret                     // ptr to 1st element in Q still in %eax
+
+// Local Variables:
+// tab-width: 8
+// End:
diff --git a/src/atomics/i386/pfz.s b/src/atomics/i386/pfz.s
new file mode 100644 (file)
index 0000000..ca6940b
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2006-2013 Apple, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <machine/cpu_capabilities.h>
+#include <architecture/i386/asm_help.h>
+
+       .text
+
+/* Subroutine to make a preempt syscall.  Called when we notice %ebx is
+ * nonzero after returning from a PFZ subroutine.
+ * When we enter kernel:
+ *     %edx = return address
+ *     %ecx = stack ptr
+ * Destroys %eax, %ecx, and %edx.
+ */
+       .align  4
+       .private_extern _preempt
+       .globl _preempt
+_preempt:
+       popl    %edx            // get return address
+       movl    %esp,%ecx       // save stack ptr here
+       movl    $(-58),%eax     /* 58 = pfz_exit */
+       xorl    %ebx,%ebx       // clear "preemption pending" flag
+       sysenter
+
+/* Subroutine to back off if we cannot get the spinlock.  Called
+ * after a few attempts inline in the PFZ subroutines.  This code is
+ * not in the PFZ.
+ *     %edi = ptr to queue head structure
+ *     %ebx = preemption flag (nonzero if preemption pending)
+ * Destroys %eax.
+ */
+
+       .align  4
+       .private_extern _backoff
+       .globl _backoff
+_backoff:
+       testl   %ebx,%ebx       // does kernel want to preempt us?
+       jz      1f              // no
+       xorl    %ebx,%ebx       // yes, clear flag
+       pushl   %edx            // preserve regs used by preempt syscall
+       pushl   %ecx
+       call    _preempt
+       popl    %ecx
+       popl    %edx
+1:
+       pause                   // SMT-friendly backoff
+       cmpl    $0,8(%edi)      // sniff the lockword
+       jnz     1b              // loop if still taken
+       ret                     // lockword is free, so reenter PFZ
diff --git a/src/atomics/init.c b/src/atomics/init.c
new file mode 100644 (file)
index 0000000..58e9ed5
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <machine/cpu_capabilities.h>
+
+#include <_simple.h>
+
+#include <platform/string.h>
+#include <platform/compat.h>
+
+__attribute__ ((visibility ("hidden")))
+uintptr_t commpage_pfz_base=0;  /* base address of the commpage preemption-free zone */
+
+/*
+ * Parse the "pfz=0x..." entry of the kernel-supplied apple[] vector (the
+ * hex base address of the commpage preemption-free zone), then scrub the
+ * entry from memory.  Falls back to _COMM_PAGE_TEXT_START when the entry
+ * is absent or malformed.
+ */
+__attribute__ ((visibility ("hidden")))
+void
+__pfz_setup(const char *apple[])
+{
+    const char *p = _simple_getenv(apple, "pfz");
+    if (p != NULL) {
+        const char *q;
+
+        /* We are given hex starting with 0x */
+        if (p[0] != '0' || p[1] != 'x') {
+            goto __pfz_setup_clear;
+        }
+
+        for (q = p + 2; *q; q++) {
+            commpage_pfz_base <<= 4; // *= 16 (shift in the next hex digit)
+
+            if ('0' <= *q && *q <= '9') {
+                commpage_pfz_base += *q - '0';
+            } else if ('a' <= *q && *q <= 'f') {
+                commpage_pfz_base += *q - 'a' + 10;
+            } else if ('A' <= *q && *q <= 'F') {
+                commpage_pfz_base += *q - 'A' + 10;
+            } else {
+                commpage_pfz_base=0;  /* malformed digit: discard partial parse */
+                goto __pfz_setup_clear;
+            }
+        }
+
+__pfz_setup_clear:
+        /* Zero the whole "pfz=<value>" string so the address does not linger
+         * in process memory; p points just past "pfz=", hence the -4/+4. */
+        bzero((void *)((uintptr_t)p - 4), strlen(p) + 4);
+    }
+
+    if (commpage_pfz_base == 0) {
+        commpage_pfz_base = _COMM_PAGE_TEXT_START;  /* default PFZ location */
+    }
+}
diff --git a/src/atomics/x86_64/OSAtomic.s b/src/atomics/x86_64/OSAtomic.s
new file mode 100644 (file)
index 0000000..543aacb
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <architecture/i386/asm_help.h>
+#include "os/internal_asm.h"
+
+.text
+
+// Selector for ATOMIC_ARITHMETIC: return the original or the updated value.
+#define ATOMIC_RET_ORIG        0
+#define ATOMIC_RET_NEW 1
+
+// compare and exchange 32-bit
+// xchg32 <new> <dst>  -- cmpxchg (%dst) against %eax, installing <new>; ZF set on success
+.macro xchg32
+       lock
+       cmpxchgl        $0, ($1)
+.endm
+
+// xchg64 <new> <dst>  -- 64-bit flavor of the above (compares against %rax)
+.macro xchg64
+       lock
+       cmpxchg         $0, ($1)
+.endm
+
+// Load *value (%rsi), apply instr with the mask (%edi), and retry the
+// cmpxchg until no other CPU raced us between the load and the swap.
+#define        ATOMIC_ARITHMETIC(instr, orig)  \
+       movl    (%rsi), %eax    /* load current *value -> eax */ ;\
+1:     movl    %eax, %edx      /* copy value to new reg */ ;\
+       instr   %edi, %edx      /* apply instr to %edx with the mask (arg1) */ ;\
+       xchg32  %edx, %rsi      /* do the compare swap (see macro above) */ ;\
+       jnz     1b              /* jump if failed (%eax reloaded by cmpxchg) */ ;\
+       .if orig == 1           /* to return the new value, overwrite eax */ ;\
+       movl    %edx, %eax      /* return the new value */ ;\
+       .endif
+
+// Used in OSAtomicTestAndSet( uint32_t n, void *value ), assumes ABI parameter locations
+// Manpage says bit to test/set is (0x80 >> (n & 7)) of byte (addr + (n >> 3))
+#define        ATOMIC_BIT_OP(instr)    \
+       xorl    $7, %edi        /* bit position is numbered big endian so convert to little endian */ ;\
+       shlq    $3, %rsi        ;\
+       addq    %rdi, %rsi      /* generate bit address */ ;\
+       movq    %rsi, %rdi      ;\
+       andq    $31, %rdi       /* keep bit offset in range 0..31 */ ;\
+       xorq    %rdi, %rsi      /* 4-byte align address */ ;\
+       shrq    $3, %rsi        /* get 4-byte aligned address */ ;\
+       lock                    /* lock the bit test */ ;\
+       instr   %edi, (%rsi)    /* do the bit test, supplied into the macro */ ;\
+       setc    %al             ;\
+       movzbl  %al,%eax        /* widen in case caller assumes we return an int */
+
+// uint32_t OSAtomicAnd32( uint32_t mask, uint32_t *value);
+// Barrier forms share the code: lock'd RMW ops are full barriers on x86.
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Barrier, 2)
+       ATOMIC_ARITHMETIC(andl, ATOMIC_RET_NEW)
+       ret
+
+// uint32_t OSAtomicOr32( uint32_t mask, uint32_t *value);
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32Barrier, 2)
+       ATOMIC_ARITHMETIC(orl, ATOMIC_RET_NEW)
+       ret
+
+// uint32_t OSAtomicXor32( uint32_t mask, uint32_t *value);
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32Barrier, 2)
+       ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_NEW)
+       ret
+
+// uint32_t OSAtomicAnd32Orig( uint32_t mask, uint32_t *value);
+// "Orig" variants return the value *before* the operation.
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Orig, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAnd32OrigBarrier, 2)
+       ATOMIC_ARITHMETIC(andl, ATOMIC_RET_ORIG)
+       ret
+
+// uint32_t OSAtomicOr32Orig( uint32_t mask, uint32_t *value);
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32Orig, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicOr32OrigBarrier, 2)
+       ATOMIC_ARITHMETIC(orl, ATOMIC_RET_ORIG)
+       ret
+
+// uint32_t OSAtomicXor32Orig( uint32_t mask, uint32_t *value);
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32Orig, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicXor32OrigBarrier, 2)
+       ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_ORIG)
+       ret
+
+// bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapInt, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapIntBarrier, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32Barrier, 2)
+       movl    %edi, %eax      // cmpxchg compares against %eax, so load "old" there
+       xchg32  %esi, %rdx      // try to install "new" into *value
+       sete    %al             // ZF set iff the swap happened
+       movzbl  %al,%eax        // widen in case caller assumes we return an int
+       ret
+
+// bool OSAtomicCompareAndSwap64( int64_t old, int64_t new, int64_t *value);
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtr, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtrBarrier, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLong, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLongBarrier, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64Barrier, 2)
+       mov     %rdi, %rax      // cmpxchg compares against %rax, so load "old" there
+       xchg64  %rsi, %rdx      // try to install "new" into *value
+       sete    %al             // ZF set iff the swap happened
+       movzbl  %al,%eax        // widen in case caller assumes we return an int
+       ret
+
+// int32_t OSAtomicAdd32( int32_t amt, int32_t *value );
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd32Barrier, 2)
+       movl    %edi, %eax      // save amt to add
+       lock                    // lock prefix breaks tabs ;)
+       xaddl   %edi, (%rsi)    // swap and add value, returns old value in %edi
+       addl    %edi, %eax      // add old value to amt as return value
+       ret
+
+// int32_t OSAtomicIncrement32(int32_t *value );
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32Barrier, 2)
+       movl    $1, %eax        // load increment
+       lock                    // lock prefix breaks tabs ;)
+       xaddl   %eax, (%rdi)    // swap and add value, returns old value in %eax
+       incl    %eax    // increment old value as return value
+       ret
+
+// int32_t OSAtomicDecrement32(int32_t *value );
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32Barrier, 2)
+       movl    $-1, %eax       // load decrement
+       lock                    // lock prefix breaks tabs ;)
+       xaddl   %eax, (%rdi)    // swap and add value, returns old value in %eax
+       decl    %eax    // decrement old value as return value
+       ret
+
+// int64_t OSAtomicAdd64( int64_t amt, int64_t *value );
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicAdd64Barrier, 2)
+       movq    %rdi, %rax      // save amt to add
+       lock
+       xaddq   %rdi, (%rsi)    // swap and add value, returns old value in %rdi
+       addq    %rdi, %rax      // add old value to amt as return value
+       ret
+
+// int64_t OSAtomicIncrement64(int64_t *value );
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64Barrier, 2)
+       movq    $1, %rax        // load increment
+       lock                    // lock prefix breaks tabs ;)
+       xaddq   %rax, (%rdi)    // swap and add value, returns old value in %rax
+       incq    %rax    // increment old value as return value
+       ret
+
+// int64_t OSAtomicDecrement64(int64_t *value );
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64Barrier, 2)
+       movq    $-1, %rax       // load decrement
+       lock                    // lock prefix breaks tabs ;)
+       xaddq   %rax, (%rdi)    // swap and add value, returns old value in %rax
+       decq    %rax    // decrement old value as return value
+       ret
+
+// bool OSAtomicTestAndSet( uint32_t n, void *value );
+// Bit numbering is big-endian within each byte: bit n is (0x80 >> (n & 7)).
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSet, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSetBarrier, 2)
+       ATOMIC_BIT_OP(btsl)     // returns the previous value of the bit
+       ret
+
+// bool OSAtomicTestAndClear( uint32_t n, void *value );
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClear, 2)
+OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClearBarrier, 2)
+       ATOMIC_BIT_OP(btrl)     // returns the previous value of the bit
+       ret
+
+// void OSMemoryBarrier( void );
+OS_ATOMIC_FUNCTION_START(OSMemoryBarrier, 2)
+       mfence                  // full load/store fence
+       ret
+
+/*
+ *     typedef volatile struct {
+ *             void    *opaque1;  <-- ptr to 1st queue element or null
+ *             long     opaque2;  <-- generation count
+ *     } OSQueueHead;
+ *
+ * void  OSAtomicEnqueue( OSQueueHead *list, void *new, size_t offset);
+ *
+ * Lock-free LIFO push: cmpxchg16b compares %rdx:%rax (gen:head) with the
+ * 16-byte head and, on match, installs %rcx:%rbx (gen+1:new) atomically.
+ * The generation count defends against the classic ABA problem.
+ */
+OS_ATOMIC_FUNCTION_START(OSAtomicEnqueue, 2)
+       pushq   %rbx            // %rdi == list head, %rsi == new, %rdx == offset
+       movq    %rsi,%rbx       // %rbx == new
+       movq    %rdx,%rsi       // %rsi == offset
+       movq    (%rdi),%rax     // %rax == ptr to 1st element in Q
+       movq    8(%rdi),%rdx    // %rdx == current generation count
+1:
+       movq    %rax,(%rbx,%rsi)// link to old list head from new element
+       movq    %rdx,%rcx
+       incq    %rcx            // increment generation count
+       lock                    // always lock for now...
+       cmpxchg16b (%rdi)       // ...push on new element
+       jnz     1b              // lost a race; %rdx:%rax reloaded, retry
+       popq    %rbx
+       ret
+
+
+       /* void* OSAtomicDequeue( OSQueueHead *list, size_t offset); */
+OS_ATOMIC_FUNCTION_START(OSAtomicDequeue, 2)
+       pushq   %rbx            // %rdi == list head, %rsi == offset
+       movq    (%rdi),%rax     // %rax == ptr to 1st element in Q
+       movq    8(%rdi),%rdx    // %rdx == current generation count
+1:
+       testq   %rax,%rax       // list empty?
+       jz      2f              // yes
+       movq    (%rax,%rsi),%rbx // point to 2nd in Q
+       movq    %rdx,%rcx
+       incq    %rcx            // increment generation count
+       lock                    // always lock for now...
+       cmpxchg16b (%rdi)       // ...pop off 1st element
+       jnz     1b              // lost a race; %rdx:%rax reloaded, retry
+2:
+       popq    %rbx
+       ret                     // ptr to 1st element in Q still in %rax
+
+/*
+ *     typedef volatile struct {
+ *             void    *opaque1;  <-- ptr to first queue element or null
+ *             void    *opaque2;  <-- ptr to last queue element or null
+ *             int      opaque3;  <-- spinlock
+ *     } OSFifoQueueHead;
+ *
+ * void  OSAtomicFifoEnqueue( OSFifoQueueHead *list, void *new, size_t offset);
+ *
+ * The real work happens in the commpage preemption-free zone (PFZ);
+ * __pfz_setup() located its base in _commpage_pfz_base.  The PFZ code
+ * sets %ebx nonzero when the kernel has a preemption pending for us.
+ */
+OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue, 2)
+       pushq   %rbx
+       xorl    %ebx,%ebx       // clear "preemption pending" flag
+       movq    _commpage_pfz_base(%rip),%rcx
+       addq    $(_COMM_TEXT_PFZ_ENQUEUE_OFFSET), %rcx
+       call    *%rcx           // enqueue inside the PFZ
+       testl   %ebx,%ebx       // pending preemption?
+       jz      1f
+       call    _preempt        // call into the kernel to pfz_exit
+1:
+       popq    %rbx
+       ret
+
+
+/* void* OSAtomicFifoDequeue( OSFifoQueueHead *list, size_t offset); */
+OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue, 2)
+       pushq   %rbx
+       xorl    %ebx,%ebx       // clear "preemption pending" flag
+       movq    _commpage_pfz_base(%rip), %rcx
+       movq    %rsi,%rdx       // move offset to %rdx to be like the Enqueue case
+       addq    $(_COMM_TEXT_PFZ_DEQUEUE_OFFSET), %rcx
+       call    *%rcx           // dequeue inside the PFZ
+       testl   %ebx,%ebx       // pending preemption?
+       jz      1f
+       call    _preempt        // call into the kernel to pfz_exit
+1:
+       popq    %rbx
+       ret                     // ptr to 1st element in Q in %rax
+
+// Local Variables:
+// tab-width: 8
+// End:
diff --git a/src/atomics/x86_64/pfz.s b/src/atomics/x86_64/pfz.s
new file mode 100644 (file)
index 0000000..9647b19
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2006-2013 Apple, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <machine/cpu_capabilities.h>
+#include <architecture/i386/asm_help.h>
+#include <mach/i386/syscall_sw.h>
+
+       .text
+
+/* Subroutine to make a preempt syscall.  Called when we notice %ebx is
+ * nonzero after returning from a PFZ subroutine.  Not in PFZ.
+ *
+ * All registers preserved (but does clear the %ebx preemption flag).
+ */
+       .align 2
+       .private_extern _preempt
+       .globl _preempt
+_preempt:
+       pushq   %rax            // preserve the regs the syscall clobbers
+       pushq   %rcx            // (syscall overwrites %rcx and %r11)
+       pushq   %r11
+       movl    $(SYSCALL_CONSTRUCT_MACH(58)),%eax      /* 58 = pfz_exit */
+       xorl    %ebx,%ebx       // clear "preemption pending" flag
+       syscall
+       popq    %r11
+       popq    %rcx
+       popq    %rax
+       ret
+
+/* Subroutine to back off if we cannot get the spinlock.  Called
+ * after a few attempts inline in the PFZ subroutines.  This code is
+ * not in the PFZ.
+ *     %rdi = ptr to queue head structure
+ *     %ebx = preemption flag (nonzero if preemption pending)
+ * Uses: %rax.
+ */
+       .align 2
+       .private_extern _backoff
+       .globl _backoff
+_backoff:
+       testl   %ebx,%ebx       // does kernel want to preempt us?
+       jz      1f              // no
+       call    _preempt        // yes: take the pending preemption (clears %ebx)
+1:
+       pause                   // SMT-friendly backoff
+       cmpl    $0,16(%rdi)     // sniff the lockword (offset 16 in queue head struct)
+       jnz     1b              // loop if still taken
+       ret                     // lockword is free, so reenter PFZ
diff --git a/src/cachecontrol/arm/cache.s b/src/cachecontrol/arm/cache.s
new file mode 100644 (file)
index 0000000..e4c8aab
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+    .text
+    .align 2
+
+#include <mach/arm/syscall_sw.h>
+
+/* void sys_icache_invalidate(addr_t start, int length) */
+.globl _sys_icache_invalidate
+_sys_icache_invalidate:
+       /* fast trap for icache_invalidate; r0/r1 already hold start/length */
+       mov     r3, #0                  /* selector 0 = icache operation */
+       mov     r12, #0x80000000        /* trap number (NOTE(review): confirm against syscall_sw.h) */
+       swi     #SWI_SYSCALL
+       bx      lr
+
+/* void sys_dcache_flush(addr_t start, int length) */
+.globl _sys_dcache_flush
+_sys_dcache_flush:
+       /* fast trap for dcache_flush; r0/r1 already hold start/length */
+       mov     r3, #1                  /* selector 1 = dcache operation */
+       mov     r12, #0x80000000        /* trap number (NOTE(review): confirm against syscall_sw.h) */
+       swi     #SWI_SYSCALL
+       bx      lr
diff --git a/src/cachecontrol/arm64/cache.s b/src/cachecontrol/arm64/cache.s
new file mode 100644 (file)
index 0000000..d59afdd
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <mach/arm/syscall_sw.h>
+
+/* void sys_icache_invalidate(addr_t start, int length) */
+.globl _sys_icache_invalidate
+_sys_icache_invalidate:
+       /* fast trap for icache_invalidate; x0/x1 already hold start/length */
+       mov     x3, #0                  /* selector 0 = icache operation */
+       mov     x16, #0x80000000        /* trap number (NOTE(review): confirm against syscall_sw.h) */
+       svc     #SWI_SYSCALL
+       ret
+
+/* void sys_dcache_flush(addr_t start, int length) */
+.globl _sys_dcache_flush
+_sys_dcache_flush:
+       /* fast trap for dcache_flush; x0/x1 already hold start/length */
+       mov     x3, #1                  /* selector 1 = dcache operation */
+       mov     x16, #0x80000000        /* trap number (NOTE(review): confirm against syscall_sw.h) */
+       svc     #SWI_SYSCALL
+       ret
+
diff --git a/src/cachecontrol/generic/cache.c b/src/cachecontrol/generic/cache.c
new file mode 100644 (file)
index 0000000..1fc465a
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2003-2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/* cache control */
+
+#include <stddef.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <libkern/OSCacheControl.h>
+
+int
+sys_cache_control(int function, void *start, size_t len)
+{
+       /* Dispatch the requested cache maintenance operation over
+        * [start, start+len); unknown selectors report ENOTSUP. */
+       int result = 0;
+
+       if (function == kCacheFunctionPrepareForExecution) {
+               /* make freshly written instructions visible to execution */
+               sys_icache_invalidate(start, len);
+       } else if (function == kCacheFunctionFlushDcache) {
+               /* push dirty data cache lines out to memory */
+               sys_dcache_flush(start, len);
+       } else {
+               result = ENOTSUP;
+       }
+
+       return result;
+}
diff --git a/src/cachecontrol/i386/cache.s b/src/cachecontrol/i386/cache.s
new file mode 100644 (file)
index 0000000..ca4b958
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+ #include <machine/cpu_capabilities.h>
+
+
+    .text
+    .align 4, 0x00
+
+/* void sys_icache_invalidate(addr_t start, int length) */
+
+       .globl  _sys_icache_invalidate
+_sys_icache_invalidate:
+       // This is a NOP on intel processors, since the intent of the API
+       // is to make data executable, and Intel L1Is are coherent with L1D.
+       ret
+
+
+/* void sys_dcache_flush(addr_t start, int length)  */
+
+       .globl  _sys_dcache_flush
+_sys_dcache_flush:
+       movl    8(%esp),%ecx            // get length
+       movl    4(%esp),%edx            // get ptr
+       testl   %ecx,%ecx               // length 0?
+       jz      2f                      // yes
+       mfence                          // ensure previous stores make it to memory
+       clflush -1(%edx,%ecx)           // make sure last line is flushed
+1:
+       clflush (%edx)                  // flush a line
+       addl    $64,%edx                // advance one 64-byte cache line
+       subl    $64,%ecx                // CF set when fewer than 64 bytes remained
+       ja      1b                      // loop while more than a full line remained
+       mfence                          // make sure memory is updated before we return
+2:
+       ret
diff --git a/src/cachecontrol/x86_64/cache.s b/src/cachecontrol/x86_64/cache.s
new file mode 100644 (file)
index 0000000..f7a3648
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+ #include <machine/cpu_capabilities.h>
+
+
+    .text
+    .align 4, 0x00
+
+/* void sys_icache_invalidate(addr_t start, int length) */
+
+       .globl  _sys_icache_invalidate
+_sys_icache_invalidate:
+       // This is a NOP on intel processors, since the intent of the API
+       // is to make data executable, and Intel L1Is are coherent with L1D.
+       ret
+
+
+/* void sys_dcache_flush(addr_t start, int length)  */
+
+       .globl  _sys_dcache_flush
+_sys_dcache_flush:
+       testq   %rsi,%rsi               // length 0?  (%rdi = ptr, %rsi = length)
+       jz      2f                      // yes
+       mfence                          // ensure previous stores make it to memory
+       clflush -1(%rdi,%rsi)           // make sure last line is flushed
+1:
+       clflush (%rdi)                  // flush a line
+       addq    $64,%rdi                // advance one 64-byte cache line
+       subq    $64,%rsi                // CF set when fewer than 64 bytes remained
+       ja      1b                      // loop while more than a full line remained
+       mfence                          // make sure memory is updated before we return
+2:
+       ret
diff --git a/src/force_libplatform_to_build.c b/src/force_libplatform_to_build.c
new file mode 100644 (file)
index 0000000..9b0b97c
--- /dev/null
@@ -0,0 +1,6 @@
+//  Xcode will not build a library unless it contains at least one module.
+//  Absent this requirement, this library would be composed entirely by
+//  linking the component static libraries together, but to satisfy it,
+//  we must have a C file.
+
+typedef int theCStandardDoesNotAllowAnEmptyModule;
diff --git a/src/init.c b/src/init.c
new file mode 100644 (file)
index 0000000..7e8bc6a
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <stdlib.h>
+#include <TargetConditionals.h>
+
+struct ProgramVars; /* forward reference */
+
+extern void _simple_asl_init(const char *envp[], const struct ProgramVars *vars);
+extern void __pfz_setup(const char *apple[]);
+
+
+/* Early libplatform initializer: records the commpage PFZ base from the
+ * apple[] vector (except in the Simulator) and sets up _simple's ASL
+ * support.  Called during process startup with kernel-supplied vectors. */
+void __libplatform_init(void *future_use __unused, const char *envp[], const char *apple[], const struct ProgramVars *vars) {
+
+    /* In the Simulator, we just provide _simple for dyld */
+#if !TARGET_IPHONE_SIMULATOR
+    __pfz_setup(apple);  /* locate the commpage preemption-free zone */
+#endif
+    _simple_asl_init(envp, vars);
+
+}
diff --git a/src/introspection/introspection.c b/src/introspection/introspection.c
new file mode 100644 (file)
index 0000000..c4d451b
--- /dev/null
@@ -0,0 +1,524 @@
+#include "os/internal.h"
+#include "platform/introspection_private.h"
+#include "introspection_internal.h"
+
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/mach_vm.h>
+#include <mach/task.h>
+#include <mach/thread_act.h>
+
+#include <sys/sysctl.h>
+
+#include "libkern/OSAtomic.h"
+
+// Returns the number of thread entries that can be stored in a task page.
+// Number of thread entries that can be stored in a task page, after
+// reserving one entry's worth of space for the page header.
+static unsigned int
+_platform_threads_per_page(void)
+{
+       size_t slots = vm_page_size / sizeof(struct platform_thread_s);
+       return (int)slots - 1; // one slot is consumed by the header
+}
+
+// Returns the page-aligned base address for a given pthread structure address.
+// Page-aligned base address for a given (remote) pthread structure address.
+// Assumes the structure is wholly contained in one page — XXX approximation.
+static mach_vm_size_t
+_platform_pthread_addr(mach_vm_address_t addr) {
+       return trunc_page(addr); // XXX approximation
+}
+
+// Returns the page-aligned size for a given pthread structure address.
+// Size of the mapping covering a given pthread structure address.  Always a
+// single page here ('addr' is deliberately unused) — XXX approximation.
+static mach_vm_size_t
+_platform_pthread_size(mach_vm_address_t addr) {
+       return vm_page_size; // XXX approximation
+}
+
+// Release the resources held by one thread slot: the thread act send right
+// and the local mapping of the remote pthread page.  Safe on an empty slot.
+static kern_return_t
+_platform_thread_deallocate(platform_thread_t thread)
+{
+       // BUGFIX: 'ret' was previously uninitialized; when pthread_addr was 0
+       // the function returned stack garbage to the caller.
+       kern_return_t ret = KERN_SUCCESS;
+       if (MACH_PORT_VALID(thread->act)) {
+               mach_port_deallocate(mach_task_self(), thread->act);
+               thread->act = MACH_PORT_NULL;
+       }
+
+       if (thread->pthread_addr != 0) {
+               ret = mach_vm_deallocate(mach_task_self(),
+                                                                _platform_pthread_addr(thread->pthread_addr),
+                                                                _platform_pthread_size(thread->pthread_addr));
+               thread->pthread_addr = 0;
+       }
+       return ret;
+}
+
+// Tear down a task handle: drop the task port reference, release every
+// thread slot, and deallocate each page in the chain.  Returns the result of
+// the *last* page deallocation only (earlier errors are overwritten).
+static kern_return_t
+_platform_task_deallocate(platform_task_t task)
+{
+       kern_return_t ret;
+
+       if (!task) {
+               return KERN_INVALID_TASK;
+       }
+
+       task_t port = task->metadata.port;
+       if (port != MACH_PORT_NULL) {
+               mach_port_deallocate(mach_task_self(), port);
+       }
+
+       platform_task_t ptr = task;
+       do {
+               mach_vm_address_t addr = (mach_vm_address_t)ptr;
+
+               // Deallocate threads.
+               // NOTE(review): the '- start' upper bound skips the final slot on
+               // the head page; harmless because the matching scan loop in
+               // _platform_task_add_mach_thread uses the same bound — confirm.
+               int i, start = (ptr == task) ? 1 : 0; // Skip over meta data.
+               for (i = start; i < _platform_threads_per_page() - start; ++i) {
+                       _platform_thread_deallocate(&ptr->threads[i]);
+               }
+
+               // Read the next-page link *before* freeing this page.
+               ptr = ptr->header.next;
+               ret = mach_vm_deallocate(mach_task_self(), addr, vm_page_size);
+       } while (ptr);
+
+       return ret;
+}
+
+extern int __sysctl(int *, unsigned int, void *, size_t *, void *, size_t);
+
+// Determine whether the target task is a 64-bit process and record the
+// answer in task->metadata.is_64_bit.  Tries TASK_FLAGS_INFO first; on
+// kernels that do not support it (KERN_INVALID_ARGUMENT), falls back to the
+// kinfo_proc P_LP64 flag via sysctl.
+static kern_return_t
+_platform_task_query_64_bit(platform_task_t task)
+{
+       task_flags_info_data_t task_flags_info;
+       mach_msg_type_number_t count = TASK_FLAGS_INFO_COUNT;
+
+       kern_return_t ret = task_info(task->metadata.port, TASK_FLAGS_INFO, (task_info_t) &task_flags_info, &count);
+       if (ret == KERN_SUCCESS) {
+               task->metadata.is_64_bit = (task_flags_info.flags & TF_LP64) ? true : false;
+       } else if (ret == KERN_INVALID_ARGUMENT) {
+               pid_t pid;
+               // BUGFIX: this used to declare a new 'ret', shadowing the outer
+               // one, so a successful sysctl fallback still reported
+               // KERN_INVALID_ARGUMENT and made platform_task_attach() fail.
+               ret = pid_for_task(task->metadata.port, &pid);
+               if (ret != KERN_SUCCESS) return ret;
+
+               struct kinfo_proc info;
+               size_t size = sizeof(info);
+               int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
+               unsigned int len = sizeof(mib) / sizeof(*mib);
+               int res = __sysctl(mib, len, &info, &size, NULL, 0);
+               if (res == 0 && size >= sizeof(info)) {
+                       task->metadata.is_64_bit = (info.kp_proc.p_flag & P_LP64) != 0;
+                       ret = KERN_SUCCESS; // fallback answered the question
+               }
+       }
+
+       return ret;
+}
+
+// Attach to a target task: allocates the head page of the handle, takes a
+// send-right reference on 'port', and probes the task's bitness.  On any
+// failure the partially-built handle is torn down and the error returned.
+kern_return_t
+platform_task_attach(platform_task_t *out_task, task_t port)
+{
+       kern_return_t ret;
+       
+       // Test some invariants: header, metadata and thread slots must all be
+       // exactly 32 bytes so the page layout arithmetic holds.
+       _Static_assert(sizeof(struct platform_task_header_s) == 32, "");
+       _Static_assert(sizeof(struct platform_task_metadata_s) == 32, "");
+       _Static_assert(sizeof(struct platform_thread_s) == 32, "");
+       
+       // Allocate storage for the returned task handle (zero-filled by the VM).
+       mach_vm_address_t addr = 0;
+       mach_vm_size_t size = vm_page_size;
+       ret = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
+       if (ret != KERN_SUCCESS) return ret;
+       
+       platform_task_t result = (platform_task_t)addr;
+
+       // Keep a reference to the task port.
+       ret = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1);
+       if (ret != KERN_SUCCESS) {
+               _platform_task_deallocate(result);
+               return ret;
+       }
+
+       // The head page points at itself; continuation pages link back here.
+       result->header.head = result;
+       result->metadata.port = port;
+
+       ret = _platform_task_query_64_bit(result);
+       if (ret != KERN_SUCCESS) {
+               _platform_task_deallocate(result);
+               return ret;
+       }
+
+       *out_task = result;
+
+       return ret;
+}
+
+// Detach from a task previously attached with platform_task_attach(),
+// releasing the task port reference and all per-thread resources.
+kern_return_t
+platform_task_detach(platform_task_t task)
+{
+       return _platform_task_deallocate(task);
+}
+
+// Returns the bitness recorded at attach time by _platform_task_query_64_bit.
+bool
+platform_task_is_64_bit(platform_task_t task)
+{
+       return task->metadata.is_64_bit;
+}
+
+// Not implemented in this version of the introspection API.
+kern_return_t
+platform_task_suspend_threads(platform_task_t task)
+{
+       return KERN_NOT_SUPPORTED;
+}
+
+// Not implemented in this version of the introspection API.
+kern_return_t
+platform_task_resume_threads(platform_task_t task)
+{
+       return KERN_NOT_SUPPORTED;
+}
+
+// Not implemented in this version of the introspection API.
+kern_return_t
+platform_task_perform(platform_task_t task,
+                                         mach_vm_address_t func_addr,
+                                         mach_vm_address_t data_addr)
+{
+       return KERN_NOT_SUPPORTED;
+}
+
+// Map a thread slot pointer back to its owning task handle.  Slots live
+// inside task pages, so truncating to the page base yields the page header;
+// header.head links continuation pages back to the head page (and the head
+// page points at itself).
+static platform_task_t
+_platform_thread_get_task(platform_thread_t thread)
+{
+       platform_task_t page = (platform_task_t)trunc_page((uintptr_t)thread);
+       return page->header.head ? page->header.head : page;
+}
+
+// Map the remote thread's pthread structure into the local task.  Reads one
+// word at 'thread_handle' (the remote &TSD[0], which holds the pthread self
+// pointer), then remaps the page containing that pthread structure into our
+// address space and stores the local address in thread->pthread_addr.
+static kern_return_t
+_platform_thread_map(platform_task_t task,
+                                        platform_thread_t thread,
+                                        mach_vm_address_t thread_handle)
+{
+       kern_return_t ret;
+       vm_prot_t cur_protection, max_protection;
+       
+       // Word size depends on the target task's bitness.
+       vm_offset_t data = 0;
+       mach_vm_size_t wordsize = task->metadata.is_64_bit ? 8 : 4;
+       mach_msg_type_number_t size;
+       ret = mach_vm_read(_platform_thread_get_task(thread)->metadata.port,
+                                          thread_handle, // &TSD[0]
+                                          wordsize,
+                                          &data,
+                                          &size);
+       if (ret != KERN_SUCCESS) return ret;
+       
+       // NOTE(review): for a 32-bit target this dereferences 8 bytes of a
+       // 4-byte read buffer on LP64 hosts — confirm wordsize handling.
+       mach_vm_address_t pthread_addr = (uintptr_t)*(void **)data; // deref TSD[0]
+       mach_vm_deallocate(mach_task_self(), data, size);
+
+       // Remap the page containing the pthread struct; keep the intra-page
+       // offset so pthread_addr points at the structure, not the page base.
+       mach_vm_address_t src_addr = _platform_pthread_addr(pthread_addr);
+       mach_vm_offset_t offset = pthread_addr - src_addr;
+       mach_vm_address_t dst_addr = 0;
+       ret = mach_vm_remap(mach_task_self(),
+                                               &dst_addr,
+                                               _platform_pthread_size(pthread_addr),
+                                               0,
+                                               VM_FLAGS_ANYWHERE,
+                                               _platform_thread_get_task(thread)->metadata.port,
+                                               src_addr,
+                                               0, // no copy
+                                               &cur_protection,
+                                               &max_protection,
+                                               VM_INHERIT_NONE);
+       if (ret == KERN_SUCCESS) {
+               thread->pthread_addr = dst_addr + offset;
+       }
+
+       return ret;
+}
+
+// Add a mach thread to the task's thread array. Updates the existing entry
+// with the same unique id if one exists, otherwise allocates a new entry.
+// Consumes the reference to the thread act mach port.
+static kern_return_t
+_platform_task_add_mach_thread(platform_task_t task, thread_act_t act)
+{
+       int i;
+       kern_return_t ret;
+
+       // Fetch the thread's stable unique id and TSD handle.
+       thread_identifier_info_data_t info;
+       mach_msg_type_number_t info_count = THREAD_IDENTIFIER_INFO_COUNT;
+       ret = thread_info(act,
+                                         THREAD_IDENTIFIER_INFO,
+                                         (thread_info_t)&info,
+                                         &info_count);
+       if (ret != KERN_SUCCESS) return ret;
+
+       // Anything older than the previous generation is a candidate for recycling.
+       uint32_t gen = task->metadata.gen - 1;
+
+       // Look for an existing slot with this unique ID or the first empty slot.
+       platform_thread_t empty = NULL;
+       platform_thread_t found = NULL;
+       platform_task_t last, ptr = task;
+       do {
+               int start = (ptr == task) ? 1 : 0; // Skip over meta data.
+               for (i = start; i < _platform_threads_per_page() - start; ++i) {
+                       platform_thread_t thread = &ptr->threads[i];
+                       if (!empty &&
+                               thread->refcnt == 0 &&
+                               (thread->unique_id == 0 || thread->gen < gen)) {
+                               empty = &ptr->threads[i];
+                       // BUGFIX: compared task->threads[i] (head page only) even
+                       // while scanning continuation pages; must compare the slot
+                       // on the page currently being walked.
+                       } else if (ptr->threads[i].unique_id == info.thread_id) {
+                               found = &ptr->threads[i];
+                               break;
+                       }
+               }
+               last = ptr;
+       } while (!found && (ptr = ptr->header.next));
+               
+       if (found) {
+               // Refresh the existing entry; drop the old act right and take
+               // ownership of the new one.
+               mach_port_deallocate(mach_task_self(), found->act);
+               found->act = act;
+               found->gen = task->metadata.gen;
+       } else {
+               if (!empty) {
+                       // Allocate new storage if necessary.
+                       mach_vm_address_t addr = 0;
+                       mach_vm_size_t size = vm_page_size;
+                       ret = mach_vm_allocate(mach_task_self(),
+                                                                  &addr,
+                                                                  size,
+                                                                  VM_FLAGS_ANYWHERE);
+                       if (ret != KERN_SUCCESS) return ret;
+                       
+                       // Link the new page onto the chain and point it at the head.
+                       last = last->header.next = (platform_task_t)addr;
+                       last->header.head = task;
+                       
+                       empty = &last->threads[0];
+               } else {
+                       // Recycle a stale slot: free its old act/mapping first.
+                       _platform_thread_deallocate(empty);
+               }
+
+               empty->act = act; // transfer ownership
+               empty->gen = task->metadata.gen;
+               empty->unique_id = info.thread_id;
+               ret = _platform_thread_map(task, empty, info.thread_handle);
+       }
+
+       return ret;
+}
+
+// Refresh the handle's thread table from the live task: bump the generation,
+// reset the iteration cursor, and merge in every current mach thread.  Note
+// that 'ret' only reflects the *last* add; earlier failures are overwritten.
+kern_return_t
+platform_task_update_threads(platform_task_t task)
+{
+       kern_return_t ret;
+       thread_act_array_t array;
+       mach_msg_type_number_t array_count;
+       ret = task_threads(task->metadata.port, &array, &array_count);
+       if (ret != KERN_SUCCESS) return ret;
+
+       ++task->metadata.gen;
+       task->metadata.cursor = &task->threads[1]; // Reset iteration cursor.
+
+       unsigned int i;
+       for (i = 0; i < array_count; ++i) {
+               // Consumes the act reference in array[i].
+               ret = _platform_task_add_mach_thread(task, array[i]);
+       }
+
+       // Free the out-of-line act array returned by task_threads().
+       mach_vm_size_t array_size = array_count * sizeof(*array);
+       mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)array, array_size);
+
+       return ret;
+}
+
+// Iterator: return the next live thread (current generation, non-zero id),
+// retained (+1 refcnt), or NULL at end of iteration.  Advances the stored
+// cursor across page boundaries via the header.next links.
+platform_thread_t
+platform_task_copy_next_thread(platform_task_t task)
+{
+       int i;
+       platform_thread_t result = NULL;
+       platform_thread_t cursor = task->metadata.cursor;
+
+       if (cursor == NULL) {
+               // End of iteration.
+               return NULL;
+       }
+
+       uint32_t gen = task->metadata.gen;
+       // Page currently containing the cursor.
+       platform_task_t ptr = (platform_task_t)trunc_page((uintptr_t)cursor);
+
+       do {
+               // Only slots touched by the latest update_threads() count as live.
+               if (cursor->gen == gen && cursor->unique_id != 0) {
+                       result = cursor;
+               }
+
+               ++cursor;
+
+               // Step to the next page (or terminate) when we walk off this one.
+               if ((uintptr_t)cursor >= ((uintptr_t)ptr + vm_page_size)) {
+                       ptr = ptr->header.next;
+                       if (ptr) {
+                               cursor = &ptr->threads[0];
+                       } else {
+                               cursor = NULL;
+                       }
+               }
+       } while (!result && cursor);
+
+       task->metadata.cursor = cursor;
+
+       if (result) {
+               // Hand the caller a reference; released via platform_thread_release.
+               OSAtomicIncrement32(&result->refcnt);
+       }
+
+       return result;
+}
+
+// Stable kernel-assigned unique id of the thread (from THREAD_IDENTIFIER_INFO).
+platform_thread_id_t
+platform_thread_get_unique_id(platform_thread_t thread)
+{
+       return thread->unique_id;
+}
+
+// Drop a reference taken by platform_task_copy_next_thread().  The slot is
+// not freed here; slots are recycled by generation during update_threads().
+void
+platform_thread_release(platform_thread_t thread)
+{
+       int32_t refcnt = OSAtomicDecrement32(&thread->refcnt);
+       if (refcnt < 0) {
+               __LIBPLATFORM_CLIENT_CRASH__(refcnt, "Over-release of platform thread object");
+       }
+}
+
+// Abort the thread's current operation at a safe point; thin wrapper over
+// thread_abort_safely() on the slot's act port.
+kern_return_t
+platform_thread_abort_safely(platform_thread_t thread)
+{
+       return thread_abort_safely(thread->act);
+}
+
+// Suspend the target thread; thin wrapper over thread_suspend().
+kern_return_t
+platform_thread_suspend(platform_thread_t thread)
+{
+       return thread_suspend(thread->act);
+}
+
+// Resume the target thread; thin wrapper over thread_resume().
+kern_return_t
+platform_thread_resume(platform_thread_t thread)
+{
+       return thread_resume(thread->act);
+}
+
+// thread_info() wrapper translating between a byte-sized 'size' in/out
+// parameter and mach's natural_t-count convention.
+kern_return_t
+platform_thread_info(platform_thread_t thread,
+                                        thread_flavor_t flavor,
+                                        void *info,
+                                        size_t *size)
+{
+       kern_return_t ret;
+       mach_msg_type_number_t count = (int)*size / sizeof(natural_t);
+       ret = thread_info(thread->act, flavor, info, &count);
+       *size = count * sizeof(natural_t); // bytes actually returned
+       return ret;
+}
+
+// thread_get_state() wrapper; 'size' is in bytes, converted to/from the
+// natural_t count mach expects.
+kern_return_t
+platform_thread_get_state(platform_thread_t thread,
+                                                 thread_state_flavor_t flavor,
+                                                 void *state,
+                                                 size_t *size)
+{
+       kern_return_t ret;
+       mach_msg_type_number_t count = (int)*size / (int)sizeof(natural_t);
+       ret = thread_get_state(thread->act, flavor, state, &count);
+       *size = count * sizeof(natural_t); // bytes actually returned
+       return ret;
+}
+
+// thread_set_state() wrapper; 'size' is in bytes, converted to the
+// natural_t count mach expects.
+kern_return_t
+platform_thread_set_state(platform_thread_t thread,
+                                                 thread_state_flavor_t flavor,
+                                                 const void *state,
+                                                 size_t size)
+{
+       kern_return_t ret;
+       mach_msg_type_number_t count = (int)size / (int)sizeof(natural_t);
+       ret = thread_set_state(thread->act, flavor, (thread_state_t)state, count);
+       return ret;
+}
+
+// Not implemented in this version of the introspection API.
+kern_return_t
+platform_thread_perform(platform_thread_t thread,
+                                               mach_vm_address_t func_addr,
+                                               mach_vm_address_t data_addr)
+{
+       return KERN_NOT_SUPPORTED;
+}
+
+// Local address of the remapped remote pthread structure (see
+// _platform_thread_map); read-only view into the target's state.
+const void *
+platform_thread_get_pthread(platform_thread_t thread)
+{
+       return (const void *) thread->pthread_addr;
+}
+
+#ifdef MAIN
+
+// cc -DMAIN -I../../include/platform introspection.c
+
+#include <stdio.h>
+#include <unistd.h>
+
+// Ad-hoc test driver (built only with -DMAIN): attaches to the parent
+// process and dumps its threads three times at 3-second intervals.
+int main(int argc, char *argv[]) {
+       kern_return_t ret;
+       
+       // Requires task_for_pid privileges on the parent.
+       task_t port = MACH_PORT_NULL;
+       ret = task_for_pid(mach_task_self(), getppid(), &port);
+       if (ret != KERN_SUCCESS) {
+               mach_error("task_for_pid", ret);
+               return 1;
+       }
+       
+       platform_task_t task = NULL;
+       ret = platform_task_attach(&task, port);
+       if (ret != KERN_SUCCESS) {
+               mach_error("platform_task_attach", ret);
+               return 1;
+       }
+       
+       printf("Task is %s.\n", platform_task_is_64_bit(task) ? "64-bit" : "32-bit");
+       
+       int i;
+       for (i = 0; i < 3; ++i) {
+               ret = platform_task_update_threads(task);
+               if (ret != KERN_SUCCESS) {
+                       mach_error("platform_task_update_threads", ret);
+                       return 1;
+               }
+               
+               platform_thread_t thread;
+               while ((thread = platform_task_copy_next_thread(task))) {
+                       printf("thread = { .unique_id = 0x%llx, pthread_addr = 0x%llx }\n",
+                                  thread->unique_id,
+                                  thread->pthread_addr);
+                       // Peeks at raw pthread fields; offset 32 presumably holds the
+                       // unique id in the target's pthread layout — TODO confirm.
+                       printf("pthread = { .sig = %lx, .unique_id = 0x%llx }\n",
+                                  *(unsigned long *)thread->pthread_addr,
+                                  *(uint64_t *)((uintptr_t)thread->pthread_addr + 32));
+                       
+                       platform_thread_release(thread);
+               }
+
+               sleep(3);
+       }
+       
+       ret = platform_task_detach(task);
+       if (ret != KERN_SUCCESS) {
+               mach_error("platform_task_detach", ret);
+               return 1;
+       }
+       
+       return 0;
+}
+#endif
diff --git a/src/introspection/introspection_internal.h b/src/introspection/introspection_internal.h
new file mode 100644 (file)
index 0000000..c28fe73
--- /dev/null
@@ -0,0 +1,64 @@
+#ifndef __PLATFORM_INTROSPECTION_INTERNAL_H__
+#define __PLATFORM_INTROSPECTION_INTERNAL_H__
+
+
+// Task Allocations (with 4K vm_page_size)
+//
+// Page 1                              Page 2
+// +----------------+ <---- Head ----- +----------------+
+// | Header         | ----- Next ----> | Header         |  ----- Next ----> ...
+// +----------------+                  +----------------+
+// | Meta Data      |                  | Thread[126]    |
+// +----------------+                  +----------------+
+// | Thread[0]      |                  | Thread[127]    |
+// +----------------+                  +----------------+
+// ~ ...            ~                  ~ ...            ~
+// +----------------+                  +----------------+
+// | Thread[125]    |                  | Thread[252]    |
+// +----------------+                  +----------------+
+//
+
+// 32 bytes. Present at the top of every task page.
+struct platform_task_header_s {
+       // Linkage to other platform task pages.
+       platform_task_t head;   // head page of the chain (self on the head page)
+       platform_task_t next;   // next continuation page, or NULL
+#ifdef __LP64__
+       uint64_t _reserved[2];
+#else
+       uint32_t _reserved[6];
+#endif
+};
+
+// 32 bytes. Occupies the first thread slot of the head page only.
+struct platform_task_metadata_s {
+       platform_thread_t cursor;  // iteration position for copy_next_thread
+       task_t port;               // send right to the target task
+       uint32_t gen;              // current generation, bumped by update_threads
+       uint32_t is_64_bit:1, unused_flags:31;
+#ifdef __LP64__
+       uint32_t _reserved[3];
+#else
+       uint32_t _reserved[4];
+#endif
+};
+
+// 32 bytes. One slot per tracked thread.
+struct platform_thread_s {
+       int32_t refcnt;                    // references handed out to callers
+       uint32_t gen;                      // generation this slot was last seen in
+       platform_thread_id_t unique_id;    // kernel unique thread id (0 = empty)
+       mach_vm_address_t pthread_addr;    // local mapping of remote pthread, or 0
+       thread_act_t act;                  // owned send right to the thread act
+       uint32_t _reserved;
+};
+
+// One page of the task handle: a header plus either metadata (head page,
+// slot 0) or thread slots overlaid in the same space.
+struct platform_task_s {
+       struct platform_task_header_s header;
+       union {
+               struct platform_task_metadata_s metadata;
+               struct platform_thread_s threads[127];
+       };
+};
+
+#endif // __PLATFORM_INTROSPECTION_INTERNAL_H__
diff --git a/src/os/alloc_once.c b/src/os/alloc_once.c
new file mode 100644 (file)
index 0000000..a8ff358
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2012-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include "os/internal.h"
+#include "os/alloc_once_impl.h"
+#include <mach/mach_init.h>
+#include <mach/mach_vm.h>
+#include <mach/vm_statistics.h>
+
+#pragma mark -
+#pragma mark os_alloc
+
+// Per-block header at the top of each heap block.
+typedef struct _os_alloc_heap_metadata_s {
+       size_t allocated_bytes;  // running byte count (atomically bumped)
+       void *prev;              // previous heap block (debugging linkage)
+} _os_alloc_heap_metadata_s;
+
+#define allocation_size (2 * vm_page_size)
+#define usable (allocation_size-sizeof(_os_alloc_heap_metadata_s))
+// Newest heap block, or NULL before the first allocation.
+static void * volatile _os_alloc_heap;
+
+/*
+ * Simple allocator that doesn't have to worry about ever freeing allocations.
+ *
+ * The heapptr entry of _os_alloc_once_metadata always points to the newest
+ * available heap page, or NULL if this is the first allocation. The heap has a
+ * small header at the top of each heap block, recording the currently
+ * allocated bytes and the pointer to the previous heap block.
+ *
+ * Ignoring the special case where the heapptr is NULL; in which case we always
+ * make a block. The allocator first atomically increments the allocated_bytes
+ * counter by sz and calculates the eventual base pointer. If base+sz is
+ * greater than allocation_size then we begin allocating a new page. Otherwise,
+ * base is returned.
+ *
+ * Page allocation vm_allocates a new page of allocation_size and then attempts
+ * to atomically cmpxchg that pointer with the current headptr. If successful,
+ * it links the previous page to the new heap block for debugging purposes and
+ * then reattempts allocation. If a thread loses the allocation race, it
+ * vm_deallocates the still-clean region and reattempts the whole allocation.
+ */
+
+// Fast path: atomically reserve sz bytes from the current heap block.
+// Returns the base of the reservation, or NULL when heap is NULL or the
+// block cannot hold sz more bytes (caller falls back to _os_alloc_slow).
+static inline void*
+_os_alloc_alloc(void *heap, size_t sz)
+{
+       if (likely(heap)) {
+               _os_alloc_heap_metadata_s *metadata = (_os_alloc_heap_metadata_s*)heap;
+               // os_atomic_add yields the post-add counter; our region ends there.
+               size_t used = os_atomic_add(&metadata->allocated_bytes, sz, relaxed);
+               if (likely(used <= usable)) {
+                       return ((char*)metadata + sizeof(_os_alloc_heap_metadata_s) +
+                                       used - sz);
+               }
+       }
+       /* This fall-through case is heap == NULL, or heap block is exhausted. */
+       return NULL;
+}
+
+// Slow path: map a fresh heap block and race to install it as the current
+// heap via cmpxchg.  The loser deallocates its still-clean block and retries
+// against whichever block won.  Loops until the allocation succeeds.
+OS_NOINLINE
+static void*
+_os_alloc_slow(void *heap, size_t sz)
+{
+       void *ptr;
+       do {
+               /*
+                * <rdar://problem/13208498> We allocate at PAGE_SIZE or above to ensure
+                * we don't land in the zero page *if* a binary has opted not to include
+                * the __PAGEZERO load command.
+                */
+               mach_vm_address_t heapblk = PAGE_SIZE;
+               kern_return_t kr;
+               kr = mach_vm_map(mach_task_self(), &heapblk, allocation_size,
+                               0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_OS_ALLOC_ONCE),
+                               MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
+                               VM_INHERIT_DEFAULT);
+               if (unlikely(kr)) {
+                       __LIBPLATFORM_INTERNAL_CRASH__(kr, "Failed to allocate in os_alloc_once");
+               }
+               if (os_atomic_cmpxchg(&_os_alloc_heap, heap, (void*)heapblk, relaxed)) {
+                       // We won: link back to the old block for debugging.
+                       ((_os_alloc_heap_metadata_s*)heapblk)->prev = heap;
+                       heap = (void*)heapblk;
+               } else {
+                       // Lost the race: discard our block, use the winner's.
+                       mach_vm_deallocate(mach_task_self(), heapblk, allocation_size);
+                       heap = _os_alloc_heap;
+               }
+               ptr = _os_alloc_alloc(heap, sz);
+       } while (unlikely(!ptr));
+       return ptr;
+}
+
+// Allocate sz bytes from the never-freed heap.  Crashes the client on a
+// zero or over-sized request; otherwise tries the fast path and falls back
+// to the slow (block-mapping) path.
+static inline void*
+_os_alloc2(size_t sz)
+{
+       void *heap, *ptr;
+       if (unlikely(!sz || sz > usable)) {
+               __LIBPLATFORM_CLIENT_CRASH__(sz, "Requested allocation size is invalid");
+       }
+       heap = _os_alloc_heap;
+       if (likely(ptr = _os_alloc_alloc(heap, sz))) {
+               return ptr;
+       }
+       return _os_alloc_slow(heap, sz);
+}
+
+#pragma mark -
+#pragma mark os_alloc_once
+
+// Context passed through _os_once to the one-time allocation callback.
+typedef struct _os_alloc_once_ctxt_s {
+       struct _os_alloc_once_s *slot;  // slot whose ptr/once are being filled
+       size_t sz;                      // requested size in bytes
+       os_function_t init;             // optional initializer for the region
+} _os_alloc_once_ctxt_s;
+
+// One-time callback: allocate the slot's region (size rounded up to a
+// 16-byte multiple) and run the optional initializer on it.
+static void
+_os_alloc(void *ctxt)
+{
+       _os_alloc_once_ctxt_s *c = ctxt;
+       c->slot->ptr = _os_alloc2((c->sz + 0xf) & ~0xfu);
+       if (c->init) {
+               c->init(c->slot->ptr);
+       }
+}
+
+// Public entry: allocate-and-initialize exactly once per slot, then always
+// return the same pointer.  Serialized through the slot's _os_once gate.
+void*
+_os_alloc_once(struct _os_alloc_once_s *slot, size_t sz, os_function_t init)
+{
+       _os_alloc_once_ctxt_s c = {
+               .slot = slot,
+               .sz = sz,
+               .init = init,
+       };
+       _os_once(&slot->once, &c, _os_alloc);
+       return slot->ptr;
+}
diff --git a/src/os/atomic.c b/src/os/atomic.c
new file mode 100644 (file)
index 0000000..6c846f6
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "os/internal.h"
+#include "libkern/OSAtomic.h"
+#include "resolver.h"
+
+#if TARGET_OS_EMBEDDED
+
+OS_ATOMIC_EXPORT
+int32_t OSAtomicAdd32Barrier(int32_t v, volatile int32_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicIncrement32Barrier(volatile int32_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicDecrement32Barrier(volatile int32_t *p);
+OS_ATOMIC_EXPORT
+int64_t OSAtomicAdd64Barrier(int64_t v, volatile int64_t *p);
+OS_ATOMIC_EXPORT
+int64_t OSAtomicIncrement64Barrier(volatile int64_t *p);
+OS_ATOMIC_EXPORT
+int64_t OSAtomicDecrement64Barrier(volatile int64_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicAnd32Barrier(uint32_t v, volatile uint32_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicAnd32OrigBarrier(uint32_t v, volatile uint32_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicOr32Barrier(uint32_t v, volatile uint32_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicOr32OrigBarrier(uint32_t v, volatile uint32_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicXor32Barrier(uint32_t v, volatile uint32_t *p);
+OS_ATOMIC_EXPORT
+int32_t OSAtomicXor32OrigBarrier(uint32_t v, volatile uint32_t *p);
+OS_ATOMIC_EXPORT
+bool OSAtomicCompareAndSwap32Barrier(int32_t o, int32_t n, volatile int32_t *p);
+OS_ATOMIC_EXPORT
+bool OSAtomicCompareAndSwap64Barrier(int64_t o, int64_t n, volatile int64_t *p);
+OS_ATOMIC_EXPORT
+bool OSAtomicTestAndSetBarrier(uint32_t n, volatile void * p);
+OS_ATOMIC_EXPORT
+bool OSAtomicTestAndClearBarrier(uint32_t n, volatile void * p);
+OS_ATOMIC_EXPORT
+void OSAtomicEnqueue(OSQueueHead *list, void *new, size_t offset);
+OS_ATOMIC_EXPORT
+void* OSAtomicDequeue(OSQueueHead *list, size_t offset);
+OS_ATOMIC_EXPORT
+void OSMemoryBarrier(void);
+
+#if OS_ATOMIC_UP
+#define OS_ATOMIC_ALIAS_NO_BARRIER(n) OS_ATOMIC_EXPORT_ALIAS(n, n##Barrier)
+#else
+#define OS_ATOMIC_ALIAS_NO_BARRIER(n)
+#endif
+
+// Atomically add v to *p (acquire+release ordering); returns the new value.
+// On UP builds the non-barrier name is aliased to this implementation.
+int32_t
+OSAtomicAdd32Barrier(int32_t v, volatile int32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicAdd32);
+       int32_t r = os_atomic_add(p, v, acq_rel);
+       return r;
+}
+
+// Atomically increment *p (acquire+release); returns the new value.
+int32_t
+OSAtomicIncrement32Barrier(volatile int32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicIncrement32);
+       int32_t r = os_atomic_add(p, 1, acq_rel);
+       return r;
+}
+
+// Atomically decrement *p (acquire+release); returns the new value.
+int32_t
+OSAtomicDecrement32Barrier(volatile int32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicDecrement32);
+       int32_t r = os_atomic_add(p, -1, acq_rel);
+       return r;
+}
+
+// 64-bit variant of OSAtomicAdd32Barrier; returns the new value.
+int64_t
+OSAtomicAdd64Barrier(int64_t v, volatile int64_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicAdd64);
+       int64_t r = os_atomic_add(p, v, acq_rel);
+       return r;
+}
+
+// Atomically increment the 64-bit *p (acquire+release); returns new value.
+int64_t
+OSAtomicIncrement64Barrier(volatile int64_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicIncrement64);
+       int64_t r = os_atomic_add(p, 1, acq_rel);
+       return r;
+}
+
+// Atomically decrement the 64-bit *p (acquire+release); returns new value.
+int64_t
+OSAtomicDecrement64Barrier(volatile int64_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicDecrement64);
+       int64_t r = os_atomic_add(p, -1, acq_rel);
+       return r;
+}
+
+// Atomically AND v into *p (acquire+release); returns the new value.
+int32_t
+OSAtomicAnd32Barrier(uint32_t v, volatile uint32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicAnd32);
+       uint32_t r = os_atomic_and(p, v, acq_rel);
+       return (int32_t)r;
+}
+
+// Atomically AND v into *p (acquire+release); returns the *original* value.
+int32_t
+OSAtomicAnd32OrigBarrier(uint32_t v, volatile uint32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicAnd32Orig);
+       uint32_t r = os_atomic_and_orig(p, v, acq_rel);
+       return (int32_t)r;
+}
+
+// Atomically OR v into *p (acquire+release); returns the new value.
+int32_t
+OSAtomicOr32Barrier(uint32_t v, volatile uint32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicOr32);
+       uint32_t r = os_atomic_or(p, v, acq_rel);
+       return (int32_t)r;
+}
+
+// Atomically OR v into *p (acquire+release); returns the *original* value.
+int32_t
+OSAtomicOr32OrigBarrier(uint32_t v, volatile uint32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicOr32Orig);
+       uint32_t r = os_atomic_or_orig(p, v, acq_rel);
+       return (int32_t)r;
+}
+
+// Atomically XOR v into *p (acquire+release); returns the new value.
+int32_t
+OSAtomicXor32Barrier(uint32_t v, volatile uint32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicXor32);
+       uint32_t r = os_atomic_xor(p, v, acq_rel);
+       return (int32_t)r;
+}
+
+// Atomically XOR v into *p (acquire+release); returns the *original* value.
+int32_t
+OSAtomicXor32OrigBarrier(uint32_t v, volatile uint32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicXor32Orig);
+       uint32_t r = os_atomic_xor_orig(p, v, acq_rel);
+       return (int32_t)r;
+}
+
+// Compare-and-swap: if *p == o, store n (acquire+release) and return true.
+// The Int/Long/Ptr names are aliased here; Long/Ptr only on 32-bit (ILP32).
+bool
+OSAtomicCompareAndSwap32Barrier(int32_t o, int32_t n, volatile int32_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicCompareAndSwap32);
+       OS_ATOMIC_ALIAS(OSAtomicCompareAndSwapIntBarrier,
+                       OSAtomicCompareAndSwap32Barrier);
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicCompareAndSwapInt);
+#ifndef __LP64__
+       OS_ATOMIC_ALIAS(OSAtomicCompareAndSwapLongBarrier,
+                       OSAtomicCompareAndSwap32Barrier);
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicCompareAndSwapLong);
+       OS_ATOMIC_ALIAS(OSAtomicCompareAndSwapPtrBarrier,
+                       OSAtomicCompareAndSwap32Barrier);
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicCompareAndSwapPtr);
+#endif
+       return os_atomic_cmpxchg(p, o, n, acq_rel);
+}
+
+// 64-bit compare-and-swap: if *p == o, store n (acquire+release) and return
+// true.  Long/Ptr names alias here on LP64.
+bool
+OSAtomicCompareAndSwap64Barrier(int64_t o, int64_t n, volatile int64_t *p)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicCompareAndSwap64);
+#ifdef __LP64__
+       OS_ATOMIC_ALIAS(OSAtomicCompareAndSwapLongBarrier,
+                       OSAtomicCompareAndSwap64Barrier);
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicCompareAndSwapLong);
+       OS_ATOMIC_ALIAS(OSAtomicCompareAndSwapPtrBarrier,
+                       OSAtomicCompareAndSwap64Barrier);
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicCompareAndSwapPtr);
+#endif
+       return os_atomic_cmpxchg(p, o, n, acq_rel);
+}
+
+// Translate OSAtomicTestAndSet/Clear's (bit, addr) pair into a 32-bit
+// aligned word pointer and a single-bit mask (*vp), preserving the API's
+// historical "bit 0 = MSB of byte 0" numbering.
+static inline uint32_t*
+_OSAtomicTestPtrVal(uint32_t bit, volatile void *addr, uint32_t *vp)
+{
+       uintptr_t a = (uintptr_t)addr;
+       if (a & 3) {
+               // 32-bit align addr and adjust bit to compensate <rdar://12927920>
+               bit += (a & 3) * 8;
+               a &= ~3ull;
+       }
+       // Mask for the target bit within its aligned 32-bit word.
+       *vp = (0x80u >> (bit & 7)) << (bit & ~7u & 31);
+       return (uint32_t*)((char*)a + 4 * (bit / 32));
+}
+
+// Atomically set the addressed bit (acquire+release); returns its previous
+// value (nonzero if it was already set).
+bool
+OSAtomicTestAndSetBarrier(uint32_t bit, volatile void *addr)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicTestAndSet);
+       uint32_t v;
+       volatile uint32_t *p = _OSAtomicTestPtrVal(bit, addr, &v);
+       uint32_t r = os_atomic_or_orig(p, v, acq_rel);
+       return (r & v);
+}
+
+// Atomically clear the addressed bit (acquire+release); returns its previous
+// value (nonzero if it was set).
+bool
+OSAtomicTestAndClearBarrier(uint32_t bit, volatile void *addr)
+{
+       OS_ATOMIC_ALIAS_NO_BARRIER(OSAtomicTestAndClear);
+       uint32_t v;
+       volatile uint32_t *p = _OSAtomicTestPtrVal(bit, addr, &v);
+       uint32_t r = os_atomic_and_orig(p, ~v, acq_rel);
+       return (r & v);
+}
+
+#if !OS_ATOMIC_NO_BARRIER_ONLY
+
+// Internal view of OSQueueHead: just the head pointer is used here.
+typedef volatile struct {
+       void * volatile item;   // top of the LIFO, or NULL when empty
+       long unused;
+} _OSQueueHead;
+
+// Lock-free LIFO push: link 'new' (whose link field lives at 'offset') in
+// front of the current head with a release CAS, retrying on contention.
+void
+OSAtomicEnqueue(OSQueueHead *list, void *new, size_t offset)
+{
+       void * volatile *headptr = &(((_OSQueueHead*)list)->item);
+       void * volatile *nextptr = (void*)((char*)new + offset);
+       void *head = *headptr;
+       do {
+               // Point the new element at the head we observed, then try to
+               // publish it; cmpxchgvw refreshes 'head' on failure.
+               *nextptr = head;
+       } while (!os_atomic_cmpxchgvw(headptr, head, new, &head, release));
+}
+
+// Lock-free LIFO pop: swing the head to head->next (link at 'offset') with
+// an acquire rmw loop; returns the removed element or NULL if empty.
+void*
+OSAtomicDequeue(OSQueueHead *list, size_t offset)
+{
+       void * volatile *headptr = &(((_OSQueueHead*)list)->item);
+       void * volatile *nextptr;
+       void *head, *next;
+       (void)os_atomic_rmw_loop(headptr, head, next, acquire, {
+               if (!head) {
+                       // Empty list; leave head NULL and stop retrying.
+                       os_atomic_rmw_loop_give_up(break);
+               }
+               nextptr = (void*)((char*)head + offset);
+               next = *nextptr;
+       });
+       return head;
+}
+
+// Full (sequentially consistent) memory fence.
+void
+OSMemoryBarrier(void)
+{
+       os_atomic_thread_fence(seq_cst);
+}
+
+#endif // !OS_ATOMIC_NO_BARRIER_ONLY
+#endif // TARGET_OS_EMBEDDED
+
+struct _os_empty_files_are_not_c_files;
diff --git a/src/os/atomic_up.c b/src/os/atomic_up.c
new file mode 100644 (file)
index 0000000..9013643
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+// Force up variant to be generated to get the no-barrier OSAtomics
+#undef VARIANT_NO_RESOLVERS
+#define VARIANT_NO_RESOLVERS 0
+
+// Build the uniprocessor ("up") variant: set OS_ATOMIC_UP before pulling
+// in the internal headers, then re-include atomic.c so its code is
+// compiled under this variant's symbol names when a selector is defined.
+#define OS_ATOMIC_UP 1
+#include "os/internal.h"
+
+#ifdef OS_VARIANT_SELECTOR
+#define OS_VARIANT_ONLY 1
+#include "atomic.c"
+#endif
+
+struct _os_empty_files_are_not_c_files; // keeps translation unit non-empty
diff --git a/src/os/lock.c b/src/os/lock.c
new file mode 100644 (file)
index 0000000..8f3f7a9
--- /dev/null
@@ -0,0 +1,1124 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "lock_internal.h"
+#include "libkern/OSAtomic.h"
+#include "os/lock.h"
+#include "os/lock_private.h"
+#include "os/once_private.h"
+#include "resolver.h"
+
+#include <mach/mach_init.h>
+#include <mach/mach_traps.h>
+#include <mach/thread_switch.h>
+#include <os/tsd.h>
+
+#pragma mark -
+#pragma mark _os_lock_base_t
+
+#if !OS_VARIANT_ONLY
+
+// Generic os_lock_t front end: every lock instance begins with a pointer
+// to its type table (osl_type); these entry points dispatch through it.
+OS_LOCK_STRUCT_DECL_INTERNAL(base);
+OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);
+
+void
+os_lock_lock(os_lock_t l)
+{
+       return l._osl_base->osl_type->osl_lock(l);
+}
+
+bool
+os_lock_trylock(os_lock_t l)
+{
+       return l._osl_base->osl_type->osl_trylock(l);
+}
+
+void
+os_lock_unlock(os_lock_t l)
+{
+       return l._osl_base->osl_type->osl_unlock(l);
+}
+
+#endif //!OS_VARIANT_ONLY
+
+// Crash the client, carrying the bogus lock word in the crash info, when a
+// lock holds a value that is neither unlocked nor the locked token.
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
+{
+       __LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
+}
+
+#pragma mark -
+#pragma mark OSSpinLock
+
+#ifdef OS_LOCK_VARIANT_SELECTOR
+void _OSSpinLockLockSlow(volatile OSSpinLock *l);
+#else
+OS_NOINLINE OS_USED static void _OSSpinLockLockSlow(volatile OSSpinLock *l);
+#endif // OS_LOCK_VARIANT_SELECTOR
+
+OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
+OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
+OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
+OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);
+
+// Spin tuning: WFE-capable hardware spins fewer times (each wait-for-event
+// is long); plain pause spins more; uniprocessor never spins.
+#if OS_ATOMIC_UP
+// Don't spin on UP
+#elif OS_ATOMIC_WFE
+#define OS_LOCK_SPIN_SPIN_TRIES 100
+#define OS_LOCK_SPIN_PAUSE() os_hardware_wfe()
+#else
+#define OS_LOCK_SPIN_SPIN_TRIES 1000
+#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
+#endif
+#define OS_LOCK_SPIN_YIELD_TRIES 100
+
+// Locked token: 1 on embedded, -1 elsewhere. Any other nonzero lock word
+// is treated as corruption by the slow paths below.
+static const OSSpinLock _OSSpinLockLocked = TARGET_OS_EMBEDDED ? 1 : -1;
+
+// Yield-based waiter: thread_switch() with priority depression, escalating
+// permanently to SWITCH_OPTION_WAIT after OS_LOCK_SPIN_YIELD_TRIES yields.
+OS_NOINLINE
+static void
+_OSSpinLockLockYield(volatile OSSpinLock *l)
+{
+       int option = SWITCH_OPTION_DEPRESS;
+       mach_msg_timeout_t timeout = 1;
+       uint32_t tries = OS_LOCK_SPIN_YIELD_TRIES;
+       OSSpinLock lock;
+       while (unlikely(lock = *l)) {
+_yield:
+               if (unlikely(lock != _OSSpinLockLocked)) {
+                       _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
+               }
+               // Yield until tries first hits zero, then permanently switch to wait
+               if (unlikely(!tries--)) option = SWITCH_OPTION_WAIT;
+               thread_switch(MACH_PORT_NULL, option, timeout);
+       }
+       // Lock observed free: attempt to take it; on CAS failure `lock`
+       // holds the fresh value and we re-enter the wait loop at _yield.
+       bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
+       if (likely(r)) return;
+       goto _yield;
+}
+
+#if OS_ATOMIC_UP
+void
+_OSSpinLockLockSlow(volatile OSSpinLock *l)
+{
+       return _OSSpinLockLockYield(l); // Don't spin on UP
+}
+#else
+// Spin with the hardware pause primitive for OS_LOCK_SPIN_SPIN_TRIES
+// iterations, then fall back to the yield path above.
+void
+_OSSpinLockLockSlow(volatile OSSpinLock *l)
+{
+       uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
+       OSSpinLock lock;
+       while (unlikely(lock = *l)) {
+_spin:
+               if (unlikely(lock != _OSSpinLockLocked)) {
+                       return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
+               }
+               if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
+               OS_LOCK_SPIN_PAUSE();
+       }
+       bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
+       if (likely(r)) return;
+       goto _spin;
+}
+#endif
+
+#ifdef OS_LOCK_VARIANT_SELECTOR
+#undef _OSSpinLockLockSlow
+extern void _OSSpinLockLockSlow(volatile OSSpinLock *l);
+#endif
+
+#if !OS_LOCK_VARIANT_ONLY
+
+#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR
+
+// On platforms where OSSpinLock is remapped to the ulock-based nospin
+// lock, the public entry points forward there; the legacy Libc aliases
+// (spin_lock/_spin_lock etc.) bind to the same implementations.
+typedef struct _os_nospin_lock_s *_os_nospin_lock_t;
+void _os_nospin_lock_lock(_os_nospin_lock_t lock);
+bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
+void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
+
+void
+OSSpinLockLock(volatile OSSpinLock *l)
+{
+       OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
+       OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
+       return _os_nospin_lock_lock((_os_nospin_lock_t)l);
+}
+
+bool
+OSSpinLockTry(volatile OSSpinLock *l)
+{
+       return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
+}
+
+int
+spin_lock_try(volatile OSSpinLock *l)
+{
+       OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
+       return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
+}
+
+void
+OSSpinLockUnlock(volatile OSSpinLock *l)
+{
+       OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
+       OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
+       return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
+}
+
+// Keep the genuine spinlock implementations below, demoted to static
+// symbols under underscored names, with aliasing disabled.
+#undef OS_ATOMIC_ALIAS
+#define OS_ATOMIC_ALIAS(n, o)
+static void _OSSpinLockLock(volatile OSSpinLock *l);
+#undef OSSpinLockLock
+#define OSSpinLockLock _OSSpinLockLock
+static bool _OSSpinLockTry(volatile OSSpinLock *l);
+#undef OSSpinLockTry
+#define OSSpinLockTry _OSSpinLockTry
+static __unused int __spin_lock_try(volatile OSSpinLock *l);
+#undef spin_lock_try
+#define spin_lock_try __spin_lock_try
+static void _OSSpinLockUnlock(volatile OSSpinLock *l);
+#undef OSSpinLockUnlock
+#define OSSpinLockUnlock _OSSpinLockUnlock
+
+#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK
+
+// Default spinlock fast paths: single CAS inline, slow path out of line.
+void
+OSSpinLockLock(volatile OSSpinLock *l)
+{
+       OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
+       OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
+       bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
+       if (likely(r)) return;
+       return _OSSpinLockLockSlow(l);
+}
+
+bool
+OSSpinLockTry(volatile OSSpinLock *l)
+{
+       bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
+       return r;
+}
+
+int
+spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
+{
+       OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
+       return OSSpinLockTry(l);
+}
+
+void
+OSSpinLockUnlock(volatile OSSpinLock *l)
+{
+       OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
+       OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
+       os_atomic_store(l, 0, release);
+}
+
+#pragma mark -
+#pragma mark os_lock_spin_t
+
+// os_lock_t flavor backed by a plain OSSpinLock.
+OS_LOCK_STRUCT_DECL_INTERNAL(spin,
+       OSSpinLock volatile osl_spinlock;
+);
+#if !OS_VARIANT_ONLY
+OS_LOCK_METHODS_DECL(spin);
+OS_LOCK_TYPE_INSTANCE(spin);
+#endif // !OS_VARIANT_ONLY
+
+#ifdef OS_VARIANT_SELECTOR
+#define _os_lock_spin_lock \
+               OS_VARIANT(_os_lock_spin_lock, OS_VARIANT_SELECTOR)
+#define _os_lock_spin_trylock \
+               OS_VARIANT(_os_lock_spin_trylock, OS_VARIANT_SELECTOR)
+#define _os_lock_spin_unlock \
+               OS_VARIANT(_os_lock_spin_unlock, OS_VARIANT_SELECTOR)
+OS_LOCK_METHODS_DECL(spin);
+#endif // OS_VARIANT_SELECTOR
+
+void
+_os_lock_spin_lock(_os_lock_spin_t l)
+{
+       return OSSpinLockLock(&l->osl_spinlock);
+}
+
+bool
+_os_lock_spin_trylock(_os_lock_spin_t l)
+{
+       return OSSpinLockTry(&l->osl_spinlock);
+}
+
+void
+_os_lock_spin_unlock(_os_lock_spin_t l)
+{
+       return OSSpinLockUnlock(&l->osl_spinlock);
+}
+
+#pragma mark -
+#pragma mark os_lock_owner_t
+
+#ifndef __TSD_MACH_THREAD_SELF
+#define __TSD_MACH_THREAD_SELF 3
+#endif
+
+// A lock owner is identified by its mach thread port name.
+typedef mach_port_name_t os_lock_owner_t;
+
+// Read the caller's thread port name straight out of the TSD slot,
+// avoiding a mach_thread_self() trap.
+OS_ALWAYS_INLINE
+static inline os_lock_owner_t
+_os_lock_owner_get_self(void)
+{
+       os_lock_owner_t self;
+       self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
+       return self;
+}
+
+#define OS_LOCK_NO_OWNER MACH_PORT_NULL
+
+#if !OS_LOCK_VARIANT_ONLY
+
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_lock_recursive_abort(os_lock_owner_t owner)
+{
+       __LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
+                       "os_lock");
+}
+
+#endif //!OS_LOCK_VARIANT_ONLY
+
+#pragma mark -
+#pragma mark os_lock_handoff_t
+
+// Handoff lock: the word holds the owner's thread port so waiters can
+// thread_switch() directly to the owner (priority handoff).
+OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
+       os_lock_owner_t volatile osl_owner;
+);
+#if !OS_VARIANT_ONLY
+OS_LOCK_METHODS_DECL(handoff);
+OS_LOCK_TYPE_INSTANCE(handoff);
+#endif // !OS_VARIANT_ONLY
+
+#ifdef OS_VARIANT_SELECTOR
+#define _os_lock_handoff_lock \
+               OS_VARIANT(_os_lock_handoff_lock, OS_VARIANT_SELECTOR)
+#define _os_lock_handoff_trylock \
+               OS_VARIANT(_os_lock_handoff_trylock, OS_VARIANT_SELECTOR)
+#define _os_lock_handoff_unlock \
+               OS_VARIANT(_os_lock_handoff_unlock, OS_VARIANT_SELECTOR)
+OS_LOCK_METHODS_DECL(handoff);
+#endif // OS_VARIANT_SELECTOR
+
+#define OS_LOCK_HANDOFF_YIELD_TRIES 100
+
+// Contended path: repeatedly hand off to the current owner; aborts on
+// self-deadlock (owner == self).
+OS_NOINLINE
+static void
+_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
+{
+       int option = SWITCH_OPTION_OSLOCK_DEPRESS;
+       mach_msg_timeout_t timeout = 1;
+       uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
+       os_lock_owner_t self = _os_lock_owner_get_self(), owner;
+       while (unlikely(owner = l->osl_owner)) {
+_handoff:
+               if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
+               // Yield until tries first hits zero, then permanently switch to wait
+               if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
+               thread_switch(owner, option, timeout);
+               // Redrive the handoff every 1ms until switching to wait
+               if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
+       }
+       bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
+                       acquire);
+       if (likely(r)) return;
+       goto _handoff;
+}
+
+void
+_os_lock_handoff_lock(_os_lock_handoff_t l)
+{
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
+       if (likely(r)) return;
+       return _os_lock_handoff_lock_slow(l);
+}
+
+bool
+_os_lock_handoff_trylock(_os_lock_handoff_t l)
+{
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
+       return r;
+}
+
+void
+_os_lock_handoff_unlock(_os_lock_handoff_t l)
+{
+       os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
+}
+
+#pragma mark -
+#pragma mark os_ulock_value_t
+
+#include <sys/errno.h>
+#include <sys/ulock.h>
+
+typedef os_lock_owner_t os_ulock_value_t;
+
+// This assumes that all thread mach port values always have the low bit set!
+// Clearing this bit is used to communicate the existence of waiters to unlock.
+#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
+// Recover the owner's port name by re-setting the (possibly cleared)
+// nowaiters bit.
+#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)
+
+// Anonymous owner (MACH_PORT_DEAD) marks locks taken without TSD; it is
+// excluded from both ownership predicates below.
+#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
+#define OS_ULOCK_IS_OWNER(value, self) ({ \
+               os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
+               (_owner == (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
+#define OS_ULOCK_IS_NOT_OWNER(value, self) ({ \
+               os_lock_owner_t _owner = OS_ULOCK_OWNER(value); \
+               (_owner != (self) && _owner != OS_ULOCK_ANONYMOUS_OWNER); })
+
+
+#pragma mark -
+#pragma mark os_unfair_lock
+
+// Internal layout of os_unfair_lock: a single ulock word holding the
+// owner's thread port (with the nowaiters-bit protocol above).
+typedef struct _os_unfair_lock_s {
+       os_ulock_value_t oul_value;
+} *_os_unfair_lock_t;
+
+_Static_assert(sizeof(struct os_unfair_lock_s) ==
+               sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");
+
+OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
+OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
+               os_unfair_lock_options_t options);
+OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
+OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);
+
+OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
+               os_unfair_lock_t lock);
+OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
+               os_unfair_lock_t lock);
+
+// The public option flag is passed straight through to __ulock_wait(),
+// so the values must match.
+_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
+               ULF_WAIT_WORKQ_DATA_CONTENTION,
+               "check value for OS_UNFAIR_LOCK_OPTIONS_MASK");
+#define OS_UNFAIR_LOCK_OPTIONS_MASK \
+               (os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
+
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
+{
+       __LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
+                       "os_unfair_lock");
+}
+
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
+{
+       __LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
+                       "owned by current thread");
+}
+
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_unfair_lock_corruption_abort(os_ulock_value_t current)
+{
+       __LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
+}
+
+// Contended acquire: clear the nowaiters bit so the eventual unlocker
+// takes its slow path, block in __ulock_wait(), and on wakeup attempt the
+// acquire CAS, preserving the waiters indication returned by the kernel.
+OS_NOINLINE
+static void
+_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
+               os_unfair_lock_options_t options)
+{
+       os_ulock_value_t current, new, waiters_mask = 0;
+       if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
+               __LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
+       }
+       while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
+                       OS_LOCK_NO_OWNER)) {
+_retry:
+               if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
+                       return _os_unfair_lock_recursive_abort(self);
+               }
+               new = current & ~OS_ULOCK_NOWAITERS_BIT;
+               if (current != new) {
+                       // Clear nowaiters bit in lock value before waiting
+                       if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
+                                       relaxed)){
+                               continue;
+                       }
+                       current = new;
+               }
+               int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
+                               l, current, 0);
+               if (unlikely(ret < 0)) {
+                       switch (-ret) {
+                       case EINTR:
+                       case EFAULT:
+                               continue;
+                       case EOWNERDEAD:
+                               _os_unfair_lock_corruption_abort(current);
+                               break;
+                       default:
+                               __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
+                       }
+               }
+               // If there are more waiters, unset nowaiters bit when acquiring lock
+               waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
+       }
+       new = self & ~waiters_mask;
+       bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
+                       &current, acquire);
+       if (unlikely(!r)) goto _retry;
+}
+
+// Contended release: reached only after the fast-path xchg observed a
+// value other than plain `self` (i.e. the nowaiters bit was cleared by a
+// waiter, or the lock was not ours). Wakes one waiter via __ulock_wake().
+OS_NOINLINE
+static void
+_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
+               os_lock_owner_t self)
+{
+       if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
+               return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
+       }
+       if (current & OS_ULOCK_NOWAITERS_BIT) {
+               __LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
+       }
+       for (;;) {
+               int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
+               if (unlikely(ret < 0)) {
+                       switch (-ret) {
+                       case EINTR:
+                               continue;
+                       case ENOENT:
+                               break;
+                       default:
+                               __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
+                       }
+               }
+               break;
+       }
+}
+
+// Fast paths: uncontended acquire is one CAS from NO_OWNER to self;
+// uncontended release is one xchg back to NO_OWNER.
+void
+os_unfair_lock_lock(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
+       if (likely(r)) return;
+       return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
+}
+
+void
+os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
+               os_unfair_lock_options_t options)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
+       if (likely(r)) return;
+       return _os_unfair_lock_lock_slow(l, self, options);
+}
+
+bool
+os_unfair_lock_trylock(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
+       return r;
+}
+
+void
+os_unfair_lock_unlock(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       os_ulock_value_t current;
+       current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
+       if (likely(current == self)) return;
+       return _os_unfair_lock_unlock_slow(l, current, self);
+}
+
+// Variants for libpthread callers that run before TSD is usable: the lock
+// is owned by the anonymous owner token instead of a thread port.
+void
+os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
+       bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
+       if (likely(r)) return;
+       return _os_unfair_lock_lock_slow(l, self,
+                       OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
+}
+
+void
+os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
+       os_ulock_value_t current;
+       current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
+       if (likely(current == self)) return;
+       return _os_unfair_lock_unlock_slow(l, current, self);
+}
+
+#pragma mark -
+#pragma mark _os_lock_unfair_t 4Libc // <rdar://problem/27138264>
+
+OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
+               os_unfair_lock_t lock, os_unfair_lock_options_t options);
+OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);
+
+// Libc variant of the contended acquire: identical to
+// _os_unfair_lock_lock_slow() except for the EOWNERDEAD handling, where a
+// possibly-corrupt lock may be stolen rather than crashing the process.
+OS_NOINLINE
+static void
+_os_unfair_lock_lock_slow_4Libc(_os_unfair_lock_t l, os_lock_owner_t self,
+               os_unfair_lock_options_t options)
+{
+       os_ulock_value_t current, new, waiters_mask = 0;
+       if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
+               __LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
+       }
+       while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
+                       OS_LOCK_NO_OWNER)) {
+_retry:
+               if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
+                       return _os_unfair_lock_recursive_abort(self);
+               }
+               new = current & ~OS_ULOCK_NOWAITERS_BIT;
+               if (current != new) {
+                       // Clear nowaiters bit in lock value before waiting
+                       if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
+                                       relaxed)){
+                               continue;
+                       }
+                       current = new;
+               }
+               int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
+                               l, current, 0);
+               if (unlikely(ret < 0)) {
+                       switch (-ret) {
+                       case EINTR:
+                       case EFAULT:
+                               continue;
+                       case EOWNERDEAD:
+                               // if we get an `EOWNERDEAD` it could be corruption of the lock
+                               // so for the Libc locks, if we can steal the lock, assume
+                               // it is corruption and pretend we got the lock with contention
+                               new = self & ~OS_ULOCK_NOWAITERS_BIT;
+                               if (os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
+                                                               acquire)) {
+                                       return;
+                               }
+                               break;
+                       default:
+                               __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
+                       }
+               }
+               // If there are more waiters, unset nowaiters bit when acquiring lock
+               waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
+       }
+       new = self & ~waiters_mask;
+       bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
+                       &current, acquire);
+       if (unlikely(!r)) goto _retry;
+}
+
+// Libc variant of the contended release: no ownership/waiters sanity
+// checks, just wake one waiter.
+OS_NOINLINE
+static void
+_os_unfair_lock_unlock_slow_4Libc(_os_unfair_lock_t l)
+{
+       for (;;) {
+               int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
+               if (unlikely(ret < 0)) {
+                       switch (-ret) {
+                       case EINTR:
+                               continue;
+                       case ENOENT:
+                               break;
+                       default:
+                               __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
+                       }
+               }
+               break;
+       }
+}
+
+void
+os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
+               os_unfair_lock_options_t options)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
+       if (likely(r)) return;
+       return _os_unfair_lock_lock_slow_4Libc(l, self, options);
+}
+
+void
+os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       os_ulock_value_t current;
+       current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
+       if (likely(current == self)) return;
+       return _os_unfair_lock_unlock_slow_4Libc(l);
+}
+
+#if !OS_VARIANT_ONLY
+// Debug assertions: crash with the observed lock value when ownership
+// does not match the caller's expectation. Anonymous-owner locks satisfy
+// neither predicate (see OS_ULOCK_IS_[NOT_]OWNER).
+void
+os_unfair_lock_assert_owner(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
+       if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
+               __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
+                               "Lock unexpectedly not owned by current thread");
+       }
+}
+
+void
+os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
+{
+       _os_unfair_lock_t l = (_os_unfair_lock_t)lock;
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
+       if (unlikely(OS_ULOCK_IS_OWNER(current, self))) {
+               __LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
+                               "Lock unexpectedly owned by current thread");
+       }
+}
+#endif
+
+#pragma mark -
+#pragma mark _os_lock_unfair_t
+
+// os_lock_t flavor backed by an os_unfair_lock.
+OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
+       os_unfair_lock osl_unfair_lock;
+);
+#if !OS_VARIANT_ONLY
+OS_LOCK_METHODS_DECL(unfair);
+OS_LOCK_TYPE_INSTANCE(unfair);
+#endif // !OS_VARIANT_ONLY
+
+#ifdef OS_VARIANT_SELECTOR
+#define _os_lock_unfair_lock \
+               OS_VARIANT(_os_lock_unfair_lock, OS_VARIANT_SELECTOR)
+#define _os_lock_unfair_trylock \
+               OS_VARIANT(_os_lock_unfair_trylock, OS_VARIANT_SELECTOR)
+#define _os_lock_unfair_unlock \
+               OS_VARIANT(_os_lock_unfair_unlock, OS_VARIANT_SELECTOR)
+OS_LOCK_METHODS_DECL(unfair);
+#endif // OS_VARIANT_SELECTOR
+
+void
+_os_lock_unfair_lock(_os_lock_unfair_t l)
+{
+       return os_unfair_lock_lock(&l->osl_unfair_lock);
+}
+
+bool
+_os_lock_unfair_trylock(_os_lock_unfair_t l)
+{
+       return os_unfair_lock_trylock(&l->osl_unfair_lock);
+}
+
+void
+_os_lock_unfair_unlock(_os_lock_unfair_t l)
+{
+       return os_unfair_lock_unlock(&l->osl_unfair_lock);
+}
+
+#pragma mark -
+#pragma mark _os_nospin_lock
+
+// ulock-backed replacement for OSSpinLock that blocks instead of spinning;
+// same size as OSSpinLock so it can be used in place of one.
+typedef struct _os_nospin_lock_s {
+       os_ulock_value_t oul_value;
+} _os_nospin_lock, *_os_nospin_lock_t;
+
+_Static_assert(sizeof(OSSpinLock) ==
+               sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");
+
+OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
+OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
+OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
+
+// Contended acquire: like the unfair-lock slow path but waits with
+// UL_COMPARE_AND_WAIT and an escalating millisecond timeout so a lock
+// word mutated by a legacy OSSpinLock user cannot strand waiters forever.
+OS_NOINLINE
+static void
+_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
+{
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       os_ulock_value_t current, new, waiters_mask = 0;
+       uint32_t timeout = 1;
+       while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
+                       OS_LOCK_NO_OWNER)) {
+_retry:
+               new = current & ~OS_ULOCK_NOWAITERS_BIT;
+               // For safer compatibility with OSSpinLock where _OSSpinLockLocked may
+               // be 1, check that new didn't become 0 (unlocked) by clearing this bit
+               if (current != new && new) {
+                       // Clear nowaiters bit in lock value before waiting
+                       if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
+                                       relaxed)){
+                               continue;
+                       }
+                       current = new;
+               }
+               int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
+                               timeout * 1000);
+               if (unlikely(ret < 0)) {
+                       switch (-ret) {
+                       case ETIMEDOUT:
+                               timeout++;
+                               continue;
+                       case EINTR:
+                       case EFAULT:
+                               continue;
+                       default:
+                               __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
+                       }
+               }
+               // If there are more waiters, unset nowaiters bit when acquiring lock
+               waiters_mask = (ret > 0) ? OS_ULOCK_NOWAITERS_BIT : 0;
+       }
+       new = self & ~waiters_mask;
+       bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
+                       &current, acquire);
+       if (unlikely(!r)) goto _retry;
+}
+
+// Contended release: silently tolerates an unowned unlock for drop-in
+// OSSpinLock compatibility, otherwise wakes one waiter.
+OS_NOINLINE
+static void
+_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
+{
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       if (unlikely(OS_ULOCK_OWNER(current) != self)) {
+               return; // no unowned_abort for drop-in compatibility with OSSpinLock
+       }
+       if (current & OS_ULOCK_NOWAITERS_BIT) {
+               __LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
+       }
+       for (;;) {
+               int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
+               if (unlikely(ret < 0)) {
+                       switch (-ret) {
+                       case EINTR:
+                               continue;
+                       case ENOENT:
+                               break;
+                       default:
+                               __LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
+                       }
+               }
+               break;
+       }
+}
+
+void
+_os_nospin_lock_lock(_os_nospin_lock_t l)
+{
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
+       if (likely(r)) return;
+       return _os_nospin_lock_lock_slow(l);
+}
+
+bool
+_os_nospin_lock_trylock(_os_nospin_lock_t l)
+{
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
+       return r;
+}
+
+void
+_os_nospin_lock_unlock(_os_nospin_lock_t l)
+{
+       os_lock_owner_t self = _os_lock_owner_get_self();
+       os_ulock_value_t current;
+       current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
+       if (likely(current == self)) return;
+       return _os_nospin_lock_unlock_slow(l, current);
+}
+
+#pragma mark -
+#pragma mark _os_lock_nospin_t
+
+// os_lock_t flavor backed by the _os_nospin_lock above.
+OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
+       _os_nospin_lock osl_nospin_lock;
+);
+#if !OS_VARIANT_ONLY
+OS_LOCK_METHODS_DECL(nospin);
+OS_LOCK_TYPE_INSTANCE(nospin);
+#endif // !OS_VARIANT_ONLY
+
+#ifdef OS_VARIANT_SELECTOR
+#define _os_lock_nospin_lock \
+               OS_VARIANT(_os_lock_nospin_lock, OS_VARIANT_SELECTOR)
+#define _os_lock_nospin_trylock \
+               OS_VARIANT(_os_lock_nospin_trylock, OS_VARIANT_SELECTOR)
+#define _os_lock_nospin_unlock \
+               OS_VARIANT(_os_lock_nospin_unlock, OS_VARIANT_SELECTOR)
+OS_LOCK_METHODS_DECL(nospin);
+#endif // OS_VARIANT_SELECTOR
+
+void
+_os_lock_nospin_lock(_os_lock_nospin_t l)
+{
+       return _os_nospin_lock_lock(&l->osl_nospin_lock);
+}
+
+bool
+_os_lock_nospin_trylock(_os_lock_nospin_t l)
+{
+       return _os_nospin_lock_trylock(&l->osl_nospin_lock);
+}
+
+void
+_os_lock_nospin_unlock(_os_lock_nospin_t l)
+{
+       return _os_nospin_lock_unlock(&l->osl_nospin_lock);
+}
+
+#pragma mark -
+#pragma mark os_once_t
+
+typedef struct os_once_gate_s {
+       union {
+               os_ulock_value_t ogo_lock;
+               os_once_t ogo_once;
+       };
+} os_once_gate_s, *os_once_gate_t;
+
+#define OS_ONCE_INIT ((os_once_t)0l)
+#define OS_ONCE_DONE (~(os_once_t)0l)
+
+OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
+OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);
+
+// Crash handler: the calling thread already owns the once gate and is
+// about to wait on itself (deadlock). Crashes with the owner tid as payload.
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_once_gate_recursive_abort(os_lock_owner_t owner)
+{
+	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
+			"os_once_t");
+}
+
+// Crash handler: a thread tried to complete/unlock a once gate that is
+// owned by a different thread. Crashes with the actual owner as payload.
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_once_gate_unowned_abort(os_lock_owner_t owner)
+{
+	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
+			"owned by current thread");
+}
+
+// Crash handler: the gate value observed is inconsistent (e.g. the kernel
+// reported EOWNERDEAD for the recorded owner). Payload is the bad value.
+OS_NOINLINE OS_NORETURN OS_COLD
+static void
+_os_once_gate_corruption_abort(os_ulock_value_t current)
+{
+	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
+}
+
+// Slow path for a thread that lost the _os_once() race: park on the gate
+// until the winning thread publishes OS_ONCE_DONE (or the gate is reset
+// back to OS_ONCE_INIT by __os_once_reset()).
+OS_NOINLINE
+static void
+_os_once_gate_wait_slow(os_ulock_value_t *gate, os_lock_owner_t self)
+{
+	os_ulock_value_t tid_old, tid_new;
+
+	for (;;) {
+		os_atomic_rmw_loop(gate, tid_old, tid_new, relaxed, {
+			switch (tid_old) {
+			case (os_ulock_value_t)OS_ONCE_INIT: // raced with __os_once_reset()
+			case (os_ulock_value_t)OS_ONCE_DONE: // raced with _os_once()
+				os_atomic_rmw_loop_give_up(return);
+			}
+			// Clear the no-waiters bit so the owner knows it must wake us;
+			// if it is already clear there is nothing to store.
+			tid_new = tid_old & ~OS_ULOCK_NOWAITERS_BIT;
+			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
+		});
+		// Waiting on a gate we own would never return — crash instead.
+		if (unlikely(OS_ULOCK_IS_OWNER(tid_old, self))) {
+			return _os_once_gate_recursive_abort(self);
+		}
+		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
+				gate, tid_new, 0);
+		if (unlikely(ret < 0)) {
+			switch (-ret) {
+			case EINTR:
+			case EFAULT:
+				continue;	// interrupted or transient fault: re-check and re-wait
+			case EOWNERDEAD:
+				_os_once_gate_corruption_abort(tid_old);
+				break;
+			default:
+				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
+			}
+		}
+	}
+}
+
+// Wake all threads parked on the once gate. Crashes if the caller does not
+// own the gate, or if the no-waiters bit is still set (this path is only
+// entered when waiters were recorded).
+OS_NOINLINE
+static void
+_os_once_gate_broadcast_slow(os_ulock_value_t *gate, os_ulock_value_t current,
+		os_lock_owner_t self)
+{
+	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self))) {
+		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
+	}
+	if (current & OS_ULOCK_NOWAITERS_BIT) {
+		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
+	}
+	for (;;) {
+		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
+				gate, 0);
+		if (unlikely(ret < 0)) {
+			switch (-ret) {
+			case EINTR:
+				continue;	// interrupted: retry the wake
+			case ENOENT:
+				break;		// no waiters found — nothing left to wake
+			default:
+				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
+			}
+		}
+		break;
+	}
+}
+
+// Publish `value` (OS_ONCE_DONE or OS_ONCE_INIT) into the once gate with a
+// maximally synchronizing barrier first, then wake any recorded waiters.
+OS_ALWAYS_INLINE
+static void
+_os_once_gate_set_value_and_broadcast(os_once_gate_t og, os_lock_owner_t self,
+		os_once_t value)
+{
+	// The next barrier must be long and strong.
+	//
+	// The scenario: SMP systems with weakly ordered memory models
+	// and aggressive out-of-order instruction execution.
+	//
+	// The problem:
+	//
+	// The os_once*() wrapper macro causes the callee's
+	// instruction stream to look like this (pseudo-RISC):
+	//
+	//      load r5, pred-addr
+	//      cmpi r5, -1
+	//      beq  1f
+	//      call os_once*()
+	//      1f:
+	//      load r6, data-addr
+	//
+	// May be re-ordered like so:
+	//
+	//      load r6, data-addr
+	//      load r5, pred-addr
+	//      cmpi r5, -1
+	//      beq  1f
+	//      call os_once*()
+	//      1f:
+	//
+	// Normally, a barrier on the read side is used to workaround
+	// the weakly ordered memory model. But barriers are expensive
+	// and we only need to synchronize once! After func(ctxt)
+	// completes, the predicate will be marked as "done" and the
+	// branch predictor will correctly skip the call to
+	// os_once*().
+	//
+	// A far faster alternative solution: Defeat the speculative
+	// read-ahead of peer CPUs.
+	//
+	// Modern architectures will throw away speculative results
+	// once a branch mis-prediction occurs. Therefore, if we can
+	// ensure that the predicate is not marked as being complete
+	// until long after the last store by func(ctxt), then we have
+	// defeated the read-ahead of peer CPUs.
+	//
+	// In other words, the last "store" by func(ctxt) must complete
+	// and then N cycles must elapse before ~0l is stored to *val.
+	// The value of N is whatever is sufficient to defeat the
+	// read-ahead mechanism of peer CPUs.
+	//
+	// On some CPUs, the most fully synchronizing instruction might
+	// need to be issued.
+	os_atomic_maximally_synchronizing_barrier();
+
+	// above assumed to contain release barrier
+	os_ulock_value_t current =
+			(os_ulock_value_t)os_atomic_xchg(&og->ogo_once, value, relaxed);
+	if (likely(current == self)) return;	// no waiters recorded: done
+	_os_once_gate_broadcast_slow(&og->ogo_lock, current, self);
+}
+
+// Atomically resets the once value to zero and then signals all
+// pending waiters to return from their _os_once_gate_wait_slow().
+// NOTE(review): caller is presumably the thread that owns the gate —
+// the broadcast path crashes otherwise; confirm against callers.
+void
+__os_once_reset(os_once_t *val)
+{
+	os_once_gate_t og = (os_once_gate_t)val;
+	os_lock_owner_t self = _os_lock_owner_get_self();
+	_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_INIT);
+}
+
+// Run func(ctxt) at most once per predicate. The winner CASes its tid into
+// the gate (claiming ownership), runs func, then publishes OS_ONCE_DONE and
+// wakes losers; losing threads block in _os_once_gate_wait_slow().
+void
+_os_once(os_once_t *val, void *ctxt, os_function_t func)
+{
+	os_once_gate_t og = (os_once_gate_t)val;
+	os_lock_owner_t self = _os_lock_owner_get_self();
+	os_once_t v = (os_once_t)self;
+
+	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, relaxed))) {
+		func(ctxt);
+		_os_once_gate_set_value_and_broadcast(og, self, OS_ONCE_DONE);
+	} else {
+		_os_once_gate_wait_slow(&og->ogo_lock, self);
+	}
+}
+
+#if !OS_VARIANT_ONLY
+
+#pragma mark -
+#pragma mark os_lock_eliding_t
+
+#if !TARGET_OS_IPHONE
+
+#define _os_lock_eliding_t _os_lock_spin_t
+#define _os_lock_eliding_lock _os_lock_spin_lock
+#define _os_lock_eliding_trylock _os_lock_spin_trylock
+#define _os_lock_eliding_unlock _os_lock_spin_unlock
+OS_LOCK_METHODS_DECL(eliding);
+OS_LOCK_TYPE_INSTANCE(eliding);
+
+#pragma mark -
+#pragma mark os_lock_transactional_t
+
+OS_LOCK_STRUCT_DECL_INTERNAL(transactional,
+       uintptr_t volatile osl_lock;
+);
+
+#define _os_lock_transactional_t _os_lock_eliding_t
+#define _os_lock_transactional_lock _os_lock_eliding_lock
+#define _os_lock_transactional_trylock _os_lock_eliding_trylock
+#define _os_lock_transactional_unlock _os_lock_eliding_unlock
+OS_LOCK_METHODS_DECL(transactional);
+OS_LOCK_TYPE_INSTANCE(transactional);
+
+#endif // !TARGET_OS_IPHONE
+#endif // !OS_VARIANT_ONLY
+#endif // !OS_LOCK_VARIANT_ONLY
diff --git a/src/os/lock_internal.h b/src/os/lock_internal.h
new file mode 100644 (file)
index 0000000..af6a862
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_LOCK_INTERNAL__
+#define __OS_LOCK_INTERNAL__
+
+// Name helpers: internal lock struct tag and pointer typedef for a lock
+// variant `type` (struct _os_lock_<type>_s / _os_lock_<type>_t).
+#define OS_LOCK_S_INTERNAL(type) _os_lock_##type##_s
+#define OS_LOCK_T_INTERNAL(type) _os_lock_##type##_t
+#define OS_LOCK_STRUCT_INTERNAL(type) struct OS_LOCK_S_INTERNAL(type)
+// Member pair placed in the public opaque lock union: the client-visible
+// struct pointer plus the internal struct pointer, sharing storage.
+#define OS_LOCK_T_MEMBER(type) \
+		OS_LOCK_STRUCT(type) *_osl_opaque_##type; \
+		OS_LOCK_STRUCT_INTERNAL(type) *_osl_##type
+
+// Declare the internal struct for a lock variant: a const pointer to its
+// type descriptor (vtable) followed by the variant-specific fields.
+#define OS_LOCK_STRUCT_DECL_INTERNAL(type, ...) \
+		typedef struct OS_LOCK_S_INTERNAL(type) { \
+			OS_LOCK_TYPE_STRUCT(type) * const osl_type; \
+			__VA_ARGS__ \
+		} OS_LOCK_S_INTERNAL(type); \
+		typedef OS_LOCK_STRUCT_INTERNAL(type) *OS_LOCK_T_INTERNAL(type)
+
+// Prototype the lock/trylock/unlock entry points for a lock variant.
+#define OS_LOCK_METHODS_DECL(type) \
+		void _os_lock_##type##_lock(OS_LOCK_T_INTERNAL(type)); \
+		bool _os_lock_##type##_trylock(OS_LOCK_T_INTERNAL(type)); \
+		void _os_lock_##type##_unlock(OS_LOCK_T_INTERNAL(type))
+
+// Declare the type-descriptor struct: kind string plus the three method
+// pointers taking the generic os_lock_t.
+#define OS_LOCK_TYPE_STRUCT_DECL(type) \
+		OS_LOCK_TYPE_STRUCT(type) { \
+			const char *osl_kind; \
+			void (*osl_lock)(os_lock_t); \
+			bool (*osl_trylock)(os_lock_t); \
+			void (*osl_unlock)(os_lock_t); \
+		} OS_LOCK_TYPE_REF(type)
+
+// Instantiate the type descriptor, wiring the variant's methods into it.
+#define OS_LOCK_TYPE_INSTANCE(type) \
+		OS_LOCK_TYPE_STRUCT_DECL(type) = { \
+			.osl_kind = #type, \
+			.osl_lock = _os_lock_##type##_lock, \
+			.osl_trylock = _os_lock_##type##_trylock, \
+			.osl_unlock = _os_lock_##type##_unlock, \
+		}
+
+#include "os/internal.h"
+
+#endif // __OS_LOCK_INTERNAL__
diff --git a/src/os/lock_up.c b/src/os/lock_up.c
new file mode 100644 (file)
index 0000000..e03a22d
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#define OS_ATOMIC_UP 1
+#include "lock_internal.h"
+
+#ifdef OS_VARIANT_SELECTOR
+#define OS_VARIANT_ONLY 1
+#include "lock.c"
+#endif
+
+struct _os_empty_files_are_not_c_files;
diff --git a/src/os/lock_wfe.c b/src/os/lock_wfe.c
new file mode 100644 (file)
index 0000000..36dd190
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include <TargetConditionals.h>
+
+#if TARGET_OS_EMBEDDED
+
+#define OS_ATOMIC_WFE 1
+#include "lock_internal.h"
+
+#ifdef OS_LOCK_VARIANT_SELECTOR
+#define OS_VARIANT_ONLY 1
+#define OS_LOCK_VARIANT_ONLY 1
+#include "lock.c"
+#endif
+
+#endif // TARGET_OS_EMBEDDED
+
+struct _os_empty_files_are_not_c_files;
diff --git a/src/os/resolver.c b/src/os/resolver.c
new file mode 100644 (file)
index 0000000..2fa6b0a
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "lock_internal.h"
+
+#ifdef OS_VARIANT_SELECTOR
+
+#if TARGET_OS_EMBEDDED
+OS_VARIANT_UPMP_RESOLVER(OSAtomicAdd32Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicIncrement32Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicDecrement32Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicAdd64Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicIncrement64Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicDecrement64Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicAnd32Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicAnd32OrigBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicOr32Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicOr32OrigBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicXor32Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicXor32OrigBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicCompareAndSwap32Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicCompareAndSwap64Barrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicCompareAndSwapIntBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicCompareAndSwapLongBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicCompareAndSwapPtrBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicTestAndSetBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicTestAndClearBarrier)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicEnqueue)
+OS_VARIANT_UPMP_RESOLVER(OSAtomicDequeue)
+OS_VARIANT_UPMP_RESOLVER(OSMemoryBarrier)
+
+OS_VARIANT_UPMP_RESOLVER(OSSpinLockLock)
+OS_VARIANT_UPMP_RESOLVER(OSSpinLockTry)
+OS_VARIANT_UPMP_RESOLVER(OSSpinLockUnlock)
+OS_VARIANT_UPMP_RESOLVER(spin_lock)
+OS_VARIANT_UPMP_RESOLVER(spin_lock_try)
+OS_VARIANT_UPMP_RESOLVER(spin_unlock)
+OS_VARIANT_UPMP_RESOLVER(_spin_lock)
+OS_VARIANT_UPMP_RESOLVER(_spin_lock_try)
+OS_VARIANT_UPMP_RESOLVER(_spin_unlock)
+
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_lock)
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_lock_with_options)
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_trylock)
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_unlock)
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_lock_no_tsd_4libpthread)
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_unlock_no_tsd_4libpthread)
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_lock_with_options_4Libc)
+OS_VARIANT_UPMP_RESOLVER(os_unfair_lock_unlock_4Libc)
+OS_VARIANT_UPMP_RESOLVER(_os_nospin_lock_lock)
+OS_VARIANT_UPMP_RESOLVER(_os_nospin_lock_trylock)
+OS_VARIANT_UPMP_RESOLVER(_os_nospin_lock_unlock)
+
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_spin_lock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_spin_trylock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_spin_unlock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_handoff_lock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_handoff_trylock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_handoff_unlock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_unfair_lock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_unfair_trylock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_unfair_unlock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_nospin_lock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_nospin_trylock)
+OS_VARIANT_UPMP_RESOLVER_INTERNAL(_os_lock_nospin_unlock)
+
+OS_VARIANT_UPMP_RESOLVER(_os_once)
+OS_VARIANT_UPMP_RESOLVER(__os_once_reset)
+#endif // TARGET_OS_EMBEDDED
+
+#endif // OS_VARIANT_SELECTOR
+
+#ifdef OS_LOCK_VARIANT_SELECTOR
+
+#define WFE_RESOLVER(s) \
+       _OS_VARIANT_RESOLVER(s, hidden, \
+               uint32_t *_c = (void*)(uintptr_t)_COMM_PAGE_CPU_CAPABILITIES; \
+               if (*_c & kHasEvent) { \
+                       extern void OS_VARIANT(s, wfe)(void); \
+                       return &OS_VARIANT(s, wfe); \
+               } else { \
+                       extern void OS_VARIANT(s, mp)(void); \
+                       return &OS_VARIANT(s, mp); \
+               })
+
+WFE_RESOLVER(_OSSpinLockLockSlow)
+
+#endif // OS_LOCK_VARIANT_SELECTOR
+
diff --git a/src/os/resolver.h b/src/os/resolver.h
new file mode 100644 (file)
index 0000000..345a6b6
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __OS_RESOLVER_H__
+#define __OS_RESOLVER_H__
+
+#include <TargetConditionals.h>
+
+#ifdef OS_VARIANT_SELECTOR
+
+#if TARGET_OS_EMBEDDED
+#define OSAtomicAdd32Barrier \
+               OS_VARIANT(OSAtomicAdd32Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicIncrement32Barrier \
+               OS_VARIANT(OSAtomicIncrement32Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicDecrement32Barrier \
+               OS_VARIANT(OSAtomicDecrement32Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicAdd64Barrier \
+               OS_VARIANT(OSAtomicAdd64Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicIncrement64Barrier \
+               OS_VARIANT(OSAtomicIncrement64Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicDecrement64Barrier \
+               OS_VARIANT(OSAtomicDecrement64Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicAnd32Barrier \
+               OS_VARIANT(OSAtomicAnd32Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicAnd32OrigBarrier \
+               OS_VARIANT(OSAtomicAnd32OrigBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicOr32Barrier \
+               OS_VARIANT(OSAtomicOr32Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicOr32OrigBarrier \
+               OS_VARIANT(OSAtomicOr32OrigBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicXor32Barrier \
+               OS_VARIANT(OSAtomicXor32Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicXor32OrigBarrier \
+               OS_VARIANT(OSAtomicXor32OrigBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicCompareAndSwap32Barrier \
+               OS_VARIANT(OSAtomicCompareAndSwap32Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicCompareAndSwap64Barrier \
+               OS_VARIANT(OSAtomicCompareAndSwap64Barrier, OS_VARIANT_SELECTOR)
+#define OSAtomicCompareAndSwapIntBarrier \
+               OS_VARIANT(OSAtomicCompareAndSwapIntBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicCompareAndSwapLongBarrier \
+               OS_VARIANT(OSAtomicCompareAndSwapLongBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicCompareAndSwapPtrBarrier \
+               OS_VARIANT(OSAtomicCompareAndSwapPtrBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicTestAndSetBarrier \
+               OS_VARIANT(OSAtomicTestAndSetBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicTestAndClearBarrier \
+               OS_VARIANT(OSAtomicTestAndClearBarrier, OS_VARIANT_SELECTOR)
+#define OSAtomicEnqueue \
+               OS_VARIANT(OSAtomicEnqueue, OS_VARIANT_SELECTOR)
+#define OSAtomicDequeue \
+               OS_VARIANT(OSAtomicDequeue, OS_VARIANT_SELECTOR)
+#define OSMemoryBarrier \
+               OS_VARIANT(OSMemoryBarrier, OS_VARIANT_SELECTOR)
+
+#define OSSpinLockLock \
+               OS_VARIANT(OSSpinLockLock, OS_VARIANT_SELECTOR)
+#define OSSpinLockTry \
+               OS_VARIANT(OSSpinLockTry, OS_VARIANT_SELECTOR)
+#define OSSpinLockUnlock \
+               OS_VARIANT(OSSpinLockUnlock, OS_VARIANT_SELECTOR)
+#define spin_lock \
+               OS_VARIANT(spin_lock, OS_VARIANT_SELECTOR)
+#define spin_lock_try \
+               OS_VARIANT(spin_lock_try, OS_VARIANT_SELECTOR)
+#define spin_unlock \
+               OS_VARIANT(spin_unlock, OS_VARIANT_SELECTOR)
+#define _spin_lock \
+               OS_VARIANT(_spin_lock, OS_VARIANT_SELECTOR)
+#define _spin_lock_try \
+               OS_VARIANT(_spin_lock_try, OS_VARIANT_SELECTOR)
+#define _spin_unlock \
+               OS_VARIANT(_spin_unlock, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_lock \
+               OS_VARIANT(os_unfair_lock_lock, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_lock_with_options \
+               OS_VARIANT(os_unfair_lock_lock_with_options, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_trylock \
+               OS_VARIANT(os_unfair_lock_trylock, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_unlock \
+               OS_VARIANT(os_unfair_lock_unlock, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_lock_no_tsd_4libpthread \
+               OS_VARIANT(os_unfair_lock_lock_no_tsd_4libpthread, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_unlock_no_tsd_4libpthread \
+               OS_VARIANT(os_unfair_lock_unlock_no_tsd_4libpthread, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_lock_with_options_4Libc \
+               OS_VARIANT(os_unfair_lock_lock_with_options_4Libc, OS_VARIANT_SELECTOR)
+#define os_unfair_lock_unlock_4Libc \
+               OS_VARIANT(os_unfair_lock_unlock_4Libc, OS_VARIANT_SELECTOR)
+#define _os_nospin_lock_lock \
+               OS_VARIANT(_os_nospin_lock_lock, OS_VARIANT_SELECTOR)
+#define _os_nospin_lock_trylock \
+               OS_VARIANT(_os_nospin_lock_trylock, OS_VARIANT_SELECTOR)
+#define _os_nospin_lock_unlock \
+               OS_VARIANT(_os_nospin_lock_unlock, OS_VARIANT_SELECTOR)
+#define _os_once \
+               OS_VARIANT(_os_once, OS_VARIANT_SELECTOR)
+#define __os_once_reset \
+               OS_VARIANT(__os_once_reset, OS_VARIANT_SELECTOR)
+#endif // TARGET_OS_EMBEDDED
+#endif // OS_VARIANT_SELECTOR
+
+#ifdef OS_LOCK_VARIANT_SELECTOR
+#define _OSSpinLockLockSlow \
+               OS_VARIANT(_OSSpinLockLockSlow, OS_LOCK_VARIANT_SELECTOR)
+#endif // OS_LOCK_VARIANT_SELECTOR
+
+#endif // __OS_RESOLVER_H__
diff --git a/src/os/semaphore.c b/src/os/semaphore.c
new file mode 100644 (file)
index 0000000..d805afc
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "os/internal.h"
+#include <mach/mach_init.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <mach/thread_switch.h>
+
+#define OS_VERIFY_MIG(x, msg) do { \
+               if (unlikely((x) == MIG_REPLY_MISMATCH)) { \
+                       __LIBPLATFORM_CLIENT_CRASH__(x, msg); \
+               } \
+       } while (0)
+
+#define OS_SEMAPHORE_VERIFY_KR(x, msg) do { \
+               if (unlikely(x)) { \
+                       __LIBPLATFORM_CLIENT_CRASH__(x, msg); \
+               } \
+       } while (0)
+
+// Allocate a Mach semaphore with FIFO policy and an initial count of 0.
+// MIG_REPLY_MISMATCH is fatal (client crash); any other failure backs off
+// via thread_switch (SWITCH_OPTION_WAIT, 100 — presumably ms; confirm
+// against thread_switch docs) and retries until the kernel succeeds.
+os_semaphore_t
+_os_semaphore_create(void)
+{
+	semaphore_t s4;
+	kern_return_t kr;
+	while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
+			SYNC_POLICY_FIFO, 0))) {
+		OS_VERIFY_MIG(kr, "Allocating semaphore failed with MIG_REPLY_MISMATCH");
+		thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, 100);
+	}
+	return (os_semaphore_t)s4;
+}
+
+// Destroy a Mach semaphore previously created by _os_semaphore_create();
+// any failure from the kernel is treated as a client crash.
+void
+_os_semaphore_dispose(os_semaphore_t sema)
+{
+	kern_return_t kr = semaphore_destroy(mach_task_self(), (semaphore_t)sema);
+	OS_SEMAPHORE_VERIFY_KR(kr, "Destroying semaphore failed");
+}
+
+// Signal (increment) a Mach semaphore, waking one waiter if any; any
+// failure from the kernel is treated as a client crash.
+void
+_os_semaphore_signal(os_semaphore_t sema)
+{
+	kern_return_t kr = semaphore_signal((semaphore_t)sema);
+	OS_SEMAPHORE_VERIFY_KR(kr, "Signaling semaphore failed");
+}
+
+// Block on a Mach semaphore. KERN_ABORTED (e.g. interruption) simply
+// restarts the wait; any other failure is treated as a client crash.
+void
+_os_semaphore_wait(os_semaphore_t sema)
+{
+	semaphore_t s4 = (semaphore_t)sema;
+	kern_return_t kr;
+	do {
+		kr = semaphore_wait(s4);
+	} while (unlikely(kr == KERN_ABORTED));
+	OS_SEMAPHORE_VERIFY_KR(kr, "Waiting on semaphore failed");
+}
diff --git a/src/setjmp/arm/_longjmp.s b/src/setjmp/arm/_longjmp.s
new file mode 100644 (file)
index 0000000..0bd11ab
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
+ *
+ *     Implements _longjmp()
+ *
+ */
+
+#include <architecture/arm/asm_help.h>
+#include "_setjmp.h"
+#include <arm/arch.h>
+
+/*     int _longjmp(jmp_buf env, int val); */
+            
+// int _longjmp(jmp_buf env, int val)
+// In: r0 = env (save area laid out per _setjmp.h), r1 = val.
+// Restores the callee-saved state stored by __setjmp and returns `val`
+// (coerced to 1 if zero, as setjmp must never appear to return 0 here).
+ENTRY_POINT(__longjmp)
+	ldmia	r0!, { r4-r8, r10-r11, sp, lr }	// restore core callee-saved regs, sp, lr; r0 advances past them
+	vldmia	r0, { d8-d15 }			// restore callee-saved VFP regs
+	movs	r0, r1				// return value = val; sets Z flag
+	moveq	r0, #1				// if val == 0, return 1 instead
+	bx		lr			// resume at the saved setjmp call site
diff --git a/src/setjmp/arm/_setjmp.h b/src/setjmp/arm/_setjmp.h
new file mode 100644 (file)
index 0000000..2c53d1a
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ *     Copyright (c) 1998, Apple Computer Inc. All rights reserved.
+ *
+ *     File: _setjmp.h
+ *
+ *     Defines for register offsets in the save area.
+ *
+ */
+
+#if defined(__arm__)
+
+// Offsets into the jmp_buf save area. The core-register layout matches the
+// store order of __setjmp's "stmia r0!, { r4-r8, r10-r11, sp, lr }".
+#define JMP_r4		0x00
+#define JMP_r5		0x04
+#define JMP_r6		0x08
+#define JMP_r7		0x0c
+#define JMP_r8		0x10
+#define JMP_r10		0x14
+#define JMP_fp		0x18	// r11
+#define JMP_sp		0x1c
+#define JMP_lr		0x20
+
+#define JMP_VFP		0x24	// d8-d15 (8 x 8 bytes)
+
+#define JMP_sig		0x68	// saved signal mask (used by longjmp.s)
+
+#define JMP_SIGFLAG	0x70	// nonzero if sigsetjmp saved the mask
+
+#else
+#error architecture not supported
+#endif
diff --git a/src/setjmp/arm/_setjmp.s b/src/setjmp/arm/_setjmp.s
new file mode 100644 (file)
index 0000000..433f8c9
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
+ *
+ *     Implements _setjmp()
+ *
+ */
+
+#include <architecture/arm/asm_help.h>
+#include "_setjmp.h"
+#include <arm/arch.h>
+
+// int _setjmp(jmp_buf env)
+// In: r0 = env. Saves the callee-saved state a longjmp must restore
+// (core regs, sp, lr, and VFP d8-d15) and returns 0 on the direct call.
+ENTRY_POINT(__setjmp)
+	stmia	r0!, { r4-r8, r10-r11, sp, lr }	// save core callee-saved regs, sp, lr; r0 advances past them
+	vstmia	r0, { d8-d15 }			// save callee-saved VFP regs
+	mov	r0, #0				// direct invocation returns 0
+	bx	lr
diff --git a/src/setjmp/arm/longjmp.s b/src/setjmp/arm/longjmp.s
new file mode 100644 (file)
index 0000000..36bab5c
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
+ *
+ *     File: sys/arm/longjmp.s
+ *
+ *     Implements siglongjmp(), longjmp(), _longjmp() 
+ *
+ */
+
+#include <architecture/arm/asm_help.h>
+#include "_setjmp.h"
+
+/*
+ *     longjmp routines
+ */
+
+/*     void siglongjmp(sigjmp_buf env, int val); */
+
+// void siglongjmp(sigjmp_buf env, int val)
+// If sigsetjmp saved a signal mask (JMP_SIGFLAG != 0), behave as longjmp;
+// otherwise skip mask restoration and go straight to __longjmp.
+ENTRY_POINT(_siglongjmp)
+	ldr	r2, [ r0, #JMP_SIGFLAG ]	//  load sigflag
+	cmp	r2, #0				// test if zero
+	beq	L__exit				// if zero do _longjmp()
+	// else *** fall through *** to longjmp()
+
+/*	void longjmp(jmp_buf env, int val); */
+
+// Restores the saved signal mask via sigprocmask(SIG_SETMASK, &mask, NULL)
+// before transferring control with __longjmp.
+ENTRY_POINT(_longjmp)
+#ifdef __ARM_ARCH_7K__
+	sub sp, sp, #16					// armv7k stack is 16-byte aligned.
+#else
+	sub sp, sp, #4
+#endif
+	mov	r6, r0						// preserve args across _sigprocmask
+	mov	r8, r1
+	ldr	r0, [ r6, #JMP_sig ]		// restore the signal mask
+	mov	r1, sp						// set = pointer to mask copy on stack
+	str	r0, [sp]					// spill saved mask value for sigprocmask
+	movs	r0, #3					// SIG_SETMASK
+	movs	r2, #0					// oset = NULL (old mask not wanted)
+	CALL_EXTERNAL(_sigprocmask)
+	mov	r0, r6						// reload env/val for __longjmp
+	mov	r1, r8
+#ifdef __ARM_ARCH_7K__
+	add sp, sp, #16					// armv7k stack is 16-byte aligned.
+#else
+	add sp, sp, #4
+#endif
+L__exit:
+	BRANCH_EXTERNAL(__longjmp)
diff --git a/src/setjmp/arm/setjmp.s b/src/setjmp/arm/setjmp.s
new file mode 100644 (file)
index 0000000..92399ec
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
+ *
+ *     File: sys/arm/setjmp.s
+ *
+ *     Implements sigsetjmp(), setjmp(), _setjmp()
+ *
+ */
+
+#include <architecture/arm/asm_help.h>
+#include "_setjmp.h"
+
+/*
+ * setjmp  routines
+ */
+
+/*     int sigsetjmp(sigjmp_buf env, int savemask); */
+
+/*
+ * Record 'savemask' in the jmp_buf so siglongjmp() later knows whether
+ * a signal mask was captured.  savemask == 0 skips straight to
+ * __setjmp; otherwise fall through to setjmp() to save the mask too.
+ */
+ENTRY_POINT(_sigsetjmp)
+       str     r1, [ r0, #JMP_SIGFLAG ]        // save sigflag
+       cmp     r1, #0                          // test if r1 is 0
+       beq     L__exit                         // if r1 == 0 do _setjmp()
+       // else *** fall through ***  to setjmp()
+
+/*     int setjmp(jmp_buf env); */
+
+/*
+ * Capture the current signal mask into env->sig via
+ * sigprocmask(1 /-SIG_BLOCK-/, NULL, &env->sig); a NULL 'set' means
+ * the mask is only queried, not changed.  lr and r8 are parked in
+ * their jmp_buf slots around the call (the call clobbers them) and
+ * reloaded afterwards before joining __setjmp.
+ */
+ENTRY_POINT(_setjmp)
+       str     lr, [ r0, #JMP_lr ]
+       str     r8, [ r0, #JMP_r8 ]
+       mov     r8, r0
+       mov     r0, #1                          // get the previous signal mask
+       mov     r1, #0                          //
+       add     r2, r8, #JMP_sig                // get address where previous mask needs to be
+       CALL_EXTERNAL(_sigprocmask)             // make a syscall to get mask
+       mov     r0, r8                          // restore jmp_buf ptr
+       ldr     r8, [ r0,  #JMP_r8 ] 
+       ldr     lr, [ r0,  #JMP_lr ] 
+L__exit:
+       BRANCH_EXTERNAL(__setjmp)
diff --git a/src/setjmp/arm64/setjmp.s b/src/setjmp/arm64/setjmp.s
new file mode 100644 (file)
index 0000000..890179d
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Byte offsets of the saved state within the arm64 jmp_buf:
+ * AAPCS64 callee-saved GPR pairs (x19..x28), then x29/lr and fp/sp,
+ * then the callee-saved low 64 bits of v8-v15 (d8-d15), and finally
+ * the saved signal mask and the sigsetjmp() savemask flag.
+ */
+#define        JMP_r19_20      #0x00
+#define        JMP_r21_22      #0x10
+#define        JMP_r23_24      #0x20
+#define        JMP_r25_26      #0x30
+#define        JMP_r27_28      #0x40
+#define        JMP_r29_lr      #0x50
+#define        JMP_fp_sp       #0x60
+
+#define        JMP_d8_d9       #0x70
+#define        JMP_d10_d11     #0x80
+#define        JMP_d12_d13     #0x90
+#define        JMP_d14_d15     #0xA0
+#define        JMP_sig         #0xB0
+#define        JMP_sigflag     #0xB8
+
+#include <architecture/arm/asm_help.h>
+
+/* int _setjmp(jmp_buf env); */
+/*
+ * Store every AAPCS64 callee-saved register (x19-x28, x29/fp, lr, sp,
+ * d8-d15) into the jmp_buf and return 0 for the direct call; the
+ * second, longjmp-driven return happens in __longjmp below.
+ */
+ENTRY_POINT(__setjmp)
+       add             x1, sp, #0      /* can't STP from sp */
+       stp             x19, x20,       [x0, JMP_r19_20]
+       stp             x21, x22,       [x0, JMP_r21_22]
+       stp             x23, x24,       [x0, JMP_r23_24]
+       stp             x25, x26,       [x0, JMP_r25_26]
+       stp             x27, x28,       [x0, JMP_r27_28]
+       stp             x29, lr,        [x0, JMP_r29_lr]
+       stp             fp, x1,         [x0, JMP_fp_sp]
+       stp             d8, d9,         [x0, JMP_d8_d9]
+       stp             d10, d11,       [x0, JMP_d10_d11]
+       stp             d12, d13,       [x0, JMP_d12_d13]
+       stp             d14, d15,       [x0, JMP_d14_d15]
+       mov             x0, #0
+       ret
+
+/* void _longjmp(jmp_buf env, int val); */
+/*
+ * Reload the register state saved by __setjmp and "return" val at the
+ * matching setjmp call site.  Per the longjmp() contract a val of 0
+ * is coerced to 1 so the setjmp site can tell the second return apart.
+ */
+ENTRY_POINT(__longjmp)
+       ldp             x19, x20,       [x0, JMP_r19_20]
+       ldp             x21, x22,       [x0, JMP_r21_22]
+       ldp             x23, x24,       [x0, JMP_r23_24]
+       ldp             x25, x26,       [x0, JMP_r25_26]
+       ldp             x27, x28,       [x0, JMP_r27_28]
+       ldp             x29, lr,        [x0, JMP_r29_lr]
+       ldp             fp, x2,         [x0, JMP_fp_sp]
+       ldp             d8, d9,         [x0, JMP_d8_d9]
+       ldp             d10, d11,       [x0, JMP_d10_d11]
+       ldp             d12, d13,       [x0, JMP_d12_d13]
+       ldp             d14, d15,       [x0, JMP_d14_d15]
+       add             sp, x2, #0      /* restore the saved stack pointer */
+       mov             x0, x1
+       cmp             x0, #0          /* longjmp returns 1 if val is 0 */
+       b.ne    1f
+       add             x0, x0, #1
+1:     ret
+
+/* int sigsetjmp(sigjmp_buf env, int savemask); */
+/*
+ * Record savemask in the jmp_buf; zero means behave exactly like
+ * _setjmp(), nonzero falls through to setjmp() so the signal mask is
+ * captured as well.
+ */
+ENTRY_POINT(_sigsetjmp)
+       str             x1, [x0, JMP_sigflag]
+       cmp             x1, #0
+       b.ne    1f
+       b               __setjmp
+1:
+       /* else, fall through */
+
+/* int setjmp(jmp_buf env); */
+/*
+ * Query the current signal mask into env->sig via
+ * sigprocmask(1 /-SIG_BLOCK-/, NULL, &env->sig); the NULL 'set' means
+ * the mask is only read.  x21 and lr are parked in the first jmp_buf
+ * slot across the call -- __setjmp overwrites that slot with the real
+ * x19/x20 values immediately afterwards -- then control joins __setjmp.
+ */
+ENTRY_POINT(_setjmp)
+       stp             x21, lr, [x0]
+       mov             x21, x0
+       
+       mov             x0, #1
+       mov             x1, #0
+       add             x2, x21, JMP_sig
+       CALL_EXTERNAL(_sigprocmask)
+
+       mov             x0, x21
+       ldp             x21, lr, [x0]
+       b               __setjmp
+
+/* void siglongjmp(sigjmp_buf env, int val); */
+/*
+ * If sigsetjmp() did not save the signal mask (env->sigflag == 0)
+ * this is just _longjmp(); otherwise fall through to longjmp(), which
+ * restores the saved mask first.
+ */
+ENTRY_POINT(_siglongjmp)
+       ldr             x2, [x0, JMP_sigflag]
+       cmp             x2, #0
+       b.ne    1f
+       b               __longjmp
+1:
+       /* else, fall through */
+
+/* void longjmp(jmp_buf env, int val); */
+/*
+ * Restore the saved signal mask with
+ * sigprocmask(SIG_SETMASK, &env->sig, NULL), then branch to __longjmp.
+ * The mask is spilled into a 16-byte-aligned temporary stack slot to
+ * serve as the 'set' argument.
+ */
+ENTRY_POINT(_longjmp)
+       sub     sp, sp, #16
+       mov             x21, x0                                 // x21/x22 will be restored by __longjmp
+       mov             x22, x1
+       ldr             x0, [x21, JMP_sig]              // restore the signal mask
+       str     x0, [sp, #8]
+       add     x1, sp, #8                              // set
+       orr     w0, wzr, #0x3                   // SIG_SETMASK
+       movz    x2, #0                                  // oset
+       CALL_EXTERNAL(_sigprocmask)
+       mov             x0, x21
+       mov             x1, x22
+       add     sp, sp, #16
+       b               __longjmp
diff --git a/src/setjmp/generic/setjmperr.c b/src/setjmp/generic/setjmperr.c
new file mode 100644 (file)
index 0000000..1c2e207
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1980 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms are permitted
+ * provided that the above copyright notice and this paragraph are
+ * duplicated in all such forms and that any documentation,
+ * advertising materials, and other materials related to such
+ * distribution and use acknowledge that the software was developed
+ * by the University of California, Berkeley.  The name of the
+ * University may not be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)setjmperr.c        5.4 (Berkeley) 6/27/88";
+#endif /* LIBC_SCCS and not lint */
+
+#include <unistd.h>
+#include <TargetConditionals.h>
+
+#if !TARGET_OS_IPHONE && (defined(__i386__) || defined(__x86_64__))
+/*
+ * This routine is called from longjmp() when an error occurs.
+ * Programs that wish to exit gracefully from this error may
+ * write their own versions.
+ * If this routine returns, the program is aborted.
+ */
+
+void
+longjmperror()
+{
+#define        ERRMSG  "longjmp botch\n"
+       /* Raw write to fd 2 (stderr); avoids stdio.  sizeof - 1 drops the NUL. */
+       write(2, ERRMSG, sizeof(ERRMSG) - 1);
+}
+#endif
diff --git a/src/setjmp/generic/sigtramp.c b/src/setjmp/generic/sigtramp.c
new file mode 100644 (file)
index 0000000..9a1bf76
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 1999, 2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
+ */
+
+#define _XOPEN_SOURCE 600
+
+#import        <sys/types.h>
+#import        <signal.h>
+#import        <unistd.h>
+#import        <ucontext.h>
+#import        <mach/thread_status.h>
+#include <TargetConditionals.h>
+
+extern int __sigreturn(ucontext_t *, int);
+
+/*
+ * sigvec registers _sigtramp as the handler for any signal requiring
+ * user-mode intervention.  All _sigtramp does is find the real handler,
+ * calls it, then sigreturn's.
+ *
+ * Note that the kernel saves/restores all of our register state.
+ */
+
+/* On i386, i386/sys/_sigtramp.s defines this. There is no in_sigtramp on arm */
+#if defined(__DYNAMIC__) && defined(__x86_64__)
+/* NOTE(review): presumably incremented/decremented around the handler
+   call by the x86_64 assembly _sigtramp (not in this file) -- confirm. */
+__attribute__((visibility("hidden")))
+int __in_sigtramp = 0;
+#endif
+
+/* Ucontext "flavor" constants; these definitions must match the kernel's. */
+#define UC_TRAD                        1
+#define UC_FLAVOR              30
+#if defined(__ppc__) || defined(__ppc64__)
+#define UC_TRAD64              20
+#define UC_TRAD64_VEC          25
+#define UC_FLAVOR_VEC          35
+#define UC_FLAVOR64            40
+#define UC_FLAVOR64_VEC                45
+#define UC_DUAL                        50
+#define UC_DUAL_VEC            55
+
+ /* The following are valid mcontext sizes */
+#define UC_FLAVOR_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
+
+#define UC_FLAVOR_VEC_SIZE ((PPC_THREAD_STATE_COUNT + PPC_EXCEPTION_STATE_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
+
+#define UC_FLAVOR64_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int))
+
+#define UC_FLAVOR64_VEC_SIZE ((PPC_THREAD_STATE64_COUNT + PPC_EXCEPTION_STATE64_COUNT + PPC_FLOAT_STATE_COUNT + PPC_VECTOR_STATE_COUNT) * sizeof(int))
+#endif
+
+/* Flags passed to __sigreturn() to toggle the kernel's notion of whether
+   the thread is currently on the alternate signal stack (see below). */
+#define UC_SET_ALT_STACK       0x40000000
+#define UC_RESET_ALT_STACK     0x80000000
+
+/*
+ * Reset the kernel's idea of the use of an alternate stack; this is used by
+ * both longjmp() and siglongjmp().  Nothing other than this reset is needed,
+ * since restoring the registers and other operations that would normally be
+ * done by sigreturn() are handled in user space, so we do not pass a user
+ * context (in PPC, a user context is not the same as a jmpbuf mcontext, due
+ * to having more than one set of registers, etc., for the various 32/64 etc.
+ * contexts)..
+ *
+ * 'set' is the ss_flags value captured at setjmp() time: SS_ONSTACK
+ * means the setjmp ran on the alternate stack, so re-arm it; anything
+ * else clears the kernel's alt-stack flag.
+ */
+void
+_sigunaltstack(int set)
+{
+        /* sigreturn(uctx, ctxstyle); */
+       /* syscall (SYS_SIGRETURN, uctx, ctxstyle); */
+       __sigreturn (NULL, (set == SS_ONSTACK) ? UC_SET_ALT_STACK : UC_RESET_ALT_STACK);
+}
+
+/* On these architectures, _sigtramp is implemented in assembly to
+   ensure it matches its DWARF unwind information.  */
+#if !defined (__i386__) && !defined (__x86_64__)
+
+/*
+ * Signal trampoline: the kernel enters here for every caught signal.
+ * Dispatch to the user handler in either the traditional (sa_handler)
+ * or siginfo (sa_sigaction) style -- the sa_* names are the accessor
+ * macros for the __sigaction_u union -- then call __sigreturn() with
+ * UC_FLAVOR to restore the interrupted context.
+ */
+void
+_sigtramp(
+       union __sigaction_u __sigaction_u,
+       int                     sigstyle,
+       int                     sig,
+       siginfo_t               *sinfo,
+       ucontext_t              *uctx
+) {
+       int ctxstyle = UC_FLAVOR;
+
+       if (sigstyle == UC_TRAD)
+               sa_handler(sig);
+       else {
+#if TARGET_OS_WATCH
+               // <rdar://problem/22016014>
+               sa_sigaction(sig, sinfo, NULL);
+#else
+               sa_sigaction(sig, sinfo, uctx);
+#endif
+       }
+
+        /* sigreturn(uctx, ctxstyle); */
+       /* syscall (SYS_SIGRETURN, uctx, ctxstyle); */
+       __sigreturn (uctx, ctxstyle);
+}
+
+#endif /* not i386 nor x86_64 */
diff --git a/src/setjmp/i386/_setjmp.s b/src/setjmp/i386/_setjmp.s
new file mode 100644 (file)
index 0000000..5828337
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
+ *
+ * HISTORY
+ *  20-Apr-92    Bruce Martin (bmartin@next.com)
+ *      Created from M68K sources.
+ */
+
+/*
+ * C library -- _setjmp, _longjmp
+ *
+ *     _longjmp(a,v)
+ * will generate a "return(v)" from
+ * the last call to
+ *     _setjmp(a)
+ * by restoring registers from the stack,
+ * The previous signal state is NOT restored.
+ *
+ */
+
+#include <architecture/i386/asm_help.h>
+
+// The FP control word is actually two bytes, but there's no harm in
+// using four bytes for it and keeping the struct aligned.
+// Byte offsets into the i386 jmp_buf (the code below treats the
+// jmp_buf as a struct sigcontext).
+#define JB_FPCW         0
+#define JB_MASK         4
+#define JB_MXCSR        8
+#define JB_EBX          12
+#define JB_ONSTACK      16
+#define JB_EDX          20
+#define JB_EDI          24
+#define JB_ESI          28
+#define JB_EBP          32
+#define JB_ESP          36
+#define JB_SS           40
+#define JB_EFLAGS       44
+#define JB_EIP          48
+#define JB_CS           52
+#define JB_DS           56
+#define JB_ES           60
+#define JB_FS           64
+#define JB_GS           68
+
+// int _setjmp(jmp_buf env) -- save the callee-saved registers
+// (ebx/edi/esi/ebp), the FP control word, MXCSR, the return EIP and
+// the post-return ESP, then return 0.  The signal state is NOT
+// touched here (see setjmp.s for the mask-saving variants).
+LEAF(__setjmp, 0)
+        movl    4(%esp), %ecx           // jmp_buf (struct sigcontext *)
+
+        // Build the jmp_buf
+        fnstcw  JB_FPCW(%ecx)                  // Save the FP control word
+        stmxcsr JB_MXCSR(%ecx)                 // Save the MXCSR
+        movl    %ebx, JB_EBX(%ecx)
+        movl    %edi, JB_EDI(%ecx)
+        movl    %esi, JB_ESI(%ecx)
+        movl    %ebp, JB_EBP(%ecx)
+
+        // EIP is set to the frame return address value
+        movl    (%esp), %eax
+        movl    %eax, JB_EIP(%ecx)
+        // ESP is set to the frame return address plus 4
+        leal    4(%esp), %eax
+        movl    %eax, JB_ESP(%ecx)
+
+        // return 0
+        xorl    %eax, %eax
+        ret
+
+
+// void _longjmp(jmp_buf env, int val) -- restore the state saved by
+// __setjmp and "return" val (coerced to 1 when val == 0, per the
+// longjmp contract) at the setjmp site by jumping through the saved EIP.
+LEAF(__longjmp, 0)
+       fninit                          // Clear all FP exceptions
+       movl    4(%esp), %ecx           // jmp_buf (struct sigcontext *)
+       movl    8(%esp), %eax           // return value
+       testl   %eax, %eax
+       jnz 1f
+       incl    %eax                    // val == 0 -> return 1
+
+       // general registers
+1:     movl    JB_EBX(%ecx), %ebx
+       movl    JB_ESI(%ecx), %esi
+       movl    JB_EDI(%ecx), %edi
+       movl    JB_EBP(%ecx), %ebp
+       movl    JB_ESP(%ecx), %esp
+
+       fldcw   JB_FPCW(%ecx)                   // Restore FP control word
+       ldmxcsr JB_MXCSR(%ecx)                  // Restore the MXCSR
+
+       cld                                     // Make sure DF is reset
+       jmp     *JB_EIP(%ecx)
diff --git a/src/setjmp/i386/_sigtramp.s b/src/setjmp/i386/_sigtramp.s
new file mode 100644 (file)
index 0000000..edf1d6b
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2007, 2011 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <sys/syscall.h>
+
+#if defined(__DYNAMIC__)
+#if IGNORE_RDAR_13625839
+       .private_extern ___in_sigtramp
+#endif
+       .globl ___in_sigtramp
+       .data
+       .align 2
+/* Nonzero while a handler invoked via __sigtramp is running; it is
+   incremented/decremented around the handler call below -- presumably
+   consulted by unwinders/crash reporters (TODO confirm). */
+___in_sigtramp:
+       .space 4
+#endif
+
+#define UC_TRAD                        1
+#define UC_FLAVOR              30
+
+/* Structure fields for ucontext and mcontext.  */
+/* NOTE(review): these byte offsets must track the i386 ucontext /
+   mcontext structure layouts in the SDK headers -- verify on change. */
+#define UCONTEXT_UC_MCONTEXT   28
+
+#define MCONTEXT_ES_EXCEPTION  0
+#define MCONTEXT_SS_EAX                12
+#define MCONTEXT_SS_EBX                16
+#define MCONTEXT_SS_ECX                20
+#define MCONTEXT_SS_EDX                24
+#define MCONTEXT_SS_EDI                28
+#define MCONTEXT_SS_ESI                32
+#define MCONTEXT_SS_EBP                36
+#define MCONTEXT_SS_ESP                40
+#define MCONTEXT_SS_EFLAGS     48
+#define MCONTEXT_SS_EIP                52
+
+/* register use:
+       %ebp    frame pointer
+       %ebx    Address of "L00000000001$pb"
+       %esi    uctx
+       
+void
+_sigtramp(
+       union __sigaction_u __sigaction_u,
+       int                     sigstyle,
+       int                     sig,
+       siginfo_t               *sinfo,
+       ucontext_t              *uctx
+)
+*/
+
+       .globl __sigtramp
+       .text
+       .align 4,0x90
+/*
+ * i386 signal trampoline.  Builds a minimal frame, bumps
+ * ___in_sigtramp around the handler call (so a live signal frame is
+ * detectable), invokes the handler as handler(sig, sinfo, uctx), and
+ * finally issues the sigreturn syscall directly -- it never returns.
+ */
+__sigtramp:
+Lstart:
+       /* Although this routine does not need any stack frame, various parts
+          of the OS can't analyse the stack without them.  */
+       pushl   %ebp
+       movl    %esp, %ebp
+       subl    $24, %esp
+       movl    8(%ebp), %ecx   # get '__sigaction_u'
+#if defined(__DYNAMIC__)
+       call    0f              # call/pop idiom: materialize PIC base in %ebx
+"L00000000001$pb":
+0:
+       popl    %ebx
+       incl    ___in_sigtramp-"L00000000001$pb"(%ebx)  # entering handler
+#endif
+       movl    16(%ebp), %edx  # get 'sig'
+       movl    20(%ebp), %eax  # get 'sinfo'
+       movl    24(%ebp), %esi  # get 'uctx'
+       /* Call the signal handler.
+          Some variants are not supposed to get the last two parameters,
+          but the test to prevent this is more expensive than just passing
+          them.  */
+       movl    %esi, 8(%esp)
+       movl    %eax, 4(%esp)
+       movl    %edx, (%esp)
+       call    *%ecx
+#if defined(__DYNAMIC__)
+       decl    ___in_sigtramp-"L00000000001$pb"(%ebx)  # handler done
+#endif
+       movl    %esi, 4(%esp)
+       movl    $ UC_FLAVOR, 8(%esp)
+       movl    $ SYS_sigreturn, %eax
+       int     $0x80           # sigreturn(uctx, UC_FLAVOR); does not return
+Lend:
+
+/* DWARF unwind table #defines.  */
+/* Call-frame instruction opcodes from the DWARF standard. */
+#define DW_CFA_advance_loc_4 0x44
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_expression 0x0F
+#define DW_CFA_expression 0x10
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_offset(column) 0x80+(column)
+
+/* DWARF expression #defines.  */
+/* Stack-machine expression opcodes from the DWARF standard. */
+#define DW_OP_deref 0x06
+#define DW_OP_const1u 0x08
+#define DW_OP_dup 0x12
+#define DW_OP_drop 0x13
+#define DW_OP_over 0x14
+#define DW_OP_pick 0x15
+#define DW_OP_swap 0x16
+#define DW_OP_rot 0x17
+#define DW_OP_abs 0x19
+#define DW_OP_and 0x1a
+#define DW_OP_div 0x1b
+#define DW_OP_minus 0x1c
+#define DW_OP_mod 0x1d
+#define DW_OP_mul 0x1e
+#define DW_OP_neg 0x1f
+#define DW_OP_not 0x20
+#define DW_OP_or 0x21
+#define DW_OP_plus 0x22
+#define DW_OP_plus_uconst 0x23
+#define DW_OP_shl 0x24
+#define DW_OP_shr 0x25
+#define DW_OP_shra 0x26
+#define DW_OP_xor 0x27
+#define DW_OP_skip 0x2f
+#define DW_OP_bra 0x28
+#define DW_OP_eq 0x29
+#define DW_OP_ge 0x2A
+#define DW_OP_gt 0x2B
+#define DW_OP_le 0x2C
+#define DW_OP_lt 0x2D
+#define DW_OP_ne 0x2E
+#define DW_OP_lit(n) 0x30+(n)
+#define DW_OP_breg(n) 0x70+(n)
+#define DW_OP_deref_size 0x94
+
+/* The location expression we'll use.  */
+/* Emits a DW_CFA_expression rule: register 'regno' is saved at
+   *(uctx->uc_mcontext) + offs, where DWARF reg 6 (%esi) holds uctx
+   (see the register-use comment above __sigtramp). */
+
+#define loc_expr_for_reg(regno, offs)                          \
+       .byte DW_CFA_expression, regno, 5 /* block length */,   \
+        DW_OP_breg(6), UCONTEXT_UC_MCONTEXT, DW_OP_deref,      \
+        DW_OP_plus_uconst, offs
+
+       /* Unwind tables.  */
+       /* Hand-written CIE/FDE pair for __sigtramp: lets the unwinder
+          recover the interrupted frame's registers from the saved
+          mcontext rather than from the trampoline's own frame.  */
+       .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
+EH_frame1:
+       .set L$set$0,LECIE1-LSCIE1
+       .long L$set$0   # Length of Common Information Entry
+LSCIE1:
+       .long   0       # CIE Identifier Tag
+       .byte   0x1     # CIE Version
+       .ascii "zRS\0"  # CIE Augmentation
+       .byte   0x1     # uleb128 0x1; CIE Code Alignment Factor
+       .byte   0x7c    # sleb128 -4; CIE Data Alignment Factor
+       .byte   0x8     # CIE RA Column
+       .byte   0x1     # uleb128 0x1; Augmentation size
+       .byte   0x10    # FDE Encoding (pcrel)
+       .byte   DW_CFA_def_cfa
+       .byte   0x5     # uleb128 0x5
+       .byte   0x4     # uleb128 0x4
+       .byte   DW_CFA_offset(8)
+       .byte   0x1     # uleb128 0x1
+       .byte   DW_CFA_offset(8)        // double DW_CFA_offset (eip, -4) tells linker to not make compact unwind
+       .byte   0x1     # uleb128 0x1
+       .align 2
+LECIE1:
+       .globl _sigtramp.eh
+_sigtramp.eh:
+LSFDE1:
+       .set L$set$1,LEFDE1-LASFDE1
+       .long L$set$1   # FDE Length
+LASFDE1:
+       .long   LASFDE1-EH_frame1       # FDE CIE offset
+       .long   Lstart-.        # FDE initial location
+       .set L$set$2,Lend-Lstart
+       .long   L$set$2 # FDE address range
+       .byte   0x0     # uleb128 0x0; Augmentation size
+
+       /* Now for the expressions, which all compute
+          uctx->uc_mcontext->register
+          for each register.
+          
+          Describe even the registers that are not call-saved because they
+          might be being used in the prologue to save other registers.
+          Only integer registers are described at present.    */
+
+       loc_expr_for_reg (0, MCONTEXT_SS_EAX)
+       loc_expr_for_reg (1, MCONTEXT_SS_ECX)
+       loc_expr_for_reg (2, MCONTEXT_SS_EDX)
+       loc_expr_for_reg (3, MCONTEXT_SS_EBX)
+       loc_expr_for_reg (4, MCONTEXT_SS_EBP) # note that GCC switches
+       loc_expr_for_reg (5, MCONTEXT_SS_ESP) # DWARF registers 4 & 5
+       loc_expr_for_reg (6, MCONTEXT_SS_ESI)
+       loc_expr_for_reg (7, MCONTEXT_SS_EDI)
+       loc_expr_for_reg (9, MCONTEXT_SS_EFLAGS)
+
+       /* The Intel architecture classifies exceptions into three categories,
+          'faults' which put the address of the faulting instruction
+          in EIP, 'traps' which put the following instruction in EIP,
+          and 'aborts' which don't typically report the instruction
+          causing the exception.
+
+          The traps are #BP and #OF.  */
+
+       /* Value rule for EIP (DWARF reg 8): computed, not stored.  */
+       .byte DW_CFA_val_expression, 8
+       .set L$set$3,Lpc_end-Lpc_start
+       .byte L$set$3
+Lpc_start:
+       /* Push the mcontext address twice.  */
+       .byte DW_OP_breg(6), UCONTEXT_UC_MCONTEXT, DW_OP_deref, DW_OP_dup
+       /* Find the value of EIP.  */
+       .byte   DW_OP_plus_uconst, MCONTEXT_SS_EIP, DW_OP_deref, DW_OP_swap
+       /* Determine the exception type.  */
+       .byte   DW_OP_plus_uconst, MCONTEXT_ES_EXCEPTION, DW_OP_deref
+       /* Check whether it is #BP (3) or #OF (4).  */
+       .byte   DW_OP_dup, DW_OP_lit(3), DW_OP_ne
+       .byte    DW_OP_swap, DW_OP_lit(4), DW_OP_ne, DW_OP_and
+       /* If it is, then add 1 to the instruction address, so as to point
+          within or past the faulting instruction.  */
+       .byte   DW_OP_plus
+Lpc_end:       
+
+       /* The CFA will have been saved as the value of ESP (it is not
+          ESP+4).  */
+       .byte DW_CFA_def_cfa_expression
+       .set L$set$4,Lcfa_end-Lcfa_start
+       .byte L$set$4
+Lcfa_start:
+       .byte DW_OP_breg(6), UCONTEXT_UC_MCONTEXT, DW_OP_deref
+       .byte  DW_OP_plus_uconst, MCONTEXT_SS_ESP, DW_OP_deref
+Lcfa_end:
+
+       .align 2
+LEFDE1:
+               
+       .subsections_via_symbols
diff --git a/src/setjmp/i386/setjmp.s b/src/setjmp/i386/setjmp.s
new file mode 100644 (file)
index 0000000..4fd84aa
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
+ */
+/*
+ * NeXT 386 setjmp/longjmp
+ *
+ * Written by Bruce Martin, NeXT Inc. 4/9/92
+ */
+
+/*
+ * C library -- setjmp, longjmp
+ *
+ *     longjmp(a,v)
+ * will generate a "return(v)" from
+ * the last call to
+ *     setjmp(a)
+ * by restoring registers from the stack,
+ * The previous value of the signal mask is
+ * restored.
+ *
+ */
+
+#include <architecture/i386/asm_help.h>
+
+// The FP control word is actually two bytes, but there's no harm in
+// using four bytes for it and keeping the struct aligned.
+// Byte offsets into the jmp_buf; the first 18 slots must match
+// i386/_setjmp.s, JB_SAVEMASK is the extra sigjmp_buf slot.
+#define JB_FPCW                0
+#define JB_MASK                4
+#define JB_MXCSR       8
+#define JB_EBX         12
+#define JB_ONSTACK     16
+#define JB_EDX         20
+#define JB_EDI         24
+#define JB_ESI         28
+#define JB_EBP         32
+#define JB_ESP         36
+#define JB_SS          40
+#define JB_EFLAGS      44
+#define JB_EIP         48
+#define JB_CS          52
+#define JB_DS          56
+#define JB_ES          60
+#define JB_FS          64
+#define JB_GS          68
+#define JB_SAVEMASK    72      // sigsetjmp/siglongjmp only
+
+// int sigsetjmp(sigjmp_buf env, int savemask) -- record savemask in
+// the spare jmp_buf slot; nonzero does the full setjmp() (mask +
+// alt-stack state), zero does the plain _setjmp().
+LEAF(_sigsetjmp, 0)
+       movl    4(%esp), %eax           // sigjmp_buf * jmpbuf; 
+       movl    8(%esp), %ecx           // int savemask;
+       movl    %ecx, JB_SAVEMASK(%eax) // jmpbuf[_JBLEN] = savemask;
+       cmpl    $0, %ecx                // if savemask != 0
+       jne     _setjmp                 //     setjmp(jmpbuf); 
+       jmp L_do__setjmp                // else _setjmp(jmpbuf); 
+       
+// int setjmp(jmp_buf env) -- capture the current signal mask via
+// sigprocmask(SIG_BLOCK, NULL, &oset) and the alternate-signal-stack
+// flags via sigaltstack(NULL, &oss), store both in the jmp_buf, then
+// branch to __setjmp for the register state.
+LEAF(_setjmp, 0)
+       subl    $16, %esp               // make space for return from sigprocmask 
+                                                       // + 12 to align stack
+       pushl   %esp                    // oset
+       pushl   $0                              // set = NULL
+       pushl   $1                              // how = SIG_BLOCK
+       CALL_EXTERN(_sigprocmask)
+       movl    12(%esp),%eax   // save the mask
+       addl    $28, %esp               // restore original esp
+       movl    4(%esp), %ecx           // jmp_buf (struct sigcontext *)
+       movl    %eax, JB_MASK(%ecx)
+
+       subl    $20, %esp               // temporary struct sigaltstack + 8 to 
+                                                       // align stack
+       pushl   %esp                    // oss
+       pushl   $0                      // ss == NULL
+       CALL_EXTERN(___sigaltstack)     // get alternate signal stack info
+       movl    16(%esp), %eax          // oss->ss_flags
+       addl    $28, %esp                       // Restore %esp
+       movl    %eax, JB_ONSTACK(%ecx)
+
+L_do__setjmp:
+       BRANCH_EXTERN(__setjmp)
+
+// void siglongjmp(sigjmp_buf env, int val) -- if sigsetjmp() saved
+// the signal state, behave as longjmp(); otherwise as _longjmp().
+LEAF(_siglongjmp, 0)
+       movl 4(%esp), %eax              // sigjmp_buf * jmpbuf; 
+       cmpl $0, JB_SAVEMASK(%eax)      // if jmpbuf[_JBLEN] != 0
+       jne     _longjmp                //     longjmp(jmpbuf, var); 
+       jmp L_do__longjmp               // else _longjmp(jmpbuf, var); 
+       
+// void longjmp(jmp_buf env, int val) -- restore the saved signal mask
+// via sigprocmask(SIG_SETMASK, &mask, NULL) and the saved alternate-
+// stack state via _sigunaltstack(ss_flags), then branch to __longjmp
+// to restore registers and deliver val.
+LEAF(_longjmp, 0)
+       movl    4(%esp), %ecx           // address of jmp_buf (saved context)
+       movl    JB_MASK(%ecx),%eax      // get the mask
+       subl    $12, %esp                       // Make sure the stack is 16-byte 
+                                                               // aligned when we call sigprocmask
+       pushl   %eax                            // store the mask
+       movl    %esp, %edx                      // save the address where we stored the mask
+       pushl   $0                                      // oset = NULL
+       pushl   %edx                            // set
+       pushl   $3                                      // how = SIG_SETMASK
+       CALL_EXTERN_AGAIN(_sigprocmask)
+       addl    $28, %esp                       // restore original esp
+
+       movl    4(%esp), %ecx           // address of jmp_buf
+       movl    JB_ONSTACK(%ecx), %eax  // ss_flags
+       subl    $8, %esp
+       pushl   %eax
+       CALL_EXTERN(__sigunaltstack)
+       addl    $12, %esp
+       
+L_do__longjmp:
+       BRANCH_EXTERN(__longjmp)        // else
+END(_longjmp)
diff --git a/src/setjmp/x86_64/_setjmp.s b/src/setjmp/x86_64/_setjmp.s
new file mode 100644 (file)
index 0000000..02cd31e
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
+ *
+ * HISTORY
+ *  20-Apr-92    Bruce Martin (bmartin@next.com)
+ *      Created from M68K sources.
+ */
+
+/*
+ * C library -- _setjmp, _longjmp
+ *
+ *     _longjmp(a,v)
+ * will generate a "return(v)" from
+ * the last call to
+ *     _setjmp(a)
+ * by restoring registers from the stack,
+ * The previous signal state is NOT restored.
+ *
+ */
+
+#include <architecture/i386/asm_help.h>
+
+#define JB_RBX                 0
+#define JB_RBP                 8
+#define JB_RSP                 16
+#define JB_R12                 24
+#define JB_R13                 32
+#define JB_R14                 40
+#define JB_R15                 48
+#define JB_RIP                 56
+#define JB_RFLAGS              64
+#define JB_MXCSR               72
+#define JB_FPCONTROL   76
+#define JB_MASK                        80
+
+LEAF(__setjmp, 0)
+       // %rdi is a jmp_buf (struct sigcontext *)
+       // Core of _setjmp (SysV AMD64): record the callee-saved GPRs, the
+       // caller's return address (as RIP), the post-return RSP, the x87
+       // control word and MXCSR.  Returns 0; __longjmp later "returns"
+       // here with a nonzero value in %eax.
+       // NOTE(review): JB_RFLAGS is defined above but no flags are saved
+       // here — the slot appears unused; confirm against jmp_buf layout.
+
+       // now build sigcontext
+       movq    %rbx, JB_RBX(%rdi)
+       movq    %rbp, JB_RBP(%rdi)
+       movq    %r12, JB_R12(%rdi)
+       movq    %r13, JB_R13(%rdi)
+       movq    %r14, JB_R14(%rdi)
+       movq    %r15, JB_R15(%rdi)
+
+       // RIP is set to the frame return address value
+       movq    (%rsp), %rax
+       movq    %rax, JB_RIP(%rdi)
+       // RSP is set to the frame return address plus 8
+       leaq    8(%rsp), %rax
+       movq    %rax, JB_RSP(%rdi)
+
+       // save fp control word
+       fnstcw  JB_FPCONTROL(%rdi)
+
+       // save MXCSR
+       stmxcsr JB_MXCSR(%rdi)
+
+       // return 0
+       xorl    %eax, %eax
+       ret
+
+
+LEAF(__longjmp, 0)
+       fninit                          // Clear all FP exceptions
+       // %rdi is a jmp_buf (struct sigcontext *)
+       // %esi is the return value
+       // setjmp must never appear to return 0 from a longjmp, so a
+       // requested value of 0 is silently promoted to 1 below.
+       movl    %esi, %eax
+       testl   %esi, %esi
+       jnz     1f
+       incl    %eax                    // v == 0 -> return 1 instead
+
+       // general registers
+1:
+       movq    JB_RBX(%rdi), %rbx
+       movq    JB_RBP(%rdi), %rbp
+       movq    JB_RSP(%rdi), %rsp      // switch back to the saved stack
+       movq    JB_R12(%rdi), %r12
+       movq    JB_R13(%rdi), %r13
+       movq    JB_R14(%rdi), %r14
+       movq    JB_R15(%rdi), %r15
+
+       // restore FP control word
+       fldcw   JB_FPCONTROL(%rdi)
+
+       // restore MXCSR
+       ldmxcsr JB_MXCSR(%rdi)
+
+
+       // Make sure DF is reset
+       cld
+
+       jmp             *JB_RIP(%rdi)   // "return" to the saved call site
diff --git a/src/setjmp/x86_64/_sigtramp.s b/src/setjmp/x86_64/_sigtramp.s
new file mode 100644 (file)
index 0000000..a528650
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2007, 2011 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <sys/syscall.h>
+
+#define UC_TRAD                        1
+#define UC_FLAVOR              30
+
+/* Structure fields for ucontext and mcontext.  */
+#define UCONTEXT_UC_MCONTEXT   48
+
+#define MCONTEXT_ES_EXCEPTION  0
+#define MCONTEXT_SS_RAX                16
+#define MCONTEXT_SS_RBX                24
+#define MCONTEXT_SS_RCX                32
+#define MCONTEXT_SS_RDX                40
+#define MCONTEXT_SS_RDI                48
+#define MCONTEXT_SS_RSI                56
+#define MCONTEXT_SS_RBP                64
+#define MCONTEXT_SS_RSP                72
+#define MCONTEXT_SS_R8         80
+#define MCONTEXT_SS_RIP                144
+
+/* register use:
+       %rbx    uctx
+       
+void
+_sigtramp(
+       union __sigaction_u __sigaction_u,      %rdi
+       int                     sigstyle,       %rsi
+       int                     sig,            %rdx
+       siginfo_t               *sinfo,         %rcx
+       ucontext_t              *uctx           %r8
+)
+*/
+
+       .globl __sigtramp
+       .text
+       .align 4,0x90
+__sigtramp:
+Lstart:
+       /* Although this routine does not need any stack frame, various parts
+          of the OS can't analyse the stack without them.  */
+       pushq   %rbp
+       movq    %rsp, %rbp
+
+       movq    %rdi, %rax      # set up address for call
+
+#if defined(__DYNAMIC__)
+       incl    ___in_sigtramp(%rip)    # mark "inside sigtramp" for backtracers
+#endif
+       /* Save uctx in %rbx.  */
+       movq    %r8, %rbx
+       /* Call the signal handler.
+          Some variants are not supposed to get the last two parameters,
+          but the test to prevent this is more expensive than just passing
+          them.  */
+       movl    %edx, %edi      # arg1 = sig
+       movq    %rcx, %rsi      # arg2 = sinfo
+       movq    %r8, %rdx       # arg3 = uctx
+Lcall_start:
+       call    *%rax
+Lcall_end:
+#if defined(__DYNAMIC__)
+       decl    ___in_sigtramp(%rip)
+#endif
+       movq    %rbx, %rdi      # sigreturn(uctx, UC_FLAVOR)
+       movl    $ UC_FLAVOR, %esi
+       callq   ___sigreturn    # resumes the interrupted context; does not return
+       ret
+Lend:
+
+/* DWARF unwind table #defines.  */
+#define DW_CFA_advance_loc_4 0x44
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_expression 0x0F
+#define DW_CFA_expression 0x10
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_offset(column) 0x80+(column)
+
+/* DWARF expression #defines.  */
+#define DW_OP_deref 0x06
+#define DW_OP_const1u 0x08
+#define DW_OP_dup 0x12
+#define DW_OP_drop 0x13
+#define DW_OP_over 0x14
+#define DW_OP_pick 0x15
+#define DW_OP_swap 0x16
+#define DW_OP_rot 0x17
+#define DW_OP_abs 0x19
+#define DW_OP_and 0x1a
+#define DW_OP_div 0x1b
+#define DW_OP_minus 0x1c
+#define DW_OP_mod 0x1d
+#define DW_OP_mul 0x1e
+#define DW_OP_neg 0x1f
+#define DW_OP_not 0x20
+#define DW_OP_or 0x21
+#define DW_OP_plus 0x22
+#define DW_OP_plus_uconst 0x23
+#define DW_OP_shl 0x24
+#define DW_OP_shr 0x25
+#define DW_OP_shra 0x26
+#define DW_OP_xor 0x27
+#define DW_OP_skip 0x2f
+#define DW_OP_bra 0x28
+#define DW_OP_eq 0x29
+#define DW_OP_ge 0x2A
+#define DW_OP_gt 0x2B
+#define DW_OP_le 0x2C
+#define DW_OP_lt 0x2D
+#define DW_OP_ne 0x2E
+#define DW_OP_lit(n) 0x30+(n)
+#define DW_OP_breg(n) 0x70+(n)
+#define DW_OP_deref_size 0x94
+
+/* The location expression we'll use.  */
+
+#define loc_expr_for_reg(regno, offs)                          \
+       .byte DW_CFA_expression, regno, 5 /* block length */,   \
+        DW_OP_breg(3), UCONTEXT_UC_MCONTEXT, DW_OP_deref,      \
+        DW_OP_plus_uconst, offs
+
+/* For r8 through r13 */
+#define loc_expr_rN(regno) \
+  loc_expr_for_reg(regno, MCONTEXT_SS_R8+(8*(regno-8)))
+
+/* For r14 through r15 */
+#define loc_expr_rN_long(regno)                                                \
+       .byte DW_CFA_expression, regno, 6 /* block length */,           \
+        DW_OP_breg(3), UCONTEXT_UC_MCONTEXT, DW_OP_deref,              \
+        DW_OP_plus_uconst, MCONTEXT_SS_R8+(8*(regno-8)), 1
+
+       /* Unwind tables.  */
+       .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
+EH_frame1:
+       .set L$set$0,LECIE1-LSCIE1
+       .long L$set$0   # Length of Common Information Entry
+LSCIE1:
+       .long   0       # CIE Identifier Tag
+       .byte   0x1     # CIE Version
+       .ascii "zRS\0"  # CIE Augmentation ('S' marks a signal frame)
+       .byte   0x1     # uleb128 0x1; CIE Code Alignment Factor
+       .byte   0x78    # sleb128 -8; CIE Data Alignment Factor
+       .byte   0x10    # CIE RA Column (16 = RIP)
+       .byte   0x1     # uleb128 0x1; Augmentation size
+       .byte   0x10    # FDE Encoding (pcrel)
+       .byte   DW_CFA_def_cfa
+       .byte   0x7     # uleb128 0x7; CFA register = rsp (DWARF column 7)
+       .byte   0x8     # uleb128 0x8; CFA offset = 8
+       .byte   DW_CFA_offset(16)
+       .byte   0x1     # uleb128 0x1
+       .byte   DW_CFA_offset(16)       // duplicate DW_CFA_offset (rip, -8) tells linker to not make compact unwind
+       .byte   0x1     # uleb128 0x1
+       .align 3
+LECIE1:
+       .globl _sigtramp.eh
+_sigtramp.eh:
+LSFDE1:
+       .set L$set$1,LEFDE1-LASFDE1
+       .long L$set$1   # FDE Length
+LASFDE1:
+       .long   LASFDE1-EH_frame1       # FDE CIE offset
+       .quad   Lstart-.        # FDE initial location
+       .set L$set$2,Lend-Lstart
+       .quad   L$set$2 # FDE address range
+       .byte   0x0     # uleb128 0x0; Augmentation size
+
+       /* Now for the expressions, which all compute
+          uctx->uc_mcontext->register
+          for each register.
+          
+          Describe even the registers that are not call-saved because they
+          might be being used in the prologue to save other registers.
+          Only integer registers are described at present.    */
+
+       loc_expr_for_reg (0, MCONTEXT_SS_RAX)
+       loc_expr_for_reg (1, MCONTEXT_SS_RDX)
+       loc_expr_for_reg (2, MCONTEXT_SS_RCX)
+       loc_expr_for_reg (3, MCONTEXT_SS_RBX)
+       loc_expr_for_reg (4, MCONTEXT_SS_RSI)
+       loc_expr_for_reg (5, MCONTEXT_SS_RDI)
+       loc_expr_for_reg (6, MCONTEXT_SS_RBP)
+       loc_expr_for_reg (7, MCONTEXT_SS_RSP)
+       loc_expr_rN (8)
+       loc_expr_rN (9)
+       loc_expr_rN (10)
+       loc_expr_rN (11)
+       loc_expr_rN (12)
+       loc_expr_rN (13)
+       loc_expr_rN_long (14)
+       loc_expr_rN_long (15)
+
+       /* The Intel architecture classifies exceptions into three categories,
+          'faults' which put the address of the faulting instruction
+          in EIP, 'traps' which put the following instruction in EIP,
+          and 'aborts' which don't typically report the instruction
+          causing the exception.
+
+          The traps are #BP and #OF.  */
+
+       .byte DW_CFA_val_expression, 16 # column 16 = RIP; value from expression below
+       .set L$set$3,Lpc_end-Lpc_start
+       .byte L$set$3
+Lpc_start:
+       /* Push the mcontext address twice.  */
+       .byte DW_OP_breg(3), UCONTEXT_UC_MCONTEXT, DW_OP_deref, DW_OP_dup
+       /* Find the value of EIP.  */
+       /* (MCONTEXT_SS_RIP = 144 > 127, so the offset is a two-byte uleb128:
+          low seven bits with continuation, then 144 >> 7.)  */
+       .byte   DW_OP_plus_uconst, MCONTEXT_SS_RIP, MCONTEXT_SS_RIP >> 7
+       .byte   DW_OP_deref, DW_OP_swap
+       /* Determine the exception type.  */
+       .byte   DW_OP_plus_uconst, MCONTEXT_ES_EXCEPTION, DW_OP_deref_size, 4
+       /* Check whether it is #BP (3) or #OF (4).  */
+       .byte   DW_OP_dup, DW_OP_lit(3), DW_OP_ne
+       .byte    DW_OP_swap, DW_OP_lit(4), DW_OP_ne, DW_OP_and
+       /* If it is not, then add 1 to the instruction address, so as to point
+          within or past the faulting instruction.  */
+       .byte   DW_OP_plus
+Lpc_end:
+
+       /* The CFA will have been saved as the value of RSP (it is not
+          RSP+8).  */
+       .byte DW_CFA_def_cfa_expression
+       .set L$set$4,Lcfa_end-Lcfa_start
+       .byte L$set$4
+Lcfa_start:
+       .byte DW_OP_breg(3), UCONTEXT_UC_MCONTEXT, DW_OP_deref
+       .byte  DW_OP_plus_uconst, MCONTEXT_SS_RSP, DW_OP_deref
+Lcfa_end:
+
+       .align 3
+LEFDE1:
+               
+       .subsections_via_symbols
diff --git a/src/setjmp/x86_64/setjmp.s b/src/setjmp/x86_64/setjmp.s
new file mode 100644 (file)
index 0000000..cfee74a
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved
+ */
+/*
+ * NeXT 386 setjmp/longjmp
+ *
+ * Written by Bruce Martin, NeXT Inc. 4/9/92
+ */
+
+/*
+ * C library -- setjmp, longjmp
+ *
+ *     longjmp(a,v)
+ * will generate a "return(v)" from
+ * the last call to
+ *     setjmp(a)
+ * by restoring registers from the stack,
+ * The previous value of the signal mask is
+ * restored.
+ *
+ */
+
+#include <architecture/i386/asm_help.h>
+
+#define JB_RBX                 0
+#define JB_RBP                 8
+#define JB_RSP                 16
+#define JB_R12                 24
+#define JB_R13                 32
+#define JB_R14                 40
+#define JB_R15                 48
+#define JB_RIP                 56
+#define JB_RFLAGS              64
+#define JB_MXCSR               72
+#define JB_FPCONTROL   76
+#define JB_MASK                        80
+#define JB_SAVEMASK            84              // sigsetjmp/siglongjmp only
+#define JB_ONSTACK             88      
+
+#define STACK_SSFLAGS          16              // offsetof(stack_t, ss_flags)
+
+LEAF(_sigsetjmp, 0)
+       // %rdi is sigjmp_buf * jmpbuf;
+       // %esi is int savemask
+       // sigsetjmp: remember whether the caller wants the signal mask
+       // saved, then dispatch: savemask != 0 -> full _setjmp (saves mask
+       // and sigaltstack state); savemask == 0 -> plain __setjmp.
+       movl    %esi, JB_SAVEMASK(%rdi) // jmpbuf[_JBLEN] = savemask;
+       cmpl    $0, %esi                // if savemask != 0
+       jne     _setjmp                  // setjmp(jmpbuf);
+       jmp     L_do__setjmp            // else _setjmp(jmpbuf);
+
+LEAF(_setjmp, 0)
+       // setjmp(jmp_buf) for x86-64: snapshot the current signal mask
+       // (via sigprocmask(SIG_BLOCK, NULL, &oset)) and the sigaltstack
+       // ss_flags into the jmp_buf, then tail-branch into __setjmp to
+       // capture the register context.  %rdi (the jmp_buf) is parked on
+       // the stack across both external calls.
+       pushq   %rdi                    // Preserve the jmp_buf across the call
+       movl    $1, %edi                // how = SIG_BLOCK
+       xorq    %rsi, %rsi            // set = NULL
+       subq    $16, %rsp               // Allocate space for the return from sigprocmask + 8 to align stack
+       movq    %rsp, %rdx              // oset = allocated space
+       CALL_EXTERN(_sigprocmask)
+       popq    %rax                    // Save the mask
+       addl    $8, %rsp                // Restore the stack to before we align it
+       movq    (%rsp), %rdi            // jmp_buf (struct sigcontext *).  Leave pointer on the stack for _sigaltstack call)
+       movl    %eax, JB_MASK(%rdi)
+
+       // Get current sigaltstack status (stack_t)
+       subq    $32, %rsp                       // 24 bytes for a stack_t, + 8 for the jmp_buf pointer, + 8 is correctly aligned
+       movq    %rsp, %rsi                      // oss
+       xorq    %rdi, %rdi                      // ss == NULL
+       CALL_EXTERN(___sigaltstack)             // __sigaltstack(NULL, oss)
+       movl    STACK_SSFLAGS(%rsp), %eax       // oss.ss_flags
+       movq    32(%rsp), %rdi                  // jmpbuf (will be first argument to subsequent call)
+       movl    %eax, JB_ONSTACK(%rdi)          // Store ss_flags in jmpbuf
+       addq    $40, %rsp                       // restore %rsp
+
+L_do__setjmp:
+       BRANCH_EXTERN(__setjmp)
+
+LEAF(_siglongjmp, 0)
+       // %rdi is sigjmp_buf * jmpbuf;
+       // Mirror of _sigsetjmp's dispatch: restore the mask only if
+       // sigsetjmp recorded a nonzero savemask flag.
+       cmpl    $0, JB_SAVEMASK(%rdi)      // if jmpbuf[_JBLEN] != 0
+       jne     _longjmp                //     longjmp(jmpbuf, var);
+       jmp     L_do__longjmp          // else _longjmp(jmpbuf, var);
+
+LEAF(_longjmp, 0)
+       // %rdi is address of jmp_buf (saved context)
+       // longjmp(jmp_buf, val): reinstate the saved signal mask with
+       // sigprocmask(SIG_SETMASK, &mask, NULL), hand the saved ss_flags
+       // to __sigunaltstack, then branch to __longjmp with %rdi = jmpbuf
+       // and %rsi = val (both preserved on the stack across the calls).
+       pushq   %rdi                            // Preserve the jmp_buf across the call
+       pushq   %rsi                            // Preserve the value across the call
+       pushq   JB_MASK(%rdi)           // Put the mask on the stack
+       movq    $3, %rdi                        // how = SIG_SETMASK
+       movq    %rsp, %rsi                      // set = address where we stored the mask
+       xorq    %rdx, %rdx                      // oset = NULL
+       CALL_EXTERN_AGAIN(_sigprocmask)
+       
+       // Restore sigaltstack status
+       movq    16(%rsp), %rdi                  // Grab jmpbuf but leave it on the stack
+       movl    JB_ONSTACK(%rdi), %edi          // Pass old state to _sigunaltstack()
+       CALL_EXTERN(__sigunaltstack)
+       addq    $8, %rsp                        // Restore stack (pop the pushed mask)
+       popq    %rsi                            // Recover val for __longjmp
+       popq    %rdi                            // Pass jmpbuf to _longjmp
+
+L_do__longjmp:
+       BRANCH_EXTERN(__longjmp)        // else
+END(_longjmp)
diff --git a/src/simple/asl.c b/src/simple/asl.c
new file mode 100644 (file)
index 0000000..d970ea0
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2005, 2006, 2009 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <stdlib.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/syslog.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <crt_externs.h>
+
+#include <TargetConditionals.h>
+
+#include <_simple.h>
+
+#include <os/lock.h>
+#include <os/lock_private.h>
+#include <os/alloc_once_private.h>
+#include <platform/string.h>
+#include <platform/compat.h>
+
+#define ASL_LOG_PATH _PATH_LOG
+
+extern ssize_t __sendto(int, const void *, size_t, int, const struct sockaddr *, socklen_t);
+extern int __gettimeofday(struct timeval *, struct timezone *);
+
+/*
+ * Mirror of the dyld-provided program-variables block; only
+ * __prognamePtr is read here (by _simple_asl_init).
+ */
+struct ProgramVars
+{
+    void*       mh;             /* mach_header of the main executable */
+    int*        NXArgcPtr;      /* &argc */
+    char***     NXArgvPtr;      /* &argv */
+    char***     environPtr;     /* &environ */
+    char**      __prognamePtr;  /* &__progname */
+};
+
+/*
+ * Per-process ASL logging state, allocated zero-filled by os_alloc_once
+ * (see _simple_asl_get_context / _simple_asl_init_context).
+ */
+struct asl_context {
+       bool asl_enabled;               /* false when ASL_DISABLE=1 */
+       const char *progname;           /* "Sender" value; defaults to "unknown" */
+       int asl_fd;                     /* connected log socket, or -1 */
+#if TARGET_IPHONE_SIMULATOR
+       const char *sim_log_path;       /* cached IOS_SIMULATOR_SYSLOG_SOCKET; non-NULL once probed */
+       os_unfair_lock sim_connect_lock; /* guards lazy connect in _simple_asl_get_fd */
+#else
+       os_once_t connect_once;         /* one-time connect token */
+#endif
+};
+
+static struct asl_context* _simple_asl_get_context(void);
+static void _simple_asl_init_context(void *ctx);
+static int _simple_asl_connect(const char *log_path);
+static void _simple_asl_connect_once(void * __unused arg);
+static int _simple_asl_get_fd(void);
+
+/*
+ * Simplified ASL log interface; does not use malloc.  Unfortunately, this
+ * requires knowledge of the format used by ASL.
+ */
+
+/*
+ * Return the ASL escape sequence for byte c when it appears in a key,
+ * or NULL if c needs no escaping.  Keys additionally escape ' ' (as
+ * "\\s") because spaces delimit key/value pairs in the wire format.
+ */
+static const char *
+_simple_asl_escape_key(unsigned char c)
+{
+       switch(c)
+       {
+               case '\\': return "\\\\";
+               case '[':  return "\\[";
+               case ']':  return "\\]";
+               case '\n': return "\\n";
+               case ' ':  return "\\s";
+       }
+
+       return NULL;
+}
+
+/*
+ * Return the ASL escape sequence for byte c when it appears in a value,
+ * or NULL if c needs no escaping.  Unlike keys, values may contain
+ * literal spaces.
+ */
+static const char *
+_simple_asl_escape_val(unsigned char c)
+{
+       switch(c)
+       {
+               case '\\': return "\\\\";
+               case '[':  return "\\[";
+               case ']':  return "\\]";
+               case '\n': return "\\n";
+       }
+
+       return NULL;
+}
+
+/*
+ * One-time setup called with the initial environment and program vars.
+ * Honors ASL_DISABLE=1 (leaves logging disabled), otherwise enables
+ * logging and records the program name for the "Sender" field.
+ */
+__attribute__((visibility("hidden")))
+void
+_simple_asl_init(const char *envp[], const struct ProgramVars *vars)
+{
+       const char *str;
+       struct asl_context *ctx = _simple_asl_get_context();
+       str = _simple_getenv(envp, "ASL_DISABLE");
+       if ((str != NULL) && (!strcmp(str, "1"))) return;
+       ctx->asl_enabled = true;
+       if (vars && vars->__prognamePtr) {
+               ctx->progname = *(vars->__prognamePtr);
+#if TARGET_IPHONE_SIMULATOR
+       } else {
+               /* Simulator fallback: no ProgramVars, ask libdyld directly. */
+               const char * progname = *_NSGetProgname();
+               if (progname)
+                       ctx->progname = progname;
+#endif
+       }
+}
+
+/*
+ * Open a SOCK_DGRAM AF_UNIX socket and connect it to log_path.
+ * Returns the connected, close-on-exec fd, or -1 on failure.
+ */
+static int
+_simple_asl_connect(const char *log_path)
+{
+       int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+       if (fd == -1) return -1;        /* fix: was a bare "return;" in an int-returning function */
+
+       fcntl(fd, F_SETFD, FD_CLOEXEC); /* don't leak the log fd across exec */
+
+       struct sockaddr_un addr;
+       addr.sun_family = AF_UNIX;
+
+       /* Copy the path (including NUL), truncating to sun_path's size.
+          NOTE(review): a maximal-length path leaves sun_path without a
+          terminating NUL; AF_UNIX addresses are length-delimited so this
+          matches the original behavior, but sun_path must not be treated
+          as a C string afterwards. */
+       size_t amt = strlen(log_path) + 1;
+       if (sizeof(addr.sun_path) < amt)
+               amt = sizeof(addr.sun_path);
+       memmove(addr.sun_path, log_path, amt);
+
+       if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
+               close(fd);
+               return -1;
+       }
+       return fd;
+}
+
+/*
+ * os_once callback (non-simulator path): connect to the default ASL
+ * socket exactly once and cache the fd (or -1) in the context.
+ */
+static void
+_simple_asl_connect_once(void * __unused once_arg)
+{
+       struct asl_context *ctx = _simple_asl_get_context();
+       ctx->asl_fd = _simple_asl_connect(ASL_LOG_PATH);
+}
+
+/*
+ * Return the fd of the log socket, lazily connecting on first use.
+ * Returns -1 when logging is disabled or no connection could be made.
+ *
+ * Simulator: serialized by sim_connect_lock; the IOS_SIMULATOR_SYSLOG_SOCKET
+ * environment variable is consulted exactly once (sim_log_path non-NULL
+ * marks that probe as done), falling back to ASL_LOG_PATH.
+ * Device/macOS: a simple os_once connect.
+ */
+static int
+_simple_asl_get_fd(void)
+{
+       struct asl_context *ctx = _simple_asl_get_context();
+       if (!ctx->asl_enabled) {
+               return -1;
+       }
+
+#if TARGET_IPHONE_SIMULATOR
+       os_unfair_lock_lock_with_options(&ctx->sim_connect_lock,
+                       OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
+       if (ctx->sim_log_path) {
+               // all setup has been done already
+               os_unfair_lock_unlock(&ctx->sim_connect_lock);
+               return ctx->asl_fd;
+       }
+       ctx->sim_log_path = _simple_getenv(
+                       (const char **)*_NSGetEnviron(), "IOS_SIMULATOR_SYSLOG_SOCKET");
+       if (ctx->sim_log_path) {
+               // the first and only time the envvar is being checked
+               // asl_fd procured by the end of this call will be used forever
+               int sim_log_fd = _simple_asl_connect(ctx->sim_log_path);
+               if (sim_log_fd > -1) {
+                       // successfully connected to the SIM path
+                       if (ctx->asl_fd > -1) {
+                               // close the ASL_LOG_PATH fd
+                               close(ctx->asl_fd);
+                       }
+                       ctx->asl_fd = sim_log_fd;
+               }
+       }
+       if (ctx->asl_fd < 0) {
+               // either there is no envvar or it didn't work. fallback to ASL_LOG_PATH
+               ctx->asl_fd = _simple_asl_connect(ASL_LOG_PATH);
+       }
+       os_unfair_lock_unlock(&ctx->sim_connect_lock);
+       return ctx->asl_fd;
+#else
+       os_once(&ctx->connect_once, NULL, _simple_asl_connect_once);
+       return ctx->asl_fd;
+#endif
+}
+
+/*
+ * Allocate a new ASL message buffer seeded with the fixed-width header
+ * the ASL wire format expects (nine spaces then '0').  Returns NULL on
+ * allocation or formatting failure.
+ */
+_SIMPLE_STRING
+_simple_asl_msg_new(void)
+{
+       _SIMPLE_STRING b = _simple_salloc();
+
+       if (b == NULL) return NULL;
+
+       if (_simple_sprintf(b, "         0", 0))
+       {
+               /* header write failed — release the buffer rather than leak it */
+               _simple_sfree(b);
+               return NULL;
+       }
+
+       return b;
+}
+
+/*
+ * Append " [key value]" to message __b, escaping key and value per the
+ * ASL wire format.  A NULL __val emits a key-only pair.  For the
+ * "Message" key, trailing escaped newlines ("\\n") are stripped.
+ * Errors from the _simple_* writers abort the append silently
+ * (best-effort logging; the message may be left partially written).
+ */
+void
+_simple_asl_msg_set(_SIMPLE_STRING __b, const char *__key, const char *__val)
+{
+       if (__b == NULL) return;
+       if (__key == NULL) return;
+
+       do
+       {
+               if (_simple_sprintf(__b, " [", 0)) break;
+               if (_simple_esprintf(__b, _simple_asl_escape_key, "%s", __key)) break;
+               if (__val != NULL)
+               {
+                       if (_simple_esprintf(__b, _simple_asl_escape_val, " %s", __val)) break;
+                       if (!strcmp(__key, "Message"))
+                       {
+                               char *cp;
+
+                               /* remove trailing (escaped) newlines */
+                               /* NOTE(review): cp walks backwards two bytes at a
+                                  time with no lower bound; a value consisting
+                                  solely of newlines could step before the buffer
+                                  start — verify _simple_string's leading content
+                                  guarantees before relying on this. */
+                               cp = _simple_string(__b);
+                               cp += strlen(cp);
+                               for (;;)
+                               {
+                                       cp -= 2;
+                                       if (strcmp(cp, "\\n") != 0) break;
+                                       *cp = 0;
+                               }
+
+                               _simple_sresize(__b);
+                       }
+               }
+
+               if (_simple_sappend(__b, "]")) break;
+               return;
+       } while (0);
+}
+
+/*
+ * Stamp message __b with [PID]/[UID]/[GID]/[Time]/[TimeNanoSec] fields
+ * and a trailing newline, then send it to the log socket.  Silently does
+ * nothing if no log fd is available; formatting errors abort the send.
+ */
+void
+_simple_asl_send(_SIMPLE_STRING __b)
+{
+       struct timeval tv;
+       int asl_fd = _simple_asl_get_fd();
+       if (asl_fd < 0) return;
+
+       __gettimeofday(&tv, NULL);      /* raw syscall wrapper; avoids libc */
+
+       do
+       {
+               char *cp;
+
+               if (_simple_sprintf(__b, " [PID ", 0)) break;
+               if (_simple_esprintf(__b, _simple_asl_escape_val, "%u", getpid())) break;
+               if (_simple_sprintf(__b, "] [UID ", 0)) break;
+               if (_simple_esprintf(__b, _simple_asl_escape_val, "%u", getuid())) break;
+               if (_simple_sprintf(__b, "] [GID ", 0)) break;
+               if (_simple_esprintf(__b, _simple_asl_escape_val, "%u", getgid())) break;
+               if (_simple_sprintf(__b, "] [Time ", 0)) break;
+               if (_simple_esprintf(__b, _simple_asl_escape_val, "%lu", tv.tv_sec)) break;
+               if (_simple_sappend(__b, "] [TimeNanoSec ")) break;
+               if (_simple_esprintf(__b, _simple_asl_escape_val, "%d", tv.tv_usec * 1000)) break;
+               if (_simple_sappend(__b, "]\n")) break;
+
+               cp = _simple_string(__b);
+               /* best-effort datagram; send errors are deliberately ignored */
+               __sendto(asl_fd, cp, strlen(cp), 0, NULL, 0);
+    } while (0);
+}
+
+/*
+ * Build and send one log message with an explicit sender name.
+ * level is clamped to the syslog range [0, 7] and sent as a single
+ * ASCII digit in the "Level" field.
+ */
+void
+_simple_asl_log_prog(int level, const char *facility, const char *message, const char *prog)
+{
+       char lstr[2];
+
+       _SIMPLE_STRING b = _simple_asl_msg_new();
+       if (b == NULL) return;
+
+       if (level < 0) level = 0;
+       if (level > 7) level = 7;
+       lstr[0] = level + '0';
+       lstr[1] = '\0';
+
+       _simple_asl_msg_set(b, "Sender", prog);
+       _simple_asl_msg_set(b, "Level", lstr);
+       _simple_asl_msg_set(b, "Facility", facility);
+       _simple_asl_msg_set(b, "Message", message);
+       _simple_asl_send(b);
+       _simple_sfree(b);
+}
+
+/*
+ * Convenience wrapper: log with the program name recorded at init time
+ * as the sender.
+ */
+void
+_simple_asl_log(int level, const char *facility, const char *message)
+{
+       _simple_asl_log_prog(level, facility, message,
+                       _simple_asl_get_context()->progname);
+}
+
+/*
+ * Return the process-wide asl_context, allocating and initializing it
+ * on first call via os_alloc_once (no malloc).
+ */
+static struct asl_context *
+_simple_asl_get_context(void)
+{
+       return os_alloc_once(OS_ALLOC_ONCE_KEY_LIBSYSTEM_PLATFORM_ASL,
+                       sizeof(struct asl_context), _simple_asl_init_context);
+}
+
+/*
+ * os_alloc_once initializer: set the non-zero defaults.  All other
+ * fields (asl_enabled, locks, once tokens) rely on the zero fill.
+ */
+static void
+_simple_asl_init_context(void *arg)
+{
+       struct asl_context *ctx = (struct asl_context *)arg;
+       // ctx is zero-filled when it comes out of _os_alloc_once
+       ctx->progname = "unknown";
+       ctx->asl_fd = -1;
+}
diff --git a/src/simple/getenv.c b/src/simple/getenv.c
new file mode 100644 (file)
index 0000000..b3f1b7c
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <TargetConditionals.h>
+
+#include <stdlib.h>
+
+#include <_simple.h>
+
+#include <platform/string.h>
+#include <platform/compat.h>
+
+/*
+ * Minimal getenv over an explicit environment array: scan envp for an
+ * entry of the form "var=value" and return a pointer to the value
+ * portion, or NULL if var is absent (or envp is NULL).  Does not
+ * allocate; the returned pointer aliases the envp entry.
+ */
+const char *
+_simple_getenv(const char *envp[], const char *var) {
+    const char **p;
+    size_t var_len;
+
+    var_len = strlen(var);
+
+    for (p = envp; p && *p; p++) {
+        size_t p_len = strlen(*p);
+
+        /* match "var" exactly, followed by '=' (prefix match alone is not enough) */
+        if (p_len >= var_len &&
+            memcmp(*p, var, var_len) == 0 &&
+            (*p)[var_len] == '=') {
+            return &(*p)[var_len + 1];
+        }
+    }
+
+    return NULL;
+}
diff --git a/src/simple/string_io.c b/src/simple/string_io.c
new file mode 100644 (file)
index 0000000..2e581e8
--- /dev/null
@@ -0,0 +1,593 @@
+/*
+ * Copyright (c) 2005, 2006, 2009 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <errno.h>
+
+#include "os/internal.h"
+#include "_simple.h"
+#include "platform/string.h"
+#include "platform/compat.h"
+
+#ifndef VM_PAGE_SIZE
+#define VM_PAGE_SIZE   4096
+#endif
+
#define BUF_SIZE(s)	(((BUF *)(s))->end - ((BUF *)(s))->buf + 1)
/* we use a small buffer to minimize stack usage constraints */
#define MYBUFSIZE	32

typedef struct _BUF {
	char *buf;			/* start of the character buffer */
	char *ptr;			/* next free byte */
	char *end;			/* last usable position */
	int fd;				/* destination fd for _flush() */
	void (*full)(struct _BUF *);	/* called when the buffer is full */
} BUF;

/*
 * Flush the buffered bytes [b->buf, b->ptr) to b->fd.
 * Retries on EINTR/EAGAIN; on any other write error the remaining
 * bytes are silently dropped (best-effort semantics).
 */
static void
_flush(BUF *b)
{
	char *buf = b->buf;
	ssize_t n = b->ptr - buf;
	ssize_t w;

	while (n > 0) {
		w = write(b->fd, buf, n);
		if (w < 0) {
			if (errno == EINTR || errno == EAGAIN)
				continue;
			break;
		}
		n -= w;
		buf += w;	/* advance past the bytes actually written
				 * (was "buf += n", which resent the wrong
				 * region after a partial write) */
	}
}
+
/* flush the buffer and reset the pointer */
static void
_flush_reset(BUF *b)
{
	/* write out all pending bytes, then rewind so the buffer can be
	 * refilled; installed as the "full" callback by _simple_vdprintf */
	_flush(b);
	b->ptr = b->buf;
}
+
/* enlarge the buffer */
/*
 * Grow a _simple_salloc()-backed buffer by (at least) one VM page.
 * Fast path: try to vm_allocate the page immediately after the current
 * buffer; if that succeeds the buffer grows in place.  Otherwise a new,
 * page-rounded region is allocated, the contents are moved, and the old
 * storage is released.  Installed as the "full" callback by _simple_salloc.
 * Aborts the process (client crash) if no memory can be allocated.
 */
static void
_enlarge(BUF *b)
{
	vm_address_t new;
	vm_size_t sold, snew;
	intptr_t diff;
	kern_return_t kr;

	new = (vm_address_t)(b->end + 1);
	if(vm_allocate(mach_task_self(), &new, VM_PAGE_SIZE, 0) == 0) {
		/* page is adjacent */
		b->end += VM_PAGE_SIZE;
		return;
	}
	/* no adjacent page: allocate a fresh region rounded up to a page */
	sold = BUF_SIZE(b);
	snew = (sold + VM_PAGE_SIZE) & ~(VM_PAGE_SIZE - 1);
	if ((kr = vm_allocate(mach_task_self(), &new, snew, 1)) != 0) {
		__LIBPLATFORM_CLIENT_CRASH__(kr, "Failed to allocate memory for buffer");
	}
	diff = new - (vm_address_t)b->buf;
	memmove((void *)new, b->buf, sold);
	if((intptr_t)(b->buf) & (VM_PAGE_SIZE - 1)) {
		/* old buffer was not page-aligned (first page is shared with
		 * the BUF header); only whole pages beyond the header page are
		 * deallocated below.  NOTE(review): order is delicate here —
		 * b->buf is rounded up before the vm_deallocate uses it. */
		sold &= ~(VM_PAGE_SIZE - 1);
		b->buf = (char *)((intptr_t)(b->buf + VM_PAGE_SIZE) & ~(VM_PAGE_SIZE - 1));
		b->end = (char *)(new + snew - 1);
	} else
		b->end += diff + VM_PAGE_SIZE;
	if(sold > 0) {
		vm_deallocate(mach_task_self(), (vm_address_t)b->buf, sold);
	}
	b->buf = (char *)new;
	b->ptr += diff;	/* keep write position at the same relative offset */
}
+
+static inline void put_s(BUF *, _esc_func, const char *);
+/* output a single character */
+static inline void
+put_c(BUF *b, _esc_func esc, unsigned char c)
+{
+       const char *cp;
+
+       if(esc && (cp = esc(c)) != NULL)
+               put_s(b, NULL, cp);
+       else {
+               if(b->ptr >= b->end)
+                       b->full(b);
+               *b->ptr++ = c;
+       }
+}
+
+/* output a null-terminated string */
+static inline void
+put_s(BUF *b, _esc_func esc, const char *str)
+{
+       while(*str)
+               put_c(b, esc, *str++);
+}
+
+/* output a string of the specified size */
+static inline void
+put_n(BUF *b, _esc_func esc, const char *str, ssize_t n)
+{
+       while(n-- > 0)
+               put_c(b, esc, *str++);
+}
+
+/*
+ * Output the signed decimal string representing the number in "in".  "width" is
+ * the minimum field width, and "zero" is a boolean value, true for zero padding
+ * (otherwise blank padding).
+ */
+static void
+dec(BUF *b, _esc_func esc, long long in, int width, int zero)
+{
+       char buf[32];
+       char *cp = buf + sizeof(buf);
+       ssize_t pad;
+       int neg = 0;
+       unsigned long long n = (unsigned long long)in;
+
+       if(in < 0) {
+               neg++;
+               width--;
+               n = ~n + 1;
+       }
+       *--cp = 0;
+       if(n) {
+               while(n) {
+                       *--cp = (n % 10) + '0';
+                       n /= 10;
+               }
+       } else
+               *--cp = '0';
+       if(neg && zero) {
+               put_c(b, esc, '-');
+               neg = 0;
+       }
+       pad = width - strlen(cp);
+       zero = zero ? '0' : ' ';
+       while(pad-- > 0)
+               put_c(b, esc, zero);
+       if(neg)
+               put_c(b, esc, '-');
+       put_s(b, esc, cp);
+}
+
+/*
+ * Output the hex string representing the number in "n".  "width" is the
+ * minimum field width, and "zero" is a boolean value, true for zero padding
+ * (otherwise blank padding).  "upper" is a boolean value, true for upper
+ * case hex characters, lower case otherwise.  "p" is a boolean value, true
+ * if 0x should be prepended (for %p), otherwise nothing.
+ */
+static const char _h[] = "0123456789abcdef";
+static const char _H[] = "0123456789ABCDEF";
+static const char _0x[] = "0x";
+
+static void
+hex(BUF *b, _esc_func esc, unsigned long long n, int width, int zero, int upper, int p)
+{
+       char buf[32];
+       char *cp = buf + sizeof(buf);
+       const char *h = upper ? _H : _h;
+
+       *--cp = 0;
+       if(n) {
+               while(n) {
+                       *--cp = h[n & 0xf];
+                       n >>= 4;
+               }
+       } else
+               *--cp = '0';
+       if(p) {
+               width -= 2;
+               if(zero) {
+                       put_s(b, esc, _0x);
+                       p = 0;
+               }
+       }
+       width -= strlen(cp);
+       zero = zero ? '0' : ' ';
+       while(width-- > 0)
+               put_c(b, esc, zero);
+       if(p)
+               put_s(b, esc, _0x);
+       put_s(b, esc, cp);
+}
+
+/*
+ * Output the unsigned decimal string representing the number in "n".  "width"
+ * is the minimum field width, and "zero" is a boolean value, true for zero
+ * padding (otherwise blank padding).
+ */
+static void
+udec(BUF *b, _esc_func esc, unsigned long long n, int width, int zero)
+{
+       char buf[32];
+       char *cp = buf + sizeof(buf);
+       ssize_t pad;
+
+       *--cp = 0;
+       if(n) {
+               while(n) {
+                       *--cp = (n % 10) + '0';
+                       n /= 10;
+               }
+       } else
+               *--cp = '0';
+       pad = width - strlen(cp);
+       zero = zero ? '0' : ' ';
+       while(pad-- > 0)
+               put_c(b, esc, zero);
+       put_s(b, esc, cp);
+}
+
+/*
+ * Output the unsigned decimal string representing the number in "n", rounded
+ * to the nearest MB, KB or b.  "width" is the minimum field width, and "zero"
+ * is a boolean value, true for zero padding (otherwise blank padding).
+ */
+static void
+ydec(BUF *b, _esc_func esc, unsigned long long n, int width, int zero)
+{
+       if(n >= 10 * (1 << 20)) {
+               n += (1 << 19);
+               udec(b, esc, n >> 20, width, zero);
+               put_s(b, esc, "MB");
+       } else if (n >= 10 * (1 << 10)) {
+               n += (1 << 9);
+               udec(b, esc, n >> 10, width, zero);
+               put_s(b, esc, "KB");
+       } else {
+               udec(b, esc, n, width, zero);
+               put_s(b, esc, "b");
+       }
+}
+
/*
 * The actual engine for all the _simple_*printf routines.
 */
/*
 * Supported conversions: %% %c %d %i %p %s %u %x %X %y (size scaled to
 * MB/KB/b), with an optional leading '0' (zero padding), a decimal field
 * width, and 'l' modifiers counted in lflag (0 = int, 1 = long,
 * >=2 = long long).  Unknown conversion characters are echoed literally.
 * Output goes through put_c/put_s so the optional "esc" hook applies.
 */
static void
__simple_bprintf(BUF *b, _esc_func esc, const char *fmt, va_list ap)
{
	while(*fmt) {
		int lflag, zero, width;
		char *cp;
		/* copy everything up to the next '%' verbatim */
		if(!(cp = strchr(fmt, '%'))) {
			put_s(b, esc, fmt);
			break;
		}
		put_n(b, esc, fmt, cp - fmt);
		fmt = cp + 1;
		if(*fmt == '%') {
			put_c(b, esc, '%');
			fmt++;
			continue;
		}
		/* parse flags/width/modifiers, then dispatch on the conversion */
		lflag = zero = width = 0;
		for(;;) {
			switch(*fmt) {
			case '0':
				/* leading zero requests zero padding */
				zero++;
				fmt++;
				/* drop through */
			case '1': case '2': case '3': case '4': case '5':
			case '6': case '7': case '8': case '9':
				while(*fmt >= '0' && *fmt <= '9')
					width = 10 * width + (*fmt++ - '0');
				continue;
			case 'c':
				/* pad to width-1, then emit the character */
				zero = zero ? '0' : ' ';
				width--;
				while(width-- > 0)
					put_c(b, esc, zero);
				put_c(b, esc, va_arg(ap, int));
				break;
			case 'd': case 'i':
				switch(lflag) {
				case 0:
					dec(b, esc, va_arg(ap, int), width, zero);
					break;
				case 1:
					dec(b, esc, va_arg(ap, long), width, zero);
					break;
				default:
					dec(b, esc, va_arg(ap, long long), width, zero);
					break;
				}
				break;
			case 'l':
				/* length modifier; stay in the parse loop */
				lflag++;
				fmt++;
				continue;
			case 'p':
				/* pointers print as 0x-prefixed lowercase hex */
				hex(b, esc, (unsigned long)va_arg(ap, void *), width, zero, 0, 1);
				break;
			case 's':
				/* NULL prints as "(null)", like printf */
				cp = va_arg(ap, char *);
				cp = cp ? cp : "(null)";
				width -= strlen(cp);
				zero = zero ? '0' : ' ';
				while(width-- > 0)
					put_c(b, esc, zero);
				put_s(b, esc, cp);
				break;
			case 'u':
				switch(lflag) {
				case 0:
					udec(b, esc, va_arg(ap, unsigned int), width, zero);
					break;
				case 1:
					udec(b, esc, va_arg(ap, unsigned long), width, zero);
					break;
				default:
					udec(b, esc, va_arg(ap, unsigned long long), width, zero);
					break;
				}
				break;
			case 'X': case 'x':
				switch(lflag) {
				case 0:
					hex(b, esc, va_arg(ap, unsigned int), width, zero,
						*fmt == 'X', 0);
					break;
				case 1:
					hex(b, esc, va_arg(ap, unsigned long), width, zero,
						*fmt == 'X', 0);
					break;
				default:
					hex(b, esc, va_arg(ap, unsigned long long), width, zero,
						*fmt == 'X', 0);
					break;
				}
				break;
			case 'y':
				/* non-standard: human-readable byte size */
				switch(lflag) {
				case 0:
					ydec(b, esc, va_arg(ap, unsigned int), width, zero);
					break;
				case 1:
					ydec(b, esc, va_arg(ap, unsigned long), width, zero);
					break;
				default:
					ydec(b, esc, va_arg(ap, unsigned long long), width, zero);
					break;
				}
				break;
			default:
				/* unknown conversion: echo the character */
				put_c(b, esc, *fmt);
				break;
			}
			break;
		}
		fmt++;
	}
}
+
+/*
+ * A simplified vfprintf variant.  The format string is interpreted with
+ * arguments from the va_list, and the results are written to the given
+ * file descriptor.
+ */
+void
+_simple_vdprintf(int fd, const char *fmt, va_list ap)
+{
+       BUF b;
+       char buf[MYBUFSIZE];
+
+       b.buf = buf;
+       b.fd = fd;
+       b.ptr = b.buf;
+       b.end = b.buf + MYBUFSIZE;
+       b.full = _flush_reset;
+       __simple_bprintf(&b, NULL, fmt, ap);
+       _flush(&b);
+}
+
/*
 * A simplified fprintf variant.  The format string is interpreted with
 * arguments from the variable argument list, and the results are written
 * to the given file descriptor.
 */
void
_simple_dprintf(int fd, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	_simple_vdprintf(fd, fmt, args);
	va_end(args);
}
+
+/*
+ * A simplified string allocate routine.  Pass the opaque pointer to structure
+ * to _simple_*sprintf() routines.  Use _simple_string() to retrieve the
+ * current string (the string is guaranteed to be null terminated only on
+ * the call to _simple_string()).  Use _simple_sfree() to free the structure
+ * and string memory.
+ */
+_SIMPLE_STRING
+_simple_salloc(void)
+{
+       kern_return_t kr;
+       BUF *b;
+
+       kr = vm_allocate(mach_task_self(), (vm_address_t *)&b, VM_PAGE_SIZE, 1);
+       if (kr) {
+               __LIBPLATFORM_CLIENT_CRASH__(kr, "Failed to allocate memory for string");
+       }
+       b->ptr = b->buf = (char *)b + sizeof(BUF);
+       b->end = (char *)b + VM_PAGE_SIZE - 1;
+       b->full = _enlarge;
+       return (_SIMPLE_STRING)b;
+}
+
/*
 * The format string is interpreted with arguments from the va_list, and the
 * results are appended to the string maintained by the opaque structure, as
 * returned by a previous call to _simple_salloc(). Always returns 0.
 */
int
_simple_vsprintf(_SIMPLE_STRING b, const char *fmt, va_list ap)
{
	/* thin wrapper: same as _simple_vesprintf with no escape hook */
	return _simple_vesprintf(b, NULL, fmt, ap);
}
+
+/*
+ * The format string is interpreted with arguments from the variable argument
+ * list, and the results are appended to the string maintained by the opaque
+ * structure, as returned by a previous call to _simple_salloc().
+ * Always returns 0.
+ */
+int
+_simple_sprintf(_SIMPLE_STRING b, const char *fmt, ...)
+{
+       va_list ap;
+       int ret;
+
+       va_start(ap, fmt);
+       ret = _simple_vesprintf(b, NULL, fmt, ap);
+       va_end(ap);
+       return ret;
+}
+
/*
 * Like _simple_vsprintf(), except __esc is a function to call on each
 * character; the function returns NULL if the character should be passed
 * as is, otherwise, the returned character string is used instead.
 */
int
_simple_vesprintf(_SIMPLE_STRING b, _esc_func esc, const char *fmt, va_list ap)
{
	/* the engine appends via the BUF's "full" callback; cannot fail */
	__simple_bprintf((BUF *)b, esc, fmt, ap);
	return 0;
}
+
+/*
+ * Like _simple_sprintf(), except __esc is a function to call on each
+ * character; the function returns NULL if the character should be passed
+ * as is, otherwise, the returned character string is used instead.
+ */
+int _simple_esprintf(_SIMPLE_STRING b, _esc_func esc, const char *fmt, ...)
+{
+       va_list ap;
+       int ret;
+
+       va_start(ap, fmt);
+       ret = _simple_vesprintf(b, esc, fmt, ap);
+       va_end(ap);
+       return ret;
+}
+
+/*
+ * Return the null terminated string from the opaque structure, as returned
+ * by a previous call to _simple_salloc().
+ */
+char *
+_simple_string(_SIMPLE_STRING b)
+{
+       *((BUF *)b)->ptr = 0;
+       return ((BUF *)b)->buf;
+}
+
+/*
+ * Reposition the pointer to the first null in the buffer.  After a call to
+ * _simple_string, the buffer can be modified, and shrunk.
+ */
+void
+_simple_sresize(_SIMPLE_STRING b)
+{
+       ((BUF *)b)->ptr = ((BUF *)b)->buf + strlen(((BUF *)b)->buf);
+}
+
/*
 * Append the null-terminated string to the string associated with the opaque
 * structure.  Always returns 0.
 */
int
_simple_sappend(_SIMPLE_STRING b, const char *str)
{
	/* thin wrapper: same as _simple_esappend with no escape hook */
	return _simple_esappend(b, NULL, str);
}
+
+/*
+ * Like _simple_sappend(), except __esc is a function to call on each
+ * character; the function returns NULL if the character should be passed
+ * as is, otherwise, the returned character string is used instead.
+ */
+int _simple_esappend(_SIMPLE_STRING b, _esc_func esc, const char *str)
+{
+       put_s((BUF *)b, esc, str);
+       return 0;
+}
+
+/*
+ * Write the string associated with the opaque structure to the file descriptor.
+ */
+void
+_simple_put(_SIMPLE_STRING b, int fd)
+{
+       ((BUF *)b)->fd = fd;
+       _flush((BUF *)b);
+}
+
+/*
+ * Write the string associated with the opaque structure and a trailing newline,
+ * to the file descriptor.
+ */
+void
+_simple_putline(_SIMPLE_STRING b, int fd)
+{
+       ((BUF *)b)->fd = fd;
+       *((BUF *)b)->ptr++ = '\n';
+       _flush((BUF *)b);
+       ((BUF *)b)->ptr--;
+}
+
+/*
+ * Free the opaque structure, and the associated string.
+ */
+void
+_simple_sfree(_SIMPLE_STRING b)
+{
+       vm_size_t s;
+
+       if(b == NULL) return;
+       if(((intptr_t)(((BUF *)b)->buf) & (VM_PAGE_SIZE - 1)) == 0) {
+               vm_deallocate(mach_task_self(), (vm_address_t)((BUF *)b)->buf, BUF_SIZE(b));
+               s = VM_PAGE_SIZE;
+       } else {
+               s = ((BUF *)b)->end - (char *)b + 1;
+       }
+       vm_deallocate(mach_task_self(), (vm_address_t)b, s);
+}
diff --git a/src/string/generic/bzero.c b/src/string/generic/bzero.c
new file mode 100644 (file)
index 0000000..4c070a4
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <platform/string.h>
+
#if !_PLATFORM_OPTIMIZED_MEMSET

/*
 * Generic memset: replicate the fill byte into a 4-byte pattern and
 * delegate to _platform_memset_pattern4.  Returns b, like memset(3).
 */
void *
_platform_memset(void *b, int c, size_t len) {
	unsigned char pattern[4] = {
		(unsigned char)c,
		(unsigned char)c,
		(unsigned char)c,
		(unsigned char)c,
	};

	_platform_memset_pattern4(b, pattern, len);
	return b;
}

#if VARIANT_STATIC
/* static-archive builds also export the standard name */
void *
memset(void *b, int c, size_t len) {
	return _platform_memset(b, c, len);
}
#endif

#endif
+
+
#if !_PLATFORM_OPTIMIZED_BZERO

/* Generic bzero: delegate to _platform_memset with a zero fill byte. */
void
_platform_bzero(void *s, size_t n)
{
	_platform_memset(s, 0, n);
}

#if VARIANT_STATIC
/* static-archive builds also export the standard names */
void
bzero(void *s, size_t n) {
	_platform_bzero(s, n);
}

/* legacy compiler-emitted alias for bzero */
void
__bzero(void *s, size_t n) {
	_platform_bzero(s, n);
}
#endif

#endif
diff --git a/src/string/generic/ffsll.c b/src/string/generic/ffsll.c
new file mode 100644 (file)
index 0000000..12c9a09
--- /dev/null
@@ -0,0 +1,103 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <TargetConditionals.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/string/ffsll.c,v 1.1 2008/11/03 10:22:19 kib Exp $");
+
+#include <strings.h>
+
/*
 * Find First Set bit
 */
/* Returns the 1-based index of the least-significant set bit, 0 if none. */
int
ffsll(long long mask)
{
#if __has_builtin(__builtin_ffsll)
	return __builtin_ffsll(mask);
#elif __has_builtin(__builtin_ctzll)
	if (mask == 0)
		return (0);

	return __builtin_ctzll(mask) + 1;
#else
	int position = 1;

	if (mask == 0)
		return (0);
	/* shift right (logically) until bit 0 is set */
	while (!(mask & 1)) {
		mask = (unsigned long long)mask >> 1;
		position++;
	}
	return (position);
#endif
}
+
#if VARIANT_DYLD && TARGET_IPHONE_SIMULATOR
/* ffsl/ffs variants, built only under the guard above */
int
ffsl(long mask)
{
#if __has_builtin(__builtin_ffsl)
	return __builtin_ffsl(mask);
#elif __has_builtin(__builtin_ctzl)
	if (mask == 0)
		return (0);

	return __builtin_ctzl(mask) + 1;
#else
	int position = 1;

	if (mask == 0)
		return (0);
	/* shift right (logically) until bit 0 is set */
	while (!(mask & 1)) {
		mask = (unsigned long)mask >> 1;
		position++;
	}
	return (position);
#endif
}

int
ffs(int mask)
{
#if __has_builtin(__builtin_ffs)
	return __builtin_ffs(mask);
#elif __has_builtin(__builtin_ctz)
	if (mask == 0)
		return (0);

	return __builtin_ctz(mask) + 1;
#else
	int position = 1;

	if (mask == 0)
		return (0);
	/* shift right (logically) until bit 0 is set */
	while (!(mask & 1)) {
		mask = (unsigned)mask >> 1;
		position++;
	}
	return (position);
#endif
}
#endif
+
diff --git a/src/string/generic/flsll.c b/src/string/generic/flsll.c
new file mode 100644 (file)
index 0000000..b6c893b
--- /dev/null
@@ -0,0 +1,102 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <TargetConditionals.h>
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/lib/libc/string/flsll.c,v 1.1 2008/11/03 10:22:19 kib Exp $");
+
+#include <strings.h>
+
/*
 * Find Last Set bit
 */
/* Returns the 1-based index of the most-significant set bit, 0 if none. */
int
flsll(long long mask)
{
#if __has_builtin(__builtin_flsll)
	return __builtin_flsll(mask);
#elif __has_builtin(__builtin_clzll)
	if (mask == 0)
		return (0);

	return (sizeof(mask) << 3) - __builtin_clzll(mask);
#else
	int position = 1;

	if (mask == 0)
		return (0);
	/* shift right (logically) until only the top set bit remains */
	while (mask != 1) {
		mask = (unsigned long long)mask >> 1;
		position++;
	}
	return (position);
#endif
}
+
#if VARIANT_DYLD && TARGET_IPHONE_SIMULATOR
/* flsl/fls variants, built only under the guard above */
int
flsl(long mask)
{
#if __has_builtin(__builtin_flsl)
	return __builtin_flsl(mask);
#elif __has_builtin(__builtin_clzl)
	if (mask == 0)
		return (0);

	return (sizeof(mask) << 3) - __builtin_clzl(mask);
#else
	int position = 1;

	if (mask == 0)
		return (0);
	/* shift right (logically) until only the top set bit remains */
	while (mask != 1) {
		mask = (unsigned long)mask >> 1;
		position++;
	}
	return (position);
#endif
}

int
fls(int mask)
{
#if __has_builtin(__builtin_fls)
	return __builtin_fls(mask);
#elif __has_builtin(__builtin_clz)
	if (mask == 0)
		return (0);

	return (sizeof(mask) << 3) - __builtin_clz(mask);
#else
	int position = 1;

	if (mask == 0)
		return (0);
	/* shift right (logically) until only the top set bit remains */
	while (mask != 1) {
		mask = (unsigned)mask >> 1;
		position++;
	}
	return (position);
#endif
}
#endif
diff --git a/src/string/generic/memccpy.c b/src/string/generic/memccpy.c
new file mode 100644 (file)
index 0000000..f9aee55
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <platform/string.h>
+
#if !_PLATFORM_OPTIMIZED_MEMCCPY

#include <stdlib.h>

/*
 * Generic memccpy: copy bytes from f to t, stopping after the first
 * occurrence of byte c or after n bytes.  Returns a pointer one past the
 * copied c in t, or NULL if c was not found in the first n bytes.
 */
void *
_platform_memccpy(void *t, const void *f, int c, size_t n)
{
	void *stop;
	size_t copy_len;

	if (n == 0)
		return NULL;

	stop = _platform_memchr(f, c, n);
	if (stop == NULL) {
		/* c not present: copy the whole range */
		_platform_memmove(t, f, n);
		return NULL;
	}
	/* copy up to and including the matched byte */
	copy_len = (size_t)((char *)stop - (char *)f) + 1;
	_platform_memmove(t, f, copy_len);
	return (void *)((char *)t + copy_len);
}

#if VARIANT_STATIC
void *
memccpy(void *t, const void *f, int c, size_t n)
{
	return _platform_memccpy(t, f, c, n);
}
#endif

#endif
diff --git a/src/string/generic/memchr.c b/src/string/generic/memchr.c
new file mode 100644 (file)
index 0000000..3e1dc30
--- /dev/null
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <platform/string.h>
+
#if !_PLATFORM_OPTIMIZED_MEMCHR

#include <stdlib.h>

/*
 * Generic memchr: scan the first n bytes of s for byte value c
 * (converted to unsigned char).  Returns a pointer to the first match,
 * or NULL if c does not occur in the range.
 */
void *
_platform_memchr(const void *s, int c, size_t n)
{
	const unsigned char *cursor = s;
	const unsigned char target = (unsigned char)c;

	while (n != 0) {
		if (*cursor == target)
			return ((void *)cursor);
		cursor++;
		n--;
	}
	return (NULL);
}

#if VARIANT_STATIC
void *
memchr(const void *s, int c, size_t n)
{
	return _platform_memchr(s, c, n);
}
#endif

#endif
diff --git a/src/string/generic/memcmp.c b/src/string/generic/memcmp.c
new file mode 100644 (file)
index 0000000..fd4311d
--- /dev/null
@@ -0,0 +1,65 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_MEMCMP
+
+int
+_platform_memcmp(const void *s1, const void *s2, size_t n) /* lexicographic compare of n bytes; <0, 0, >0 */
+{
+       if (n != 0) {
+               const unsigned char *p1 = s1, *p2 = s2; /* compare as unsigned char, per the C standard */
+
+               do {
+                       if (*p1++ != *p2++)
+                               return (*--p1 - *--p2); /* difference of the first mismatching bytes */
+               } while (--n != 0);
+       }
+       return (0); /* equal, or n == 0 */
+}
+
+#if VARIANT_STATIC
+int
+memcmp(const void *s1, const void *s2, size_t n) /* static-library variant exports the public libc name */
+{
+       return _platform_memcmp(s1, s2, n);
+}
+
+int
+bcmp(const void *s1, const void *s2, size_t n) /* bcmp only promises zero/nonzero; memcmp's result satisfies that */
+{
+       return _platform_memcmp(s1, s2, n);
+}
+#endif
+
+#endif
diff --git a/src/string/generic/memmove.c b/src/string/generic/memmove.c
new file mode 100644 (file)
index 0000000..e11f68c
--- /dev/null
@@ -0,0 +1,134 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_MEMMOVE
+
+#include <sys/types.h>
+
+/*
+ * sizeof(word) MUST BE A POWER OF TWO
+ * SO THAT wmask BELOW IS ALL ONES
+ */
+typedef        long word;              /* "word" used for optimal copy speed */
+
+#define        wsize   sizeof(word)
+#define        wmask   (wsize - 1)
+
+/*
+ * Copy a block of memory, handling overlap.
+ * This is the routine that actually implements
+ * (the portable versions of) bcopy, memcpy, and memmove.
+ */
+
+void *
+_platform_memmove(void *dst0, const void *src0, size_t length) /* overlap-safe copy; returns dst0 */
+{
+       char *dst = dst0;
+       const char *src = src0;
+       size_t t;
+
+       if (length == 0 || dst == src)          /* nothing to do */
+               goto done;
+
+       /*
+        * Macros: loop-t-times; and loop-t-times, t>0
+        */
+#define        TLOOP(s) if (t) TLOOP1(s)
+#define        TLOOP1(s) do { s; } while (--t)
+
+       if ((unsigned long)dst < (unsigned long)src) { /* dst below src: forward copy cannot clobber unread bytes */
+               /*
+                * Copy forward.
+                */
+               t = (uintptr_t)src;     /* only need low bits */
+               if ((t | (uintptr_t)dst) & wmask) { /* at least one operand is misaligned */
+                       /*
+                        * Try to align operands.  This cannot be done
+                        * unless the low bits match.
+                        */
+                       if ((t ^ (uintptr_t)dst) & wmask || length < wsize)
+                               t = length; /* can't co-align: byte-copy everything */
+                       else
+                               t = wsize - (t & wmask); /* bytes needed to reach a word boundary */
+                       length -= t;
+                       TLOOP1(*dst++ = *src++);
+               }
+               /*
+                * Copy whole words, then mop up any trailing bytes.
+                */
+               t = length / wsize;
+               TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
+               t = length & wmask;
+               TLOOP(*dst++ = *src++);
+       } else {
+               /*
+                * Copy backwards.  Otherwise essentially the same.
+                * Alignment works as before, except that it takes
+                * (t&wmask) bytes to align, not wsize-(t&wmask).
+                */
+               src += length; /* point one past the end; copy runs high-to-low */
+               dst += length;
+               t = (uintptr_t)src;
+               if ((t | (uintptr_t)dst) & wmask) {
+                       if ((t ^ (uintptr_t)dst) & wmask || length <= wsize)
+                               t = length;
+                       else
+                               t &= wmask;
+                       length -= t;
+                       TLOOP1(*--dst = *--src); /* pre-decrement: copy the tail bytes first */
+               }
+               t = length / wsize;
+               TLOOP(src -= wsize; dst -= wsize; *(word *)dst = *(word *)src);
+               t = length & wmask;
+               TLOOP(*--dst = *--src);
+       }
+done:
+       return (dst0);
+}
+
+#if VARIANT_STATIC
+void *
+memmove(void *dst0, const void *src0, size_t length) /* static-library variant exports the public libc name */
+{
+       return _platform_memmove(dst0, src0, length);
+}
+
+void *
+memcpy(void *dst0, const void *src0, size_t length) /* memcpy aliases memmove: overlap-safe is a valid superset */
+{
+       return _platform_memmove(dst0, src0, length);
+}
+#endif
+
+#endif
diff --git a/src/string/generic/memset_pattern.c b/src/string/generic/memset_pattern.c
new file mode 100644 (file)
index 0000000..94aa085
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2013 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_MEMSET_PATTERN4
+
+void
+_platform_memset_pattern4(void *b, const void *pattern4, size_t len) /* tile a 4-byte pattern over b[0..len) */
+{
+       char * start = (char *)b;
+       char * p = (char *)b;
+       while ((start + len) - p >= 4) { /* whole-pattern copies while at least 4 bytes remain */
+               _platform_memmove(p, pattern4, 4);
+               p += 4;
+       }
+       if ((start + len) - p != 0) { /* tail: copy only the leading bytes of the pattern */
+               _platform_memmove(p, pattern4, (start + len) - p);
+       }
+}
+
+#if VARIANT_STATIC
+void
+memset_pattern4(void *b, const void *pattern4, size_t len) /* static-library variant exports the public name */
+{
+       return _platform_memset_pattern4(b, pattern4, len);
+}
+#endif
+
+#endif
+
+
+#if !_PLATFORM_OPTIMIZED_MEMSET_PATTERN8
+
+void
+_platform_memset_pattern8(void *b, const void *pattern8, size_t len) /* tile an 8-byte pattern over b[0..len) */
+{
+       char * start = (char *)b;
+       char * p = (char *)b;
+       while ((start + len) - p >= 8) { /* whole-pattern copies while at least 8 bytes remain */
+               _platform_memmove(p, pattern8, 8);
+               p += 8;
+       }
+       if ((start + len) - p != 0) { /* tail: copy only the leading bytes of the pattern */
+               _platform_memmove(p, pattern8, (start + len) - p);
+       }
+}
+
+#if VARIANT_STATIC
+void
+memset_pattern8(void *b, const void *pattern8, size_t len) /* static-library variant exports the public name */
+{
+       return _platform_memset_pattern8(b, pattern8, len);
+}
+#endif
+
+#endif
+
+
+#if !_PLATFORM_OPTIMIZED_MEMSET_PATTERN16
+
+void
+_platform_memset_pattern16(void *b, const void *pattern16, size_t len) /* tile a 16-byte pattern over b[0..len) */
+{
+       char * start = (char *)b;
+       char * p = (char *)b;
+       while ((start + len) - p >= 16) { /* whole-pattern copies while at least 16 bytes remain */
+               _platform_memmove(p, pattern16, 16);
+               p += 16;
+       }
+       if ((start + len) - p != 0) { /* tail: copy only the leading bytes of the pattern */
+               _platform_memmove(p, pattern16, (start + len) - p);
+       }
+}
+
+#if VARIANT_STATIC
+void
+memset_pattern16(void *b, const void *pattern16, size_t len) /* static-library variant exports the public name */
+{
+       return _platform_memset_pattern16(b, pattern16, len);
+}
+#endif
+
+#endif
diff --git a/src/string/generic/strchr.c b/src/string/generic/strchr.c
new file mode 100644 (file)
index 0000000..d290e15
--- /dev/null
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRCHR
+
+#include <stdlib.h>
+
+char *
+_platform_strchr(const char *p, int ch) /* first occurrence of ch in p; ch=='\0' matches the terminator */
+{
+       char c;
+
+       c = ch; /* narrow to char so the comparison matches plain-char string bytes */
+       for (;; ++p) {
+               if (*p == c) /* checked before the NUL test so searching for '\0' finds the terminator */
+                       return ((char *)p);
+               if (*p == '\0')
+                       return (NULL);
+       }
+       /* NOTREACHED */
+}
+
+#if VARIANT_STATIC
+char *
+strchr(const char *p, int ch) /* static-library variant exports the public libc name */
+{
+       return _platform_strchr(p, ch);
+}
+#endif
+
+#endif
diff --git a/src/string/generic/strcmp.c b/src/string/generic/strcmp.c
new file mode 100644 (file)
index 0000000..bc46727
--- /dev/null
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRCMP
+
+int
+_platform_strcmp(const char *s1, const char *s2) /* lexicographic compare of NUL-terminated strings */
+{
+       while (*s1 == *s2++)
+               if (*s1++ == '\0') /* both strings ended together: equal */
+                       return (0);
+       return (*(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1)); /* unsigned-char diff; s2 was post-incremented */
+}
+
+#if VARIANT_STATIC
+int
+strcmp(const char *s1, const char *s2) /* static-library variant exports the public libc name */
+{
+       return _platform_strcmp(s1, s2);
+}
+#endif
+
+#endif
diff --git a/src/string/generic/strncmp.c b/src/string/generic/strncmp.c
new file mode 100644 (file)
index 0000000..2f8c80f
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1989, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <platform/string.h>
+
+#if !_PLATFORM_OPTIMIZED_STRNCMP
+
+int
+_platform_strncmp(const char *s1, const char *s2, size_t n) /* compare at most n bytes of two strings */
+{
+
+       if (n == 0)
+               return (0); /* zero-length compare is defined as equal */
+       do {
+               if (*s1 != *s2++)
+                       return (*(const unsigned char *)s1 -
+                               *(const unsigned char *)(s2 - 1)); /* unsigned-char diff; s2 was post-incremented */
+               if (*s1++ == '\0') /* common NUL before n bytes: strings equal */
+                       break;
+       } while (--n != 0);
+       return (0);
+}
+
+#if VARIANT_STATIC
+int
+strncmp(const char *s1, const char *s2, size_t n) /* static-library variant exports the public libc name */
+{
+       return _platform_strncmp(s1, s2, n);
+}
+#endif
+
+#endif
diff --git a/src/ucontext/generic/getmcontext.c b/src/ucontext/generic/getmcontext.c
new file mode 100644 (file)
index 0000000..255628f
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#define _XOPEN_SOURCE 600L
+#include <ucontext.h>
+#include <errno.h>
+
+#if defined(__x86_64__) || defined(__i386__)
+
+#include <sys/resource.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <signal.h>
+
+extern int __sigaltstack(const stack_t * __restrict, stack_t * __restrict);
+
+#ifdef __DYNAMIC__
+extern int __in_sigtramp;
+#endif /* __DYNAMIC_ */
+
+__attribute__((visibility("hidden")))
+mcontext_t
+getmcontext(ucontext_t *uctx, void *sp) /* fill the non-register parts of uctx; presumably called from the getcontext asm stub, which captures registers — TODO confirm */
+{
+       mcontext_t mctx = (mcontext_t)&uctx->__mcontext_data; /* machine state lives inline inside the ucontext */
+       size_t stacksize = 0;
+       stack_t stack;
+
+       uctx->uc_stack.ss_sp = sp; /* caller-supplied current stack pointer */
+       uctx->uc_stack.ss_flags = 0;
+
+       if (0 == __sigaltstack(NULL, &stack)) { /* query-only: NULL new state */
+               if (stack.ss_flags & SS_ONSTACK) { /* currently executing on the signal stack: report it instead */
+                       uctx->uc_stack = stack;
+                       stacksize = stack.ss_size;
+               }
+       }
+
+       if (stacksize == 0) { /* not on sigaltstack: fall back to the process stack limit */
+               struct rlimit rlim;
+               if (0 == getrlimit(RLIMIT_STACK, &rlim))
+                       stacksize = rlim.rlim_cur;
+       }
+
+       uctx->uc_stack.ss_size = stacksize;
+
+       if (uctx->uc_mcontext != mctx) { /* first capture into this ucontext: wire up the pointer */
+               uctx->uc_mcontext = mctx;
+
+#ifdef __DYNAMIC__
+               uctx->uc_link = (ucontext_t*)(uintptr_t)__in_sigtramp; /* non-zero if in signal handler */
+#else  /* !__DYNAMIC__ */
+               uctx->uc_link = 0;
+#endif /* __DYNAMIC__ */
+
+       }
+
+       sigprocmask(0, NULL, &uctx->uc_sigmask); /* how==0 with NULL set: read the current mask without changing it */
+       return mctx;
+}
+
+#else
+
+int
+getcontext(ucontext_t *u) /* non-x86 build: ucontext is unsupported on this architecture */
+{
+       errno = ENOTSUP;
+       return -1;
+}
+
+#endif
diff --git a/src/ucontext/generic/makecontext.c b/src/ucontext/generic/makecontext.c
new file mode 100644 (file)
index 0000000..4f8a39d
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 2001 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define _XOPEN_SOURCE 600L
+#include <ucontext.h>
+#include <errno.h>
+
+#if defined(__x86_64__) || defined(__i386__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+#include <sys/param.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <unistd.h>
+
+/* Prototypes */
+extern void _ctx_start(ucontext_t *, int argc, ...);
+
+__attribute__((visibility("hidden")))
+void
+_ctx_done (ucontext_t *ucp) /* invoked by _ctx_start when the user start routine returns */
+{
+       if (ucp->uc_link == NULL)
+               _exit(0); /* no successor context: terminate the process, per makecontext semantics */
+       else {
+               /*
+                * Since this context has finished, don't allow it
+                * to be restarted without being reinitialized (via
+                * setcontext or swapcontext).
+                */
+               ucp->uc_mcsize = 0;
+
+               /* Set context to next one in link */
+               /* XXX - what to do for error, abort? */
+               setcontext((const ucontext_t *)ucp->uc_link);
+               __builtin_trap();       /* should never get here */
+       }
+}
+
+void
+makecontext(ucontext_t *ucp, void (*start)(), int argc, ...) /* prepare ucp so setcontext/swapcontext runs start(args...) */
+{
+       va_list         ap;
+       char            *stack_top;
+       intptr_t        *argp;
+       int             i;
+
+       if (ucp == NULL)
+               return;
+       else if ((ucp->uc_stack.ss_sp == NULL) ||
+           (ucp->uc_stack.ss_size < MINSIGSTKSZ)) { /* caller must provide a usable stack first */
+               /*
+                * This should really return -1 with errno set to ENOMEM
+                * or something, but the spec says that makecontext is
+                * a void function.   At least make sure that the context
+                * isn't valid so it can't be used without an error.
+                */
+               ucp->uc_mcsize = 0;
+       }
+       /* XXX - Do we want to sanity check argc? */
+       else if ((argc < 0) || (argc > NCARGS)) {
+               ucp->uc_mcsize = 0; /* invalidate, same as above */
+       }
+       /* Make sure the context is valid. */
+       else {
+               /*
+                * Arrange the stack as follows:
+                *
+                *      _ctx_start()    - context start wrapper
+                *      start()         - user start routine
+                *      arg1            - first argument, aligned(16)
+                *      ...
+                *      argn
+                *      ucp             - this context, %rbp/%ebp points here
+                *
+                * When the context is started, control will return to
+                * the context start wrapper which will pop the user
+                * start routine from the top of the stack.  After that,
+                * the top of the stack will be setup with all arguments
+                * necessary for calling the start routine.  When the
+                * start routine returns, the context wrapper then sets
+                * the stack pointer to %rbp/%ebp which was setup to point to
+                * the base of the stack (and where ucp is stored).  It
+                * will then call _ctx_done() to swap in the next context
+                * (uc_link != 0) or exit the program (uc_link == 0).
+                */
+               mcontext_t mc;
+
+               stack_top = (char *)(ucp->uc_stack.ss_sp +
+                   ucp->uc_stack.ss_size - sizeof(intptr_t)); /* NOTE: void* arithmetic — relies on a compiler extension */
+
+               int minargc = argc;
+#if defined(__x86_64__)
+               /* Give 6 stack slots to _ctx_start */
+               if (minargc < 6)
+                       minargc = 6; /* one slot per SysV integer argument register */
+#endif
+               /*
+                * Adjust top of stack to allow for 3 pointers (return
+                * address, _ctx_start, and ucp) and argc arguments.
+                * We allow the arguments to be pointers also.  The first
+                * argument to the user function must be properly aligned.
+                */
+
+               stack_top = stack_top - (sizeof(intptr_t) * (1 + minargc));
+               stack_top = (char *)((intptr_t)stack_top & ~15); /* 16-byte align the argument area */
+               stack_top = stack_top - (2 * sizeof(intptr_t));
+               argp = (intptr_t *)stack_top;
+
+               /*
+                * Setup the top of the stack with the user start routine
+                * followed by all of its aguments and the pointer to the
+                * ucontext.  We need to leave a spare spot at the top of
+                * the stack because setcontext will move rip/eip to the top
+                * of the stack before returning.
+                */
+               *argp = (intptr_t)_ctx_start;  /* overwritten with same value */
+               argp++;
+               *argp = (intptr_t)start;
+               argp++;
+
+               /* Add all the arguments: */
+               va_start(ap, argc);
+               for (i = 0; i < argc; i++) {
+                       *argp = va_arg(ap, intptr_t); /* every argument is assumed to be pointer/int sized */
+                       argp++;
+               }
+               va_end(ap);
+
+#if defined(__x86_64__)
+               /* Always provide space for ctx_start to pop the parameter registers */
+               for (;argc < minargc; argc++) {
+                       *argp++ = 0;
+               }
+
+               /* Keep stack aligned */
+               if (argc & 1) {
+                       *argp++ = 0;
+               }
+#endif
+
+               /* The ucontext is placed at the bottom of the stack. */
+               *argp = (intptr_t)ucp;
+
+               /*
+                * Set the machine context to point to the top of the
+                * stack and the program counter to the context start
+                * wrapper.  Note that setcontext() pushes the return
+                * address onto the top of the stack, so allow for this
+                * by adjusting the stack downward 1 slot.  Also set
+                * %r12/%esi to point to the base of the stack where ucp
+                * is stored.
+                */
+               mc = ucp->uc_mcontext;
+#if defined(__x86_64__)
+               /* Use callee-save and match _ctx_start implementation */
+               mc->__ss.__r12 = (intptr_t)argp; /* r12 survives the call into start(); _ctx_start reads ucp through it */
+               mc->__ss.__rbp = 0;
+               mc->__ss.__rsp = (intptr_t)stack_top + sizeof(caddr_t);
+               mc->__ss.__rip = (intptr_t)_ctx_start;
+#else
+               mc->__ss.__esi = (int)argp;
+               mc->__ss.__ebp = 0;
+               mc->__ss.__esp = (int)stack_top + sizeof(caddr_t);
+               mc->__ss.__eip = (int)_ctx_start;
+#endif
+       }
+}
+
+#else
+
+void
+makecontext(ucontext_t *u, void (*f)(void), int a, ...) /* non-x86 build: no-op; makecontext is void so no error can be reported */
+{
+}
+
+#endif
diff --git a/src/ucontext/generic/setcontext.c b/src/ucontext/generic/setcontext.c
new file mode 100644 (file)
index 0000000..ac2faac
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+
+#define _XOPEN_SOURCE 600L
+#include <ucontext.h>
+#include <errno.h>
+
+#if defined(__x86_64__) || defined(__i386__)
+
+#include <stddef.h>
+#include <signal.h>
+
+extern int _setcontext(const void *);
+
+int
+setcontext(const ucontext_t *uctx)
+{
+       mcontext_t mctx = (mcontext_t)&uctx->__mcontext_data; /* machine state embedded in the ucontext itself */
+       ucontext_t *_uctx = (ucontext_t *)uctx; /* deliberate const cast: repoint uc_mcontext at the inline data */
+       if (mctx != _uctx->uc_mcontext)
+               _uctx->uc_mcontext = mctx;
+       sigprocmask(SIG_SETMASK, &uctx->uc_sigmask, NULL); /* restore the saved signal mask before restoring registers */
+
+#if defined(__x86_64__)
+       return _setcontext(mctx); /* x86_64 asm stub takes the mcontext directly */
+#else
+       return _setcontext(uctx); /* i386 asm stub takes the whole ucontext */
+#endif
+}
+
+#else
+
+int
+setcontext(const ucontext_t *uctx) /* non-x86 build: ucontext is unsupported on this architecture */
+{
+       errno = ENOTSUP;
+       return -1;
+}
+
+#endif
diff --git a/src/ucontext/generic/swapcontext.c b/src/ucontext/generic/swapcontext.c
new file mode 100644 (file)
index 0000000..2195700
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2007, 2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 2001 Daniel M. Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define _XOPEN_SOURCE 600L
+#include <ucontext.h>
+#include <errno.h>
+
+#if defined(__x86_64__) || defined(__i386__)
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+
+#include <sys/param.h>
+#include <sys/signal.h>
+#include <stddef.h>
+
+#define uc_flags uc_onstack
+#define UCF_SWAPPED 0x80000000
+
+int
+swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
+{
+       int ret;
+
+       if ((oucp == NULL) || (ucp == NULL)) {
+               errno = EINVAL;
+               return (-1);
+       }
+       oucp->uc_flags &= ~UCF_SWAPPED;
+       ret = getcontext(oucp);
+       if ((ret == 0) && !(oucp->uc_flags & UCF_SWAPPED)) {
+               oucp->uc_flags |= UCF_SWAPPED;
+               ret = setcontext(ucp);
+       }
+       asm(""); // Prevent tailcall <rdar://problem/12581792>
+       return (ret);
+}
+
+#else
+
+int
+swapcontext(ucontext_t *oucp, const ucontext_t *ucp)
+{
+       errno = ENOTSUP;
+       return -1;
+}
+
+#endif
diff --git a/src/ucontext/i386/_ctx_start.s b/src/ucontext/i386/_ctx_start.s
new file mode 100644 (file)
index 0000000..f066c5b
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__i386__)
+
+#include <architecture/i386/asm_help.h>
+
+/*
+ * _ctx_start((void *func)(int arg1, ..., argn),
+ *            int arg1, ..., argn, ucontext_t *ucp)
+ *
+ * 0(%esp)             - func
+ * 4(%esp)             - arg1
+ * 8(%esp)             - arg2
+ * ...
+ * (4*n)(%esp)         - argn
+ * (4*(n + 1))(%esp)   - ucp, base of completion stack (restored below via %esi)
+ */
+TEXT
+.private_extern __ctx_start
+LABEL(__ctx_start)
+       popl    %eax            /* get start function */
+       call    *%eax           /* call start function */
+       movl    %esi, %esp      /*
+                                * setup stack for completion routine;
+                                * ucp is now at top of stack
+                                */
+       CALL_EXTERN(__ctx_done) /* should never return */
+       int $5                          /* trap */
+
+#endif /* __i386__ */
diff --git a/src/ucontext/i386/_setcontext.s b/src/ucontext/i386/_setcontext.s
new file mode 100644 (file)
index 0000000..6989254
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__i386__)
+
+#include <architecture/i386/asm_help.h>
+
+TEXT
+.private_extern __setcontext
+LABEL(__setcontext)
+       movl  4(%esp), %ecx
+       movl  28(%ecx), %ecx
+       movl  16(%ecx), %ebx
+       movl  28(%ecx), %edi
+       movl  32(%ecx), %esi
+       movl  36(%ecx), %ebp
+       movl  40(%ecx), %esp
+       pushl 48(%ecx)
+       popfl
+       movl  12(%ecx), %eax
+       jmp   *52(%ecx)
+
+#endif /* __i386__ */
diff --git a/src/ucontext/i386/getcontext.s b/src/ucontext/i386/getcontext.s
new file mode 100644 (file)
index 0000000..8cbe4c6
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__i386__)
+
+#include <architecture/i386/asm_help.h>
+
+TEXT
+LABEL(_getcontext)
+       subl  $28, %esp
+       movl  32(%esp), %eax
+       movl  %eax,   (%esp)
+       movl  %esp,  4(%esp)
+       CALL_EXTERN(_getmcontext)
+       movl  %eax, %ecx
+       addl  $28, %esp
+       movl  %ebx, 16(%ecx)
+       movl  %edi, 28(%ecx)
+       movl  %esi, 32(%ecx)
+       movl  %ebp, 36(%ecx)
+       movl  (%esp), %eax
+       movl  %eax, 52(%ecx)
+       movl  %esp, %eax
+       addl  $4, %eax
+       movl  %eax, 40(%ecx)
+       pushf
+       popl  %eax
+       movl  %eax, 48(%ecx)
+       xorl  %eax, %eax
+       movl  %eax, 12(%ecx)
+       ret
+
+#endif /* __i386__ */
diff --git a/src/ucontext/x86_64/_ctx_start.s b/src/ucontext/x86_64/_ctx_start.s
new file mode 100644 (file)
index 0000000..5ab64a2
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Neither the name of the author nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__x86_64__)
+
+#include <architecture/i386/asm_help.h>
+
+/*
+ * _ctx_start((void *func)(int arg1, ..., argn),
+ *            int arg1, ..., argn, ucontext_t *ucp)
+ *
+ * %rdi                - func
+ * %rsi                - arg1
+ * %rdx                - arg2
+ * %rcx                - arg3
+ * %r8         - arg4
+ * %r9         - arg5
+ * NOTE: the stack-offset description below is known to be inaccurate:
+ * (8*(n-6))(%rsp)             - argn (args beyond the sixth, if any)
+ * (8*(n + 1))(%rsp)   - ucp, base of completion stack (restored below via %r12)
+ */
+TEXT
+.private_extern __ctx_start
+LABEL(__ctx_start)
+       popq    %rax            /* accounted for in makecontext() */
+       /* makecontext will simulate 6 parameters at least */
+       /* Or it could just set these in the mcontext... */
+       popq    %rdi
+       popq    %rsi
+       popq    %rdx
+       popq    %rcx
+       popq    %r8
+       popq    %r9
+
+       callq   *%rax           /* call start function */
+       movq    %r12, %rsp      /*
+                                * setup stack for completion routine;
+                                * ucp is now at top of stack
+                                */
+       movq    (%rsp), %rdi
+       CALL_EXTERN(__ctx_done) /* should never return */
+       int $5                          /* trap */
+
+#endif /* __x86_64__ */
diff --git a/src/ucontext/x86_64/_setcontext.s b/src/ucontext/x86_64/_setcontext.s
new file mode 100644 (file)
index 0000000..7688add
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2007,2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__x86_64__)
+
+#include <architecture/i386/asm_help.h>
+
+#define MCONTEXT_SS_RAX     16
+#define MCONTEXT_SS_RBX     24
+#define MCONTEXT_SS_RCX     32
+#define MCONTEXT_SS_RDX     40
+#define MCONTEXT_SS_RDI     48
+#define MCONTEXT_SS_RSI     56
+#define MCONTEXT_SS_RBP     64
+#define MCONTEXT_SS_RSP     72
+#define MCONTEXT_SS_R8      80
+#define MCONTEXT_SS_RIP     144
+#define MCONTEXT_SS_RFLAGS  152
+
+TEXT
+.private_extern __setcontext
+LABEL(__setcontext)
+       /* struct mcontext_t * %rdi */
+#if DEBUG
+       movq  MCONTEXT_SS_RSI(%rdi),   %rsi
+       movq  MCONTEXT_SS_RCX(%rdi),   %rcx
+       movq  MCONTEXT_SS_R8+0(%rdi),  %r8
+       movq  MCONTEXT_SS_R8+8(%rdi),  %r9
+       movq  MCONTEXT_SS_R8+16(%rdi), %r10
+       movq  MCONTEXT_SS_R8+24(%rdi), %r11
+#endif
+       movq  MCONTEXT_SS_RBX(%rdi),   %rbx
+       movq  MCONTEXT_SS_R8+32(%rdi), %r12
+       movq  MCONTEXT_SS_R8+40(%rdi), %r13
+       movq  MCONTEXT_SS_R8+48(%rdi), %r14
+       movq  MCONTEXT_SS_R8+56(%rdi), %r15
+
+       movq  MCONTEXT_SS_RSP(%rdi), %rsp
+       movq  MCONTEXT_SS_RBP(%rdi), %rbp
+
+       xorl  %eax, %eax        /* force x=getcontext(); ... setcontext(); to keep x==0 */
+
+#if DEBUG
+       movq  MCONTEXT_SS_RIP(%rdi), %rdx
+       movq  MCONTEXT_SS_RDI(%rdi), %rdi
+       jmp  *%rdx
+#else
+       jmp  *MCONTEXT_SS_RIP(%rdi)
+#endif
+
+#endif /* __x86_64__ */
diff --git a/src/ucontext/x86_64/getcontext.s b/src/ucontext/x86_64/getcontext.s
new file mode 100644 (file)
index 0000000..e6a303c
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007,2009 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ * 
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ * 
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#if defined(__x86_64__)
+
+#include <architecture/i386/asm_help.h>
+
+#define MCONTEXT_SS_RAX     16
+#define MCONTEXT_SS_RBX     24
+#define MCONTEXT_SS_RCX     32
+#define MCONTEXT_SS_RDX     40
+#define MCONTEXT_SS_RDI     48
+#define MCONTEXT_SS_RSI     56
+#define MCONTEXT_SS_RBP     64
+#define MCONTEXT_SS_RSP     72
+#define MCONTEXT_SS_R8      80
+#define MCONTEXT_SS_RIP     144
+#define MCONTEXT_SS_RFLAGS  152
+
+TEXT
+LABEL(_getcontext)
+       /* struct ucontext_t * %rdi */
+       push  %rbp
+       movq  %rsp, %rbp
+       movq  %rsp, %rsi
+       CALL_EXTERN(_getmcontext) /* getmcontext(uctx, sp) */
+       pop   %rbp
+
+#if DEBUG
+       movq  $0, MCONTEXT_SS_RAX(%rax)
+       movq  $0, MCONTEXT_SS_RDX(%rax)
+       movq  $0, MCONTEXT_SS_RCX(%rax)
+       movq  $0, MCONTEXT_SS_RDI(%rax)
+       movq  $0, MCONTEXT_SS_RSI(%rax)
+       movq  $0, MCONTEXT_SS_R8(%rax)
+       movq  $0, MCONTEXT_SS_R8+8(%rax)
+       movq  $0, MCONTEXT_SS_R8+16(%rax)
+       movq  $0, MCONTEXT_SS_R8+24(%rax)
+       movq  $0, MCONTEXT_SS_RFLAGS(%rax)
+#endif
+
+       movq  %rbp, MCONTEXT_SS_RBP(%rax)
+       movq  %rbx, MCONTEXT_SS_RBX(%rax)
+       movq  %r12, MCONTEXT_SS_R8+32(%rax)
+       movq  %r13, MCONTEXT_SS_R8+40(%rax)
+       movq  %r14, MCONTEXT_SS_R8+48(%rax)
+       movq  %r15, MCONTEXT_SS_R8+56(%rax)
+       movq  (%rsp), %rcx              /* return address */
+       movq  %rcx, MCONTEXT_SS_RIP(%rax)
+       leaq  8(%rsp), %rcx
+       movq  %rcx, MCONTEXT_SS_RSP(%rax)
+       xorl  %eax, %eax
+       ret
+
+#endif /* __x86_64__ */
diff --git a/xcodeconfig/atomics.xcconfig b/xcodeconfig/atomics.xcconfig
new file mode 100644 (file)
index 0000000..e284e51
--- /dev/null
@@ -0,0 +1,12 @@
+#include "libplatform.xcconfig"
+#include "perarch.xcconfig"
+
+// Make sure that OSAtomic isn't built unoptimised, otherwise the inlines
+// don't do what they are designed to do.
+COMPILER_CFLAGS = -momit-leaf-frame-pointer
+OTHER_CFLAGS_debug =
+
+OSATOMIC_PREPROCESSOR_DEFINITIONS = OSATOMIC_USE_INLINED=0 OSATOMIC_DEPRECATED=0
+
+PUBLIC_HEADERS_FOLDER_PATH = /usr/include/libkern
+PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/libkern
diff --git a/xcodeconfig/libplatform.aliases b/xcodeconfig/libplatform.aliases
new file mode 100644 (file)
index 0000000..842ba78
--- /dev/null
@@ -0,0 +1 @@
+__platform_bzero ___bzero
diff --git a/xcodeconfig/libplatform.xcconfig b/xcodeconfig/libplatform.xcconfig
new file mode 100644 (file)
index 0000000..e607f4b
--- /dev/null
@@ -0,0 +1,72 @@
+#include "<DEVELOPER_DIR>/Makefiles/CoreOS/Xcode/BSD.xcconfig"
+
+VERSIONING_SYSTEM = ""
+
+// Standard settings
+SDKROOT = macosx.internal
+SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator
+BUILD_VARIANTS = normal debug dyld static
+
+EXECUTABLE_PREFIX = lib
+INSTALL_PATH = /usr/lib/system
+PUBLIC_HEADERS_FOLDER_PATH = /usr/include
+PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include
+
+USE_HEADERMAP = NO
+SKIP_INSTALL = YES
+INSTALLHDRS_SCRIPT_PHASE = YES
+
+GCC_OPTIMIZATION_LEVEL = s
+
+// TODO: Remove -fno-stack-protector once it has been moved down (after libproc is moved down)
+OTHER_CFLAGS = -fno-stack-protector -fdollars-in-identifiers -fno-common -fverbose-asm $(COMPILER_CFLAGS) -isystem $(SYSTEM_FRAMEWORK_HEADERS)
+OTHER_CFLAGS_normal = -momit-leaf-frame-pointer
+OTHER_CFLAGS_debug = -fno-inline -O0
+
+GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS_$(CURRENT_VARIANT)) $(OSATOMIC_PREPROCESSOR_DEFINITIONS)
+GCC_PREPROCESSOR_DEFINITIONS_dyld = VARIANT_DYLD=1 VARIANT_NO_RESOLVERS=1 VARIANT_STATIC=1
+GCC_PREPROCESSOR_DEFINITIONS_static = VARIANT_NO_RESOLVERS=1 VARIANT_STATIC=1
+GCC_PREPROCESSOR_DEFINITIONS_debug = DEBUG=1
+
+OSATOMIC_PREPROCESSOR_DEFINITIONS = OSATOMIC_USE_INLINED=1 OSSPINLOCK_USE_INLINED=1 OS_UNFAIR_LOCK_INLINE=0
+
+STRIP_INSTALLED_PRODUCT = $(STRIP_INSTALLED_PRODUCT_$(CURRENT_VARIANT))
+STRIP_INSTALLED_PRODUCT_normal = YES
+STRIP_INSTALLED_PRODUCT_dyld = NO
+STRIP_INSTALLED_PRODUCT_static = NO
+STRIP_INSTALLED_PRODUCT_debug = YES
+
+SRCROOT_SEARCH_PATHS = $(SRCROOT)/private $(SRCROOT)/include $(SRCROOT)/internal
+SYSTEM_FRAMEWORK_HEADERS = $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
+HEADER_SEARCH_PATHS = $(SRCROOT_SEARCH_PATHS) $(SDKROOT)/usr/local/include $(SDKROOT)/usr/include $(inherited)
+
+DEAD_CODE_STRIPPING = NO
+
+LINK_WITH_STANDARD_LIBRARIES = NO
+DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION)
+
+PLATFORM_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libsimple_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libatomics_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libcachecontrol_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libos_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libsetjmp_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libstring_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libucontext_$(CURRENT_VARIANT).a
+ATOMICS_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libatomics_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libatomics_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libatomics_arm_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libatomics_arm64_$(CURRENT_VARIANT).a $(EXTRA_ATOMICS_LIBRARIES)
+CACHECONTROL_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libcachecontrol_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libcachecontrol_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libcachecontrol_arm_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libcachecontrol_arm64_$(CURRENT_VARIANT).a $(EXTRA_CACHECONTROL_LIBRARIES)
+SETJMP_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libsetjmp_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libsetjmp_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libsetjmp_arm_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libsetjmp_arm64_$(CURRENT_VARIANT).a $(EXTRA_SETJMP_LIBRARIES)
+STRING_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libstring_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libstring_x86_64_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libstring_arm_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libstring_arm64_$(CURRENT_VARIANT).a $(EXTRA_STRING_LIBRARIES)
+UCONTEXT_LIBRARIES = $(CONFIGURATION_BUILD_DIR)/libucontext_i386_$(CURRENT_VARIANT).a $(CONFIGURATION_BUILD_DIR)/libucontext_x86_64_$(CURRENT_VARIANT).a
+
+OTHER_LDFLAGS = $(OTHER_LDFLAGS_$(TARGET_NAME)) $(CR_LDFLAGS)
+OTHER_LDFLAGS_libsystem_platform = -all_load $(PLATFORM_LIBRARIES) -umbrella System -L/usr/lib/system -ldyld -lcompiler_rt $(lsystem_kernel) -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libplatform.aliases,$(DIRTY_DATA_LDFLAGS)
+
+OTHER_LIBTOOLFLAGS = $(OTHER_LIBTOOLFLAGS_$(TARGET_NAME))
+OTHER_LIBTOOLFLAGS_libplatform_simple_dyld = $(CONFIGURATION_BUILD_DIR)/libsimple_$(CURRENT_VARIANT).a
+OTHER_LIBTOOLFLAGS_libplatform_string_dyld = $(CONFIGURATION_BUILD_DIR)/libstring_$(CURRENT_VARIANT).a
+OTHER_LIBTOOLFLAGS_libplatform_os_dyld =
+OTHER_LIBTOOLFLAGS_libplatform_dyld = $(PLATFORM_LIBRARIES)
+OTHER_LIBTOOLFLAGS_libplatform_static = $(PLATFORM_LIBRARIES)
+OTHER_LIBTOOLFLAGS_libatomics = $(ATOMICS_LIBRARIES)
+OTHER_LIBTOOLFLAGS_libcachecontrol = $(CACHECONTROL_LIBRARIES)
+OTHER_LIBTOOLFLAGS_libsetjmp = $(SETJMP_LIBRARIES)
+OTHER_LIBTOOLFLAGS_libstring = $(STRING_LIBRARIES)
+OTHER_LIBTOOLFLAGS_libucontext = $(UCONTEXT_LIBRARIES)
+
+lsystem_kernel = -lsystem_kernel
+lsystem_kernel[sdk=iphonesimulator*] = -lsystem_sim_kernel
+
diff --git a/xcodeconfig/os.xcconfig b/xcodeconfig/os.xcconfig
new file mode 100644 (file)
index 0000000..b163c0f
--- /dev/null
@@ -0,0 +1,28 @@
+#include "libplatform.xcconfig"
+
+GCC_STRICT_ALIASING = YES
+GCC_SYMBOLS_PRIVATE_EXTERN = YES
+GCC_WARN_SHADOW = YES
+GCC_WARN_64_TO_32_BIT_CONVERSION = YES
+GCC_WARN_ABOUT_RETURN_TYPE = YES
+GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES
+GCC_WARN_ABOUT_MISSING_NEWLINE = YES
+GCC_WARN_UNUSED_VARIABLE = YES
+GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
+GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES
+GCC_WARN_SIGN_COMPARE = YES
+GCC_WARN_UNINITIALIZED_AUTOS = YES
+CLANG_WARN_EMPTY_BODY = YES
+CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES
+CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES
+GCC_TREAT_WARNINGS_AS_ERRORS = YES
+WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-unknown-warning-option
+
+COMPILER_CFLAGS = -momit-leaf-frame-pointer
+OTHER_CFLAGS_debug =
+
+OSATOMIC_PREPROCESSOR_DEFINITIONS = OSATOMIC_USE_INLINED=0 OSATOMIC_DEPRECATED=0 OSSPINLOCK_USE_INLINED=0 OSSPINLOCK_DEPRECATED=0
+
+PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os
+PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os
+OS_INTERNAL_HEADERS_FOLDER_PATH = /usr/local/include/os/internal
diff --git a/xcodeconfig/perarch.xcconfig b/xcodeconfig/perarch.xcconfig
new file mode 100644 (file)
index 0000000..8ec4910
--- /dev/null
@@ -0,0 +1,27 @@
+ARCH_FAMILY = $(ARCH_FAMILY_$(CURRENT_ARCH))
+ARCH_FAMILY_x86_64 = x86_64
+ARCH_FAMILY_i386 = i386
+ARCH_FAMILY_armv6 = arm
+ARCH_FAMILY_armv7 = arm
+ARCH_FAMILY_armv7s = arm
+ARCH_FAMILY_armv7f = arm
+ARCH_FAMILY_armv7k = arm
+ARCH_FAMILY_arm64 = arm64
+
+EXCLUDED_SOURCE_FILE_NAMES = *
+
+INCLUDED_SOURCE_FILE_NAMES = force_libplatform_to_build.c $(INCLUDED_SOURCE_FILE_NAMES_$(PERARCH_TARGET)_TARGET)
+INCLUDED_SOURCE_FILE_NAMES__TARGET = $(INCLUDED_SOURCE_FILE_NAMES_$(ARCH_FAMILY_THIS_SLICE)_$(ARCH_FAMILY))
+
+INCLUDED_SOURCE_FILE_NAMES_arm_arm = *
+INCLUDED_SOURCE_FILE_NAMES__arm = *
+INCLUDED_SOURCE_FILE_NAMES_arm64_arm64 = *
+INCLUDED_SOURCE_FILE_NAMES__arm64 = *
+INCLUDED_SOURCE_FILE_NAMES_i386_i386 = *
+INCLUDED_SOURCE_FILE_NAMES__i386 = *
+INCLUDED_SOURCE_FILE_NAMES_x86_64_x86_64 = *
+INCLUDED_SOURCE_FILE_NAMES__x86_64 = *
+
+// To force fallback to generic C implementations for dyld_Sim
+INCLUDED_SOURCE_FILE_NAMES_x86_64_x86_64[sdk=iphonesimulator*] =
+INCLUDED_SOURCE_FILE_NAMES_i386_i386[sdk=iphonesimulator*] =
diff --git a/xcodeconfig/static.xcconfig b/xcodeconfig/static.xcconfig
new file mode 100644 (file)
index 0000000..92ec0e3
--- /dev/null
@@ -0,0 +1,9 @@
+#include "libplatform.xcconfig"
+
+// pick <BLAH>_static in libplatform.xcconfig
+BUILD_VARIANTS = static
+
+EXECUTABLE_PREFIX = lib
+PRODUCT_NAME = platform
+INSTALL_PATH = /usr/local/lib/system
+SKIP_INSTALL = NO