diff --git a/libsyscall/custom/__getpid.s b/libsyscall/custom/__getpid.s
index a6cc5a5f42cbaa7a365feb8334e212e52764b706..a8daa7398196cf16cbfd00cb973c4511bdda11ad 100644
--- a/libsyscall/custom/__getpid.s
+++ b/libsyscall/custom/__getpid.s
 
 #include "SYS.h"
 
-#if defined(__ppc__) || defined(__ppc64__)
-
-        .data
-        .globl  __current_pid
-        .align  2
-__current_pid:
-        .long 0
-
-MI_ENTRY_POINT(___getpid)
-#if defined(__DYNAMIC__)
-        mflr    r0              // note we cannot use MI_GET_ADDRESS...
-        bcl    20,31,1f         // ...because we define __current_pid
-1:
-        mflr    r5
-        mtlr    r0
-        addis   r5, r5, ha16(__current_pid - 1b)
-        addi    r5, r5, lo16(__current_pid - 1b)
-#else
-       lis     r5,hi16(__current_pid)
-       ori     r5,r5,lo16(__current_pid)
-#endif
-        lwz     r3,0(r5)               // get the cached pid
-        cmpwi  r3,0                    // if positive,
-        bgtlr++                 // return it
-       
-        SYSCALL_NONAME(getpid, 0)
-
-        lwarx  r4,0,r5                 // see if we can cache it
-        cmpwi  r4,0                    // we can't if there are any...
-        blt--  1f              // ...vforks in progress
-
-        stwcx. r3,0,r5                 // ignore cache conflicts
-        blr
-1:
-        li      r6,-4           // on 970, cancel the reservation using red zone...
-        stwcx.  r3,r6,r1        // ...to avoid an errata
-        blr
-
-#elif defined(__i386__)
+#if defined(__i386__)
 
        .data
        .private_extern __current_pid
-__current_pid:
+L__current_pid_addr:
+ __current_pid:
        .long 0
-L__current_pid_addr = __current_pid
 
 #if defined(__DYNAMIC__)
 #define GET_CURRENT_PID                                \
@@ -99,7 +61,7 @@ LEAF(___getpid, 0)
        jle             1f
        ret
 1:
-       UNIX_SYSCALL_NONAME(getpid, 0)
+       UNIX_SYSCALL_NONAME(getpid, 0, cerror_nocancel)
        movl            %eax, %edx
        xorl            %eax, %eax
        GET_CURRENT_PID
@@ -126,7 +88,7 @@ LEAF(___getpid, 0)
        jle             1f
        ret
 1:
-       UNIX_SYSCALL_NONAME(getpid, 0)
+       UNIX_SYSCALL_NONAME(getpid, 0, cerror_nocancel)
        movl            %eax, %edx
        xorl            %eax, %eax
        leaq            __current_pid(%rip), %rcx
@@ -151,14 +113,14 @@ __current_pid:
         */
        .long 0
 
-MI_ENTRY_POINT(_getpid)
+MI_ENTRY_POINT(___getpid)
        ldr     r3, L__current_pid
 L1:    add     r3, pc, r3              // r3 = &__current_pid
        ldr     r0, [r3]                // get the cached pid
        cmp     r0, #0
        bxgt    lr                      // if positive, return it
 
-       SYSCALL_NONAME(getpid, 0)
+       SYSCALL_NONAME(getpid, 0, cerror_nocancel)
 
 #ifdef _ARM_ARCH_6
        ldrex   r2, [r3]                // see if we can cache it
@@ -180,6 +142,32 @@ L1:        add     r3, pc, r3              // r3 = &__current_pid
 L__current_pid:        
        .long   __current_pid - (L1+8)          
 
+#elif defined(__arm64__)
+       .data
+       .globl  __current_pid
+       .align 2
+__current_pid:
+       /* cached pid.  possible values:
+        *      0:              no value cached
+        *      > 0:            cached pid of current process
+        *      < 0:            negative number of vforks in progress
+        *      int_min:        for pre-armv6, "looking" value (0x80000000)
+        */
+       .long 0
+
+MI_ENTRY_POINT(___getpid)
+       MI_GET_ADDRESS(x9, __current_pid)       // Get address of cached value
+       ldr             w0, [x9]                                        // Load it
+       cmp             w0, #0                                          // See if there's a cached value
+       b.le    L_notcached                                     // If not, make syscall
+       ret                                                                     // Else, we're done
+L_notcached:
+       SYSCALL_NONAME(getpid, 0, cerror_nocancel)
+       ldxr    w10, [x9]                                       // Exclusive load
+       cbnz    w10, L_done                                     // Unless unset, don't even try
+       stxr    wzr, w0, [x9]                           // Try to store, but don't care if we fail (someone will win, or not)
+L_done:
+       ret                                                                     // Done
 #else
 #error Unsupported architecture
 #endif
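
For reference, the caching scheme that each of these per-architecture implementations follows can be sketched in portable C11 atomics. This is an illustrative sketch only, not part of the diff: cached_getpid and current_pid are made-up names standing in for ___getpid and __current_pid, and the libc getpid() call stands in for the raw SYSCALL_NONAME(getpid, 0, cerror_nocancel) trap.

#include <stdatomic.h>
#include <unistd.h>

/* Encoding of the cache slot, as documented in the comments above:
 *   0    no value cached
 *   > 0  cached pid of the current process
 *   < 0  negative number of vforks in progress (do not cache)
 */
static _Atomic int current_pid = 0;

pid_t cached_getpid(void)
{
	int cached = atomic_load_explicit(&current_pid, memory_order_relaxed);
	if (cached > 0)
		return cached;                  /* fast path: return the cached pid */

	pid_t pid = getpid();                   /* stand-in for the raw getpid trap */

	/* Try to cache the result, but only if the slot still reads 0, i.e. no
	 * vfork is in progress and nothing has been cached in the meantime.
	 * A failed exchange is simply ignored, mirroring the "don't care if we
	 * fail" store-exclusive in the arm64 code above. */
	int expected = 0;
	atomic_compare_exchange_strong_explicit(&current_pid, &expected, (int)pid,
	    memory_order_relaxed, memory_order_relaxed);
	return pid;
}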