addl $1, (%rdx)
jmp *%rdi
+#elif defined(__arm__)
+
+#include <arm/arch.h>
+
+// pid_t vfork(void) -- 32-bit ARM entry point.
+// While a vfork is in flight, __current_pid is held negative (decremented
+// here, re-incremented at Lparent). The child shares the parent's stack,
+// so this path keeps everything in registers and never touches memory
+// other than __current_pid.
+ .globl cerror
+ MI_ENTRY_POINT(___vfork)
+
+ MI_GET_ADDRESS(r3, __current_pid) // get address of __current_pid
+#ifdef _ARM_ARCH_6
+L0:
+ ldrex r1, [r3] // exclusive-load the current value
+ subs r1, r1, #1 // if __current_pid <= 0, decrement it
+ movpl r1, #-1 // otherwise (result >= 0) put -1 in there
+ strex r2, r1, [r3] // try to store it back exclusively
+ cmp r2, #0 // r2 != 0 means the store lost the race
+ bne L0 // retry the whole read-modify-write
+#else
+ mov r2, #0x80000000 // load "looking" value
+L0:
+ swp r1, r2, [r3] // look at the value, lock others out
+ cmp r1, r2 // anyone else trying to look?
+ beq L0 // yes, so wait our turn
+ subs r1, r1, #1 // if __current_pid <= 0, decrement it
+ movpl r1, #-1 // otherwise (result >= 0) put -1 in there
+ str r1, [r3] // publish new value, releasing the "lock"
+#endif
+
+ mov r1, #1 // prime results; r1 is tested below to tell parent from child
+ mov r12, #SYS_vfork // syscall number
+ swi #SWI_SYSCALL // make the syscall
+ bcs Lbotch // carry set means the syscall failed
+ cmp r1, #0 // parent (r1=0) or child(r1=1)
+ beq Lparent
+
+ //child here...
+ mov r0, #0 // vfork() returns 0 in the child
+ bx lr // return
+
+Lbotch:
+ // NOTE(review): MI_CALL_EXTERNAL expands to a call and so clobbers lr
+ // before the final bx lr -- confirm the error path still returns to the
+ // original caller.
+ MI_CALL_EXTERNAL(_cerror) // jump here on error; cerror updates errno
+ mov r0,#-1 // set the error return value
+ // reload values clobbered by cerror (so we can treat them as live in Lparent)
+ MI_GET_ADDRESS(r3, __current_pid) // get address of __current_pid
+#ifndef _ARM_ARCH_6
+ mov r2, #0x80000000 // load "looking" value
+#endif
+ // fall thru
+
+Lparent:
+#ifdef _ARM_ARCH_6
+ ldrex r1, [r3] // exclusive-load the current value
+ add r1, r1, #1 // we're back, decrement vfork count
+ strex r2, r1, [r3] // try to store it back exclusively
+ cmp r2, #0 // r2 != 0 means the store lost the race
+ bne Lparent // retry the read-modify-write
+#else
+ swp r1, r2, [r3] // look at the value, lock others out
+ cmp r1, r2 // anyone else trying to look?
+ beq Lparent // yes, so wait our turn
+ add r1, r1, #1 // we're back, decrement vfork count
+ str r1, [r3] // publish new value, releasing the "lock"
+#endif
+
+ bx lr // return
+
+#elif defined(__arm64__)
+
+// pid_t vfork(void) -- arm64 entry point. Same bookkeeping as the 32-bit
+// ARM path: drive __current_pid negative while the vfork is in flight,
+// then rebalance it (+1) at Lparent. No stack usage -- the child shares
+// the parent's stack.
+ MI_ENTRY_POINT(___vfork)
+
+ MI_GET_ADDRESS(x9, __current_pid)
+Ltry_set_vfork:
+ ldxr w10, [x9] // Get old current pid value (exclusive)
+ mov w11, #-1 // Will be -1 if current value is positive
+ subs w10, w10, #1 // Subtract one
+ csel w12, w11, w10, pl // If >= 0, set to -1, else set to (current - 1)
+ stxr w13, w12, [x9] // Attempt exclusive store to current pid
+ cbnz w13, Ltry_set_vfork // If store failed, retry
+
+ // ARM sets r1 to 1 here. I don't see why.
+ mov w16, #SYS_vfork // Set syscall code
+ svc #SWI_SYSCALL // Trap to the kernel
+ b.cs Lbotch // Carry set means the syscall failed
+ cbz w1, Lparent // w1 == 0 identifies the parent
+
+ // Child
+ mov w0, #0 // vfork() returns 0 in the child
+ ret
+
+ // Error case
+Lbotch:
+ // NOTE(review): bl overwrites x30 (lr) before the final ret below --
+ // confirm the error path still returns to the original caller after
+ // _cerror comes back here.
+ bl _cerror // Update errno
+ mov w0, #-1 // Set return value
+ MI_GET_ADDRESS(x9, __current_pid) // Reload current pid address
+ // Fall through
+Lparent:
+ ldxr w10, [x9] // Exclusive load current pid value
+ add w10, w10, #1 // Increment (i.e. decrement vfork count)
+ stxr w11, w10, [x9] // Attempt exclusive store of updated vfork count
+ cbnz w11, Lparent // If exclusive store failed, retry
+ ret // Done, return
+
#else
#error Unsupported architecture
#endif