+
+/* PREEMPTION FREE ZONE (PFZ)
+ *
+ * A portion of the commpage is special-cased by the kernel to be "preemption free",
+ * i.e., as if we had disabled interrupts in user mode. This facilitates writing
+ * "nearly-lockless" code, for example, code that must be serialized by a spinlock but
+ * which we do not want to preempt while the spinlock is held.
+ *
+ * The PFZ is implemented by collecting all the "preemption-free" code into a single
+ * contiguous region of the commpage. Register %ebx is used as a flag register;
+ * before entering the PFZ, %ebx is cleared. If some event occurs that would normally
+ * result in a preemption while in the PFZ, the kernel sets %ebx nonzero instead of
+ * preempting. Then, when the routine leaves the PFZ, we check %ebx and, if it is
+ * nonzero, execute a special "pfz_exit" syscall to take the delayed preemption.
+ *
+ * PFZ code must bound the amount of time spent in the PFZ, in order to control
+ * latency. Backward branches are dangerous and must not be used in a way that
+ * could inadvertently create a long-running loop.
+ *
+ * Because we need to avoid being preempted between changing the mutex stateword
+ * and entering the kernel to relinquish, some low-level pthread mutex manipulations
+ * are located in the PFZ.
+ */
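+
+/* A minimal C-style sketch of the PFZ usage pattern described above (illustration
+ * only, not part of the implementation; pfz_preempt_pending and pfz_exit_syscall()
+ * are hypothetical stand-ins for the %ebx flag and the delayed-preemption syscall):
+ *
+ *      int pfz_preempt_pending = 0;    // %ebx is cleared before entering the PFZ
+ *
+ *      // ... bounded, preemption-free work inside the PFZ ...
+ *
+ *      if (pfz_preempt_pending)        // kernel set the flag instead of preempting us
+ *              pfz_exit_syscall();     // take the delayed preemption now
+ */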
+
+/* Work around 10062261 with a dummy non-local symbol */
+pthreads_dummy_symbol:
+
+/* Internal routine to handle pthread mutex lock operation. This is in the PFZ.
+ * %edi == ptr to LVAL/UVAL pair
+ * %esi == ptr to argument list on stack
+ * %ebx == preemption pending flag (kernel sets nonzero if we should preempt)
+ */
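+
+/* Approximate C sketch of the lock fast path below (illustration only; args, mutex,
+ * preempt_pending, compare_and_swap() and take_delayed_preemption() are hypothetical
+ * names mirroring the %esi/%edi arguments, the %ebx flag, the lock cmpxchgl, and the
+ * COMMPAGE_CALL to _COMM_PAGE_PREEMPT):
+ *
+ *      uint32_t mask = args->mask;                     // 16(%esi): PTHRW_EBIT etc
+ *      for (;;) {
+ *              uint32_t lval = mutex->lval;            // PTHRW_LVAL(%edi)
+ *              if ((lval & mask) == 0) {               // mutex available?
+ *                      uint32_t newval = (lval + PTHRW_INC) | PTHRW_EBIT;
+ *                      if (compare_and_swap(&mutex->lval, lval, newval)) {
+ *                              *args->tid_field = args->mtid;  // 64-bit owner TID (two 32-bit stores below)
+ *                              return PTHRW_STATUS_ACQUIRED;
+ *                      }
+ *              } else {
+ *                      break;          // not available: slow path at label 5 (outside this hunk)
+ *              }
+ *              if (preempt_pending)            // %ebx set nonzero by the kernel
+ *                      take_delayed_preemption();      // then loop and try again
+ *      }
+ */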
+COMMPAGE_FUNCTION_START(pfz_mutex_lock, 32, 4)
+ pushl %ebp // set up frame for backtrace
+ movl %esp,%ebp
+1:
+ movl 16(%esi),%ecx // get mask (ie, PTHRW_EBIT etc)
+2:
+ movl PTHRW_LVAL(%edi),%eax // get mutex LVAL
+ testl %eax,%ecx // is mutex available?
+ jnz 5f // no
+
+ /* lock is available (if we act fast) */
+ lea PTHRW_INC(%eax),%edx // copy original lval and bump sequence count
+ orl $PTHRW_EBIT, %edx // set EBIT
+ lock
+ cmpxchgl %edx,PTHRW_LVAL(%edi) // try to acquire lock for real
+ jz 4f // got it
+3:
+ testl %ebx,%ebx // kernel trying to preempt us?
+ jz 2b // no, so loop and try again
+ COMMPAGE_CALL(_COMM_PAGE_PREEMPT,_COMM_PAGE_PFZ_MUTEX_LOCK,pfz_mutex_lock)
+ jmp 1b // loop to try again
+
+ /* we acquired the mutex */
+4:
+ movl 20(%esi),%eax // get ptr to TID field of mutex
+ movl 8(%esi),%ecx // get 64-bit mtid
+ movl 12(%esi),%edx
+ movl %ecx,0(%eax) // store my TID in mutex structure
+ movl %edx,4(%eax)
+ movl $PTHRW_STATUS_ACQUIRED,%eax
+ popl %ebp