]> git.saurik.com Git - apple/libplatform.git/blob - src/atomics/i386/pfz.s
libplatform-254.40.4.tar.gz
[apple/libplatform.git] / src / atomics / i386 / pfz.s
1 /*
2 * Copyright (c) 2006-2013 Apple, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <machine/cpu_capabilities.h>
25 #include <architecture/i386/asm_help.h>
26
27 .text
28
29 /* Subroutine to make a preempt syscall. Called when we notice %ebx is
30 * nonzero after returning from a PFZ (preemption-free zone) subroutine,
31 * i.e. the kernel deferred a preemption while we were inside the PFZ.
32 * Register contract when we enter the kernel via sysenter:
33 * %edx = user-mode return address (where the kernel resumes us)
34 * %ecx = user-mode stack ptr (restored on return)
35 * Destroys %eax, %ecx, and %edx.
36 */
37 .align 4
38 .private_extern _preempt
39 .globl _preempt
40 _preempt:
41 popl %edx // get return address; sysenter needs it in %edx
42 movl %esp,%ecx // save stack ptr here; sysenter needs it in %ecx
43 movl $(-58),%eax /* 58 = pfz_exit; negative selects a Mach trap — NOTE(review): confirm against xnu trap table */
44 xorl %ebx,%ebx // clear "preemption pending" flag before trapping
45 sysenter // kernel takes the deferred preemption, then resumes at %edx
45
46 /* Subroutine to back off if we cannot get the spinlock. Called
47 * after a few attempts inline in the PFZ subroutines. This code is
48 * deliberately not in the PFZ, so a pending preemption can be taken here.
49 * %edi = ptr to queue head structure
50 * %ebx = preemption flag (nonzero if preemption pending)
51 * Destroys %eax. (_preempt also clobbers %ecx/%edx, but those are
52 * saved and restored below, so callers see only %eax destroyed.) */
53
54 .align 4
55 .private_extern _backoff
56 .globl _backoff
57 _backoff:
58 testl %ebx,%ebx // does kernel want to preempt us?
59 jz 1f // no — go straight to the spin wait
60 xorl %ebx,%ebx // yes, clear flag so we trap at most once
61 pushl %edx // preserve regs used by preempt syscall
62 pushl %ecx // (see _preempt: destroys %eax, %ecx, %edx)
63 call _preempt
64 popl %ecx // restore caller's %ecx ...
65 popl %edx // ... and %edx
66 1: // spin until the lock looks free
67 pause // SMT-friendly backoff
68 cmpl $0,8(%edi) // sniff the lockword — assumes it sits at offset 8 in the queue head; confirm layout
69 jnz 1b // loop if still taken
70 ret // lockword is free, so reenter PFZ and retry the acquire