]> git.saurik.com Git - apple/libc.git/blob - i386/pthreads/preempt.s
Libc-825.25.tar.gz
[apple/libc.git] / i386 / pthreads / preempt.s
1 /*
2 * Copyright (c) 2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <machine/cpu_capabilities.h>
25 #include <mach/i386/syscall_sw.h>
26
/* Subroutine to make a preempt syscall.  Called when we notice %ebx is
 * nonzero after returning from a PFZ subroutine (the kernel sets %ebx to
 * signal that a preemption is pending and must be acknowledged).
 *
 * This routine does not return by "ret": on i386 the sysenter convention
 * expects the user-mode resume address in %edx and the user stack pointer
 * in %ecx, so the kernel resumes execution directly at our caller's
 * return address (popped into %edx below).
 *
 * When we enter kernel:
 *	%edx = return address
 *	%ecx = stack ptr
 * Destroys %eax, %ecx, and %edx.
 * NOTE(review): the negative trap number indicates a Mach trap rather than
 * a BSD syscall — confirm against <mach/i386/syscall_sw.h>.
 */
	.align	4
	.private_extern	_preempt
_preempt:
	popl	%edx			// get return address (kernel resumes here)
	movl	%esp,%ecx		// save stack ptr here (restored by kernel on exit)
	movl	$(-58),%eax		/* 58 = pfz_exit */
	xorl	%ebx,%ebx		// clear "preemption pending" flag
	sysenter			// trap; control continues at %edx, not here
43
/* Subroutine to back off if we cannot get the spinlock.  Called
 * after a few attempts inline in the PFZ subroutines.  This code is
 * not in the PFZ, so it may be preempted normally; before spinning we
 * first honor any pending preemption request (%ebx nonzero) by making
 * the preempt syscall, preserving %edx/%ecx which that trap destroys.
 *
 * The spin loop then reads (not writes) the lockword until it appears
 * free, and returns so the caller can retry the lock inside the PFZ.
 *
 * %edi = ptr to queue head structure
 * %ebx = preemption flag (nonzero if preemption pending)
 * Destroys %eax.
 * NOTE(review): the lockword is assumed to live at offset 8 within the
 * queue head structure (see cmpl below) — confirm against the OSAtomic
 * queue-head layout.
 */

	.align	4
	.private_extern	_backoff
_backoff:
	testl	%ebx,%ebx		// does kernel want to preempt us?
	jz	1f			// no
	xorl	%ebx,%ebx		// yes, clear flag
	pushl	%edx			// preserve regs used by preempt syscall
	pushl	%ecx
	call	_preempt
	popl	%ecx
	popl	%edx
1:
	pause				// SMT-friendly backoff
	cmpl	$0,8(%edi)		// sniff the lockword (read-only spin)
	jnz	1b			// loop if still taken
	ret				// lockword is free, so reenter PFZ