/* x86_64/pthreads/preempt.s — Apple Libc-763.11 (mirror: git.saurik.com, apple/libc.git) */
/*
 * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <machine/cpu_capabilities.h>
#include <mach/i386/syscall_sw.h>

/* void _preempt(void)
 *
 * Subroutine to make a preempt (pfz_exit) Mach trap.  Called when we
 * notice the %ebx preemption flag is nonzero after returning from a
 * PFZ (preemption-free zone) subroutine.  This code is NOT in the PFZ,
 * so it is safe to take the pending preemption here.
 *
 * All registers are preserved across the call (but the %ebx preemption
 * flag is cleared).  %rax, %rcx, and %r11 must be saved explicitly:
 * the trap number is loaded into %eax, and the syscall instruction
 * itself clobbers %rcx (return RIP) and %r11 (RFLAGS).
 */
	.align 2
	.private_extern _preempt
_preempt:
	pushq	%rax				// caller expects %rax preserved; trap number goes here
	pushq	%rcx				// syscall clobbers %rcx (return RIP)
	pushq	%r11				// syscall clobbers %r11 (saved RFLAGS)
	movl	$(SYSCALL_CONSTRUCT_MACH(58)),%eax	/* 58 = pfz_exit */
	xorl	%ebx,%ebx			// clear preemption-pending flag before trapping
	syscall					// mach trap: acknowledge/take the preemption
	popq	%r11				// restore everything the trap path touched
	popq	%rcx
	popq	%rax
	ret
/* Subroutine to back off if we cannot get the spinlock.  Called
 * after a few attempts inline in the PFZ subroutines.  This code is
 * NOT in the PFZ, so it is legal to take a pending preemption here
 * before spinning on the lockword.
 *	%rdi = ptr to queue head structure
 *	       (lockword sniffed at offset 16 -- NOTE(review): offset must
 *	        match the queue-head layout used by the PFZ callers; confirm)
 *	%ebx = preemption flag (nonzero if preemption pending)
 * Uses: %rax.
 */
	.align 2
	.private_extern _backoff
_backoff:
	testl	%ebx,%ebx		// does kernel want to preempt us?
	jz	1f			// no: go straight to the spin loop
	call	_preempt		// yes: take preemption now, while outside the PFZ
1:
	pause				// SMT-friendly backoff
	cmpl	$0,16(%rdi)		// sniff the lockword (nonzero = still held)
	jnz	1b			// loop if still taken
	ret				// lockword is free, so reenter PFZ and retry lock