/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved.
 *
 * EventShmemLock.h - Shared memory area locks for use between the
 *	WindowServer and the Event Driver.
 *
 * HISTORY
 * 30 Nov 1992	Ben Fathi (benf@next.com)
 *	Ported to m98k.
 *
 * 29 April 1992	Mike Paquette at NeXT
 *	Created.
 *
 * Multiprocessor locks used within the shared memory area between the
 * kernel and event system. These must work in both user and kernel mode.
 * The locks are defined in an include file so they get exported to the local
 * include file area.
 */


#ifndef _IOKIT_IOSHAREDLOCKIMP_H
#define _IOKIT_IOSHAREDLOCKIMP_H

#include <architecture/ppc/asm_help.h>
#ifdef KERNEL
#undef END
#include <mach/ppc/asm.h>
#endif
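
/*
 * DISABLE_PREEMPTION / ENABLE_PREEMPTION
 *
 * In kernel builds these macros build a minimal stack frame, preserve r3
 * (the lock argument) and the link register, and call the kernel's
 * _disable_preemption / _enable_preemption routines around the lock
 * operations below. In user-mode builds they expand to nothing.
 */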
.macro DISABLE_PREEMPTION
#ifdef KERNEL
	stwu	r1,-(FM_SIZE)(r1)
	mflr	r0
	stw	r3,FM_ARG0(r1)
	stw	r0,(FM_SIZE+FM_LR_SAVE)(r1)
	bl	EXT(_disable_preemption)
	lwz	r3,FM_ARG0(r1)
	lwz	r1,0(r1)
	lwz	r0,FM_LR_SAVE(r1)
	mtlr	r0
#endif
.endmacro
.macro ENABLE_PREEMPTION
#ifdef KERNEL
	stwu	r1,-(FM_SIZE)(r1)
	mflr	r0
	stw	r3,FM_ARG0(r1)
	stw	r0,(FM_SIZE+FM_LR_SAVE)(r1)
	bl	EXT(_enable_preemption)
	lwz	r3,FM_ARG0(r1)
	lwz	r1,0(r1)
	lwz	r0,FM_LR_SAVE(r1)
	mtlr	r0
#endif
.endmacro

/*
 * void
 * ev_lock(p)
 *	register int *p;
 *
 * Lock the lock pointed to by p. Spin (possibly forever) until
 * the lock is available. Test-and-test-and-set logic is used.
 */
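/*
 * Roughly, in C (an illustrative sketch, not part of the original source;
 * compare_and_store() stands in for the lwarx/stwcx. reservation pair):
 *
 *	do {
 *		while (*p != 0)
 *			;				// spin while the lock reads busy
 *	} while (!compare_and_store(0, 1, p));		// take it atomically; retry if the
 *							// reservation was lost
 *	// isync keeps the critical section from starting before the lock is held
 */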
	TEXT

#ifndef KERNEL
LEAF(_ev_lock)
	li	a6,1		// lock value
	lwarx	a7,0,a0		// CEMV10
9:
	sync
	lwarx	a7,0,a0		// read the lock
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9b		// yes, spin
	sync
	stwcx.	a6,0,a0		// try to get the lock
	bne-	9b		// failed, try again
	isync
	blr			// got it, return
END(_ev_lock)

LEAF(_IOSpinLock)
	li	a6,1		// lock value
	lwarx	a7,0,a0		// CEMV10
9:
	sync
	lwarx	a7,0,a0		// read the lock
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9b		// yes, spin
	sync
	stwcx.	a6,0,a0		// try to get the lock
	bne-	9b		// failed, try again
	isync
	blr			// got it, return
END(_IOSpinLock)
#endif

/*
 * void
 * ev_unlock(p)
 *	int *p;
 *
 * Unlock the lock pointed to by p.
 */

LEAF(_ev_unlock)
	sync				// make prior stores visible before release
	li	a7,0
	stw	a7,0(a0)		// clear the lock word
	ENABLE_PREEMPTION()
	blr
END(_ev_unlock)

LEAF(_IOSpinUnlock)
	sync				// make prior stores visible before release
	li	a7,0
	stw	a7,0(a0)		// clear the lock word
	ENABLE_PREEMPTION()
	blr
END(_IOSpinUnlock)

/*
 * ev_try_lock(p)
 *	int *p;
 *
 * Try to lock p. Return TRUE if the lock was obtained, FALSE otherwise.
 */
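/*
 * Roughly, in C (an illustrative sketch, not part of the original source;
 * compare_and_store() stands in for the lwarx/stwcx. reservation pair):
 *
 *	for (;;) {
 *		if (*p != 0)
 *			return FALSE;			// busy: give up immediately
 *		if (compare_and_store(0, 1, p))
 *			return TRUE;			// got it
 *		// reservation lost: re-test and try again
 *	}
 */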

LEAF(_ev_try_lock)
	li	a6,1		// lock value
	DISABLE_PREEMPTION()
	lwarx	a7,0,a0		// CEMV10
8:
	sync
	lwarx	a7,0,a0		// read the lock
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9f		// yes, give up
	sync
	stwcx.	a6,0,a0		// try to get the lock
	bne-	8b		// failed, try again
	li	a0,1		// return TRUE
	isync
	blr
9:
	ENABLE_PREEMPTION()
	li	a0,0		// return FALSE
	blr
END(_ev_try_lock)

LEAF(_IOTrySpinLock)
	li	a6,1		// lock value
	DISABLE_PREEMPTION()
	lwarx	a7,0,a0		// CEMV10
8:
	sync
	lwarx	a7,0,a0		// read the lock
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9f		// yes, give up
	sync
	stwcx.	a6,0,a0		// try to get the lock
	bne-	8b		// failed, try again
	li	a0,1		// return TRUE
	isync
	blr
9:
	ENABLE_PREEMPTION()
	li	a0,0		// return FALSE
	blr
END(_IOTrySpinLock)

#endif /* ! _IOKIT_IOSHAREDLOCKIMP_H */
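
/*
 * Illustrative usage from C (not part of the original source; assumes a
 * word-aligned int lock word initialized to 0, matching the "int *p"
 * prototypes in the comments above):
 *
 *	extern void ev_lock(int *p), ev_unlock(int *p);
 *	extern int  ev_try_lock(int *p);
 *
 *	int lock = 0;				// shared lock word
 *
 *	ev_lock(&lock);				// spin until acquired
 *	... critical section ...
 *	ev_unlock(&lock);
 *
 *	if (ev_try_lock(&lock)) {		// non-blocking attempt
 *		... critical section ...
 *		ev_unlock(&lock);
 *	}
 */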