]> git.saurik.com Git - apple/xnu.git/blame_incremental - iokit/IOKit/ppc/IOSharedLockImp.h
xnu-344.49.tar.gz
[apple/xnu.git] / iokit / IOKit / ppc / IOSharedLockImp.h
... / ...
CommitLineData
1/*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
27 *
28 * HISTORY
29 *
30 */
31
32/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved.
33 *
34 * EventShmemLock.h - Shared memory area locks for use between the
35 * WindowServer and the Event Driver.
36 *
37 * HISTORY
38 * 30 Nov 1992 Ben Fathi (benf@next.com)
39 * Ported to m98k.
40 *
41 * 29 April 1992 Mike Paquette at NeXT
42 * Created.
43 *
44 * Multiprocessor locks used within the shared memory area between the
45 * kernel and event system. These must work in both user and kernel mode.
46 * The locks are defined in an include file so they get exported to the local
47 * include file area.
48 */
49
50
51#ifndef _IOKIT_IOSHAREDLOCKIMP_H
52#define _IOKIT_IOSHAREDLOCKIMP_H
53
54#include <architecture/ppc/asm_help.h>
55#ifdef KERNEL
56#undef END
57#include <mach/ppc/asm.h>
58#endif
59
/*
 * DISABLE_PREEMPTION -- expands to a call to the kernel's
 * _disable_preemption() (kernel builds only; empty when !KERNEL).
 * Preserves r3 (the caller's first argument) and the link register so
 * it can be dropped into the middle of a LEAF routine.
 * Clobbers: r0 (used as LR scratch).  Builds/tears down one FM_SIZE frame.
 */
.macro DISABLE_PREEMPTION
#ifdef KERNEL
	stwu	r1,-(FM_SIZE)(r1)		// push a minimal stack frame
	mflr	r0				// r0 = return address
	stw	r3,FM_ARG0(r1)			// preserve caller's r3 across the call
	stw	r0,(FM_SIZE+FM_LR_SAVE)(r1)	// save LR in the caller frame's LR slot
	bl	EXT(_disable_preemption)
	lwz	r3,FM_ARG0(r1)			// restore r3
	lwz	r1,0(r1)			// pop frame via back-chain word
	lwz	r0,FM_LR_SAVE(r1)		// same LR slot, now relative to popped r1
	mtlr	r0
#endif
.endmacro
/*
 * ENABLE_PREEMPTION -- expands to a call to the kernel's
 * _enable_preemption() (kernel builds only; empty when !KERNEL).
 * Mirror image of DISABLE_PREEMPTION: preserves r3 and LR,
 * clobbers r0, uses one FM_SIZE stack frame.
 */
.macro ENABLE_PREEMPTION
#ifdef KERNEL
	stwu	r1,-(FM_SIZE)(r1)		// push a minimal stack frame
	mflr	r0				// r0 = return address
	stw	r3,FM_ARG0(r1)			// preserve caller's r3 across the call
	stw	r0,(FM_SIZE+FM_LR_SAVE)(r1)	// save LR in the caller frame's LR slot
	bl	EXT(_enable_preemption)
	lwz	r3,FM_ARG0(r1)			// restore r3
	lwz	r1,0(r1)			// pop frame via back-chain word
	lwz	r0,FM_LR_SAVE(r1)		// same LR slot, now relative to popped r1
	mtlr	r0
#endif
.endmacro
86
87/*
88 * void
89 * ev_lock(p)
90 * register int *p;
91 *
92 * Lock the lock pointed to by p. Spin (possibly forever) until
93 * the lock is available. Test and test and set logic used.
94 */
95 TEXT
96
97#ifndef KERNEL
/*
 * void ev_lock(volatile int *p)	-- user-space build only
 * In:      a0 (r3) = p, pointer to the shared lock word
 * Effect:  spins (possibly forever) until *p == 0, then atomically
 *          sets *p = 1 via lwarx/stwcx.
 * Clobbers: a6, a7, cr0.
 */
LEAF(_ev_lock)
	li	a6,1		// a6 = value that marks the lock held
	lwarx	a7,0,a0		// CEMV10 -- priming load-and-reserve; presumably
				//   a CPU-errata workaround (TODO confirm)
9:
	sync			// order prior accesses before (re)sampling
	lwarx	a7,0,a0		// load lock word and take reservation
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9b		// yes, spin
	sync
	stwcx.	a6,0,a0		// store-conditional: try to take the lock
	bne-	9b		// reservation lost -- retry from the top
	isync			// keep critical-section loads from issuing early
	blr			// got it, return
END(_ev_lock)
112
/*
 * void IOSpinLock(volatile int *p)	-- user-space build only
 * Identical implementation to _ev_lock above, exported under the
 * IOKit name: spin until *p == 0, then atomically set *p = 1.
 * In:       a0 (r3) = p.   Clobbers: a6, a7, cr0.
 */
LEAF(_IOSpinLock)
	li	a6,1		// a6 = value that marks the lock held
	lwarx	a7,0,a0		// CEMV10 -- priming load-and-reserve; presumably
				//   a CPU-errata workaround (TODO confirm)
9:
	sync			// order prior accesses before (re)sampling
	lwarx	a7,0,a0		// load lock word and take reservation
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9b		// yes, spin
	sync
	stwcx.	a6,0,a0		// store-conditional: try to take the lock
	bne-	9b		// reservation lost -- retry from the top
	isync			// keep critical-section loads from issuing early
	blr			// got it, return
END(_IOSpinLock)
127#endif
128
129/*
130 * void
131 * spin_unlock(p)
132 * int *p;
133 *
134 * Unlock the lock pointed to by p.
135 */
136
/*
 * void ev_unlock(volatile int *p)
 * In:      a0 (r3) = p.   Clobbers: a7.
 * The sync drains all stores made inside the critical section before
 * the lock word is cleared; a plain store releases (no stwcx needed).
 * ENABLE_PREEMPTION (kernel builds) balances the disable done on the
 * kernel-side lock/try-lock path.
 */
LEAF(_ev_unlock)
	sync			// order critical-section stores before release
	li	a7,0
	stw	a7,0(a0)	// *p = 0 -- lock released
	ENABLE_PREEMPTION()
	blr
END(_ev_unlock)
144
/*
 * void IOSpinUnlock(volatile int *p)
 * Identical implementation to _ev_unlock above, exported under the
 * IOKit name.  In: a0 (r3) = p.   Clobbers: a7.
 */
LEAF(_IOSpinUnlock)
	sync			// order critical-section stores before release
	li	a7,0
	stw	a7,0(a0)	// *p = 0 -- lock released
	ENABLE_PREEMPTION()
	blr
END(_IOSpinUnlock)
152
153
154/*
155 * ev_try_lock(p)
156 * int *p;
157 *
158 * Try to lock p. Return TRUE if successful in obtaining lock.
159 */
160
/*
 * int ev_try_lock(volatile int *p)
 * In:      a0 (r3) = p
 * Out:     a0 = 1 (TRUE) if the lock was acquired, 0 (FALSE) if busy.
 * Clobbers: a6, a7, cr0.
 * Note: in kernel builds, preemption is disabled up front and is left
 * disabled on the success path -- it is re-enabled only on failure here,
 * or later by ev_unlock/IOSpinUnlock once the lock is dropped.
 */
LEAF(_ev_try_lock)
	li	a6,1		// a6 = value that marks the lock held
	DISABLE_PREEMPTION()	// (kernel) stay non-preemptible while holding
	lwarx	a7,0,a0		// CEMV10 -- priming load-and-reserve; presumably
				//   a CPU-errata workaround (TODO confirm)
8:
	sync			// order prior accesses before (re)sampling
	lwarx	a7,0,a0		// load lock word and take reservation
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9f		// yes -- give up immediately
	sync
	stwcx.	a6,0,a0		// store-conditional: try to take the lock
	bne-	8b		// reservation lost -- retry
	li	a0,1		// return TRUE (preemption stays disabled)
	isync			// keep critical-section loads from issuing early
	blr
9:
	ENABLE_PREEMPTION()	// failed: undo the disable above
	li	a0,0		// return FALSE
	blr
END(_ev_try_lock)
181
/*
 * int IOTrySpinLock(volatile int *p)
 * Identical implementation to _ev_try_lock above, exported under the
 * IOKit name.  Returns 1 on acquisition, 0 if busy; on success
 * (kernel builds) preemption remains disabled until IOSpinUnlock.
 * In: a0 (r3) = p.   Clobbers: a6, a7, cr0.
 */
LEAF(_IOTrySpinLock)
	li	a6,1		// a6 = value that marks the lock held
	DISABLE_PREEMPTION()	// (kernel) stay non-preemptible while holding
	lwarx	a7,0,a0		// CEMV10 -- priming load-and-reserve; presumably
				//   a CPU-errata workaround (TODO confirm)
8:
	sync			// order prior accesses before (re)sampling
	lwarx	a7,0,a0		// load lock word and take reservation
	cmpwi	cr0,a7,0	// is it busy?
	bne-	9f		// yes -- give up immediately
	sync
	stwcx.	a6,0,a0		// store-conditional: try to take the lock
	bne-	8b		// reservation lost -- retry
	li	a0,1		// return TRUE (preemption stays disabled)
	isync			// keep critical-section loads from issuing early
	blr
9:
	ENABLE_PREEMPTION()	// failed: undo the disable above
	li	a0,0		// return FALSE
	blr
END(_IOTrySpinLock)
202
#endif /* _IOKIT_IOSHAREDLOCKIMP_H */