/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
22 | ||
23 | #include <cpus.h> | |
24 | #include <mach_assert.h> | |
25 | #include <mach_ldebug.h> | |
26 | #include <mach_rt.h> | |
27 | ||
28 | #include <kern/etap_options.h> | |
29 | ||
30 | #include <ppc/asm.h> | |
31 | #include <ppc/proc_reg.h> | |
32 | #include <assym.s> | |
33 | ||
34 | #define STRING ascii | |
35 | ||
36 | #define SWT_HI 0+FM_SIZE | |
37 | #define SWT_LO 4+FM_SIZE | |
38 | #define MISSED 8+FM_SIZE | |
39 | ||
0b4e3aa0 A |
40 | #define ILK_LOCKED 0x01 |
41 | #define MUTEX_LOCKED 0x02 | |
42 | #define SLOCK_FAST 0x02 | |
43 | ||
;
;	NOTE: make sure that PREEMPTSTACK in aligned_data is
;	set the same as it is here.  This is the number of
;	traceback entries we can handle per processor.
;
;	A value of 0 disables the stack.
;
#define PREEMPTSTACK 0
#define CHECKNMI 0
#define CHECKLOCKS 1

#include <ppc/POWERMAC/mp/mp.h>

#define PROLOG(space) \
	stwu	r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
	mflr	r0 __ASMNL__ \
	stw	r3,FM_ARG0(r1) __ASMNL__ \
	stw	r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__

#define EPILOG \
	lwz	r1,0(r1) __ASMNL__ \
	lwz	r0,FM_LR_SAVE(r1) __ASMNL__ \
	mtlr	r0 __ASMNL__

#if	MACH_LDEBUG && CHECKLOCKS
/*
 * Routines for general lock debugging.
 */

/* Gets lock check flags in CR6: CR bits 24-27 */

#define CHECK_SETUP(rg) \
	lis	rg,hi16(EXT(dgWork)) __ASMNL__ \
	ori	rg,rg,lo16(EXT(dgWork)) __ASMNL__ \
	lbz	rg,dgFlags(rg) __ASMNL__ \
	mtcrf	2,rg __ASMNL__


/*
 * Checks for the expected lock type and calls "panic" on
 * mismatch.  Detects calls to mutex functions with a simple
 * lock, and vice versa.
 */
#define CHECK_MUTEX_TYPE() \
	bt	24+disLktypeb,1f __ASMNL__ \
	lwz	r10,MUTEX_TYPE(r3) __ASMNL__ \
	cmpwi	r10,MUTEX_TAG __ASMNL__ \
	beq+	1f __ASMNL__ \
	lis	r3,hi16(not_a_mutex) __ASMNL__ \
	ori	r3,r3,lo16(not_a_mutex) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	lwz	r3,FM_ARG0(r1) __ASMNL__ \
1:

	.data
not_a_mutex:
	STRINGD	"not a mutex!\n\000"
	.text

#define CHECK_SIMPLE_LOCK_TYPE() \
	bt	24+disLktypeb,1f __ASMNL__ \
	lwz	r10,SLOCK_TYPE(r3) __ASMNL__ \
	cmpwi	r10,USLOCK_TAG __ASMNL__ \
	beq+	1f __ASMNL__ \
	lis	r3,hi16(not_a_slock) __ASMNL__ \
	ori	r3,r3,lo16(not_a_slock) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	lwz	r3,FM_ARG0(r1) __ASMNL__ \
1:

	.data
not_a_slock:
	STRINGD	"not a simple lock!\n\000"
	.text

#define CHECK_NO_SIMPLELOCKS() \
	bt	24+disLkNmSimpb,2f __ASMNL__ \
	mfmsr	r11 __ASMNL__ \
	rlwinm	r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
	mtmsr	r10 __ASMNL__ \
	mfsprg	r10,0 __ASMNL__ \
	lwz	r10,PP_CPU_DATA(r10) __ASMNL__ \
	lwz	r10,CPU_SIMPLE_LOCK_COUNT(r10) __ASMNL__ \
	cmpwi	r10,0 __ASMNL__ \
	beq+	1f __ASMNL__ \
	lis	r3,hi16(simple_locks_held) __ASMNL__ \
	ori	r3,r3,lo16(simple_locks_held) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	lwz	r3,FM_ARG0(r1) __ASMNL__ \
1: __ASMNL__ \
	mtmsr	r11 __ASMNL__ \
2:

	.data
simple_locks_held:
	STRINGD	"simple locks held!\n\000"
	.text

/*
 * Verifies return to the correct thread in "unlock" situations.
 */

#define CHECK_THREAD(thread_offset) \
	bt	24+disLkThreadb,2f __ASMNL__ \
	mfmsr	r11 __ASMNL__ \
	rlwinm	r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
	mtmsr	r10 __ASMNL__ \
	mfsprg	r10,0 __ASMNL__ \
	lwz	r10,PP_CPU_DATA(r10) __ASMNL__ \
	lwz	r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
	cmpwi	r10,0 __ASMNL__ \
	beq-	1f __ASMNL__ \
	lwz	r9,thread_offset(r3) __ASMNL__ \
	cmpw	r9,r10 __ASMNL__ \
	beq+	1f __ASMNL__ \
	lis	r3,hi16(wrong_thread) __ASMNL__ \
	ori	r3,r3,lo16(wrong_thread) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	lwz	r3,FM_ARG0(r1) __ASMNL__ \
1: __ASMNL__ \
	mtmsr	r11 __ASMNL__ \
2:
	.data
wrong_thread:
	STRINGD	"wrong thread!\n\000"
	.text

#define CHECK_MYLOCK(thread_offset) \
	bt	24+disLkMyLckb,2f __ASMNL__ \
	mfmsr	r11 __ASMNL__ \
	rlwinm	r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \
	mtmsr	r10 __ASMNL__ \
	mfsprg	r10,0 __ASMNL__ \
	lwz	r10,PP_CPU_DATA(r10) __ASMNL__ \
	lwz	r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \
	cmpwi	r10,0 __ASMNL__ \
	beq-	1f __ASMNL__ \
	lwz	r9,thread_offset(r3) __ASMNL__ \
	cmpw	r9,r10 __ASMNL__ \
	bne+	1f __ASMNL__ \
	lis	r3,HIGH_ADDR(mylock_attempt) __ASMNL__ \
	ori	r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \
	bl	EXT(panic) __ASMNL__ \
	lwz	r3,FM_ARG0(r1) __ASMNL__ \
1: __ASMNL__ \
	mtmsr	r11 __ASMNL__ \
2:

	.data
mylock_attempt:
	STRINGD	"mylock attempt!\n\000"
	.text

#else	/* MACH_LDEBUG */

#define CHECK_SETUP(rg)
#define CHECK_MUTEX_TYPE()
#define CHECK_SIMPLE_LOCK_TYPE()
#define CHECK_THREAD(thread_offset)
#define CHECK_NO_SIMPLELOCKS()
#define CHECK_MYLOCK(thread_offset)

#endif	/* MACH_LDEBUG */

/*
 * void hw_lock_init(hw_lock_t)
 *
 * Initialize a hardware lock.  These locks should be cache-line aligned
 * and a multiple of the cache-line size.
 */

ENTRY(hw_lock_init, TAG_NO_FRAME_USED)

	li	r0,0			/* set lock to free == 0 */
	stw	r0,0(r3)		/* Initialize the lock */
	blr

/*
 * void hw_lock_unlock(hw_lock_t)
 *
 * Unconditionally release lock.
 * Release preemption level.
 */

	.align	5
	.globl	EXT(hw_lock_unlock)

LEXT(hw_lock_unlock)

#if 0
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lis	r5,0xFFFF		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif
	sync				/* Flush writes done under lock */
	li	r0,0			/* set lock to free */
	stw	r0,0(r3)

	b	epStart			/* Go enable preemption... */

/*
 * Special case for internal use.  Uses the same lock code, but sets up so
 * that there will be no disabling of preemption after locking.  Generally
 * used for mutex locks when obtaining the interlock, although there is
 * nothing stopping other uses.
 */

lockLock:	lis	r4,HIGH_ADDR(EXT(LockTimeOut))	/* Get the high part */
	ori	r4,r4,LOW_ADDR(EXT(LockTimeOut))	/* And the low part */
	cmplwi	cr1,r1,0		/* Set flag: do NOT disable preemption after locking */
	lwz	r4,0(r4)		/* Get the timeout value */
	b	lockComm		/* Join on up... */

/*
 * void hw_lock_lock(hw_lock_t)
 *
 * Acquire lock, spinning until it becomes available.
 * Return with preemption disabled.
 * Apparently not used except by mach_perf.
 * We just set a default timeout and jump into the NORMAL timeout lock.
 */

	.align	5
	.globl	EXT(hw_lock_lock)

LEXT(hw_lock_lock)

lockDisa:	lis	r4,HIGH_ADDR(EXT(LockTimeOut))	/* Get the high part */
	ori	r4,r4,LOW_ADDR(EXT(LockTimeOut))	/* And the low part */
	cmplw	cr1,r1,r1		/* Set flag: DO disable preemption after locking */
	lwz	r4,0(r4)		/* Get the timeout value */
	b	lockComm		/* Join on up... */

/*
 * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
 *
 * Try to acquire spin-lock.  Return success (1) or failure (0).
 * The attempt will fail after timeout ticks of the timebase.
 * We try fairly hard to get this lock.  We disable for interruptions, but
 * reenable after a "short" timeout (128 ticks; we may want to change this).
 * After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 * we return either in abject failure, or disable and go back to the lock sniff routine.
 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
 *
 * One programming note: never do anything in here that depends upon
 * translation or interruptions being either on or off; this routine must
 * work with them in any state, gosh darn it!
 */
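/*
 * As an illustrative sketch only (not part of the build), the flow below
 * corresponds roughly to the following C, where timebase() and the
 * interrupt enable/disable helpers stand in for the mftb and mtmsr
 * sequences, and try_set_interlock() for the lwarx/stwcx. pair:
 *
 *	unsigned int hw_lock_to(hw_lock_t lock, unsigned int timeout)
 *	{
 *		disable_interrupts();
 *		unsigned int start = timebase();
 *		for (;;) {
 *			if (try_set_interlock(lock))		// lcktry
 *				return 1;			// got it
 *			while (*lock & ILK_LOCKED) {		// lcksniff
 *				if (timebase() - start < 128)
 *					continue;
 *				enable_interrupts();		// let pending 'rupts in
 *				timeout -= 128;			// charge the big timeout
 *				if ((int)timeout <= 0)
 *					return 0;		// abject failure
 *				disable_interrupts();
 *				start = timebase();
 *			}
 *		}
 *	}
 */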
	.align	5
	.globl	EXT(hw_lock_to)

LEXT(hw_lock_to)

#if 0
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lis	r5,0xEEEE		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif

#if CHECKNMI
	mflr	r12			; (TEST/DEBUG)
	bl	EXT(ml_sense_nmi)	; (TEST/DEBUG)
	mtlr	r12			; (TEST/DEBUG)
#endif

	cmplw	cr1,r1,r1		/* Set flag: DO disable preemption after locking */

lockComm:	mfmsr	r9		/* Get the MSR value */
	mr	r5,r3			/* Get the address of the lock */
	rlwinm	r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Get MSR that is uninterruptible */

	mtmsr	r7			/* Turn off interruptions */
	mftb	r8			/* Get the low part of the time base */

lcktry:	lwarx	r6,0,r5			/* Grab the lock value */
	andi.	r3,r6,ILK_LOCKED	/* Is it locked? */
	ori	r6,r6,ILK_LOCKED	/* Set interlock */
	bne-	lcksniff		/* Yeah, wait for it to clear... */
	stwcx.	r6,0,r5			/* Try to seize that there durn lock */
	bne-	lcktry			/* Couldn't get it... */
	li	r3,1			/* return true */
	isync				/* Make sure we don't use a speculatively loaded value */
	beq+	cr1,daPreComm		/* We got it, go disable preemption if we're supposed to... */
	mtmsr	r9			; Restore interrupt state
	blr				/* Go on home... */

	.align	5

lcksniff:	lwz	r3,0(r5)	/* Get that lock in here */
	andi.	r3,r3,ILK_LOCKED	/* Is it free yet? */
	beq+	lcktry			/* Yeah, try for it again... */

	mftb	r10			/* Time stamp us now */
	sub	r10,r10,r8		/* Get the elapsed time */
	cmplwi	r10,128			/* Have we been spinning for 128 tb ticks? */
	blt+	lcksniff		/* Not yet... */

	mtmsr	r9			/* Say, any interrupts pending? */

/* The following instructions force the pipeline to be interlocked so that only one
   instruction is issued per cycle.  This ensures that we stay enabled for a long enough
   time; if it's too short, pending interruptions will not have a chance to be taken. */

	subi	r4,r4,128		/* Back off elapsed time from timeout value */
	or	r4,r4,r4		/* Do nothing here but force a single cycle delay */
	mr.	r4,r4			/* See if we used the whole timeout */
	li	r3,0			/* Assume a timeout return code */
	or	r4,r4,r4		/* Do nothing here but force a single cycle delay */

	ble-	lckfail			/* We failed */
	mtmsr	r7			/* Disable for interruptions */
	mftb	r8			/* Get the low part of the time base */
	b	lcksniff		/* Now that we've opened an enable window, keep trying... */

lckfail:				/* We couldn't get the lock */
	li	r3,0			/* Set failure return code */
	blr				/* Return, head hanging low... */


/*
 * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
 *
 * Try to acquire spin-lock.  The second parameter is the bit mask to test and set;
 * multiple bits may be set.  Return success (1) or failure (0).
 * The attempt will fail after timeout ticks of the timebase.
 * We try fairly hard to get this lock.  We disable for interruptions, but
 * reenable after a "short" timeout (128 ticks; we may want to shorten this).
 * After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 * we return either in abject failure, or disable and go back to the lock sniff routine.
 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
 *
 * NOTE WELL!!!!  THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
 * USES.  THIS SAVES A TRANSLATION OFF-TO-ON TRANSITION AND BACK AND A SAVE AND
 * RESTORE FROM THE STACK.
 */
386 | ||
387 | .align 5 | |
388 | ||
389 | nop ; Force loop alignment to cache line | |
390 | nop | |
391 | nop | |
392 | nop | |
393 | ||
394 | .globl EXT(hw_lock_bit) | |
395 | ||
396 | LEXT(hw_lock_bit) | |
397 | ||
398 | mfmsr r9 /* Get the MSR value */ | |
399 | rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */ | |
400 | ||
401 | mtmsr r7 /* Turn off interruptions */ | |
402 | ||
403 | mftb r8 /* Get the low part of the time base */ | |
404 | ||
bittry:	lwarx	r6,0,r3			/* Grab the lock value */
	and.	r0,r6,r4		/* See if any of the lock bits are on */
	or	r6,r6,r4		/* Turn on the lock bits */
	bne-	bitsniff		/* Yeah, wait for it to clear... */
	stwcx.	r6,0,r3			/* Try to seize that there durn lock */
	beq+	bitgot			/* We got it, yahoo... */
	b	bittry			/* Just start up again if the store failed... */

	.align	5

bitsniff:	lwz	r6,0(r3)	/* Get that lock in here */
	and.	r0,r6,r4		/* See if any of the lock bits are on */
	beq+	bittry			/* Yeah, try for it again... */

	mftb	r6			/* Time stamp us now */
	sub	r6,r6,r8		/* Get the elapsed time */
	cmplwi	r6,128			/* Have we been spinning for 128 tb ticks? */
	blt+	bitsniff		/* Not yet... */

	mtmsr	r9			/* Say, any interrupts pending? */

/* The following instructions force the pipeline to be interlocked so that only one
   instruction is issued per cycle.  This ensures that we stay enabled for a long enough
   time; if it's too short, pending interruptions will not have a chance to be taken. */

	subi	r5,r5,128		/* Back off elapsed time from timeout value */
	or	r5,r5,r5		/* Do nothing here but force a single cycle delay */
	mr.	r5,r5			/* See if we used the whole timeout */
	or	r5,r5,r5		/* Do nothing here but force a single cycle delay */

	ble-	bitfail			/* We failed */
	mtmsr	r7			/* Disable for interruptions */
	mftb	r8			/* Get the low part of the time base */
	b	bitsniff		/* Now that we've opened an enable window, keep trying... */

	.align	5

bitgot:	mtmsr	r9			/* Enable for interruptions */
	li	r3,1			/* Set good return code */
	isync				/* Make sure we don't use a speculatively loaded value */
	blr

bitfail:	li	r3,0		/* Set failure return code */
	blr				/* Return, head hanging low... */

/*
 * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
 *
 * Release a bit-based spin-lock.  The second parameter is the bit mask to clear.
 * Multiple bits may be cleared.
 *
 * NOTE WELL!!!!  THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY
 * USES.  THIS SAVES A TRANSLATION OFF-TO-ON TRANSITION AND BACK AND A SAVE AND
 * RESTORE FROM THE STACK.
 */

	.align	5
	.globl	EXT(hw_unlock_bit)

LEXT(hw_unlock_bit)

	sync

ubittry:	lwarx	r0,0,r3		/* Grab the lock value */
	andc	r0,r0,r4		/* Clear the lock bits */
	stwcx.	r0,0,r3			/* Try to clear that there durn lock */
	bne-	ubittry			/* Try again, couldn't save it... */

	blr				/* Leave... */

/*
 * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
 *	unsigned int newb, unsigned int timeout)
 *
 * Try to acquire spin-lock.  The second parameter is the bit mask to check.
 * The third is the value those bits must have, and the fourth is what to set them to.
 * Return success (1) or failure (0).
 * The attempt will fail after timeout ticks of the timebase.
 * We try fairly hard to get this lock.  We disable for interruptions, but
 * reenable after a "short" timeout (128 ticks; we may want to shorten this).
 * After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
 * we return either in abject failure, or disable and go back to the lock sniff routine.
 * If the sniffer finds the lock free, it jumps right up and tries to grab it.
 */
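/*
 * An illustrative sketch in C (the timed-spin scaffolding is the same as
 * described for hw_lock_to above and is elided here; timed_out() is a
 * stand-in for that logic, and hw_compare_and_store is defined later in
 * this file):
 *
 *	unsigned int hw_lock_mbits(hw_lock_t lock, unsigned int bits,
 *			unsigned int value, unsigned int newb,
 *			unsigned int timeout)
 *	{
 *		for (;;) {				// timed spin, as in hw_lock_to
 *			unsigned int old = *lock;
 *			if ((old & bits) == value) {	// the watched bits match, so
 *				if (hw_compare_and_store(old, old | newb, lock))
 *					return 1;	// set the new bits atomically
 *			} else if (timed_out())
 *				return 0;
 *		}
 *	}
 */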
493 | ||
494 | .align 5 | |
495 | ||
496 | nop ; Force loop alignment to cache line | |
497 | nop | |
498 | nop | |
499 | nop | |
500 | ||
501 | .globl EXT(hw_lock_mbits) | |
502 | ||
503 | LEXT(hw_lock_mbits) | |
504 | ||
505 | mfmsr r9 ; Get the MSR value | |
506 | rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible | |
507 | ||
508 | mtmsr r8 ; Turn off interruptions | |
509 | ||
510 | mftb r10 ; Get the low part of the time base | |
511 | ||
1c79356b A |
512 | mbittry: lwarx r12,0,r3 ; Grab the lock value |
513 | and r0,r12,r4 ; Clear extra bits | |
514 | or r12,r12,r6 ; Turn on the lock bits | |
515 | cmplw r0,r5 ; Are these the right bits? | |
516 | bne- mbitsniff ; Nope, wait for it to clear... | |
517 | stwcx. r12,0,r3 ; Try to seize that there durn lock | |
518 | beq+ mbitgot ; We got it, yahoo... | |
519 | b mbittry ; Just start up again if the store failed... | |
520 | ||
521 | .align 5 | |
522 | ||
523 | mbitsniff: lwz r12,0(r3) ; Get that lock in here | |
524 | and r0,r12,r4 ; Clear extra bits | |
525 | or r12,r12,r6 ; Turn on the lock bits | |
526 | cmplw r0,r5 ; Are these the right bits? | |
527 | beq+ mbittry ; Yeah, try for it again... | |
528 | ||
529 | mftb r11 ; Time stamp us now | |
530 | sub r11,r11,r10 ; Get the elapsed time | |
531 | cmplwi r11,128 ; Have we been spinning for 128 tb ticks? | |
532 | blt+ mbitsniff ; Not yet... | |
533 | ||
534 | mtmsr r9 ; Say, any interrupts pending? | |
535 | ||
536 | ; The following instructions force the pipeline to be interlocked to that only one | |
537 | ; instruction is issued per cycle. The insures that we stay enabled for a long enough | |
538 | ; time. If it is too short, pending interruptions will not have a chance to be taken | |
539 | ||
540 | subi r7,r7,128 ; Back off elapsed time from timeout value | |
541 | or r7,r7,r7 ; Do nothing here but force a single cycle delay | |
542 | mr. r7,r7 ; See if we used the whole timeout | |
543 | or r7,r7,r7 ; Do nothing here but force a single cycle delay | |
544 | ||
545 | ble- mbitfail ; We failed | |
546 | mtmsr r8 ; Disable for interruptions | |
547 | mftb r10 ; Get the low part of the time base | |
548 | b mbitsniff ; Now that we have opened an enable window, keep trying... | |
549 | ||
550 | .align 5 | |
551 | ||
552 | mbitgot: mtmsr r9 ; Enable for interruptions | |
553 | li r3,1 ; Set good return code | |
554 | isync ; Make sure we do not use a speculativily loaded value | |
555 | blr | |
556 | ||
557 | mbitfail: li r3,0 ; Set failure return code | |
558 | blr ; Return, head hanging low... | |
559 | ||
560 | ||
561 | /* | |
562 | * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout) | |
563 | * | |
564 | * Spin until word hits 0 or timeout. | |
565 | * Return success (1) or failure (0). | |
566 | * Attempt will fail after timeout ticks of the timebase. | |
567 | * | |
568 | * The theory is that a processor will bump a counter as it signals | |
569 | * other processors. Then it will spin untl the counter hits 0 (or | |
570 | * times out). The other processors, as it receives the signal will | |
571 | * decrement the counter. | |
572 | * | |
573 | * The other processors use interlocked update to decrement, this one | |
574 | * does not need to interlock. | |
575 | * | |
576 | */ | |
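/*
 * A rough C equivalent of the rendezvous (a sketch; timebase() stands in
 * for the mftb reads below):
 *
 *	unsigned int hw_cpu_sync(unsigned int *sync, unsigned int timeout)
 *	{
 *		unsigned int start = timebase();
 *		while (*sync != 0)			// wait for all to check in
 *			if (timebase() - start > timeout)
 *				return 0;		// gave up waiting
 *		return 1;				// everyone has responded
 *	}
 */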
577 | ||
578 | .align 5 | |
579 | ||
580 | .globl EXT(hw_cpu_sync) | |
581 | ||
582 | LEXT(hw_cpu_sync) | |
583 | ||
584 | mftb r10 ; Get the low part of the time base | |
585 | mr r9,r3 ; Save the sync word address | |
586 | li r3,1 ; Assume we work | |
587 | ||
588 | csynctry: lwz r11,0(r9) ; Grab the sync value | |
589 | mr. r11,r11 ; Counter hit 0? | |
590 | beqlr- ; Yeah, we are sunk... | |
591 | mftb r12 ; Time stamp us now | |
592 | ||
593 | sub r12,r12,r10 ; Get the elapsed time | |
594 | cmplw r4,r12 ; Have we gone too long? | |
595 | bge+ csynctry ; Not yet... | |
596 | ||
597 | li r3,0 ; Set failure... | |
598 | blr ; Return, head hanging low... | |
599 | ||
/*
 * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
 *
 * Spin until word changes or timeout.
 * Return success (1) or failure (0).
 * The attempt will fail after timeout ticks of the timebase.
 *
 * This is used to ensure that a processor passes a certain point.
 * An example of use is to monitor the last interrupt time in the
 * per_proc block.  This can be used to ensure that the other processor
 * has seen at least one interrupt since a specific time.
 */

	.align	5

	.globl	EXT(hw_cpu_wcng)

LEXT(hw_cpu_wcng)

	mftb	r10			; Get the low part of the time base
	mr	r9,r3			; Save the sync word address
	li	r3,1			; Assume we work

wcngtry:	lwz	r11,0(r9)	; Grab the value
	cmplw	r11,r4			; Do they still match?
	bnelr-				; Nope, cool...
	mftb	r12			; Time stamp us now

	sub	r12,r12,r10		; Get the elapsed time
	cmplw	r5,r12			; Have we gone too long?
	bge+	wcngtry			; Not yet...

	li	r3,0			; Set failure...
	blr				; Return, head hanging low...


/*
 * unsigned int hw_lock_try(hw_lock_t)
 *
 * Try to acquire spin-lock.  Return success (1) or failure (0).
 * Returns with preemption disabled on success.
 */
	.align	5
	.globl	EXT(hw_lock_try)

LEXT(hw_lock_try)

#if 0
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lis	r5,0x9999		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif
	mfmsr	r9			/* Save the MSR value */
	rlwinm	r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruption bit */

#if MACH_LDEBUG
	lis	r5,0x10			/* roughly 1E6 */
	mtctr	r5
#endif /* MACH_LDEBUG */

	mtmsr	r7			/* Disable interruptions and thus, preemption */

.L_lock_try_loop:

#if MACH_LDEBUG
	bdnz+	0f			/* Count attempts */
	mtmsr	r9			/* Restore enablement */
	BREAKPOINT_TRAP			/* Get to debugger */
	mtmsr	r7			/* Disable interruptions and thus, preemption */
0:
#endif /* MACH_LDEBUG */

	lwarx	r5,0,r3			/* Load from lock address and reserve */

	andi.	r6,r5,ILK_LOCKED	/* TEST... */
	ori	r5,r5,ILK_LOCKED
	bne-	.L_lock_try_failed	/* branch if taken.  Predict free */

	stwcx.	r5,0,r3			/* And SET (if still reserved) */
	mfsprg	r6,0			/* Get the per_proc block */
	bne-	.L_lock_try_loop	/* If set failed, loop back */

	lwz	r6,PP_CPU_DATA(r6)	/* Get the pointer to the CPU data from per proc */
	isync

	lwz	r5,CPU_PREEMPTION_LEVEL(r6)	/* Get the preemption level */
	addi	r5,r5,1			/* Bring up the disable count */
	stw	r5,CPU_PREEMPTION_LEVEL(r6)	/* Save it back */

	mtmsr	r9			/* Allow interruptions now */
	li	r3,1			/* Set that the lock was free */
	blr

.L_lock_try_failed:
	mtmsr	r9			/* Allow interruptions now */
	li	r3,0			/* FAILURE - lock was taken */
	blr

/*
 * unsigned int hw_lock_held(hw_lock_t)
 *
 * Return 1 if lock is held.
 * Doesn't change preemption state.
 * N.B.  Racy, of course.
 */
	.align	5
	.globl	EXT(hw_lock_held)

LEXT(hw_lock_held)

#if 0
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lis	r5,0x8888		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif
	isync				/* Make sure we don't use a speculatively fetched lock */
	lwz	r3,0(r3)		/* Return value of lock */
	blr

/*
 * unsigned int hw_compare_and_store(unsigned int old, unsigned int new, unsigned int *area)
 *
 * Compare old to *area; if equal, store new and return true,
 * else return false with no store.
 * This is an atomic operation.
 */
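/*
 * Semantically this is the classic compare-and-swap; as an illustrative
 * sketch, a GCC-style compiler would spell it roughly:
 *
 *	unsigned int hw_compare_and_store(unsigned int old, unsigned int new,
 *			unsigned int *area)
 *	{
 *		return __atomic_compare_exchange_n(area, &old, new, 0,
 *				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 *	}
 *
 * The lwarx/stwcx. pair below is the PowerPC primitive that makes the
 * load, compare, and store appear atomic.
 */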
	.align	5
	.globl	EXT(hw_compare_and_store)

LEXT(hw_compare_and_store)

	mr	r6,r3			/* Save the old value */

cstry:	lwarx	r9,0,r5			/* Grab the area value */
	li	r3,1			/* Assume it works */
	cmplw	cr0,r9,r6		/* Does it match the old value? */
	bne-	csfail			/* No, it must have changed... */
	stwcx.	r4,0,r5			/* Try to save the new value */
	bne-	cstry			/* Didn't get it, try again... */
	isync				/* Just hold up prefetch */
	blr				/* Return... */

csfail:	li	r3,0			/* Set failure */
	blr				/* Better luck next time... */

/*
 * unsigned int hw_atomic_add(unsigned int *area, int val)
 *
 * Atomically add the second parameter to the word addressed by the first.
 * Returns the result.
 */
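/*
 * In C terms (a sketch using the GCC-style builtin), the routine behaves
 * like:
 *
 *	unsigned int hw_atomic_add(unsigned int *area, int val)
 *	{
 *		return __atomic_add_fetch(area, val, __ATOMIC_RELAXED);
 *	}
 *
 * i.e. the updated value, not the previous one, is returned.  The same
 * shape applies to the sub/or/and variants that follow.
 */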
	.align	5
	.globl	EXT(hw_atomic_add)

LEXT(hw_atomic_add)

	mr	r6,r3			/* Save the area */

addtry:	lwarx	r3,0,r6			/* Grab the area value */
	add	r3,r3,r4		/* Add the value */
	stwcx.	r3,0,r6			/* Try to save the new value */
	bne-	addtry			/* Didn't get it, try again... */
	blr				/* Return... */


/*
 * unsigned int hw_atomic_sub(unsigned int *area, int val)
 *
 * Atomically subtract the second parameter from the word addressed by the first.
 * Returns the result.
 */
	.align	5
	.globl	EXT(hw_atomic_sub)

LEXT(hw_atomic_sub)

	mr	r6,r3			/* Save the area */

subtry:	lwarx	r3,0,r6			/* Grab the area value */
	sub	r3,r3,r4		/* Subtract the value */
	stwcx.	r3,0,r6			/* Try to save the new value */
	bne-	subtry			/* Didn't get it, try again... */
	blr				/* Return... */

/*
 * unsigned int hw_atomic_or(unsigned int *area, int val)
 *
 * Atomically ORs the second parameter into the word addressed by the first.
 * Returns the result.
 */
	.align	5
	.globl	EXT(hw_atomic_or)

LEXT(hw_atomic_or)

	mr	r6,r3			; Save the area

ortry:	lwarx	r3,0,r6			; Grab the area value
	or	r3,r3,r4		; OR the value
	stwcx.	r3,0,r6			; Try to save the new value
	bne-	ortry			; Did not get it, try again...
	blr				; Return...


/*
 * unsigned int hw_atomic_and(unsigned int *area, int val)
 *
 * Atomically ANDs the second parameter with the word addressed by the first.
 * Returns the result.
 */
	.align	5
	.globl	EXT(hw_atomic_and)

LEXT(hw_atomic_and)

	mr	r6,r3			; Save the area

andtry:	lwarx	r3,0,r6			; Grab the area value
	and	r3,r3,r4		; AND the value
	stwcx.	r3,0,r6			; Try to save the new value
	bne-	andtry			; Did not get it, try again...
	blr				; Return...

/*
 * void hw_queue_atomic(unsigned int *anchor, unsigned int *elem, unsigned int disp)
 *
 * Atomically inserts the element at the head of the list.
 * anchor is the pointer to the first element
 * elem is the pointer to the element to insert
 * disp is the displacement into the element to the chain pointer
 */
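/*
 * An illustrative sketch of the insertion in C (the disp argument lets one
 * routine handle any element layout by giving the byte offset of the chain
 * pointer within the element; hw_compare_and_store is the routine defined
 * above):
 *
 *	void hw_queue_atomic(unsigned int *anchor, unsigned int *elem,
 *			unsigned int disp)
 *	{
 *		unsigned int *next = (unsigned int *)((char *)elem + disp);
 *		unsigned int old;
 *		do {
 *			old = *anchor;		// pick up the current head
 *			*next = old;		// chain it behind the new element
 *		} while (!hw_compare_and_store(old, (unsigned int)elem, anchor));
 *	}
 */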
	.align	5
	.globl	EXT(hw_queue_atomic)

LEXT(hw_queue_atomic)

	mr	r7,r4			/* Make end point the same as start */
	mr	r8,r5			/* Copy the displacement also */
	b	hw_queue_comm		/* Join common code... */

/*
 * void hw_queue_atomic_list(unsigned int *anchor, unsigned int *first, unsigned int *last, unsigned int disp)
 *
 * Atomically inserts the list of elements at the head of the list.
 * anchor is the pointer to the first element
 * first is the pointer to the first element to insert
 * last is the pointer to the last element to insert
 * disp is the displacement into the element to the chain pointer
 */
	.align	5
	.globl	EXT(hw_queue_atomic_list)

LEXT(hw_queue_atomic_list)

	mr	r7,r5			/* Make end point the same as start */
	mr	r8,r6			/* Copy the displacement also */

hw_queue_comm:
	lwarx	r9,0,r3			/* Pick up the anchor */
	stwx	r9,r8,r7		/* Chain that to the end of the new stuff */
	stwcx.	r4,0,r3			/* Try to chain into the front */
	bne-	hw_queue_comm		/* Didn't make it, try again... */

	blr				/* Return... */

/*
 * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
 *
 * Atomically removes the first element in a list and returns it.
 * anchor is the pointer to the first element
 * disp is the displacement into the element to the chain pointer
 * Returns the element if found, 0 if empty.
 */
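/*
 * A rough C equivalent (a sketch, again reusing hw_compare_and_store from
 * above):
 *
 *	unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
 *	{
 *		unsigned int *elem, *next;
 *		do {
 *			elem = (unsigned int *)*anchor;	// current head
 *			if (elem == 0)
 *				return 0;		// list is empty
 *			next = *(unsigned int **)((char *)elem + disp);
 *		} while (!hw_compare_and_store((unsigned int)elem,
 *				(unsigned int)next, anchor));
 *		return elem;
 *	}
 */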
	.align	5
	.globl	EXT(hw_dequeue_atomic)

LEXT(hw_dequeue_atomic)

	mr	r5,r3			/* Save the anchor */

hw_dequeue_comm:
	lwarx	r3,0,r5			/* Pick up the anchor */
	mr.	r3,r3			/* Is the list empty? */
	beqlr-				/* Leave if the list is empty... */
	lwzx	r9,r4,r3		/* Get the next in line */
	stwcx.	r9,0,r5			/* Try to chain into the front */
	beqlr+				; Got the thing, go away with it...
	b	hw_dequeue_comm		; Did not make it, try again...

/*
 * void mutex_init(mutex_t *l, etap_event_t etap)
 */

ENTRY(mutex_init,TAG_NO_FRAME_USED)

	PROLOG(0)
	li	r10,0
	stw	r10,LOCK_DATA(r3)	/* clear lock word */
	sth	r10,MUTEX_WAITERS(r3)	/* init waiter count */

#if MACH_LDEBUG
	stw	r10,MUTEX_PC(r3)	/* init caller pc */
	stw	r10,MUTEX_THREAD(r3)	/* and owning thread */
	li	r10,MUTEX_TAG
	stw	r10,MUTEX_TYPE(r3)	/* set lock type */
#endif /* MACH_LDEBUG */

#if ETAP_LOCK_TRACE
	bl	EXT(etap_mutex_init)	/* init ETAP data */
#endif /* ETAP_LOCK_TRACE */

	EPILOG
	blr

/*
 * void mutex_lock(mutex_t *)
 */

	.align	5
	.globl	EXT(mutex_lock)
LEXT(mutex_lock)

	.globl	EXT(_mutex_lock)
LEXT(_mutex_lock)

#if !MACH_LDEBUG
L_mutex_lock_loop:
	lwarx	r5,0,r3
	andi.	r4,r5,ILK_LOCKED|MUTEX_LOCKED
	bne-	L_mutex_lock_slow
	ori	r5,r5,MUTEX_LOCKED
	stwcx.	r5,0,r3
	bne-	L_mutex_lock_loop
	isync
	blr
L_mutex_lock_slow:
#endif
#if CHECKNMI
	mflr	r12			; (TEST/DEBUG)
	bl	EXT(ml_sense_nmi)	; (TEST/DEBUG)
	mtlr	r12			; (TEST/DEBUG)
#endif

	PROLOG(12)
#if MACH_LDEBUG
	bl	EXT(assert_wait_possible)
	mr.	r3,r3
	bne	L_mutex_lock_assert_wait_1
	lis	r3,hi16(L_mutex_lock_assert_wait_panic_str)
	ori	r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
	bl	EXT(panic)

	.data
L_mutex_lock_assert_wait_panic_str:
	STRINGD	"mutex_lock: assert_wait_possible false\n\000"
	.text

L_mutex_lock_assert_wait_1:
	lwz	r3,FM_ARG0(r1)
#endif

#if ETAP_LOCK_TRACE
	li	r0,0
	stw	r0,SWT_HI(r1)		/* set wait time to 0 (HI) */
	stw	r0,SWT_LO(r1)		/* set wait time to 0 (LO) */
	stw	r0,MISSED(r1)		/* clear local miss marker */
#endif /* ETAP_LOCK_TRACE */

	CHECK_SETUP(r12)
	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

.L_ml_retry:
#if 0
	mfsprg	r4,0			/* (TEST/DEBUG) */
	lwz	r4,PP_CPU_DATA(r4)	/* (TEST/DEBUG) */
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lwz	r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
	lis	r5,0xAAAA		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif

	bl	lockDisa		/* Go get a lock on the mutex's interlock lock */
	mr.	r4,r3			/* Did we get it? */
	lwz	r3,FM_ARG0(r1)		/* Restore the lock address */
	bne+	mlGotInt		/* We got it just fine... */

	lis	r3,HIGH_ADDR(mutex_failed1)	; Get the failed mutex message
	ori	r3,r3,LOW_ADDR(mutex_failed1)	; Get the failed mutex message
	bl	EXT(panic)		; Call panic
	BREAKPOINT_TRAP			; We die here anyway, can not get the lock

	.data
mutex_failed1:
	STRINGD	"We can't get a mutex interlock lock on mutex_lock\n\000"
	.text

mlGotInt:

/* Note that there is no reason to do a load and reserve here.  We already
   hold the interlock lock and no one can touch this field unless they
   have that, so, we're free to play. */

	lwz	r4,LOCK_DATA(r3)	/* Get the mutex's lock field */
	andi.	r9,r4,MUTEX_LOCKED	/* So, can we have it? */
	ori	r10,r4,MUTEX_LOCKED	/* Set the lock value */
	bne-	mlInUse			/* Nope, somebody's playing already... */

#if MACH_LDEBUG
	mfmsr	r11
	rlwinm	r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
	mtmsr	r5
	mfsprg	r9,0			/* Get the per_proc block */
	lwz	r5,0(r1)		/* Get previous save frame */
	lwz	r9,PP_CPU_DATA(r9)	/* Point to the cpu data area */
	lwz	r5,FM_LR_SAVE(r5)	/* Get our caller's address */
	lwz	r8,CPU_ACTIVE_THREAD(r9) /* Get the active thread */
	stw	r5,MUTEX_PC(r3)		/* Save our caller */
	mr.	r8,r8			/* Is there any thread? */
	stw	r8,MUTEX_THREAD(r3)	/* Set the mutex's holding thread */
	beq-	.L_ml_no_active_thread	/* No owning thread... */
	lwz	r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */
	addi	r9,r9,1			/* Bump it up */
	stw	r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */
.L_ml_no_active_thread:
	mtmsr	r11
#endif /* MACH_LDEBUG */

	rlwinm	r10,r10,0,0,30		/* Get the unlock value */
	stw	r10,LOCK_DATA(r3)	/* grab the mutexlock and free the interlock */

#if ETAP_LOCK_TRACE
	mflr	r4
	lwz	r5,SWT_HI(r1)
	lwz	r6,SWT_LO(r1)
	bl	EXT(etap_mutex_hold)	/* collect hold timestamp */
#endif /* ETAP_LOCK_TRACE */

	EPILOG				/* Restore all saved registers */

	b	epStart			/* Go enable preemption... */

/*
 * We come here when we have a resource conflict; in other words,
 * the mutex is held.
 */

mlInUse:

#if ETAP_LOCK_TRACE
	lwz	r7,MISSED(r1)
	cmpwi	r7,0			/* did we already take a wait timestamp? */
	bne	.L_ml_block		/* yup. carry on */
	bl	EXT(etap_mutex_miss)	/* get wait timestamp */
	stw	r3,SWT_HI(r1)		/* store timestamp */
	stw	r4,SWT_LO(r1)
	li	r7,1			/* mark wait timestamp as taken */
	stw	r7,MISSED(r1)
	lwz	r3,FM_ARG0(r1)		/* restore r3 (saved in prolog) */
.L_ml_block:
#endif /* ETAP_LOCK_TRACE */

	CHECK_SETUP(r12)
	CHECK_MYLOCK(MUTEX_THREAD)	/* Assert we don't own the lock already */


/* Note that we come in here with the interlock set.  The wait routine
 * will unlock it before waiting.
 */
	addis	r4,r4,1			/* Bump the wait count */
	stw	r4,LOCK_DATA(r3)
	bl	EXT(mutex_lock_wait)	/* Wait for our turn at the lock */

	lwz	r3,FM_ARG0(r1)		/* restore r3 (saved in prolog) */
	b	.L_ml_retry		/* and try again... */


/*
 * unsigned int _mutex_try(mutex_t *)
 */

	.align	5
	.globl	EXT(mutex_try)
LEXT(mutex_try)
	.globl	EXT(_mutex_try)
LEXT(_mutex_try)
#if !MACH_LDEBUG
L_mutex_try_loop:
	lwarx	r5,0,r3
	andi.	r4,r5,ILK_LOCKED|MUTEX_LOCKED
	bne-	L_mutex_try_slow
	ori	r5,r5,MUTEX_LOCKED
	stwcx.	r5,0,r3
	bne-	L_mutex_try_loop
	isync
	li	r3,1
	blr
L_mutex_try_slow:
#endif

	PROLOG(8)			/* reserve space for SWT_HI and SWT_LO */

#if ETAP_LOCK_TRACE
	li	r5,0
	stw	r5,SWT_HI(r1)		/* set wait time to 0 (HI) */
	stw	r5,SWT_LO(r1)		/* set wait time to 0 (LO) */
#endif /* ETAP_LOCK_TRACE */

#if 0
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lis	r5,0xBBBB		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif
	CHECK_SETUP(r12)
	CHECK_MUTEX_TYPE()
	CHECK_NO_SIMPLELOCKS()

1136 | lwz r6,LOCK_DATA(r3) /* Quick check */ |
1137 | andi. r6,r6,MUTEX_LOCKED /* to see if someone has this lock already */ | |
1c79356b A |
1138 | bne- mtFail /* Someone's got it already... */ |
1139 | ||
1140 | bl lockDisa /* Go get a lock on the mutex's interlock lock */ | |
1141 | mr. r4,r3 /* Did we get it? */ | |
1142 | lwz r3,FM_ARG0(r1) /* Restore the lock address */ | |
1143 | bne+ mtGotInt /* We got it just fine... */ | |
1144 | ||
1145 | lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message | |
1146 | ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message | |
1147 | bl EXT(panic) ; Call panic | |
1148 | BREAKPOINT_TRAP ; We die here anyway, can not get the lock | |
1149 | ||
1150 | .data | |
1151 | mutex_failed2: | |
1152 | STRINGD "We can't get a mutex interlock lock on mutex_try\n\000" | |
1153 | .text | |
1154 | ||
1155 | mtGotInt: | |
1156 | ||
1157 | /* Note that there is no reason to do a load and reserve here. We already | |
1158 | hold the interlock and no one can touch at this field unless they | |
1159 | have that, so, we're free to play */ | |
1160 | ||
0b4e3aa0 A |
1161 | lwz r4,LOCK_DATA(r3) /* Get the mutex's lock field */ |
1162 | andi. r9,r4,MUTEX_LOCKED /* So, can we have it? */ | |
1163 | ori r10,r4,MUTEX_LOCKED /* Set the lock value */ | |
1c79356b A |
1164 | bne- mtInUse /* Nope, sombody's playing already... */ |
1165 | ||
1c79356b A |
1166 | #if MACH_LDEBUG |
1167 | mfmsr r11 | |
0b4e3aa0 A |
1168 | rlwinm r5,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 |
1169 | mtmsr r5 | |
1c79356b | 1170 | mfsprg r9,0 /* Get the per_proc block */ |
0b4e3aa0 | 1171 | lwz r5,0(r1) /* Get previous save frame */ |
1c79356b | 1172 | lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */ |
0b4e3aa0 | 1173 | lwz r5,FM_LR_SAVE(r5) /* Get our caller's address */ |
1c79356b | 1174 | lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */ |
0b4e3aa0 | 1175 | stw r5,MUTEX_PC(r3) /* Save our caller */ |
1c79356b A |
1176 | mr. r8,r8 /* Is there any thread? */ |
1177 | stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */ | |
1178 | beq- .L_mt_no_active_thread /* No owning thread... */ | |
1179 | lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */ | |
1180 | addi r9, r9, 1 /* Bump it up */ | |
1181 | stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */ | |
1182 | .L_mt_no_active_thread: | |
1183 | mtmsr r11 | |
1184 | #endif /* MACH_LDEBUG */ | |
1185 | ||
0b4e3aa0 | 1186 | rlwinm r10,r10,0,0,30 /* Get the unlock value */ |
1c79356b | 1187 | sync /* Push it all out */ |
0b4e3aa0 A |
1188 | stw r10,LOCK_DATA(r3) /* grab the mutexlock and free the interlock */ |
1189 | isync /* stop speculative instructions */ | |
1c79356b A |
1190 | |
1191 | #if ETAP_LOCK_TRACE | |
1192 | lwz r4,0(r1) /* Back chain the stack */ | |
1193 | lwz r5,SWT_HI(r1) | |
1194 | lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */ | |
1195 | lwz r6,SWT_LO(r1) | |
1196 | bl EXT(etap_mutex_hold) /* collect hold timestamp */ | |
1197 | #endif /* ETAP_LOCK_TRACE */ | |
1198 | ||
1c79356b | 1199 | bl epStart /* Go enable preemption... */ |
0b4e3aa0 | 1200 | |
1c79356b A |
1201 | li r3, 1 |
1202 | EPILOG /* Restore all saved registers */ | |
1203 | blr /* Return... */ | |
1204 | ||
1205 | /* | |
1206 | * We come to here when we have a resource conflict. In other words, | |
1207 | * the mutex is held. | |
1208 | */ | |
1209 | ||
0b4e3aa0 A |
1210 | mtInUse: |
1211 | rlwinm r10,r10,0,0,30 /* Get the unlock value */ | |
1212 | stw r10,LOCK_DATA(r3) /* free the interlock */ | |
1c79356b | 1213 | bl epStart /* Go enable preemption... */ |
1c79356b A |
1214 | |
1215 | mtFail: li r3,0 /* Set failure code */ | |
1216 | EPILOG /* Restore all saved registers */ | |
1217 | blr /* Return... */ | |
1218 | ||
1219 | ||
/*
 * void mutex_unlock(mutex_t *l)
 */

	.align	5
	.globl	EXT(mutex_unlock)

LEXT(mutex_unlock)
#if !MACH_LDEBUG
L_mutex_unlock_loop:
	lwarx	r5,0,r3
	rlwinm.	r4,r5,16,15,31		/* Bail if pending waiter or interlock set */
	rlwinm	r5,r5,0,0,29		/* Clear the mutexlock */
	bne-	L_mutex_unlock_slow
	stwcx.	r5,0,r3
	bne-	L_mutex_unlock_loop
	sync
	blr
L_mutex_unlock_slow:
#endif
	PROLOG(0)

#if ETAP_LOCK_TRACE
	bl	EXT(etap_mutex_unlock)	/* collect ETAP data */
	lwz	r3,FM_ARG0(r1)		/* restore r3 (saved in prolog) */
#endif /* ETAP_LOCK_TRACE */

	CHECK_SETUP(r12)
	CHECK_MUTEX_TYPE()
	CHECK_THREAD(MUTEX_THREAD)

#if 0
	mfsprg	r4,0			/* (TEST/DEBUG) */
	lwz	r4,PP_CPU_DATA(r4)	/* (TEST/DEBUG) */
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lwz	r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */
	lis	r5,0xCCCC		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif
	bl	lockDisa		/* Go get a lock on the mutex's interlock lock */
	mr.	r4,r3			/* Did we get it? */
	lwz	r3,FM_ARG0(r1)		/* Restore the lock address */
	bne+	muGotInt		/* We got it just fine... */

	lis	r3,HIGH_ADDR(mutex_failed3)	; Get the failed mutex message
	ori	r3,r3,LOW_ADDR(mutex_failed3)	; Get the failed mutex message
	bl	EXT(panic)		; Call panic
	BREAKPOINT_TRAP			; We die here anyway, can not get the lock

	.data
mutex_failed3:
	STRINGD	"We can't get a mutex interlock lock on mutex_unlock\n\000"
	.text


muGotInt:
	lhz	r5,LOCK_DATA(r3)
	mr.	r5,r5			/* are there any waiters? */
	beq+	muUnlock		/* Nope, we're done... */

	bl	EXT(mutex_unlock_wakeup)	/* yes, wake a thread */
	lwz	r3,FM_ARG0(r1)		/* restore r3 (saved in prolog) */
	lhz	r5,LOCK_DATA(r3)	/* load the wait count */
	subi	r5,r5,1

muUnlock:
#if MACH_LDEBUG
	mfmsr	r11
	rlwinm	r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1
	mtmsr	r9
	mfsprg	r9,0
	lwz	r9,PP_CPU_DATA(r9)
	lwz	r9,CPU_ACTIVE_THREAD(r9)
	stw	r9,MUTEX_THREAD(r3)	/* disown thread */
	cmpwi	r9,0
	beq-	.L_mu_no_active_thread
	lwz	r8,THREAD_MUTEX_COUNT(r9)
	subi	r8,r8,1
	stw	r8,THREAD_MUTEX_COUNT(r9)
.L_mu_no_active_thread:
	mtmsr	r11
#endif /* MACH_LDEBUG */

	rlwinm	r5,r5,16,0,15		/* Shift wait count */
	sync				/* Make sure it's all there before we release */
	stw	r5,LOCK_DATA(r3)	/* unlock the interlock and lock */

	EPILOG				/* Deal with the stack now, enable_preemption doesn't always want one */
	b	epStart			/* Go enable preemption... */

/*
 * void interlock_unlock(hw_lock_t lock)
 */

	.align	5
	.globl	EXT(interlock_unlock)

LEXT(interlock_unlock)

#if 0
	lis	r0,HIGH_ADDR(CutTrace)	/* (TEST/DEBUG) */
	lis	r5,0xDDDD		/* (TEST/DEBUG) */
	oris	r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif
	lwz	r10,LOCK_DATA(r3)
	rlwinm	r10,r10,0,0,30
	sync
	stw	r10,LOCK_DATA(r3)

	b	epStart			/* Go enable preemption... */

/*
 * Here is where we enable preemption.  We need to be protected
 * against ourselves: we can't chance getting interrupted and modifying
 * our processor-wide preemption count after we've loaded it up.  So
 * we need to disable all 'rupts.  Actually, we could use a compare
 * and swap to do this, but, since there are no MP considerations
 * (we are dealing with a CPU-local field) it is much, much faster
 * to disable.
 *
 * Note that if we are not genned MP, the calls here will be no-opped via
 * a #define and, since the _mp forms are the same, likewise a #define
 * will be used to route to the other forms.
 */
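/*
 * The pattern, in illustrative C (a sketch; the names mirror the fields
 * used below, and do_preempt_call() stands in for the DoPreemptCall
 * firmware trap):
 *
 *	void _enable_preemption(void)
 *	{
 *		disable_interrupts();			// protect the per-CPU count
 *		if (--cpu_data->preemption_level == 0 &&
 *		    (*per_proc->need_ast & AST_URGENT) &&
 *		    interrupts_were_enabled())
 *			do_preempt_call();		// take the pending preemption
 *		restore_interrupts();
 *	}
 */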
1346 | ||
1347 | /* This version does not check if we get preempted or not */ | |
1348 | ||
1349 | ||
1350 | .align 4 | |
1351 | .globl EXT(_enable_preemption_no_check) | |
1352 | ||
1353 | LEXT(_enable_preemption_no_check) | |
1354 | cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted */ | |
1355 | b epCommn /* Join up with the other enable code... */ | |
1356 | ||
1357 | ||
1358 | /* This version checks if we get preempted or not */ | |
1359 | ||
1360 | .align 5 | |
1361 | .globl EXT(_enable_preemption) | |
1362 | ||
1363 | LEXT(_enable_preemption) | |
1364 | ||
1365 | epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */ | |
1366 | ||
1367 | /* | |
1368 | * Common enable preemption code | |
1369 | */ | |
1370 | ||
epCommn:	mfmsr	r9		/* Save the old MSR */
	rlwinm	r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
	mtmsr	r8			/* Interrupts off */

	mfsprg	r3,0			/* Get the per_proc block */
	lwz	r6,PP_CPU_DATA(r3)	/* Get the pointer to the CPU data from per proc */
	li	r8,-1			/* Get a decrementer */
	lwz	r5,CPU_PREEMPTION_LEVEL(r6)	/* Get the preemption level */
	add.	r5,r5,r8		/* Bring down the disable count */
#if 0
	mfsprg	r4,1			; (TEST/DEBUG) Note the next 3 keep from interrupting too early
	mr.	r4,r4			; (TEST/DEBUG)
	beq-	epskptrc0		; (TEST/DEBUG)
	lis	r0,hi16(CutTrace)	; (TEST/DEBUG)
	lis	r4,0xBBBB		; (TEST/DEBUG)
	oris	r0,r0,lo16(CutTrace)	; (TEST/DEBUG)
	sc				; (TEST/DEBUG)
epskptrc0:	mr.	r5,r5		; (TEST/DEBUG)
#endif
#if MACH_LDEBUG
	blt-	epTooFar		/* Yeah, we did... */
#endif /* MACH_LDEBUG */
	stw	r5,CPU_PREEMPTION_LEVEL(r6)	/* Save it back */

	beq+	epCheckPreempt		/* Go check if we need to be preempted... */

epNoCheck:	mtmsr	r9		/* Restore the interrupt level */
	blr				/* Leave... */

#if MACH_LDEBUG
epTooFar:
	lis	r6,HIGH_ADDR(EXT(panic))	/* First half of panic call */
	lis	r3,HIGH_ADDR(epTooFarStr)	/* First half of panic string */
	ori	r6,r6,LOW_ADDR(EXT(panic))	/* Second half of panic call */
	ori	r3,r3,LOW_ADDR(epTooFarStr)	/* Second half of panic string */
	mtlr	r6			/* Get the address of the panic routine */
	mtmsr	r9			/* Restore interruptions */
	blrl				/* Panic... */

	.data
epTooFarStr:
	STRINGD	"_enable_preemption: preemption_level <= 0!\000"
	.text
#endif /* MACH_LDEBUG */

	.align	5

epCheckPreempt:
	lwz	r7,PP_NEED_AST(r3)	/* Get the AST request address */
	li	r5,AST_URGENT		/* Get the requests we do honor */
	lwz	r7,0(r7)		/* Get the actual, real live, extra special AST word */
	lis	r0,HIGH_ADDR(DoPreemptCall)	/* Just in case, get the top of firmware call */
	and.	r7,r7,r5		; Should we preempt?
	ori	r0,r0,LOW_ADDR(DoPreemptCall)	/* Merge in bottom part */
	beq+	epCPno			; No preemption here...

	andi.	r3,r9,lo16(MASK(MSR_EE))	; We cannot preempt if interruptions are off

epCPno:	mtmsr	r9			/* Allow interrupts if we can */
	beqlr+				; We probably will not preempt...
	sc				/* Do the preemption */
	blr				/* Now, go away now... */

/*
 * Here is where we disable preemption.  Since preemption is on a
 * per-processor basis (a thread runs on one CPU at a time), we don't
 * need any cross-processor synchronization.  We do, however, need to
 * be interrupt safe, so we don't preempt while in the process of
 * disabling it.  We could use SPLs, but since we always want complete
 * disablement, and this is platform-specific code, we'll just kick the
 * MSR.  We'll save a couple of orders of magnitude over using SPLs.
 */

	.align	5

	nop				; Use these 5 nops to force daPreComm
	nop				; to a line boundary.
	nop
	nop
	nop

	.globl	EXT(_disable_preemption)

LEXT(_disable_preemption)

daPreAll:	mfmsr	r9		/* Save the old MSR */
	rlwinm	r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear interruptions */
	mtmsr	r8			/* Interrupts off */

daPreComm:	mfsprg	r6,0		/* Get the per_proc block */
	lwz	r6,PP_CPU_DATA(r6)	/* Get the pointer to the CPU data from per proc */
	lwz	r5,CPU_PREEMPTION_LEVEL(r6)	/* Get the preemption level */
	addi	r5,r5,1			/* Bring up the disable count */
	stw	r5,CPU_PREEMPTION_LEVEL(r6)	/* Save it back */
#if 0
	mfsprg	r4,1			; (TEST/DEBUG) Note the next 3 keep from interrupting too early
	mr.	r4,r4			; (TEST/DEBUG)
	beq-	epskptrc1		; (TEST/DEBUG)
	lis	r0,hi16(CutTrace)	; (TEST/DEBUG)
	lis	r4,0xAAAA		; (TEST/DEBUG)
	oris	r0,r0,lo16(CutTrace)	; (TEST/DEBUG)
	sc				; (TEST/DEBUG)
epskptrc1:				; (TEST/DEBUG)
#endif

1476 | ; | |
1477 | ; Set PREEMPTSTACK above to enable a preemption traceback stack. | |
1478 | ; | |
1479 | ; NOTE: make sure that PREEMPTSTACK in aligned_data is | |
1480 | ; set the same as it is here. This is the number of | |
1481 | ; traceback entries we can handle per processor | |
1482 | ; | |
1483 | ; A value of 0 disables the stack. | |
1484 | ; | |
1485 | #if PREEMPTSTACK | |
1486 | cmplwi r5,PREEMPTSTACK ; Maximum depth | |
1487 | lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread | |
1488 | bgt- nopredeb ; Too many to stack... | |
1489 | mr. r6,r6 ; During boot? | |
1490 | beq- nopredeb ; Yes, do not do backtrace... | |
1491 | lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation | |
1492 | lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used | |
1493 | mr. r0,r6 ; Any saved context? | |
1494 | beq- nosaveds ; No... | |
1495 | lwz r0,saver1(r6) ; Get end of savearea chain | |
1496 | ||
1497 | nosaveds: li r11,0 ; Clear caller's caller's caller's return | |
1498 | li r10,0 ; Clear caller's caller's caller's caller's return | |
1499 | li r8,0 ; Clear caller's caller's caller's caller's caller's return | |
1500 | lwz r2,0(r1) ; Get caller's caller's stack frame | |
1501 | lwz r12,8(r2) ; Get our caller's return | |
1502 | lwz r4,0(r2) ; Back chain | |
1503 | ||
1504 | xor r2,r4,r2 ; Form difference | |
1505 | cmplwi r2,8192 ; Within a couple of pages? | |
1506 | mr r2,r4 ; Move register | |
1507 | bge- nosaveher2 ; No, no back chain then... | |
1508 | lwz r11,8(r2) ; Get our caller's return | |
1509 | lwz r4,0(r2) ; Back chain | |
1510 | ||
1511 | xor r2,r4,r2 ; Form difference | |
1512 | cmplwi r2,8192 ; Within a couple of pages? | |
1513 | mr r2,r4 ; Move register | |
1514 | bge- nosaveher2 ; No, no back chain then... | |
1515 | lwz r10,8(r2) ; Get our caller's return | |
1516 | lwz r4,0(r2) ; Back chain | |
1517 | ||
1518 | xor r2,r4,r2 ; Form difference | |
1519 | cmplwi r2,8192 ; Within a couple of pages? | |
1520 | mr r2,r4 ; Move register | |
1521 | bge- nosaveher2 ; No, no back chain then... | |
1522 | lwz r8,8(r2) ; Get our caller's return | |
1523 | ||
1524 | nosaveher2: | |
1525 | addi r5,r5,-1 ; Get index to slot | |
1526 | mfspr r6,pir ; Get our processor | |
1527 | mflr r4 ; Get our return | |
1528 | rlwinm r6,r6,8,0,23 ; Index to processor slot | |
1529 | lis r2,hi16(EXT(DBGpreempt)) ; Stack high order | |
1530 | rlwinm r5,r5,4,0,27 ; Index to stack slot | |
1531 | ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order | |
1532 | add r2,r2,r5 ; Point to slot | |
1533 | add r2,r2,r6 ; Move to processor | |
1534 | stw r4,0(r2) ; Save our return | |
1535 | stw r11,4(r2) ; Save caller's caller | |
1536 | stw r10,8(r2) ; Save caller's caller's caller | |
1537 | stw r8,12(r2) ; Save caller's caller's caller's caller | |
1538 | nopredeb: | |
1539 | #endif | |
1540 | mtmsr r9 /* Allow interruptions now */ | |
1541 | ||
1542 | blr /* Return... */ | |
1543 | ||
1544 | /* | |
1545 | * Return the active thread for both inside and outside osfmk consumption | |
1546 | */ | |
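/*
 * This getter and the two below share one pattern: drop EE around the
 * read so the per-processor fetch cannot be interrupted mid-sequence.
 * A C sketch (accessor names are illustrative):
 *
 *	thread_t current_thread(void) {
 *		unsigned int msr = disable_interrupts();
 *		thread_t t = per_proc()->cpu_data->active_thread;	// CPU_ACTIVE_THREAD
 *		restore_interrupts(msr);
 *		return t;
 *	}
 */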
1547 | ||
1548 | .align 5 | |
1549 | .globl EXT(current_thread) | |
1550 | ||
1551 | LEXT(current_thread) | |
1552 | ||
1553 | mfmsr r9 /* Save the old MSR */ | |
1554 | rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ | |
1555 | mtmsr r8 /* Interrupts off */ | |
1556 | mfsprg r6,0 /* Get the per_proc */ | |
1557 | lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ | |
1558 | lwz r3,CPU_ACTIVE_THREAD(r6) /* Get the active thread */ | |
1559 | mtmsr r9 /* Restore interruptions to entry */ | |
1560 | blr /* Return... */ | |
1561 | ||
1562 | ||
1563 | /* | |
1564 | * Return the current preemption level | |
1565 | */ | |
1566 | ||
1567 | .align 5 | |
1568 | .globl EXT(get_preemption_level) | |
1569 | ||
1570 | LEXT(get_preemption_level) | |
1571 | ||
1572 | mfmsr r9 /* Save the old MSR */ | |
1573 | rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ | |
1574 | mtmsr r8 /* Interrupts off */ | |
1575 | mfsprg r6,0 /* Get the per_proc */ | |
1576 | lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ | |
1577 | lwz r3,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */ | |
1578 | mtmsr r9 /* Restore interruptions to entry */ | |
1579 | blr /* Return... */ | |
1580 | ||
1581 | ||
1582 | /* | |
1583 | * Return the simple lock count | |
1584 | */ | |
1585 | ||
1586 | .align 5 | |
1587 | .globl EXT(get_simple_lock_count) | |
1588 | ||
1589 | LEXT(get_simple_lock_count) | |
1590 | ||
1591 | mfmsr r9 /* Save the old MSR */ | |
1592 | rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ | |
1593 | mtmsr r8 /* Interrupts off */ | |
1594 | mfsprg r6,0 /* Get the per_proc */ | |
1595 | lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ | |
1596 | lwz r3,CPU_SIMPLE_LOCK_COUNT(r6) /* Get the simple lock count */ | |
1597 | mtmsr r9 /* Restore interruptions to entry */ | |
1598 | blr /* Return... */ | |
1599 | ||
0b4e3aa0 A |
1600 | /* |
1601 | * fast_usimple_lock(): | |
1602 | * | |
1603 | * If EE is off, get the simple lock without incrementing the preemption count and | |
1604 | * mark the simple lock with SLOCK_FAST. | |
1605 | * If EE is on, call usimple_lock(). | |
1606 | */ | |
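/*
 * The lwarx/stwcx. loop below behaves roughly like this C sketch
 * (load_reserved()/store_conditional() are stand-ins for the
 * reservation pair, not real primitives):
 *
 *	void fast_usimple_lock(usimple_lock_t l) {
 *		if (interrupts_enabled()) {		// EE on: use the full path
 *			usimple_lock(l);
 *			return;
 *		}
 *		for (;;) {
 *			if (load_reserved(&l->lock_data) != 0) {	// already held
 *				usimple_lock(l);			// slow path
 *				return;
 *			}
 *			if (store_conditional(&l->lock_data, ILK_LOCKED|SLOCK_FAST))
 *				break;			// stwcx. kept the reservation
 *		}
 *		isync();	// acquire barrier: no loads ahead of the lock
 *	}
 */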
1607 | .align 5 | |
1608 | .globl EXT(fast_usimple_lock) | |
1609 | ||
1610 | LEXT(fast_usimple_lock) | |
1611 | ||
1612 | mfmsr r9 | |
1613 | andi. r7,r9,lo16(MASK(MSR_EE)) | |
1614 | bne- L_usimple_lock_c | |
1615 | L_usimple_lock_loop: | |
1616 | lwarx r4,0,r3 | |
1617 | li r5,ILK_LOCKED|SLOCK_FAST | |
1618 | mr. r4,r4 | |
1619 | bne- L_usimple_lock_c | |
1620 | stwcx. r5,0,r3 | |
1621 | bne- L_usimple_lock_loop | |
1622 | isync | |
1623 | blr | |
1624 | L_usimple_lock_c: | |
1625 | b EXT(usimple_lock) | |
1626 | ||
1627 | /* | |
1628 | * fast_usimple_lock_try(): | |
1629 | * | |
1630 | * If EE is off, try to get the simple lock. The preemption count is not incremented and, | |
1631 | * if the lock is acquired, it is marked with SLOCK_FAST. | |
1632 | * If EE is on, call usimple_lock_try(). | |
1633 | */ | |
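/*
 * Same shape as fast_usimple_lock() above, except a busy lock returns
 * failure instead of taking the slow path; sketch only:
 *
 *	int fast_usimple_lock_try(usimple_lock_t l) {
 *		if (interrupts_enabled())
 *			return usimple_lock_try(l);	// EE on: full path
 *		for (;;) {
 *			if (load_reserved(&l->lock_data) != 0)
 *				return 0;		// busy: fail fast
 *			if (store_conditional(&l->lock_data, ILK_LOCKED|SLOCK_FAST))
 *				break;
 *		}
 *		isync();				// acquire barrier
 *		return 1;				// got it
 *	}
 */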
1634 | .align 5 | |
1635 | .globl EXT(fast_usimple_lock_try) | |
1636 | ||
1637 | LEXT(fast_usimple_lock_try) | |
1638 | ||
1639 | mfmsr r9 | |
1640 | andi. r7,r9,lo16(MASK(MSR_EE)) | |
1641 | bne- L_usimple_lock_try_c | |
1642 | L_usimple_lock_try_loop: | |
1643 | lwarx r4,0,r3 | |
1644 | li r5,ILK_LOCKED|SLOCK_FAST | |
1645 | mr. r4,r4 | |
1646 | bne- L_usimple_lock_try_fail | |
1647 | stwcx. r5,0,r3 | |
1648 | bne- L_usimple_lock_try_loop | |
1649 | li r3,1 | |
1650 | isync | |
1651 | blr | |
1652 | L_usimple_lock_try_fail: | |
1653 | li r3,0 | |
1654 | blr | |
1655 | L_usimple_lock_try_c: | |
1656 | b EXT(usimple_lock_try) | |
1657 | ||
1658 | /* | |
1659 | * fast_usimple_unlock(): | |
1660 | * | |
1661 | * If the simple lock is marked SLOCK_FAST, release it without decrementing the preemption count. | |
1662 | * Call usimple_unlock() otherwise. | |
1663 | */ | |
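/*
 * Release is a plain store once the fast marking checks out; the sync
 * ahead of it makes the critical section's stores visible before the
 * lock reads as free.  A C sketch:
 *
 *	void fast_usimple_unlock(usimple_lock_t l) {
 *		if (l->lock_data != (ILK_LOCKED|SLOCK_FAST)) {
 *			usimple_unlock(l);		// not fast-held: full path
 *			return;
 *		}
 *		sync();					// release barrier
 *		l->lock_data = 0;			// drop the lock
 *	}
 */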
1664 | .align 5 | |
1665 | .globl EXT(fast_usimple_unlock) | |
1666 | ||
1667 | LEXT(fast_usimple_unlock) | |
1668 | ||
1669 | lwz r5,LOCK_DATA(r3) | |
1670 | li r0,0 | |
1671 | cmpi cr0,r5,ILK_LOCKED|SLOCK_FAST | |
1672 | bne- L_usimple_unlock_c | |
1673 | sync | |
1674 | #if 0 | |
1675 | mfmsr r9 | |
1676 | andi. r7,r9,lo16(MASK(MSR_EE)) | |
1677 | beq L_usimple_unlock_cont | |
1678 | lis r3,hi16(L_usimple_unlock_panic) | |
1679 | ori r3,r3,lo16(L_usimple_unlock_panic) | |
1680 | bl EXT(panic) | |
1681 | ||
1682 | .data | |
1683 | L_usimple_unlock_panic: | |
1684 | STRINGD "fast_usimple_unlock: interrupts not disabled\n\000" | |
1685 | .text | |
1686 | L_usimple_unlock_cont: | |
1687 | #endif | |
1688 | stw r0, LOCK_DATA(r3) | |
1689 | blr | |
1690 | L_usimple_unlock_c: | |
1691 | b EXT(usimple_unlock) | |
1692 |