1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 #include <mach_kdb.h>
57 #include <mach_ldebug.h>
58 #include <debug.h>
59
60 #include <mach/kern_return.h>
61 #include <mach/mach_host_server.h>
62 #include <mach_debug/lockgroup_info.h>
63
64 #include <kern/locks.h>
65 #include <kern/misc_protos.h>
66 #include <kern/kalloc.h>
67 #include <kern/thread.h>
68 #include <kern/processor.h>
69 #include <kern/sched_prim.h>
70 #include <kern/debug.h>
71 #include <string.h>
72
73
74 #include <sys/kdebug.h>
75
76 #if CONFIG_DTRACE
77 /*
78 * We need only enough declarations from the BSD side to be able to
79 * test if our probe is active, and to call __dtrace_probe(). Setting
80 * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
81 */
82 #define NEED_DTRACE_DEFS
83 #include <../bsd/sys/lockstat.h>
84 #endif
85
86 #define LCK_MTX_SLEEP_CODE 0
87 #define LCK_MTX_SLEEP_DEADLINE_CODE 1
88 #define LCK_MTX_LCK_WAIT_CODE 2
89 #define LCK_MTX_UNLCK_WAKEUP_CODE 3
90
91
92 static queue_head_t lck_grp_queue;
93 static unsigned int lck_grp_cnt;
94
95 decl_mutex_data(static,lck_grp_lock)
96
97 lck_grp_attr_t LockDefaultGroupAttr;
98 lck_grp_t LockCompatGroup;
99 lck_attr_t LockDefaultLckAttr;
100
101 /*
102 * Routine: lck_mod_init
103 */
104
105 void
106 lck_mod_init(
107 void)
108 {
109 queue_init(&lck_grp_queue);
110 mutex_init(&lck_grp_lock, 0);
111 lck_grp_cnt = 0;
112 lck_grp_attr_setdefault( &LockDefaultGroupAttr);
113 lck_grp_init( &LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
114 lck_attr_setdefault(&LockDefaultLckAttr);
115 }
116
117 /*
118 * Routine: lck_grp_attr_alloc_init
119 */
120
121 lck_grp_attr_t *
122 lck_grp_attr_alloc_init(
123 void)
124 {
125 lck_grp_attr_t *attr;
126
127 if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
128 lck_grp_attr_setdefault(attr);
129
130 return(attr);
131 }
132
133
134 /*
135 * Routine: lck_grp_attr_setdefault
136 */
137
138 void
139 lck_grp_attr_setdefault(
140 lck_grp_attr_t *attr)
141 {
142 if (LcksOpts & enaLkStat)
143 attr->grp_attr_val = LCK_GRP_ATTR_STAT;
144 else
145 attr->grp_attr_val = 0;
146 }
147
148
149 /*
150 * Routine: lck_grp_attr_setstat
151 */
152
153 void
154 lck_grp_attr_setstat(
155 lck_grp_attr_t *attr)
156 {
157 (void)hw_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
158 }
159
160
161 /*
162 * Routine: lck_grp_attr_free
163 */
164
165 void
166 lck_grp_attr_free(
167 lck_grp_attr_t *attr)
168 {
169 kfree(attr, sizeof(lck_grp_attr_t));
170 }
171
172
173 /*
174 * Routine: lck_grp_alloc_init
175 */
176
177 lck_grp_t *
178 lck_grp_alloc_init(
179 const char* grp_name,
180 lck_grp_attr_t *attr)
181 {
182 lck_grp_t *grp;
183
184 if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
185 lck_grp_init(grp, grp_name, attr);
186
187 return(grp);
188 }
189
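/*
 * Illustrative usage sketch (not part of this file; the group name and
 * variables are hypothetical). A typical client allocates a group, hangs
 * its locks off it, and tears everything down in the reverse order:
 *
 *	lck_grp_attr_t	*grp_attr = lck_grp_attr_alloc_init();
 *	lck_grp_t	*grp = lck_grp_alloc_init("com.example.driver", grp_attr);
 *	lck_mtx_t	*mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);
 *	...
 *	lck_mtx_free(mtx, grp);
 *	lck_grp_free(grp);
 *	lck_grp_attr_free(grp_attr);
 */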
190
191 /*
192 * Routine: lck_grp_init
193 */
194
195 void
196 lck_grp_init(
197 lck_grp_t *grp,
198 const char* grp_name,
199 lck_grp_attr_t *attr)
200 {
201 bzero((void *)grp, sizeof(lck_grp_t));
202
203 (void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);
204
205 if (attr != LCK_GRP_ATTR_NULL)
206 grp->lck_grp_attr = attr->grp_attr_val;
207 else if (LcksOpts & enaLkStat)
208 grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
209 else
210 grp->lck_grp_attr = LCK_ATTR_NONE;
211
212 grp->lck_grp_refcnt = 1;
213
214 mutex_lock(&lck_grp_lock);
215 enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
216 lck_grp_cnt++;
217 mutex_unlock(&lck_grp_lock);
218
219 }
220
221
222 /*
223 * Routine: lck_grp_free
224 */
225
226 void
227 lck_grp_free(
228 lck_grp_t *grp)
229 {
230 mutex_lock(&lck_grp_lock);
231 lck_grp_cnt--;
232 (void)remque((queue_entry_t)grp);
233 mutex_unlock(&lck_grp_lock);
234 lck_grp_deallocate(grp);
235 }
236
237
238 /*
239 * Routine: lck_grp_reference
240 */
241
242 void
243 lck_grp_reference(
244 lck_grp_t *grp)
245 {
246 (void)hw_atomic_add(&grp->lck_grp_refcnt, 1);
247 }
248
249
250 /*
251 * Routine: lck_grp_deallocate
252 */
253
254 void
255 lck_grp_deallocate(
256 lck_grp_t *grp)
257 {
258 if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0)
259 kfree(grp, sizeof(lck_grp_t));
260 }
261
262 /*
263 * Routine: lck_grp_lckcnt_incr
264 */
265
266 void
267 lck_grp_lckcnt_incr(
268 lck_grp_t *grp,
269 lck_type_t lck_type)
270 {
271 unsigned int *lckcnt;
272
273 switch (lck_type) {
274 case LCK_TYPE_SPIN:
275 lckcnt = &grp->lck_grp_spincnt;
276 break;
277 case LCK_TYPE_MTX:
278 lckcnt = &grp->lck_grp_mtxcnt;
279 break;
280 case LCK_TYPE_RW:
281 lckcnt = &grp->lck_grp_rwcnt;
282 break;
283 default:
284 return panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
285 }
286
287 (void)hw_atomic_add(lckcnt, 1);
288 }
289
290 /*
291 * Routine: lck_grp_lckcnt_decr
292 */
293
294 void
295 lck_grp_lckcnt_decr(
296 lck_grp_t *grp,
297 lck_type_t lck_type)
298 {
299 unsigned int *lckcnt;
300
301 switch (lck_type) {
302 case LCK_TYPE_SPIN:
303 lckcnt = &grp->lck_grp_spincnt;
304 break;
305 case LCK_TYPE_MTX:
306 lckcnt = &grp->lck_grp_mtxcnt;
307 break;
308 case LCK_TYPE_RW:
309 lckcnt = &grp->lck_grp_rwcnt;
310 break;
311 default:
312 return panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
313 }
314
315 (void)hw_atomic_sub(lckcnt, 1);
316 }
317
318 /*
319 * Routine: lck_attr_alloc_init
320 */
321
322 lck_attr_t *
323 lck_attr_alloc_init(
324 void)
325 {
326 lck_attr_t *attr;
327
328 if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
329 lck_attr_setdefault(attr);
330
331 return(attr);
332 }
333
334
335 /*
336 * Routine: lck_attr_setdefault
337 */
338
339 void
340 lck_attr_setdefault(
341 lck_attr_t *attr)
342 {
343 #if !DEBUG
344 if (LcksOpts & enaLkDeb)
345 attr->lck_attr_val = LCK_ATTR_DEBUG;
346 else
347 attr->lck_attr_val = LCK_ATTR_NONE;
348 #else
349 attr->lck_attr_val = LCK_ATTR_DEBUG;
350 #endif
351
352 }
353
354
355 /*
356 * Routine: lck_attr_setdebug
357 */
358 void
359 lck_attr_setdebug(
360 lck_attr_t *attr)
361 {
362 (void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG);
363 }
364
365 /*
366 * Routine: lck_attr_cleardebug
367 */
368 void
369 lck_attr_cleardebug(
370 lck_attr_t *attr)
371 {
372 (void)hw_atomic_and(&attr->lck_attr_val, ~LCK_ATTR_DEBUG);
373 }
374
375
376 /*
377 * Routine: lck_attr_rw_shared_priority
378 */
379 void
380 lck_attr_rw_shared_priority(
381 lck_attr_t *attr)
382 {
383 (void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
384 }
385
386
387 /*
388 * Routine: lck_attr_free
389 */
390 void
391 lck_attr_free(
392 lck_attr_t *attr)
393 {
394 kfree(attr, sizeof(lck_attr_t));
395 }
396
397
398 /*
399 * Routine: lck_spin_sleep
400 */
401 wait_result_t
402 lck_spin_sleep(
403 lck_spin_t *lck,
404 lck_sleep_action_t lck_sleep_action,
405 event_t event,
406 wait_interrupt_t interruptible)
407 {
408 wait_result_t res;
409
410 if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
411 panic("Invalid lock sleep action %x\n", lck_sleep_action);
412
413 res = assert_wait(event, interruptible);
414 if (res == THREAD_WAITING) {
415 lck_spin_unlock(lck);
416 res = thread_block(THREAD_CONTINUE_NULL);
417 if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
418 lck_spin_lock(lck);
419 }
420 else
421 if (lck_sleep_action & LCK_SLEEP_UNLOCK)
422 lck_spin_unlock(lck);
423
424 return res;
425 }
426
427
428 /*
429 * Routine: lck_spin_sleep_deadline
430 */
431 wait_result_t
432 lck_spin_sleep_deadline(
433 lck_spin_t *lck,
434 lck_sleep_action_t lck_sleep_action,
435 event_t event,
436 wait_interrupt_t interruptible,
437 uint64_t deadline)
438 {
439 wait_result_t res;
440
441 if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
442 panic("Invalid lock sleep action %x\n", lck_sleep_action);
443
444 res = assert_wait_deadline(event, interruptible, deadline);
445 if (res == THREAD_WAITING) {
446 lck_spin_unlock(lck);
447 res = thread_block(THREAD_CONTINUE_NULL);
448 if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
449 lck_spin_lock(lck);
450 }
451 else
452 if (lck_sleep_action & LCK_SLEEP_UNLOCK)
453 lck_spin_unlock(lck);
454
455 return res;
456 }
457
458
459 /*
460 * Routine: lck_mtx_sleep
461 */
462 wait_result_t
463 lck_mtx_sleep(
464 lck_mtx_t *lck,
465 lck_sleep_action_t lck_sleep_action,
466 event_t event,
467 wait_interrupt_t interruptible)
468 {
469 wait_result_t res;
470
471 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
472 (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);
473
474 if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
475 panic("Invalid lock sleep action %x\n", lck_sleep_action);
476
477 res = assert_wait(event, interruptible);
478 if (res == THREAD_WAITING) {
479 lck_mtx_unlock(lck);
480 res = thread_block(THREAD_CONTINUE_NULL);
481 if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
482 lck_mtx_lock(lck);
483 }
484 else
485 if (lck_sleep_action & LCK_SLEEP_UNLOCK)
486 lck_mtx_unlock(lck);
487
488 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);
489
490 return res;
491 }
492
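/*
 * Illustrative usage sketch (hypothetical names, not part of this file).
 * A waiter holds the mutex, re-checks its predicate after every wakeup,
 * and still owns the mutex on return unless LCK_SLEEP_UNLOCK was passed;
 * the producer side sets the flag and calls thread_wakeup() on the same event:
 *
 *	lck_mtx_lock(mtx);
 *	while (!work_available)
 *		(void) lck_mtx_sleep(mtx, LCK_SLEEP_DEFAULT,
 *				     (event_t)&work_available, THREAD_UNINT);
 *	... consume the work ...
 *	lck_mtx_unlock(mtx);
 */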
493
494 /*
495 * Routine: lck_mtx_sleep_deadline
496 */
497 wait_result_t
498 lck_mtx_sleep_deadline(
499 lck_mtx_t *lck,
500 lck_sleep_action_t lck_sleep_action,
501 event_t event,
502 wait_interrupt_t interruptible,
503 uint64_t deadline)
504 {
505 wait_result_t res;
506
507 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
508 (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);
509
510 if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
511 panic("Invalid lock sleep action %x\n", lck_sleep_action);
512
513 res = assert_wait_deadline(event, interruptible, deadline);
514 if (res == THREAD_WAITING) {
515 lck_mtx_unlock(lck);
516 res = thread_block(THREAD_CONTINUE_NULL);
517 if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
518 lck_mtx_lock(lck);
519 }
520 else
521 if (lck_sleep_action & LCK_SLEEP_UNLOCK)
522 lck_mtx_unlock(lck);
523
524 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);
525
526 return res;
527 }
528
529 /*
530 * Routine: lck_mtx_lock_wait
531 *
532 * Invoked in order to wait on contention.
533 *
534 * Called with the interlock locked and
535 * returns it unlocked.
536 */
537 void
538 lck_mtx_lock_wait (
539 lck_mtx_t *lck,
540 thread_t holder)
541 {
542 thread_t self = current_thread();
543 lck_mtx_t *mutex;
544 integer_t priority;
545 spl_t s = splsched();
546 #if CONFIG_DTRACE
547 uint64_t sleep_start = 0;
548
549 if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
550 sleep_start = mach_absolute_time();
551 }
552 #endif
553
554 if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
555 mutex = lck;
556 else
557 mutex = &lck->lck_mtx_ptr->lck_mtx;
558
559 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);
560
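	/*
	 * Compute the priority to promote the holder to: at least this
	 * waiter's priority, bounded below by BASEPRI_DEFAULT and above
	 * by MINPRI_KERNEL (priority inheritance).
	 */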
561 priority = self->sched_pri;
562 if (priority < self->priority)
563 priority = self->priority;
564 if (priority > MINPRI_KERNEL)
565 priority = MINPRI_KERNEL;
566 else
567 if (priority < BASEPRI_DEFAULT)
568 priority = BASEPRI_DEFAULT;
569
570 thread_lock(holder);
571 if (mutex->lck_mtx_pri == 0)
572 holder->promotions++;
573 if (holder->priority < MINPRI_KERNEL) {
574 holder->sched_mode |= TH_MODE_PROMOTED;
575 if ( mutex->lck_mtx_pri < priority &&
576 holder->sched_pri < priority ) {
577 KERNEL_DEBUG_CONSTANT(
578 MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
579 holder->sched_pri, priority, (int)holder, (int)lck, 0);
580
581 set_sched_pri(holder, priority);
582 }
583 }
584 thread_unlock(holder);
585 splx(s);
586
587 if (mutex->lck_mtx_pri < priority)
588 mutex->lck_mtx_pri = priority;
589 if (self->pending_promoter[self->pending_promoter_index] == NULL) {
590 self->pending_promoter[self->pending_promoter_index] = mutex;
591 mutex->lck_mtx_waiters++;
592 }
593 else
594 if (self->pending_promoter[self->pending_promoter_index] != mutex) {
595 self->pending_promoter[++self->pending_promoter_index] = mutex;
596 mutex->lck_mtx_waiters++;
597 }
598
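	/*
	 * Wait on an event derived from the lock's address (its last
	 * word); lck_mtx_unlock_wakeup() issues the matching wakeup.
	 */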
599 assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
600 lck_mtx_ilk_unlock(mutex);
601
602 thread_block(THREAD_CONTINUE_NULL);
603
604 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
605 #if CONFIG_DTRACE
606 /*
607 * Record the DTrace lockstat probe for blocking; the block time is
608 * measured from when this routine was entered.
609 */
610 if (sleep_start) {
611 if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
612 LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
613 mach_absolute_time() - sleep_start);
614 } else {
615 LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
616 mach_absolute_time() - sleep_start);
617 }
618 }
619 #endif
620 }
621
622 /*
623 * Routine: lck_mtx_lock_acquire
624 *
625 * Invoked on acquiring the mutex when there is
626 * contention.
627 *
628 * Returns the current number of waiters.
629 *
630 * Called with the interlock locked.
631 */
632 int
633 lck_mtx_lock_acquire(
634 lck_mtx_t *lck)
635 {
636 thread_t thread = current_thread();
637 lck_mtx_t *mutex;
638
639 if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
640 mutex = lck;
641 else
642 mutex = &lck->lck_mtx_ptr->lck_mtx;
643
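	/*
	 * If this thread recorded itself as a pending promoter of this
	 * mutex in lck_mtx_lock_wait(), clear that record and drop the
	 * waiter count it contributed.
	 */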
644 if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
645 thread->pending_promoter[thread->pending_promoter_index] = NULL;
646 if (thread->pending_promoter_index > 0)
647 thread->pending_promoter_index--;
648 mutex->lck_mtx_waiters--;
649 }
650
651 if (mutex->lck_mtx_waiters > 0) {
652 integer_t priority = mutex->lck_mtx_pri;
653 spl_t s = splsched();
654
655 thread_lock(thread);
656 thread->promotions++;
657 if (thread->priority < MINPRI_KERNEL) {
658 thread->sched_mode |= TH_MODE_PROMOTED;
659 if (thread->sched_pri < priority) {
660 KERNEL_DEBUG_CONSTANT(
661 MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
662 thread->sched_pri, priority, 0, (int)lck, 0);
663
664 set_sched_pri(thread, priority);
665 }
666 }
667 thread_unlock(thread);
668 splx(s);
669 }
670 else
671 mutex->lck_mtx_pri = 0;
672
673 return (mutex->lck_mtx_waiters);
674 }
675
676 /*
677 * Routine: lck_mtx_unlock_wakeup
678 *
679 * Invoked on unlock when there is contention.
680 *
681 * Called with the interlock locked.
682 */
683 void
684 lck_mtx_unlock_wakeup (
685 lck_mtx_t *lck,
686 thread_t holder)
687 {
688 thread_t thread = current_thread();
689 lck_mtx_t *mutex;
690
691 if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
692 mutex = lck;
693 else
694 mutex = &lck->lck_mtx_ptr->lck_mtx;
695
696
697 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);
698
699 if (thread != holder)
700 panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);
701
702 if (thread->promotions > 0) {
703 spl_t s = splsched();
704
705 thread_lock(thread);
706 if ( --thread->promotions == 0 &&
707 (thread->sched_mode & TH_MODE_PROMOTED) ) {
708 thread->sched_mode &= ~TH_MODE_PROMOTED;
709 if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
710 KERNEL_DEBUG_CONSTANT(
711 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
712 thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);
713
714 set_sched_pri(thread, DEPRESSPRI);
715 }
716 else {
717 if (thread->priority < thread->sched_pri) {
718 KERNEL_DEBUG_CONSTANT(
719 MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
720 DBG_FUNC_NONE,
721 thread->sched_pri, thread->priority,
722 0, (int)lck, 0);
723 }
724
725 compute_priority(thread, FALSE);
726 }
727 }
728 thread_unlock(thread);
729 splx(s);
730 }
731 assert(mutex->lck_mtx_waiters > 0);
732 thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));
733
734 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
735 }
736
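/*
 * Routine:	lck_mtx_unlockspin_wakeup
 *
 * Invoked on unlock when there is at least one waiter;
 * wakes a single waiting thread.
 */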
737 void
738 lck_mtx_unlockspin_wakeup (
739 lck_mtx_t *lck)
740 {
741 assert(lck->lck_mtx_waiters > 0);
742 thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));
743
744 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, (int)lck, 0, 0, 1, 0);
745 #if CONFIG_DTRACE
746 /*
747 * When there are waiters, we skip the hot-patch spot in the
748 * fastpath, so we record it here.
749 */
750 LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck, 0);
751 #endif
752 }
753
754
755 /*
756 * Routine: mutex_pause
757 *
758 * Called by former callers of simple_lock_pause().
759 */
760 #define MAX_COLLISION_COUNTS 32
761 #define MAX_COLLISION 8
762
763 unsigned int max_collision_count[MAX_COLLISION_COUNTS];
764
765 uint32_t collision_backoffs[MAX_COLLISION] = {
766 10, 50, 100, 200, 400, 600, 800, 1000
767 };
768
769
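/*
 * The caller passes the number of collisions it has already taken; for
 * example, collisions == 3 parks the thread for collision_backoffs[3],
 * i.e. 200 microseconds, before returning, and anything past 7
 * collisions waits the maximum of 1000 microseconds.
 */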
770 void
771 mutex_pause(uint32_t collisions)
772 {
773 wait_result_t wait_result;
774 uint32_t back_off;
775
776 if (collisions >= MAX_COLLISION_COUNTS)
777 collisions = MAX_COLLISION_COUNTS - 1;
778 max_collision_count[collisions]++;
779
780 if (collisions >= MAX_COLLISION)
781 collisions = MAX_COLLISION - 1;
782 back_off = collision_backoffs[collisions];
783
784 wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC);
785 assert(wait_result == THREAD_WAITING);
786
787 wait_result = thread_block(THREAD_CONTINUE_NULL);
788 assert(wait_result == THREAD_TIMED_OUT);
789 }
790
791
792 unsigned int mutex_yield_wait = 0;
793 unsigned int mutex_yield_no_wait = 0;
794
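/*
 * Routine:	mutex_yield
 *
 * If the mutex has waiters, briefly release it (pausing via
 * mutex_pause) so a waiter can run, then re-acquire it.
 */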
795 void
796 mutex_yield(
797 mutex_t *mutex)
798 {
799 lck_mtx_t *lck;
800
801 #if DEBUG
802 _mutex_assert(mutex, MA_OWNED);
803 #endif /* DEBUG */
804
805 lck = (lck_mtx_t *) mutex;
806 if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
807 lck = &lck->lck_mtx_ptr->lck_mtx;
808
809 if (! lck->lck_mtx_waiters) {
810 mutex_yield_no_wait++;
811 } else {
812 mutex_yield_wait++;
813 mutex_unlock(mutex);
814 mutex_pause(0);
815 mutex_lock(mutex);
816 }
817 }
818
819
820 /*
821 * Routine: lck_rw_sleep
822 */
823 wait_result_t
824 lck_rw_sleep(
825 lck_rw_t *lck,
826 lck_sleep_action_t lck_sleep_action,
827 event_t event,
828 wait_interrupt_t interruptible)
829 {
830 wait_result_t res;
831 lck_rw_type_t lck_rw_type;
832
833 if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
834 panic("Invalid lock sleep action %x\n", lck_sleep_action);
835
836 res = assert_wait(event, interruptible);
837 if (res == THREAD_WAITING) {
838 lck_rw_type = lck_rw_done(lck);
839 res = thread_block(THREAD_CONTINUE_NULL);
840 if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
841 if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
842 lck_rw_lock(lck, lck_rw_type);
843 else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
844 lck_rw_lock_exclusive(lck);
845 else
846 lck_rw_lock_shared(lck);
847 }
848 }
849 else
850 if (lck_sleep_action & LCK_SLEEP_UNLOCK)
851 (void)lck_rw_done(lck);
852
853 return res;
854 }
855
856
857 /*
858 * Routine: lck_rw_sleep_deadline
859 */
860 wait_result_t
861 lck_rw_sleep_deadline(
862 lck_rw_t *lck,
863 lck_sleep_action_t lck_sleep_action,
864 event_t event,
865 wait_interrupt_t interruptible,
866 uint64_t deadline)
867 {
868 wait_result_t res;
869 lck_rw_type_t lck_rw_type;
870
871 if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
872 panic("Invalid lock sleep action %x\n", lck_sleep_action);
873
874 res = assert_wait_deadline(event, interruptible, deadline);
875 if (res == THREAD_WAITING) {
876 lck_rw_type = lck_rw_done(lck);
877 res = thread_block(THREAD_CONTINUE_NULL);
878 if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
879 if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
880 lck_rw_lock(lck, lck_rw_type);
881 else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
882 lck_rw_lock_exclusive(lck);
883 else
884 lck_rw_lock_shared(lck);
885 }
886 }
887 else
888 if (lck_sleep_action & LCK_SLEEP_UNLOCK)
889 (void)lck_rw_done(lck);
890
891 return res;
892 }
893
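/*
 * Routine:	host_lockgroup_info
 *
 * Returns the per-group lock counters for the host as an array of
 * lockgroup_info structures, handed back as a vm_map_copy_t.
 */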
894 kern_return_t
895 host_lockgroup_info(
896 host_t host,
897 lockgroup_info_array_t *lockgroup_infop,
898 mach_msg_type_number_t *lockgroup_infoCntp)
899 {
900 lockgroup_info_t *lockgroup_info_base;
901 lockgroup_info_t *lockgroup_info;
902 vm_offset_t lockgroup_info_addr;
903 vm_size_t lockgroup_info_size;
904 lck_grp_t *lck_grp;
905 unsigned int i;
906 vm_size_t used;
907 vm_map_copy_t copy;
908 kern_return_t kr;
909
910 if (host == HOST_NULL)
911 return KERN_INVALID_HOST;
912
913 mutex_lock(&lck_grp_lock);
914
915 lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
916 kr = kmem_alloc_pageable(ipc_kernel_map,
917 &lockgroup_info_addr, lockgroup_info_size);
918 if (kr != KERN_SUCCESS) {
919 mutex_unlock(&lck_grp_lock);
920 return(kr);
921 }
922
923 lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
924 lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
925 lockgroup_info = lockgroup_info_base;
926
927 for (i = 0; i < lck_grp_cnt; i++) {
928
929 lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
930 lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
931 lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
932 lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
933 lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
934 lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;
935
936 lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
937 lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
938 lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
939 lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
940 lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
941 lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
942 lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
943 lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
944 lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;
945
946 lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
947 lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
948 lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
949 lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
950 lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
951 lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
952 lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
953 lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
954 lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;
955
956 (void) strncpy(lockgroup_info->lockgroup_name,lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);
957
958 lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
959 lockgroup_info++;
960 }
961
962 *lockgroup_infoCntp = lck_grp_cnt;
963 mutex_unlock(&lck_grp_lock);
964
965 used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;
966
967 if (used != lockgroup_info_size)
968 bzero((char *) lockgroup_info, lockgroup_info_size - used);
969
970 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
971 (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
972 assert(kr == KERN_SUCCESS);
973
974 *lockgroup_infop = (lockgroup_info_t *) copy;
975
976 return(KERN_SUCCESS);
977 }
978
979 /*
980 * Compatibility module: legacy lock_* / mutex_* / usimple_* wrappers
981 */
982
983 extern lck_rw_t *lock_alloc_EXT( boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
984 extern void lock_done_EXT(lck_rw_t *lock);
985 extern void lock_free_EXT(lck_rw_t *lock);
986 extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
987 extern void lock_read_EXT(lck_rw_t *lock);
988 extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
989 extern void lock_write_EXT(lck_rw_t *lock);
990 extern void lock_write_to_read_EXT(lck_rw_t *lock);
991 extern wait_result_t thread_sleep_lock_write_EXT(
992 event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);
993
994 extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
995 extern void mutex_free_EXT(lck_mtx_t *mutex);
996 extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
997 extern void mutex_lock_EXT(lck_mtx_t *mutex);
998 extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
999 extern void mutex_unlock_EXT(lck_mtx_t *mutex);
1000 extern wait_result_t thread_sleep_mutex_EXT(
1001 event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
1002 extern wait_result_t thread_sleep_mutex_deadline_EXT(
1003 event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);
1004
1005 extern void usimple_lock_EXT(lck_spin_t *lock);
1006 extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
1007 extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
1008 extern void usimple_unlock_EXT(lck_spin_t *lock);
1009 extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);
1010
1011 lck_rw_t *
1012 lock_alloc_EXT(
1013 __unused boolean_t can_sleep,
1014 __unused unsigned short tag0,
1015 __unused unsigned short tag1)
1016 {
1017 return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
1018 }
1019
1020 void
1021 lock_done_EXT(
1022 lck_rw_t *lock)
1023 {
1024 (void) lck_rw_done(lock);
1025 }
1026
1027 void
1028 lock_free_EXT(
1029 lck_rw_t *lock)
1030 {
1031 lck_rw_free(lock, &LockCompatGroup);
1032 }
1033
1034 void
1035 lock_init_EXT(
1036 lck_rw_t *lock,
1037 __unused boolean_t can_sleep,
1038 __unused unsigned short tag0,
1039 __unused unsigned short tag1)
1040 {
1041 lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
1042 }
1043
1044 void
1045 lock_read_EXT(
1046 lck_rw_t *lock)
1047 {
1048 lck_rw_lock_shared( lock);
1049 }
1050
1051 boolean_t
1052 lock_read_to_write_EXT(
1053 lck_rw_t *lock)
1054 {
1055 return( lck_rw_lock_shared_to_exclusive(lock));
1056 }
1057
1058 void
1059 lock_write_EXT(
1060 lck_rw_t *lock)
1061 {
1062 lck_rw_lock_exclusive(lock);
1063 }
1064
1065 void
1066 lock_write_to_read_EXT(
1067 lck_rw_t *lock)
1068 {
1069 lck_rw_lock_exclusive_to_shared(lock);
1070 }
1071
1072 wait_result_t
1073 thread_sleep_lock_write_EXT(
1074 event_t event,
1075 lck_rw_t *lock,
1076 wait_interrupt_t interruptible)
1077 {
1078 return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
1079 }
1080
1081 lck_mtx_t *
1082 mutex_alloc_EXT(
1083 __unused unsigned short tag)
1084 {
1085 return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
1086 }
1087
1088 void
1089 mutex_free_EXT(
1090 lck_mtx_t *mutex)
1091 {
1092 lck_mtx_free(mutex, &LockCompatGroup);
1093 }
1094
1095 void
1096 mutex_init_EXT(
1097 lck_mtx_t *mutex,
1098 __unused unsigned short tag)
1099 {
1100 lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
1101 }
1102
1103 void
1104 mutex_lock_EXT(
1105 lck_mtx_t *mutex)
1106 {
1107 lck_mtx_lock(mutex);
1108 }
1109
1110 boolean_t
1111 mutex_try_EXT(
1112 lck_mtx_t *mutex)
1113 {
1114 return(lck_mtx_try_lock(mutex));
1115 }
1116
1117 void
1118 mutex_unlock_EXT(
1119 lck_mtx_t *mutex)
1120 {
1121 lck_mtx_unlock(mutex);
1122 }
1123
1124 wait_result_t
1125 thread_sleep_mutex_EXT(
1126 event_t event,
1127 lck_mtx_t *mutex,
1128 wait_interrupt_t interruptible)
1129 {
1130 return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
1131 }
1132
1133 wait_result_t
1134 thread_sleep_mutex_deadline_EXT(
1135 event_t event,
1136 lck_mtx_t *mutex,
1137 uint64_t deadline,
1138 wait_interrupt_t interruptible)
1139 {
1140 return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
1141 }
1142
1143 void
1144 usimple_lock_EXT(
1145 lck_spin_t *lock)
1146 {
1147 lck_spin_lock(lock);
1148 }
1149
1150 void
1151 usimple_lock_init_EXT(
1152 lck_spin_t *lock,
1153 __unused unsigned short tag)
1154 {
1155 lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
1156 }
1157
1158 unsigned int
1159 usimple_lock_try_EXT(
1160 lck_spin_t *lock)
1161 {
1162 return(lck_spin_try_lock(lock));
1163 }
1164
1165 void
1166 usimple_unlock_EXT(
1167 lck_spin_t *lock)
1168 {
1169 lck_spin_unlock(lock);
1170 }
1171
1172 wait_result_t
1173 thread_sleep_usimple_lock_EXT(
1174 event_t event,
1175 lck_spin_t *lock,
1176 wait_interrupt_t interruptible)
1177 {
1178 return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
1179 }