/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>


#include <sys/kdebug.h>

#if	CONFIG_DTRACE
/*
 * We need only enough declarations from the BSD-side to be able to
 * test if our probe is active, and to call __dtrace_probe().  Setting
 * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
 */
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
#endif

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3


static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_lck_mtx_data(static,lck_grp_lock)
static lck_mtx_ext_t lck_grp_lock_ext;

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;

/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	/*
	 * Obtain the "lcks" boot-arg options: this currently controls
	 * lock statistics and lock debugging.
	 */
	if (!PE_parse_boot_argn("lcks", &LcksOpts, sizeof (LcksOpts)))
		LcksOpts = 0;

	queue_init(&lck_grp_queue);

	/*
	 * Need to bootstrap the LockCompatGroup instead of calling lck_grp_init() here. This avoids
	 * grabbing the lck_grp_lock before it is initialized.
	 */

	bzero(&LockCompatGroup, sizeof(lck_grp_t));
	(void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);

	if (LcksOpts & enaLkStat)
		LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;

	LockCompatGroup.lck_grp_refcnt = 1;

	enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
	lck_grp_cnt = 1;

	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_attr_setdefault(&LockDefaultLckAttr);

	lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);

}

/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}
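
/*
 * Illustrative sketch (not part of this file): the typical client
 * pattern for the routines above -- allocate a group once per
 * subsystem, then hang locks off it. The "my_*" names are hypothetical.
 *
 *	static lck_grp_t	*my_grp;
 *	static lck_mtx_t	*my_mtx;
 *
 *	void
 *	my_subsystem_init(void)
 *	{
 *		my_grp = lck_grp_alloc_init("my_subsystem", LCK_GRP_ATTR_NULL);
 *		my_mtx = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 *	}
 */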


/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	lck_mtx_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	lck_mtx_unlock(&lck_grp_lock);

}


/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	lck_mtx_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	lck_mtx_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}


/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add(&grp->lck_grp_refcnt, 1);
}


/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add(lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub(lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if __i386__ || __x86_64__
#if	!DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif	/* !DEBUG */
#else
#error Unknown architecture.
#endif	/* __i386__ || __x86_64__ */
}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG);
}

/*
 * Routine:	lck_attr_cleardebug
 */
void
lck_attr_cleardebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_and(&attr->lck_attr_val, ~LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_rw_shared_priority
 */
void
lck_attr_rw_shared_priority(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}
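
/*
 * Illustrative sketch (not part of this file): building a customized
 * attribute with the routines above. The lock copies the attribute
 * values at init time, so the attribute can be freed afterwards.
 * "my_grp" is hypothetical.
 *
 *	lck_attr_t *attr = lck_attr_alloc_init();
 *	lck_attr_setdebug(attr);		// force debug checking on this lock
 *	lck_mtx_t *m = lck_mtx_alloc_init(my_grp, attr);
 *	lck_attr_free(attr);
 */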


/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
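
/*
 * Illustrative sketch (not part of this file): the condition-wait
 * pattern lck_spin_sleep() supports. With LCK_SLEEP_DEFAULT the spin
 * lock is held again on return. Names are hypothetical.
 *
 *	lck_spin_lock(&my_spin);
 *	while (!my_condition)
 *		(void) lck_spin_sleep(&my_spin, LCK_SLEEP_DEFAULT,
 *		    (event_t) &my_condition, THREAD_UNINT);
 *	lck_spin_unlock(&my_spin);
 */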


/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if ((lck_sleep_action & LCK_SLEEP_SPIN))
				lck_mtx_lock_spin(lck);
			else
				lck_mtx_lock(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
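
/*
 * Illustrative sketch (not part of this file): waiter/waker pairing
 * for lck_mtx_sleep(). The waker updates the condition under the same
 * mutex and posts the same event. Names are hypothetical.
 *
 *	// waiter
 *	lck_mtx_lock(&my_mtx);
 *	while (!work_ready)
 *		(void) lck_mtx_sleep(&my_mtx, LCK_SLEEP_DEFAULT,
 *		    (event_t) &work_ready, THREAD_UNINT);
 *	lck_mtx_unlock(&my_mtx);
 *
 *	// waker
 *	lck_mtx_lock(&my_mtx);
 *	work_ready = TRUE;
 *	thread_wakeup((event_t) &work_ready);
 *	lck_mtx_unlock(&my_mtx);
 */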


/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if ((lck_sleep_action & LCK_SLEEP_SPIN))
				lck_mtx_lock_spin(lck);
			else
				lck_mtx_lock(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}

/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();
#if	CONFIG_DTRACE
	uint64_t	sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
#endif

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	holder->sched_flags |= TH_SFLAG_PROMOTED;
	if (mutex->lck_mtx_pri < priority &&
	    holder->sched_pri < priority) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
				holder->sched_pri, priority, holder, lck, 0);
		/* This can potentially elevate the holder into the realtime
		 * priority band; the implementation in locks_i386.c enforces a
		 * MAXPRI_KERNEL ceiling.
		 */
		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

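	/*
	 * The wait event is the address of the last 32-bit word of the
	 * mutex; lck_mtx_unlock_wakeup() and lck_mtx_unlockspin_wakeup()
	 * below compute the same event from the same lck_mtx_t pointer.
	 */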
	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if	CONFIG_DTRACE
	/*
	 * Record the DTrace lockstat probe for blocking; block time is
	 * measured from when this routine was entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
#endif
}

/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		thread->sched_flags |= TH_SFLAG_PROMOTED;
		if (thread->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, priority, 0, lck, 0);

			set_sched_pri(thread, priority);
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

#if CONFIG_DTRACE
	if (lockstat_probemap[LS_LCK_MTX_LOCK_ACQUIRE] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_ACQUIRE]) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lck, 0);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, lck, 0);
		}
	}
#endif
	return (mutex->lck_mtx_waiters);
}

/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	assert(mutex->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (--thread->promotions == 0 &&
		    (thread->sched_flags & TH_SFLAG_PROMOTED)) {
			thread->sched_flags &= ~TH_SFLAG_PROMOTED;
			if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						  thread->sched_pri, DEPRESSPRI, 0, lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
							  DBG_FUNC_NONE,
							  thread->sched_pri, thread->priority,
							  0, lck, 0);
				}

				SCHED(compute_priority)(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

void
lck_mtx_unlockspin_wakeup (
	lck_mtx_t	*lck)
{
	assert(lck->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, (int)lck, 0, 0, 1, 0);
#if CONFIG_DTRACE
	/*
	 * When there are waiters, we skip the hot-patch spot in the
	 * fastpath, so we record it here.
	 */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck, 0);
#endif
}


/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */
#define MAX_COLLISION_COUNTS	32
#define MAX_COLLISION		8

unsigned int max_collision_count[MAX_COLLISION_COUNTS];

uint32_t collision_backoffs[MAX_COLLISION] = {
	10, 50, 100, 200, 400, 600, 800, 1000
};


void
mutex_pause(uint32_t collisions)
{
	wait_result_t	wait_result;
	uint32_t	back_off;

	if (collisions >= MAX_COLLISION_COUNTS)
		collisions = MAX_COLLISION_COUNTS - 1;
	max_collision_count[collisions]++;

	if (collisions >= MAX_COLLISION)
		collisions = MAX_COLLISION - 1;
	back_off = collision_backoffs[collisions];

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}
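
/*
 * Illustrative sketch (not part of this file): how the table above maps
 * a collision count to a timed back-off. A caller that has collided
 * three times sleeps for collision_backoffs[3] == 200us; counts past
 * MAX_COLLISION - 1 clamp to the 1000us entry. "my_mtx" is hypothetical.
 *
 *	uint32_t retries = 0;
 *	while (!lck_mtx_try_lock(&my_mtx))
 *		mutex_pause(retries++);	// 10us, 50us, 100us, ..., 1000us
 */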


unsigned int mutex_yield_wait = 0;
unsigned int mutex_yield_no_wait = 0;

void
lck_mtx_yield(
	lck_mtx_t	*lck)
{
	int	waiters;

#if DEBUG
	lck_mtx_assert(lck, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

	if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
		waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters;
	else
		waiters = lck->lck_mtx_waiters;

	if (!waiters) {
		mutex_yield_no_wait++;
	} else {
		mutex_yield_wait++;
		lck_mtx_unlock(lck);
		mutex_pause(0);
		lck_mtx_lock(lck);
	}
}
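
/*
 * Illustrative sketch (not part of this file): a long-running scan
 * yielding a contended mutex. Because the yield may drop the lock, any
 * state derived under it must be revalidated afterwards. Names are
 * hypothetical.
 *
 *	lck_mtx_lock(&my_mtx);
 *	while (more_work_under(&my_mtx)) {
 *		do_one_unit();
 *		lck_mtx_yield(&my_mtx);	// drops/re-takes only if waiters
 *	}
 *	lck_mtx_unlock(&my_mtx);
 */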


/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
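
/*
 * Illustrative sketch (not part of this file): sleeping off an RW lock
 * and reacquiring it exclusive regardless of how it was first held.
 * Names are hypothetical.
 *
 *	lck_rw_lock_shared(&my_rw);
 *	while (!my_condition)
 *		(void) lck_rw_sleep(&my_rw, LCK_SLEEP_EXCLUSIVE,
 *		    (event_t) &my_condition, THREAD_UNINT);
 *	// my_rw is now held exclusive
 *	lck_rw_done(&my_rw);
 */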


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}

/*
 * Reader-writer lock promotion
 *
 * We support a limited form of reader-writer
 * lock promotion whose effects are:
 *
 *   * Qualifying threads have decay disabled
 *   * Scheduler priority is reset to a floor of
 *     their statically assigned priority
 *     or BASEPRI_BACKGROUND
 *
 * The rationale is that lck_rw_ts do not have
 * a single owner, so we cannot apply a directed
 * priority boost from all waiting threads
 * to all holding threads without maintaining
 * lists of all shared owners and all waiting
 * threads for every lock.
 *
 * Instead (and to preserve the uncontended fast-
 * path), acquiring (or attempting to acquire)
 * a RW lock in shared or exclusive mode increments
 * a per-thread counter. Only if that thread stops
 * making forward progress (for instance blocking
 * on a mutex, or being preempted) do we consult
 * the counter and apply the priority floor.
 * When the thread becomes runnable again (or in
 * the case of preemption it never stopped being
 * runnable), it has the priority boost and should
 * be in a good position to run on the CPU and
 * release all RW locks (at which point the priority
 * boost is cleared).
 *
 * Care must be taken to ensure that priority
 * boosts are not retained indefinitely, since unlike
 * mutex priority boosts (where the boost is tied
 * to the mutex lifecycle), the boost is tied
 * to the thread and independent of any particular
 * lck_rw_t. Assertions are in place on return
 * to userspace so that the boost is not held
 * indefinitely.
 *
 * The routines that increment/decrement the
 * per-thread counter should err on the side of
 * incrementing any time a preemption is possible
 * and the lock would be visible to the rest of the
 * system as held (so it should be incremented before
 * interlocks are dropped/preemption is enabled, or
 * before a CAS is executed to acquire the lock).
 */
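
/*
 * Illustrative sketch (not part of this file) of the counter discipline
 * described above. The real increments/decrements live in the
 * per-architecture lock paths (e.g. locks_i386.c); the acquire/release
 * steps here are hypothetical placeholders.
 *
 *	thread_t t = current_thread();
 *
 *	t->rwlock_count++;		// before the lock is visibly held
 *	if (try_to_take_rw_lock(lck)) {	// hypothetical acquisition step
 *		...			// critical section
 *		release_rw_lock(lck);	// hypothetical release step
 *		if (--t->rwlock_count == 0 &&
 *		    (t->sched_flags & TH_SFLAG_RW_PROMOTED))
 *			lck_rw_clear_promotion(t);
 *	} else {
 *		t->rwlock_count--;	// acquisition failed; undo
 *	}
 */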

/*
 * lck_rw_clear_promotion: Undo priority promotions when the last RW
 * lock is released by a thread (if a promotion was active)
 */
void lck_rw_clear_promotion(thread_t thread)
{
	assert(thread->rwlock_count == 0);

	/* Cancel any promotions if the thread had actually blocked while holding a RW lock */
	spl_t s = splsched();

	thread_lock(thread);

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		thread->sched_flags &= ~TH_SFLAG_RW_PROMOTED;

		if (thread->sched_flags & TH_SFLAG_PROMOTED) {
			/* Thread still has a mutex promotion */
		} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
					      thread->sched_pri, DEPRESSPRI, 0, 0, 0);

			set_sched_pri(thread, DEPRESSPRI);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
					      thread->sched_pri, thread->priority, 0, 0, 0);

			SCHED(compute_priority)(thread, FALSE);
		}
	}

	thread_unlock(thread);
	splx(s);
}

kern_return_t
host_lockgroup_info(
	host_t				host,
	lockgroup_info_array_t		*lockgroup_infop,
	mach_msg_type_number_t		*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	lck_mtx_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	lck_mtx_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
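
/*
 * Illustrative sketch (not part of this file): a user-space caller of
 * the MIG routine above, assuming the standard mach_debug interfaces.
 * The out-of-line array must be deallocated by the receiver.
 *
 *	lockgroup_info_array_t	info;
 *	mach_msg_type_number_t	count;
 *
 *	if (host_lockgroup_info(mach_host_self(), &info, &count) == KERN_SUCCESS) {
 *		for (unsigned int i = 0; i < count; i++)
 *			printf("%s: %u mutexes\n",
 *			    info[i].lockgroup_name, info[i].lock_mtx_cnt);
 *		(void) vm_deallocate(mach_task_self(), (vm_address_t) info,
 *		    count * sizeof (lockgroup_info_t));
 *	}
 */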

/*
 * Compatibility module
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
	event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);


lck_mtx_t* mutex_alloc_EXT(__unused unsigned short tag);
void mutex_free_EXT(lck_mtx_t *mutex);
void mutex_init_EXT(lck_mtx_t *mutex, __unused unsigned short tag);
wait_result_t thread_sleep_mutex_EXT(event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
wait_result_t thread_sleep_mutex_deadline_EXT(event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t		*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared( lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return( lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t		*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t		*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}