/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>

#include <sys/kdebug.h>

#if	CONFIG_DTRACE
/*
 * We need only enough declarations from the BSD-side to be able to
 * test if our probe is active, and to call __dtrace_probe().  Setting
 * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in.
 */
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
#endif

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3

static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_lck_mtx_data(static,lck_grp_lock)
static lck_mtx_ext_t	lck_grp_lock_ext;

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;

/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);

	/*
	 * Need to bootstrap the LockCompatGroup instead of calling lck_grp_init() here. This avoids
	 * grabbing the lck_grp_lock before it is initialized.
	 */

	bzero(&LockCompatGroup, sizeof(lck_grp_t));
	(void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);

	if (LcksOpts & enaLkStat)
		LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;

	LockCompatGroup.lck_grp_refcnt = 1;

	enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
	lck_grp_cnt = 1;

	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_attr_setdefault(&LockDefaultLckAttr);

	lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);
}

/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}


/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	lck_mtx_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	lck_mtx_unlock(&lck_grp_lock);
}
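
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * typical client usage of the group APIs above.  A subsystem allocates a
 * group once, then creates its locks within that group so they are
 * accounted for together.  The names "my_grp_attr", "my_grp" and
 * "my_lock" are hypothetical; lck_mtx_alloc_init()/lck_mtx_free() are the
 * standard lck_mtx allocation calls declared in <kern/locks.h>.
 *
 *	lck_grp_attr_t	*my_grp_attr = lck_grp_attr_alloc_init();
 *	lck_grp_t	*my_grp = lck_grp_alloc_init("com.example.driver", my_grp_attr);
 *	lck_mtx_t	*my_lock = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 *
 *	lck_mtx_lock(my_lock);
 *	... critical section ...
 *	lck_mtx_unlock(my_lock);
 *
 *	lck_mtx_free(my_lock, my_grp);
 *	lck_grp_free(my_grp);
 *	lck_grp_attr_free(my_grp_attr);
 */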


/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	lck_mtx_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	lck_mtx_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}


/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add(&grp->lck_grp_refcnt, 1);
}


/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add(lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub(lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if !DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif	/* !DEBUG */
}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG);
}

/*
 * Routine:	lck_attr_cleardebug
 */
void
lck_attr_cleardebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_and(&attr->lck_attr_val, ~LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_rw_shared_priority
 */
void
lck_attr_rw_shared_priority(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}


/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
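
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * waiting for a flag protected by a spin lock.  lck_spin_sleep() drops
 * the lock, blocks on the event, and retakes the lock on wakeup unless
 * LCK_SLEEP_UNLOCK is passed, so the condition must be rechecked in a
 * loop.  "my_lock" and "my_flag" are hypothetical.
 *
 *	lck_spin_lock(&my_lock);
 *	while (!my_flag)
 *		(void) lck_spin_sleep(&my_lock, LCK_SLEEP_DEFAULT,
 *		    (event_t)&my_flag, THREAD_UNINT);
 *	... my_flag is set and my_lock is held again here ...
 *	lck_spin_unlock(&my_lock);
 */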

/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if ((lck_sleep_action & LCK_SLEEP_SPIN))
				lck_mtx_lock_spin(lck);
			else
				lck_mtx_lock(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
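
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the classic condition-wait loop built on lck_mtx_sleep().  Because
 * the mutex is dropped while blocked and retaken on wakeup, the
 * condition must be rechecked in a loop.  "my_mtx" and "my_cond" are
 * hypothetical.
 *
 *	lck_mtx_lock(&my_mtx);
 *	while (!my_cond)
 *		(void) lck_mtx_sleep(&my_mtx, LCK_SLEEP_DEFAULT,
 *		    (event_t)&my_cond, THREAD_UNINT);
 *	... consume the condition with my_mtx held ...
 *	lck_mtx_unlock(&my_mtx);
 *
 * The waking thread sets my_cond under the mutex and calls
 * thread_wakeup((event_t)&my_cond).
 */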

/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}

/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();
#if	CONFIG_DTRACE
	uint64_t	sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
#endif

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	holder->sched_mode |= TH_MODE_PROMOTED;
	if (mutex->lck_mtx_pri < priority &&
	    holder->sched_pri < priority) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
			holder->sched_pri, priority, holder, lck, 0);

		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if	CONFIG_DTRACE
	/*
	 * Record the DTrace lockstat probe for blocking, block time
	 * measured from when we were entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
#endif
}

/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		thread->sched_mode |= TH_MODE_PROMOTED;
		if (thread->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
				thread->sched_pri, priority, 0, lck, 0);

			set_sched_pri(thread, priority);
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}

/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (--thread->promotions == 0 &&
		    (thread->sched_mode & TH_MODE_PROMOTED)) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, DEPRESSPRI, 0, lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, thread->priority, 0, lck, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	assert(mutex->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

void
lck_mtx_unlockspin_wakeup (
	lck_mtx_t	*lck)
{
	assert(lck->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, (int)lck, 0, 0, 1, 0);
#if CONFIG_DTRACE
	/*
	 * When there are waiters, we skip the hot-patch spot in the
	 * fastpath, so we record it here.
	 */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck, 0);
#endif
}


/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */
#define MAX_COLLISION_COUNTS	32
#define MAX_COLLISION		8

unsigned int max_collision_count[MAX_COLLISION_COUNTS];

uint32_t collision_backoffs[MAX_COLLISION] = {
	10, 50, 100, 200, 400, 600, 800, 1000
};


void
mutex_pause(uint32_t collisions)
{
	wait_result_t	wait_result;
	uint32_t	back_off;

	if (collisions >= MAX_COLLISION_COUNTS)
		collisions = MAX_COLLISION_COUNTS - 1;
	max_collision_count[collisions]++;

	if (collisions >= MAX_COLLISION)
		collisions = MAX_COLLISION - 1;
	back_off = collision_backoffs[collisions];

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}
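
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a try-lock retry loop that backs off with mutex_pause().  The sleep
 * grows with the collision count, from 10 usec up to 1000 usec per the
 * collision_backoffs table above, and counts are capped at the table
 * bounds.  "my_mtx" is hypothetical.
 *
 *	uint32_t collisions = 0;
 *
 *	while (!lck_mtx_try_lock(&my_mtx))
 *		mutex_pause(collisions++);
 *	... my_mtx held ...
 *	lck_mtx_unlock(&my_mtx);
 */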

unsigned int mutex_yield_wait = 0;
unsigned int mutex_yield_no_wait = 0;

void
lck_mtx_yield(
	lck_mtx_t	*lck)
{
	int	waiters;

#if DEBUG
	lck_mtx_assert(lck, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

	if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
		waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters;
	else
		waiters = lck->lck_mtx_waiters;

	if (!waiters) {
		mutex_yield_no_wait++;
	} else {
		mutex_yield_wait++;
		lck_mtx_unlock(lck);
		mutex_pause(0);
		lck_mtx_lock(lck);
	}
}
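
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a long-running scan holding a mutex can call lck_mtx_yield() each
 * iteration; the lock is dropped and retaken only when waiters exist.
 * Because the lock may be released, any state guarded by it (such as
 * list linkage) must be revalidated after the call.  "my_mtx" and the
 * list fields are hypothetical.
 *
 *	lck_mtx_lock(&my_mtx);
 *	for (elt = my_list_head; elt != NULL; elt = elt->next) {
 *		... process elt, tolerating a dropped/retaken my_mtx ...
 *		lck_mtx_yield(&my_mtx);
 *	}
 *	lck_mtx_unlock(&my_mtx);
 */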


/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
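
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * sleeping while holding a read-write lock.  lck_rw_done() reports
 * whether the lock was held shared or exclusive, and by default the
 * same mode is retaken on wakeup; LCK_SLEEP_SHARED or
 * LCK_SLEEP_EXCLUSIVE forces a specific mode instead.  "my_rw" and
 * "my_cond" are hypothetical.
 *
 *	lck_rw_lock_exclusive(&my_rw);
 *	while (!my_cond)
 *		(void) lck_rw_sleep(&my_rw, LCK_SLEEP_EXCLUSIVE,
 *		    (event_t)&my_cond, THREAD_UNINT);
 *	... my_cond is set and my_rw is held exclusive here ...
 *	lck_rw_unlock_exclusive(&my_rw);
 */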


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}

kern_return_t
host_lockgroup_info(
	host_t			host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	lck_mtx_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	lck_mtx_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
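
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * querying this interface from user space through the mach_debug MIG
 * routine.  The exact field types follow <mach_debug/lockgroup_info.h>;
 * the printf formatting here is an assumption for illustration.
 *
 *	lockgroup_info_array_t	info;
 *	mach_msg_type_number_t	count;
 *
 *	if (host_lockgroup_info(mach_host_self(), &info, &count) == KERN_SUCCESS) {
 *		for (mach_msg_type_number_t i = 0; i < count; i++)
 *			printf("%s: mtx util %llu\n",
 *			    info[i].lockgroup_name,
 *			    (unsigned long long)info[i].lock_mtx_util_cnt);
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *		    count * sizeof(info[0]));
 *	}
 */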

/*
 * Compatibility module
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
	event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);


lck_mtx_t*	mutex_alloc_EXT(__unused unsigned short tag);
void		mutex_free_EXT(lck_mtx_t *mutex);
void		mutex_init_EXT(lck_mtx_t *mutex, __unused unsigned short tag);
wait_result_t	thread_sleep_mutex_EXT(event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
wait_result_t	thread_sleep_mutex_deadline_EXT(event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t	*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared( lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return( lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t	*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t	*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}