/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>


#include <sys/kdebug.h>

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3


static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_mutex_data(static,lck_grp_lock)

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;

/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}

/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}


/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	mutex_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	mutex_unlock(&lck_grp_lock);
}
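
/*
 * Typical usage (an illustrative sketch; "my_grp" and "my_mtx" are
 * hypothetical names): a subsystem allocates one group for its locks
 * and passes it to each lock it creates, so that the locks are
 * accounted together in lockgroup statistics.
 *
 *	lck_grp_t	*my_grp;
 *	lck_mtx_t	*my_mtx;
 *
 *	my_grp = lck_grp_alloc_init("com.example.subsystem", LCK_GRP_ATTR_NULL);
 *	my_mtx = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 */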


/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	mutex_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	mutex_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}


/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}


/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if !DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif
}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}
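
/*
 * Typical usage (an illustrative sketch; "my_mtx" and "my_grp" are
 * hypothetical): allocate an attribute, opt one lock into debug
 * checking, then free the attribute -- the lock captures the attribute
 * value at initialization time.
 *
 *	lck_attr_t	*my_attr = lck_attr_alloc_init();
 *
 *	lck_attr_setdebug(my_attr);
 *	lck_mtx_init(&my_mtx, my_grp, my_attr);
 *	lck_attr_free(my_attr);
 */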


/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
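
/*
 * Illustrative caller (hypothetical names): the spin lock must be held
 * on entry; unless LCK_SLEEP_UNLOCK is passed, it is held again when
 * the routine returns, whether or not the thread actually blocked.
 *
 *	lck_spin_lock(&my_spin);
 *	while (!my_condition)
 *		(void) lck_spin_sleep(&my_spin, LCK_SLEEP_DEFAULT,
 *				      (event_t)&my_condition, THREAD_UNINT);
 *	lck_spin_unlock(&my_spin);
 */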


/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
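
/*
 * Illustrative condition wait (hypothetical names).  The waking side
 * would set my_flag under the same mutex and call
 * thread_wakeup((event_t)&my_flag).
 *
 *	lck_mtx_lock(&my_mtx);
 *	while (!my_flag)
 *		(void) lck_mtx_sleep(&my_mtx, LCK_SLEEP_DEFAULT,
 *				     (event_t)&my_flag, THREAD_UNINT);
 *	lck_mtx_unlock(&my_mtx);
 */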


/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}

/*
 * Routine: 	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t		*lck,
	thread_t		holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();

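	/*
	 * A statistics-gathering mutex is "indirect": the lck_mtx_t the
	 * caller passes is only a shell tagged LCK_MTX_TAG_INDIRECT whose
	 * lck_mtx_ptr leads to the extended structure embedding the real
	 * lock.
	 */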
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	if (holder->priority < MINPRI_KERNEL) {
		holder->sched_mode |= TH_MODE_PROMOTED;
		if (	mutex->lck_mtx_pri < priority	&&
			holder->sched_pri < priority	) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
				holder->sched_pri, priority, (int)holder, (int)lck, 0);

			set_sched_pri(holder, priority);
		}
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

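	/*
	 * Sleep on the last word of the lock structure; it serves as the
	 * wait event for this mutex, and lck_mtx_unlock_wakeup() posts to
	 * the same address.
	 */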
	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine: 	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t		*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, priority, 0, (int)lck, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}

/*
 * Routine: 	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t		*lck,
	thread_t		holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0	&&
			(thread->sched_mode & TH_MODE_PROMOTED)	) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, thread->priority,
						0, (int)lck, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	assert(mutex->lck_mtx_waiters > 0);
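	/* Wake a single waiter; the event address matches the one slept on in lck_mtx_lock_wait(). */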
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */

void
mutex_pause(void)
{
	wait_result_t	wait_result;

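	/* Block for one millisecond; timing out, not being woken, is the expected exit path. */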
	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
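
/*
 * On wakeup the lock is re-taken in whatever mode lck_rw_done() reported
 * unless the caller forces a mode with LCK_SLEEP_SHARED or
 * LCK_SLEEP_EXCLUSIVE, e.g. (hypothetical names):
 *
 *	lck_rw_lock_shared(&my_rw);
 *	while (!my_cond)
 *		(void) lck_rw_sleep(&my_rw, LCK_SLEEP_DEFAULT,
 *				    (event_t)&my_cond, THREAD_UNINT);
 *	lck_rw_done(&my_rw);
 */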


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}

kern_return_t
host_lockgroup_info(
	host_t			host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	mutex_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		mutex_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	mutex_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

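	/*
	 * Hand the buffer back out of line: wrap it in a vm_map_copy so
	 * that MIG can move it into the calling task's address space.
	 */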
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}

/*
 * Compatibility module
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
			event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
extern void mutex_free_EXT(lck_mtx_t *mutex);
extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
extern void mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t thread_sleep_mutex_EXT(
			event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t thread_sleep_mutex_deadline_EXT(
			event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return(lck_rw_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t		*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared(lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return(lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t		*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
	lck_mtx_t	*mutex)
{
	return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t		*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return lck_spin_try_lock(lock);
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}