/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>

#include <sys/kdebug.h>

#define LCK_MTX_SLEEP_CODE          0
#define LCK_MTX_SLEEP_DEADLINE_CODE 1
#define LCK_MTX_LCK_WAIT_CODE       2
#define LCK_MTX_UNLCK_WAKEUP_CODE   3

static queue_head_t lck_grp_queue;
static unsigned int lck_grp_cnt;

decl_mutex_data(static,lck_grp_lock)

lck_grp_attr_t LockDefaultGroupAttr;
lck_grp_t      LockCompatGroup;
lck_attr_t     LockDefaultLckAttr;

/*
 * Routine: lck_mod_init
 */

void
lck_mod_init(
    void)
{
    queue_init(&lck_grp_queue);
    mutex_init(&lck_grp_lock, 0);
    lck_grp_cnt = 0;
    lck_grp_attr_setdefault(&LockDefaultGroupAttr);
    lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
    lck_attr_setdefault(&LockDefaultLckAttr);
}

/*
 * Routine: lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
    void)
{
    lck_grp_attr_t *attr;

    if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
        lck_grp_attr_setdefault(attr);

    return(attr);
}

/*
 * Routine: lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
    lck_grp_attr_t *attr)
{
    if (LcksOpts & enaLkStat)
        attr->grp_attr_val = LCK_GRP_ATTR_STAT;
    else
        attr->grp_attr_val = 0;
}

/*
 * Routine: lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
    lck_grp_attr_t *attr)
{
    (void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}

/*
 * Routine: lck_grp_attr_free
 */

void
lck_grp_attr_free(
    lck_grp_attr_t *attr)
{
    kfree(attr, sizeof(lck_grp_attr_t));
}

/*
 * Routine: lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
    const char     *grp_name,
    lck_grp_attr_t *attr)
{
    lck_grp_t *grp;

    if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
        lck_grp_init(grp, grp_name, attr);

    return(grp);
}
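
/*
 * Usage sketch (added for illustration, not part of the original source):
 * a typical client allocates a group and group attribute once at init
 * time, then carves its locks out of that group.  lck_mtx_alloc_init()
 * is part of the same lock package (see mutex_alloc_EXT() below); the
 * names "my_grp_attr", "my_grp", and "my_mtx" are hypothetical.
 *
 *  lck_grp_attr_t *my_grp_attr = lck_grp_attr_alloc_init();
 *  lck_grp_t      *my_grp      = lck_grp_alloc_init("my subsystem", my_grp_attr);
 *  lck_mtx_t      *my_mtx      = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 */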


/*
 * Routine: lck_grp_init
 */

void
lck_grp_init(
    lck_grp_t      *grp,
    const char     *grp_name,
    lck_grp_attr_t *attr)
{
    bzero((void *)grp, sizeof(lck_grp_t));

    (void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

    if (attr != LCK_GRP_ATTR_NULL)
        grp->lck_grp_attr = attr->grp_attr_val;
    else if (LcksOpts & enaLkStat)
        grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
    else
        grp->lck_grp_attr = LCK_ATTR_NONE;

    grp->lck_grp_refcnt = 1;

    mutex_lock(&lck_grp_lock);
    enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
    lck_grp_cnt++;
    mutex_unlock(&lck_grp_lock);
}

/*
 * Routine: lck_grp_free
 */

void
lck_grp_free(
    lck_grp_t *grp)
{
    mutex_lock(&lck_grp_lock);
    lck_grp_cnt--;
    (void)remque((queue_entry_t)grp);
    mutex_unlock(&lck_grp_lock);
    lck_grp_deallocate(grp);
}

/*
 * Routine: lck_grp_reference
 */

void
lck_grp_reference(
    lck_grp_t *grp)
{
    (void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}

/*
 * Routine: lck_grp_deallocate
 */

void
lck_grp_deallocate(
    lck_grp_t *grp)
{
    if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
        kfree(grp, sizeof(lck_grp_t));
}
/*
 * Routine: lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
    lck_grp_t  *grp,
    lck_type_t lck_type)
{
    unsigned int *lckcnt;

    switch (lck_type) {
    case LCK_TYPE_SPIN:
        lckcnt = &grp->lck_grp_spincnt;
        break;
    case LCK_TYPE_MTX:
        lckcnt = &grp->lck_grp_mtxcnt;
        break;
    case LCK_TYPE_RW:
        lckcnt = &grp->lck_grp_rwcnt;
        break;
    default:
        panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
        return;
    }

    (void)hw_atomic_add((uint32_t *)lckcnt, 1);
}

/*
 * Routine: lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
    lck_grp_t  *grp,
    lck_type_t lck_type)
{
    unsigned int *lckcnt;

    switch (lck_type) {
    case LCK_TYPE_SPIN:
        lckcnt = &grp->lck_grp_spincnt;
        break;
    case LCK_TYPE_MTX:
        lckcnt = &grp->lck_grp_mtxcnt;
        break;
    case LCK_TYPE_RW:
        lckcnt = &grp->lck_grp_rwcnt;
        break;
    default:
        panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
        return;
    }

    (void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}

/*
 * Routine: lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
    void)
{
    lck_attr_t *attr;

    if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
        lck_attr_setdefault(attr);

    return(attr);
}

/*
 * Routine: lck_attr_setdefault
 */

void
lck_attr_setdefault(
    lck_attr_t *attr)
{
#if !DEBUG
    if (LcksOpts & enaLkDeb)
        attr->lck_attr_val = LCK_ATTR_DEBUG;
    else
        attr->lck_attr_val = LCK_ATTR_NONE;
#else
    attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif
}

/*
 * Routine: lck_attr_setdebug
 */
void
lck_attr_setdebug(
    lck_attr_t *attr)
{
    (void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}

/*
 * Routine: lck_attr_rw_shared_priority
 */
void
lck_attr_rw_shared_priority(
    lck_attr_t *attr)
{
    (void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
}

/*
 * Routine: lck_attr_free
 */
void
lck_attr_free(
    lck_attr_t *attr)
{
    kfree(attr, sizeof(lck_attr_t));
}


/*
 * Routine: lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
    lck_spin_t         *lck,
    lck_sleep_action_t lck_sleep_action,
    event_t            event,
    wait_interrupt_t   interruptible)
{
    wait_result_t res;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lck_spin_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_spin_lock(lck);
    }
    else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_spin_unlock(lck);

    return res;
}

/*
 * Routine: lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
    lck_spin_t         *lck,
    lck_sleep_action_t lck_sleep_action,
    event_t            event,
    wait_interrupt_t   interruptible,
    uint64_t           deadline)
{
    wait_result_t res;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait_deadline(event, interruptible, deadline);
    if (res == THREAD_WAITING) {
        lck_spin_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_spin_lock(lck);
    }
    else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_spin_unlock(lck);

    return res;
}

/*
 * Routine: lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
    lck_mtx_t          *lck,
    lck_sleep_action_t lck_sleep_action,
    event_t            event,
    wait_interrupt_t   interruptible)
{
    wait_result_t res;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
                 (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lck_mtx_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_mtx_lock(lck);
    }
    else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_mtx_unlock(lck);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

    return res;
}
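
/*
 * Usage sketch (added for illustration): the classic condition-wait loop.
 * The caller holds the mutex, re-checks its predicate after every wakeup,
 * and lck_mtx_sleep() atomically drops and (with LCK_SLEEP_DEFAULT)
 * re-acquires the mutex around the block.  "my_mtx" and "my_flag" are
 * hypothetical names.
 *
 *  lck_mtx_lock(my_mtx);
 *  while (!my_flag)
 *      (void) lck_mtx_sleep(my_mtx, LCK_SLEEP_DEFAULT,
 *                           (event_t)&my_flag, THREAD_UNINT);
 *  lck_mtx_unlock(my_mtx);
 */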


/*
 * Routine: lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
    lck_mtx_t          *lck,
    lck_sleep_action_t lck_sleep_action,
    event_t            event,
    wait_interrupt_t   interruptible,
    uint64_t           deadline)
{
    wait_result_t res;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
                 (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait_deadline(event, interruptible, deadline);
    if (res == THREAD_WAITING) {
        lck_mtx_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_mtx_lock(lck);
    }
    else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_mtx_unlock(lck);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

    return res;
}

/*
 * Routine: lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
    lck_mtx_t *lck,
    thread_t  holder)
{
    thread_t  self = current_thread();
    lck_mtx_t *mutex;
    integer_t priority;
    spl_t     s = splsched();

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

    priority = self->sched_pri;
    if (priority < self->priority)
        priority = self->priority;
    if (priority > MINPRI_KERNEL)
        priority = MINPRI_KERNEL;
    else if (priority < BASEPRI_DEFAULT)
        priority = BASEPRI_DEFAULT;

    thread_lock(holder);
    if (mutex->lck_mtx_pri == 0)
        holder->promotions++;
    if (holder->priority < MINPRI_KERNEL) {
        holder->sched_mode |= TH_MODE_PROMOTED;
        if (mutex->lck_mtx_pri < priority &&
            holder->sched_pri < priority) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
                holder->sched_pri, priority, (int)holder, (int)lck, 0);

            set_sched_pri(holder, priority);
        }
    }
    thread_unlock(holder);
    splx(s);

    if (mutex->lck_mtx_pri < priority)
        mutex->lck_mtx_pri = priority;
    if (self->pending_promoter[self->pending_promoter_index] == NULL) {
        self->pending_promoter[self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    }
    else if (self->pending_promoter[self->pending_promoter_index] != mutex) {
        self->pending_promoter[++self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    }

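    /*
     * Explanatory note (added): the wait event is the address of the
     * last unsigned int inside the lck_mtx_t, presumably so that
     * mutex-internal waiters do not collide with clients that use the
     * lock's own address as an event.  lck_mtx_unlock_wakeup() below
     * computes the same address for thread_wakeup_one().
     */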
    assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
    lck_mtx_ilk_unlock(mutex);

    thread_block(THREAD_CONTINUE_NULL);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine: lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
    lck_mtx_t *lck)
{
    thread_t  thread = current_thread();
    lck_mtx_t *mutex;

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;

    if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
        thread->pending_promoter[thread->pending_promoter_index] = NULL;
        if (thread->pending_promoter_index > 0)
            thread->pending_promoter_index--;
        mutex->lck_mtx_waiters--;
    }

    if (mutex->lck_mtx_waiters > 0) {
        integer_t priority = mutex->lck_mtx_pri;
        spl_t     s = splsched();

        thread_lock(thread);
        thread->promotions++;
        if (thread->priority < MINPRI_KERNEL) {
            thread->sched_mode |= TH_MODE_PROMOTED;
            if (thread->sched_pri < priority) {
                KERNEL_DEBUG_CONSTANT(
                    MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
                    thread->sched_pri, priority, 0, (int)lck, 0);

                set_sched_pri(thread, priority);
            }
        }
        thread_unlock(thread);
        splx(s);
    }
    else
        mutex->lck_mtx_pri = 0;

    return (mutex->lck_mtx_waiters);
}

/*
 * Routine: lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
    lck_mtx_t *lck,
    thread_t  holder)
{
    thread_t  thread = current_thread();
    lck_mtx_t *mutex;

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

    if (thread != holder)
        panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

    if (thread->promotions > 0) {
        spl_t s = splsched();

        thread_lock(thread);
        if (--thread->promotions == 0 &&
            (thread->sched_mode & TH_MODE_PROMOTED)) {
            thread->sched_mode &= ~TH_MODE_PROMOTED;
            if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
                KERNEL_DEBUG_CONSTANT(
                    MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
                    thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

                set_sched_pri(thread, DEPRESSPRI);
            }
            else {
                if (thread->priority < thread->sched_pri) {
                    KERNEL_DEBUG_CONSTANT(
                        MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
                        thread->sched_pri, thread->priority,
                        0, (int)lck, 0);
                }

                compute_priority(thread, FALSE);
            }
        }
        thread_unlock(thread);
        splx(s);
    }
    assert(mutex->lck_mtx_waiters > 0);
    thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine: mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */

void
mutex_pause(void)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}
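
/*
 * Usage sketch (added for illustration): mutex_pause() is a brief timed
 * back-off, typically used in try-lock retry loops, e.g.:
 *
 *  while (!lck_mtx_try_lock(my_mtx))   // "my_mtx" is hypothetical
 *      mutex_pause();
 */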

/*
 * Routine: lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
    lck_rw_t           *lck,
    lck_sleep_action_t lck_sleep_action,
    event_t            event,
    wait_interrupt_t   interruptible)
{
    wait_result_t res;
    lck_rw_type_t lck_rw_type;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lck_rw_type = lck_rw_done(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
            if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
                lck_rw_lock(lck, lck_rw_type);
            else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
                lck_rw_lock_exclusive(lck);
            else
                lck_rw_lock_shared(lck);
        }
    }
    else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        (void)lck_rw_done(lck);

    return res;
}
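
/*
 * Usage sketch (added for illustration): as with lck_mtx_sleep(), but
 * the lock is dropped via lck_rw_done() and, by default, re-taken in
 * whichever mode (shared or exclusive) it was held; LCK_SLEEP_SHARED or
 * LCK_SLEEP_EXCLUSIVE forces a particular re-acquire mode.  "my_rw" and
 * "my_cond" are hypothetical names.
 *
 *  lck_rw_lock_shared(my_rw);
 *  while (!my_cond)
 *      (void) lck_rw_sleep(my_rw, LCK_SLEEP_DEFAULT,
 *                          (event_t)&my_cond, THREAD_UNINT);
 *  lck_rw_done(my_rw);
 */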


/*
 * Routine: lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
    lck_rw_t           *lck,
    lck_sleep_action_t lck_sleep_action,
    event_t            event,
    wait_interrupt_t   interruptible,
    uint64_t           deadline)
{
    wait_result_t res;
    lck_rw_type_t lck_rw_type;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait_deadline(event, interruptible, deadline);
    if (res == THREAD_WAITING) {
        lck_rw_type = lck_rw_done(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
            if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
                lck_rw_lock(lck, lck_rw_type);
            else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
                lck_rw_lock_exclusive(lck);
            else
                lck_rw_lock_shared(lck);
        }
    }
    else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        (void)lck_rw_done(lck);

    return res;
}

kern_return_t
host_lockgroup_info(
    host_t                 host,
    lockgroup_info_array_t *lockgroup_infop,
    mach_msg_type_number_t *lockgroup_infoCntp)
{
    lockgroup_info_t *lockgroup_info_base;
    lockgroup_info_t *lockgroup_info;
    vm_offset_t      lockgroup_info_addr;
    vm_size_t        lockgroup_info_size;
    lck_grp_t        *lck_grp;
    unsigned int     i;
    vm_size_t        used;
    vm_map_copy_t    copy;
    kern_return_t    kr;

    if (host == HOST_NULL)
        return KERN_INVALID_HOST;

    mutex_lock(&lck_grp_lock);

    lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
    kr = kmem_alloc_pageable(ipc_kernel_map,
                             &lockgroup_info_addr, lockgroup_info_size);
    if (kr != KERN_SUCCESS) {
        mutex_unlock(&lck_grp_lock);
        return(kr);
    }

    lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
    lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
    lockgroup_info = lockgroup_info_base;

    for (i = 0; i < lck_grp_cnt; i++) {

        lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
        lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
        lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
        lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
        lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
        lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

        lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
        lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
        lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
        lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
        lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
        lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
        lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
        lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
        lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

        lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
        lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
        lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
        lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
        lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
        lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
        lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
        lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
        lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

        (void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

        lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
        lockgroup_info++;
    }

    *lockgroup_infoCntp = lck_grp_cnt;
    mutex_unlock(&lck_grp_lock);

    used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

    if (used != lockgroup_info_size)
        bzero((char *) lockgroup_info, lockgroup_info_size - used);

    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
                       (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
    assert(kr == KERN_SUCCESS);

    *lockgroup_infop = (lockgroup_info_t *) copy;

    return(KERN_SUCCESS);
}
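
/*
 * Note (added): host_lockgroup_info() is the kernel-side MIG server
 * routine (see the mach/mach_host_server.h include above).  A user-space
 * caller would reach it through the corresponding host interface stub,
 * roughly as sketched below; on success the returned out-of-line array
 * should eventually be vm_deallocate()'d by the caller.
 *
 *  lockgroup_info_array_t info;
 *  mach_msg_type_number_t count;
 *  kern_return_t kr = host_lockgroup_info(mach_host_self(), &info, &count);
 */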

/*
 * Compatibility module
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
    event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
extern void mutex_free_EXT(lck_mtx_t *mutex);
extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
extern void mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t thread_sleep_mutex_EXT(
    event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t thread_sleep_mutex_deadline_EXT(
    event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
    __unused boolean_t      can_sleep,
    __unused unsigned short tag0,
    __unused unsigned short tag1)
{
    return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
    lck_rw_t *lock)
{
    (void) lck_rw_done(lock);
}

void
lock_free_EXT(
    lck_rw_t *lock)
{
    lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
    lck_rw_t                *lock,
    __unused boolean_t      can_sleep,
    __unused unsigned short tag0,
    __unused unsigned short tag1)
{
    lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
    lck_rw_t *lock)
{
    lck_rw_lock_shared( lock);
}

boolean_t
lock_read_to_write_EXT(
    lck_rw_t *lock)
{
    return( lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
    lck_rw_t *lock)
{
    lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
    lck_rw_t *lock)
{
    lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
    event_t          event,
    lck_rw_t         *lock,
    wait_interrupt_t interruptible)
{
    return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
    __unused unsigned short tag)
{
    return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
    lck_mtx_t *mutex)
{
    lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
    lck_mtx_t               *mutex,
    __unused unsigned short tag)
{
    lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
    lck_mtx_t *mutex)
{
    lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
    lck_mtx_t *mutex)
{
    return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
    lck_mtx_t *mutex)
{
    lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
    event_t          event,
    lck_mtx_t        *mutex,
    wait_interrupt_t interruptible)
{
    return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
    event_t          event,
    lck_mtx_t        *mutex,
    uint64_t         deadline,
    wait_interrupt_t interruptible)
{
    return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
    lck_spin_t *lock)
{
    lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
    lck_spin_t              *lock,
    __unused unsigned short tag)
{
    lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
    lck_spin_t *lock)
{
    return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
    lck_spin_t *lock)
{
    lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
    event_t          event,
    lck_spin_t       *lock,
    wait_interrupt_t interruptible)
{
    return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}