/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>


#include <sys/kdebug.h>

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3


static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_mutex_data(static,lck_grp_lock)

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;
/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}

/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}
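
/*
 * Typical client usage (an illustrative sketch, not code from this
 * file; the group and lock names are hypothetical):
 *
 *	lck_grp_attr_t	*grp_attr = lck_grp_attr_alloc_init();
 *	lck_grp_t	*grp = lck_grp_alloc_init("com.example.subsys", grp_attr);
 *	lck_mtx_t	*mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);
 *	...use mtx...
 *	lck_mtx_free(mtx, grp);
 *	lck_grp_free(grp);
 *	lck_grp_attr_free(grp_attr);
 */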


/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	mutex_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	mutex_unlock(&lck_grp_lock);

}


/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	mutex_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	mutex_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}
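
/*
 * Note: a group is born with one reference (lck_grp_init) and each
 * lock initialized against it takes another (lck_grp_reference), so
 * lck_grp_free() only removes the group from the global list; the
 * storage persists until lck_grp_deallocate() drops the last reference.
 */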


/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}


/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if !DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif
}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_rw_shared_priority
 */
void
lck_attr_rw_shared_priority(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}


/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}
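
/*
 * Example (an illustrative sketch; lck, event and condition are
 * hypothetical): the default action drops the spin lock across the
 * wait and reacquires it before returning, so the classic
 * condition-wait loop holds the lock at every test:
 *
 *	lck_spin_lock(lck);
 *	while (!condition)
 *		(void) lck_spin_sleep(lck, LCK_SLEEP_DEFAULT,
 *					event, THREAD_UNINT);
 *	lck_spin_unlock(lck);
 */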


/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
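
/*
 * With LCK_SLEEP_UNLOCK the mutex is released in every case and is NOT
 * reacquired on wakeup; the caller holds nothing on return (sketch,
 * hypothetical names):
 *
 *	lck_mtx_lock(mtx);
 *	...queue a request...
 *	(void) lck_mtx_sleep(mtx, LCK_SLEEP_UNLOCK, event, THREAD_UNINT);
 *	...mtx is no longer held here...
 */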


/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}

/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

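	/*
	 * Compute the priority to promote the holder to: the waiter's
	 * effective priority, clamped into the range
	 * [BASEPRI_DEFAULT, MINPRI_KERNEL] so a waiter can never boost
	 * the holder into the kernel-reserved priority band.
	 */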
	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	if (holder->priority < MINPRI_KERNEL) {
		holder->sched_mode |= TH_MODE_PROMOTED;
		if (	mutex->lck_mtx_pri < priority	&&
			holder->sched_pri < priority	) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, (int)holder, (int)lck, 0);

			set_sched_pri(holder, priority);
		}
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

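	/*
	 * Wait on the last unsigned int of the mutex itself: the event
	 * address below is lck offset to its final word, matching the
	 * thread_wakeup_one() call in lck_mtx_unlock_wakeup().
	 */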
	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

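	/*
	 * If this thread registered itself as a pending promoter for
	 * this mutex in lck_mtx_lock_wait(), retire that entry and its
	 * waiter count now that the lock has been acquired.
	 */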
	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, priority, 0, (int)lck, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}

/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;


	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

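	/*
	 * Drop one promotion acquired while holding contended mutexes;
	 * when the last promotion drains, clear TH_MODE_PROMOTED and
	 * either fall back to DEPRESSPRI (if the thread is depressed)
	 * or recompute its normal priority.
	 */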
	if (thread->promotions > 0) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0		&&
			(thread->sched_mode & TH_MODE_PROMOTED)	) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
								DBG_FUNC_NONE,
							thread->sched_pri, thread->priority,
								0, (int)lck, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	assert(mutex->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */

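/*
 * Blocks the calling thread for one millisecond (the 1 * 1000us
 * timeout below) as a simple backoff under lock contention.
 */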
void
mutex_pause(void)
{
	wait_result_t wait_result;

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}

kern_return_t
host_lockgroup_info(
	host_t			host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	mutex_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		mutex_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

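	/*
	 * Walk the global group list, copying each group's lock counts
	 * and statistics into the pageable buffer.  lck_grp_lock is
	 * held across the whole walk, so the list cannot change
	 * underneath us.
	 */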
	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	mutex_unlock(&lck_grp_lock);

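	/*
	 * Zero any unused tail of the allocation, then hand the buffer
	 * to the caller as out-of-line MIG data via a vm_map copy.
	 */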
	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}

/*
 * Compatibility module
 */

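/*
 * The *_EXT shims below preserve the pre-lck_* lock interfaces
 * (lock_*, mutex_*, usimple_*) by forwarding to the current
 * primitives; all such locks are accounted to LockCompatGroup.
 */
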
extern lck_rw_t *lock_alloc_EXT( boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
	event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
extern void mutex_free_EXT(lck_mtx_t *mutex);
extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
extern void mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t thread_sleep_mutex_EXT(
	event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t thread_sleep_mutex_deadline_EXT(
	event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t	*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared( lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return( lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t	*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
	lck_mtx_t	*mutex)
{
	return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t	*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}