/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>

#include <sys/kdebug.h>

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3

static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_mutex_data(static,lck_grp_lock)

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;

/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}

/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}


/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	mutex_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	mutex_unlock(&lck_grp_lock);
}


/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	mutex_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	mutex_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}
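
/*
 * Example (illustrative sketch, not part of the original source): the
 * typical lock group lifecycle as a client subsystem might drive it.
 * The names example_grp_attr and example_grp are hypothetical.
 *
 *	lck_grp_attr_t	*example_grp_attr = lck_grp_attr_alloc_init();
 *	lck_grp_t	*example_grp;
 *
 *	lck_grp_attr_setstat(example_grp_attr);
 *	example_grp = lck_grp_alloc_init("com.example.subsystem",
 *					example_grp_attr);
 *	...allocate and use locks against example_grp...
 *	lck_grp_free(example_grp);
 *	lck_grp_attr_free(example_grp_attr);
 *
 * lck_grp_free() drops the group's initial reference; the group is
 * only freed once every lock allocated against it has released its
 * reference via lck_grp_deallocate().
 */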

/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}


/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if !DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif
}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}
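
/*
 * Example (illustrative sketch): building a lock attribute with debug
 * checking enabled and using it to initialize a mutex; example_attr,
 * example_grp and example_mtx are hypothetical names.
 *
 *	lck_attr_t	*example_attr = lck_attr_alloc_init();
 *	lck_mtx_t	*example_mtx;
 *
 *	lck_attr_setdebug(example_attr);
 *	example_mtx = lck_mtx_alloc_init(example_grp, example_attr);
 *	...
 *	lck_mtx_free(example_mtx, example_grp);
 *	lck_attr_free(example_attr);
 *
 * Note that on DEBUG kernels lck_attr_setdefault() already sets
 * LCK_ATTR_DEBUG, so the explicit lck_attr_setdebug() call only
 * matters on release kernels.
 */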


/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
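
/*
 * Example (illustrative sketch): the canonical condition-wait loop a
 * client builds on lck_mtx_sleep(); example_mtx and example_flag are
 * hypothetical.
 *
 *	lck_mtx_lock(example_mtx);
 *	while (!example_flag)
 *		(void) lck_mtx_sleep(example_mtx, LCK_SLEEP_DEFAULT,
 *					(event_t)&example_flag, THREAD_UNINT);
 *	...example_flag is set and the mutex is held again here...
 *	lck_mtx_unlock(example_mtx);
 *
 * The waking side sets example_flag while holding example_mtx and
 * then calls thread_wakeup((event_t)&example_flag).
 */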


/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}

/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	/*
	 * Compute the priority to promote the holder to: at least our
	 * own, clamped to the range [BASEPRI_DEFAULT, MINPRI_KERNEL].
	 */
	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	if (holder->priority < MINPRI_KERNEL) {
		holder->sched_mode |= TH_MODE_PROMOTED;
		if (	mutex->lck_mtx_pri < priority	&&
				holder->sched_pri < priority	) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, (int)holder, (int)lck, 0);

			set_sched_pri(holder, priority);
		}
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	/*
	 * Sleep on the last word of the lock structure, which serves as
	 * the wakeup event for this mutex (see lck_mtx_unlock_wakeup).
	 */
	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
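
/*
 * Sketch of the contended-mutex protocol implemented by
 * lck_mtx_lock_wait(), lck_mtx_lock_acquire() and
 * lck_mtx_unlock_wakeup() (a reading of the code, supplied here for
 * orientation only):
 *
 *	blocking thread			holding thread
 *	---------------			--------------
 *	lck_mtx_lock_wait()
 *	  promote holder to at
 *	  least our priority,
 *	  count ourselves as a
 *	  waiter, sleep on the
 *	  lock's wait event
 *					lck_mtx_unlock_wakeup()
 *					  undo its own promotion,
 *					  wake one waiter
 *	lck_mtx_lock_acquire()
 *	  drop our waiter count;
 *	  inherit the promotion
 *	  while waiters remain
 */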

/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, priority, 0, (int)lck, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}

/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t	s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0 &&
				(thread->sched_mode & TH_MODE_PROMOTED)	) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
							DBG_FUNC_NONE,
							thread->sched_pri, thread->priority,
							0, (int)lck, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	assert(mutex->lck_mtx_waiters > 0);
	/*
	 * Wake one thread sleeping on the lock's wait event, the last
	 * word of the lock structure (see lck_mtx_lock_wait).
	 */
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */

void
mutex_pause(void)
{
	wait_result_t wait_result;

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
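
/*
 * Example (illustrative sketch): sleeping while holding a read-write
 * lock and reacquiring it exclusive on wakeup; example_rw and
 * example_event are hypothetical.
 *
 *	lck_rw_lock_shared(example_rw);
 *	...
 *	res = lck_rw_sleep(example_rw, LCK_SLEEP_EXCLUSIVE,
 *				example_event, THREAD_UNINT);
 *	...the lock is now held exclusive regardless of the mode it
 *	was held in before the sleep...
 *	(void) lck_rw_done(example_rw);
 */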


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}

kern_return_t
host_lockgroup_info(
	host_t			host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	mutex_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		mutex_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	mutex_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
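
/*
 * Example (illustrative sketch): a user-space caller of the MIG
 * routine above, assuming the standard mach_debug stubs are
 * available.
 *
 *	lockgroup_info_array_t	info;
 *	mach_msg_type_number_t	count;
 *	unsigned int		i;
 *
 *	if (host_lockgroup_info(mach_host_self(), &info, &count) == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++)
 *			printf("%s: %d mutexes\n",
 *				info[i].lockgroup_name, info[i].lock_mtx_cnt);
 *		(void) vm_deallocate(mach_task_self(), (vm_address_t)info,
 *				count * sizeof(info[0]));
 *	}
 */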

/*
 * Compatibility module
 */

extern lck_rw_t		*lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void		lock_done_EXT(lck_rw_t *lock);
extern void		lock_free_EXT(lck_rw_t *lock);
extern void		lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void		lock_read_EXT(lck_rw_t *lock);
extern boolean_t	lock_read_to_write_EXT(lck_rw_t *lock);
extern void		lock_write_EXT(lck_rw_t *lock);
extern void		lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t	thread_sleep_lock_write_EXT(
				event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t	*mutex_alloc_EXT(unsigned short tag);
extern void		mutex_free_EXT(lck_mtx_t *mutex);
extern void		mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void		mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t	mutex_try_EXT(lck_mtx_t *mutex);
extern void		mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t	thread_sleep_mutex_EXT(
				event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t	thread_sleep_mutex_deadline_EXT(
				event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void		usimple_lock_EXT(lck_spin_t *lock);
extern void		usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int	usimple_lock_try_EXT(lck_spin_t *lock);
extern void		usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t	thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return(lck_rw_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t		*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared(lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return(lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t		*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
	lck_mtx_t	*mutex)
{
	return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t		*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	/* the original fell off the end without returning the try result */
	return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}
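
/*
 * Example (illustrative sketch): how a legacy caller maps onto this
 * compatibility layer.  Code written against the old mutex API keeps
 * working through the _EXT shims, but new code should call the
 * lck_mtx interfaces directly:
 *
 *	legacy				preferred
 *	------				---------
 *	m = mutex_alloc(0);		m = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);
 *	mutex_lock(m);			lck_mtx_lock(m);
 *	mutex_unlock(m);		lck_mtx_unlock(m);
 *	mutex_free(m);			lck_mtx_free(m, grp);
 */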