/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>


#include <sys/kdebug.h>

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3


static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_mutex_data(static,lck_grp_lock)

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;

/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}

/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}
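
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): the typical way a subsystem combines the attribute and group
 * routines above.  Names prefixed my_ are hypothetical;
 * lck_mtx_alloc_init() is the mutex allocator from <kern/locks.h>.
 * Since lck_grp_init() latches the attribute value into the group,
 * the attribute may be freed immediately after use.
 */
#if 0	/* example only */
static lck_grp_t	*my_grp;
static lck_mtx_t	*my_mtx;

static void
my_locks_init(void)
{
	lck_grp_attr_t	*grp_attr;

	grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(grp_attr);		/* opt in to lock statistics */
	my_grp = lck_grp_alloc_init("my_subsystem", grp_attr);
	lck_grp_attr_free(grp_attr);		/* value was copied by lck_grp_init */
	my_mtx = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
}
#endif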


/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	mutex_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	mutex_unlock(&lck_grp_lock);

}


/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	mutex_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	mutex_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}


/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}


/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if !DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif

}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}


/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
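
/*
 * Illustrative sketch (editorial addition): the condition-wait pattern
 * lck_mtx_sleep() supports.  By convention the event is the address of
 * the datum being awaited; my_mtx and my_flag are hypothetical.  The
 * lck_spin_sleep() and lck_rw_sleep() variants follow the same shape.
 */
#if 0	/* example only */
	/* waiter */
	lck_mtx_lock(my_mtx);
	while (my_flag == 0)
		(void) lck_mtx_sleep(my_mtx, LCK_SLEEP_DEFAULT,
				     (event_t)&my_flag, THREAD_UNINT);
	/* my_flag is non-zero and my_mtx is held again here */
	lck_mtx_unlock(my_mtx);

	/* waker */
	lck_mtx_lock(my_mtx);
	my_flag = 1;
	thread_wakeup((event_t)&my_flag);
	lck_mtx_unlock(my_mtx);
#endif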


/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}

/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait(
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	if (holder->priority < MINPRI_KERNEL) {
		holder->sched_mode |= TH_MODE_PROMOTED;
		if (mutex->lck_mtx_pri < priority &&
		    holder->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
				holder->sched_pri, priority, (int)holder, (int)lck, 0);

			set_sched_pri(holder, priority);
		}
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, priority, 0, (int)lck, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}

/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup(
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;


	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (--thread->promotions == 0 &&
		    (thread->sched_mode & TH_MODE_PROMOTED)) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
							DBG_FUNC_NONE,
						thread->sched_pri, thread->priority,
						0, (int)lck, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	assert(mutex->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
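
/*
 * Editorial note: the three routines above are the machine-independent
 * halves of the mutex slow paths.  Per their stated calling conventions,
 * the per-architecture lck_mtx_lock()/lck_mtx_unlock() implementations
 * are expected to pair them roughly as follows (simplified outline, not
 * the actual code, which lives under the architecture directories):
 *
 *	contended lock:
 *		take the interlock;
 *		while the mutex is owned:
 *			lck_mtx_lock_wait(lck, holder);	-- blocks, drops interlock
 *			retake the interlock;
 *		become owner;
 *		waiters = lck_mtx_lock_acquire(lck);	-- pick up waiters' priority
 *		release the interlock;
 *
 *	contended unlock:
 *		take the interlock;
 *		lck_mtx_unlock_wakeup(lck, current_thread());	-- demote, wake one waiter
 *		clear owner; release the interlock.
 */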

/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */

void
mutex_pause(void)
{
	wait_result_t wait_result;

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
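
/*
 * Illustrative sketch (editorial addition): lck_rw_sleep() records how
 * the lock was held (lck_rw_done() returns the mode) and re-takes it
 * the same way, unless LCK_SLEEP_SHARED/LCK_SLEEP_EXCLUSIVE override
 * the mode or LCK_SLEEP_UNLOCK skips re-acquisition entirely.
 * my_rw and my_count are hypothetical.
 */
#if 0	/* example only */
	lck_rw_lock_shared(my_rw);
	while (my_count == 0)
		(void) lck_rw_sleep(my_rw, LCK_SLEEP_DEFAULT,
				    (event_t)&my_count, THREAD_UNINT);
	/* shared hold re-acquired here */
	lck_rw_done(my_rw);
#endif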


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}

kern_return_t
host_lockgroup_info(
	host_t			host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	mutex_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		mutex_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	mutex_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
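
/*
 * Illustrative sketch (editorial addition): consuming this interface
 * from user space through the MIG-generated host_lockgroup_info() stub
 * of the mach_host subsystem.  The reply buffer arrives out-of-line
 * and is assumed to be the caller's to vm_deallocate().
 */
#if 0	/* example only -- user-space code */
	lockgroup_info_array_t	info;
	mach_msg_type_number_t	count;
	unsigned int		i;

	if (host_lockgroup_info(mach_host_self(), &info, &count) == KERN_SUCCESS) {
		for (i = 0; i < count; i++)
			printf("%s: %u mutexes, %u rw locks\n",
			    info[i].lockgroup_name,
			    info[i].lock_mtx_cnt, info[i].lock_rw_cnt);
		(void) vm_deallocate(mach_task_self(),
		    (vm_address_t)info, count * sizeof(*info));
	}
#endif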

/*
 * Compatibility module
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
			event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
extern void mutex_free_EXT(lck_mtx_t *mutex);
extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
extern void mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t thread_sleep_mutex_EXT(
			event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t thread_sleep_mutex_deadline_EXT(
			event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return(lck_rw_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t	*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared(lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return(lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t	*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
	lck_mtx_t	*mutex)
{
	return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t	*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}