/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>


#include <sys/kdebug.h>

#define LCK_MTX_SLEEP_CODE		0
#define LCK_MTX_SLEEP_DEADLINE_CODE	1
#define LCK_MTX_LCK_WAIT_CODE		2
#define LCK_MTX_UNLCK_WAKEUP_CODE	3


static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_mutex_data(static, lck_grp_lock)

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;

/*
 * Routine:	lck_mod_init
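 *
 *	Initializes the lock module's global state: the lock group queue and
 *	its mutex, the default group attribute, the "Compatibility APIs"
 *	lock group and the default lock attribute.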
 */

void
lck_mod_init(
    void)
{
    queue_init(&lck_grp_queue);
    mutex_init(&lck_grp_lock, 0);
    lck_grp_cnt = 0;
    lck_grp_attr_setdefault(&LockDefaultGroupAttr);
    lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
    lck_attr_setdefault(&LockDefaultLckAttr);
}

/*
 * Routine:	lck_grp_attr_alloc_init
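 *
 *	Allocates a lock group attribute from kernel memory and initializes
 *	it to the defaults; returns NULL if the allocation fails.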
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
    void)
{
    lck_grp_attr_t	*attr;

    if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
        lck_grp_attr_setdefault(attr);

    return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
    lck_grp_attr_t	*attr)
{
    if (LcksOpts & enaLkStat)
        attr->grp_attr_val = LCK_GRP_ATTR_STAT;
    else
        attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
    lck_grp_attr_t	*attr)
{
    (void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
    lck_grp_attr_t	*attr)
{
    kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
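 *
 *	Allocates a lock group from kernel memory and initializes it with the
 *	given name and group attribute; returns NULL if the allocation fails.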
 */

lck_grp_t *
lck_grp_alloc_init(
    const char		*grp_name,
    lck_grp_attr_t	*attr)
{
    lck_grp_t	*grp;

    if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
        lck_grp_init(grp, grp_name, attr);

    return(grp);
}


/*
 * Routine:	lck_grp_init
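 *
 *	Initializes a caller-supplied lock group: zeroes it, copies the name,
 *	resolves the statistics attribute, sets the reference count to one and
 *	links the group onto the global lock group queue.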
 */

void
lck_grp_init(
    lck_grp_t		*grp,
    const char		*grp_name,
    lck_grp_attr_t	*attr)
{
    bzero((void *)grp, sizeof(lck_grp_t));

    (void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

    if (attr != LCK_GRP_ATTR_NULL)
        grp->lck_grp_attr = attr->grp_attr_val;
    else if (LcksOpts & enaLkStat)
        grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
    else
        grp->lck_grp_attr = LCK_ATTR_NONE;

    grp->lck_grp_refcnt = 1;

    mutex_lock(&lck_grp_lock);
    enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
    lck_grp_cnt++;
    mutex_unlock(&lck_grp_lock);

}


/*
 * Routine:	lck_grp_free
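 *
 *	Removes the group from the global lock group queue and drops the
 *	initial reference taken in lck_grp_init.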
 */

void
lck_grp_free(
    lck_grp_t	*grp)
{
    mutex_lock(&lck_grp_lock);
    lck_grp_cnt--;
    (void)remque((queue_entry_t)grp);
    mutex_unlock(&lck_grp_lock);
    lck_grp_deallocate(grp);
}


/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
    lck_grp_t	*grp)
{
    (void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}


/*
 * Routine:	lck_grp_deallocate
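 *
 *	Drops a reference on the group and frees it when the reference count
 *	reaches zero.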
 */

void
lck_grp_deallocate(
    lck_grp_t	*grp)
{
    if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
        kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
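 *
 *	Atomically increments the group's per-type lock count (spin, mutex
 *	or read-write).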
 */

void
lck_grp_lckcnt_incr(
    lck_grp_t	*grp,
    lck_type_t	lck_type)
{
    unsigned int	*lckcnt;

    switch (lck_type) {
    case LCK_TYPE_SPIN:
        lckcnt = &grp->lck_grp_spincnt;
        break;
    case LCK_TYPE_MTX:
        lckcnt = &grp->lck_grp_mtxcnt;
        break;
    case LCK_TYPE_RW:
        lckcnt = &grp->lck_grp_rwcnt;
        break;
    default:
        return panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
    }

    (void)hw_atomic_add((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
    lck_grp_t	*grp,
    lck_type_t	lck_type)
{
    unsigned int	*lckcnt;

    switch (lck_type) {
    case LCK_TYPE_SPIN:
        lckcnt = &grp->lck_grp_spincnt;
        break;
    case LCK_TYPE_MTX:
        lckcnt = &grp->lck_grp_mtxcnt;
        break;
    case LCK_TYPE_RW:
        lckcnt = &grp->lck_grp_rwcnt;
        break;
    default:
        return panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
    }

    (void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
    void)
{
    lck_attr_t	*attr;

    if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
        lck_attr_setdefault(attr);

    return(attr);
}


/*
 * Routine:	lck_attr_setdefault
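 *
 *	On DEBUG kernels the debug attribute is always set; otherwise it is
 *	taken from the enaLkDeb boot option.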
 */

void
lck_attr_setdefault(
    lck_attr_t	*attr)
{
#if !DEBUG
    if (LcksOpts & enaLkDeb)
        attr->lck_attr_val = LCK_ATTR_DEBUG;
    else
        attr->lck_attr_val = LCK_ATTR_NONE;
#else
    attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif

}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
    lck_attr_t	*attr)
{
    (void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
    lck_attr_t	*attr)
{
    kfree(attr, sizeof(lck_attr_t));
}


/*
 * Routine:	lck_spin_sleep
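 *
 *	Asserts a wait on the given event, drops the spin lock, blocks, and
 *	reacquires the lock on wakeup unless LCK_SLEEP_UNLOCK was specified.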
 */
wait_result_t
lck_spin_sleep(
    lck_spin_t		*lck,
    lck_sleep_action_t	lck_sleep_action,
    event_t		event,
    wait_interrupt_t	interruptible)
{
    wait_result_t	res;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lck_spin_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_spin_lock(lck);
    }
    else
    if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_spin_unlock(lck);

    return res;
}


/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
    lck_spin_t		*lck,
    lck_sleep_action_t	lck_sleep_action,
    event_t		event,
    wait_interrupt_t	interruptible,
    uint64_t		deadline)
{
    wait_result_t	res;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait_deadline(event, interruptible, deadline);
    if (res == THREAD_WAITING) {
        lck_spin_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_spin_lock(lck);
    }
    else
    if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_spin_unlock(lck);

    return res;
}


/*
 * Routine:	lck_mtx_sleep
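 *
 *	Asserts a wait on the given event, drops the mutex, blocks, and
 *	reacquires the mutex on wakeup unless LCK_SLEEP_UNLOCK was specified.
 *
 *	Illustrative (hypothetical) caller pattern for waiting on a condition
 *	protected by the mutex:
 *
 *		lck_mtx_lock(lck);
 *		while (!condition)
 *			(void) lck_mtx_sleep(lck, LCK_SLEEP_DEFAULT,
 *					     (event_t) &condition, THREAD_UNINT);
 *		lck_mtx_unlock(lck);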
 */
wait_result_t
lck_mtx_sleep(
    lck_mtx_t		*lck,
    lck_sleep_action_t	lck_sleep_action,
    event_t		event,
    wait_interrupt_t	interruptible)
{
    wait_result_t	res;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
                 (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lck_mtx_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_mtx_lock(lck);
    }
    else
    if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_mtx_unlock(lck);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

    return res;
}


/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
    lck_mtx_t		*lck,
    lck_sleep_action_t	lck_sleep_action,
    event_t		event,
    wait_interrupt_t	interruptible,
    uint64_t		deadline)
{
    wait_result_t	res;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
                 (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait_deadline(event, interruptible, deadline);
    if (res == THREAD_WAITING) {
        lck_mtx_unlock(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
            lck_mtx_lock(lck);
    }
    else
    if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        lck_mtx_unlock(lck);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

    return res;
}

/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
    lck_mtx_t	*lck,
    thread_t	holder)
{
    thread_t	self = current_thread();
    lck_mtx_t	*mutex;
    integer_t	priority;
    spl_t	s = splsched();

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

    priority = self->sched_pri;
    if (priority < self->priority)
        priority = self->priority;
    if (priority > MINPRI_KERNEL)
        priority = MINPRI_KERNEL;
    else
    if (priority < BASEPRI_DEFAULT)
        priority = BASEPRI_DEFAULT;

    thread_lock(holder);
    if (mutex->lck_mtx_pri == 0)
        holder->promotions++;
    if (holder->priority < MINPRI_KERNEL) {
        holder->sched_mode |= TH_MODE_PROMOTED;
        if (mutex->lck_mtx_pri < priority &&
            holder->sched_pri < priority) {
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
                holder->sched_pri, priority, (int)holder, (int)lck, 0);

            set_sched_pri(holder, priority);
        }
    }
    thread_unlock(holder);
    splx(s);

    if (mutex->lck_mtx_pri < priority)
        mutex->lck_mtx_pri = priority;
    if (self->pending_promoter[self->pending_promoter_index] == NULL) {
        self->pending_promoter[self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    }
    else
    if (self->pending_promoter[self->pending_promoter_index] != mutex) {
        self->pending_promoter[++self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    }

    assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
    lck_mtx_ilk_unlock(mutex);

    thread_block(THREAD_CONTINUE_NULL);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
    lck_mtx_t	*lck)
{
    thread_t	thread = current_thread();
    lck_mtx_t	*mutex;

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;

    if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
        thread->pending_promoter[thread->pending_promoter_index] = NULL;
        if (thread->pending_promoter_index > 0)
            thread->pending_promoter_index--;
        mutex->lck_mtx_waiters--;
    }

    if (mutex->lck_mtx_waiters > 0) {
        integer_t	priority = mutex->lck_mtx_pri;
        spl_t		s = splsched();

        thread_lock(thread);
        thread->promotions++;
        if (thread->priority < MINPRI_KERNEL) {
            thread->sched_mode |= TH_MODE_PROMOTED;
            if (thread->sched_pri < priority) {
                KERNEL_DEBUG_CONSTANT(
                    MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
                    thread->sched_pri, priority, 0, (int)lck, 0);

                set_sched_pri(thread, priority);
            }
        }
        thread_unlock(thread);
        splx(s);
    }
    else
        mutex->lck_mtx_pri = 0;

    return (mutex->lck_mtx_waiters);
}

/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
    lck_mtx_t	*lck,
    thread_t	holder)
{
    thread_t	thread = current_thread();
    lck_mtx_t	*mutex;

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;


    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

    if (thread != holder)
        panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

    if (thread->promotions > 0) {
        spl_t	s = splsched();

        thread_lock(thread);
        if (--thread->promotions == 0 &&
            (thread->sched_mode & TH_MODE_PROMOTED)) {
            thread->sched_mode &= ~TH_MODE_PROMOTED;
            if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
                KERNEL_DEBUG_CONSTANT(
                    MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
                    thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

                set_sched_pri(thread, DEPRESSPRI);
            }
            else {
                if (thread->priority < thread->sched_pri) {
                    KERNEL_DEBUG_CONSTANT(
                        MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEMOTE) | DBG_FUNC_NONE,
                        thread->sched_pri, thread->priority, 0, (int)lck, 0);
                }

                compute_priority(thread, FALSE);
            }
        }
        thread_unlock(thread);
        splx(s);
    }
    assert(mutex->lck_mtx_waiters > 0);
    thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */

void
mutex_pause(void)
{
    wait_result_t wait_result;

    wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
    assert(wait_result == THREAD_WAITING);

    wait_result = thread_block(THREAD_CONTINUE_NULL);
    assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Routine:	lck_rw_sleep
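 *
 *	Drops the read-write lock while blocked; on wakeup the lock is
 *	reacquired in the mode in which it was held, or in the mode forced by
 *	LCK_SLEEP_SHARED or LCK_SLEEP_EXCLUSIVE, unless LCK_SLEEP_UNLOCK was
 *	specified.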
 */
wait_result_t
lck_rw_sleep(
    lck_rw_t		*lck,
    lck_sleep_action_t	lck_sleep_action,
    event_t		event,
    wait_interrupt_t	interruptible)
{
    wait_result_t	res;
    lck_rw_type_t	lck_rw_type;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait(event, interruptible);
    if (res == THREAD_WAITING) {
        lck_rw_type = lck_rw_done(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
            if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
                lck_rw_lock(lck, lck_rw_type);
            else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
                lck_rw_lock_exclusive(lck);
            else
                lck_rw_lock_shared(lck);
        }
    }
    else
    if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        (void)lck_rw_done(lck);

    return res;
}


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
    lck_rw_t		*lck,
    lck_sleep_action_t	lck_sleep_action,
    event_t		event,
    wait_interrupt_t	interruptible,
    uint64_t		deadline)
{
    wait_result_t	res;
    lck_rw_type_t	lck_rw_type;

    if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
        panic("Invalid lock sleep action %x\n", lck_sleep_action);

    res = assert_wait_deadline(event, interruptible, deadline);
    if (res == THREAD_WAITING) {
        lck_rw_type = lck_rw_done(lck);
        res = thread_block(THREAD_CONTINUE_NULL);
        if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
            if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
                lck_rw_lock(lck, lck_rw_type);
            else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
                lck_rw_lock_exclusive(lck);
            else
                lck_rw_lock_shared(lck);
        }
    }
    else
    if (lck_sleep_action & LCK_SLEEP_UNLOCK)
        (void)lck_rw_done(lck);

    return res;
}

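/*
 * Routine:	host_lockgroup_info
 *
 *	Returns an array of lockgroup_info structures, one per registered
 *	lock group, copied out of the kernel via a vm_map_copy_t.
 */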
kern_return_t
host_lockgroup_info(
    host_t			host,
    lockgroup_info_array_t	*lockgroup_infop,
    mach_msg_type_number_t	*lockgroup_infoCntp)
{
    lockgroup_info_t	*lockgroup_info_base;
    lockgroup_info_t	*lockgroup_info;
    vm_offset_t		lockgroup_info_addr;
    vm_size_t		lockgroup_info_size;
    lck_grp_t		*lck_grp;
    unsigned int	i;
    vm_size_t		used;
    vm_map_copy_t	copy;
    kern_return_t	kr;

    if (host == HOST_NULL)
        return KERN_INVALID_HOST;

    mutex_lock(&lck_grp_lock);

    lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
    kr = kmem_alloc_pageable(ipc_kernel_map,
                             &lockgroup_info_addr, lockgroup_info_size);
    if (kr != KERN_SUCCESS) {
        mutex_unlock(&lck_grp_lock);
        return(kr);
    }

    lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
    lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
    lockgroup_info = lockgroup_info_base;

    for (i = 0; i < lck_grp_cnt; i++) {

        lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
        lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
        lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
        lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
        lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
        lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

        lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
        lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
        lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
        lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
        lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
        lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
        lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
        lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
        lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

        lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
        lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
        lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
        lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
        lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
        lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
        lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
        lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
        lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

        (void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

        lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
        lockgroup_info++;
    }

    *lockgroup_infoCntp = lck_grp_cnt;
    mutex_unlock(&lck_grp_lock);

    used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

    if (used != lockgroup_info_size)
        bzero((char *) lockgroup_info, lockgroup_info_size - used);

    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
                       (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
    assert(kr == KERN_SUCCESS);

    *lockgroup_infop = (lockgroup_info_t *) copy;

    return(KERN_SUCCESS);
}

/*
 * Compatibility module
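 *
 *	Thin wrappers that map the legacy lock_* / mutex_* / usimple_*
 *	interfaces onto the lck_* primitives, using the LockCompatGroup
 *	lock group created in lck_mod_init.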
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
    event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
extern void mutex_free_EXT(lck_mtx_t *mutex);
extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
extern void mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t thread_sleep_mutex_EXT(
    event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t thread_sleep_mutex_deadline_EXT(
    event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
    __unused boolean_t		can_sleep,
    __unused unsigned short	tag0,
    __unused unsigned short	tag1)
{
    return( lck_rw_alloc_init( &LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
    lck_rw_t	*lock)
{
    (void) lck_rw_done(lock);
}

void
lock_free_EXT(
    lck_rw_t	*lock)
{
    lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
    lck_rw_t			*lock,
    __unused boolean_t		can_sleep,
    __unused unsigned short	tag0,
    __unused unsigned short	tag1)
{
    lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
    lck_rw_t	*lock)
{
    lck_rw_lock_shared( lock);
}

boolean_t
lock_read_to_write_EXT(
    lck_rw_t	*lock)
{
    return( lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
    lck_rw_t	*lock)
{
    lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
    lck_rw_t	*lock)
{
    lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
    event_t		event,
    lck_rw_t		*lock,
    wait_interrupt_t	interruptible)
{
    return( lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
    __unused unsigned short	tag)
{
    return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
    lck_mtx_t	*mutex)
{
    lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
    lck_mtx_t			*mutex,
    __unused unsigned short	tag)
{
    lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
    lck_mtx_t	*mutex)
{
    lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
    lck_mtx_t	*mutex)
{
    return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
    lck_mtx_t	*mutex)
{
    lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
    event_t		event,
    lck_mtx_t		*mutex,
    wait_interrupt_t	interruptible)
{
    return( lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
    event_t		event,
    lck_mtx_t		*mutex,
    uint64_t		deadline,
    wait_interrupt_t	interruptible)
{
    return( lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
    lck_spin_t	*lock)
{
    lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
    lck_spin_t			*lock,
    __unused unsigned short	tag)
{
    lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
    lck_spin_t	*lock)
{
    return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
    lck_spin_t	*lock)
{
    lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
    event_t		event,
    lck_spin_t		*lock,
    wait_interrupt_t	interruptible)
{
    return( lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}