/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <debug.h>

#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>

#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <string.h>


#include <sys/kdebug.h>

#define	LCK_MTX_SLEEP_CODE		0
#define	LCK_MTX_SLEEP_DEADLINE_CODE	1
#define	LCK_MTX_LCK_WAIT_CODE		2
#define	LCK_MTX_UNLCK_WAKEUP_CODE	3


static queue_head_t	lck_grp_queue;
static unsigned int	lck_grp_cnt;

decl_mutex_data(static,lck_grp_lock)

lck_grp_attr_t	LockDefaultGroupAttr;
lck_grp_t	LockCompatGroup;
lck_attr_t	LockDefaultLckAttr;

/*
 * Routine:	lck_mod_init
 */

void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_grp_init(&LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}

/*
 * Routine:	lck_grp_attr_alloc_init
 */

lck_grp_attr_t *
lck_grp_attr_alloc_init(
	void)
{
	lck_grp_attr_t	*attr;

	if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0)
		lck_grp_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_grp_attr_setdefault
 */

void
lck_grp_attr_setdefault(
	lck_grp_attr_t	*attr)
{
	if (LcksOpts & enaLkStat)
		attr->grp_attr_val = LCK_GRP_ATTR_STAT;
	else
		attr->grp_attr_val = 0;
}


/*
 * Routine:	lck_grp_attr_setstat
 */

void
lck_grp_attr_setstat(
	lck_grp_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->grp_attr_val, LCK_GRP_ATTR_STAT);
}


/*
 * Routine:	lck_grp_attr_free
 */

void
lck_grp_attr_free(
	lck_grp_attr_t	*attr)
{
	kfree(attr, sizeof(lck_grp_attr_t));
}


/*
 * Routine:	lck_grp_alloc_init
 */

lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}


/*
 * Routine:	lck_grp_init
 */

void
lck_grp_init(
	lck_grp_t	*grp,
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	mutex_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	mutex_unlock(&lck_grp_lock);
}


/*
 * Routine:	lck_grp_free
 */

void
lck_grp_free(
	lck_grp_t	*grp)
{
	mutex_lock(&lck_grp_lock);
	lck_grp_cnt--;
	(void)remque((queue_entry_t)grp);
	mutex_unlock(&lck_grp_lock);
	lck_grp_deallocate(grp);
}


/*
 * Routine:	lck_grp_reference
 */

void
lck_grp_reference(
	lck_grp_t	*grp)
{
	(void)hw_atomic_add((uint32_t *)(&grp->lck_grp_refcnt), 1);
}


/*
 * Routine:	lck_grp_deallocate
 */

void
lck_grp_deallocate(
	lck_grp_t	*grp)
{
	if (hw_atomic_sub((uint32_t *)(&grp->lck_grp_refcnt), 1) == 0)
		kfree(grp, sizeof(lck_grp_t));
}

/*
 * Routine:	lck_grp_lckcnt_incr
 */

void
lck_grp_lckcnt_incr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_add((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_grp_lckcnt_decr
 */

void
lck_grp_lckcnt_decr(
	lck_grp_t	*grp,
	lck_type_t	lck_type)
{
	unsigned int	*lckcnt;

	switch (lck_type) {
	case LCK_TYPE_SPIN:
		lckcnt = &grp->lck_grp_spincnt;
		break;
	case LCK_TYPE_MTX:
		lckcnt = &grp->lck_grp_mtxcnt;
		break;
	case LCK_TYPE_RW:
		lckcnt = &grp->lck_grp_rwcnt;
		break;
	default:
		panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
		return;
	}

	(void)hw_atomic_sub((uint32_t *)lckcnt, 1);
}

/*
 * Routine:	lck_attr_alloc_init
 */

lck_attr_t *
lck_attr_alloc_init(
	void)
{
	lck_attr_t	*attr;

	if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0)
		lck_attr_setdefault(attr);

	return(attr);
}


/*
 * Routine:	lck_attr_setdefault
 */

void
lck_attr_setdefault(
	lck_attr_t	*attr)
{
#if !DEBUG
	if (LcksOpts & enaLkDeb)
		attr->lck_attr_val = LCK_ATTR_DEBUG;
	else
		attr->lck_attr_val = LCK_ATTR_NONE;
#else
	attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif
}


/*
 * Routine:	lck_attr_setdebug
 */
void
lck_attr_setdebug(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_DEBUG);
}


/*
 * Routine:	lck_attr_rw_shared_priority
 */
void
lck_attr_rw_shared_priority(
	lck_attr_t	*attr)
{
	(void)hw_atomic_or((uint32_t *)&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY);
}


/*
 * Routine:	lck_attr_free
 */
void
lck_attr_free(
	lck_attr_t	*attr)
{
	kfree(attr, sizeof(lck_attr_t));
}
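
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a typical client of the group/attribute APIs above allocates
 * a group and attributes once, then creates and destroys locks against
 * them.  The name "example" is hypothetical.
 *
 *	lck_grp_attr_t	*grp_attr = lck_grp_attr_alloc_init();
 *	lck_grp_t	*grp      = lck_grp_alloc_init("example", grp_attr);
 *	lck_attr_t	*attr     = lck_attr_alloc_init();
 *	lck_mtx_t	*mtx      = lck_mtx_alloc_init(grp, attr);
 *
 *	lck_mtx_lock(mtx);
 *	...
 *	lck_mtx_unlock(mtx);
 *
 *	lck_mtx_free(mtx, grp);
 *	lck_attr_free(attr);
 *	lck_grp_free(grp);
 *	lck_grp_attr_free(grp_attr);
 */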


/*
 * Routine:	lck_spin_sleep
 */
wait_result_t
lck_spin_sleep(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_spin_sleep_deadline
 */
wait_result_t
lck_spin_sleep_deadline(
	lck_spin_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_spin_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_spin_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_spin_unlock(lck);

	return res;
}


/*
 * Routine:	lck_mtx_sleep
 */
wait_result_t
lck_mtx_sleep(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}
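
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): with LCK_SLEEP_DEFAULT, lck_mtx_sleep re-acquires the mutex
 * before returning, so it supports the classic monitor pattern of
 * re-testing the condition in a loop.  "my_mtx", "my_flag" and
 * "my_event" are hypothetical.
 *
 *	lck_mtx_lock(my_mtx);
 *	while (!my_flag)
 *		(void) lck_mtx_sleep(my_mtx, LCK_SLEEP_DEFAULT,
 *				     (event_t)my_event, THREAD_UNINT);
 *	...			// condition holds, mutex still held
 *	lck_mtx_unlock(my_mtx);
 */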


/*
 * Routine:	lck_mtx_sleep_deadline
 */
wait_result_t
lck_mtx_sleep_deadline(
	lck_mtx_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
		     (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_mtx_unlock(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK))
			lck_mtx_lock(lck);
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		lck_mtx_unlock(lck);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);

	return res;
}

/*
 * Routine:	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	self = current_thread();
	lck_mtx_t	*mutex;
	integer_t	priority;
	spl_t		s = splsched();

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	/*
	 * Compute the priority to promote the holder to: the waiter's
	 * effective priority, clamped to [BASEPRI_DEFAULT, MINPRI_KERNEL].
	 */
	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority > MINPRI_KERNEL)
		priority = MINPRI_KERNEL;
	else if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	if (holder->priority < MINPRI_KERNEL) {
		holder->sched_mode |= TH_MODE_PROMOTED;
		if (mutex->lck_mtx_pri < priority &&
		    holder->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
				holder->sched_pri, priority, (int)holder, (int)lck, 0);

			set_sched_pri(holder, priority);
		}
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	/*
	 * Wait on the last word of the mutex; lck_mtx_unlock_wakeup
	 * posts the same event.
	 */
	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t	*lck)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		integer_t	priority = mutex->lck_mtx_pri;
		spl_t		s = splsched();

		thread_lock(thread);
		thread->promotions++;
		if (thread->priority < MINPRI_KERNEL) {
			thread->sched_mode |= TH_MODE_PROMOTED;
			if (thread->sched_pri < priority) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, priority, 0, (int)lck, 0);

				set_sched_pri(thread, priority);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}

/*
 * Routine:	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;


	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %x holder %x\n", mutex, holder);

	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (--thread->promotions == 0 &&
		    (thread->sched_mode & TH_MODE_PROMOTED)) {
			thread->sched_mode &= ~TH_MODE_PROMOTED;
			if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
					thread->sched_pri, DEPRESSPRI, 0, (int)lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						thread->sched_pri, thread->priority, 0, (int)lck, 0);
				}

				compute_priority(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	assert(mutex->lck_mtx_waiters > 0);
	/* Post the event lck_mtx_lock_wait sleeps on: the last word of the mutex. */
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}

/*
 * Routine:	mutex_pause
 *
 * Called by former callers of simple_lock_pause().
 */

void
mutex_pause(void)
{
	wait_result_t wait_result;

	wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
	assert(wait_result == THREAD_WAITING);

	wait_result = thread_block(THREAD_CONTINUE_NULL);
	assert(wait_result == THREAD_TIMED_OUT);
}

/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
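
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the sleep action controls how the rw lock is re-taken after
 * the wait.  LCK_SLEEP_DEFAULT re-acquires it in the mode it was held;
 * LCK_SLEEP_EXCLUSIVE forces an exclusive re-acquire, per the code
 * above.  "my_rw" and "my_event" are hypothetical.
 *
 *	lck_rw_lock_shared(my_rw);
 *	...
 *	(void) lck_rw_sleep(my_rw, LCK_SLEEP_EXCLUSIVE,
 *			    (event_t)my_event, THREAD_UNINT);
 *	...			// my_rw is now held exclusive
 *	lck_rw_done(my_rw);
 */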


/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}

/*
 * Routine:	host_lockgroup_info
 *
 * Returns a snapshot of the per-group lock statistics to user space.
 */
kern_return_t
host_lockgroup_info(
	host_t			host,
	lockgroup_info_array_t	*lockgroup_infop,
	mach_msg_type_number_t	*lockgroup_infoCntp)
{
	lockgroup_info_t	*lockgroup_info_base;
	lockgroup_info_t	*lockgroup_info;
	vm_offset_t		lockgroup_info_addr;
	vm_size_t		lockgroup_info_size;
	lck_grp_t		*lck_grp;
	unsigned int		i;
	vm_size_t		used;
	vm_map_copy_t		copy;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	mutex_lock(&lck_grp_lock);

	lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &lockgroup_info_addr, lockgroup_info_size);
	if (kr != KERN_SUCCESS) {
		mutex_unlock(&lck_grp_lock);
		return(kr);
	}

	lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
	lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
	lockgroup_info = lockgroup_info_base;

	for (i = 0; i < lck_grp_cnt; i++) {

		lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
		lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt;
		lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt;
		lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt;
		lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max;
		lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum;

		lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
		lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt;
		lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt;
		lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt;
		lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt;
		lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max;
		lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum;
		lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max;
		lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum;

		lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
		lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt;
		lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt;
		lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt;
		lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt;
		lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max;
		lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum;
		lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max;
		lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum;

		(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);

		lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
		lockgroup_info++;
	}

	*lockgroup_infoCntp = lck_grp_cnt;
	mutex_unlock(&lck_grp_lock);

	used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;

	/* Zero the unused tail of the last page before handing it out. */
	if (used != lockgroup_info_size)
		bzero((char *) lockgroup_info, lockgroup_info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
			   (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*lockgroup_infop = (lockgroup_info_t *) copy;

	return(KERN_SUCCESS);
}
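
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a hypothetical user-space caller of the interface above,
 * assuming the lockgroup_info MIG routine is exported in this
 * configuration:
 *
 *	lockgroup_info_array_t	info;
 *	mach_msg_type_number_t	count;
 *
 *	if (host_lockgroup_info(mach_host_self(), &info, &count) == KERN_SUCCESS) {
 *		for (unsigned int i = 0; i < count; i++)
 *			printf("%s\n", info[i].lockgroup_name);
 *		vm_deallocate(mach_task_self(), (vm_address_t)info,
 *			      count * sizeof(info[0]));
 *	}
 */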

/*
 * Compatibility module: legacy lock interfaces implemented on the
 * lck_* primitives above, charged to LockCompatGroup.
 */

extern lck_rw_t *lock_alloc_EXT(boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_done_EXT(lck_rw_t *lock);
extern void lock_free_EXT(lck_rw_t *lock);
extern void lock_init_EXT(lck_rw_t *lock, boolean_t can_sleep, unsigned short tag0, unsigned short tag1);
extern void lock_read_EXT(lck_rw_t *lock);
extern boolean_t lock_read_to_write_EXT(lck_rw_t *lock);
extern void lock_write_EXT(lck_rw_t *lock);
extern void lock_write_to_read_EXT(lck_rw_t *lock);
extern wait_result_t thread_sleep_lock_write_EXT(
	event_t event, lck_rw_t *lock, wait_interrupt_t interruptible);

extern lck_mtx_t *mutex_alloc_EXT(unsigned short tag);
extern void mutex_free_EXT(lck_mtx_t *mutex);
extern void mutex_init_EXT(lck_mtx_t *mutex, unsigned short tag);
extern void mutex_lock_EXT(lck_mtx_t *mutex);
extern boolean_t mutex_try_EXT(lck_mtx_t *mutex);
extern void mutex_unlock_EXT(lck_mtx_t *mutex);
extern wait_result_t thread_sleep_mutex_EXT(
	event_t event, lck_mtx_t *mutex, wait_interrupt_t interruptible);
extern wait_result_t thread_sleep_mutex_deadline_EXT(
	event_t event, lck_mtx_t *mutex, uint64_t deadline, wait_interrupt_t interruptible);

extern void usimple_lock_EXT(lck_spin_t *lock);
extern void usimple_lock_init_EXT(lck_spin_t *lock, unsigned short tag);
extern unsigned int usimple_lock_try_EXT(lck_spin_t *lock);
extern void usimple_unlock_EXT(lck_spin_t *lock);
extern wait_result_t thread_sleep_usimple_lock_EXT(event_t event, lck_spin_t *lock, wait_interrupt_t interruptible);

lck_rw_t *
lock_alloc_EXT(
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	return(lck_rw_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}

void
lock_free_EXT(
	lck_rw_t	*lock)
{
	lck_rw_free(lock, &LockCompatGroup);
}

void
lock_init_EXT(
	lck_rw_t		*lock,
	__unused boolean_t	can_sleep,
	__unused unsigned short	tag0,
	__unused unsigned short	tag1)
{
	lck_rw_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared(lock);
}

boolean_t
lock_read_to_write_EXT(
	lck_rw_t	*lock)
{
	return(lck_rw_lock_shared_to_exclusive(lock));
}

void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}

void
lock_write_to_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive_to_shared(lock);
}

wait_result_t
thread_sleep_lock_write_EXT(
	event_t			event,
	lck_rw_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_rw_sleep(lock, LCK_SLEEP_EXCLUSIVE, event, interruptible));
}

lck_mtx_t *
mutex_alloc_EXT(
	__unused unsigned short	tag)
{
	return(lck_mtx_alloc_init(&LockCompatGroup, LCK_ATTR_NULL));
}

void
mutex_free_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_free(mutex, &LockCompatGroup);
}

void
mutex_init_EXT(
	lck_mtx_t		*mutex,
	__unused unsigned short	tag)
{
	lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}

void
mutex_lock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_lock(mutex);
}

boolean_t
mutex_try_EXT(
	lck_mtx_t	*mutex)
{
	return(lck_mtx_try_lock(mutex));
}

void
mutex_unlock_EXT(
	lck_mtx_t	*mutex)
{
	lck_mtx_unlock(mutex);
}

wait_result_t
thread_sleep_mutex_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep(mutex, LCK_SLEEP_DEFAULT, event, interruptible));
}

wait_result_t
thread_sleep_mutex_deadline_EXT(
	event_t			event,
	lck_mtx_t		*mutex,
	uint64_t		deadline,
	wait_interrupt_t	interruptible)
{
	return(lck_mtx_sleep_deadline(mutex, LCK_SLEEP_DEFAULT, event, interruptible, deadline));
}

void
usimple_lock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_lock(lock);
}

void
usimple_lock_init_EXT(
	lck_spin_t		*lock,
	__unused unsigned short	tag)
{
	lck_spin_init(lock, &LockCompatGroup, LCK_ATTR_NULL);
}

unsigned int
usimple_lock_try_EXT(
	lck_spin_t	*lock)
{
	return(lck_spin_try_lock(lock));
}

void
usimple_unlock_EXT(
	lck_spin_t	*lock)
{
	lck_spin_unlock(lock);
}

wait_result_t
thread_sleep_usimple_lock_EXT(
	event_t			event,
	lck_spin_t		*lock,
	wait_interrupt_t	interruptible)
{
	return(lck_spin_sleep(lock, LCK_SLEEP_DEFAULT, event, interruptible));
}