apple/xnu xnu-3248.30.4: osfmk/kern/locks.c

diff --git a/osfmk/kern/locks.c b/osfmk/kern/locks.c
index 87189b89f693d89f46a5dfec3fc1936df9a271f7..2f782cd124a016d2f090480372c05df1a26e74a1 100644
--- a/osfmk/kern/locks.c
+++ b/osfmk/kern/locks.c
@@ -87,7 +87,6 @@
 #define        LCK_MTX_LCK_WAIT_CODE           2
 #define        LCK_MTX_UNLCK_WAKEUP_CODE       3
 
-
 static queue_head_t    lck_grp_queue;
 static unsigned int    lck_grp_cnt;
 
@@ -196,7 +195,7 @@ lck_grp_attr_free(
 
 
 /*
- * Routine:    lck_grp_alloc_init
+ * Routine: lck_grp_alloc_init
  */
 
 lck_grp_t *
@@ -212,27 +211,23 @@ lck_grp_alloc_init(
        return(grp);
 }
 
-
 /*
- * Routine:    lck_grp_init
+ * Routine: lck_grp_init
  */
 
 void
-lck_grp_init(
-       lck_grp_t               *grp,               
-       const char*             grp_name,           
-       lck_grp_attr_t  *attr)             
+lck_grp_init(lck_grp_t * grp, const char * grp_name, lck_grp_attr_t * attr)
 {
        bzero((void *)grp, sizeof(lck_grp_t));
 
-       (void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);
+       (void)strlcpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);
 
        if (attr != LCK_GRP_ATTR_NULL)
                grp->lck_grp_attr = attr->grp_attr_val;
        else if (LcksOpts & enaLkStat)
-                grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
-        else
-                grp->lck_grp_attr = LCK_ATTR_NONE;
+               grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
+       else
+               grp->lck_grp_attr = LCK_ATTR_NONE;
 
        grp->lck_grp_refcnt = 1;
 
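
Note on the strncpy → strlcpy switch above: strncpy() does not NUL-terminate the destination when the source is at least as long as the bound, so a maximal-length group name could leave lck_grp_name unterminated. strlcpy() always terminates. A minimal userspace sketch of the difference (buffer size and name are illustrative, not kernel values):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[8];
		const char *src = "much_too_long_name";

		strncpy(buf, src, sizeof(buf));
		/* buf now holds 8 bytes of "much_too" with NO terminator:
		 * printing it with %s would read past the end of buf */

		strlcpy(buf, src, sizeof(buf));
		printf("%s\n", buf);	/* "much_to": truncated but terminated */
		return 0;
	}

strlcpy() is a BSD extension, available in xnu and macOS libc; on glibc systems the sketch needs a substitute.
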
@@ -240,10 +235,8 @@ lck_grp_init(
        enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
        lck_grp_cnt++;
        lck_mtx_unlock(&lck_grp_lock);
-
 }
 
-
 /*
  * Routine:    lck_grp_free
  */
@@ -484,6 +477,39 @@ lck_spin_sleep_deadline(
 }
 
 
+/*
+ * Routine:    lck_mtx_clear_promoted
+ *
+ * Handle clearing of TH_SFLAG_PROMOTED,
+ * adjusting thread priority as needed.
+ *
+ * Called with thread lock held
+ */
+static void
+lck_mtx_clear_promoted (
+       thread_t                        thread,
+       __kdebug_only uintptr_t         trace_lck)
+{
+       thread->sched_flags &= ~TH_SFLAG_PROMOTED;
+
+       if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
+               /* Thread still has a RW lock promotion */
+       } else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
+               KERNEL_DEBUG_CONSTANT(
+                       MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
+                               thread->sched_pri, DEPRESSPRI, 0, trace_lck, 0);
+               set_sched_pri(thread, DEPRESSPRI);
+       } else {
+               if (thread->base_pri < thread->sched_pri) {
+                       KERNEL_DEBUG_CONSTANT(
+                               MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
+                                       thread->sched_pri, thread->base_pri, 0, trace_lck, 0);
+               }
+               thread_recompute_sched_pri(thread, FALSE);
+       }
+}
+
+
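
The new lck_mtx_clear_promoted() factors out the demotion ladder that previously sat inline in lck_mtx_unlock_wakeup(): keep the current priority if a reader-writer lock promotion is still in force, drop to DEPRESSPRI if the thread is priority-depressed, and otherwise recompute from the base priority. Both call sites in this diff invoke it under the same locking discipline; a minimal sketch of that convention, using the names shown here:

	spl_t	s = splsched();		/* thread locks are spin locks: block interrupts */
	thread_lock(thread);
	if (--thread->promotions == 0 && (thread->sched_flags & TH_SFLAG_PROMOTED))
		lck_mtx_clear_promoted(thread, trace_lck);
	thread_unlock(thread);
	splx(s);
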
 /*
  * Routine:    lck_mtx_sleep
  */
@@ -498,7 +524,7 @@ lck_mtx_sleep(
        thread_t                thread = current_thread();
  
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
-                    (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);
+                    VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0);
 
        if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
                panic("Invalid lock sleep action %x\n", lck_sleep_action);
@@ -556,7 +582,7 @@ lck_mtx_sleep_deadline(
        thread_t                thread = current_thread();
 
        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
-                    (int)lck, (int)lck_sleep_action, (int)event, (int)interruptible, 0);
+                    VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0);
 
        if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
                panic("Invalid lock sleep action %x\n", lck_sleep_action);
@@ -610,6 +636,8 @@ lck_mtx_lock_wait (
 {
        thread_t                self = current_thread();
        lck_mtx_t               *mutex;
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
+       __kdebug_only uintptr_t trace_holder = VM_KERNEL_UNSLIDE_OR_PERM(holder);
        integer_t               priority;
        spl_t                   s = splsched();
 #if    CONFIG_DTRACE
@@ -625,11 +653,11 @@ lck_mtx_lock_wait (
        else
                mutex = &lck->lck_mtx_ptr->lck_mtx;
 
-       KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);
+       KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, trace_lck, trace_holder, 0, 0, 0);
 
        priority = self->sched_pri;
-       if (priority < self->priority)
-               priority = self->priority;
+       if (priority < self->base_pri)
+               priority = self->base_pri;
        if (priority < BASEPRI_DEFAULT)
                priority = BASEPRI_DEFAULT;
 
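
The waiter's contribution to the holder's promotion is its scheduled priority, raised to its base priority (the thread field formerly named priority is base_pri as of this release) and floored at BASEPRI_DEFAULT. Equivalently:

	/* promotion floor: never below BASEPRI_DEFAULT (31, the default
	 * timeshare base priority) */
	priority = MAX(MAX(self->sched_pri, self->base_pri), BASEPRI_DEFAULT);
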
@@ -640,11 +668,10 @@ lck_mtx_lock_wait (
        if (mutex->lck_mtx_pri == 0)
                holder->promotions++;
        holder->sched_flags |= TH_SFLAG_PROMOTED;
-       if (            mutex->lck_mtx_pri < priority   &&
-                               holder->sched_pri < priority            ) {
+       if (mutex->lck_mtx_pri < priority && holder->sched_pri < priority) {
                KERNEL_DEBUG_CONSTANT(
                        MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
-                                       holder->sched_pri, priority, holder, lck, 0);
+                                       holder->sched_pri, priority, trace_holder, trace_lck, 0);
                set_sched_pri(holder, priority);
        }
        thread_unlock(holder);
@@ -662,7 +689,7 @@ lck_mtx_lock_wait (
                mutex->lck_mtx_waiters++;
        }
 
-       assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
+       assert_wait(LCK_MTX_EVENT(mutex), THREAD_UNINT);
        lck_mtx_ilk_unlock(mutex);
 
        thread_block(THREAD_CONTINUE_NULL);
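
The open-coded wait event, i.e. the address of the last natural word inside the mutex, is replaced by the LCK_MTX_EVENT() macro here and in the wakeup paths below. Judging from the removed line, the macro presumably expands to the same expression (the real definition lives in the lock headers):

	/* hypothetical expansion, inferred from the line removed above */
	#define LCK_MTX_EVENT(lck)						\
		((event_t)(((unsigned int *)(lck)) +				\
		    ((sizeof(lck_mtx_t) - 1) / sizeof(unsigned int))))
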
@@ -701,6 +728,9 @@ lck_mtx_lock_acquire(
 {
        thread_t                thread = current_thread();
        lck_mtx_t               *mutex;
+       integer_t               priority;
+       spl_t                   s;
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
 
        if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
                mutex = lck;
@@ -714,26 +744,38 @@ lck_mtx_lock_acquire(
                mutex->lck_mtx_waiters--;
        }
 
-       if (mutex->lck_mtx_waiters > 0) {
-               integer_t               priority = mutex->lck_mtx_pri;
-               spl_t                   s = splsched();
+       if (mutex->lck_mtx_waiters)
+               priority = mutex->lck_mtx_pri;
+       else {
+               mutex->lck_mtx_pri = 0;
+               priority = 0;
+       }
 
+       if (priority || thread->was_promoted_on_wakeup) {
+               s = splsched();
                thread_lock(thread);
-               thread->promotions++;
-               thread->sched_flags |= TH_SFLAG_PROMOTED;
-               if (thread->sched_pri < priority) {
-                       KERNEL_DEBUG_CONSTANT(
-                               MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
-                                               thread->sched_pri, priority, 0, lck, 0);
-                       /* Do not promote past promotion ceiling */
-                       assert(priority <= MAXPRI_PROMOTE);
-                       set_sched_pri(thread, priority);
+
+               if (priority) {
+                       thread->promotions++;
+                       thread->sched_flags |= TH_SFLAG_PROMOTED;
+                       if (thread->sched_pri < priority) {
+                               KERNEL_DEBUG_CONSTANT(
+                                       MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
+                                                       thread->sched_pri, priority, 0, trace_lck, 0);
+                               /* Do not promote past promotion ceiling */
+                               assert(priority <= MAXPRI_PROMOTE);
+                               set_sched_pri(thread, priority);
+                       }
+               }
+               if (thread->was_promoted_on_wakeup) {
+                       thread->was_promoted_on_wakeup = 0;
+                       if (thread->promotions == 0)
+                               lck_mtx_clear_promoted(thread, trace_lck);
                }
+
                thread_unlock(thread);
                splx(s);
        }
-       else
-               mutex->lck_mtx_pri = 0;
 
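
The reworked acquire path folds two promotion sources into a single thread-lock critical section: the priority inherited from remaining waiters (lck_mtx_pri) and the new was_promoted_on_wakeup flag, which appears to mark a thread that thread_wakeup_one_with_pri() promoted before it ran. Condensed, the logic above is:

	/* condensed sketch of the acquire-side promotion handling */
	priority = mutex->lck_mtx_waiters ? mutex->lck_mtx_pri : 0;
	if (priority || thread->was_promoted_on_wakeup) {
		if (priority && thread->sched_pri < priority)
			set_sched_pri(thread, priority);	/* capped at MAXPRI_PROMOTE */
		if (thread->was_promoted_on_wakeup) {
			thread->was_promoted_on_wakeup = 0;
			if (thread->promotions == 0)	/* wakeup boost with no ref left */
				lck_mtx_clear_promoted(thread, trace_lck);
		}
	}

Note the condensed form drops the promotions++ / TH_SFLAG_PROMOTED bookkeeping shown in the hunk.
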
 #if CONFIG_DTRACE
        if (lockstat_probemap[LS_LCK_MTX_LOCK_ACQUIRE] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_ACQUIRE]) {
@@ -761,6 +803,7 @@ lck_mtx_unlock_wakeup (
 {
        thread_t                thread = current_thread();
        lck_mtx_t               *mutex;
+       __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
 
        if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
                mutex = lck;
@@ -770,40 +813,20 @@ lck_mtx_unlock_wakeup (
        if (thread != holder)
                panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);
 
-       KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);
+       KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(holder), 0, 0, 0);
 
        assert(mutex->lck_mtx_waiters > 0);
-       thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));
+       if (mutex->lck_mtx_waiters > 1)
+               thread_wakeup_one_with_pri(LCK_MTX_EVENT(lck), lck->lck_mtx_pri);
+       else
+               thread_wakeup_one(LCK_MTX_EVENT(lck));
 
        if (thread->promotions > 0) {
                spl_t           s = splsched();
 
                thread_lock(thread);
-               if (    --thread->promotions == 0                               &&
-                               (thread->sched_flags & TH_SFLAG_PROMOTED)               ) {
-                       thread->sched_flags &= ~TH_SFLAG_PROMOTED;
-
-                       if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
-                               /* Thread still has a RW lock promotion */
-                       } else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
-                               KERNEL_DEBUG_CONSTANT(
-                                       MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
-                                                 thread->sched_pri, DEPRESSPRI, 0, lck, 0);
-
-                               set_sched_pri(thread, DEPRESSPRI);
-                       }
-                       else {
-                               if (thread->priority < thread->sched_pri) {
-                                       KERNEL_DEBUG_CONSTANT(
-                                               MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
-                                                                                                                       DBG_FUNC_NONE,
-                                                       thread->sched_pri, thread->priority,
-                                                                       0, lck, 0);
-                               }
-
-                               SCHED(compute_priority)(thread, FALSE);
-                       }
-               }
+               if (--thread->promotions == 0 && (thread->sched_flags & TH_SFLAG_PROMOTED))
+                       lck_mtx_clear_promoted(thread, trace_lck);
                thread_unlock(thread);
                splx(s);
        }
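
On the unlock side the wakeup now distinguishes waiter counts. A restatement of the choice with the rationale spelled out, assuming thread_wakeup_one_with_pri() hands the priority to the chosen thread before it runs:

	if (mutex->lck_mtx_waiters > 1)
		/* more threads stay queued: the new owner must keep running at
		 * the inherited ceiling, so the priority travels with the wakeup */
		thread_wakeup_one_with_pri(LCK_MTX_EVENT(lck), lck->lck_mtx_pri);
	else
		/* last waiter: lck_mtx_lock_acquire() will zero lck_mtx_pri,
		 * so a plain wakeup suffices */
		thread_wakeup_one(LCK_MTX_EVENT(lck));

The holder's own demotion then reuses lck_mtx_clear_promoted() in place of the inline ladder deleted above.
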
@@ -816,9 +839,9 @@ lck_mtx_unlockspin_wakeup (
        lck_mtx_t                       *lck)
 {
        assert(lck->lck_mtx_waiters > 0);
-       thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));
+       thread_wakeup_one(LCK_MTX_EVENT(lck));
 
-       KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, (int)lck, 0, 0, 1, 0);
+       KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(lck), 0, 0, 1, 0);
 #if CONFIG_DTRACE
        /*
         * When there are waiters, we skip the hot-patch spot in the
@@ -1085,9 +1108,9 @@ void lck_rw_clear_promotion(thread_t thread)
                        set_sched_pri(thread, DEPRESSPRI);
                } else {
                        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
-                                                                 thread->sched_pri, thread->priority, 0, 0, 0);
+                                                                 thread->sched_pri, thread->base_pri, 0, 0, 0);
                        
-                       SCHED(compute_priority)(thread, FALSE);
+                       thread_recompute_sched_pri(thread, FALSE);
                }
        }
 
@@ -1105,9 +1128,9 @@ host_lockgroup_info(
        lockgroup_info_t        *lockgroup_info;
        vm_offset_t                     lockgroup_info_addr;
        vm_size_t                       lockgroup_info_size;
+       vm_size_t                       lockgroup_info_vmsize;
        lck_grp_t                       *lck_grp;
        unsigned int            i;
-       vm_size_t                       used;
        vm_map_copy_t           copy;
        kern_return_t           kr;
 
@@ -1116,9 +1139,10 @@ host_lockgroup_info(
 
        lck_mtx_lock(&lck_grp_lock);
 
-       lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
+       lockgroup_info_size = lck_grp_cnt * sizeof(*lockgroup_info);
+       lockgroup_info_vmsize = round_page(lockgroup_info_size);
        kr = kmem_alloc_pageable(ipc_kernel_map,
-                                                &lockgroup_info_addr, lockgroup_info_size);
+                                                &lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC);
        if (kr != KERN_SUCCESS) {
                lck_mtx_unlock(&lck_grp_lock);
                return(kr);
@@ -1166,10 +1190,8 @@ host_lockgroup_info(
        *lockgroup_infoCntp = lck_grp_cnt;
        lck_mtx_unlock(&lck_grp_lock);
 
-       used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;
-
-       if (used != lockgroup_info_size)
-               bzero((char *) lockgroup_info, lockgroup_info_size - used);
+       if (lockgroup_info_size != lockgroup_info_vmsize)
+               bzero((char *)lockgroup_info, lockgroup_info_vmsize - lockgroup_info_size);
 
        kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
                           (vm_map_size_t)lockgroup_info_size, TRUE, &copy);
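
The host_lockgroup_info() changes keep two sizes instead of recomputing a used byte count afterward: the exact size (lck_grp_cnt records) and the page-rounded allocation size, zeroing only the slack between them before the copy-out so no stale kernel memory reaches user space. A toy model of the arithmetic, with an assumed 4 KiB page and an illustrative record size (not the real sizeof(lockgroup_info_t)):

	#include <stdio.h>

	#define PAGE_SIZE	4096u
	#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1))

	int main(void)
	{
		size_t record = 424;	/* hypothetical record size */
		size_t count  = 200;	/* hypothetical lck_grp_cnt */
		size_t size   = count * record;		/* 84800 bytes used */
		size_t vmsize = round_page(size);	/* 86016 bytes allocated */

		printf("slack zeroed before copy-out: %zu bytes\n", vmsize - size);
		return 0;	/* prints 1216 */
	}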