diff --git a/osfmk/kern/waitq.c b/osfmk/kern/waitq.c
index e408f029b1606dc7346b91c5061f5390a33a9afb..1674f379aa0d8909d6e337ed3cd5539686683ffe 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -66,6 +66,7 @@
 #include <kern/kern_types.h>
 #include <kern/ltable.h>
 #include <kern/mach_param.h>
+#include <kern/percpu.h>
 #include <kern/queue.h>
 #include <kern/sched_prim.h>
 #include <kern/simple_lock.h>
@@ -122,9 +123,16 @@ static kern_return_t waitq_select_thread_locked(struct waitq *waitq,
     event64_t event,
     thread_t thread, spl_t *spl);
 
-#define WAITQ_SET_MAX (task_max * 3)
-static zone_t waitq_set_zone;
+ZONE_DECLARE(waitq_set_zone, "waitq sets",
+    sizeof(struct waitq_set), ZC_NOENCRYPT);
 
+/* waitq prepost cache */
+#define WQP_CACHE_MAX   50
+struct wqp_cache {
+       uint64_t        head;
+       unsigned int    avail;
+};
+static struct wqp_cache PERCPU_DATA(wqp_cache);
 
 #define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
 #define ROUNDDOWN(x, y)  (((x)/(y))*(y))
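
Note: the hunks below repeatedly swap PROCESSOR_DATA() lookups for the percpu interface introduced here. A minimal sketch of the access pattern, using only names visible in this diff (the function name is illustrative, not from the file):

static void
wqp_cache_touch_example(void)
{
        struct wqp_cache *cache;

        disable_preemption();                   /* pin to the current CPU */
        cache = PERCPU_GET(wqp_cache);          /* replaces PROCESSOR_DATA(current_processor(), wqp_cache) */
        if (cache->avail < WQP_CACHE_MAX) {
                /* refill or consume cached prepost elements here */
        }
        enable_preemption();
}

PERCPU_DATA(wqp_cache) above gives each CPU its own instance; PERCPU_GET returns the current CPU's copy, so preemption stays disabled while the pointer is in use.
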
@@ -134,7 +142,7 @@ static zone_t waitq_set_zone;
 static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip);
 #endif
 
-lck_grp_t waitq_lck_grp;
+LCK_GRP_DECLARE(waitq_lck_grp, "waitq");
 
 #if __arm64__
 
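For reference, this declaration pairs with the removal of the runtime lck_grp_init() call from waitq_bootstrap() further down. A side-by-side recap of the two forms as they appear in this diff:

/* old: zero-filled global plus explicit initialization at boot */
lck_grp_t waitq_lck_grp;
lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL);

/* new: the group is declared up front; no lck_grp_init() call is needed */
LCK_GRP_DECLARE(waitq_lck_grp, "waitq");
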
@@ -744,7 +752,7 @@ wq_prepost_refill_cpu_cache(uint32_t nalloc)
        }
 
        disable_preemption();
-       cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+       cache = PERCPU_GET(wqp_cache);
 
        /* check once more before putting these elements on the list */
        if (cache->avail >= WQP_CACHE_MAX) {
@@ -776,14 +784,14 @@ wq_prepost_ensure_free_space(void)
        struct wqp_cache *cache;
 
        if (g_min_free_cache == 0) {
-               g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus());
+               g_min_free_cache = (WQP_CACHE_MAX * ml_wait_max_cpus());
        }
 
        /*
         * Ensure that we always have a pool of per-CPU prepost elements
         */
        disable_preemption();
-       cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+       cache = PERCPU_GET(wqp_cache);
        free_elem = cache->avail;
        enable_preemption();
 
@@ -828,7 +836,7 @@ wq_prepost_alloc(int type, int nelem)
         * allocating RESERVED elements
         */
        disable_preemption();
-       cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+       cache = PERCPU_GET(wqp_cache);
        if (nelem <= (int)cache->avail) {
                struct lt_elem *first, *next = NULL;
                int nalloc = nelem;
@@ -1048,7 +1056,7 @@ wq_prepost_release_rlist(struct wq_prepost *wqp)
         * if our cache is running low.
         */
        disable_preemption();
-       cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+       cache = PERCPU_GET(wqp_cache);
        if (cache->avail < WQP_CACHE_MAX) {
                struct lt_elem *tmp = NULL;
                if (cache->head != LT_IDX_MAX) {
@@ -1121,14 +1129,14 @@ restart:
                        /* the caller wants to remove the only prepost here */
                        assert(wqp_id == wqset->wqset_prepost_id);
                        wqset->wqset_prepost_id = 0;
-               /* fall through */
+                       OS_FALLTHROUGH;
                case WQ_ITERATE_CONTINUE:
                        wq_prepost_put(wqp);
                        ret = WQ_ITERATE_SUCCESS;
                        break;
                case WQ_ITERATE_RESTART:
                        wq_prepost_put(wqp);
-               /* fall through */
+                       OS_FALLTHROUGH;
                case WQ_ITERATE_DROPPED:
                        goto restart;
                default:
@@ -1196,7 +1204,7 @@ restart:
                        goto next_prepost;
                case WQ_ITERATE_RESTART:
                        wq_prepost_put(wqp);
-               /* fall-through */
+                       OS_FALLTHROUGH;
                case WQ_ITERATE_DROPPED:
                        /* the callback dropped the ref to wqp: just restart */
                        goto restart;
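
The two hunks above replace fall-through comments with the OS_FALLTHROUGH annotation, which the compiler can check. A minimal illustration of the pattern, reusing the case labels visible in these hunks:

switch (ret) {
case WQ_ITERATE_RESTART:
        wq_prepost_put(wqp);
        OS_FALLTHROUGH;         /* annotated: intentionally runs the next case */
case WQ_ITERATE_DROPPED:
        goto restart;
default:
        break;
}
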
@@ -1882,8 +1890,7 @@ waitq_thread_remove(struct waitq *wq,
                    VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)),
                    thread_tid(thread),
                    0, 0, 0);
-               priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links,
-                   PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
+               priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links);
        } else {
                remqueue(&(thread->wait_links));
        }
@@ -1901,8 +1908,6 @@ waitq_bootstrap(void)
        }
        wqdbg("Minimum free table elements: %d", tmp32);
 
-       lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL);
-
        /*
         * Determine the amount of memory we're willing to reserve for
         * the waitqueue hash table
@@ -1953,12 +1958,6 @@ waitq_bootstrap(void)
                waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
        }
 
-       waitq_set_zone = zinit(sizeof(struct waitq_set),
-           WAITQ_SET_MAX * sizeof(struct waitq_set),
-           sizeof(struct waitq_set),
-           "waitq sets");
-       zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE);
-
        /* initialize the global waitq link table */
        wql_init();
 
@@ -2150,13 +2149,13 @@ waitq_select_walk_cb(struct waitq *waitq, void *ctx,
                         */
 
                        disable_preemption();
-                       os_atomic_inc(hook, relaxed);
+                       os_atomic_add(hook, (uint16_t)1, relaxed);
                        waitq_set_unlock(wqset);
 
                        waitq_set__CALLING_PREPOST_HOOK__(hook);
 
                        /* Note: after this decrement, the wqset may be deallocated */
-                       os_atomic_dec(hook, relaxed);
+                       os_atomic_add(hook, (uint16_t)-1, relaxed);
                        enable_preemption();
                        return ret;
                }
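
The explicit (uint16_t) operands above imply the prepost hook counter is a 16-bit field. A hypothetical, minimal restatement of the take-ref / callout / drop-ref sequence (the hook's pointer type and the function name here are assumptions, not taken from this diff):

static void
prepost_hook_callout_sketch(uint16_t *hook, struct waitq_set *wqset)
{
        disable_preemption();
        os_atomic_add(hook, (uint16_t)1, relaxed);      /* keep the hook live across the unlock */
        waitq_set_unlock(wqset);

        waitq_set__CALLING_PREPOST_HOOK__(hook);

        /* after this decrement the wqset may already be deallocated */
        os_atomic_add(hook, (uint16_t)-1, relaxed);
        enable_preemption();
}
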
@@ -2297,8 +2296,7 @@ waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq,
 
                if (remove_op) {
                        thread = priority_queue_remove_max(&safeq->waitq_prio_queue,
-                           struct thread, wait_prioq_links,
-                           PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
+                           struct thread, wait_prioq_links);
                } else {
                        /* For the peek operation, the only valid value for max_threads is 1 */
                        assert(max_threads == 1);
@@ -3096,7 +3094,7 @@ waitq_wakeup64_all_locked(struct waitq *waitq,
                assert_thread_magic(thread);
                remqueue(&thread->wait_links);
                maybe_adjust_thread_pri(thread, priority, waitq);
-               ret = thread_go(thread, result);
+               ret = thread_go(thread, result, WQ_OPTION_NONE);
                assert(ret == KERN_SUCCESS);
                thread_unlock(thread);
        }
@@ -3124,7 +3122,8 @@ waitq_wakeup64_one_locked(struct waitq *waitq,
     wait_result_t result,
     uint64_t *reserved_preposts,
     int priority,
-    waitq_lock_state_t lock_state)
+    waitq_lock_state_t lock_state,
+    waitq_options_t option)
 {
        thread_t thread;
        spl_t th_spl;
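
For clarity, the old and new shapes of this prototype; the wake_event parameter is not visible in the hunk above and its type here follows the caller and the other prototypes in this file. Existing callers pass WQ_OPTION_NONE to keep the previous behavior:

/* old */
waitq_wakeup64_one_locked(struct waitq *waitq, event64_t wake_event,
    wait_result_t result, uint64_t *reserved_preposts, int priority,
    waitq_lock_state_t lock_state);

/* new: one extra parameter, forwarded to thread_go() */
waitq_wakeup64_one_locked(struct waitq *waitq, event64_t wake_event,
    wait_result_t result, uint64_t *reserved_preposts, int priority,
    waitq_lock_state_t lock_state, waitq_options_t option);
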
@@ -3147,7 +3146,7 @@ waitq_wakeup64_one_locked(struct waitq *waitq,
 
        if (thread != THREAD_NULL) {
                maybe_adjust_thread_pri(thread, priority, waitq);
-               kern_return_t ret = thread_go(thread, result);
+               kern_return_t ret = thread_go(thread, result, option);
                assert(ret == KERN_SUCCESS);
                thread_unlock(thread);
                splx(th_spl);
@@ -3198,7 +3197,7 @@ waitq_wakeup64_identify_locked(struct waitq     *waitq,
 
        if (thread != THREAD_NULL) {
                kern_return_t __assert_only ret;
-               ret = thread_go(thread, result);
+               ret = thread_go(thread, result, WQ_OPTION_NONE);
                assert(ret == KERN_SUCCESS);
        }
 
@@ -3252,7 +3251,7 @@ waitq_wakeup64_thread_locked(struct waitq *waitq,
                return KERN_NOT_WAITING;
        }
 
-       ret = thread_go(thread, result);
+       ret = thread_go(thread, result, WQ_OPTION_NONE);
        assert(ret == KERN_SUCCESS);
        thread_unlock(thread);
        splx(th_spl);
@@ -3298,8 +3297,7 @@ waitq_init(struct waitq *waitq, int policy)
        waitq_lock_init(waitq);
        if (waitq_is_turnstile_queue(waitq)) {
                /* For turnstile, initialize it as a priority queue */
-               priority_queue_init(&waitq->waitq_prio_queue,
-                   PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
+               priority_queue_init(&waitq->waitq_prio_queue);
                assert(waitq->waitq_fifo == 0);
        } else if (policy & SYNC_POLICY_TURNSTILE_PROXY) {
                waitq->waitq_ts = TURNSTILE_NULL;
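
Related hunks in this diff (waitq_thread_remove and waitq_prioq_iterate_locked) drop the explicit heap/comparator arguments from the priority-queue calls as well; the declared type of the queue now carries that flavor. The new forms, as they appear in those hunks and above:

priority_queue_init(&waitq->waitq_prio_queue);
priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links);
thread = priority_queue_remove_max(&safeq->waitq_prio_queue,
    struct thread, wait_prioq_links);
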
@@ -4946,7 +4944,7 @@ waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq,
         */
        if (waitq) {
                disable_preemption();
-               cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+               cache = PERCPU_GET(wqp_cache);
                if (nalloc <= (int)cache->avail) {
                        goto do_alloc;
                }
@@ -5546,7 +5544,7 @@ waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event,
 
        /* waitq is locked upon return */
        kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
-           &reserved_preposts, priority, WAITQ_UNLOCK);
+           &reserved_preposts, priority, WAITQ_UNLOCK, WQ_OPTION_NONE);
 
        if (waitq_irq_safe(waitq)) {
                splx(spl);
@@ -5638,7 +5636,7 @@ waitq_wakeup64_thread(struct waitq *waitq,
        waitq_unlock(waitq);
 
        if (ret == KERN_SUCCESS) {
-               ret = thread_go(thread, result);
+               ret = thread_go(thread, result, WQ_OPTION_NONE);
                assert(ret == KERN_SUCCESS);
                thread_unlock(thread);
                splx(th_spl);