/*
- * Copyright (c) 2015-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2015-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/kern_types.h>
#include <kern/ltable.h>
#include <kern/mach_param.h>
+#include <kern/percpu.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/simple_lock.h>
event64_t event,
thread_t thread, spl_t *spl);
-#define WAITQ_SET_MAX (task_max * 3)
-static zone_t waitq_set_zone;
+ZONE_DECLARE(waitq_set_zone, "waitq sets",
+ sizeof(struct waitq_set), ZC_NOENCRYPT);
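+/*
+ * The waitq_set zone is now declared at compile time via ZONE_DECLARE,
+ * replacing the zinit()/zone_change() calls that used to run in
+ * waitq_bootstrap(); ZC_NOENCRYPT carries over the old Z_NOENCRYPT setting.
+ */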
+/* waitq prepost cache */
+#define WQP_CACHE_MAX 50
+struct wqp_cache {
+ uint64_t head;
+ unsigned int avail;
+};
+static struct wqp_cache PERCPU_DATA(wqp_cache);
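+/*
+ * Each CPU keeps a small cache of prepost table elements so the common
+ * alloc/free paths can avoid the shared table.  PERCPU_DATA() declares the
+ * per-CPU instance and PERCPU_GET() returns the current CPU's copy, so every
+ * access is bracketed with disable_preemption()/enable_preemption(), e.g.:
+ *
+ *	disable_preemption();
+ *	struct wqp_cache *cache = PERCPU_GET(wqp_cache);
+ *	...
+ *	enable_preemption();
+ */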
#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
#define ROUNDDOWN(x, y) (((x)/(y))*(y))
static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip);
#endif
-lck_grp_t waitq_lck_grp;
+LCK_GRP_DECLARE(waitq_lck_grp, "waitq");
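+/*
+ * The waitq lock group is likewise set up statically now; the runtime
+ * lck_grp_init() call is dropped from waitq_bootstrap() below.
+ */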
#if __arm64__
}
disable_preemption();
- cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+ cache = PERCPU_GET(wqp_cache);
/* check once more before putting these elements on the list */
if (cache->avail >= WQP_CACHE_MAX) {
struct wqp_cache *cache;
if (g_min_free_cache == 0) {
- g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus());
+ g_min_free_cache = (WQP_CACHE_MAX * ml_wait_max_cpus());
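+ /*
+  * ml_wait_max_cpus() waits for the CPU count to be finalized before
+  * returning it; the rename from ml_get_max_cpus() presumably makes the
+  * potentially blocking behavior explicit for early-boot callers like this.
+  */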
}
/*
* Ensure that we always have a pool of per-CPU prepost elements
*/
disable_preemption();
- cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+ cache = PERCPU_GET(wqp_cache);
free_elem = cache->avail;
enable_preemption();
* allocating RESERVED elements
*/
disable_preemption();
- cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+ cache = PERCPU_GET(wqp_cache);
if (nelem <= (int)cache->avail) {
struct lt_elem *first, *next = NULL;
int nalloc = nelem;
* if our cache is running low.
*/
disable_preemption();
- cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+ cache = PERCPU_GET(wqp_cache);
if (cache->avail < WQP_CACHE_MAX) {
struct lt_elem *tmp = NULL;
if (cache->head != LT_IDX_MAX) {
/* the caller wants to remove the only prepost here */
assert(wqp_id == wqset->wqset_prepost_id);
wqset->wqset_prepost_id = 0;
- /* fall through */
+ OS_FALLTHROUGH;
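+ /*
+  * OS_FALLTHROUGH expands to the compiler's fallthrough attribute, so the
+  * intentional fall-through is checked by -Wimplicit-fallthrough instead of
+  * being documented only in a comment.
+  */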
case WQ_ITERATE_CONTINUE:
wq_prepost_put(wqp);
ret = WQ_ITERATE_SUCCESS;
break;
case WQ_ITERATE_RESTART:
wq_prepost_put(wqp);
- /* fall through */
+ OS_FALLTHROUGH;
case WQ_ITERATE_DROPPED:
goto restart;
default:
goto next_prepost;
case WQ_ITERATE_RESTART:
wq_prepost_put(wqp);
- /* fall-through */
+ OS_FALLTHROUGH;
case WQ_ITERATE_DROPPED:
/* the callback dropped the ref to wqp: just restart */
goto restart;
VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)),
thread_tid(thread),
0, 0, 0);
- priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links,
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
+ priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links);
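+ /*
+  * The newer priority_queue API derives the ordering from the queue's
+  * declared type, so the per-call PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE
+  * argument is gone here and in priority_queue_remove_max() below.
+  */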
} else {
remqueue(&(thread->wait_links));
}
}
wqdbg("Minimum free table elements: %d", tmp32);
- lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL);
-
/*
* Determine the amount of memory we're willing to reserve for
* the waitqueue hash table
waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ);
}
- waitq_set_zone = zinit(sizeof(struct waitq_set),
- WAITQ_SET_MAX * sizeof(struct waitq_set),
- sizeof(struct waitq_set),
- "waitq sets");
- zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE);
-
/* initialize the global waitq link table */
wql_init();
*/
disable_preemption();
- os_atomic_inc(hook, relaxed);
+ os_atomic_add(hook, (uint16_t)1, relaxed);
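+ /*
+  * The hook busy count is bumped with an explicit 16-bit os_atomic_add (and
+  * dropped the same way below), presumably because the counter is a 16-bit
+  * field; while it is held, the wqset stays alive across the hook call.
+  */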
waitq_set_unlock(wqset);
waitq_set__CALLING_PREPOST_HOOK__(hook);
/* Note: after this decrement, the wqset may be deallocated */
- os_atomic_dec(hook, relaxed);
+ os_atomic_add(hook, (uint16_t)-1, relaxed);
enable_preemption();
return ret;
}
if (remove_op) {
thread = priority_queue_remove_max(&safeq->waitq_prio_queue,
- struct thread, wait_prioq_links,
- PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE);
+ struct thread, wait_prioq_links);
} else {
/* For the peek operation, the only valid value for max_threads is 1 */
assert(max_threads == 1);
assert_thread_magic(thread);
remqueue(&thread->wait_links);
maybe_adjust_thread_pri(thread, priority, waitq);
- ret = thread_go(thread, result);
+ ret = thread_go(thread, result, WQ_OPTION_NONE);
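+ /*
+  * thread_go() now takes a waitq_options_t; WQ_OPTION_NONE preserves the old
+  * behavior, while the wakeup-one path below threads the caller-supplied
+  * option through instead.
+  */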
assert(ret == KERN_SUCCESS);
thread_unlock(thread);
}
wait_result_t result,
uint64_t *reserved_preposts,
int priority,
- waitq_lock_state_t lock_state)
+ waitq_lock_state_t lock_state,
+ waitq_options_t option)
{
thread_t thread;
spl_t th_spl;
if (thread != THREAD_NULL) {
maybe_adjust_thread_pri(thread, priority, waitq);
- kern_return_t ret = thread_go(thread, result);
+ kern_return_t ret = thread_go(thread, result, option);
assert(ret == KERN_SUCCESS);
thread_unlock(thread);
splx(th_spl);
if (thread != THREAD_NULL) {
kern_return_t __assert_only ret;
- ret = thread_go(thread, result);
+ ret = thread_go(thread, result, WQ_OPTION_NONE);
assert(ret == KERN_SUCCESS);
}
return KERN_NOT_WAITING;
}
- ret = thread_go(thread, result);
+ ret = thread_go(thread, result, WQ_OPTION_NONE);
assert(ret == KERN_SUCCESS);
thread_unlock(thread);
splx(th_spl);
waitq_lock_init(waitq);
if (waitq_is_turnstile_queue(waitq)) {
/* For turnstile, initialize it as a priority queue */
- priority_queue_init(&waitq->waitq_prio_queue,
- PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
+ priority_queue_init(&waitq->waitq_prio_queue);
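+ /*
+  * priority_queue_init() no longer takes the heap-kind constant either; the
+  * waitq prio queue is presumably declared as a sched-pri max-heap type.
+  */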
assert(waitq->waitq_fifo == 0);
} else if (policy & SYNC_POLICY_TURNSTILE_PROXY) {
waitq->waitq_ts = TURNSTILE_NULL;
*/
if (waitq) {
disable_preemption();
- cache = &PROCESSOR_DATA(current_processor(), wqp_cache);
+ cache = PERCPU_GET(wqp_cache);
if (nalloc <= (int)cache->avail) {
goto do_alloc;
}
/* waitq is locked upon return */
kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
- &reserved_preposts, priority, WAITQ_UNLOCK);
+ &reserved_preposts, priority, WAITQ_UNLOCK, WQ_OPTION_NONE);
if (waitq_irq_safe(waitq)) {
splx(spl);
waitq_unlock(waitq);
if (ret == KERN_SUCCESS) {
- ret = thread_go(thread, result);
+ ret = thread_go(thread, result, WQ_OPTION_NONE);
assert(ret == KERN_SUCCESS);
thread_unlock(thread);
splx(th_spl);