+ if ((evq = p->p_evlist.tqh_first) != NULL) {
+ /*
+ * found one... make a local copy while it's still on the queue
+ * to prevent it from changing while in the midst of copying
+ * don't want to hold the proc lock across a copyout because
+ * it might block on a page fault at the target in user space
+ */
+ erp = &evq->ee_req;
+
+ if (IS_64BIT_PROCESS(p))
+ bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof (struct eventreq64));
+ else {
+ uer.er32.er_type = erp->er_type;
+ uer.er32.er_handle = erp->er_handle;
+ uer.er32.er_data = (uint32_t)erp->er_data;
+ uer.er32.er_ecnt = erp->er_ecnt;
+ uer.er32.er_rcnt = erp->er_rcnt;
+ uer.er32.er_wcnt = erp->er_wcnt;
+ uer.er32.er_eventbits = erp->er_eventbits;
+ }
+ TAILQ_REMOVE(&p->p_evlist, evq, ee_plist);
+
+ evq->ee_flags &= ~EV_QUEUED;
+
+ proc_unlock(p);
+
+ if (IS_64BIT_PROCESS(p))
+ error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64));
+ else
+ error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32));
+
+ KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,
+ evq->ee_req.er_handle,evq->ee_req.er_eventbits,(uint32_t)evq,0);
+ return (error);
+ }
+ else {
+ if (uap->tv && interval == 0) {
+ proc_unlock(p);
+ *retval = 1; // poll failed
+
+ KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0);
+ return (error);
+ }
+ if (interval != 0)
+ clock_absolutetime_interval_to_deadline(interval, &abstime);
+ else
+ abstime = 0;
+
+ KERNEL_DEBUG(DBG_MISC_WAIT, 1,(uint32_t)&p->p_evlist,0,0,0);
+
+ error = msleep1(&p->p_evlist, &p->p_mlock, (PSOCK | PCATCH), "waitevent", abstime);
+
+ KERNEL_DEBUG(DBG_MISC_WAIT, 2,(uint32_t)&p->p_evlist,0,0,0);
+
+ if (error == 0)
+ goto retry;
+ if (error == ERESTART)
+ error = EINTR;
+ if (error == EWOULDBLOCK) {
+ *retval = 1;
+ error = 0;
+ }
+ }
+ proc_unlock(p);
+
+ KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0);
+ return (error);
+}
+
+
+/*
+ * modwatch system call.  The user passes in the event to modify
+ * (identified by er_type/er_handle) and a new event mask.
+ *
+ * EV_RM removes the registration entirely; any other mask clears the
+ * accumulated eventbits, installs the new mask, and re-posts against
+ * the object's current state so a newly-interesting condition that is
+ * already true is reported.
+ *
+ * Lock order: proc_fdlock -> socket/pipe lock; the proc_lock (via
+ * EVPROCDEQUE) is only ever taken while holding the object lock.
+ */
+int
+modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval)
+{
+ struct eventreq64 er;
+ struct eventreq64 *erp = &er;
+ struct eventqelt *evq = NULL; /* protected by error return */
+ int error;
+ struct fileproc *fp;
+ int flag;
+
+ KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0);
+
+ /*
+ * get user's request pkt
+ * just need the er_type and er_handle which sit above the
+ * problematic er_data (32/64 issue)... so only copy in
+ * those 2 fields
+ */
+ if ((error = copyin(uap->u_req, (caddr_t)erp, sizeof(er.er_type) + sizeof(er.er_handle)))) {
+ KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
+ return(error);
+ }
+ proc_fdlock(p);
+
+ if (erp->er_type != EV_FD) {
+ error = EINVAL;
+ } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) {
+ /* any fp_lookup failure is reported uniformly as EBADF */
+ error = EBADF;
+#if SOCKETS
+ } else if (fp->f_type == DTYPE_SOCKET) {
+ /* take the object lock before walking its event list */
+ socket_lock((struct socket *)fp->f_data, 1);
+ evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
+#endif /* SOCKETS */
+ } else if (fp->f_type == DTYPE_PIPE) {
+ PIPE_LOCK((struct pipe *)fp->f_data);
+ evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
+ } else {
+ fp_drop(p, erp->er_handle, fp, 1);
+ error = EINVAL;
+ }
+
+ if (error) {
+ proc_fdunlock(p);
+ KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0);
+ return(error);
+ }
+
+ /* on removal, stop waitevent_close from chasing this fp at close time */
+ if ((uap->u_eventmask == EV_RM) && (fp->f_flags & FP_WAITEVENT)) {
+ fp->f_flags &= ~FP_WAITEVENT;
+ }
+ proc_fdunlock(p);
+
+ // locate this process's registration on the object's event list
+ for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
+ if (evq->ee_proc == p)
+ break;
+ }
+ if (evq == NULL) {
+#if SOCKETS
+ if (fp->f_type == DTYPE_SOCKET)
+ socket_unlock((struct socket *)fp->f_data, 1);
+ else
+#endif /* SOCKETS */
+ PIPE_UNLOCK((struct pipe *)fp->f_data);
+ fp_drop(p, erp->er_handle, fp, 0);
+ KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0);
+ return(EINVAL);
+ }
+ KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0);
+
+ if (uap->u_eventmask == EV_RM) {
+ /*
+ * remove the registration: pull it off the proc's pending
+ * queue (if queued), then off the object's list, then free
+ */
+ EVPROCDEQUE(p, evq);
+
+#if SOCKETS
+ if (fp->f_type == DTYPE_SOCKET) {
+ TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
+ socket_unlock((struct socket *)fp->f_data, 1);
+ } else
+#endif /* SOCKETS */
+ {
+ TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
+ PIPE_UNLOCK((struct pipe *)fp->f_data);
+ }
+ fp_drop(p, erp->er_handle, fp, 0);
+ FREE(evq, M_TEMP);
+ KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0);
+ return(0);
+ }
+ /* translate the user-visible event mask into internal post flags */
+ switch (uap->u_eventmask & EV_MASK) {
+
+ case 0:
+ flag = 0;
+ break;
+
+ case EV_RE:
+ case EV_WR:
+ case EV_RE|EV_WR:
+ flag = EV_RWBYTES;
+ break;
+
+ case EV_EX:
+ flag = EV_OOB;
+ break;
+
+ case EV_EX|EV_RE:
+ case EV_EX|EV_WR:
+ case EV_EX|EV_RE|EV_WR:
+ flag = EV_OOB|EV_RWBYTES;
+ break;
+
+ default:
+#if SOCKETS
+ if (fp->f_type == DTYPE_SOCKET)
+ socket_unlock((struct socket *)fp->f_data, 1);
+ else
+#endif /* SOCKETS */
+ PIPE_UNLOCK((struct pipe *)fp->f_data);
+ fp_drop(p, erp->er_handle, fp, 0);
+ /* NOTE(review): this error path traces DBG_MISC_WATCH, not DBG_MISC_MOD -- confirm intentional */
+ KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0);
+ return(EINVAL);
+ }
+ /*
+ * since we're holding the socket/pipe lock, the event
+ * cannot go from the unqueued state to the queued state
+ * however, it can go from the queued state to the unqueued state
+ * since that direction is protected by the proc_lock...
+ * so do a quick check for EV_QUEUED w/o holding the proc lock
+ * since by far the common case will be NOT EV_QUEUED, this saves
+ * us taking the proc_lock the majority of the time
+ */
+ if (evq->ee_flags & EV_QUEUED) {
+ /*
+ * EVPROCDEQUE will recheck the state after it grabs the proc_lock
+ */
+ EVPROCDEQUE(p, evq);
+ }
+ /*
+ * while the event is off the proc queue and
+ * we're holding the socket/pipe lock
+ * it's safe to update these fields...
+ */
+ evq->ee_req.er_eventbits = 0;
+ evq->ee_eventmask = uap->u_eventmask & EV_MASK;
+
+ /* re-evaluate the object against the new mask so an already-true condition fires */
+#if SOCKETS
+ if (fp->f_type == DTYPE_SOCKET) {
+ postevent((struct socket *)fp->f_data, 0, flag);
+ socket_unlock((struct socket *)fp->f_data, 1);
+ } else
+#endif /* SOCKETS */
+ {
+ postpipeevent((struct pipe *)fp->f_data, flag);
+ PIPE_UNLOCK((struct pipe *)fp->f_data);
+ }
+ fp_drop(p, erp->er_handle, fp, 0);
+ KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,(uint32_t)fp->f_data,flag,0);
+ return(0);
+}
+
+/*
+ * this routine is called from the close of fd with proc_fdlock held
+ *
+ * Tears down any event registration this process holds on the
+ * socket/pipe behind fp.  Lock protocol: the fdlock is dropped while
+ * the object lock is held and reacquired before returning -- EXCEPT on
+ * the EINVAL return for a non-socket/non-pipe fp, where the fdlock is
+ * never dropped.  Either way the caller gets the fdlock back held.
+ */
+int
+waitevent_close(struct proc *p, struct fileproc *fp)
+{
+ struct eventqelt *evq;
+
+
+ fp->f_flags &= ~FP_WAITEVENT;
+
+#if SOCKETS
+ if (fp->f_type == DTYPE_SOCKET) {
+ socket_lock((struct socket *)fp->f_data, 1);
+ evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first;
+ } else
+#endif /* SOCKETS */
+ if (fp->f_type == DTYPE_PIPE) {
+ PIPE_LOCK((struct pipe *)fp->f_data);
+ evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first;
+ }
+ else {
+ /* not an eventable object -- fdlock is still held for the caller */
+ return(EINVAL);
+ }
+ proc_fdunlock(p);
+
+
+ // locate this process's registration on the object's event list
+ for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) {
+ if (evq->ee_proc == p)
+ break;
+ }
+ if (evq == NULL) {
+#if SOCKETS
+ if (fp->f_type == DTYPE_SOCKET)
+ socket_unlock((struct socket *)fp->f_data, 1);
+ else
+#endif /* SOCKETS */
+ PIPE_UNLOCK((struct pipe *)fp->f_data);
+
+ proc_fdlock(p);
+
+ return(EINVAL);
+ }
+ /* pull it off the proc's pending queue (if queued), then the object list */
+ EVPROCDEQUE(p, evq);
+
+#if SOCKETS
+ if (fp->f_type == DTYPE_SOCKET) {
+ TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist);
+ socket_unlock((struct socket *)fp->f_data, 1);
+ } else
+#endif /* SOCKETS */
+ {
+ TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist);
+ PIPE_UNLOCK((struct pipe *)fp->f_data);
+ }
+ FREE(evq, M_TEMP);
+
+ proc_fdlock(p);
+
+ return(0);
+}
+
+
+/*
+ * gethostuuid
+ *
+ * Description: Get the host UUID from IOKit and return it to user space.
+ *
+ * Parameters: uuid_buf Pointer to buffer to receive UUID
+ * timeout Timespec for timeout
+ * spi SPI, skip sandbox check (temporary)
+ *
+ * Returns: 0 Success
+ * EWOULDBLOCK Timeout is too short
+ * copyout:EFAULT Bad user buffer
+ * mac_system_check_info:EPERM Client not allowed to perform this operation
+ *
+ * Notes: A timeout seems redundant, since if it's tolerable to not
+ * have a system UUID in hand, then why ask for one?
+ */
+int
+gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval)
+{
+ kern_return_t kret;
+ int error;
+ mach_timespec_t mach_ts; /* for IOKit call */
+ __darwin_uuid_t uuid_kern; /* for IOKit call */
+
+ /* non-SPI callers go through the MAC policy check on embedded */
+ if (!uap->spi) {
+#if CONFIG_EMBEDDED
+#if CONFIG_MACF
+ if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) {
+ /* EPERM invokes userspace upcall if present */
+ return (error);
+ }
+#endif
+#endif
+ }
+
+ /* Convert the 32/64 bit timespec into a mach_timespec_t */
+ /* NOTE(review): mach_timespec_t fields are narrower than user64_timespec's,
+ * so large values are truncated -- presumably acceptable for a timeout; confirm */
+ if ( proc_is64bit(p) ) {
+ struct user64_timespec ts;
+ error = copyin(uap->timeoutp, &ts, sizeof(ts));
+ if (error)
+ return (error);
+ mach_ts.tv_sec = ts.tv_sec;
+ mach_ts.tv_nsec = ts.tv_nsec;
+ } else {
+ struct user32_timespec ts;
+ error = copyin(uap->timeoutp, &ts, sizeof(ts) );
+ if (error)
+ return (error);
+ mach_ts.tv_sec = ts.tv_sec;
+ mach_ts.tv_nsec = ts.tv_nsec;
+ }
+
+ /* Call IOKit with the stack buffer to get the UUID */
+ kret = IOBSDGetPlatformUUID(uuid_kern, mach_ts);
+
+ /*
+ * If we get it, copy out the data to the user buffer; note that a
+ * uuid_t is an array of characters, so this is size invariant for
+ * 32 vs. 64 bit.
+ */
+ if (kret == KERN_SUCCESS) {
+ error = copyout(uuid_kern, uap->uuid_buf, sizeof(uuid_kern));
+ } else {
+ error = EWOULDBLOCK;
+ }
+
+ return (error);
+}
+
+/*
+ * ledger
+ *
+ * Description: Omnibus system call for ledger operations
+ *
+ * The command selects which argN is a length/struct to copy in, whether
+ * a target process reference is needed (all commands except
+ * LEDGER_TEMPLATE_INFO), and which ledger_* backend to invoke.  The
+ * proc reference is always released before any copyout.
+ */
+int
+ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval)
+{
+#if !CONFIG_MACF
+#pragma unused(p)
+#endif
+ int rval, pid, len, error;
+#ifdef LEDGER_DEBUG
+ struct ledger_limit_args lla;
+#endif
+ task_t task;
+ proc_t proc;
+
+ /* Finish copying in the necessary args before taking the proc lock */
+ error = 0;
+ len = 0;
+ if (args->cmd == LEDGER_ENTRY_INFO)
+ error = copyin(args->arg3, (char *)&len, sizeof (len));
+ else if (args->cmd == LEDGER_TEMPLATE_INFO)
+ error = copyin(args->arg2, (char *)&len, sizeof (len));
+ else if (args->cmd == LEDGER_LIMIT)
+ /* LEDGER_LIMIT is only supported in LEDGER_DEBUG builds */
+#ifdef LEDGER_DEBUG
+ error = copyin(args->arg2, (char *)&lla, sizeof (lla));
+#else
+ return (EINVAL);
+#endif
+ else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD))
+ return (EINVAL);
+
+ if (error)
+ return (error);
+ if (len < 0)
+ return (EINVAL);
+
+ rval = 0;
+ if (args->cmd != LEDGER_TEMPLATE_INFO) {
+ /* all other commands target a specific process (arg1 = pid) */
+ pid = args->arg1;
+ proc = proc_find(pid);
+ if (proc == NULL)
+ return (ESRCH);
+
+#if CONFIG_MACF
+ error = mac_proc_check_ledger(p, proc, args->cmd);
+ if (error) {
+ proc_rele(proc);
+ return (error);
+ }
+#endif
+
+ task = proc->task;
+ }
+
+ switch (args->cmd) {
+#ifdef LEDGER_DEBUG
+ case LEDGER_LIMIT: {
+ if (!kauth_cred_issuser(kauth_cred_get()))
+ rval = EPERM;
+ rval = ledger_limit(task, &lla);
+ proc_rele(proc);
+ break;
+ }
+#endif
+ case LEDGER_INFO: {
+ struct ledger_info info;
+
+ rval = ledger_info(task, &info);
+ proc_rele(proc);
+ if (rval == 0)
+ rval = copyout(&info, args->arg2,
+ sizeof (info));
+ break;
+ }
+
+ case LEDGER_ENTRY_INFO: {
+ void *buf;
+ int sz;
+
+ rval = ledger_get_task_entry_info_multiple(task, &buf, &len);
+ proc_rele(proc);
+ if ((rval == 0) && (len >= 0)) {
+ /* assumes the callee clamps len; the multiply could overflow for huge len -- TODO confirm */
+ sz = len * sizeof (struct ledger_entry_info);
+ rval = copyout(buf, args->arg2, sz);
+ kfree(buf, sz);
+ }
+ /* report the (possibly reduced) entry count back via arg3 */
+ if (rval == 0)
+ rval = copyout(&len, args->arg3, sizeof (len));
+ break;
+ }
+
+ case LEDGER_TEMPLATE_INFO: {
+ void *buf;
+ int sz;
+
+ rval = ledger_template_info(&buf, &len);
+ if ((rval == 0) && (len >= 0)) {
+ sz = len * sizeof (struct ledger_template_info);
+ rval = copyout(buf, args->arg1, sz);
+ kfree(buf, sz);
+ }
+ if (rval == 0)
+ rval = copyout(&len, args->arg2, sizeof (len));
+ break;
+ }
+
+ default:
+ panic("ledger syscall logic error -- command type %d", args->cmd);
+ /* NOTE(review): panic() does not return; the two statements below are unreachable */
+ proc_rele(proc);
+ rval = EINVAL;
+ }
+
+ return (rval);
+}
+
+/*
+ * telemetry system call: dispatch on args->cmd.
+ *
+ * TELEMETRY_CMD_TIMER_EVENT (CONFIG_TELEMETRY only) forwards the
+ * deadline/interval/leeway triple to telemetry_timer_event().
+ * TELEMETRY_CMD_VOUCHER_NAME reinterprets args->deadline as a Mach
+ * port name and installs it as the thread's voucher; any other
+ * command yields EINVAL.
+ */
+int
+telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t *retval)
+{
+	int error = EINVAL;	/* unrecognized commands fall through with this */
+
+#if CONFIG_TELEMETRY
+	if (args->cmd == TELEMETRY_CMD_TIMER_EVENT) {
+		error = telemetry_timer_event(args->deadline, args->interval, args->leeway);
+	} else
+#endif /* CONFIG_TELEMETRY */
+	if (args->cmd == TELEMETRY_CMD_VOUCHER_NAME) {
+		error = thread_set_voucher_name((mach_port_name_t)args->deadline) ? EINVAL : 0;
+	}
+
+	return (error);
+}
+
+#if DEVELOPMENT || DEBUG
+#if CONFIG_WAITQ_DEBUG
+/* test-only bookkeeping shared by the waitq debug sysctls below */
+static uint64_t g_wqset_num = 0; /* count of allocated test wqsets; 0 also means list not yet initialized */
+struct g_wqset {
+ queue_chain_t link;
+ struct waitq_set *wqset;
+};
+
+static queue_head_t g_wqset_list; /* every test wqset ever created (never freed) */
+static struct waitq_set *g_waitq_set = NULL; /* wqset most recently selected via kern.wqset_select */
+
+/*
+ * Look up (by the low 32 bits of its wqset id) or lazily create a test
+ * waitq set.  Initializes the global list on first use.  Intentionally
+ * lock-free: this is debug/test-only code.
+ */
+static inline struct waitq_set *sysctl_get_wqset(int idx)
+{
+ struct g_wqset *gwqs;
+
+ if (!g_wqset_num)
+ queue_init(&g_wqset_list);
+
+ /* don't bother with locks: this is test-only code! */
+ qe_foreach_element(gwqs, &g_wqset_list, link) {
+ if ((int)(wqset_id(gwqs->wqset) & 0xffffffff) == idx)
+ return gwqs->wqset;
+ }
+
+ /* allocate a new one */
+ ++g_wqset_num;
+ gwqs = (struct g_wqset *)kalloc(sizeof(*gwqs));
+ assert(gwqs != NULL);
+
+ gwqs->wqset = waitq_set_alloc(SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST, NULL);
+ enqueue_tail(&g_wqset_list, &gwqs->link);
+ printf("[WQ]: created new waitq set 0x%llx\n", wqset_id(gwqs->wqset));
+
+ return gwqs->wqset;
+}
+
+#define MAX_GLOBAL_TEST_QUEUES 64
+static int g_wq_init = 0; /* one-shot lazy init flag; unsynchronized (test-only) */
+static struct waitq g_wq[MAX_GLOBAL_TEST_QUEUES]; /* fixed pool of test waitqs */
+
+/*
+ * Return one of the fixed pool of test waitqs, lazily initializing the
+ * whole pool on first use.  idx wraps modulo the pool size; a negative
+ * idx yields NULL.
+ */
+static inline struct waitq *global_test_waitq(int idx)
+{
+ if (idx < 0)
+ return NULL;
+
+ if (!g_wq_init) {
+ g_wq_init = 1;
+ for (int i = 0; i < MAX_GLOBAL_TEST_QUEUES; i++)
+ waitq_init(&g_wq[i], SYNC_POLICY_FIFO);
+ }
+
+ return &g_wq[idx % MAX_GLOBAL_TEST_QUEUES];
+}
+
+/*
+ * sysctl kern.waitq_wakeup_one: wake a single thread on a test waitq.
+ * A negative written value selects (or creates) a test waitq set by
+ * index; a non-negative value selects a global test waitq.  Reads back
+ * the kern_return_t from the wakeup.
+ */
+static int sysctl_waitq_wakeup_one SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ int index;
+ struct waitq *waitq;
+ kern_return_t kr;
+ int64_t event64 = 0;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ /* a pure read just echoes back zero */
+ if (!req->newptr)
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+
+ if (event64 < 0) {
+ index = (int)((-event64) & 0xffffffff);
+ waitq = wqset_waitq(sysctl_get_wqset(index));
+ index = -index;
+ } else {
+ index = (int)event64;
+ waitq = global_test_waitq(index);
+ }
+
+ /* all test waiters use event 0, regardless of the index encoding */
+ event64 = 0;
+
+ printf("[WQ]: Waking one thread on waitq [%d] event:0x%llx\n",
+ index, event64);
+ kr = waitq_wakeup64_one(waitq, (event64_t)event64, THREAD_AWAKENED,
+ WAITQ_ALL_PRIORITIES);
+ printf("[WQ]: \tkr=%d\n", kr);
+
+ return SYSCTL_OUT(req, &kr, sizeof(kr));
+}
+SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_one, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_waitq_wakeup_one, "Q", "wakeup one thread waiting on given event");
+
+
+/*
+ * sysctl kern.waitq_wakeup_all: same selection scheme as
+ * kern.waitq_wakeup_one (negative = wqset by index, non-negative =
+ * global test waitq), but wakes every thread waiting on event 0.
+ */
+static int sysctl_waitq_wakeup_all SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ int index;
+ struct waitq *waitq;
+ kern_return_t kr;
+ int64_t event64 = 0;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+
+ if (event64 < 0) {
+ index = (int)((-event64) & 0xffffffff);
+ waitq = wqset_waitq(sysctl_get_wqset(index));
+ index = -index;
+ } else {
+ index = (int)event64;
+ waitq = global_test_waitq(index);
+ }
+
+ /* all test waiters use event 0 */
+ event64 = 0;
+
+ printf("[WQ]: Waking all threads on waitq [%d] event:0x%llx\n",
+ index, event64);
+ kr = waitq_wakeup64_all(waitq, (event64_t)event64,
+ THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
+ printf("[WQ]: \tkr=%d\n", kr);
+
+ return SYSCTL_OUT(req, &kr, sizeof(kr));
+}
+SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_waitq_wakeup_all, "Q", "wakeup all threads waiting on given event");
+
+
+/*
+ * sysctl kern.waitq_wait: block the calling thread on the selected test
+ * waitq (negative = wqset by index, non-negative = global test waitq)
+ * waiting interruptibly on event 0.  Returns the assert_wait result
+ * after the thread is woken.
+ */
+static int sysctl_waitq_wait SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ int index;
+ struct waitq *waitq;
+ kern_return_t kr;
+ int64_t event64 = 0;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+
+ if (event64 < 0) {
+ index = (int)((-event64) & 0xffffffff);
+ waitq = wqset_waitq(sysctl_get_wqset(index));
+ index = -index;
+ } else {
+ index = (int)event64;
+ waitq = global_test_waitq(index);
+ }
+
+ /* all test waiters use event 0 */
+ event64 = 0;
+
+ printf("[WQ]: Current thread waiting on waitq [%d] event:0x%llx\n",
+ index, event64);
+ kr = waitq_assert_wait64(waitq, (event64_t)event64, THREAD_INTERRUPTIBLE, 0);
+ if (kr == THREAD_WAITING)
+ thread_block(THREAD_CONTINUE_NULL);
+ printf("[WQ]: \tWoke Up: kr=%d\n", kr);
+
+ return SYSCTL_OUT(req, &kr, sizeof(kr));
+}
+SYSCTL_PROC(_kern, OID_AUTO, waitq_wait, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_waitq_wait, "Q", "start waiting on given event");
+
+
+/*
+ * sysctl kern.wqset_select: select (or lazily create) the global test
+ * waitq set used by the other waitq debug sysctls.  Reading returns the
+ * currently selected set's id, or (uint64_t)-1 if none is selected.
+ */
+static int sysctl_wqset_select SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ struct waitq_set *wqset;
+ uint64_t event64 = 0;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ goto out;
+
+ wqset = sysctl_get_wqset((int)(event64 & 0xffffffff));
+ g_waitq_set = wqset;
+
+ event64 = wqset_id(wqset);
+ printf("[WQ]: selected wqset 0x%llx\n", event64);
+
+out:
+ /* report whichever set ends up selected (possibly unchanged) */
+ if (g_waitq_set)
+ event64 = wqset_id(g_waitq_set);
+ else
+ event64 = (uint64_t)(-1);
+
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+}
+SYSCTL_PROC(_kern, OID_AUTO, wqset_select, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_wqset_select, "Q", "select/create a global waitq set");
+
+
+/*
+ * sysctl kern.waitq_link: link a test waitq into the currently selected
+ * global test waitq set (selecting/creating set 1 if none is selected).
+ * A negative written value names another test wqset whose waitq is
+ * linked in; a non-negative value names a global test waitq.  Reads
+ * back the kern_return_t result of the link.
+ */
+static int sysctl_waitq_link SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ int index;
+ struct waitq *waitq;
+ struct waitq_set *wqset;
+ kern_return_t kr = KERN_SUCCESS; /* initialized: the tmp == wqset early exit reaches SYSCTL_OUT without assigning kr */
+ uint64_t reserved_link = 0;
+ int64_t event64 = 0;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+
+ if (!g_waitq_set)
+ g_waitq_set = sysctl_get_wqset(1);
+ wqset = g_waitq_set;
+
+ if (event64 < 0) {
+ struct waitq_set *tmp;
+ index = (int)((-event64) & 0xffffffff);
+ tmp = sysctl_get_wqset(index);
+ if (tmp == wqset)
+ goto out; /* linking a set to itself is a no-op */
+ waitq = wqset_waitq(tmp);
+ index = -index;
+ } else {
+ index = (int)event64;
+ waitq = global_test_waitq(index);
+ }
+
+ printf("[WQ]: linking waitq [%d] to global wqset (0x%llx)\n",
+ index, wqset_id(wqset));
+ reserved_link = waitq_link_reserve(waitq);
+ kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &reserved_link);
+ waitq_link_release(reserved_link);
+
+ printf("[WQ]: \tkr=%d\n", kr);
+
+out:
+ return SYSCTL_OUT(req, &kr, sizeof(kr));
+}
+SYSCTL_PROC(_kern, OID_AUTO, waitq_link, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_waitq_link, "Q", "link global waitq to test waitq set");
+
+
+/*
+ * sysctl kern.waitq_unlink: unlink a global test waitq (by index) from
+ * the currently selected global test waitq set (selecting/creating set
+ * 1 if none is selected).  Reads back the kern_return_t result.
+ */
+static int sysctl_waitq_unlink SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ int index;
+ struct waitq *waitq;
+ struct waitq_set *wqset;
+ kern_return_t kr;
+ uint64_t event64 = 0;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+
+ if (!g_waitq_set)
+ g_waitq_set = sysctl_get_wqset(1);
+ wqset = g_waitq_set;
+
+ index = (int)event64;
+ waitq = global_test_waitq(index);
+
+ printf("[WQ]: unlinking waitq [%d] from global wqset (0x%llx)\n",
+ index, wqset_id(wqset));
+
+ kr = waitq_unlink(waitq, wqset);
+ printf("[WQ]: \tkr=%d\n", kr);
+
+ return SYSCTL_OUT(req, &kr, sizeof(kr));
+}
+SYSCTL_PROC(_kern, OID_AUTO, waitq_unlink, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_waitq_unlink, "Q", "unlink global waitq from test waitq set");
+
+
+/*
+ * sysctl kern.waitq_clear_prepost: clear any prepost state on the
+ * global test waitq selected by the written index.  Echoes the written
+ * value back.
+ */
+static int sysctl_waitq_clear_prepost SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ struct waitq *waitq;
+ uint64_t event64 = 0;
+ int error, index;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+
+ index = (int)event64;
+ waitq = global_test_waitq(index);
+
+ printf("[WQ]: clearing prepost on waitq [%d]\n", index);
+ waitq_clear_prepost(waitq);
+
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+}
+SYSCTL_PROC(_kern, OID_AUTO, waitq_clear_prepost, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_waitq_clear_prepost, "Q", "clear prepost on given waitq");
+
+
+/*
+ * sysctl kern.wqset_unlink_all: unlink every waitq from the currently
+ * selected global test waitq set (selecting/creating set 1 if none is
+ * selected).  Reads back the kern_return_t result.
+ */
+static int sysctl_wqset_unlink_all SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int error;
+ struct waitq_set *wqset;
+ kern_return_t kr;
+ uint64_t event64 = 0;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+
+ if (!g_waitq_set)
+ g_waitq_set = sysctl_get_wqset(1);
+ wqset = g_waitq_set;
+
+ printf("[WQ]: unlinking all queues from global wqset (0x%llx)\n",
+ wqset_id(wqset));
+
+ kr = waitq_set_unlink_all(wqset);
+ printf("[WQ]: \tkr=%d\n", kr);
+
+ return SYSCTL_OUT(req, &kr, sizeof(kr));
+}
+SYSCTL_PROC(_kern, OID_AUTO, wqset_unlink_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_wqset_unlink_all, "Q", "unlink all queues from test waitq set");
+
+
+/*
+ * sysctl kern.wqset_clear_preposts: clear all preposts on the test
+ * waitq set selected (or created) by the written index.  Reads back the
+ * set's id, or (uint64_t)-1 on a pure read.
+ */
+static int sysctl_wqset_clear_preposts SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ struct waitq_set *wqset = NULL;
+ uint64_t event64 = 0;
+ int error, index;
+
+ error = SYSCTL_IN(req, &event64, sizeof(event64));
+ if (error)
+ return error;
+
+ if (!req->newptr)
+ goto out;
+
+ index = (int)((event64) & 0xffffffff);
+ wqset = sysctl_get_wqset(index);
+ assert(wqset != NULL);
+
+ printf("[WQ]: clearing preposts on wqset 0x%llx\n", wqset_id(wqset));
+ waitq_set_clear_preposts(wqset);
+
+out:
+ if (wqset)
+ event64 = wqset_id(wqset);
+ else
+ event64 = (uint64_t)(-1);
+
+ return SYSCTL_OUT(req, &event64, sizeof(event64));
+}
+SYSCTL_PROC(_kern, OID_AUTO, wqset_clear_preposts, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_wqset_clear_preposts, "Q", "clear preposts on given waitq set");
+
+#endif /* CONFIG_WAITQ_DEBUG */
+#endif /* DEVELOPMENT || DEBUG */