+
+int
+pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
+{
+ int error = 0;
+
+ if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+ attr->overcommit = ocomm;
+ } else {
+ error = EINVAL;
+ }
+ return (error);
+}
+/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
+
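+/*
+ * The global workqueue state (priority queue heads, work item and
+ * workqueue pools, and the associated counters) is protected by a single
+ * spin lock; these wrappers take and release it.
+ */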
+static void
+workqueue_list_lock(void)
+{
+ OSSpinLockLock(&__workqueue_list_lock);
+}
+
+static void
+workqueue_list_unlock(void)
+{
+ OSSpinLockUnlock(&__workqueue_list_lock);
+}
+
+int
+pthread_workqueue_init_np(void)
+{
+ int ret;
+
+ if (__workqueue_newspis != 0)
+ return(EPERM);
+ __workqueue_oldspis = 1;
+
+ workqueue_list_lock();
+ ret = _pthread_work_internal_init();
+ workqueue_list_unlock();
+
+ return(ret);
+}
+
+int
+pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
+{
+ int error = 0;
+
+ if (__workqueue_newspis != 0)
+ return(EPERM);
+
+ if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
+ return(EINVAL);
+
+ error = __workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);
+
+ if (error == -1)
+ return(errno);
+ return(0);
+}
+
+void
+pthread_workqueue_atfork_prepare(void)
+{
+ /*
+ * NOTE: Any workq additions here
+ * should be for i386,x86_64 only
+ */
+ dispatch_atfork_prepare();
+}
+
+void
+pthread_workqueue_atfork_parent(void)
+{
+ /*
+ * NOTE: Any workq additions here
+ * should be for i386,x86_64 only
+ */
+ dispatch_atfork_parent();
+}
+
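+/*
+ * fork() child-side handler: resets the list lock and, depending on which
+ * SPI was in use, either re-registers the workqueue thread entry points
+ * and reopens the kernel workqueue (new SPI) or rebuilds the legacy
+ * workqueue state.
+ */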
+void
+pthread_workqueue_atfork_child(void)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ pthread_t self = pthread_self();
+
+ __workqueue_list_lock = OS_SPINLOCK_INIT;
+
+ /* already using new spis? */
+ if (__workqueue_newspis != 0) {
+ /* prepare the kernel for workq action */
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
+#else
+ __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL,0);
+#endif
+ (void)__workq_open();
+ kernel_workq_setup = 1;
+ return;
+ }
+
+ /* not using old spis either? */
+ if (__workqueue_oldspis == 0)
+ return;
+
+ /*
+ * NOTE: workq additions here
+ * are for i386,x86_64 only as
+ * ppc and arm do not support it
+ */
+ if (kernel_workq_setup != 0) {
+ kernel_workq_setup = 0;
+ _pthread_work_internal_init();
+ }
+#endif
+ dispatch_atfork_child();
+}
+
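+/*
+ * One-time setup for the legacy workqueue SPI: registers the workqueue
+ * thread entry points with the kernel via __bsdthread_register, seeds the
+ * default attributes, initializes the per-priority queue heads, carves an
+ * mmap'd pool of work items (falling back to malloc if the mapping fails),
+ * preallocates a pool of workqueue structures, and opens the kernel
+ * workqueue with __workq_open.  Normally called with the list lock held.
+ */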
+static int
+_pthread_work_internal_init(void)
+{
+ int i, error;
+ pthread_workqueue_head_t headp;
+ pthread_workqueue_t wq;
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ pthread_t self = pthread_self();
+#endif
+
+ if (kernel_workq_setup == 0) {
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
+#else
+ __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL,0);
+#endif
+
+ _pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+ _pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;
+
+ for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
+ headp = __pthread_wq_head_tbl[i];
+ TAILQ_INIT(&headp->wqhead);
+ headp->next_workq = 0;
+ }
+
+ __workqueue_pool_ptr = NULL;
+ __workqueue_pool_size = round_page(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
+
+ __workqueue_pool_ptr = (struct _pthread_workitem *)mmap(NULL, __workqueue_pool_size,
+ PROT_READ|PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE,
+ 0,
+ 0);
+
+ if (__workqueue_pool_ptr == MAP_FAILED) {
+ /* Not expected to fail; if it does, always malloc work items */
+ __workqueue_nitems = WORKITEM_POOL_SIZE;
+ __workqueue_pool_ptr = NULL;
+ } else
+ __workqueue_nitems = 0;
+
+ /* sets up the workitem pool */
+ grow_workitem();
+
+ /* since the size is less than a page, leaving this in malloc pool */
+ wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
+ bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
+ for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
+ TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
+ }
+
+ if ((error = __workq_open()) != 0) {
+ TAILQ_INIT(&__pthread_workitem_pool_head);
+ TAILQ_INIT(&__pthread_workqueue_pool_head);
+ if (__workqueue_pool_ptr != NULL) {
+ munmap((void *)__workqueue_pool_ptr, __workqueue_pool_size);
+ }
+ free(wq);
+ return(ENOMEM);
+ }
+ kernel_workq_setup = 1;
+ }
+ return(0);
+}
+
+
+/* This routine is called with list lock held */
+static pthread_workitem_t
+alloc_workitem(void)
+{
+ pthread_workitem_t witem;
+
+ if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
+ /* the chunk size is chosen so that the pool size is a multiple of it */
+ if (__workqueue_nitems < WORKITEM_POOL_SIZE) {
+ grow_workitem();
+ } else {
+ workqueue_list_unlock();
+ witem = malloc(sizeof(struct _pthread_workitem));
+ workqueue_list_lock();
+ witem->fromcache = 0;
+ goto out;
+ }
+ }
+ witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
+ TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
+ witem->fromcache = 1;
+out:
+ witem->flags = 0;
+ witem->item_entry.tqe_next = 0;
+ witem->item_entry.tqe_prev = 0;
+ user_workitem_count++;
+ return(witem);
+}
+
+/* This routine is called with list lock held */
+static void
+free_workitem(pthread_workitem_t witem)
+{
+ user_workitem_count--;
+ witem->flags = 0;
+ if (witem->fromcache != 0)
+ TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
+ else
+ free(witem);
+}
+
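+/*
+ * Carves the next WORKITEM_CHUNK_SIZE work items out of the mmap'd pool
+ * and threads them onto the free list; __workqueue_nitems tracks how much
+ * of the pool has been handed out so far.
+ */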
+static void
+grow_workitem(void)
+{
+ pthread_workitem_t witemp;
+ int i;
+
+ witemp = &__workqueue_pool_ptr[__workqueue_nitems];
+ bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_CHUNK_SIZE));
+ for (i = 0; i < WORKITEM_CHUNK_SIZE; i++) {
+ witemp[i].fromcache = 1;
+ TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
+ }
+ __workqueue_nitems += WORKITEM_CHUNK_SIZE;
+}
+
+/* This routine is called with list lock held */
+static pthread_workqueue_t
+alloc_workqueue(void)
+{
+ pthread_workqueue_t wq;
+
+ if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
+ workqueue_list_unlock();
+ wq = malloc(sizeof(struct _pthread_workqueue));
+ workqueue_list_lock();
+ } else {
+ wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
+ TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
+ }
+ user_workq_count++;
+ return(wq);
+}
+
+/* This routine is called with list lock held */
+static void
+free_workqueue(pthread_workqueue_t wq)
+{
+ user_workq_count--;
+ TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
+}
+
+static void
+_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
+{
+ bzero(wq, sizeof(struct _pthread_workqueue));
+ if (attr != NULL) {
+ wq->queueprio = attr->queueprio;
+ wq->overcommit = attr->overcommit;
+ } else {
+ wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+ wq->overcommit = 0;
+ }
+ LOCK_INIT(wq->lock);
+ wq->flags = 0;
+ TAILQ_INIT(&wq->item_listhead);
+ TAILQ_INIT(&wq->item_kernhead);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
+#endif
+ wq->wq_list.tqe_next = 0;
+ wq->wq_list.tqe_prev = 0;
+ wq->sig = PTHREAD_WORKQUEUE_SIG;
+ wq->headp = __pthread_wq_head_tbl[wq->queueprio];
+}
+
+int
+valid_workq(pthread_workqueue_t workq)
+{
+ if (workq->sig == PTHREAD_WORKQUEUE_SIG)
+ return(1);
+ else
+ return(0);
+}
+
+
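+/*
+ * Scheduler for the legacy SPI: round-robins over the priority buckets,
+ * posting work items to the kernel via post_nextworkitem() until the
+ * kernel queue reaches KERNEL_WORKQ_ELEM_MAX or nothing runnable remains,
+ * restarting from the top whenever a higher-priority queue becomes ready.
+ */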
+/* called with list lock */
+static void
+pick_nextworkqueue_droplock(void)
+{
+ int i, curwqprio, val, found;
+ pthread_workqueue_head_t headp;
+ pthread_workqueue_t workq;
+ pthread_workqueue_t nworkq = NULL;
+
+#if WQ_TRACE
+ __kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
+#endif
+loop:
+ while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
+ found = 0;
+ for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
+ wqreadyprio = i; /* because there is nothing else higher to run */
+ headp = __pthread_wq_head_tbl[i];
+
+ if (TAILQ_EMPTY(&headp->wqhead))
+ continue;
+ workq = headp->next_workq;
+ if (workq == NULL)
+ workq = TAILQ_FIRST(&headp->wqhead);
+ curwqprio = workq->queueprio;
+ nworkq = workq; /* starting point */
+ while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
+ headp->next_workq = TAILQ_NEXT(workq, wq_list);
+ if (headp->next_workq == NULL)
+ headp->next_workq = TAILQ_FIRST(&headp->wqhead);
+#if WQ_TRACE
+ __kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
+#endif
+ val = post_nextworkitem(workq);
+
+ if (val != 0) {
+ /* things could have changed, so reassess */
+ /* If the kernel queue is full, skip */
+ if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
+ break;
+ /* If anything with higher priority arrived, then reevaluate */
+ if (wqreadyprio < curwqprio)
+ goto loop; /* we need to re-evaluate again */
+ /* we can post some more work items */
+ found = 1;
+ }
+
+ /* cannot use workq here as it could be freed */
+ if (TAILQ_EMPTY(&headp->wqhead))
+ break;
+ /* if we found nothing to run and only one workqueue in the list, skip */
+ if ((val == 0) && (workq == headp->next_workq))
+ break;
+ workq = headp->next_workq;
+ if (workq == NULL)
+ workq = TAILQ_FIRST(&headp->wqhead);
+ if (val != 0)
+ nworkq = workq;
+ /* if we found nothing to run and are back at the workq where we started */
+ if ((val == 0) && (workq == nworkq))
+ break;
+ }
+ if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
+ break;
+ }
+ /* nothing found to run? */
+ if (found == 0)
+ break;
+ }
+ workqueue_list_unlock();
+}
+
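+/*
+ * Examines the item at the head of workq's list: returns 0 if the queue
+ * is suspended, empty, or behind a barrier; otherwise handles barrier and
+ * destroy items inline, or hands a regular item to the kernel with
+ * __workq_kernreturn(WQOPS_QUEUE_ADD).  Returns 1 when it consumed or
+ * posted an item.  Called with the list lock held; may drop and retake it
+ * around callbacks and the kernel call.
+ */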
+static int
+post_nextworkitem(pthread_workqueue_t workq)
+{
+ int error, prio;
+ pthread_workitem_t witem;
+ pthread_workqueue_head_t headp;
+ void (*func)(pthread_workqueue_t, void *);
+
+ if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
+ return(0);
+ }
+#if WQ_TRACE
+ __kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
+#endif
+ if (TAILQ_EMPTY(&workq->item_listhead)) {
+ return(0);
+ }
+ if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
+ return(0);
+
+ witem = TAILQ_FIRST(&workq->item_listhead);
+ headp = workq->headp;
+#if WQ_TRACE
+ __kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
+#endif
+ if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
+#endif
+
+ if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
+ return(0);
+ }
+ /* A barrier posted when there is nothing to wait for also needs handling */
+ if (workq->kq_count != 0) {
+ witem->flags |= PTH_WQITEM_APPLIED;
+ workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
+ workq->barrier_count = workq->kq_count;
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
+#endif
+ return(1);
+ } else {
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
+#endif
+ if (witem->func != NULL) {
+ /* since we are going to drop list lock */
+ witem->flags |= PTH_WQITEM_APPLIED;
+ workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
+ workqueue_list_unlock();
+ func = (void (*)(pthread_workqueue_t, void *))witem->func;
+ (*func)(workq, witem->func_arg);
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
+#endif
+ workqueue_list_lock();
+ workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
+ }
+ TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+ free_workitem(witem);
+#if WQ_TRACE
+ __kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
+#endif
+ return(1);
+ }
+ } else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
+#if WQ_TRACE
+ __kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
+#endif
+ if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
+ return(0);
+ }
+ witem->flags |= PTH_WQITEM_APPLIED;
+ workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
+ workq->barrier_count = workq->kq_count;
+ workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
+ workq->term_callarg = witem->func_arg;
+ TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+ if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
+ if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
+#if WQ_TRACE
+ __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
+#endif
+ }
+ free_workitem(witem);
+ workq->flags |= PTHREAD_WORKQ_DESTROYED;
+#if WQ_TRACE
+ __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
+#endif
+ headp = __pthread_wq_head_tbl[workq->queueprio];
+ if (headp->next_workq == workq) {
+ headp->next_workq = TAILQ_NEXT(workq, wq_list);
+ if (headp->next_workq == NULL) {
+ headp->next_workq = TAILQ_FIRST(&headp->wqhead);
+ if (headp->next_workq == workq)
+ headp->next_workq = NULL;
+ }
+ }
+ workq->sig = 0;
+ TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
+ if (workq->term_callback != NULL) {
+ workqueue_list_unlock();
+ (*workq->term_callback)(workq, workq->term_callarg);
+ workqueue_list_lock();
+ }
+ free_workqueue(workq);
+ return(1);
+ } else {
+ TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+ }
+#if WQ_TRACE
+ __kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
+#endif
+ return(1);
+ } else {
+#if WQ_TRACE
+ __kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
+#endif
+ TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+ TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
+ if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
+ workq->kq_count++;
+ witem->flags |= PTH_WQITEM_KERN_COUNT;
+ }
+ OSAtomicIncrement32Barrier(&kernel_workq_count);
+ workqueue_list_unlock();
+
+ prio = workq->queueprio;
+ if (workq->overcommit != 0) {
+ prio |= WORKQUEUE_OVERCOMMIT;
+ }
+
+ if ((error = __workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
+ OSAtomicDecrement32Barrier(&kernel_workq_count);
+ workqueue_list_lock();
+#if WQ_TRACE
+ __kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
+#endif
+ TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
+ TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+ if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
+ workq->flags |= PTHREAD_WORKQ_REQUEUED;
+ } else
+ workqueue_list_lock();
+#if WQ_TRACE
+ __kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
+#endif
+ return(1);
+ }
+ /* no one should reach here */
+#if 1
+ printf("error in logic for next workitem\n");
+ LIBC_ABORT("error in logic for next workitem");
+#endif
+ return(0);
+}
+
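+/*
+ * Entry point for kernel-created workqueue threads.  The reuse flags
+ * distinguish a brand-new thread (which must finish initializing its
+ * pthread structure and join the thread list) from a recycled one, and
+ * select between the legacy work-item path and the new libdispatch
+ * worker-function SPI.
+ */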
+void
+_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
+{
+ int ret;
+ pthread_attr_t *attrs = &_pthread_attr_default;
+ pthread_workqueue_t workq;
+#if WQ_DEBUG
+ pthread_t pself;
+#endif
+ int thread_reuse = 0;
+ int thread_priority = 0;
+ int thread_newspi = 0;
+ int thread_options = 0;
+
+ if (reuse & WQ_FLAG_THREAD_NEWSPI) {
+ thread_reuse = reuse & WQ_FLAG_THREAD_REUSE;
+ if ((reuse & WQ_FLAG_THREAD_OVERCOMMIT) != 0)
+ thread_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT;
+ thread_priority = reuse & WQ_FLAG_THREAD_PRIOMASK;
+ thread_newspi = 1;
+ workq = NULL;
+ } else {
+ thread_reuse = (reuse == 0)? 0: WQ_FLAG_THREAD_REUSE;
+ workq = item->workq;
+ }
+
+
+ if (thread_reuse == 0) {
+ /* reuse is set to 0 when a thread is newly created to run a work item */
+ _pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
+ self->wqthread = 1;
+ self->wqkillset = 0;
+ self->parentcheck = 1;
+
+ /* These are not joinable threads */
+ self->detached &= ~PTHREAD_CREATE_JOINABLE;
+ self->detached |= PTHREAD_CREATE_DETACHED;
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ _pthread_set_self(self);
+#endif
+#if WQ_TRACE
+ __kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
+#endif
+ self->kernel_thread = kport;
+ if (thread_newspi != 0) {
+ self->fun = (void *(*)(void *))__libdispatch_workerfunction;
+ self->arg = (void *)(uintptr_t)thread_priority;
+ } else {
+ self->fun = (void *(*)(void *))item->func;
+ self->arg = item->func_arg;
+ }
+ /* Add to the pthread list */
+ LOCK(_pthread_list_lock);
+ TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
+#if PTH_LISTTRACE
+ __kdebug_trace(0x900000c, self, 0, 0, 10, 0);
+#endif
+ _pthread_count++;
+ UNLOCK(_pthread_list_lock);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
+ printf("Failed to set thread_id in pthread_wqthread\n");
+#endif
+
+ } else {
+ /* reuse is set to 1 when a thread is reused to run another work item */
+#if WQ_TRACE
+ __kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
+#endif
+ /* reset all tsd from 1 to KEYS_MAX */
+ if (self == NULL)
+ LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);
+
+ if (thread_newspi != 0) {
+ self->fun = (void *(*)(void *))__libdispatch_workerfunction;
+ self->arg = NULL;
+ } else {
+ self->fun = (void *(*)(void *))item->func;
+ self->arg = item->func_arg;
+ }
+ }
+
+#if WQ_DEBUG
+ if (reuse == 0) {
+ pself = pthread_self();
+ if (self != pself) {
+#if WQ_TRACE
+ __kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
+#endif
+ printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
+ _pthread_set_self(self);
+ pself = pthread_self();
+ if (self != pself)
+ printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
+ pself = self;
+ }
+ } else {
+ pself = pthread_self();
+ if (self != pself) {
+ printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
+ LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
+ }
+ }
+#endif /* WQ_DEBUG */
+
+ if (thread_newspi != 0) {
+ (*__libdispatch_workerfunction)(thread_priority, thread_options, NULL);
+ _pthread_workq_return(self);
+ } else {
+ self->cur_workq = workq;
+ self->cur_workitem = item;
+ OSAtomicDecrement32Barrier(&kernel_workq_count);
+
+ ret = (int)(intptr_t)(*self->fun)(self->arg);
+ /* If we reach here without having gone through the initialization path above, do not go
+ * through the teardown code path (e.g. after setjmp/longjmp). Instead just exit this thread.
+ */
+ if (self != pthread_self()) {
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ workqueue_exit(self, workq, item);
+ }
+}
+
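+/*
+ * Completion path for a legacy work item: removes it from the kernel
+ * pending list, frees it, performs any barrier or terminate processing
+ * that becomes due, then picks the next work to post and returns this
+ * thread to the kernel.
+ */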
+static void
+workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
+{
+ pthread_workitem_t baritem;
+ pthread_workqueue_head_t headp;
+ void (*func)(pthread_workqueue_t, void *);
+
+ workqueue_list_lock();
+
+ TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
+ workq->kq_count--;
+#if WQ_TRACE
+ __kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
+#endif
+ free_workitem(item);
+
+ if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
+ workq->barrier_count--;
+#if WQ_TRACE
+ __kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
+#endif
+ if (workq->barrier_count <= 0 ) {
+ /* Need to remove barrier item from the list */
+ baritem = TAILQ_FIRST(&workq->item_listhead);
+#if WQ_DEBUG
+ if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
+ printf("Incorrect barrier item being removed in barrier processing\n");
+#endif /* WQ_DEBUG */
+ /* if the front item is a barrier and a callback is registered, run it */
+ if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
+ workqueue_list_unlock();
+ func = (void (*)(pthread_workqueue_t, void *))baritem->func;
+ (*func)(workq, baritem->func_arg);
+ workqueue_list_lock();
+ }
+ TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+ free_workitem(baritem);
+ workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
+#if WQ_TRACE
+ __kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
+#endif
+ if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
+ headp = __pthread_wq_head_tbl[workq->queueprio];
+ workq->flags |= PTHREAD_WORKQ_DESTROYED;
+#if WQ_TRACE
+ __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
+#endif
+ if (headp->next_workq == workq) {
+ headp->next_workq = TAILQ_NEXT(workq, wq_list);
+ if (headp->next_workq == NULL) {
+ headp->next_workq = TAILQ_FIRST(&headp->wqhead);
+ if (headp->next_workq == workq)
+ headp->next_workq = NULL;
+ }
+ }
+ TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
+ workq->sig = 0;
+ if (workq->term_callback != NULL) {
+ workqueue_list_unlock();
+ (*workq->term_callback)(workq, workq->term_callarg);
+ workqueue_list_lock();
+ }
+ free_workqueue(workq);
+ } else {
+ /* if there is a higher priority schedulable item, reset wqreadyprio */
+ if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
+ wqreadyprio = workq->queueprio;
+ }
+ }
+ }
+#if WQ_TRACE
+ else {
+ __kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
+ }
+
+ __kdebug_trace(0x900005c, self, item, 0, 0, 0);
+#endif
+ pick_nextworkqueue_droplock();
+ _pthread_workq_return(self);
+}
+
+static void
+_pthread_workq_return(pthread_t self)
+{
+ __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
+
+ /* This is the way to terminate the thread */
+ _pthread_exit(self, NULL);
+}
+
+
+/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
+
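+/*
+ * New (libdispatch) SPI: records the worker function, verifies kernel
+ * support with WQOPS_QUEUE_NEWSPISUPP, and opens the kernel workqueue.
+ * Fails with EPERM if the legacy SPI is already in use and with EBUSY if
+ * a worker function was already registered.
+ */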
+int
+pthread_workqueue_setdispatch_np(void (*worker_func)(int, int, void *))
+{
+ int error = 0;
+
+ if (__workqueue_oldspis != 0)
+ return(EPERM);
+
+ __workqueue_newspis = 1;
+
+ if (__libdispatch_workerfunction == NULL) {
+ __libdispatch_workerfunction = worker_func;
+ /* check whether the kernel supports new SPIs */
+ error = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, 0, 0);
+ if (error == -1) {
+ __libdispatch_workerfunction = NULL;
+ error = ENOTSUP;
+ __workqueue_newspis = 0;
+ } else {
+ /* prepare the kernel for workq action */
+ (void)__workq_open();
+ kernel_workq_setup = 1;
+ if (__is_threaded == 0)
+ __is_threaded = 1;
+ __workqueue_newspis = 1;
+ }
+ } else {
+ error = EBUSY;
+ }
+
+ return(error);
+}
+
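+/*
+ * New SPI: asks the kernel for numthreads additional worker threads at
+ * the given priority, tagging the request as overcommit when that option
+ * is set.
+ */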
+int
+pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
+{
+ int priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
+ int error = 0;
+
+ /* new SPI not initialized yet? */
+ if (__workqueue_newspis == 0)
+ return(EPERM);
+
+
+ if ((options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) != 0)
+ priority |= WORKQUEUE_OVERCOMMIT;
+
+ error = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, priority);
+
+ if (error == -1)
+ return(errno);
+ else
+ return(0);
+}
+
+int
+pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
+{
+ pthread_workqueue_t wq;
+ pthread_workqueue_head_t headp;
+
+#if defined(__ppc__)
+ IF_ROSETTA() {
+ return(ENOTSUP);
+ }
+#endif
+ if (__workqueue_newspis != 0)
+ return(EPERM);
+
+ if (__workqueue_oldspis == 0)
+ __workqueue_oldspis = 1;
+
+ if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
+ return(EINVAL);
+ }
+
+ if (__is_threaded == 0)
+ __is_threaded = 1;
+
+ workqueue_list_lock();
+ if (kernel_workq_setup == 0) {
+ int ret = _pthread_work_internal_init();
+ if (ret != 0) {
+ workqueue_list_unlock();
+ return(ret);
+ }
+ }
+
+ wq = alloc_workqueue();
+
+ _pthread_workq_init(wq, attr);
+
+ headp = __pthread_wq_head_tbl[wq->queueprio];
+ TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
+ if (headp->next_workq == NULL) {
+ headp->next_workq = TAILQ_FIRST(&headp->wqhead);
+ }
+
+ workqueue_list_unlock();
+
+ *workqp = wq;
+
+ return(0);
+}
+
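+/*
+ * Legacy SPI: allocates a work item for workq, appends it to the queue's
+ * item list, and lets pick_nextworkqueue_droplock() push work to the
+ * kernel.  Returns ESRCH if the queue is being terminated or destroyed.
+ */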
+int
+pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
+{
+ pthread_workitem_t witem;
+
+ if (__workqueue_newspis != 0)
+ return(EPERM);
+
+ if (valid_workq(workq) == 0) {
+ return(EINVAL);
+ }
+
+ workqueue_list_lock();
+
+ /*
+ * Allocate the work item here, since doing so can drop the lock;
+ * that way the workqueue state only has to be evaluated once.
+ */
+ witem = alloc_workitem();
+ witem->func = workitem_func;
+ witem->func_arg = workitem_arg;
+ witem->workq = workq;
+
+ /* alloc workitem can drop the lock, check the state */
+ if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
+ free_workitem(witem);
+ workqueue_list_unlock();
+ if (itemhandlep != NULL)
+ *itemhandlep = 0;
+ return(ESRCH);
+ }
+
+ if (itemhandlep != NULL)
+ *itemhandlep = (pthread_workitem_handle_t *)witem;
+ if (gencountp != NULL)
+ *gencountp = 0;
+#if WQ_TRACE
+ __kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
+#endif
+ TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+ __kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+
+ if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
+ wqreadyprio = workq->queueprio;
+
+ pick_nextworkqueue_droplock();
+
+ return(0);
+}
+
+int
+pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
+{
+ if (__workqueue_newspis != 0)
+ return(EPERM);
+
+ if (valid_workq(workq) == 0) {
+ return(EINVAL);
+ }
+
+ if (ocommp != NULL)
+ *ocommp = workq->overcommit;
+ return(0);
+}
+
+
+#else /* !BUILDING_VARIANT ] [ */
+extern int __unix_conforming;
+extern int _pthread_count;
+extern int __workqueue_newspis;
+extern int __workqueue_oldspis;
+
+extern pthread_lock_t _pthread_list_lock;
+extern void _pthread_testcancel(pthread_t thread, int isconforming);
+extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);
+
+#endif /* !BUILDING_VARIANT ] */
+
+#if __DARWIN_UNIX03
+
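+/*
+ * Cleanup handler for the join path (pthread_join itself lives in
+ * pthread_cancelable.c): clears the joiner reference and, for old-style
+ * threads, reaps the target if it has already exited and returns its
+ * death semaphore to the pool.
+ */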
+__private_extern__ void
+__posix_join_cleanup(void *arg)
+{
+ pthread_t thread = (pthread_t)arg;
+ int already_exited, res;
+ void * dummy;
+ semaphore_t death;
+ int newstyle;
+
+ LOCK(thread->lock);
+ already_exited = (thread->detached & _PTHREAD_EXITED);
+
+ newstyle = thread->newstyle;
+
+#if WQ_TRACE
+ __kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
+#endif
+ if (newstyle == 0) {
+ death = thread->death;
+ if (!already_exited){
+ thread->joiner = (struct _pthread *)NULL;
+ UNLOCK(thread->lock);
+ restore_sem_to_pool(death);
+ } else {
+ UNLOCK(thread->lock);
+ while ((res = _pthread_reap_thread(thread,
+ thread->kernel_thread,
+ &dummy, 1)) == EAGAIN)
+ {
+ sched_yield();
+ }
+ restore_sem_to_pool(death);
+
+ }
+
+ } else {
+ /* leave another thread to join */
+ thread->joiner = (struct _pthread *)NULL;
+ UNLOCK(thread->lock);
+ }
+}
+
+#endif /* __DARWIN_UNIX03 */
+
+
+/*
+ * Wait for a thread to terminate and obtain its exit value.
+ */
+/*
+int
+pthread_join(pthread_t thread,
+ void **value_ptr)
+
+moved to pthread_cancelable.c */
+
+/*
+ * Cancel a thread
+ */
+int
+pthread_cancel(pthread_t thread)
+{
+#if __DARWIN_UNIX03
+ if (__unix_conforming == 0)
+ __unix_conforming = 1;
+#endif /* __DARWIN_UNIX03 */
+
+ if (_pthread_lookup_thread(thread, NULL, 0) != 0)
+ return(ESRCH);
+
+ /* if the thread is a workqueue thread, then return error */
+ if (thread->wqthread != 0) {
+ return(ENOTSUP);
+ }
+#if __DARWIN_UNIX03
+ int state;
+
+ LOCK(thread->lock);
+ state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
+ UNLOCK(thread->lock);
+ if (state & PTHREAD_CANCEL_ENABLE)
+ __pthread_markcancel(thread->kernel_thread);
+#else /* __DARWIN_UNIX03 */
+ thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
+#endif /* __DARWIN_UNIX03 */
+ return (0);
+}
+
+void
+pthread_testcancel(void)
+{
+ pthread_t self = pthread_self();
+
+#if __DARWIN_UNIX03
+ if (__unix_conforming == 0)
+ __unix_conforming = 1;
+ _pthread_testcancel(self, 1);
+#else /* __DARWIN_UNIX03 */
+ _pthread_testcancel(self, 0);
+#endif /* __DARWIN_UNIX03 */
+
+}
+
+
+/*
+ * Query/update the cancelability 'state' of a thread
+ */
+int
+pthread_setcancelstate(int state, int *oldstate)
+{
+#if __DARWIN_UNIX03
+ if (__unix_conforming == 0) {
+ __unix_conforming = 1;
+ }
+ return (_pthread_setcancelstate_internal(state, oldstate, 1));
+#else /* __DARWIN_UNIX03 */
+ return (_pthread_setcancelstate_internal(state, oldstate, 0));
+#endif /* __DARWIN_UNIX03 */
+
+}
+
+
+
+/*
+ * Query/update the cancelability 'type' of a thread
+ */
+int
+pthread_setcanceltype(int type, int *oldtype)
+{
+ pthread_t self = pthread_self();
+
+#if __DARWIN_UNIX03
+ if (__unix_conforming == 0)
+ __unix_conforming = 1;
+#endif /* __DARWIN_UNIX03 */
+
+ if ((type != PTHREAD_CANCEL_DEFERRED) &&
+ (type != PTHREAD_CANCEL_ASYNCHRONOUS))
+ return EINVAL;
+ self = pthread_self();
+ LOCK(self->lock);
+ if (oldtype)
+ *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
+ self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
+ self->cancel_state |= type;
+ UNLOCK(self->lock);
+#if !__DARWIN_UNIX03
+ _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
+#endif /* !__DARWIN_UNIX03 */
+ return (0);
+}
+
+int
+pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
+{
+#if __DARWIN_UNIX03
+ int err = 0;
+
+ if (__pthread_sigmask(how, set, oset) == -1) {
+ err = errno;
+ }
+ return(err);
+#else /* __DARWIN_UNIX03 */
+ return(__pthread_sigmask(how, set, oset));
+#endif /* __DARWIN_UNIX03 */
+}
+
+/*
+int
+sigwait(const sigset_t * set, int * sig)
+
+moved to pthread_cancelable.c */