zone_t semaphore_zone;
unsigned int semaphore_max;
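+/* Reference group so os_refcnt can attribute semaphore retain/release activity */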
+os_refgrp_decl(static, sema_refgrp, "semaphore", NULL);
+
/* Forward declarations */
/*
 * Initialize the semaphore values.
 */
s->port = IP_NULL;
- s->ref_count = 1;
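+ /* os_ref_init starts the count at 1 and ties it to sema_refgrp */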
+ os_ref_init(&s->ref_count, &sema_refgrp);
s->count = value;
s->active = TRUE;
s->owner = task;
if (semaphore->owner != task) {
semaphore_unlock(semaphore);
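+ /* release our semaphore reference before returning the error */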
+ semaphore_dereference(semaphore);
splx(spl_level);
task_unlock(task);
return KERN_INVALID_ARGUMENT;
}
-
+
semaphore_destroy_internal(task, semaphore);
/* semaphore unlocked */
thread_t self = current_thread();
wait_semaphore->count = -1; /* we don't keep an actual count */
- thread_lock(self);
+
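+ /* record the block reason so stackshot/kdp can identify the semaphore being waited on */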
+ thread_set_pending_block_hint(self, kThreadWaitSemaphore);
(void)waitq_assert_wait64_locked(
&wait_semaphore->waitq,
SEMAPHORE_EVENT,
TIMEOUT_URGENCY_USER_NORMAL,
deadline, TIMEOUT_NO_LEEWAY,
self);
- thread_unlock(self);
}
semaphore_unlock(wait_semaphore);
splx(spl_level);
void
semaphore_reference(
semaphore_t semaphore)
{
- (void)hw_atomic_add(&semaphore->ref_count, 1);
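+ /* checked retain; os_refcnt panics on a retain of an already-zero count */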
+ os_ref_retain(&semaphore->ref_count);
}
if (semaphore == NULL)
return;
- if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
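+ /* os_ref_release returns the new count; only the final release falls through to tear down */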
+ if (os_ref_release(&semaphore->ref_count) > 0) {
return;
+ }
/*
 * Last ref, clean up the port [if any]
 */
zfree(semaphore_zone, semaphore);
}
+#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
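+/* kdp/stackshot callback: recover the semaphore a blocked thread is waiting on and report its owner */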
+void
+kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
+{
+ semaphore_t sem = WAITQ_TO_SEMA(waitq);
+ assert(event == SEMAPHORE_EVENT);
+ assert(kdp_is_in_zone(sem, "semaphores"));
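+ /* report the semaphore's port as the wait context, and the owning task's pid if there is an owner */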
+ waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
+ if (sem->owner)
+ waitinfo->owner = pid_from_task(sem->owner);
+}