+/*
+ * Grow the global kernel-control table (kctl_table) by KCTL_TBL_INC slots.
+ *
+ * Called with ctl_mtx held.  Only one thread grows the table at a time:
+ * kctl_tbl_growing serializes growers, and late arrivals sleep on its
+ * address until the in-progress grow completes.  The mutex is dropped
+ * around the blocking allocation, which is why the growing flag (and not
+ * the mutex alone) protects kctl_table/kctl_tbl_size during the copy.
+ *
+ * May return without growing: when another thread already grew the table,
+ * when the next size would not fit in the 16-bit reference index space,
+ * or when the allocation fails (callers must re-check for free slots).
+ */
+static void
+kctl_tbl_grow()
+{
+ struct kctl **new_table;
+ uintptr_t new_size;
+
+ lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+
+ if (kctl_tbl_growing) {
+ /* Another thread is allocating */
+ kctl_tbl_growing_waiting++;
+
+ do {
+ (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
+ PSOCK | PCATCH, "kctl_tbl_growing", 0);
+ } while (kctl_tbl_growing);
+ kctl_tbl_growing_waiting--;
+ }
+ /* Another thread grew the table */
+ if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size)
+ return;
+
+ /* Verify we have a sane size */
+ if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
+ kctlstat.kcs_tbl_size_too_big++;
+ if (ctl_debug)
+ printf("%s kctl_tbl_size %lu too big\n",
+ __func__, kctl_tbl_size);
+ return;
+ }
+ kctl_tbl_growing = 1;
+
+ new_size = kctl_tbl_size + KCTL_TBL_INC;
+
+ /*
+  * M_WAIT allocation may block, so drop the mutex; kctl_tbl_growing
+  * keeps other growers out while it is released
+  */
+ lck_mtx_unlock(ctl_mtx);
+ new_table = _MALLOC(sizeof(struct kctl *) * new_size,
+ M_TEMP, M_WAIT | M_ZERO);
+ lck_mtx_lock(ctl_mtx);
+
+ if (new_table != NULL) {
+ if (kctl_table != NULL) {
+ bcopy(kctl_table, new_table,
+ kctl_tbl_size * sizeof(struct kctl *));
+
+ _FREE(kctl_table, M_TEMP);
+ }
+ kctl_table = new_table;
+ kctl_tbl_size = new_size;
+ }
+
+ /* Grow finished (or failed): let any waiters retry */
+ kctl_tbl_growing = 0;
+
+ if (kctl_tbl_growing_waiting) {
+ wakeup(&kctl_tbl_growing);
+ }
+}
+
+/*
+ * A kern_ctl_ref packs a 16-bit table index (stored as index plus one, so
+ * a valid reference is never 0) in the low bits and a 16-bit generation
+ * count in the high bits; the generation count salts the reference so a
+ * stale value is detected after deregister.
+ */
+#define KCTLREF_INDEX_MASK 0x0000FFFF
+#define KCTLREF_GENCNT_MASK 0xFFFF0000
+#define KCTLREF_GENCNT_SHIFT 16
+
+/*
+ * Allocate a table slot for kctl and mint an opaque reference for it.
+ *
+ * The reference encodes the slot index plus one in its low 16 bits and a
+ * monotonically bumped generation count in its high 16 bits, so a reference
+ * kept past deregister no longer matches.  Panics when no slot can be
+ * found even after attempting to grow the table.
+ *
+ * Caller must hold ctl_mtx.  Returns the new reference (also stored in
+ * kctl->kctlref).
+ */
+static kern_ctl_ref
+kctl_make_ref(struct kctl *kctl)
+{
+ uintptr_t slot;
+
+ lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+
+ /* Make room first when every slot is in use */
+ if (kctl_tbl_count >= kctl_tbl_size)
+ kctl_tbl_grow();
+
+ kctl->kctlref = NULL;
+ for (slot = 0; slot < kctl_tbl_size; slot++) {
+ uintptr_t ref;
+
+ if (kctl_table[slot] != NULL)
+ continue;
+
+ /*
+  * Reference is index plus one
+  */
+ kctl_ref_gencnt += 1;
+
+ /*
+  * Add generation count as salt to reference to prevent
+  * use after deregister
+  */
+ ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
+ KCTLREF_GENCNT_MASK) +
+ ((slot + 1) & KCTLREF_INDEX_MASK);
+
+ kctl->kctlref = (void *)(ref);
+ kctl_table[slot] = kctl;
+ kctl_tbl_count++;
+ break;
+ }
+
+ if (kctl->kctlref == NULL)
+ panic("%s no space in table", __func__);
+
+ if (ctl_debug > 0)
+ printf("%s %p for %p\n",
+ __func__, kctl->kctlref, kctl);
+
+ return (kctl->kctlref);
+}
+
+/*
+ * Release the table slot backing a kernel control reference.
+ *
+ * The reference encodes "index plus one" in its low 16 bits; the slot is
+ * released only when the stored reference (including its generation-count
+ * salt) matches exactly.  Out-of-range, stale, or already-released
+ * references are counted in kctlstat.kcs_bad_kctlref and ignored.
+ *
+ * Caller must hold ctl_mtx.
+ */
+static void
+kctl_delete_ref(kern_ctl_ref kctlref)
+{
+ /*
+  * Reference is index plus one
+  */
+ uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
+
+ lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+
+ if (i < kctl_tbl_size) {
+ struct kctl *kctl = kctl_table[i];
+
+ /*
+  * The slot may already be empty (stale reference after a
+  * deregister); check for NULL before dereferencing
+  */
+ if (kctl != NULL && kctl->kctlref == kctlref) {
+ kctl_table[i] = NULL;
+ kctl_tbl_count--;
+ } else {
+ kctlstat.kcs_bad_kctlref++;
+ }
+ } else {
+ kctlstat.kcs_bad_kctlref++;
+ }
+}
+
+/*
+ * Look up the struct kctl for a kernel control reference.
+ *
+ * Returns NULL (and bumps kctlstat.kcs_bad_kctlref) when the reference is
+ * out of range, points at an empty slot, or its generation-count salt does
+ * not match the stored reference -- i.e. the reference is stale or forged.
+ *
+ * Caller must hold ctl_mtx.
+ */
+static struct kctl *
+kctl_from_ref(kern_ctl_ref kctlref)
+{
+ /*
+  * Reference is index plus one
+  */
+ uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
+ struct kctl *kctl = NULL;
+
+ lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+
+ if (i >= kctl_tbl_size) {
+ kctlstat.kcs_bad_kctlref++;
+ return (NULL);
+ }
+ kctl = kctl_table[i];
+ /*
+  * The slot may be empty (stale reference after a deregister);
+  * do not dereference NULL
+  */
+ if (kctl == NULL || kctl->kctlref != kctlref) {
+ kctlstat.kcs_bad_kctlref++;
+ return (NULL);
+ }
+ return (kctl);
+}
+