+__private_extern__ int
+dlil_create_input_thread(
+ ifnet_t ifp, struct dlil_threading_info *inputthread)
+{
+ int error;
+
+ bzero(inputthread, sizeof(*inputthread));
+    /*
+     * The loopback interface may not be configured yet when
+     * dlil_init() runs, so give its input thread a fixed name
+     * rather than one derived from if_name/if_unit.
+     */
+    if (ifp == lo_ifp)
+        strlcpy(inputthread->input_name, "dlil_input_main_thread_mtx", 32);
+    else
+        snprintf(inputthread->input_name, 32, "dlil_input_%s%d_mtx",
+            ifp->if_name, ifp->if_unit);
+
+    inputthread->lck_grp = lck_grp_alloc_init(inputthread->input_name,
+        dlil_grp_attributes);
+    inputthread->input_lck = lck_mtx_alloc_init(inputthread->lck_grp,
+        dlil_lck_attributes);
+
+    error = kernel_thread_start((thread_continue_t)dlil_input_thread_func,
+        inputthread, &inputthread->input_thread);
+ if (error == 0) {
+ ml_thread_policy(inputthread->input_thread, MACHINE_GROUP,
+ (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
+ /*
+ * Except for the loopback dlil input thread, we create
+ * an affinity set so that the matching workloop thread
+ * can be scheduled on the same processor set.
+ */
+ if (net_affinity && inputthread != dlil_lo_thread_ptr) {
+ struct thread *tp = inputthread->input_thread;
+ u_int32_t tag;
+ /*
+ * Randomize to reduce the probability
+ * of affinity tag namespace collision.
+ */
+ read_random(&tag, sizeof (tag));
+ if (dlil_affinity_set(tp, tag) == KERN_SUCCESS) {
+ thread_reference(tp);
+ inputthread->tag = tag;
+ inputthread->net_affinity = TRUE;
+ }
+ }
+ } else {
+ panic("dlil_create_input_thread: couldn't create thread\n");
+ }
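+    /* panic() above does not return, so only successful creations are counted. */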
+ OSAddAtomic(1, &cur_dlil_input_threads);
+#if DLIL_DEBUG
+ printf("dlil_create_input_thread: threadinfo: %p input_thread=%p threads: cur=%d max=%d\n",
+ inputthread, inputthread->input_thread, dlil_multithreaded_input, cur_dlil_input_threads);
+#endif
+ return error;
+}
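+
+/*
+ * Usage sketch (editorial example, not part of this change): one way an
+ * attach path could pair an interface with its own input thread.  The
+ * helper name dlil_attach_input_thread is hypothetical; it assumes the
+ * M_NKE malloc type so the allocation matches the FREE() in
+ * dlil_terminate_input_thread() below.
+ */
+static int
+dlil_attach_input_thread(ifnet_t ifp, struct dlil_threading_info **thrp)
+{
+    struct dlil_threading_info *thr;
+    int err;
+
+    /* Allocate with the malloc type that the terminate path frees. */
+    thr = _MALLOC(sizeof (*thr), M_NKE, M_WAITOK);
+    if (thr == NULL)
+        return (ENOMEM);
+    /*
+     * dlil_create_input_thread() bzeroes and fills in *thr; on failure
+     * it panics, so the nonzero-return path here is defensive only.
+     */
+    if ((err = dlil_create_input_thread(ifp, thr)) != 0) {
+        FREE(thr, M_NKE);
+        return (err);
+    }
+    *thrp = thr;
+    return (0);
+}
+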
+__private_extern__ void
+dlil_terminate_input_thread(
+ struct dlil_threading_info *inputthread)
+{
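+    /*
+     * Called by the input thread on itself with input_lck held; tears
+     * down the thread state and does not return.
+     */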
+ OSAddAtomic(-1, &cur_dlil_input_threads);
+
+ lck_mtx_unlock(inputthread->input_lck);
+ lck_mtx_free(inputthread->input_lck, inputthread->lck_grp);
+ lck_grp_free(inputthread->lck_grp);
+
+ FREE(inputthread, M_NKE);
+
+ /* For the extra reference count from kernel_thread_start() */
+ thread_deallocate(current_thread());
+
+ thread_terminate(current_thread());
+}
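+
+/*
+ * Termination sketch (editorial example): dlil_terminate_input_thread()
+ * must be entered with input_lck held and terminates the caller, so a
+ * detach check in the input thread's loop would invoke it roughly like
+ * this.  The IFEF_DETACHING test and the loop body are assumptions for
+ * illustration, not the actual dlil_input_thread_func() logic.
+ */
+static void
+dlil_input_thread_loop(struct dlil_threading_info *inputthread, ifnet_t ifp)
+{
+    for (;;) {
+        lck_mtx_lock(inputthread->input_lck);
+        if (ifp->if_eflags & IFEF_DETACHING) {
+            /* Unlocks input_lck, frees inputthread, never returns. */
+            dlil_terminate_input_thread(inputthread);
+            /* NOTREACHED */
+        }
+        /* ... sleep until packets are queued, then drain them ... */
+        lck_mtx_unlock(inputthread->input_lck);
+    }
+}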
+
+static kern_return_t
+dlil_affinity_set(struct thread *tp, u_int32_t tag)
+{
+ thread_affinity_policy_data_t policy;
+
+ bzero(&policy, sizeof (policy));
+ policy.affinity_tag = tag;
+ return (thread_policy_set(tp, THREAD_AFFINITY_POLICY,
+ (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT));
+}
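+
+/*
+ * Affinity sketch (editorial example): threads that set the same tag via
+ * THREAD_AFFINITY_POLICY are placed in one affinity set, which is how the
+ * matching workloop thread can be co-scheduled with its input thread.
+ * dlil_workloop_join_affinity is a hypothetical illustration of the
+ * workloop side reusing the tag saved by dlil_create_input_thread().
+ */
+static void
+dlil_workloop_join_affinity(struct dlil_threading_info *inputthread)
+{
+    if (inputthread->net_affinity)
+        (void) dlil_affinity_set(current_thread(), inputthread->tag);
+}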
+