+ frag6_freef(ip6q.ip6q_next, &dfq6, &diq6);
+ }
+ lck_mtx_unlock(&ip6qlock);
+
+ /* free fragments that need to be freed */
+ if (!MBUFQ_EMPTY(&dfq6))
+ MBUFQ_DRAIN(&dfq6);
+
+ frag6_icmp6_timeex_error(&diq6);
+
+ VERIFY(MBUFQ_EMPTY(&dfq6));
+ VERIFY(MBUFQ_EMPTY(&diq6));
+}
+
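+/*
+ * Allocate a reassembly queue head (struct ip6q) backed by an MT_FTABLE
+ * mbuf.  Returns NULL when the ip6q_limit bound has been exceeded or the
+ * mbuf allocation fails; on success the structure is zeroed and the
+ * global ip6q_count is incremented.
+ */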
+static struct ip6q *
+ip6q_alloc(int how)
+{
+ struct mbuf *t;
+ struct ip6q *q6;
+
+ /*
+ * See comments in ip6q_updateparams(). Keep the count separate
+ * from frag6_nfragpackets since the latter represents the elements
+ * already in the reassembly queues.
+ */
+ if (ip6q_limit > 0 && ip6q_count > ip6q_limit)
+ return (NULL);
+
+ t = m_get(how, MT_FTABLE);
+ if (t != NULL) {
+ atomic_add_32(&ip6q_count, 1);
+ q6 = mtod(t, struct ip6q *);
+ bzero(q6, sizeof (*q6));
+ } else {
+ q6 = NULL;
+ }
+ return (q6);
+}
+
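+/*
+ * Release a queue head obtained from ip6q_alloc() and decrement
+ * ip6q_count.
+ */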
+static void
+ip6q_free(struct ip6q *q6)
+{
+ (void) m_free(dtom(q6));
+ atomic_add_32(&ip6q_count, -1);
+}
+
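+/*
+ * Allocate a per-fragment entry (struct ip6asfrag), bounded by
+ * ip6af_limit; same allocation scheme as ip6q_alloc() above.
+ */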
+static struct ip6asfrag *
+ip6af_alloc(int how)
+{
+ struct mbuf *t;
+ struct ip6asfrag *af6;
+
+ /*
+ * See comments in ip6q_updateparams(). Keep the count separate
+ * from frag6_nfrags since the latter represents the elements
+ * already in the reassembly queues.
+ */
+ if (ip6af_limit > 0 && ip6af_count > ip6af_limit)
+ return (NULL);
+
+ t = m_get(how, MT_FTABLE);
+ if (t != NULL) {
+ atomic_add_32(&ip6af_count, 1);
+ af6 = mtod(t, struct ip6asfrag *);
+ bzero(af6, sizeof (*af6));
+ } else {
+ af6 = NULL;
+ }
+ return (af6);
+}
+
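+/*
+ * Release a fragment entry obtained from ip6af_alloc() and decrement
+ * ip6af_count.
+ */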
+static void
+ip6af_free(struct ip6asfrag *af6)
+{
+ (void) m_free(dtom(af6));
+ atomic_add_32(&ip6af_count, -1);
+}
+
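+/*
+ * Recompute ip6q_limit and ip6af_limit from the current values of
+ * ip6_maxfragpackets and ip6_maxfrags.  Caller must hold ip6qlock.
+ */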
+static void
+ip6q_updateparams(void)
+{
+ lck_mtx_assert(&ip6qlock, LCK_MTX_ASSERT_OWNED);
+ /*
+ * -1 for unlimited allocation.
+ */
+ if (ip6_maxfragpackets < 0)
+ ip6q_limit = 0;
+ if (ip6_maxfrags < 0)
+ ip6af_limit = 0;
+ /*
+ * Positive number for specific bound.
+ */
+ if (ip6_maxfragpackets > 0)
+ ip6q_limit = ip6_maxfragpackets;
+ if (ip6_maxfrags > 0)
+ ip6af_limit = ip6_maxfrags;
+ /*
+ * Zero specifies no further fragment queue allocation -- set the
+ * bound very low, but rely on implementation elsewhere to actually
+ * prevent allocation and reclaim current queues.
+ */
+ if (ip6_maxfragpackets == 0)
+ ip6q_limit = 1;
+ if (ip6_maxfrags == 0)
+ ip6af_limit = 1;
+ /*
+ * Arm the purge timer if it is not already armed and there is
+ * work to do.
+ */
+ frag6_sched_timeout();
+}
+
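+/*
+ * sysctl handler for ip6_maxfragpackets; bounds the new value to the
+ * range [-1, nmbclusters / 4] and refreshes the allocation limits.
+ */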
+static int
+sysctl_maxfragpackets SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error, i;
+
+ lck_mtx_lock(&ip6qlock);
+ i = ip6_maxfragpackets;
+ error = sysctl_handle_int(oidp, &i, 0, req);
+ if (error || req->newptr == USER_ADDR_NULL)
+ goto done;
+ /* impose bounds */
+ if (i < -1 || i > (nmbclusters / 4)) {
+ error = EINVAL;
+ goto done;
+ }
+ ip6_maxfragpackets = i;
+ ip6q_updateparams();
+done:
+ lck_mtx_unlock(&ip6qlock);
+ return (error);
+}
+
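+/*
+ * sysctl handler for ip6_maxfrags; same bounds and update path as
+ * sysctl_maxfragpackets above.
+ */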
+static int
+sysctl_maxfrags SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error, i;
+
+ lck_mtx_lock(&ip6qlock);
+ i = ip6_maxfrags;
+ error = sysctl_handle_int(oidp, &i, 0, req);
+ if (error || req->newptr == USER_ADDR_NULL)
+ goto done;
+ /* impose bounds */
+ if (i < -1 || i > (nmbclusters / 4)) {
+ error = EINVAL;
+ goto done;