]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2019 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | /* $FreeBSD: src/sys/netinet6/frag6.c,v 1.2.2.5 2001/07/03 11:01:50 ume Exp $ */ | |
30 | /* $KAME: frag6.c,v 1.31 2001/05/17 13:45:34 jinmei Exp $ */ | |
31 | ||
32 | /* | |
33 | * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. | |
34 | * All rights reserved. | |
35 | * | |
36 | * Redistribution and use in source and binary forms, with or without | |
37 | * modification, are permitted provided that the following conditions | |
38 | * are met: | |
39 | * 1. Redistributions of source code must retain the above copyright | |
40 | * notice, this list of conditions and the following disclaimer. | |
41 | * 2. Redistributions in binary form must reproduce the above copyright | |
42 | * notice, this list of conditions and the following disclaimer in the | |
43 | * documentation and/or other materials provided with the distribution. | |
44 | * 3. Neither the name of the project nor the names of its contributors | |
45 | * may be used to endorse or promote products derived from this software | |
46 | * without specific prior written permission. | |
47 | * | |
48 | * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND | |
49 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
50 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
51 | * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE | |
52 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
53 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
54 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
55 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
56 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
57 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
58 | * SUCH DAMAGE. | |
59 | */ | |
60 | ||
61 | #include <sys/param.h> | |
62 | #include <sys/systm.h> | |
63 | #include <sys/malloc.h> | |
64 | #include <sys/mcache.h> | |
65 | #include <sys/mbuf.h> | |
66 | #include <sys/domain.h> | |
67 | #include <sys/protosw.h> | |
68 | #include <sys/socket.h> | |
69 | #include <sys/errno.h> | |
70 | #include <sys/time.h> | |
71 | #include <sys/kernel.h> | |
72 | #include <sys/syslog.h> | |
73 | #include <kern/queue.h> | |
74 | #include <kern/locks.h> | |
75 | ||
76 | #include <net/if.h> | |
77 | #include <net/route.h> | |
78 | ||
79 | #include <netinet/in.h> | |
80 | #include <netinet/in_var.h> | |
81 | #include <netinet/ip.h> | |
82 | #include <netinet/ip_var.h> | |
83 | #include <netinet/ip6.h> | |
84 | #include <netinet6/ip6_var.h> | |
85 | #include <netinet/icmp6.h> | |
86 | ||
87 | #include <net/net_osdep.h> | |
88 | #include <dev/random/randomdev.h> | |
89 | ||
/*
 * Define it to get a correct behavior on per-interface statistics.
 */
#define IN6_IFSTAT_STRICT

/* Queue head type for lists of deferred-error mbufs. */
MBUFQ_HEAD(fq6_head);

/*
 * Save/restore a per-packet integer (ICMPv6 param-problem pointer value)
 * in the mbuf's pkt_hdr field while the packet sits on a deferred queue.
 */
static void frag6_save_context(struct mbuf *, int);
static void frag6_scrub_context(struct mbuf *);
static int frag6_restore_context(struct mbuf *);

/* Flush deferred ICMPv6 error queues; must be called without ip6qlock. */
static void frag6_icmp6_paramprob_error(struct fq6_head *);
static void frag6_icmp6_timeex_error(struct fq6_head *);

/* Doubly-linked-list helpers for fragments and reassembly queues. */
static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
static void frag6_deq(struct ip6asfrag *);
static void frag6_insque(struct ip6q *, struct ip6q *);
static void frag6_remque(struct ip6q *);
static void frag6_freef(struct ip6q *, struct fq6_head *, struct fq6_head *);

/* Reassembly-timeout (TTL expiry) machinery. */
static int frag6_timeout_run;           /* frag6 timer is scheduled to run */
static void frag6_timeout(void *);
static void frag6_sched_timeout(void);

/* Allocation/teardown of queue structures (mbuf-backed, see frag6_init). */
static struct ip6q *ip6q_alloc(int);
static void ip6q_free(struct ip6q *);
static void ip6q_updateparams(void);
static struct ip6asfrag *ip6af_alloc(int);
static void ip6af_free(struct ip6asfrag *);

decl_lck_mtx_data(static, ip6qlock);
static lck_attr_t *ip6qlock_attr;
static lck_grp_t *ip6qlock_grp;
static lck_grp_attr_t *ip6qlock_grp_attr;

/* IPv6 fragment reassembly queues (protected by ip6qlock) */
static struct ip6q ip6q;                /* ip6 reassembly queues */
static int ip6_maxfragpackets;          /* max packets in reass queues */
static u_int32_t frag6_nfragpackets;    /* # of packets in reass queues */
static int ip6_maxfrags;                /* max fragments in reass queues */
static u_int32_t frag6_nfrags;          /* # of fragments in reass queues */
static u_int32_t ip6q_limit;            /* ip6q allocation limit */
static u_int32_t ip6q_count;            /* current # of allocated ip6q's */
static u_int32_t ip6af_limit;           /* ip6asfrag allocation limit */
static u_int32_t ip6af_count;           /* current # of allocated ip6asfrag's */

static int sysctl_maxfragpackets SYSCTL_HANDLER_ARGS;
static int sysctl_maxfrags SYSCTL_HANDLER_ARGS;

SYSCTL_DECL(_net_inet6_ip6);

SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxfragpackets, 0,
    sysctl_maxfragpackets, "I",
    "Maximum number of IPv6 fragment reassembly queue entries");

SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, fragpackets,
    CTLFLAG_RD | CTLFLAG_LOCKED, &frag6_nfragpackets, 0,
    "Current number of IPv6 fragment reassembly queue entries");

SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxfrags, 0,
    sysctl_maxfrags, "I", "Maximum number of IPv6 fragments allowed");
153 | ||
/*
 * Initialise reassembly queue and fragment identifier.
 *
 * One-time setup: verifies the queue structures fit in an mbuf (they are
 * allocated from mbufs, see ip6q_alloc()/ip6af_alloc()), creates ip6qlock,
 * links the circular reassembly queue head to itself (empty), and derives
 * the resource limits from nmbclusters, mirroring the IPv4 settings.
 */
void
frag6_init(void)
{
	/* ip6q_alloc() uses mbufs for IPv6 fragment queue structures */
	_CASSERT(sizeof(struct ip6q) <= _MLEN);
	/* ip6af_alloc() uses mbufs for IPv6 fragment queue structures */
	_CASSERT(sizeof(struct ip6asfrag) <= _MLEN);

	/* IPv6 fragment reassembly queue lock */
	ip6qlock_grp_attr = lck_grp_attr_alloc_init();
	ip6qlock_grp = lck_grp_alloc_init("ip6qlock", ip6qlock_grp_attr);
	ip6qlock_attr = lck_attr_alloc_init();
	lck_mtx_init(&ip6qlock, ip6qlock_grp, ip6qlock_attr);

	lck_mtx_lock(&ip6qlock);
	/* Initialize IPv6 reassembly queue (self-linked head == empty). */
	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;

	/* same limits as IPv4 */
	ip6_maxfragpackets = nmbclusters / 32;
	ip6_maxfrags = ip6_maxfragpackets * 2;
	ip6q_updateparams();
	lck_mtx_unlock(&ip6qlock);
}
181 | ||
/*
 * Stash an integer (the ICMPv6 param-problem pointer offset) in the mbuf's
 * pkt_hdr field so it can be recovered by frag6_restore_context() when the
 * deferred error is eventually sent.
 */
static void
frag6_save_context(struct mbuf *m, int val)
{
	m->m_pkthdr.pkt_hdr = (void *)(uintptr_t)val;
}
187 | ||
/*
 * Clear the saved context so the overloaded pkt_hdr field does not leak
 * a stale value into later processing of this mbuf.
 */
static void
frag6_scrub_context(struct mbuf *m)
{
	m->m_pkthdr.pkt_hdr = NULL;
}
193 | ||
194 | static int | |
195 | frag6_restore_context(struct mbuf *m) | |
196 | { | |
197 | return (int)m->m_pkthdr.pkt_hdr; | |
198 | } | |
199 | ||
200 | /* | |
201 | * Send any deferred ICMP param problem error messages; caller must not be | |
202 | * holding ip6qlock and is expected to have saved the per-packet parameter | |
203 | * value via frag6_save_context(). | |
204 | */ | |
205 | static void | |
206 | frag6_icmp6_paramprob_error(struct fq6_head *diq6) | |
207 | { | |
208 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_NOTOWNED); | |
209 | ||
210 | if (!MBUFQ_EMPTY(diq6)) { | |
211 | struct mbuf *merr, *merr_tmp; | |
212 | int param; | |
213 | MBUFQ_FOREACH_SAFE(merr, diq6, merr_tmp) { | |
214 | MBUFQ_REMOVE(diq6, merr); | |
215 | MBUFQ_NEXT(merr) = NULL; | |
216 | param = frag6_restore_context(merr); | |
217 | frag6_scrub_context(merr); | |
218 | icmp6_error(merr, ICMP6_PARAM_PROB, | |
219 | ICMP6_PARAMPROB_HEADER, param); | |
220 | } | |
221 | } | |
222 | } | |
223 | ||
224 | /* | |
225 | * Send any deferred ICMP time exceeded error messages; | |
226 | * caller must not be holding ip6qlock. | |
227 | */ | |
228 | static void | |
229 | frag6_icmp6_timeex_error(struct fq6_head *diq6) | |
230 | { | |
231 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_NOTOWNED); | |
232 | ||
233 | if (!MBUFQ_EMPTY(diq6)) { | |
234 | struct mbuf *m, *m_tmp; | |
235 | MBUFQ_FOREACH_SAFE(m, diq6, m_tmp) { | |
236 | MBUFQ_REMOVE(diq6, m); | |
237 | MBUFQ_NEXT(m) = NULL; | |
238 | icmp6_error_flag(m, ICMP6_TIME_EXCEEDED, | |
239 | ICMP6_TIME_EXCEED_REASSEMBLY, 0, 0); | |
240 | } | |
241 | } | |
242 | } | |
243 | ||
244 | /* | |
245 | * In RFC2460, fragment and reassembly rule do not agree with each other, | |
246 | * in terms of next header field handling in fragment header. | |
247 | * While the sender will use the same value for all of the fragmented packets, | |
248 | * receiver is suggested not to check the consistency. | |
249 | * | |
250 | * fragment rule (p20): | |
251 | * (2) A Fragment header containing: | |
252 | * The Next Header value that identifies the first header of | |
253 | * the Fragmentable Part of the original packet. | |
254 | * -> next header field is same for all fragments | |
255 | * | |
256 | * reassembly rule (p21): | |
257 | * The Next Header field of the last header of the Unfragmentable | |
258 | * Part is obtained from the Next Header field of the first | |
259 | * fragment's Fragment header. | |
260 | * -> should grab it from the first fragment only | |
261 | * | |
262 | * The following note also contradicts with fragment rule - noone is going to | |
263 | * send different fragment with different next header field. | |
264 | * | |
265 | * additional note (p22): | |
266 | * The Next Header values in the Fragment headers of different | |
267 | * fragments of the same original packet may differ. Only the value | |
268 | * from the Offset zero fragment packet is used for reassembly. | |
269 | * -> should grab it from the first fragment only | |
270 | * | |
271 | * There is no explicit reason given in the RFC. Historical reason maybe? | |
272 | */ | |
/*
 * Fragment input.
 *
 * Extension-header handler for IPv6 fragment headers.  *mp is the packet,
 * *offp the offset of the fragment header within it.  Behavior:
 *   - atomic fragments (RFC 6946) are unwrapped in place and the next
 *     header value is returned immediately;
 *   - otherwise the fragment is filed into the matching reassembly queue
 *     (keyed on src, dst, ident).  If the datagram is now complete, the
 *     fragments are concatenated, the fragment header is deleted, *mp and
 *     *offp are updated, and the reassembled next-header value is
 *     returned; otherwise IPPROTO_DONE is returned.
 *
 * Acquires ip6qlock internally; ICMPv6 errors discovered while the lock
 * is held are deferred on diq6 and sent only after the lock is dropped.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
#pragma unused(proto)
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;        /* must be larger than u_int16_t */
	struct ifnet *dstifp = NULL;
	u_int8_t ecn, ecn0;
	uint32_t csum, csum_flags;
	struct fq6_head diq6;
	int locked = 0;

	VERIFY(m->m_flags & M_PKTHDR);

	MBUFQ_INIT(&diq6);      /* for deferred ICMP param problem errors */

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), goto done);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);

#ifdef IN6_IFSTAT_STRICT
	/* find the destination interface of the packet. */
	if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
		uint32_t idx;

		if (ip6_getdstifaddr_info(m, &idx, NULL) == 0) {
			if (idx > 0 && idx <= if_index) {
				ifnet_head_lock_shared();
				dstifp = ifindex2ifnet[idx];
				ifnet_head_done();
			}
		}
	}
#endif /* IN6_IFSTAT_STRICT */

	/* we are violating the spec, this may not be the dst interface */
	if (dstifp == NULL) {
		dstifp = m->m_pkthdr.rcvif;
	}

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		m = NULL;       /* icmp6_error() consumed the mbuf */
		goto done;
	}

	/*
	 * check whether fragment packet's fragment length is
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) = 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		m = NULL;
		goto done;
	}

	/* If ip6_maxfragpackets or ip6_maxfrags is 0, never accept fragments */
	if (ip6_maxfragpackets == 0 || ip6_maxfrags == 0) {
		ip6stat.ip6s_fragments++;
		ip6stat.ip6s_fragdropped++;
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		m_freem(m);
		m = NULL;
		goto done;
	}

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	/*
	 * RFC 6946: Handle "atomic" fragments (offset and m bit set to 0)
	 * upfront, unrelated to any reassembly. Just skip the fragment header.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		/*
		 * Mark packet as reassembled.
		 * In ICMPv6 processing, we drop certain
		 * NDP messages that are not expected to
		 * have fragment header based on recommendations
		 * against security vulnerability as described in
		 * RFC 6980.
		 * Treat atomic fragments as re-assembled packets as well.
		 */
		m->m_pkthdr.pkt_flags |= PKTF_REASSEMBLED;
		ip6stat.ip6s_atmfrag_rcvd++;
		in6_ifstat_inc(dstifp, ifs6_atmfrag_rcvd);
		*offp = offset;
		return ip6f->ip6f_nxt;
	}

	/*
	 * Leverage partial checksum offload for simple UDP/IP fragments,
	 * as that is the most common case.
	 *
	 * Perform 1's complement adjustment of octets that got included/
	 * excluded in the hardware-calculated checksum value. Also take
	 * care of any trailing bytes and subtract out their partial sum.
	 */
	if (ip6f->ip6f_nxt == IPPROTO_UDP &&
	    offset == (sizeof(*ip6) + sizeof(*ip6f)) &&
	    (m->m_pkthdr.csum_flags &
	    (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		uint32_t start = m->m_pkthdr.csum_rx_start;
		uint32_t ip_len = (sizeof(*ip6) + ntohs(ip6->ip6_plen));
		int32_t trailer = (m_pktlen(m) - ip_len);
		uint32_t swbytes = (uint32_t)trailer;

		csum = m->m_pkthdr.csum_rx_val;

		ASSERT(trailer >= 0);
		if (start != offset || trailer != 0) {
			uint16_t s = 0, d = 0;

			/*
			 * Temporarily zero any embedded scope-id words so
			 * they do not pollute the checksum adjustment; they
			 * are restored below.
			 */
			if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
				s = ip6->ip6_src.s6_addr16[1];
				ip6->ip6_src.s6_addr16[1] = 0;
			}
			if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
				d = ip6->ip6_dst.s6_addr16[1];
				ip6->ip6_dst.s6_addr16[1] = 0;
			}

			/* callee folds in sum */
			csum = m_adj_sum16(m, start, offset,
			    (ip_len - offset), csum);
			if (offset > start) {
				swbytes += (offset - start);
			} else {
				swbytes += (start - offset);
			}

			if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
				ip6->ip6_src.s6_addr16[1] = s;
			}
			if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
				ip6->ip6_dst.s6_addr16[1] = d;
			}
		}
		csum_flags = m->m_pkthdr.csum_flags;

		if (swbytes != 0) {
			udp_in6_cksum_stats(swbytes);
		}
		if (trailer != 0) {
			m_adj(m, -trailer);
		}
	} else {
		csum = 0;
		csum_flags = 0;
	}

	/* Invalidate checksum */
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	lck_mtx_lock(&ip6qlock);
	locked = 1;

	/* Look up the reassembly queue keyed on (ident, src, dst). */
	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next) {
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)) {
			break;
		}
	}

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		q6 = ip6q_alloc(M_DONTWAIT);
		if (q6 == NULL) {
			goto dropfrag;
		}

		frag6_insque(q6, &ip6q);
		frag6_nfragpackets++;

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp = (u_char *)nxtp;
#endif
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_ecn =
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;

		/*
		 * If the first fragment has valid checksum offload
		 * info, the rest of fragments are eligible as well.
		 */
		if (csum_flags != 0) {
			q6->ip6q_csum = csum;
			q6->ip6q_csum_flags = csum_flags;
		}
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			/* drop ip6qlock before re-entering via icmp6_error */
			lck_mtx_unlock(&ip6qlock);
			locked = 0;
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			m = NULL;
			goto done;
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		lck_mtx_unlock(&ip6qlock);
		locked = 0;
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		m = NULL;
		goto done;
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		    af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				ip6af_free(af6);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				/*
				 * Defer the ICMPv6 error until ip6qlock is
				 * released; stash the pointer value now.
				 */
				frag6_save_context(merr,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));

				MBUFQ_ENQUEUE(&diq6, merr);
			}
		}
	}

	ip6af = ip6af_alloc(M_DONTWAIT);
	if (ip6af == NULL) {
		goto dropfrag;
	}

	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			ip6af_free(ip6af);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE) {
			q6->ip6q_ecn = IPTOS_ECN_CE;
		}
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		ip6af_free(ip6af);
		goto dropfrag;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	    af6 = af6->ip6af_down) {
		if (af6->ip6af_off > ip6af->ip6af_off) {
			break;
		}
	}

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already. If so, drop the data from the incoming
	 * segment. If it provides all of our data, drop us.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then it's checksum is invalidated.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
		    - ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen) {
				goto dropfrag;
			}
			m_adj(IP6_REASS_MBUF(ip6af), i);
			q6->ip6q_csum_flags = 0;
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	    ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			q6->ip6q_csum_flags = 0;
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming framgent overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to override
	 * existing fragments from a security point of view.
	 * We don't know which fragment is the bad guy - here we trust
	 * fragment that came in earlier, with no real reason.
	 *
	 * Note: due to changes after disabling this part, mbuf passed to
	 * m_adj() below now does not meet the requirement.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
		    - ip6af->ip6af_off;
		if (i > 0) {
#if 0                           /* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			ip6af_free(ip6af);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0                           /* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			ip6af_free(ip6af);
			goto dropfrag;
		}
	}
#endif

	/*
	 * If this fragment contains similar checksum offload info
	 * as that of the existing ones, accumulate checksum. Otherwise,
	 * invalidate checksum offload info for the entire datagram.
	 */
	if (csum_flags != 0 && csum_flags == q6->ip6q_csum_flags) {
		q6->ip6q_csum += csum;
	} else if (q6->ip6q_csum_flags != 0) {
		q6->ip6q_csum_flags = 0;
	}

insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
	/*
	 * Walk the fragment list; any gap means the datagram is still
	 * incomplete and we return IPPROTO_DONE via "done".
	 */
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	    af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			lck_mtx_unlock(&ip6qlock);
			locked = 0;
			m = NULL;
			goto done;
		}
		next += af6->ip6af_frglen;
	}
	/* last fragment still has "more fragments" set: not complete yet */
	if (af6->ip6af_up->ip6af_mff) {
		lck_mtx_unlock(&ip6qlock);
		locked = 0;
		m = NULL;
		goto done;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next) {
			t = t->m_next;
		}
		t->m_next = IP6_REASS_MBUF(af6);
		/* trim each fragment's headers; payload starts at ip6af_offset */
		m_adj(t->m_next, af6->ip6af_offset);
		ip6af_free(af6);
		af6 = af6dwn;
	}

	/*
	 * Store partial hardware checksum info from the fragment queue;
	 * the receive start offset is set to 40 bytes (see code at the
	 * top of this routine.)
	 */
	if (q6->ip6q_csum_flags != 0) {
		csum = q6->ip6q_csum;

		ADDCARRY(csum);

		m->m_pkthdr.csum_rx_val = csum;
		m->m_pkthdr.csum_rx_start = sizeof(struct ip6_hdr);
		m->m_pkthdr.csum_flags = q6->ip6q_csum_flags;
	} else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
	    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		/* loopback checksums are always OK */
		m->m_pkthdr.csum_data = 0xffff;
		m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	ip6af_free(ip6af);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	if (q6->ip6q_ecn == IPTOS_ECN_CE) {
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	}

	nxt = q6->ip6q_nxt;
#ifdef notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/* Delete frag6 header */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		/* This is the only possible case with !PULLDOWN_TEST */
		ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
		    offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfragpackets--;
			frag6_nfrags -= q6->ip6q_nfrag;
			ip6q_free(q6);
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfragpackets--;
	frag6_nfrags -= q6->ip6q_nfrag;
	ip6q_free(q6);

	if (m->m_flags & M_PKTHDR) {    /* Isn't it always true? */
		m_fixhdr(m);
		/*
		 * Mark packet as reassembled
		 * In ICMPv6 processing, we drop certain
		 * NDP messages that are not expected to
		 * have fragment header based on recommendations
		 * against security vulnerability as described in
		 * RFC 6980.
		 */
		m->m_pkthdr.pkt_flags |= PKTF_REASSEMBLED;
	}
	ip6stat.ip6s_reassembled++;

	/*
	 * Tell launch routine the next header
	 */
	*mp = m;
	*offp = offset;

	/* arm the purge timer if not already and if there's work to do */
	frag6_sched_timeout();
	lck_mtx_unlock(&ip6qlock);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);
	frag6_icmp6_paramprob_error(&diq6);
	VERIFY(MBUFQ_EMPTY(&diq6));
	return nxt;

done:
	VERIFY(m == NULL);
	if (!locked) {
		if (frag6_nfragpackets == 0) {
			frag6_icmp6_paramprob_error(&diq6);
			VERIFY(MBUFQ_EMPTY(&diq6));
			return IPPROTO_DONE;
		}
		lck_mtx_lock(&ip6qlock);
	}
	/* arm the purge timer if not already and if there's work to do */
	frag6_sched_timeout();
	lck_mtx_unlock(&ip6qlock);
	frag6_icmp6_paramprob_error(&diq6);
	VERIFY(MBUFQ_EMPTY(&diq6));
	return IPPROTO_DONE;

dropfrag:
	/* reached only with ip6qlock held */
	ip6stat.ip6s_fragdropped++;
	/* arm the purge timer if not already and if there's work to do */
	frag6_sched_timeout();
	lck_mtx_unlock(&ip6qlock);
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	m_freem(m);
	frag6_icmp6_paramprob_error(&diq6);
	VERIFY(MBUFQ_EMPTY(&diq6));
	return IPPROTO_DONE;
}
891 | ||
892 | /* | |
893 | * Free a fragment reassembly header and all | |
894 | * associated datagrams. | |
895 | */ | |
896 | void | |
897 | frag6_freef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) | |
898 | { | |
899 | struct ip6asfrag *af6, *down6; | |
900 | ||
901 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); | |
902 | ||
903 | for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; | |
904 | af6 = down6) { | |
905 | struct mbuf *m = IP6_REASS_MBUF(af6); | |
906 | ||
907 | down6 = af6->ip6af_down; | |
908 | frag6_deq(af6); | |
909 | ||
910 | /* | |
911 | * Return ICMP time exceeded error for the 1st fragment. | |
912 | * Just free other fragments. | |
913 | */ | |
914 | if (af6->ip6af_off == 0) { | |
915 | struct ip6_hdr *ip6; | |
916 | ||
917 | /* adjust pointer */ | |
918 | ip6 = mtod(m, struct ip6_hdr *); | |
919 | ||
920 | /* restore source and destination addresses */ | |
921 | ip6->ip6_src = q6->ip6q_src; | |
922 | ip6->ip6_dst = q6->ip6q_dst; | |
923 | ||
924 | MBUFQ_ENQUEUE(diq6, m); | |
925 | } else { | |
926 | MBUFQ_ENQUEUE(dfq6, m); | |
927 | } | |
928 | ip6af_free(af6); | |
929 | } | |
930 | frag6_remque(q6); | |
931 | frag6_nfragpackets--; | |
932 | frag6_nfrags -= q6->ip6q_nfrag; | |
933 | ip6q_free(q6); | |
934 | } | |
935 | ||
936 | /* | |
937 | * Put an ip fragment on a reassembly chain. | |
938 | * Like insque, but pointers in middle of structure. | |
939 | */ | |
940 | void | |
941 | frag6_enq(struct ip6asfrag *af6, struct ip6asfrag *up6) | |
942 | { | |
943 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); | |
944 | ||
945 | af6->ip6af_up = up6; | |
946 | af6->ip6af_down = up6->ip6af_down; | |
947 | up6->ip6af_down->ip6af_up = af6; | |
948 | up6->ip6af_down = af6; | |
949 | } | |
950 | ||
951 | /* | |
952 | * To frag6_enq as remque is to insque. | |
953 | */ | |
954 | void | |
955 | frag6_deq(struct ip6asfrag *af6) | |
956 | { | |
957 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); | |
958 | ||
959 | af6->ip6af_up->ip6af_down = af6->ip6af_down; | |
960 | af6->ip6af_down->ip6af_up = af6->ip6af_up; | |
961 | } | |
962 | ||
963 | void | |
964 | frag6_insque(struct ip6q *new, struct ip6q *old) | |
965 | { | |
966 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); | |
967 | ||
968 | new->ip6q_prev = old; | |
969 | new->ip6q_next = old->ip6q_next; | |
970 | old->ip6q_next->ip6q_prev = new; | |
971 | old->ip6q_next = new; | |
972 | } | |
973 | ||
974 | void | |
975 | frag6_remque(struct ip6q *p6) | |
976 | { | |
977 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); | |
978 | ||
979 | p6->ip6q_prev->ip6q_next = p6->ip6q_next; | |
980 | p6->ip6q_next->ip6q_prev = p6->ip6q_prev; | |
981 | } | |
982 | ||
983 | /* | |
984 | * IPv6 reassembling timer processing; | |
985 | * if a timer expires on a reassembly | |
986 | * queue, discard it. | |
987 | */ | |
static void
frag6_timeout(void *arg)
{
#pragma unused(arg)
	struct fq6_head dfq6, diq6;
	struct ip6q *q6;

	MBUFQ_INIT(&dfq6);      /* for deferred frees */
	MBUFQ_INIT(&diq6);      /* for deferred ICMP time exceeded errors */

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	lck_mtx_lock(&ip6qlock);
	q6 = ip6q.ip6q_next;
	if (q6) {
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
			/*
			 * The cursor has already advanced past the entry
			 * whose TTL was just decremented (q6->ip6q_prev),
			 * so that entry can be unlinked and freed here
			 * without disturbing the walk.
			 */
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev, &dfq6, &diq6);
			}
		}
	}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	if (ip6_maxfragpackets >= 0) {
		while (frag6_nfragpackets > (unsigned)ip6_maxfragpackets &&
		    ip6q.ip6q_prev) {
			ip6stat.ip6s_fragoverflow++;
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(ip6q.ip6q_prev, &dfq6, &diq6);
		}
	}
	/* re-arm the purge timer if there's work to do */
	frag6_timeout_run = 0;
	frag6_sched_timeout();
	lck_mtx_unlock(&ip6qlock);

	/* free fragments that need to be freed (outside the lock) */
	if (!MBUFQ_EMPTY(&dfq6)) {
		MBUFQ_DRAIN(&dfq6);
	}

	/* send any deferred ICMPv6 time exceeded errors */
	frag6_icmp6_timeex_error(&diq6);

	VERIFY(MBUFQ_EMPTY(&dfq6));
	VERIFY(MBUFQ_EMPTY(&diq6));
}
1046 | ||
1047 | static void | |
1048 | frag6_sched_timeout(void) | |
1049 | { | |
1050 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); | |
1051 | ||
1052 | if (!frag6_timeout_run && frag6_nfragpackets > 0) { | |
1053 | frag6_timeout_run = 1; | |
1054 | timeout(frag6_timeout, NULL, hz); | |
1055 | } | |
1056 | } | |
1057 | ||
1058 | /* | |
1059 | * Drain off all datagram fragments. | |
1060 | */ | |
1061 | void | |
1062 | frag6_drain(void) | |
1063 | { | |
1064 | struct fq6_head dfq6, diq6; | |
1065 | ||
1066 | MBUFQ_INIT(&dfq6); /* for deferred frees */ | |
1067 | MBUFQ_INIT(&diq6); /* for deferred ICMP time exceeded errors */ | |
1068 | ||
1069 | lck_mtx_lock(&ip6qlock); | |
1070 | while (ip6q.ip6q_next != &ip6q) { | |
1071 | ip6stat.ip6s_fragdropped++; | |
1072 | /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ | |
1073 | frag6_freef(ip6q.ip6q_next, &dfq6, &diq6); | |
1074 | } | |
1075 | lck_mtx_unlock(&ip6qlock); | |
1076 | ||
1077 | /* free fragments that need to be freed */ | |
1078 | if (!MBUFQ_EMPTY(&dfq6)) { | |
1079 | MBUFQ_DRAIN(&dfq6); | |
1080 | } | |
1081 | ||
1082 | frag6_icmp6_timeex_error(&diq6); | |
1083 | ||
1084 | VERIFY(MBUFQ_EMPTY(&dfq6)); | |
1085 | VERIFY(MBUFQ_EMPTY(&diq6)); | |
1086 | } | |
1087 | ||
1088 | static struct ip6q * | |
1089 | ip6q_alloc(int how) | |
1090 | { | |
1091 | struct mbuf *t; | |
1092 | struct ip6q *q6; | |
1093 | ||
1094 | /* | |
1095 | * See comments in ip6q_updateparams(). Keep the count separate | |
1096 | * from frag6_nfragpackets since the latter represents the elements | |
1097 | * already in the reassembly queues. | |
1098 | */ | |
1099 | if (ip6q_limit > 0 && ip6q_count > ip6q_limit) { | |
1100 | return NULL; | |
1101 | } | |
1102 | ||
1103 | t = m_get(how, MT_FTABLE); | |
1104 | if (t != NULL) { | |
1105 | atomic_add_32(&ip6q_count, 1); | |
1106 | q6 = mtod(t, struct ip6q *); | |
1107 | bzero(q6, sizeof(*q6)); | |
1108 | } else { | |
1109 | q6 = NULL; | |
1110 | } | |
1111 | return q6; | |
1112 | } | |
1113 | ||
1114 | static void | |
1115 | ip6q_free(struct ip6q *q6) | |
1116 | { | |
1117 | (void) m_free(dtom(q6)); | |
1118 | atomic_add_32(&ip6q_count, -1); | |
1119 | } | |
1120 | ||
1121 | static struct ip6asfrag * | |
1122 | ip6af_alloc(int how) | |
1123 | { | |
1124 | struct mbuf *t; | |
1125 | struct ip6asfrag *af6; | |
1126 | ||
1127 | /* | |
1128 | * See comments in ip6q_updateparams(). Keep the count separate | |
1129 | * from frag6_nfrags since the latter represents the elements | |
1130 | * already in the reassembly queues. | |
1131 | */ | |
1132 | if (ip6af_limit > 0 && ip6af_count > ip6af_limit) { | |
1133 | return NULL; | |
1134 | } | |
1135 | ||
1136 | t = m_get(how, MT_FTABLE); | |
1137 | if (t != NULL) { | |
1138 | atomic_add_32(&ip6af_count, 1); | |
1139 | af6 = mtod(t, struct ip6asfrag *); | |
1140 | bzero(af6, sizeof(*af6)); | |
1141 | } else { | |
1142 | af6 = NULL; | |
1143 | } | |
1144 | return af6; | |
1145 | } | |
1146 | ||
1147 | static void | |
1148 | ip6af_free(struct ip6asfrag *af6) | |
1149 | { | |
1150 | (void) m_free(dtom(af6)); | |
1151 | atomic_add_32(&ip6af_count, -1); | |
1152 | } | |
1153 | ||
1154 | static void | |
1155 | ip6q_updateparams(void) | |
1156 | { | |
1157 | LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); | |
1158 | /* | |
1159 | * -1 for unlimited allocation. | |
1160 | */ | |
1161 | if (ip6_maxfragpackets < 0) { | |
1162 | ip6q_limit = 0; | |
1163 | } | |
1164 | if (ip6_maxfrags < 0) { | |
1165 | ip6af_limit = 0; | |
1166 | } | |
1167 | /* | |
1168 | * Positive number for specific bound. | |
1169 | */ | |
1170 | if (ip6_maxfragpackets > 0) { | |
1171 | ip6q_limit = ip6_maxfragpackets; | |
1172 | } | |
1173 | if (ip6_maxfrags > 0) { | |
1174 | ip6af_limit = ip6_maxfrags; | |
1175 | } | |
1176 | /* | |
1177 | * Zero specifies no further fragment queue allocation -- set the | |
1178 | * bound very low, but rely on implementation elsewhere to actually | |
1179 | * prevent allocation and reclaim current queues. | |
1180 | */ | |
1181 | if (ip6_maxfragpackets == 0) { | |
1182 | ip6q_limit = 1; | |
1183 | } | |
1184 | if (ip6_maxfrags == 0) { | |
1185 | ip6af_limit = 1; | |
1186 | } | |
1187 | /* | |
1188 | * Arm the purge timer if not already and if there's work to do | |
1189 | */ | |
1190 | frag6_sched_timeout(); | |
1191 | } | |
1192 | ||
1193 | static int | |
1194 | sysctl_maxfragpackets SYSCTL_HANDLER_ARGS | |
1195 | { | |
1196 | #pragma unused(arg1, arg2) | |
1197 | int error, i; | |
1198 | ||
1199 | lck_mtx_lock(&ip6qlock); | |
1200 | i = ip6_maxfragpackets; | |
1201 | error = sysctl_handle_int(oidp, &i, 0, req); | |
1202 | if (error || req->newptr == USER_ADDR_NULL) { | |
1203 | goto done; | |
1204 | } | |
1205 | /* impose bounds */ | |
1206 | if (i < -1 || i > (nmbclusters / 4)) { | |
1207 | error = EINVAL; | |
1208 | goto done; | |
1209 | } | |
1210 | ip6_maxfragpackets = i; | |
1211 | ip6q_updateparams(); | |
1212 | done: | |
1213 | lck_mtx_unlock(&ip6qlock); | |
1214 | return error; | |
1215 | } | |
1216 | ||
1217 | static int | |
1218 | sysctl_maxfrags SYSCTL_HANDLER_ARGS | |
1219 | { | |
1220 | #pragma unused(arg1, arg2) | |
1221 | int error, i; | |
1222 | ||
1223 | lck_mtx_lock(&ip6qlock); | |
1224 | i = ip6_maxfrags; | |
1225 | error = sysctl_handle_int(oidp, &i, 0, req); | |
1226 | if (error || req->newptr == USER_ADDR_NULL) { | |
1227 | goto done; | |
1228 | } | |
1229 | /* impose bounds */ | |
1230 | if (i < -1 || i > (nmbclusters / 4)) { | |
1231 | error = EINVAL; | |
1232 | goto done; | |
1233 | } | |
1234 | ip6_maxfrags = i; | |
1235 | ip6q_updateparams(); /* see if we need to arm timer */ | |
1236 | done: | |
1237 | lck_mtx_unlock(&ip6qlock); | |
1238 | return error; | |
1239 | } |