]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet6/frag6.c
00174a628b581014fd67060af8e6300df2ee478e
[apple/xnu.git] / bsd / netinet6 / frag6.c
1 /*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/frag6.c,v 1.2.2.5 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: frag6.c,v 1.31 2001/05/17 13:45:34 jinmei Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/malloc.h>
64 #include <sys/mcache.h>
65 #include <sys/mbuf.h>
66 #include <sys/domain.h>
67 #include <sys/protosw.h>
68 #include <sys/socket.h>
69 #include <sys/errno.h>
70 #include <sys/time.h>
71 #include <sys/kernel.h>
72 #include <sys/syslog.h>
73 #include <kern/queue.h>
74 #include <kern/locks.h>
75
76 #include <net/if.h>
77 #include <net/route.h>
78
79 #include <netinet/in.h>
80 #include <netinet/in_var.h>
81 #include <netinet/ip.h>
82 #include <netinet/ip6.h>
83 #include <netinet6/ip6_var.h>
84 #include <netinet/icmp6.h>
85
86 #include <net/net_osdep.h>
87
88 /*
89 * Define it to get a correct behavior on per-interface statistics.
90 * You will need to perform an extra routing table lookup, per fragment,
91 * to do it. This may, or may not be, a performance hit.
92 */
93 #define IN6_IFSTAT_STRICT
94
95 static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
96 static void frag6_deq(struct ip6asfrag *);
97 static void frag6_insque(struct ip6q *, struct ip6q *);
98 static void frag6_remque(struct ip6q *);
99 static void frag6_freef(struct ip6q *);
100
101 /* XXX we eventually need splreass6, or some real semaphore */
102 int frag6_doing_reass;
103 u_int frag6_nfragpackets;
104 static u_int frag6_nfrags;
105 struct ip6q ip6q; /* ip6 reassemble queue */
106
107
108 extern lck_mtx_t *inet6_domain_mutex;
109 /*
110 * Initialise reassembly queue and fragment identifier.
111 */
112 void
113 frag6_init()
114 {
115 struct timeval tv;
116
117 ip6_maxfragpackets = nmbclusters / 32;
118 ip6_maxfrags = nmbclusters / 4;
119
120 /*
121 * in many cases, random() here does NOT return random number
122 * as initialization during bootstrap time occur in fixed order.
123 */
124 microtime(&tv);
125 ip6_id = random() ^ tv.tv_usec;
126 ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
127 }
128
129 /*
130 * In RFC2460, fragment and reassembly rule do not agree with each other,
131 * in terms of next header field handling in fragment header.
132 * While the sender will use the same value for all of the fragmented packets,
133 * receiver is suggested not to check the consistency.
134 *
135 * fragment rule (p20):
136 * (2) A Fragment header containing:
137 * The Next Header value that identifies the first header of
138 * the Fragmentable Part of the original packet.
139 * -> next header field is same for all fragments
140 *
141 * reassembly rule (p21):
142 * The Next Header field of the last header of the Unfragmentable
143 * Part is obtained from the Next Header field of the first
144 * fragment's Fragment header.
145 * -> should grab it from the first fragment only
146 *
 * The following note also contradicts the fragment rule - no one is going to
 * send different fragments with different next header fields.
149 *
150 * additional note (p22):
151 * The Next Header values in the Fragment headers of different
152 * fragments of the same original packet may differ. Only the value
153 * from the Offset zero fragment packet is used for reassembly.
154 * -> should grab it from the first fragment only
155 *
156 * There is no explicit reason given in the RFC. Historical reason maybe?
157 */
/*
 * Fragment input.
 *
 * *mp/*offp carry the mbuf chain and the offset of the fragment header
 * within it.  When reassembly completes, they are updated to point at the
 * reassembled packet and the offset of the original next header, and the
 * next header protocol number is returned.  Otherwise IPPROTO_DONE is
 * returned and the mbuf has been consumed (queued on a reassembly chain,
 * handed to icmp6_error(), or freed).
 *
 * NOTE: this function is called with the inet6_domain_mutex held from ip6_input.
 * inet6_domain_mutex is protecting the frag6 queue manipulation.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
#pragma unused(proto)
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;		/* per-(src,dst,ident) reassembly queue */
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
	struct ifaddr *ifa = NULL;
	u_int8_t ecn, ecn0;

#ifdef IN6_IFSTAT_STRICT
	struct route_in6 ro;
	struct sockaddr_in6 *dst;
#endif

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), return IPPROTO_DONE);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;
#endif

	dstifp = NULL;
#ifdef IN6_IFSTAT_STRICT
	/* find the destination interface of the packet. */
	bzero(&ro, sizeof (ro));
	dst = (struct sockaddr_in6 *)&ro.ro_dst;
	dst->sin6_family = AF_INET6;
	dst->sin6_len = sizeof (struct sockaddr_in6);
	dst->sin6_addr = ip6->ip6_dst;

	rtalloc((struct route *)&ro);
	if (ro.ro_rt != NULL) {
		RT_LOCK(ro.ro_rt);
		if ((ifa = ro.ro_rt->rt_ifa) != NULL) {
			/*
			 * Hold a reference on ifa; every return path below
			 * must drop it via IFA_REMREF().
			 */
			IFA_ADDREF(ifa);
			dstifp = ((struct in6_ifaddr *)ro.ro_rt->rt_ifa)->ia_ifp;
		}
		RT_UNLOCK(ro.ro_rt);
		rtfree(ro.ro_rt);
		ro.ro_rt = NULL;
	}
#else
	/* we are violating the spec, this is not the destination interface */
	if ((m->m_flags & M_PKTHDR) != 0)
		dstifp = m->m_pkthdr.rcvif;
#endif

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		/* icmp6_error() consumes m */
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return IPPROTO_DONE;
	}

	/*
	 * check whether fragment packet's fragment length is
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) = 40
	 * (only the last fragment, with IP6F_MORE_FRAG clear, may have a
	 * length that is not a multiple of 8)
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB,
			    ICMP6_PARAMPROB_HEADER,
			    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return IPPROTO_DONE;
	}

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	frag6_doing_reass = 1;

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

	/* Look up an existing reassembly queue for (ident, src, dst). */
	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfrag is 0, never accept fragments.
		 * If maxfrag is -1, accept all fragments without limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;
		q6 = (struct ip6q *)_MALLOC(sizeof(struct ip6q), M_FTABLE,
		    M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		bzero(q6, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down	= q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp	= (u_char *)nxtp;
#endif
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_ttl	= IPV6_FRAGTTL;
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_ecn	=
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr)
			- sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
				    offset - sizeof(struct ip6_frag) +
					offsetof(struct ip6_frag, ip6f_offlg));
			frag6_doing_reass = 0;
			if (ifa != NULL)
				IFA_REMREF(ifa);
			return(IPPROTO_DONE);
		}
	}
	else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
				offsetof(struct ip6_frag, ip6f_offlg));
		frag6_doing_reass = 0;
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return(IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				FREE(af6, M_FTABLE);
				/*
				 * NOTE(review): q6->ip6q_nfrag and
				 * frag6_nfrags are not decremented here,
				 * unlike the other places a fragment is
				 * dequeued — looks like an accounting
				 * leak; confirm.
				 */

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				/* icmp6_error() consumes merr */
				icmp6_error(merr, ICMP6_PARAM_PROB,
					    ICMP6_PARAMPROB_HEADER,
					    erroff - sizeof(struct ip6_frag) +
						offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	/* Allocate the bookkeeping entry for this fragment. */
	ip6af = (struct ip6asfrag *)_MALLOC(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	bzero(ip6af, sizeof(*ip6af));
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		/* fresh queue: insert right after the queue head */
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			FREE(ip6af, M_FTABLE);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		FREE(ip6af, M_FTABLE);
		goto dropfrag;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming framgent overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to override
	 * existing fragments from a security point of view.
	 * We don't know which fragment is the bad guy - here we trust
	 * fragment that came in earlier, with no real reason.
	 *
	 * Note: due to changes after disabling this part, mbuf passed to
	 * m_adj() below now does not meet the requirement.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			FREE(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			FREE(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
#endif

insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
	/* Walk the chain; any gap means reassembly is not yet complete. */
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			frag6_doing_reass = 0;
			if (ifa != NULL)
				IFA_REMREF(ifa);
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	/* Last fragment (the one with IP6F_MORE_FRAG clear) not seen yet. */
	if (af6->ip6af_up->ip6af_mff) {
		frag6_doing_reass = 0;
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		/* strip each fragment's headers; keep only its data part */
		m_adj(t->m_next, af6->ip6af_offset);
		FREE(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	FREE(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	/* propagate congestion marking recorded from any fragment */
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);

	nxt = q6->ip6q_nxt;
#if notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/* Delete frag6 header */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		/* This is the only possible case with !PULLDOWN_TEST */
		ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
		    offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			FREE(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	FREE(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	ip6stat.ip6s_reassembled++;
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	frag6_doing_reass = 0;
	if (ifa != NULL)
		IFA_REMREF(ifa);
	return nxt;

 dropfrag:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	ip6stat.ip6s_fragdropped++;
	m_freem(m);
	frag6_doing_reass = 0;
	if (ifa != NULL)
		IFA_REMREF(ifa);
	return IPPROTO_DONE;
}
638
/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 *
 * The fragment at offset 0 (if queued) is turned into an ICMPv6
 * "time exceeded in reassembly" error — icmp6_error() consumes that
 * mbuf; all other fragments are simply freed.  Finally the ip6q itself
 * is unlinked from the global queue, freed, and the global fragment
 * accounting is updated.
 */
void
frag6_freef(q6)
	struct ip6q *q6;
{
	struct ip6asfrag *af6, *down6;

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		/* save the successor before af6 is unlinked and freed */
		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;
			icmp6_error(m, ICMP6_TIME_EXCEEDED,
				    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		FREE(af6, M_FTABLE);

	}
	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	FREE(q6, M_FTABLE);
	frag6_nfragpackets--;
}
681
682 /*
683 * Put an ip fragment on a reassembly chain.
684 * Like insque, but pointers in middle of structure.
685 */
686 void
687 frag6_enq(af6, up6)
688 struct ip6asfrag *af6, *up6;
689 {
690 af6->ip6af_up = up6;
691 af6->ip6af_down = up6->ip6af_down;
692 up6->ip6af_down->ip6af_up = af6;
693 up6->ip6af_down = af6;
694 }
695
696 /*
697 * To frag6_enq as remque is to insque.
698 */
699 void
700 frag6_deq(af6)
701 struct ip6asfrag *af6;
702 {
703 af6->ip6af_up->ip6af_down = af6->ip6af_down;
704 af6->ip6af_down->ip6af_up = af6->ip6af_up;
705 }
706
707 void
708 frag6_insque(new, old)
709 struct ip6q *new, *old;
710 {
711 new->ip6q_prev = old;
712 new->ip6q_next = old->ip6q_next;
713 old->ip6q_next->ip6q_prev= new;
714 old->ip6q_next = new;
715 }
716
717 void
718 frag6_remque(p6)
719 struct ip6q *p6;
720 {
721 p6->ip6q_prev->ip6q_next = p6->ip6q_next;
722 p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
723 }
724
/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo()
{
	struct ip6q *q6;
	/* serialize against frag6_input()/frag6_drain() */
	lck_mtx_lock(inet6_domain_mutex);

	frag6_doing_reass = 1;
	q6 = ip6q.ip6q_next;
	/* NOTE(review): q6 is never NULL once frag6_init() has run;
	 * this check looks like defensive leftover — confirm. */
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			/*
			 * Advance the cursor before freeing: frag6_freef()
			 * unlinks and frees the expired entry, so the test
			 * is made on the predecessor of the already-advanced
			 * cursor.
			 */
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 * (With ip6_maxfragpackets == -1 the unsigned compare never
	 * fires, i.e. "unlimited"; with 0 everything is drained.)
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		ip6stat.ip6s_fragoverflow++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	frag6_doing_reass = 0;
	lck_mtx_unlock(inet6_domain_mutex);
}
762
763 /*
764 * Drain off all datagram fragments.
765 */
766 void
767 frag6_drain()
768 {
769 if (frag6_doing_reass)
770 return;
771 lck_mtx_lock(inet6_domain_mutex);
772 while (ip6q.ip6q_next != &ip6q) {
773 ip6stat.ip6s_fragdropped++;
774 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
775 frag6_freef(ip6q.ip6q_next);
776 }
777 lck_mtx_unlock(inet6_domain_mutex);
778 }