]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet6/frag6.c
xnu-1699.22.81.tar.gz
[apple/xnu.git] / bsd / netinet6 / frag6.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/frag6.c,v 1.2.2.5 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: frag6.c,v 1.31 2001/05/17 13:45:34 jinmei Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/malloc.h>
64 #include <sys/mbuf.h>
65 #include <sys/domain.h>
66 #include <sys/protosw.h>
67 #include <sys/socket.h>
68 #include <sys/errno.h>
69 #include <sys/time.h>
70 #include <sys/kernel.h>
71 #include <sys/syslog.h>
72 #include <kern/queue.h>
73 #include <kern/locks.h>
74
75 #include <net/if.h>
76 #include <net/route.h>
77
78 #include <netinet/in.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip.h>
81 #include <netinet/ip6.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet/icmp6.h>
84
85 #include <net/net_osdep.h>
86
87 /*
88 * Define it to get a correct behavior on per-interface statistics.
89 * You will need to perform an extra routing table lookup, per fragment,
90 * to do it. This may, or may not be, a performance hit.
91 */
92 #define IN6_IFSTAT_STRICT
93
/* helpers for the per-packet fragment chain (ip6asfrag doubly-linked list) */
static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *);
static void frag6_deq(struct ip6asfrag *);
/* helpers for the global reassembly queue (ip6q doubly-linked ring) */
static void frag6_insque(struct ip6q *, struct ip6q *);
static void frag6_remque(struct ip6q *);
static void frag6_freef(struct ip6q *);

/* XXX we eventually need splreass6, or some real semaphore */
int frag6_doing_reass;			/* crude reentrancy flag, see frag6_drain() */
u_int frag6_nfragpackets;		/* packets currently under reassembly */
static u_int frag6_nfrags;		/* fragments held across all queues */
struct ip6q ip6q;	/* ip6 reassemble queue */


extern lck_mtx_t *inet6_domain_mutex;
108 /*
109 * Initialise reassembly queue and fragment identifier.
110 */
111 void
112 frag6_init()
113 {
114 struct timeval tv;
115
116 ip6_maxfragpackets = nmbclusters / 32;
117 ip6_maxfrags = nmbclusters / 4;
118
119 /*
120 * in many cases, random() here does NOT return random number
121 * as initialization during bootstrap time occur in fixed order.
122 */
123 microtime(&tv);
124 ip6_id = random() ^ tv.tv_usec;
125 ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
126 }
127
128 /*
129 * In RFC2460, fragment and reassembly rule do not agree with each other,
130 * in terms of next header field handling in fragment header.
131 * While the sender will use the same value for all of the fragmented packets,
132 * receiver is suggested not to check the consistency.
133 *
134 * fragment rule (p20):
135 * (2) A Fragment header containing:
136 * The Next Header value that identifies the first header of
137 * the Fragmentable Part of the original packet.
138 * -> next header field is same for all fragments
139 *
140 * reassembly rule (p21):
141 * The Next Header field of the last header of the Unfragmentable
142 * Part is obtained from the Next Header field of the first
143 * fragment's Fragment header.
144 * -> should grab it from the first fragment only
145 *
 * The following note also contradicts with fragment rule - no one is going to
 * send different fragments with different next header fields.
148 *
149 * additional note (p22):
150 * The Next Header values in the Fragment headers of different
151 * fragments of the same original packet may differ. Only the value
152 * from the Offset zero fragment packet is used for reassembly.
153 * -> should grab it from the first fragment only
154 *
155 * There is no explicit reason given in the RFC. Historical reason maybe?
156 */
157 /*
158 * Fragment input
159 * NOTE: this function is called with the inet6_domain_mutex held from ip6_input.
 * inet6_domain_mutex is protecting the frag6 queue manipulation.
161 */
/*
 * Parse one IPv6 fragment, file it on the matching reassembly queue and,
 * when the datagram is complete, splice the fragments back together.
 *
 * *mp/*offp: mbuf chain and offset of the fragment header on entry;
 * updated to the reassembled packet / upper-layer offset on success.
 * Returns the next header value when reassembly completed, or
 * IPPROTO_DONE when the fragment was consumed (queued, dropped, or an
 * ICMPv6 error was generated).
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
#pragma unused(proto)
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
	struct ifaddr *ifa = NULL;
	u_int8_t ecn, ecn0;

#ifdef IN6_IFSTAT_STRICT
	struct route_in6 ro;
	struct sockaddr_in6 *dst;
#endif

	/* make sure the fragment header is contiguous and addressable */
	ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), return IPPROTO_DONE);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;
#endif

	dstifp = NULL;
#ifdef IN6_IFSTAT_STRICT
	/* find the destination interface of the packet. */
	bzero(&ro, sizeof (ro));
	dst = (struct sockaddr_in6 *)&ro.ro_dst;
	dst->sin6_family = AF_INET6;
	dst->sin6_len = sizeof (struct sockaddr_in6);
	dst->sin6_addr = ip6->ip6_dst;

	rtalloc((struct route *)&ro);
	if (ro.ro_rt != NULL) {
		RT_LOCK(ro.ro_rt);
		if ((ifa = ro.ro_rt->rt_ifa) != NULL) {
			/* take a reference; dropped on every exit path below */
			IFA_ADDREF(ifa);
			dstifp = ((struct in6_ifaddr *)ro.ro_rt->rt_ifa)->ia_ifp;
		}
		RT_UNLOCK(ro.ro_rt);
		rtfree(ro.ro_rt);
		ro.ro_rt = NULL;
	}
#else
	/* we are violating the spec, this is not the destination interface */
	if ((m->m_flags & M_PKTHDR) != 0)
		dstifp = m->m_pkthdr.rcvif;
#endif

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return IPPROTO_DONE;
	}

	/*
	 * check whether fragment packet's fragment length is
	 * multiple of 8 octets (required for every fragment but the last).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB,
			    ICMP6_PARAMPROB_HEADER,
			    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return IPPROTO_DONE;
	}

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	frag6_doing_reass = 1;

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

	/* look up an existing reassembly queue by (ident, src, dst) */
	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfrag is 0, never accept fragments.
		 * If maxfrag is -1, accept all fragments without limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;
		q6 = (struct ip6q *)_MALLOC(sizeof(struct ip6q), M_FTABLE,
		    M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		bzero(q6, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp = (u_char *)nxtp;
#endif
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		/* ECN bits sit in bits 20-21 of the host-order flow word */
		q6->ip6q_ecn =
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr)
			- sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
				    offset - sizeof(struct ip6_frag) +
					offsetof(struct ip6_frag, ip6f_offlg));
			frag6_doing_reass = 0;
			if (ifa != NULL)
				IFA_REMREF(ifa);
			return(IPPROTO_DONE);
		}
	}
	else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
				offsetof(struct ip6_frag, ip6f_offlg));
		frag6_doing_reass = 0;
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return(IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			/* save the link before af6 may be freed below */
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				FREE(af6, M_FTABLE);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
					    ICMP6_PARAMPROB_HEADER,
					    erroff - sizeof(struct ip6_frag) +
						offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	/* allocate the bookkeeping record for this fragment */
	ip6af = (struct ip6asfrag *)_MALLOC(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	bzero(ip6af, sizeof(*ip6af));
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	/* brand-new queue: no ordering/overlap work needed, insert at head */
	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			FREE(ip6af, M_FTABLE);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		FREE(ip6af, M_FTABLE);
		goto dropfrag;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to override
	 * existing fragments from a security point of view.
	 * We don't know which fragment is the bad guy - here we trust
	 * fragment that came in earlier, with no real reason.
	 *
	 * Note: due to changes after disabling this part, mbuf passed to
	 * m_adj() below now does not meet the requirement.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		/* overlap with the fragment immediately before us? */
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			FREE(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		/* overlap with the fragment immediately after us? */
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			FREE(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
#endif

insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
	/* walk the chain; any gap means reassembly is still incomplete */
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			frag6_doing_reass = 0;
			if (ifa != NULL)
				IFA_REMREF(ifa);
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	/* the last fragment must have had its M-flag clear */
	if (af6->ip6af_up->ip6af_mff) {
		frag6_doing_reass = 0;
		if (ifa != NULL)
			IFA_REMREF(ifa);
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		/* strip this fragment's headers before appending its data */
		m_adj(t->m_next, af6->ip6af_offset);
		FREE(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	FREE(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);

	nxt = q6->ip6q_nxt;
#if notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/* Delete frag6 header */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		/* This is the only possible case with !PULLDOWN_TEST */
		ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
			offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			FREE(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	FREE(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	ip6stat.ip6s_reassembled++;
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	frag6_doing_reass = 0;
	if (ifa != NULL)
		IFA_REMREF(ifa);
	return nxt;

 dropfrag:
	/* drop the fragment, bump failure counters, release references */
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	ip6stat.ip6s_fragdropped++;
	m_freem(m);
	frag6_doing_reass = 0;
	if (ifa != NULL)
		IFA_REMREF(ifa);
	return IPPROTO_DONE;
}
634
635 /*
636 * Free a fragment reassembly header and all
637 * associated datagrams.
638 */
639 void
640 frag6_freef(q6)
641 struct ip6q *q6;
642 {
643 struct ip6asfrag *af6, *down6;
644
645 for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
646 af6 = down6) {
647 struct mbuf *m = IP6_REASS_MBUF(af6);
648
649 down6 = af6->ip6af_down;
650 frag6_deq(af6);
651
652 /*
653 * Return ICMP time exceeded error for the 1st fragment.
654 * Just free other fragments.
655 */
656 if (af6->ip6af_off == 0) {
657 struct ip6_hdr *ip6;
658
659 /* adjust pointer */
660 ip6 = mtod(m, struct ip6_hdr *);
661
662 /* restore source and destination addresses */
663 ip6->ip6_src = q6->ip6q_src;
664 ip6->ip6_dst = q6->ip6q_dst;
665 icmp6_error(m, ICMP6_TIME_EXCEEDED,
666 ICMP6_TIME_EXCEED_REASSEMBLY, 0);
667 } else
668 m_freem(m);
669 FREE(af6, M_FTABLE);
670
671 }
672 frag6_remque(q6);
673 frag6_nfrags -= q6->ip6q_nfrag;
674 FREE(q6, M_FTABLE);
675 frag6_nfragpackets--;
676 }
677
678 /*
679 * Put an ip fragment on a reassembly chain.
680 * Like insque, but pointers in middle of structure.
681 */
682 void
683 frag6_enq(af6, up6)
684 struct ip6asfrag *af6, *up6;
685 {
686 af6->ip6af_up = up6;
687 af6->ip6af_down = up6->ip6af_down;
688 up6->ip6af_down->ip6af_up = af6;
689 up6->ip6af_down = af6;
690 }
691
692 /*
693 * To frag6_enq as remque is to insque.
694 */
695 void
696 frag6_deq(af6)
697 struct ip6asfrag *af6;
698 {
699 af6->ip6af_up->ip6af_down = af6->ip6af_down;
700 af6->ip6af_down->ip6af_up = af6->ip6af_up;
701 }
702
703 void
704 frag6_insque(new, old)
705 struct ip6q *new, *old;
706 {
707 new->ip6q_prev = old;
708 new->ip6q_next = old->ip6q_next;
709 old->ip6q_next->ip6q_prev= new;
710 old->ip6q_next = new;
711 }
712
713 void
714 frag6_remque(p6)
715 struct ip6q *p6;
716 {
717 p6->ip6q_prev->ip6q_next = p6->ip6q_next;
718 p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
719 }
720
/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 * Runs with inet6_domain_mutex held for the duration.
 */
void
frag6_slowtimo()
{
	struct ip6q *q6;
	lck_mtx_lock(inet6_domain_mutex);

	/* crude reentrancy flag shared with frag6_input/frag6_drain */
	frag6_doing_reass = 1;
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			/*
			 * advance the iterator BEFORE testing/freeing, so
			 * q6 stays valid when its expired predecessor is
			 * released by frag6_freef() below.
			 */
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 * Note: when ip6_maxfragpackets is -1 the u_int cast makes the
	 * bound UINT_MAX, so nothing is drained here ("no limit").
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		ip6stat.ip6s_fragoverflow++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	frag6_doing_reass = 0;
	lck_mtx_unlock(inet6_domain_mutex);
}
758
759 /*
760 * Drain off all datagram fragments.
761 */
762 void
763 frag6_drain()
764 {
765 if (frag6_doing_reass)
766 return;
767 lck_mtx_lock(inet6_domain_mutex);
768 while (ip6q.ip6q_next != &ip6q) {
769 ip6stat.ip6s_fragdropped++;
770 /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
771 frag6_freef(ip6q.ip6q_next);
772 }
773 lck_mtx_unlock(inet6_domain_mutex);
774 }