1 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
2 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
3
4 /*
5 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33 /*
34 * RFC1827/2406 Encapsulated Security Payload.
35 */
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/domain.h>
42 #include <sys/protosw.h>
43 #include <sys/socket.h>
44 #include <sys/errno.h>
45 #include <sys/time.h>
46 #include <sys/kernel.h>
47 #include <sys/syslog.h>
48
49 #include <net/if.h>
50 #include <net/route.h>
51 #include <kern/cpu_number.h>
52 #include <kern/locks.h>
53
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/ip.h>
57 #include <netinet/ip_var.h>
58 #include <netinet/in_var.h>
59 #include <netinet/ip_ecn.h>
60 #include <netinet/in_pcb.h>
61 #if INET6
62 #include <netinet6/ip6_ecn.h>
63 #endif
64
65 #if INET6
66 #include <netinet/ip6.h>
67 #include <netinet6/in6_pcb.h>
68 #include <netinet6/ip6_var.h>
69 #include <netinet/icmp6.h>
70 #include <netinet6/ip6protosw.h>
71 #endif
72
73 #include <netinet6/ipsec.h>
74 #if INET6
75 #include <netinet6/ipsec6.h>
76 #endif
77 #include <netinet6/ah.h>
78 #if INET6
79 #include <netinet6/ah6.h>
80 #endif
81 #include <netinet6/esp.h>
82 #if INET6
83 #include <netinet6/esp6.h>
84 #endif
85 #include <netkey/key.h>
86 #include <netkey/keydb.h>
87 #include <netkey/key_debug.h>
88
89
90 #include <net/net_osdep.h>
91
92 #include <sys/kdebug.h>
93 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
94 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
95 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
96 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
97 #define IPLEN_FLIPPED
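/*
 * IPLEN_FLIPPED reflects that the IPv4 input path has already converted
 * ip_len to host byte order by the time ESP input runs, so the length
 * adjustments below subtract directly instead of round-tripping through
 * ntohs()/htons().
 */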
98
99 extern lck_mtx_t *sadb_mutex;
100 #if INET
101 extern struct protosw inetsw[];
102
103 #define ESPMAXLEN \
104 (sizeof(struct esp) < sizeof(struct newesp) \
105 ? sizeof(struct newesp) : sizeof(struct esp))
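/*
 * ESPMAXLEN is the larger of the RFC 1827 header (struct esp: SPI only)
 * and the RFC 2406 header (struct newesp: SPI plus sequence number), so a
 * single pullup of that many bytes is enough no matter which format the
 * SA turns out to use.
 */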
106
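/*
 * esp4_input: IPv4 ESP input processing.  Called from the inet protocol
 * switch with the mbuf and the offset of the ESP header; the packet is
 * authenticated (when the SA carries an auth key), decrypted, and then
 * either re-injected as a tunneled inner packet or handed to the next
 * protocol as a transport-mode payload.
 */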
107 void
108 esp4_input(m, off)
109 struct mbuf *m;
110 int off;
111 {
112 struct ip *ip;
113 struct esp *esp;
114 struct esptail esptail;
115 u_int32_t spi;
116 struct secasvar *sav = NULL;
117 size_t taillen;
118 u_int16_t nxt;
119 const struct esp_algorithm *algo;
120 int ivlen;
121 size_t hlen;
122 size_t esplen;
123 int s;
124
125 lck_mtx_lock(sadb_mutex);
126
127 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
128 /* sanity check for alignment. */
129 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
130 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
131 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
132 ipsecstat.in_inval++;
133 goto bad;
134 }
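/*
 * RFC 2406 pads the ESP payload so that the trailer ends on a 4-byte
 * boundary, so an ESP offset or total packet length that is not a
 * multiple of 4 cannot belong to a well-formed ESP datagram.
 */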
135
136 if (m->m_len < off + ESPMAXLEN) {
137 m = m_pullup(m, off + ESPMAXLEN);
138 if (!m) {
139 ipseclog((LOG_DEBUG,
140 "IPv4 ESP input: can't pullup in esp4_input\n"));
141 ipsecstat.in_inval++;
142 goto bad;
143 }
144 }
145
146 ip = mtod(m, struct ip *);
147 esp = (struct esp *)(((u_int8_t *)ip) + off);
148 #ifdef _IP_VHL
149 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
150 #else
151 hlen = ip->ip_hl << 2;
152 #endif
153
154 /* find the sassoc. */
155 spi = esp->esp_spi;
156
157 if ((sav = key_allocsa(AF_INET,
158 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
159 IPPROTO_ESP, spi)) == 0) {
160 ipseclog((LOG_WARNING,
161 "IPv4 ESP input: no key association found for spi %u\n",
162 (u_int32_t)ntohl(spi)));
163 ipsecstat.in_nosa++;
164 goto bad;
165 }
166 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
167 printf("DP esp4_input called to allocate SA:%p\n", sav));
168 if (sav->state != SADB_SASTATE_MATURE
169 && sav->state != SADB_SASTATE_DYING) {
170 ipseclog((LOG_DEBUG,
171 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
172 (u_int32_t)ntohl(spi)));
173 ipsecstat.in_badspi++;
174 goto bad;
175 }
176 algo = esp_algorithm_lookup(sav->alg_enc);
177 if (!algo) {
178 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
179 "unsupported encryption algorithm for spi %u\n",
180 (u_int32_t)ntohl(spi)));
181 ipsecstat.in_badspi++;
182 goto bad;
183 }
184
185 /* check if we have proper ivlen information */
186 ivlen = sav->ivlen;
187 if (ivlen < 0) {
188 ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
189 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
190 ipsecstat.in_inval++;
191 goto bad;
192 }
193
194 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
195 && (sav->alg_auth && sav->key_auth)))
196 goto noreplaycheck;
197
198 if (sav->alg_auth == SADB_X_AALG_NULL ||
199 sav->alg_auth == SADB_AALG_NONE)
200 goto noreplaycheck;
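/*
 * Past this point the SA is known to be new-format (RFC 2406), to carry a
 * replay window, and to be configured with a real authentication algorithm
 * and key, so the sequence-number check and (when the auth algorithm is
 * recognized) the ICV check below are performed.
 */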
201
202 /*
203 * check for sequence number.
204 */
205 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
206 ; /* okay */
207 else {
208 ipsecstat.in_espreplay++;
209 ipseclog((LOG_WARNING,
210 "replay packet in IPv4 ESP input: %s %s\n",
211 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
212 goto bad;
213 }
214
215 /* check ICV */
216 {
217 u_char sum0[AH_MAXSUMSIZE];
218 u_char sum[AH_MAXSUMSIZE];
219 const struct ah_algorithm *sumalgo;
220 size_t siz;
221
222 sumalgo = ah_algorithm_lookup(sav->alg_auth);
223 if (!sumalgo)
224 goto noreplaycheck;
225 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
226 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
227 ipsecstat.in_inval++;
228 goto bad;
229 }
230 if (AH_MAXSUMSIZE < siz) {
231 ipseclog((LOG_DEBUG,
232 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
233 (u_long)siz));
234 ipsecstat.in_inval++;
235 goto bad;
236 }
237
238 m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);
239
240 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
241 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
242 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
243 ipsecstat.in_espauthfail++;
244 goto bad;
245 }
246
247 if (bcmp(sum0, sum, siz) != 0) {
248 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
249 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
250 ipsecstat.in_espauthfail++;
251 goto bad;
252 }
253
254 /* strip off the authentication data */
255 m_adj(m, -siz);
256 ip = mtod(m, struct ip *);
257 #ifdef IPLEN_FLIPPED
258 ip->ip_len = ip->ip_len - siz;
259 #else
260 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
261 #endif
262 m->m_flags |= M_AUTHIPDGM;
263 ipsecstat.in_espauthsucc++;
264 }
265
266 /*
267 * update sequence number.
268 */
269 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
270 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
271 ipsecstat.in_espreplay++;
272 goto bad;
273 }
274 }
275
276 noreplaycheck:
277
278 /* process main esp header. */
279 if (sav->flags & SADB_X_EXT_OLD) {
280 /* RFC 1827 */
281 esplen = sizeof(struct esp);
282 } else {
283 /* RFC 2406 */
284 if (sav->flags & SADB_X_EXT_DERIV)
285 esplen = sizeof(struct esp);
286 else
287 esplen = sizeof(struct newesp);
288 }
289
290 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
291 ipseclog((LOG_WARNING,
292 "IPv4 ESP input: packet too short\n"));
293 ipsecstat.in_inval++;
294 goto bad;
295 }
296
297 if (m->m_len < off + esplen + ivlen) {
298 m = m_pullup(m, off + esplen + ivlen);
299 if (!m) {
300 ipseclog((LOG_DEBUG,
301 "IPv4 ESP input: can't pullup in esp4_input\n"));
302 ipsecstat.in_inval++;
303 goto bad;
304 }
305 }
306
307 /*
308 * pre-compute and cache intermediate key
309 */
310 if (esp_schedule(algo, sav) != 0) {
311 ipsecstat.in_inval++;
312 goto bad;
313 }
314
315 /*
316 * decrypt the packet.
317 */
318 if (!algo->decrypt)
319 panic("internal error: no decrypt function");
320 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
321 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
322 /* m is already freed */
323 m = NULL;
324 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
325 ipsec_logsastr(sav)));
326 ipsecstat.in_inval++;
327 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
328 goto bad;
329 }
330 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
331 ipsecstat.in_esphist[sav->alg_enc]++;
332
333 m->m_flags |= M_DECRYPTED;
334
335 /*
336 * find the trailer of the ESP.
337 */
338 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
339 (caddr_t)&esptail);
340 nxt = esptail.esp_nxt;
341 taillen = esptail.esp_padlen + sizeof(esptail);
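/*
 * Decrypted ESP payload layout (RFC 2406):
 *
 *   | payload data | padding | pad length | next header |
 *                  |<-------------- taillen ----------->|
 *
 * esp_padlen counts only the padding bytes; the two one-byte trailer
 * fields are accounted for by sizeof(esptail).
 */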
342
343 if (m->m_pkthdr.len < taillen
344 || m->m_pkthdr.len - taillen < hlen) { /*?*/
345 ipseclog((LOG_WARNING,
346 "bad pad length in IPv4 ESP input: %s %s\n",
347 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
348 ipsecstat.in_inval++;
349 goto bad;
350 }
351
352 /* strip off the trailing pad area. */
353 m_adj(m, -taillen);
354
355 #ifdef IPLEN_FLIPPED
356 ip->ip_len = ip->ip_len - taillen;
357 #else
358 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
359 #endif
360
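/*
 * Two dispatch paths from here: tunnel mode re-injects the decapsulated
 * inner IPv4 packet through proto_input(), while transport mode strips
 * the ESP header and IV in place and hands the payload to the next
 * protocol's input routine.
 */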
361 /* was it transmitted over the IPsec tunnel SA? */
362 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
363 /*
364 * strip off all the headers that precede the ESP header.
365 * IP4 xx ESP IP4' payload -> IP4' payload
366 *
367 * XXX more sanity checks
368 * XXX relationship with gif?
369 */
370 u_int8_t tos;
371
372 tos = ip->ip_tos;
373 m_adj(m, off + esplen + ivlen);
374 if (m->m_len < sizeof(*ip)) {
375 m = m_pullup(m, sizeof(*ip));
376 if (!m) {
377 ipsecstat.in_inval++;
378 goto bad;
379 }
380 }
381 ip = mtod(m, struct ip *);
382 /* ECN consideration. */
383 ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
384 if (!key_checktunnelsanity(sav, AF_INET,
385 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
386 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
387 "in IPv4 ESP input: %s %s\n",
388 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
389 ipsecstat.in_inval++;
390 goto bad;
391 }
392
393 key_sa_recordxfer(sav, m);
394 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
395 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
396 ipsecstat.in_nomem++;
397 goto bad;
398 }
399
400 /* Clear the csum flags, they can't be valid for the inner headers */
401 m->m_pkthdr.csum_flags = 0;
402
403 lck_mtx_unlock(sadb_mutex);
404 proto_input(PF_INET, m);
405 lck_mtx_lock(sadb_mutex);
406 nxt = IPPROTO_DONE;
407 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
408 } else {
409 /*
410 * strip off ESP header and IV.
411 * even in m_pulldown case, we need to strip off ESP so that
412 * we can always compute checksum for AH correctly.
413 */
414 size_t stripsiz;
415
416 stripsiz = esplen + ivlen;
417
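/*
 * Slide the outer IP header (and any options) forward by stripsiz so it
 * lands immediately in front of the decrypted payload, then shrink the
 * mbuf accordingly.
 */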
418 ip = mtod(m, struct ip *);
419 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
420 m->m_data += stripsiz;
421 m->m_len -= stripsiz;
422 m->m_pkthdr.len -= stripsiz;
423
424 ip = mtod(m, struct ip *);
425 #ifdef IPLEN_FLIPPED
426 ip->ip_len = ip->ip_len - stripsiz;
427 #else
428 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
429 #endif
430 ip->ip_p = nxt;
431
432 key_sa_recordxfer(sav, m);
433 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
434 ipsecstat.in_nomem++;
435 goto bad;
436 }
437
438 /*
439 * Set the csum valid flag: if we authenticated the
440 * packet, the payload shouldn't be corrupt unless
441 * it was corrupted before being signed on the other
442 * side.
443 */
444 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
445 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
446 m->m_pkthdr.csum_data = 0xFFFF;
447 }
448
449 if (nxt != IPPROTO_DONE) {
450 if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
451 ipsec4_in_reject(m, NULL)) {
452 ipsecstat.in_polvio++;
453 goto bad;
454 }
455 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);
456 lck_mtx_unlock(sadb_mutex);
457 ip_proto_dispatch_in(m, off, nxt, 0);
458 lck_mtx_lock(sadb_mutex);
459 } else
460 m_freem(m);
461 m = NULL;
462 }
463
464 if (sav) {
465 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
466 printf("DP esp4_input call free SA:%p\n", sav));
467 key_freesav(sav);
468 }
469 ipsecstat.in_success++;
470 lck_mtx_unlock(sadb_mutex);
471 return;
472
473 bad:
474 if (sav) {
475 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
476 printf("DP esp4_input call free SA:%p\n", sav));
477 key_freesav(sav);
478 }
479 lck_mtx_unlock(sadb_mutex);
480 if (m)
481 m_freem(m);
482 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
483 return;
484 }
485 #endif /* INET */
486
487 #if INET6
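/*
 * esp6_input: IPv6 ESP input processing.  Invoked through the inet6
 * protocol switch with the mbuf and the offset of the ESP header; returns
 * the next header value so extension-header parsing can continue, or
 * IPPROTO_DONE once the packet has been consumed (tunnel mode or error).
 */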
488 int
489 esp6_input(mp, offp)
490 struct mbuf **mp;
491 int *offp;
492 {
493 struct mbuf *m = *mp;
494 int off = *offp;
495 struct ip6_hdr *ip6;
496 struct esp *esp;
497 struct esptail esptail;
498 u_int32_t spi;
499 struct secasvar *sav = NULL;
500 size_t taillen;
501 u_int16_t nxt;
502 const struct esp_algorithm *algo;
503 int ivlen;
504 size_t esplen;
505 int s;
506
507 lck_mtx_lock(sadb_mutex);
508
509 /* sanity check for alignment. */
510 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
511 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
512 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
513 ipsec6stat.in_inval++;
514 goto bad;
515 }
516
517 #ifndef PULLDOWN_TEST
518 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {lck_mtx_unlock(sadb_mutex); return IPPROTO_DONE;});
519 esp = (struct esp *)(mtod(m, caddr_t) + off);
520 #else
521 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
522 if (esp == NULL) {
523 ipsec6stat.in_inval++;
524 lck_mtx_unlock(sadb_mutex);
525 return IPPROTO_DONE;
526 }
527 #endif
528 ip6 = mtod(m, struct ip6_hdr *);
529
530 if (ntohs(ip6->ip6_plen) == 0) {
531 ipseclog((LOG_ERR, "IPv6 ESP input: "
532 "ESP with IPv6 jumbogram is not supported.\n"));
533 ipsec6stat.in_inval++;
534 goto bad;
535 }
536
537 /* find the sassoc. */
538 spi = esp->esp_spi;
539
540 if ((sav = key_allocsa(AF_INET6,
541 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
542 IPPROTO_ESP, spi)) == 0) {
543 ipseclog((LOG_WARNING,
544 "IPv6 ESP input: no key association found for spi %u\n",
545 (u_int32_t)ntohl(spi)));
546 ipsec6stat.in_nosa++;
547 goto bad;
548 }
549 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
550 printf("DP esp6_input called to allocate SA:%p\n", sav));
551 if (sav->state != SADB_SASTATE_MATURE
552 && sav->state != SADB_SASTATE_DYING) {
553 ipseclog((LOG_DEBUG,
554 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
555 (u_int32_t)ntohl(spi)));
556 ipsec6stat.in_badspi++;
557 goto bad;
558 }
559 algo = esp_algorithm_lookup(sav->alg_enc);
560 if (!algo) {
561 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
562 "unsupported encryption algorithm for spi %u\n",
563 (u_int32_t)ntohl(spi)));
564 ipsec6stat.in_badspi++;
565 goto bad;
566 }
567
568 /* check if we have proper ivlen information */
569 ivlen = sav->ivlen;
570 if (ivlen < 0) {
571 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
572 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
573 ipsec6stat.in_badspi++;
574 goto bad;
575 }
576
577 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
578 && (sav->alg_auth && sav->key_auth)))
579 goto noreplaycheck;
580
581 if (sav->alg_auth == SADB_X_AALG_NULL ||
582 sav->alg_auth == SADB_AALG_NONE)
583 goto noreplaycheck;
584
585 /*
586 * check for sequence number.
587 */
588 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
589 ; /* okay */
590 else {
591 ipsec6stat.in_espreplay++;
592 ipseclog((LOG_WARNING,
593 "replay packet in IPv6 ESP input: %s %s\n",
594 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
595 goto bad;
596 }
597
598 /* check ICV */
599 {
600 u_char sum0[AH_MAXSUMSIZE];
601 u_char sum[AH_MAXSUMSIZE];
602 const struct ah_algorithm *sumalgo;
603 size_t siz;
604
605 sumalgo = ah_algorithm_lookup(sav->alg_auth);
606 if (!sumalgo)
607 goto noreplaycheck;
608 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
609 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
610 ipsec6stat.in_inval++;
611 goto bad;
612 }
613 if (AH_MAXSUMSIZE < siz) {
614 ipseclog((LOG_DEBUG,
615 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
616 (u_long)siz));
617 ipsec6stat.in_inval++;
618 goto bad;
619 }
620
621 m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]);
622
623 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
624 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
625 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
626 ipsec6stat.in_espauthfail++;
627 goto bad;
628 }
629
630 if (bcmp(sum0, sum, siz) != 0) {
631 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
632 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
633 ipsec6stat.in_espauthfail++;
634 goto bad;
635 }
636
637 /* strip off the authentication data */
638 m_adj(m, -siz);
639 ip6 = mtod(m, struct ip6_hdr *);
640 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
641
642 m->m_flags |= M_AUTHIPDGM;
643 ipsec6stat.in_espauthsucc++;
644 }
645
646 /*
647 * update sequence number.
648 */
649 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
650 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
651 ipsec6stat.in_espreplay++;
652 goto bad;
653 }
654 }
655
656 noreplaycheck:
657
658 /* process main esp header. */
659 if (sav->flags & SADB_X_EXT_OLD) {
660 /* RFC 1827 */
661 esplen = sizeof(struct esp);
662 } else {
663 /* RFC 2406 */
664 if (sav->flags & SADB_X_EXT_DERIV)
665 esplen = sizeof(struct esp);
666 else
667 esplen = sizeof(struct newesp);
668 }
669
670 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
671 ipseclog((LOG_WARNING,
672 "IPv6 ESP input: packet too short\n"));
673 ipsec6stat.in_inval++;
674 goto bad;
675 }
676
677 #ifndef PULLDOWN_TEST
678 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, {lck_mtx_unlock(sadb_mutex); return IPPROTO_DONE;}); /*XXX*/
679 #else
680 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
681 if (esp == NULL) {
682 ipsec6stat.in_inval++;
683 m = NULL;
684 goto bad;
685 }
686 #endif
687 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
688
689 /*
690 * pre-compute and cache intermediate key
691 */
692 if (esp_schedule(algo, sav) != 0) {
693 ipsec6stat.in_inval++;
694 goto bad;
695 }
696
697 /*
698 * decrypt the packet.
699 */
700 if (!algo->decrypt)
701 panic("internal error: no decrypt function");
702 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
703 /* m is already freed */
704 m = NULL;
705 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
706 ipsec_logsastr(sav)));
707 ipsec6stat.in_inval++;
708 goto bad;
709 }
710 ipsec6stat.in_esphist[sav->alg_enc]++;
711
712 m->m_flags |= M_DECRYPTED;
713
714 /*
715 * find the trailer of the ESP.
716 */
717 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
718 (caddr_t)&esptail);
719 nxt = esptail.esp_nxt;
720 taillen = esptail.esp_padlen + sizeof(esptail);
721
722 if (m->m_pkthdr.len < taillen
723 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
724 ipseclog((LOG_WARNING,
725 "bad pad length in IPv6 ESP input: %s %s\n",
726 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
727 ipsec6stat.in_inval++;
728 goto bad;
729 }
730
731 /* strip off the trailing pad area. */
732 m_adj(m, -taillen);
733
734 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
735
736 /* was it transmitted over the IPsec tunnel SA? */
737 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
738 /*
739 * strip off all the headers that precede the ESP header.
740 * IP6 xx ESP IP6' payload -> IP6' payload
741 *
742 * XXX more sanity checks
743 * XXX relationship with gif?
744 */
745 u_int32_t flowinfo; /*net endian*/
746 flowinfo = ip6->ip6_flow;
747 m_adj(m, off + esplen + ivlen);
748 if (m->m_len < sizeof(*ip6)) {
749 #ifndef PULLDOWN_TEST
750 /*
751 * m_pullup is prohibited in KAME IPv6 input processing
752 * but there's no other way!
753 */
754 #else
755 /* okay to pullup in m_pulldown style */
756 #endif
757 m = m_pullup(m, sizeof(*ip6));
758 if (!m) {
759 ipsec6stat.in_inval++;
760 goto bad;
761 }
762 }
763 ip6 = mtod(m, struct ip6_hdr *);
764 /* ECN consideration. */
765 ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow);
766 if (!key_checktunnelsanity(sav, AF_INET6,
767 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
768 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
769 "in IPv6 ESP input: %s %s\n",
770 ipsec6_logpacketstr(ip6, spi),
771 ipsec_logsastr(sav)));
772 ipsec6stat.in_inval++;
773 goto bad;
774 }
775
776 key_sa_recordxfer(sav, m);
777 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
778 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
779 ipsec6stat.in_nomem++;
780 goto bad;
781 }
782 lck_mtx_unlock(sadb_mutex);
783 proto_input(PF_INET6, m);
784 lck_mtx_lock(sadb_mutex);
785 nxt = IPPROTO_DONE;
786 } else {
787 /*
788 * strip off ESP header and IV.
789 * even in m_pulldown case, we need to strip off ESP so that
790 * we can always compute checksum for AH correctly.
791 */
792 size_t stripsiz;
793 char *prvnxtp;
794
795 /*
796 * Set the next header field of the previous header correctly.
797 */
798 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
799 *prvnxtp = nxt;
800
801 stripsiz = esplen + ivlen;
802
803 ip6 = mtod(m, struct ip6_hdr *);
804 if (m->m_len >= stripsiz + off) {
805 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
806 m->m_data += stripsiz;
807 m->m_len -= stripsiz;
808 m->m_pkthdr.len -= stripsiz;
809 } else {
810 /*
811 * this comes with no copy if the split boundary
812 * falls within a cluster
813 */
814 struct mbuf *n;
815
816 n = m_split(m, off, M_DONTWAIT);
817 if (n == NULL) {
818 /* m is retained by m_split */
819 goto bad;
820 }
821 m_adj(n, stripsiz);
822 /* m_cat does not update m_pkthdr.len */
823 m->m_pkthdr.len += n->m_pkthdr.len;
824 m_cat(m, n);
825 }
826
827 #ifndef PULLDOWN_TEST
828 /*
829 * KAME requires that the packet be contiguous in a
830 * single mbuf.  We need to make sure of that.
831 * This kind of code should be avoided.
832 * XXX other conditions to avoid running this part?
833 */
834 if (m->m_len != m->m_pkthdr.len) {
835 struct mbuf *n = NULL;
836 int maxlen;
837
838 MGETHDR(n, M_DONTWAIT, MT_HEADER);
839 maxlen = MHLEN;
840 if (n)
841 M_COPY_PKTHDR(n, m);
842 if (n && m->m_pkthdr.len > maxlen) {
843 MCLGET(n, M_DONTWAIT);
844 maxlen = MCLBYTES;
845 if ((n->m_flags & M_EXT) == 0) {
846 m_free(n);
847 n = NULL;
848 }
849 }
850 if (!n) {
851 printf("esp6_input: mbuf allocation failed\n");
852 goto bad;
853 }
854
855 if (m->m_pkthdr.len <= maxlen) {
856 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
857 n->m_len = m->m_pkthdr.len;
858 n->m_pkthdr.len = m->m_pkthdr.len;
859 n->m_next = NULL;
860 m_freem(m);
861 } else {
862 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
863 n->m_len = maxlen;
864 n->m_pkthdr.len = m->m_pkthdr.len;
865 n->m_next = m;
866 m_adj(m, maxlen);
867 m->m_flags &= ~M_PKTHDR;
868 }
869 m = n;
870 }
871 #endif
872
873 ip6 = mtod(m, struct ip6_hdr *);
874 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
875
876 key_sa_recordxfer(sav, m);
877 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
878 ipsec6stat.in_nomem++;
879 goto bad;
880 }
881 }
882
883 *offp = off;
884 *mp = m;
885
886 if (sav) {
887 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
888 printf("DP esp6_input call free SA:%p\n", sav));
889 key_freesav(sav);
890 }
891 ipsec6stat.in_success++;
892 lck_mtx_unlock(sadb_mutex);
893 return nxt;
894
895 bad:
896 if (sav) {
897 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
898 printf("DP esp6_input call free SA:%p\n", sav));
899 key_freesav(sav);
900 }
901 lck_mtx_unlock(sadb_mutex);
902 if (m)
903 m_freem(m);
904 return IPPROTO_DONE;
905 }
906
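/*
 * esp6_ctlinput: ICMPv6 error handling for ESP over IPv6.  Errors are
 * propagated to potentially affected PCBs via pfctlinput2(); for
 * PRC_MSGSIZE (Packet Too Big) the SPI from the offending packet is
 * additionally used to check that a matching SA exists before the path
 * MTU is updated via icmp6_mtudisc_update().
 */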
907 void
908 esp6_ctlinput(cmd, sa, d)
909 int cmd;
910 struct sockaddr *sa;
911 void *d;
912 {
913 const struct newesp *espp;
914 struct newesp esp;
915 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
916 struct secasvar *sav;
917 struct ip6_hdr *ip6;
918 struct mbuf *m;
919 int off;
920 struct sockaddr_in6 *sa6_src, *sa6_dst;
921
922 if (sa->sa_family != AF_INET6 ||
923 sa->sa_len != sizeof(struct sockaddr_in6))
924 return;
925 if ((unsigned)cmd >= PRC_NCMDS)
926 return;
927
928 /* if the parameter is from icmp6, decode it. */
929 if (d != NULL) {
930 ip6cp = (struct ip6ctlparam *)d;
931 m = ip6cp->ip6c_m;
932 ip6 = ip6cp->ip6c_ip6;
933 off = ip6cp->ip6c_off;
934 } else {
935 m = NULL;
936 ip6 = NULL;
937 }
938
939 if (ip6) {
940 /*
941 * Notify the error to all possible sockets via pfctlinput2.
942 * Since the upper layer information (such as protocol type,
943 * source and destination ports) is embedded in the encrypted
944 * data and might have been cut, we can't directly call
945 * an upper layer ctlinput function. However, the pcbnotify
946 * function will consider source and destination addresses
947 * as well as the flow info value, and may be able to find
948 * some PCB that should be notified.
949 * Although pfctlinput2 will call esp6_ctlinput(), there is
950 * no possibility of an infinite loop of function calls,
951 * because we don't pass the inner IPv6 header.
952 */
953 bzero(&ip6cp1, sizeof(ip6cp1));
954 ip6cp1.ip6c_src = ip6cp->ip6c_src;
955 pfctlinput2(cmd, sa, (void *)&ip6cp1);
956
957 /*
958 * Then go to special cases that need ESP header information.
959 * XXX: We assume that when ip6 is non-NULL,
960 * M and OFF are valid.
961 */
962
963 /* check if we can safely examine src and dst ports */
964 if (m->m_pkthdr.len < off + sizeof(esp))
965 return;
966
967 if (m->m_len < off + sizeof(esp)) {
968 /*
969 * this should be a rare case,
970 * so we compromise on this copy...
971 */
972 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
973 espp = &esp;
974 } else
975 espp = (struct newesp*)(mtod(m, caddr_t) + off);
976
977 if (cmd == PRC_MSGSIZE) {
978 int valid = 0;
979
980 /*
981 * Check to see if we have a valid SA corresponding to
982 * the address in the ICMP message payload.
983 */
984 sa6_src = ip6cp->ip6c_src;
985 sa6_dst = (struct sockaddr_in6 *)sa;
986 lck_mtx_lock(sadb_mutex);
987 sav = key_allocsa(AF_INET6,
988 (caddr_t)&sa6_src->sin6_addr,
989 (caddr_t)&sa6_dst->sin6_addr,
990 IPPROTO_ESP, espp->esp_spi);
991 if (sav) {
992 if (sav->state == SADB_SASTATE_MATURE ||
993 sav->state == SADB_SASTATE_DYING)
994 valid++;
995 key_freesav(sav);
996 }
997 lck_mtx_unlock(sadb_mutex);
998
999 /* XXX Further validation? */
1000
1001 /*
1002 * Depending on the value of "valid" and routing table
1003 * size (mtudisc_{hi,lo}wat), we will:
1004 * - recalculate the new MTU and create the
1005 * corresponding routing entry, or
1006 * - ignore the MTU change notification.
1007 */
1008 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1009 }
1010 } else {
1011 /* we normally notify any pcb here */
1012 }
1013 }
1014 #endif /* INET6 */