1 /*
2 * Copyright (c) 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * RFC1827/2406 Encapsulated Security Payload.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/domain.h>
70 #include <sys/protosw.h>
71 #include <sys/socket.h>
72 #include <sys/errno.h>
73 #include <sys/time.h>
74 #include <sys/kernel.h>
75 #include <sys/syslog.h>
76
77 #include <net/if.h>
78 #include <net/route.h>
79 #include <kern/cpu_number.h>
80 #include <kern/locks.h>
81
82 #include <netinet/in.h>
83 #include <netinet/in_systm.h>
84 #include <netinet/ip.h>
85 #include <netinet/ip_var.h>
86 #include <netinet/in_var.h>
87 #include <netinet/ip_ecn.h>
88 #include <netinet/in_pcb.h>
89 #include <netinet/udp.h>
90 #if INET6
91 #include <netinet6/ip6_ecn.h>
92 #endif
93
94 #if INET6
95 #include <netinet/ip6.h>
96 #include <netinet6/in6_pcb.h>
97 #include <netinet6/ip6_var.h>
98 #include <netinet/icmp6.h>
99 #include <netinet6/ip6protosw.h>
100 #endif
101
102 #include <netinet6/ipsec.h>
103 #if INET6
104 #include <netinet6/ipsec6.h>
105 #endif
106 #include <netinet6/ah.h>
107 #if INET6
108 #include <netinet6/ah6.h>
109 #endif
110 #include <netinet6/esp.h>
111 #if INET6
112 #include <netinet6/esp6.h>
113 #endif
114 #include <netkey/key.h>
115 #include <netkey/keydb.h>
116 #include <netkey/key_debug.h>
117
118 #include <net/kpi_protocol.h>
119 #include <netinet/kpi_ipfilter_var.h>
120
121 #include <net/net_osdep.h>
122
123 #include <sys/kdebug.h>
124 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
125 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
126 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
127 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
128 #define IPLEN_FLIPPED
129
130 extern lck_mtx_t *sadb_mutex;
131
132 #if INET
133 extern struct protosw inetsw[];
134
135 #define ESPMAXLEN \
136 (sizeof(struct esp) < sizeof(struct newesp) \
137 ? sizeof(struct newesp) : sizeof(struct esp))
138
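/*
 * esp4_input - ESP input processing for IPv4.
 *
 * Look up the SA from the SPI and outer addresses, verify the ICV and
 * the anti-replay window when the SA calls for them, decrypt the
 * payload, then either reinject the decapsulated inner packet (tunnel
 * mode) or strip the ESP header and IV in place and dispatch the next
 * protocol (transport mode).  On error the mbuf is freed and the
 * matching ipsecstat counter is bumped.
 */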
139 void
140 esp4_input(m, off)
141 struct mbuf *m;
142 int off;
143 {
144 struct ip *ip;
145 struct ip6_hdr *ip6;
146 struct esp *esp;
147 struct esptail esptail;
148 u_int32_t spi;
149 struct secasvar *sav = NULL;
150 size_t taillen;
151 u_int16_t nxt;
152 const struct esp_algorithm *algo;
153 int ivlen;
154 size_t hlen;
155 size_t esplen;
156 sa_family_t ifamily;
157
158 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
159 /* sanity check for alignment. */
160 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
161 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
162 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
163 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
164 goto bad;
165 }
166
167 if (m->m_len < off + ESPMAXLEN) {
168 m = m_pullup(m, off + ESPMAXLEN);
169 if (!m) {
170 ipseclog((LOG_DEBUG,
171 "IPv4 ESP input: can't pullup in esp4_input\n"));
172 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
173 goto bad;
174 }
175 }
176
177 ip = mtod(m, struct ip *);
178 esp = (struct esp *)(((u_int8_t *)ip) + off);
179 #ifdef _IP_VHL
180 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
181 #else
182 hlen = ip->ip_hl << 2;
183 #endif
184
185 /* find the sassoc. */
186 spi = esp->esp_spi;
187
188 if ((sav = key_allocsa(AF_INET,
189 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
190 IPPROTO_ESP, spi)) == 0) {
191 ipseclog((LOG_WARNING,
192 "IPv4 ESP input: no key association found for spi %u\n",
193 (u_int32_t)ntohl(spi)));
194 IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
195 goto bad;
196 }
197 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
198 printf("DP esp4_input called to allocate SA:%p\n", sav));
199 if (sav->state != SADB_SASTATE_MATURE
200 && sav->state != SADB_SASTATE_DYING) {
201 ipseclog((LOG_DEBUG,
202 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
203 (u_int32_t)ntohl(spi)));
204 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
205 goto bad;
206 }
207 algo = esp_algorithm_lookup(sav->alg_enc);
208 if (!algo) {
209 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
210 "unsupported encryption algorithm for spi %u\n",
211 (u_int32_t)ntohl(spi)));
212 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
213 goto bad;
214 }
215
216 /* check if we have proper ivlen information */
217 ivlen = sav->ivlen;
218 if (ivlen < 0) {
219 ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
220 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
221 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
222 goto bad;
223 }
224
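/*
 * Replay protection applies only to new-format (RFC 2406) ESP on an SA
 * that carries a replay window and a usable authentication
 * algorithm/key; null authentication is excluded just below.
 */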
225 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
226 && (sav->alg_auth && sav->key_auth)))
227 goto noreplaycheck;
228
229 if (sav->alg_auth == SADB_X_AALG_NULL ||
230 sav->alg_auth == SADB_AALG_NONE)
231 goto noreplaycheck;
232
233 /*
234 * check for sequence number.
235 */
236 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
237 ; /* okay */
238 else {
239 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
240 ipseclog((LOG_WARNING,
241 "replay packet in IPv4 ESP input: %s %s\n",
242 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
243 goto bad;
244 }
245
246 /* check ICV */
247 {
248 u_char sum0[AH_MAXSUMSIZE];
249 u_char sum[AH_MAXSUMSIZE];
250 const struct ah_algorithm *sumalgo;
251 size_t siz;
252
253 sumalgo = ah_algorithm_lookup(sav->alg_auth);
254 if (!sumalgo)
255 goto noreplaycheck;
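/*
 * Authenticator size, rounded up to a multiple of 4 bytes; that many
 * bytes at the tail of the packet hold the ICV, which is verified and
 * then stripped below.
 */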
256 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
257 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
258 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
259 goto bad;
260 }
261 if (AH_MAXSUMSIZE < siz) {
262 ipseclog((LOG_DEBUG,
263 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
264 (u_long)siz));
265 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
266 goto bad;
267 }
268
269 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
270
271 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
272 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
273 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
274 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
275 goto bad;
276 }
277
278 if (bcmp(sum0, sum, siz) != 0) {
279 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
280 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
281 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
282 goto bad;
283 }
284
285 /* strip off the authentication data */
286 m_adj(m, -siz);
287 ip = mtod(m, struct ip *);
288 #ifdef IPLEN_FLIPPED
289 ip->ip_len = ip->ip_len - siz;
290 #else
291 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
292 #endif
293 m->m_flags |= M_AUTHIPDGM;
294 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
295 }
296
297 /*
298 * update sequence number.
299 */
300 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
301 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
302 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
303 goto bad;
304 }
305 }
306
307 noreplaycheck:
308
309 /* process main esp header. */
310 if (sav->flags & SADB_X_EXT_OLD) {
311 /* RFC 1827 */
312 esplen = sizeof(struct esp);
313 } else {
314 /* RFC 2406 */
315 if (sav->flags & SADB_X_EXT_DERIV)
316 esplen = sizeof(struct esp);
317 else
318 esplen = sizeof(struct newesp);
319 }
320
321 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
322 ipseclog((LOG_WARNING,
323 "IPv4 ESP input: packet too short\n"));
324 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
325 goto bad;
326 }
327
328 if (m->m_len < off + esplen + ivlen) {
329 m = m_pullup(m, off + esplen + ivlen);
330 if (!m) {
331 ipseclog((LOG_DEBUG,
332 "IPv4 ESP input: can't pullup in esp4_input\n"));
333 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
334 goto bad;
335 }
336 }
337
338 /*
339 * pre-compute and cache intermediate key
340 */
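/*
 * If the cached key schedule cannot be set up, the packet cannot be
 * decrypted; count it as invalid and drop it.
 */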
341 if (esp_schedule(algo, sav) != 0) {
342 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
343 goto bad;
344 }
345
346 /*
347 * decrypt the packet.
348 */
349 if (!algo->decrypt)
350 panic("internal error: no decrypt function");
351 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
352 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
353 /* m is already freed */
354 m = NULL;
355 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
356 ipsec_logsastr(sav)));
357 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
358 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
359 goto bad;
360 }
361 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
362 IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);
363
364 m->m_flags |= M_DECRYPTED;
365
366 /*
367 * find the trailer of the ESP.
368 */
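/*
 * (The trailer holds the pad-length and next-header bytes and sits at
 * the very end of what remains; any ICV was already stripped above,
 * so taillen covers only the padding plus the trailer itself.)
 */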
369 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
370 (caddr_t)&esptail);
371 nxt = esptail.esp_nxt;
372 taillen = esptail.esp_padlen + sizeof(esptail);
373
374 if (m->m_pkthdr.len < taillen
375 || m->m_pkthdr.len - taillen < hlen) { /*?*/
376 ipseclog((LOG_WARNING,
377 "bad pad length in IPv4 ESP input: %s %s\n",
378 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
379 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
380 goto bad;
381 }
382
383 /* strip off the trailing pad area. */
384 m_adj(m, -taillen);
385 ip = mtod(m, struct ip *);
386 #ifdef IPLEN_FLIPPED
387 ip->ip_len = ip->ip_len - taillen;
388 #else
389 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
390 #endif
391
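/*
 * Tunnel mode: the decrypted payload is a complete inner IP packet, so
 * drop the outer IP header, ESP header and IV, run the tunnel
 * sanity/ECN checks, and hand the inner packet back to the stack via
 * proto_input().  Otherwise (transport mode, the else branch below)
 * the outer IP header is kept and only the ESP header and IV are
 * removed.
 */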
392 /* was it transmitted over the IPsec tunnel SA? */
393 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
394 /*
395 * strip off all the headers that precede the ESP header.
396 * IP4 xx ESP IP4' payload -> IP4' payload
397 *
398 * XXX more sanity checks
399 * XXX relationship with gif?
400 */
401 u_int8_t tos;
402
403 tos = ip->ip_tos;
404 m_adj(m, off + esplen + ivlen);
405 if (ifamily == AF_INET) {
406 if (m->m_len < sizeof(*ip)) {
407 m = m_pullup(m, sizeof(*ip));
408 if (!m) {
409 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
410 goto bad;
411 }
412 }
413 ip = mtod(m, struct ip *);
414 /* ECN consideration. */
415 ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
416 if (!key_checktunnelsanity(sav, AF_INET,
417 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
418 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
419 "in ESP input: %s %s\n",
420 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
421 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
422 goto bad;
423 }
424 #if INET6
425 } else if (ifamily == AF_INET6) {
426 #ifndef PULLDOWN_TEST
427 /*
428 * m_pullup is prohibited in KAME IPv6 input processing
429 * but there's no other way!
430 */
431 #else
432 /* okay to pullup in m_pulldown style */
433 #endif
434 if (m->m_len < sizeof(*ip6)) {
435 m = m_pullup(m, sizeof(*ip6));
436 if (!m) {
437 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
438 goto bad;
439 }
440 }
441
442 ip6 = mtod(m, struct ip6_hdr *);
443
444 /* ECN consideration. */
445 /* XXX To be fixed later if needed */
446 // ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
447
448 if (!key_checktunnelsanity(sav, AF_INET6,
449 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
450 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
451 "in ESP input: %s %s\n",
452 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
453 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
454 goto bad;
455 }
456 #endif /* INET6 */
457 } else {
458 ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
459 "in ESP input\n"));
460 goto bad;
461 }
462
463 key_sa_recordxfer(sav, m);
464 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
465 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
466 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
467 goto bad;
468 }
469
470 /* Clear the csum flags, they can't be valid for the inner headers */
471 m->m_pkthdr.csum_flags = 0;
472 proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m);
473 nxt = IPPROTO_DONE;
474 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
475 } else {
476 /*
477 * strip off ESP header and IV.
478 * even in the m_pulldown case, we need to strip off ESP so that
479 * we can always compute the checksum for AH correctly.
480 */
481 size_t stripsiz;
482
483 stripsiz = esplen + ivlen;
484
485 ip = mtod(m, struct ip *);
486 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
487 m->m_data += stripsiz;
488 m->m_len -= stripsiz;
489 m->m_pkthdr.len -= stripsiz;
490
491 ip = mtod(m, struct ip *);
492 #ifdef IPLEN_FLIPPED
493 ip->ip_len = ip->ip_len - stripsiz;
494 #else
495 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
496 #endif
497 ip->ip_p = nxt;
498
499 key_sa_recordxfer(sav, m);
500 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
501 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
502 goto bad;
503 }
504
505 /*
506 * Set the csum valid flag: if we authenticated the
507 * packet, the payload shouldn't be corrupt unless
508 * it was corrupted before being signed on the other
509 * side.
510 */
511 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
512 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
513 m->m_pkthdr.csum_data = 0xFFFF;
514 }
515
516 if (nxt != IPPROTO_DONE) {
517 if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
518 ipsec4_in_reject(m, NULL)) {
519 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
520 goto bad;
521 }
522 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);
523
524 /* translate encapsulated UDP port ? */
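/*
 * NAT-T with multiple users behind one NAT: the first decapsulated UDP
 * packet records the peer's encapsulated source port on the SA, and
 * later packets must match it.  The source port is then rewritten to
 * the peer's IKE port, and the UDP checksum is cleared since the port
 * rewrite would invalidate it.
 */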
525 if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
526 struct udphdr *udp;
527
528 if (nxt != IPPROTO_UDP) { /* not a UDP packet - drop it */
529 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
530 goto bad;
531 }
532
533 if (m->m_len < off + sizeof(struct udphdr)) {
534 m = m_pullup(m, off + sizeof(struct udphdr));
535 if (!m) {
536 ipseclog((LOG_DEBUG,
537 "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
538 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
539 goto bad;
540 }
541 ip = mtod(m, struct ip *);
542 }
543 udp = (struct udphdr *)(((u_int8_t *)ip) + off);
544
545 lck_mtx_lock(sadb_mutex);
546 if (sav->natt_encapsulated_src_port == 0) {
547 sav->natt_encapsulated_src_port = udp->uh_sport;
548 } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
549 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
550 lck_mtx_unlock(sadb_mutex);
551 goto bad;
552 }
553 lck_mtx_unlock(sadb_mutex);
554 udp->uh_sport = htons(sav->remote_ike_port);
555 udp->uh_sum = 0;
556 }
557 ip_proto_dispatch_in(m, off, nxt, 0);
558 } else
559 m_freem(m);
560 m = NULL;
561 }
562
563 if (sav) {
564 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
565 printf("DP esp4_input call free SA:%p\n", sav));
566 key_freesav(sav, KEY_SADB_UNLOCKED);
567 }
568 IPSEC_STAT_INCREMENT(ipsecstat.in_success);
569 return;
570
571 bad:
572 if (sav) {
573 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
574 printf("DP esp4_input call free SA:%p\n", sav));
575 key_freesav(sav, KEY_SADB_UNLOCKED);
576 }
577 if (m)
578 m_freem(m);
579 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
580 return;
581 }
582 #endif /* INET */
583
584 #if INET6
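/*
 * esp6_input - ESP input processing for IPv6.  The flow mirrors
 * esp4_input(): look up the SA, verify the ICV and replay window,
 * decrypt, then either reinject the inner packet (tunnel mode) or
 * strip the ESP header/IV and return the next header value so
 * ip6_input() can keep parsing.  Returns IPPROTO_DONE once the mbuf
 * has been consumed or freed.
 */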
585 int
586 esp6_input(mp, offp)
587 struct mbuf **mp;
588 int *offp;
589 {
590 struct mbuf *m = *mp;
591 int off = *offp;
592 struct ip6_hdr *ip6;
593 struct esp *esp;
594 struct esptail esptail;
595 u_int32_t spi;
596 struct secasvar *sav = NULL;
597 size_t taillen;
598 u_int16_t nxt;
599 const struct esp_algorithm *algo;
600 int ivlen;
601 size_t esplen;
602
603 /* sanity check for alignment. */
604 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
605 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
606 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
607 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
608 goto bad;
609 }
610
611 #ifndef PULLDOWN_TEST
612 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
613 esp = (struct esp *)(mtod(m, caddr_t) + off);
614 #else
615 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
616 if (esp == NULL) {
617 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
618 return IPPROTO_DONE;
619 }
620 #endif
621 ip6 = mtod(m, struct ip6_hdr *);
622
623 if (ntohs(ip6->ip6_plen) == 0) {
624 ipseclog((LOG_ERR, "IPv6 ESP input: "
625 "ESP with IPv6 jumbogram is not supported.\n"));
626 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
627 goto bad;
628 }
629
630 /* find the sassoc. */
631 spi = esp->esp_spi;
632
633 if ((sav = key_allocsa(AF_INET6,
634 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
635 IPPROTO_ESP, spi)) == 0) {
636 ipseclog((LOG_WARNING,
637 "IPv6 ESP input: no key association found for spi %u\n",
638 (u_int32_t)ntohl(spi)));
639 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
640 goto bad;
641 }
642 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
643 printf("DP esp6_input called to allocate SA:%p\n", sav));
644 if (sav->state != SADB_SASTATE_MATURE
645 && sav->state != SADB_SASTATE_DYING) {
646 ipseclog((LOG_DEBUG,
647 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
648 (u_int32_t)ntohl(spi)));
649 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
650 goto bad;
651 }
652 algo = esp_algorithm_lookup(sav->alg_enc);
653 if (!algo) {
654 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
655 "unsupported encryption algorithm for spi %u\n",
656 (u_int32_t)ntohl(spi)));
657 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
658 goto bad;
659 }
660
661 /* check if we have proper ivlen information */
662 ivlen = sav->ivlen;
663 if (ivlen < 0) {
664 ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
665 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
666 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
667 goto bad;
668 }
669
670 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
671 && (sav->alg_auth && sav->key_auth)))
672 goto noreplaycheck;
673
674 if (sav->alg_auth == SADB_X_AALG_NULL ||
675 sav->alg_auth == SADB_AALG_NONE)
676 goto noreplaycheck;
677
678 /*
679 * check for sequence number.
680 */
681 if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav))
682 ; /* okay */
683 else {
684 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
685 ipseclog((LOG_WARNING,
686 "replay packet in IPv6 ESP input: %s %s\n",
687 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
688 goto bad;
689 }
690
691 /* check ICV */
692 {
693 u_char sum0[AH_MAXSUMSIZE];
694 u_char sum[AH_MAXSUMSIZE];
695 const struct ah_algorithm *sumalgo;
696 size_t siz;
697
698 sumalgo = ah_algorithm_lookup(sav->alg_auth);
699 if (!sumalgo)
700 goto noreplaycheck;
701 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
702 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
703 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
704 goto bad;
705 }
706 if (AH_MAXSUMSIZE < siz) {
707 ipseclog((LOG_DEBUG,
708 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
709 (u_long)siz));
710 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
711 goto bad;
712 }
713
714 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
715
716 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
717 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
718 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
719 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
720 goto bad;
721 }
722
723 if (bcmp(sum0, sum, siz) != 0) {
724 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
725 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
726 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
727 goto bad;
728 }
729
730 /* strip off the authentication data */
731 m_adj(m, -siz);
732 ip6 = mtod(m, struct ip6_hdr *);
733 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
734
735 m->m_flags |= M_AUTHIPDGM;
736 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
737 }
738
739 /*
740 * update sequence number.
741 */
742 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
743 if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) {
744 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
745 goto bad;
746 }
747 }
748
749 noreplaycheck:
750
751 /* process main esp header. */
752 if (sav->flags & SADB_X_EXT_OLD) {
753 /* RFC 1827 */
754 esplen = sizeof(struct esp);
755 } else {
756 /* RFC 2406 */
757 if (sav->flags & SADB_X_EXT_DERIV)
758 esplen = sizeof(struct esp);
759 else
760 esplen = sizeof(struct newesp);
761 }
762
763 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
764 ipseclog((LOG_WARNING,
765 "IPv6 ESP input: packet too short\n"));
766 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
767 goto bad;
768 }
769
770 #ifndef PULLDOWN_TEST
771 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
772 #else
773 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
774 if (esp == NULL) {
775 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
776 m = NULL;
777 goto bad;
778 }
779 #endif
780 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
781
782 /*
783 * pre-compute and cache intermediate key
784 */
785 if (esp_schedule(algo, sav) != 0) {
786 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
787 goto bad;
788 }
789
790 /*
791 * decrypt the packet.
792 */
793 if (!algo->decrypt)
794 panic("internal error: no decrypt function");
795 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
796 /* m is already freed */
797 m = NULL;
798 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
799 ipsec_logsastr(sav)));
800 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
801 goto bad;
802 }
803 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
804
805 m->m_flags |= M_DECRYPTED;
806
807 /*
808 * find the trailer of the ESP.
809 */
810 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
811 (caddr_t)&esptail);
812 nxt = esptail.esp_nxt;
813 taillen = esptail.esp_padlen + sizeof(esptail);
814
815 if (m->m_pkthdr.len < taillen
816 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
817 ipseclog((LOG_WARNING,
818 "bad pad length in IPv6 ESP input: %s %s\n",
819 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
820 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
821 goto bad;
822 }
823
824 /* strip off the trailing pad area. */
825 m_adj(m, -taillen);
826 ip6 = mtod(m, struct ip6_hdr *);
827 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
828
829 /* was it transmitted over the IPsec tunnel SA? */
830 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
831 /*
832 * strip off all the headers that precede the ESP header.
833 * IP6 xx ESP IP6' payload -> IP6' payload
834 *
835 * XXX more sanity checks
836 * XXX relationship with gif?
837 */
838 u_int32_t flowinfo; /*net endian*/
839 flowinfo = ip6->ip6_flow;
840 m_adj(m, off + esplen + ivlen);
841 if (m->m_len < sizeof(*ip6)) {
842 #ifndef PULLDOWN_TEST
843 /*
844 * m_pullup is prohibited in KAME IPv6 input processing
845 * but there's no other way!
846 */
847 #else
848 /* okay to pullup in m_pulldown style */
849 #endif
850 m = m_pullup(m, sizeof(*ip6));
851 if (!m) {
852 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
853 goto bad;
854 }
855 }
856 ip6 = mtod(m, struct ip6_hdr *);
857 /* ECN consideration. */
858 ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow);
859 if (!key_checktunnelsanity(sav, AF_INET6,
860 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
861 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
862 "in IPv6 ESP input: %s %s\n",
863 ipsec6_logpacketstr(ip6, spi),
864 ipsec_logsastr(sav)));
865 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
866 goto bad;
867 }
868
869 key_sa_recordxfer(sav, m);
870 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
871 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
872 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
873 goto bad;
874 }
875 proto_input(PF_INET6, m);
876 nxt = IPPROTO_DONE;
877 } else {
878 /*
879 * strip off ESP header and IV.
880 * even in the m_pulldown case, we need to strip off ESP so that
881 * we can always compute the checksum for AH correctly.
882 */
883 size_t stripsiz;
884 char *prvnxtp;
885
886 /*
887 * Set the next header field of the previous header correctly.
888 */
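/*
 * Unlike IPv4, where ip_p is simply overwritten, the next-header field
 * that pointed at ESP may live in an extension header, so
 * ip6_get_prevhdr() is used to locate it before it is rewritten to the
 * decapsulated protocol.
 */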
889 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
890 *prvnxtp = nxt;
891
892 stripsiz = esplen + ivlen;
893
894 ip6 = mtod(m, struct ip6_hdr *);
895 if (m->m_len >= stripsiz + off) {
896 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
897 m->m_data += stripsiz;
898 m->m_len -= stripsiz;
899 m->m_pkthdr.len -= stripsiz;
900 } else {
901 /*
902 * this involves no data copy if the split boundary
903 * falls on a cluster boundary
904 */
905 struct mbuf *n;
906
907 n = m_split(m, off, M_DONTWAIT);
908 if (n == NULL) {
909 /* m is retained by m_split */
910 goto bad;
911 }
912 m_adj(n, stripsiz);
913 /* m_cat does not update m_pkthdr.len */
914 m->m_pkthdr.len += n->m_pkthdr.len;
915 m_cat(m, n);
916 }
917
918 #ifndef PULLDOWN_TEST
919 /*
920 * KAME requires that the packet be contiguous in a single
921 * mbuf, so make sure of that here.
922 * This kind of code should be avoided.
923 * XXX other conditions to avoid running this part?
924 */
925 if (m->m_len != m->m_pkthdr.len) {
926 struct mbuf *n = NULL;
927 int maxlen;
928
929 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
930 maxlen = MHLEN;
931 if (n)
932 M_COPY_PKTHDR(n, m);
933 if (n && m->m_pkthdr.len > maxlen) {
934 MCLGET(n, M_DONTWAIT);
935 maxlen = MCLBYTES;
936 if ((n->m_flags & M_EXT) == 0) {
937 m_free(n);
938 n = NULL;
939 }
940 }
941 if (!n) {
942 printf("esp6_input: mbuf allocation failed\n");
943 goto bad;
944 }
945
946 if (m->m_pkthdr.len <= maxlen) {
947 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
948 n->m_len = m->m_pkthdr.len;
949 n->m_pkthdr.len = m->m_pkthdr.len;
950 n->m_next = NULL;
951 m_freem(m);
952 } else {
953 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
954 n->m_len = maxlen;
955 n->m_pkthdr.len = m->m_pkthdr.len;
956 n->m_next = m;
957 m_adj(m, maxlen);
958 m->m_flags &= ~M_PKTHDR;
959 }
960 m = n;
961 }
962 #endif
963
964 ip6 = mtod(m, struct ip6_hdr *);
965 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
966
967 key_sa_recordxfer(sav, m);
968 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
969 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
970 goto bad;
971 }
972 }
973
974 *offp = off;
975 *mp = m;
976
977 if (sav) {
978 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
979 printf("DP esp6_input call free SA:%p\n", sav));
980 key_freesav(sav, KEY_SADB_UNLOCKED);
981 }
982 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
983 return nxt;
984
985 bad:
986 if (sav) {
987 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
988 printf("DP esp6_input call free SA:%p\n", sav));
989 key_freesav(sav, KEY_SADB_UNLOCKED);
990 }
991 if (m)
992 m_freem(m);
993 return IPPROTO_DONE;
994 }
995
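/*
 * esp6_ctlinput - ICMPv6 error handling for ESP over IPv6.  Errors are
 * fanned out to potentially affected PCBs via pfctlinput2(), and for
 * PRC_MSGSIZE the SPI from the returned ESP header is used to look for
 * a matching SA before letting icmp6_mtudisc_update() act on the
 * reported MTU.
 */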
996 void
997 esp6_ctlinput(cmd, sa, d)
998 int cmd;
999 struct sockaddr *sa;
1000 void *d;
1001 {
1002 const struct newesp *espp;
1003 struct newesp esp;
1004 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1005 struct secasvar *sav;
1006 struct ip6_hdr *ip6;
1007 struct mbuf *m;
1008 int off;
1009 struct sockaddr_in6 *sa6_src, *sa6_dst;
1010
1011 if (sa->sa_family != AF_INET6 ||
1012 sa->sa_len != sizeof(struct sockaddr_in6))
1013 return;
1014 if ((unsigned)cmd >= PRC_NCMDS)
1015 return;
1016
1017 /* if the parameter is from icmp6, decode it. */
1018 if (d != NULL) {
1019 ip6cp = (struct ip6ctlparam *)d;
1020 m = ip6cp->ip6c_m;
1021 ip6 = ip6cp->ip6c_ip6;
1022 off = ip6cp->ip6c_off;
1023 } else {
1024 m = NULL;
1025 ip6 = NULL;
1026 }
1027
1028 if (ip6) {
1029 /*
1030 * Notify all possible sockets of the error via pfctlinput2.
1031 * Since the upper layer information (such as protocol type,
1032 * source and destination ports) is embedded in the encrypted
1033 * data and might have been cut, we can't directly call
1034 * an upper layer ctlinput function. However, the pcbnotify
1035 * function will consider source and destination addresses
1036 * as well as the flow info value, and may be able to find
1037 * some PCB that should be notified.
1038 * Although pfctlinput2 will call esp6_ctlinput(), there is
1039 * no possibility of an infinite loop of function calls,
1040 * because we don't pass the inner IPv6 header.
1041 */
1042 bzero(&ip6cp1, sizeof(ip6cp1));
1043 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1044 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1045
1046 /*
1047 * Then go to special cases that need ESP header information.
1048 * XXX: We assume that when ip6 is non-NULL,
1049 * m and off are valid.
1050 */
1051
1052 /* check if we can safely examine src and dst ports */
1053 if (m->m_pkthdr.len < off + sizeof(esp))
1054 return;
1055
1056 if (m->m_len < off + sizeof(esp)) {
1057 /*
1058 * this should be a rare case,
1059 * so we compromise with this copy...
1060 */
1061 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1062 espp = &esp;
1063 } else
1064 espp = (struct newesp*)(mtod(m, caddr_t) + off);
1065
1066 if (cmd == PRC_MSGSIZE) {
1067 int valid = 0;
1068
1069 /*
1070 * Check to see if we have a valid SA corresponding to
1071 * the address in the ICMP message payload.
1072 */
1073 sa6_src = ip6cp->ip6c_src;
1074 sa6_dst = (struct sockaddr_in6 *)sa;
1075 sav = key_allocsa(AF_INET6,
1076 (caddr_t)&sa6_src->sin6_addr,
1077 (caddr_t)&sa6_dst->sin6_addr,
1078 IPPROTO_ESP, espp->esp_spi);
1079 if (sav) {
1080 if (sav->state == SADB_SASTATE_MATURE ||
1081 sav->state == SADB_SASTATE_DYING)
1082 valid++;
1083 key_freesav(sav, KEY_SADB_LOCKED);
1084 }
1085
1086 /* XXX Further validation? */
1087
1088 /*
1089 * Depending on the value of "valid" and routing table
1090 * size (mtudisc_{hi,lo}wat), we will:
1091 * - recalculate the new MTU and create the
1092 * corresponding routing entry, or
1093 * - ignore the MTU change notification.
1094 */
1095 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1096 }
1097 } else {
1098 /* we normally notify any pcb here */
1099 }
1100 }
1101 #endif /* INET6 */