apple/xnu (xnu-6153.141.1) - bsd/netinet6/esp_output.c
1 /*
2 * Copyright (c) 2008-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_output.c,v 1.1.2.3 2002/04/28 05:40:26 suz Exp $ */
30 /* $KAME: esp_output.c,v 1.44 2001/07/26 06:53:15 jinmei Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #define _IP_VHL
62
63 /*
64 * RFC1827/2406 Encapsulated Security Payload.
65 */
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/domain.h>
72 #include <sys/protosw.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/errno.h>
76 #include <sys/time.h>
77 #include <sys/kernel.h>
78 #include <sys/syslog.h>
79
80 #include <net/if.h>
81 #include <net/route.h>
82 #include <net/multi_layer_pkt_log.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/in_var.h>
88 #include <netinet/udp.h> /* for nat traversal */
89 #include <netinet/tcp.h>
90 #include <netinet/in_tclass.h>
91
92 #if INET6
93 #include <netinet/ip6.h>
94 #include <netinet6/ip6_var.h>
95 #include <netinet/icmp6.h>
96 #endif
97
98 #include <netinet6/ipsec.h>
99 #if INET6
100 #include <netinet6/ipsec6.h>
101 #endif
102 #include <netinet6/ah.h>
103 #if INET6
104 #include <netinet6/ah6.h>
105 #endif
106 #include <netinet6/esp.h>
107 #if INET6
108 #include <netinet6/esp6.h>
109 #endif
110 #include <netkey/key.h>
111 #include <netkey/keydb.h>
112
113 #include <net/net_osdep.h>
114
115 #include <sys/kdebug.h>
116 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
117 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
118 #define DBG_FNC_ESPOUT NETDBG_CODE(DBG_NETIPSEC, (4 << 8))
119 #define DBG_FNC_ENCRYPT NETDBG_CODE(DBG_NETIPSEC, (5 << 8))
120
121 static int esp_output(struct mbuf *, u_char *, struct mbuf *,
122 int, struct secasvar *sav);
123
124 extern int esp_udp_encap_port;
125 extern u_int64_t natt_now;
126
127 extern lck_mtx_t *sadb_mutex;
128
129 /*
130 * compute ESP header size.
131 */
132 size_t
133 esp_hdrsiz(__unused struct ipsecrequest *isr)
134 {
135 #if 0
136 /* sanity check */
137 if (isr == NULL) {
138 panic("esp_hdrsiz: NULL was passed.\n");
139 }
140
141
142 lck_mtx_lock(sadb_mutex);
143 {
144 struct secasvar *sav;
145 const struct esp_algorithm *algo;
146 const struct ah_algorithm *aalgo;
147 size_t ivlen;
148 size_t authlen;
149 size_t hdrsiz;
150 size_t maxpad;
151
152 /*%%%% this needs to change - no sav in ipsecrequest any more */
153 sav = isr->sav;
154
155 if (isr->saidx.proto != IPPROTO_ESP) {
156 panic("unsupported mode passed to esp_hdrsiz");
157 }
158
159 if (sav == NULL) {
160 goto estimate;
161 }
162 if (sav->state != SADB_SASTATE_MATURE
163 && sav->state != SADB_SASTATE_DYING) {
164 goto estimate;
165 }
166
167 /* we need transport mode ESP. */
168 algo = esp_algorithm_lookup(sav->alg_enc);
169 if (!algo) {
170 goto estimate;
171 }
172 ivlen = sav->ivlen;
173 if (ivlen < 0) {
174 goto estimate;
175 }
176
177 if (algo->padbound) {
178 maxpad = algo->padbound;
179 } else {
180 maxpad = 4;
181 }
182 maxpad += 1; /* maximum 'extendsiz' is padbound + 1, see esp_output */
183
184 if (sav->flags & SADB_X_EXT_OLD) {
185 /* RFC 1827 */
186 hdrsiz = sizeof(struct esp) + ivlen + maxpad;
187 } else {
188 /* RFC 2406 */
189 aalgo = ah_algorithm_lookup(sav->alg_auth);
190 if (aalgo && sav->replay[0] != NULL && sav->key_auth) {
191 authlen = (aalgo->sumsiz)(sav);
192 } else {
193 authlen = 0;
194 }
195 hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen;
196 }
197
198 /*
199 * If the security association indicates that NATT is required,
200 * add the size of the NATT encapsulation header:
201 */
202 if ((sav->flags & SADB_X_EXT_NATT) != 0) {
203 hdrsiz += sizeof(struct udphdr) + 4;
204 }
205
206 lck_mtx_unlock(sadb_mutex);
207 return hdrsiz;
208 }
209 estimate:
210 lck_mtx_unlock(sadb_mutex);
211 #endif
212 /*
213 * ASSUMING:
214 * sizeof(struct newesp) > sizeof(struct esp). (8)
215 * esp_max_ivlen() = max ivlen for CBC mode
216 * 17 = (maximum padding length without random padding length)
217 * + (Pad Length field) + (Next Header field).
218 * 64 = maximum ICV we support.
219 * sizeof(struct udphdr) in case NAT traversal is used
220 */
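	/*
	 * Illustrative worst-case arithmetic (IV size assumed for the example,
	 * not taken from the build): with sizeof(struct newesp) == 8, a 16-byte
	 * maximum CBC IV, 17 bytes of padding bookkeeping, AH_MAXSUMSIZE == 64
	 * and an 8-byte UDP header, the estimate comes to
	 * 8 + 16 + 17 + 64 + 8 = 113 bytes.
	 */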
221 return sizeof(struct newesp) + esp_max_ivlen() + 17 + AH_MAXSUMSIZE + sizeof(struct udphdr);
222 }
223
224 /*
225 * Modify the packet so that the payload is encrypted.
226 * The mbuf (m) must start with IPv4 or IPv6 header.
227  * On failure, free the given mbuf and return a non-zero error code.
228 *
229 * on invocation:
230 * m nexthdrp md
231 * v v v
232 * IP ......... payload
233 * during the encryption:
234 * m nexthdrp mprev md
235 * v v v v
236 * IP ............... esp iv payload pad padlen nxthdr
237 * <--><-><------><--------------->
238 * esplen plen extendsiz
239 * ivlen
240 * <-----> esphlen
241 * <-> hlen
242 * <-----------------> espoff
243 */
244 static int
245 esp_output(
246 struct mbuf *m,
247 u_char *nexthdrp,
248 struct mbuf *md,
249 int af,
250 struct secasvar *sav)
251 {
252 struct mbuf *n;
253 struct mbuf *mprev;
254 struct esp *esp;
255 struct esptail *esptail;
256 const struct esp_algorithm *algo;
257 struct tcphdr th = {};
258 u_int32_t spi;
259 u_int32_t seq;
260 u_int32_t inner_payload_len = 0;
261 u_int8_t inner_protocol = 0;
262 u_int8_t nxt = 0;
263 size_t plen; /*payload length to be encrypted*/
264 size_t espoff;
265 size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */
266 int ivlen;
267 int afnumber;
268 size_t extendsiz;
269 int error = 0;
270 struct ipsecstat *stat;
271 struct udphdr *udp = NULL;
272 int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) &&
273 ((esp_udp_encap_port & 0xFFFF) != 0 || sav->natt_encapsulated_src_port != 0));
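	/*
	 * When SADB_X_EXT_NATT is set for this SA, the ESP packet is wrapped in
	 * a UDP header (RFC 3948 style NAT traversal); esp_udp_encap_port is the
	 * local encapsulation port (typically 4500). The UDP header is inserted
	 * in front of the ESP header further below.
	 */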
274
275 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0);
276 switch (af) {
277 #if INET
278 case AF_INET:
279 afnumber = 4;
280 stat = &ipsecstat;
281 break;
282 #endif
283 #if INET6
284 case AF_INET6:
285 afnumber = 6;
286 stat = &ipsec6stat;
287 break;
288 #endif
289 default:
290 ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
291 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, 0, 0, 0, 0);
292 return 0; /* no change at all */
293 }
294
295 mbuf_traffic_class_t traffic_class = 0;
296 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
297 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
298 u_int8_t dscp = 0;
299 switch (af) {
300 #if INET
301 case AF_INET:
302 {
303 struct ip *ip = mtod(m, struct ip *);
304 dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
305 break;
306 }
307 #endif /*INET*/
308 #if INET6
309 case AF_INET6:
310 {
311 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
312 dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
313 break;
314 }
315 #endif /*INET6*/
316 default:
317 panic("esp_output: should not reach here");
318 }
319 traffic_class = rfc4594_dscp_to_tc(dscp);
320 }
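	/*
	 * When sequence numbers are kept per traffic class, the DSCP value of
	 * the outer header is mapped to a traffic class that selects one of the
	 * sav->replay[] counters, giving each class its own ESP sequence space.
	 */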
321
322 /* some sanity check */
323 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] == NULL) {
324 switch (af) {
325 #if INET
326 case AF_INET:
327 {
328 struct ip *ip;
329
330 ip = mtod(m, struct ip *);
331 ipseclog((LOG_DEBUG, "esp4_output: internal error: "
332 "sav->replay is null: %x->%x, SPI=%u\n",
333 (u_int32_t)ntohl(ip->ip_src.s_addr),
334 (u_int32_t)ntohl(ip->ip_dst.s_addr),
335 (u_int32_t)ntohl(sav->spi)));
336 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
337 break;
338 }
339 #endif /*INET*/
340 #if INET6
341 case AF_INET6:
342 ipseclog((LOG_DEBUG, "esp6_output: internal error: "
343 "sav->replay is null: SPI=%u\n",
344 (u_int32_t)ntohl(sav->spi)));
345 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
346 break;
347 #endif /*INET6*/
348 default:
349 panic("esp_output: should not reach here");
350 }
351 m_freem(m);
352 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2, 0, 0, 0, 0);
353 return EINVAL;
354 }
355
356 algo = esp_algorithm_lookup(sav->alg_enc);
357 if (!algo) {
358 ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
359 "SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
360 m_freem(m);
361 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3, 0, 0, 0, 0);
362 return EINVAL;
363 }
364 spi = sav->spi;
365 ivlen = sav->ivlen;
366         /* should be okay */
367 if (ivlen < 0) {
368 panic("invalid ivlen");
369 }
370
371 {
372 /*
373 * insert ESP header.
374 * XXX inserts ESP header right after IPv4 header. should
375 * chase the header chain.
376 * XXX sequential number
377 */
378 #if INET
379 struct ip *ip = NULL;
380 #endif
381 #if INET6
382 struct ip6_hdr *ip6 = NULL;
383 #endif
384 size_t esplen; /* sizeof(struct esp/newesp) */
385 size_t hlen = 0; /* ip header len */
386
387 if (sav->flags & SADB_X_EXT_OLD) {
388 /* RFC 1827 */
389 esplen = sizeof(struct esp);
390 } else {
391 /* RFC 2406 */
392 if (sav->flags & SADB_X_EXT_DERIV) {
393 esplen = sizeof(struct esp);
394 } else {
395 esplen = sizeof(struct newesp);
396 }
397 }
398 esphlen = esplen + ivlen;
399
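	/*
	 * Find the mbuf immediately preceding md: the ESP header (and the
	 * optional UDP encapsulation header) will be linked in between mprev
	 * and md, directly in front of the data to be encrypted.
	 */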
400 for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) {
401 ;
402 }
403 if (mprev == NULL || mprev->m_next != md) {
404 ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
405 afnumber));
406 m_freem(m);
407 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4, 0, 0, 0, 0);
408 return EINVAL;
409 }
410
411 plen = 0;
412 for (n = md; n; n = n->m_next) {
413 plen += n->m_len;
414 }
415
416 switch (af) {
417 #if INET
418 case AF_INET:
419 ip = mtod(m, struct ip *);
420 #ifdef _IP_VHL
421 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
422 #else
423 hlen = ip->ip_hl << 2;
424 #endif
425 break;
426 #endif
427 #if INET6
428 case AF_INET6:
429 ip6 = mtod(m, struct ip6_hdr *);
430 hlen = sizeof(*ip6);
431 break;
432 #endif
433 }
434
435 /* grab info for packet logging */
436 struct secashead *sah = sav->sah;
437 if (net_mpklog_enabled &&
438 sah != NULL && sah->ipsec_if != NULL) {
439 ifnet_t ifp = sah->ipsec_if;
440
441 if ((ifp->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
442 size_t iphlen = 0;
443
444 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
445 struct ip *inner_ip = mtod(md, struct ip *);
446 if (IP_VHL_V(inner_ip->ip_vhl) == IPVERSION) {
447 #ifdef _IP_VHL
448 iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
449 #else
450 iphlen = inner_ip->ip_hl << 2;
451 #endif
452 inner_protocol = inner_ip->ip_p;
453 } else if (IP_VHL_V(inner_ip->ip_vhl) == IPV6_VERSION) {
454 struct ip6_hdr *inner_ip6 = mtod(md, struct ip6_hdr *);
455 iphlen = sizeof(struct ip6_hdr);
456 inner_protocol = inner_ip6->ip6_nxt;
457 }
458
459 if (inner_protocol == IPPROTO_TCP) {
460 if ((int)(iphlen + sizeof(th)) <=
461 (m->m_pkthdr.len - m->m_len)) {
462 m_copydata(md, iphlen, sizeof(th), (u_int8_t *)&th);
463 }
464
465 inner_payload_len = m->m_pkthdr.len - m->m_len - iphlen - (th.th_off << 2);
466 }
467 } else {
468 iphlen = hlen;
469 if (af == AF_INET) {
470 inner_protocol = ip->ip_p;
471 } else if (af == AF_INET6) {
472 inner_protocol = ip6->ip6_nxt;
473 }
474
475 if (inner_protocol == IPPROTO_TCP) {
476 if ((int)(iphlen + sizeof(th)) <=
477 m->m_pkthdr.len) {
478 m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
479 }
480
481 inner_payload_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
482 }
483 }
484 }
485 }
486
487 /* make the packet over-writable */
488 mprev->m_next = NULL;
489 if ((md = ipsec_copypkt(md)) == NULL) {
490 m_freem(m);
491 error = ENOBUFS;
492 goto fail;
493 }
494 mprev->m_next = md;
495
496 /*
497 * Translate UDP source port back to its original value.
498          * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transport mode.
499 */
500 if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
501 /* if not UDP - drop it */
502 if (ip->ip_p != IPPROTO_UDP) {
503 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
504 m_freem(m);
505 error = EINVAL;
506 goto fail;
507 }
508
509 udp = mtod(md, struct udphdr *);
510
511 /* if src port not set in sav - find it */
512 if (sav->natt_encapsulated_src_port == 0) {
513 if (key_natt_get_translated_port(sav) == 0) {
514 m_freem(m);
515 error = EINVAL;
516 goto fail;
517 }
518 }
519 if (sav->remote_ike_port == htons(udp->uh_dport)) {
520 /* translate UDP port */
521 udp->uh_dport = sav->natt_encapsulated_src_port;
522 udp->uh_sum = 0; /* don't need checksum with ESP auth */
523 } else {
524 /* drop the packet - can't translate the port */
525 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
526 m_freem(m);
527 error = EINVAL;
528 goto fail;
529 }
530 }
531
532
533 espoff = m->m_pkthdr.len - plen;
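	/*
	 * espoff is the offset from the start of the packet to where the ESP
	 * header will sit; it is adjusted just below when a UDP encapsulation
	 * header is also inserted.
	 */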
534
535 if (udp_encapsulate) {
536 esphlen += sizeof(struct udphdr);
537 espoff += sizeof(struct udphdr);
538 }
539
540 /*
541          * grow the mbuf to accommodate the ESP header.
542 * before: IP ... payload
543 * after: IP ... [UDP] ESP IV payload
544 */
545 if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
546 MGET(n, M_DONTWAIT, MT_DATA);
547 if (!n) {
548 m_freem(m);
549 error = ENOBUFS;
550 goto fail;
551 }
552 n->m_len = esphlen;
553 mprev->m_next = n;
554 n->m_next = md;
555 m->m_pkthdr.len += esphlen;
556 if (udp_encapsulate) {
557 udp = mtod(n, struct udphdr *);
558 esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
559 } else {
560 esp = mtod(n, struct esp *);
561 }
562 } else {
563 md->m_len += esphlen;
564 md->m_data -= esphlen;
565 m->m_pkthdr.len += esphlen;
566 esp = mtod(md, struct esp *);
567 if (udp_encapsulate) {
568 udp = mtod(md, struct udphdr *);
569 esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
570 } else {
571 esp = mtod(md, struct esp *);
572 }
573 }
574
575 switch (af) {
576 #if INET
577 case AF_INET:
578 if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) {
579 ip->ip_len = htons(ntohs(ip->ip_len) + esphlen);
580 } else {
581 ipseclog((LOG_ERR,
582 "IPv4 ESP output: size exceeds limit\n"));
583 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
584 m_freem(m);
585 error = EMSGSIZE;
586 goto fail;
587 }
588 break;
589 #endif
590 #if INET6
591 case AF_INET6:
592 /* total packet length will be computed in ip6_output() */
593 break;
594 #endif
595 }
596 }
597
598 /* initialize esp header. */
599 esp->esp_spi = spi;
600 if ((sav->flags & SADB_X_EXT_OLD) == 0) {
601 struct newesp *nesp;
602 nesp = (struct newesp *)esp;
603 if (sav->replay[traffic_class]->count == sav->replay[traffic_class]->lastseq) {
604 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
605                         /* XXX Is it noisy? */
606 ipseclog((LOG_WARNING,
607 "replay counter overflowed. %s\n",
608 ipsec_logsastr(sav)));
609 IPSEC_STAT_INCREMENT(stat->out_inval);
610 m_freem(m);
611 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5, 0, 0, 0, 0);
612 return EINVAL;
613 }
614 }
615 lck_mtx_lock(sadb_mutex);
616 sav->replay[traffic_class]->count++;
617 lck_mtx_unlock(sadb_mutex);
618 /*
619 * XXX sequence number must not be cycled, if the SA is
620 * installed by IKE daemon.
621 */
622 nesp->esp_seq = htonl(sav->replay[traffic_class]->count);
623 seq = sav->replay[traffic_class]->count;
624 }
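	/*
	 * esp_seq carries the 32-bit per-SA (or per traffic class) counter.
	 * Unless SADB_X_EXT_CYCSEQ allows wrapping, the counter must not cycle;
	 * the packet is dropped above when it would, and the SA is expected to
	 * be rekeyed before that point.
	 */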
625
626 {
627 /*
628 * find the last mbuf. make some room for ESP trailer.
629 */
630 #if INET
631 struct ip *ip = NULL;
632 #endif
633 size_t padbound;
634 u_char *extend;
635 int i;
636 int randpadmax;
637
638 if (algo->padbound) {
639 padbound = algo->padbound;
640 } else {
641 padbound = 4;
642 }
643         /* ESP packet, including the nxthdr field, must be a multiple of 4 bytes long */
644 if (padbound < 4) {
645 padbound = 4;
646 }
647
648 extendsiz = padbound - (plen % padbound);
649 if (extendsiz == 1) {
650 extendsiz = padbound + 1;
651 }
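	/*
	 * extendsiz covers the padding plus the Pad Length and Next Header
	 * octets, sized so that plen + extendsiz is a multiple of padbound.
	 * Example: plen = 3, padbound = 4 gives extendsiz = 1, which cannot
	 * hold the two trailer octets, so it is bumped to padbound + 1 = 5
	 * (3 pad bytes + Pad Length + Next Header) for a total of 8 bytes.
	 */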
652
653 /* random padding */
654 switch (af) {
655 #if INET
656 case AF_INET:
657 randpadmax = ip4_esp_randpad;
658 break;
659 #endif
660 #if INET6
661 case AF_INET6:
662 randpadmax = ip6_esp_randpad;
663 break;
664 #endif
665 default:
666 randpadmax = -1;
667 break;
668 }
669 if (randpadmax < 0 || plen + extendsiz >= randpadmax) {
670 ;
671 } else {
672 int pad;
673
674 /* round */
675 randpadmax = (randpadmax / padbound) * padbound;
676 pad = (randpadmax - plen + extendsiz) / padbound;
677
678 if (pad > 0) {
679 pad = (random() % pad) * padbound;
680 } else {
681 pad = 0;
682 }
683
684 /*
685 * make sure we do not pad too much.
686 * MLEN limitation comes from the trailer attachment
687 * code below.
688 * 256 limitation comes from sequential padding.
689          * also, the 1-octet Pad Length field in the ESP trailer imposes a
690          * limit (but it is less strict than sequential padding, as the
691          * length field does not count the last 2 octets).
692 */
693 if (extendsiz + pad <= MLEN && extendsiz + pad < 256) {
694 extendsiz += pad;
695 }
696 }
697
698 #if DIAGNOSTIC
699 if (extendsiz > MLEN || extendsiz >= 256) {
700 panic("extendsiz too big in esp_output");
701 }
702 #endif
703
704 n = m;
705 while (n->m_next) {
706 n = n->m_next;
707 }
708
709 /*
710 * if M_EXT, the external mbuf data may be shared among
711          * two consecutive TCP packets, and it may be unsafe to use the
712 * trailing space.
713 */
714 if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) {
715 extend = mtod(n, u_char *) + n->m_len;
716 n->m_len += extendsiz;
717 m->m_pkthdr.len += extendsiz;
718 } else {
719 struct mbuf *nn;
720
721 MGET(nn, M_DONTWAIT, MT_DATA);
722 if (!nn) {
723 ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf",
724 afnumber));
725 m_freem(m);
726 error = ENOBUFS;
727 goto fail;
728 }
729 extend = mtod(nn, u_char *);
730 nn->m_len = extendsiz;
731 nn->m_next = NULL;
732 n->m_next = nn;
733 n = nn;
734 m->m_pkthdr.len += extendsiz;
735 }
736 switch (sav->flags & SADB_X_EXT_PMASK) {
737 case SADB_X_EXT_PRAND:
738 key_randomfill(extend, extendsiz);
739 break;
740 case SADB_X_EXT_PZERO:
741 bzero(extend, extendsiz);
742 break;
743 case SADB_X_EXT_PSEQ:
744 for (i = 0; i < extendsiz; i++) {
745 extend[i] = (i + 1) & 0xff;
746 }
747 break;
748 }
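	/*
	 * The pad content depends on the SA flags: random bytes
	 * (SADB_X_EXT_PRAND), zeros (SADB_X_EXT_PZERO), or the monotonically
	 * increasing 1, 2, 3, ... pattern (SADB_X_EXT_PSEQ) that RFC 2406
	 * describes as the default padding.
	 */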
749
750 nxt = *nexthdrp;
751 if (udp_encapsulate) {
752 *nexthdrp = IPPROTO_UDP;
753
754 /* Fill out the UDP header */
755 if (sav->natt_encapsulated_src_port != 0) {
756 udp->uh_sport = (u_short)sav->natt_encapsulated_src_port;
757 } else {
758 udp->uh_sport = htons((u_short)esp_udp_encap_port);
759 }
760 udp->uh_dport = htons(sav->remote_ike_port);
761                 // udp->uh_ulen set later, after all length tweaks are complete
762 udp->uh_sum = 0;
763
764 /* Update last sent so we know if we need to send keepalive */
765 sav->natt_last_activity = natt_now;
766 } else {
767 *nexthdrp = IPPROTO_ESP;
768 }
769
770 /* initialize esp trailer. */
771 esptail = (struct esptail *)
772 (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail));
773 esptail->esp_nxt = nxt;
774 esptail->esp_padlen = extendsiz - 2;
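	/*
	 * Per RFC 2406, the Pad Length field counts only the pad bytes, not the
	 * Pad Length and Next Header octets themselves, hence extendsiz - 2.
	 */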
775
776 /* modify IP header (for ESP header part only) */
777 switch (af) {
778 #if INET
779 case AF_INET:
780 ip = mtod(m, struct ip *);
781 if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
782 ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz);
783 } else {
784 ipseclog((LOG_ERR,
785 "IPv4 ESP output: size exceeds limit\n"));
786 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
787 m_freem(m);
788 error = EMSGSIZE;
789 goto fail;
790 }
791 break;
792 #endif
793 #if INET6
794 case AF_INET6:
795 /* total packet length will be computed in ip6_output() */
796 break;
797 #endif
798 }
799 }
800
801 /*
802 * pre-compute and cache intermediate key
803 */
804 error = esp_schedule(algo, sav);
805 if (error) {
806 m_freem(m);
807 IPSEC_STAT_INCREMENT(stat->out_inval);
808 goto fail;
809 }
810
811 /*
812 * encrypt the packet, based on security association
813 * and the algorithm specified.
814 */
815 if (!algo->encrypt) {
816 panic("internal error: no encrypt function");
817 }
818 KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
819 if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) {
820 /* m is already freed */
821 ipseclog((LOG_ERR, "packet encryption failure\n"));
822 IPSEC_STAT_INCREMENT(stat->out_inval);
823 error = EINVAL;
824 KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
825 goto fail;
826 }
827 KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
828
829 /*
830 * calculate ICV if required.
831 */
832 size_t siz = 0;
833 u_char authbuf[AH_MAXSUMSIZE] __attribute__((aligned(4)));
834
835 if (algo->finalizeencrypt) {
836 siz = algo->icvlen;
837 if ((*algo->finalizeencrypt)(sav, authbuf, siz)) {
838 ipseclog((LOG_ERR, "packet encryption ICV failure\n"));
839 IPSEC_STAT_INCREMENT(stat->out_inval);
840 error = EINVAL;
841 KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
842 goto fail;
843 }
844 goto fill_icv;
845 }
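	/*
	 * Algorithms that provide finalizeencrypt are combined-mode ciphers
	 * (e.g. AES-GCM): the ICV is produced as part of encryption itself, so
	 * the separate authentication pass below is skipped and the ICV is
	 * appended at fill_icv.
	 */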
846
847 if (!sav->replay[traffic_class]) {
848 goto noantireplay;
849 }
850 if (!sav->key_auth) {
851 goto noantireplay;
852 }
853 if (sav->key_auth == SADB_AALG_NONE) {
854 goto noantireplay;
855 }
856
857 {
858 const struct ah_algorithm *aalgo;
859
860 aalgo = ah_algorithm_lookup(sav->alg_auth);
861 if (!aalgo) {
862 goto noantireplay;
863 }
864 siz = ((aalgo->sumsiz)(sav) + 3) & ~(4 - 1);
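		/*
		 * Round the authenticator size up to a 4-byte multiple; a 12-byte
		 * HMAC-SHA1-96 ICV, for example, stays at 12 bytes.
		 */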
865 if (AH_MAXSUMSIZE < siz) {
866 panic("assertion failed for AH_MAXSUMSIZE");
867 }
868
869 if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) {
870 ipseclog((LOG_ERR, "ESP checksum generation failure\n"));
871 m_freem(m);
872 error = EINVAL;
873 IPSEC_STAT_INCREMENT(stat->out_inval);
874 goto fail;
875 }
876 }
877
878 fill_icv:
879 {
880 struct ip *ip;
881 u_char *p;
882
883 n = m;
884 while (n->m_next) {
885 n = n->m_next;
886 }
887
888 if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /* XXX */
889 n->m_len += siz;
890 m->m_pkthdr.len += siz;
891 p = mtod(n, u_char *) + n->m_len - siz;
892 } else {
893 struct mbuf *nn;
894
895 MGET(nn, M_DONTWAIT, MT_DATA);
896 if (!nn) {
897 ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output",
898 afnumber));
899 m_freem(m);
900 error = ENOBUFS;
901 goto fail;
902 }
903 nn->m_len = siz;
904 nn->m_next = NULL;
905 n->m_next = nn;
906 n = nn;
907 m->m_pkthdr.len += siz;
908 p = mtod(nn, u_char *);
909 }
910 bcopy(authbuf, p, siz);
911
912 /* modify IP header (for ESP header part only) */
913 switch (af) {
914 #if INET
915 case AF_INET:
916 ip = mtod(m, struct ip *);
917 if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
918 ip->ip_len = htons(ntohs(ip->ip_len) + siz);
919 } else {
920 ipseclog((LOG_ERR,
921 "IPv4 ESP output: size exceeds limit\n"));
922 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
923 m_freem(m);
924 error = EMSGSIZE;
925 goto fail;
926 }
927 break;
928 #endif
929 #if INET6
930 case AF_INET6:
931 /* total packet length will be computed in ip6_output() */
932 break;
933 #endif
934 }
935 }
936
937 if (udp_encapsulate) {
938 struct ip *ip;
939 struct ip6_hdr *ip6;
940
941 switch (af) {
942 case AF_INET:
943 ip = mtod(m, struct ip *);
944 udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2));
945 break;
946 case AF_INET6:
947 ip6 = mtod(m, struct ip6_hdr *);
948 udp->uh_ulen = htons(plen + siz + extendsiz + esphlen);
949 udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP));
950 m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
951 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
952 break;
953 }
954 }
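	/*
	 * The UDP checksum stays zero for IPv4 (it is optional there and ESP
	 * authentication already covers the payload), but IPv6 requires a valid
	 * UDP checksum, so the pseudo-header sum is seeded here and completion
	 * is left to checksum offload via csum_flags/csum_data.
	 */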
955
956 noantireplay:
957 if (net_mpklog_enabled && sav->sah != NULL &&
958 sav->sah->ipsec_if != NULL &&
959 (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) &&
960 inner_protocol == IPPROTO_TCP) {
961 MPKL_ESP_OUTPUT_TCP(esp_mpkl_log_object,
962 ntohl(spi), seq,
963 ntohs(th.th_sport), ntohs(th.th_dport),
964 ntohl(th.th_seq), ntohl(th.th_ack),
965 th.th_flags, inner_payload_len);
966 }
967
968 lck_mtx_lock(sadb_mutex);
969 if (!m) {
970 ipseclog((LOG_ERR,
971 "NULL mbuf after encryption in esp%d_output", afnumber));
972 } else {
973 stat->out_success++;
974 }
975 stat->out_esphist[sav->alg_enc]++;
976 lck_mtx_unlock(sadb_mutex);
977 key_sa_recordxfer(sav, m);
978 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6, 0, 0, 0, 0);
979 return 0;
980
981 fail:
982 #if 1
983 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, error, 0, 0, 0);
984 return error;
985 #else
986 panic("something bad in esp_output");
987 #endif
988 }
989
990 #if INET
991 int
992 esp4_output(
993 struct mbuf *m,
994 struct secasvar *sav)
995 {
996 struct ip *ip;
997 if (m->m_len < sizeof(struct ip)) {
998 ipseclog((LOG_DEBUG, "esp4_output: first mbuf too short\n"));
999 m_freem(m);
1000 return EINVAL;
1001 }
1002 ip = mtod(m, struct ip *);
1003 /* XXX assumes that m->m_next points to payload */
1004 return esp_output(m, &ip->ip_p, m->m_next, AF_INET, sav);
1005 }
1006 #endif /*INET*/
1007
1008 #if INET6
1009 int
1010 esp6_output(
1011 struct mbuf *m,
1012 u_char *nexthdrp,
1013 struct mbuf *md,
1014 struct secasvar *sav)
1015 {
1016 if (m->m_len < sizeof(struct ip6_hdr)) {
1017 ipseclog((LOG_DEBUG, "esp6_output: first mbuf too short\n"));
1018 m_freem(m);
1019 return EINVAL;
1020 }
1021 return esp_output(m, nexthdrp, md, AF_INET6, sav);
1022 }
1023 #endif /*INET6*/