/*	$FreeBSD: src/sys/netinet6/esp_output.c,v 1.1.2.3 2002/04/28 05:40:26 suz Exp $	*/
/*	$KAME: esp_output.c,v 1.44 2001/07/26 06:53:15 jinmei Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define _IP_VHL

/*
 * RFC1827/2406 Encapsulated Security Payload.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/udp.h>	/* for nat traversal */

#if INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#endif

#include <netinet6/ipsec.h>
#if INET6
#include <netinet6/ipsec6.h>
#endif
#include <netinet6/ah.h>
#if INET6
#include <netinet6/ah6.h>
#endif
#include <netinet6/esp.h>
#if INET6
#include <netinet6/esp6.h>
#endif
#include <netkey/key.h>
#include <netkey/keydb.h>

#include <net/net_osdep.h>

#include <sys/kdebug.h>
#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETIPSEC, 1)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETIPSEC, 3)
#define DBG_FNC_ESPOUT		NETDBG_CODE(DBG_NETIPSEC, (4 << 8))
#define DBG_FNC_ENCRYPT		NETDBG_CODE(DBG_NETIPSEC, (5 << 8))

static int esp_output(struct mbuf *, u_char *, struct mbuf *,
	int, struct secasvar *sav);

extern int esp_udp_encap_port;
extern u_int32_t natt_now;

extern lck_mtx_t *sadb_mutex;

/*
 * compute ESP header size.
 */
size_t
esp_hdrsiz(isr)
	struct ipsecrequest *isr;
{

	/* sanity check */
	if (isr == NULL)
		panic("esp_hdrsiz: NULL was passed.\n");


#if 0
	lck_mtx_lock(sadb_mutex);
	{
		struct secasvar *sav;
		const struct esp_algorithm *algo;
		const struct ah_algorithm *aalgo;
		size_t ivlen;
		size_t authlen;
		size_t hdrsiz;
		size_t maxpad;

		/*%%%% this needs to change - no sav in ipsecrequest any more */
		sav = isr->sav;

		if (isr->saidx.proto != IPPROTO_ESP)
			panic("unsupported mode passed to esp_hdrsiz");

		if (sav == NULL)
			goto estimate;
		if (sav->state != SADB_SASTATE_MATURE
		 && sav->state != SADB_SASTATE_DYING)
			goto estimate;

		/* we need transport mode ESP. */
		algo = esp_algorithm_lookup(sav->alg_enc);
		if (!algo)
			goto estimate;
		ivlen = sav->ivlen;
		if (ivlen < 0)
			goto estimate;

		if (algo->padbound)
			maxpad = algo->padbound;
		else
			maxpad = 4;
		maxpad += 1;	/* maximum 'extendsiz' is padbound + 1, see esp_output */

		if (sav->flags & SADB_X_EXT_OLD) {
			/* RFC 1827 */
			hdrsiz = sizeof(struct esp) + ivlen + maxpad;
		} else {
			/* RFC 2406 */
			aalgo = ah_algorithm_lookup(sav->alg_auth);
			if (aalgo && sav->replay && sav->key_auth)
				authlen = (aalgo->sumsiz)(sav);
			else
				authlen = 0;
			hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen;
		}

		/*
		 * If the security association indicates that NATT is required,
		 * add the size of the NATT encapsulation header:
		 */
		if ((sav->flags & SADB_X_EXT_NATT) != 0)
			hdrsiz += sizeof(struct udphdr) + 4;

		lck_mtx_unlock(sadb_mutex);
		return hdrsiz;
	}
estimate:
	lck_mtx_unlock(sadb_mutex);
#endif
	/*
	 * ASSUMING:
	 *	sizeof(struct newesp) > sizeof(struct esp). (8)
	 *	esp_max_ivlen() = max ivlen for CBC mode
	 *	17 = (maximum padding length without random padding length)
	 *	   + (Pad Length field) + (Next Header field).
	 *	16 = maximum ICV we support.
	 *	sizeof(struct udphdr) in case NAT traversal is used
	 */
	return sizeof(struct newesp) + esp_max_ivlen() + 17 + 16 + sizeof(struct udphdr);
}

/*
 * Modify the packet so that the payload is encrypted.
 * The mbuf (m) must start with an IPv4 or IPv6 header.
 * On failure, free the given mbuf and return a non-zero error code.
 *
 * on invocation:
 *	m   nexthdrp md
 *	v   v        v
 *	IP ......... payload
 * during the encryption:
 *	m   nexthdrp mprev md
 *	v   v        v     v
 *	IP ............... esp iv payload pad padlen nxthdr
 *	                   <--><-><------><--------------->
 *	                   esplen plen    extendsiz
 *	                       ivlen
 *	<-----> esphlen
 *	<-> hlen
 *	<-----------------> espoff
 */
static int
esp_output(m, nexthdrp, md, af, sav)
	struct mbuf *m;
	u_char *nexthdrp;
	struct mbuf *md;
	int af;
	struct secasvar *sav;
{
	struct mbuf *n;
	struct mbuf *mprev;
	struct esp *esp;
	struct esptail *esptail;
	const struct esp_algorithm *algo;
	u_int32_t spi;
	u_int8_t nxt = 0;
	size_t plen;	/* payload length to be encrypted */
	size_t espoff;
	int ivlen;
	int afnumber;
	size_t extendsiz;
	int error = 0;
	struct ipsecstat *stat;
	struct udphdr *udp = NULL;
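	/*
	 * UDP encapsulation for NAT traversal is used only for IPv4 SAs
	 * flagged with SADB_X_EXT_NATT, and only when an encapsulation
	 * port has been configured.
	 */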
	int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && af == AF_INET &&
			(esp_udp_encap_port & 0xFFFF) != 0);

	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen,0,0,0,0);
	switch (af) {
#if INET
	case AF_INET:
		afnumber = 4;
		stat = &ipsecstat;
		break;
#endif
#if INET6
	case AF_INET6:
		afnumber = 6;
		stat = &ipsec6stat;
		break;
#endif
	default:
		ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1,0,0,0,0);
		return 0;	/* no change at all */
	}

	/* some sanity check */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) {
		switch (af) {
#if INET
		case AF_INET:
		    {
			struct ip *ip;

			ip = mtod(m, struct ip *);
			ipseclog((LOG_DEBUG, "esp4_output: internal error: "
				"sav->replay is null: %x->%x, SPI=%u\n",
				(u_int32_t)ntohl(ip->ip_src.s_addr),
				(u_int32_t)ntohl(ip->ip_dst.s_addr),
				(u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			break;
		    }
#endif /*INET*/
#if INET6
		case AF_INET6:
			ipseclog((LOG_DEBUG, "esp6_output: internal error: "
				"sav->replay is null: SPI=%u\n",
				(u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
			break;
#endif /*INET6*/
		default:
			panic("esp_output: should not reach here");
		}
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2,0,0,0,0);
		return EINVAL;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
			"SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3,0,0,0,0);
		return EINVAL;
	}
	spi = sav->spi;
	ivlen = sav->ivlen;
	/* should be okay */
	if (ivlen < 0) {
		panic("invalid ivlen");
	}

    {
	/*
	 * insert ESP header.
	 * XXX inserts ESP header right after IPv4 header.  should
	 * chase the header chain.
	 * XXX sequential number
	 */
#if INET
	struct ip *ip = NULL;
#endif
#if INET6
	struct ip6_hdr *ip6 = NULL;
#endif
	size_t esplen;	/* sizeof(struct esp/newesp) */
	size_t esphlen;	/* sizeof(struct esp/newesp) + ivlen */
	size_t hlen = 0;	/* ip header len */

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}
	esphlen = esplen + ivlen;

	for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next)
		;
	if (mprev == NULL || mprev->m_next != md) {
		ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
		    afnumber));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4,0,0,0,0);
		return EINVAL;
	}

	plen = 0;
	for (n = md; n; n = n->m_next)
		plen += n->m_len;

	switch (af) {
#if INET
	case AF_INET:
		ip = mtod(m, struct ip *);
#ifdef _IP_VHL
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
		hlen = ip->ip_hl << 2;
#endif
		break;
#endif
#if INET6
	case AF_INET6:
		ip6 = mtod(m, struct ip6_hdr *);
		hlen = sizeof(*ip6);
		break;
#endif
	}

	/* make the packet over-writable */
	mprev->m_next = NULL;
	if ((md = ipsec_copypkt(md)) == NULL) {
		m_freem(m);
		error = ENOBUFS;
		goto fail;
	}
	mprev->m_next = md;

	/*
	 * Translate UDP source port back to its original value.
	 * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transport mode.
	 */
	if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
		/* if not UDP - drop it */
		if (ip->ip_p != IPPROTO_UDP) {
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EINVAL;
			goto fail;
		}

		udp = mtod(md, struct udphdr *);

		/* if src port not set in sav - find it */
		if (sav->natt_encapsulated_src_port == 0)
			if (key_natt_get_translated_port(sav) == 0) {
				m_freem(m);
				error = EINVAL;
				goto fail;
			}
		if (sav->remote_ike_port == htons(udp->uh_dport)) {
			/* translate UDP port */
			udp->uh_dport = sav->natt_encapsulated_src_port;
			udp->uh_sum = 0;	/* don't need checksum with ESP auth */
		} else {
			/* drop the packet - can't translate the port */
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EINVAL;
			goto fail;
		}
	}

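	/* offset from the start of the packet to where the ESP header will go */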
	espoff = m->m_pkthdr.len - plen;

	if (udp_encapsulate) {
		esphlen += sizeof(struct udphdr);
		espoff += sizeof(struct udphdr);
	}

	/*
	 * grow the mbuf to accommodate ESP header.
	 * before:	IP ... payload
	 * after:	IP ... [UDP] ESP IV payload
	 */
	if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
		MGET(n, M_DONTWAIT, MT_DATA);
		if (!n) {
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		n->m_len = esphlen;
		mprev->m_next = n;
		n->m_next = md;
		m->m_pkthdr.len += esphlen;
		if (udp_encapsulate) {
			udp = mtod(n, struct udphdr *);
			esp = (struct esp *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			esp = mtod(n, struct esp *);
		}
	} else {
		md->m_len += esphlen;
		md->m_data -= esphlen;
		m->m_pkthdr.len += esphlen;
		if (udp_encapsulate) {
			udp = mtod(md, struct udphdr *);
			esp = (struct esp *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			esp = mtod(md, struct esp *);
		}
	}

	switch (af) {
#if INET
	case AF_INET:
		if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len)))
			ip->ip_len = htons(ntohs(ip->ip_len) + esphlen);
		else {
			ipseclog((LOG_ERR,
			    "IPv4 ESP output: size exceeds limit\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EMSGSIZE;
			goto fail;
		}
		break;
#endif
#if INET6
	case AF_INET6:
		/* total packet length will be computed in ip6_output() */
		break;
#endif
	}
    }

	/* initialize esp header. */
	esp->esp_spi = spi;
	if ((sav->flags & SADB_X_EXT_OLD) == 0) {
		struct newesp *nesp;
		nesp = (struct newesp *)esp;
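		/*
		 * Refuse to wrap the 32-bit sequence number unless the SA
		 * explicitly allows cycling (SADB_X_EXT_CYCSEQ).
		 */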
		if (sav->replay->count == ~0) {
			if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
				/* XXX Is it noisy ? */
				ipseclog((LOG_WARNING,
				    "replay counter overflowed. %s\n",
				    ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(stat->out_inval);
				m_freem(m);
				KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5,0,0,0,0);
				return EINVAL;
			}
		}
		lck_mtx_lock(sadb_mutex);
		sav->replay->count++;
		lck_mtx_unlock(sadb_mutex);
		/*
		 * XXX sequence number must not be cycled, if the SA is
		 * installed by IKE daemon.
		 */
		nesp->esp_seq = htonl(sav->replay->count);
	}

    {
	/*
	 * find the last mbuf.  make some room for ESP trailer.
	 */
#if INET
	struct ip *ip = NULL;
#endif
	size_t padbound;
	u_char *extend;
	int i;
	int randpadmax;

	if (algo->padbound)
		padbound = algo->padbound;
	else
		padbound = 4;
	/* the ESP packet, including the nxthdr field, must be a multiple of 4 octets long */
	if (padbound < 4)
		padbound = 4;

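	/*
	 * extendsiz is the full ESP trailer: pad bytes plus the 2-octet
	 * Pad Length and Next Header fields, chosen so that plen + extendsiz
	 * is a multiple of padbound.  A value of 1 cannot hold those two
	 * octets, so the next boundary (padbound + 1) is used instead.
	 */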
	extendsiz = padbound - (plen % padbound);
	if (extendsiz == 1)
		extendsiz = padbound + 1;

	/* random padding */
	switch (af) {
#if INET
	case AF_INET:
		randpadmax = ip4_esp_randpad;
		break;
#endif
#if INET6
	case AF_INET6:
		randpadmax = ip6_esp_randpad;
		break;
#endif
	default:
		randpadmax = -1;
		break;
	}
	if (randpadmax < 0 || plen + extendsiz >= randpadmax)
		;
	else {
		int pad;

		/* round */
		randpadmax = (randpadmax / padbound) * padbound;
		pad = (randpadmax - plen + extendsiz) / padbound;

		if (pad > 0)
			pad = (random() % pad) * padbound;
		else
			pad = 0;

		/*
		 * make sure we do not pad too much.
		 * The MLEN limitation comes from the trailer attachment
		 * code below.
		 * The 256 limitation comes from sequential padding.
		 * The 1-octet Pad Length field in the ESP trailer also
		 * imposes a limit, but a less strict one than sequential
		 * padding, since that field does not count the last 2 octets.
		 */
		if (extendsiz + pad <= MLEN && extendsiz + pad < 256)
			extendsiz += pad;
	}

#if DIAGNOSTIC
	if (extendsiz > MLEN || extendsiz >= 256)
		panic("extendsiz too big in esp_output");
#endif

	n = m;
	while (n->m_next)
		n = n->m_next;

	/*
	 * if M_EXT, the external mbuf data may be shared among
	 * two consecutive TCP packets, and it may be unsafe to use the
	 * trailing space.
	 */
	if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) {
		extend = mtod(n, u_char *) + n->m_len;
		n->m_len += extendsiz;
		m->m_pkthdr.len += extendsiz;
	} else {
		struct mbuf *nn;

		MGET(nn, M_DONTWAIT, MT_DATA);
		if (!nn) {
			ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf",
			    afnumber));
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		extend = mtod(nn, u_char *);
		nn->m_len = extendsiz;
		nn->m_next = NULL;
		n->m_next = nn;
		n = nn;
		m->m_pkthdr.len += extendsiz;
	}
	switch (sav->flags & SADB_X_EXT_PMASK) {
	case SADB_X_EXT_PRAND:
		key_randomfill(extend, extendsiz);
		break;
	case SADB_X_EXT_PZERO:
		bzero(extend, extendsiz);
		break;
	case SADB_X_EXT_PSEQ:
		for (i = 0; i < extendsiz; i++)
			extend[i] = (i + 1) & 0xff;
		break;
	}

	nxt = *nexthdrp;
	if (udp_encapsulate) {
		*nexthdrp = IPPROTO_UDP;

		/* Fill out the UDP header */
		udp->uh_sport = ntohs((u_short)esp_udp_encap_port);
		udp->uh_dport = ntohs(sav->remote_ike_port);
		// udp->uh_ulen set later, after all length tweaks are complete
		udp->uh_sum = 0;

		/* Update last sent so we know if we need to send keepalive */
		sav->natt_last_activity = natt_now;
	} else {
		*nexthdrp = IPPROTO_ESP;
	}

	/* initialize esp trailer. */
	esptail = (struct esptail *)
		(mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail));
	esptail->esp_nxt = nxt;
	esptail->esp_padlen = extendsiz - 2;

	/* modify IP header (for ESP header part only) */
	switch (af) {
#if INET
	case AF_INET:
		ip = mtod(m, struct ip *);
		if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len)))
			ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz);
		else {
			ipseclog((LOG_ERR,
			    "IPv4 ESP output: size exceeds limit\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EMSGSIZE;
			goto fail;
		}
		break;
#endif
#if INET6
	case AF_INET6:
		/* total packet length will be computed in ip6_output() */
		break;
#endif
	}
    }

	/*
	 * pre-compute and cache intermediate key
	 */
	error = esp_schedule(algo, sav);
	if (error) {
		m_freem(m);
		IPSEC_STAT_INCREMENT(stat->out_inval);
		goto fail;
	}

	/*
	 * encrypt the packet, based on security association
	 * and the algorithm specified.
	 */
	if (!algo->encrypt)
		panic("internal error: no encrypt function");
	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0,0,0,0,0);
	if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) {
		/* m is already freed */
		ipseclog((LOG_ERR, "packet encryption failure\n"));
		IPSEC_STAT_INCREMENT(stat->out_inval);
		error = EINVAL;
		KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1,error,0,0,0);
		goto fail;
	}
	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2,0,0,0,0);

	/*
	 * calculate ICV if required.
	 */
	if (!sav->replay)
		goto noantireplay;
	if (!sav->key_auth)
		goto noantireplay;
	if (sav->alg_auth == SADB_AALG_NONE)
		goto noantireplay;

    {
	const struct ah_algorithm *aalgo;
	u_char authbuf[AH_MAXSUMSIZE];
	u_char *p;
	size_t siz;
#if INET
	struct ip *ip;
#endif

	aalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!aalgo)
		goto noantireplay;
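	/* round the ICV size up to a multiple of 4 octets */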
	siz = ((aalgo->sumsiz)(sav) + 3) & ~(4 - 1);
	if (AH_MAXSUMSIZE < siz)
		panic("assertion failed for AH_MAXSUMSIZE");

	if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) {
		ipseclog((LOG_ERR, "ESP checksum generation failure\n"));
		m_freem(m);
		error = EINVAL;
		IPSEC_STAT_INCREMENT(stat->out_inval);
		goto fail;
	}

	n = m;
	while (n->m_next)
		n = n->m_next;

	if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) {	/* XXX */
		n->m_len += siz;
		m->m_pkthdr.len += siz;
		p = mtod(n, u_char *) + n->m_len - siz;
	} else {
		struct mbuf *nn;

		MGET(nn, M_DONTWAIT, MT_DATA);
		if (!nn) {
			ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output",
			    afnumber));
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		nn->m_len = siz;
		nn->m_next = NULL;
		n->m_next = nn;
		n = nn;
		m->m_pkthdr.len += siz;
		p = mtod(nn, u_char *);
	}
	bcopy(authbuf, p, siz);

	/* modify IP header (for ESP header part only) */
	switch (af) {
#if INET
	case AF_INET:
		ip = mtod(m, struct ip *);
		if (siz < (IP_MAXPACKET - ntohs(ip->ip_len)))
			ip->ip_len = htons(ntohs(ip->ip_len) + siz);
		else {
			ipseclog((LOG_ERR,
			    "IPv4 ESP output: size exceeds limit\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			m_freem(m);
			error = EMSGSIZE;
			goto fail;
		}
		break;
#endif
#if INET6
	case AF_INET6:
		/* total packet length will be computed in ip6_output() */
		break;
#endif
	}
    }

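	/*
	 * With all length adjustments done, fill in the UDP length:
	 * the IP total length minus the IP header, i.e. the UDP header
	 * plus the encapsulated ESP packet.
	 */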
	if (udp_encapsulate) {
		struct ip *ip;
		ip = mtod(m, struct ip *);
		udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2));
	}


noantireplay:
	lck_mtx_lock(sadb_mutex);
	if (!m) {
		ipseclog((LOG_ERR,
		    "NULL mbuf after encryption in esp%d_output", afnumber));
	} else
		stat->out_success++;
	stat->out_esphist[sav->alg_enc]++;
	lck_mtx_unlock(sadb_mutex);
	key_sa_recordxfer(sav, m);
	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6,0,0,0,0);
	return 0;

fail:
#if 1
	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7,error,0,0,0);
	return error;
#else
	panic("something bad in esp_output");
#endif
}

#if INET
int
esp4_output(m, sav)
	struct mbuf *m;
	struct secasvar *sav;
{
	struct ip *ip;
	if (m->m_len < sizeof(struct ip)) {
		ipseclog((LOG_DEBUG, "esp4_output: first mbuf too short\n"));
		m_freem(m);
		return EINVAL;
	}
	ip = mtod(m, struct ip *);
	/* XXX assumes that m->m_next points to payload */
	return esp_output(m, &ip->ip_p, m->m_next, AF_INET, sav);
}
#endif /*INET*/

#if INET6
int
esp6_output(m, nexthdrp, md, sav)
	struct mbuf *m;
	u_char *nexthdrp;
	struct mbuf *md;
	struct secasvar *sav;
{
	if (m->m_len < sizeof(struct ip6_hdr)) {
		ipseclog((LOG_DEBUG, "esp6_output: first mbuf too short\n"));
		m_freem(m);
		return EINVAL;
	}
	return esp_output(m, nexthdrp, md, AF_INET6, sav);
}
#endif /*INET6*/