/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2008-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <net/if.h>
#include <net/route.h>
+#include <net/multi_layer_pkt_log.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/udp.h> /* for nat traversal */
+#include <netinet/tcp.h>
+#include <netinet/in_tclass.h>
#if INET6
#include <netinet/ip6.h>
#include <net/net_osdep.h>
#include <sys/kdebug.h>
-#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
-#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
-#define DBG_FNC_ESPOUT NETDBG_CODE(DBG_NETIPSEC, (4 << 8))
-#define DBG_FNC_ENCRYPT NETDBG_CODE(DBG_NETIPSEC, (5 << 8))
+#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
+#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
+#define DBG_FNC_ESPOUT NETDBG_CODE(DBG_NETIPSEC, (4 << 8))
+#define DBG_FNC_ENCRYPT NETDBG_CODE(DBG_NETIPSEC, (5 << 8))
static int esp_output(struct mbuf *, u_char *, struct mbuf *,
- int, struct secasvar *sav);
+ int, struct secasvar *sav);
-extern int esp_udp_encap_port;
-extern u_int32_t natt_now;
+extern int esp_udp_encap_port;
+extern u_int64_t natt_now;
extern lck_mtx_t *sadb_mutex;
* compute ESP header size.
*/
size_t
-esp_hdrsiz(isr)
- struct ipsecrequest *isr;
+esp_hdrsiz(__unused struct ipsecrequest *isr)
{
-
+#if 0
/* sanity check */
- if (isr == NULL)
+ if (isr == NULL) {
panic("esp_hdrsiz: NULL was passed.\n");
+ }
-#if 0
lck_mtx_lock(sadb_mutex);
{
struct secasvar *sav;
size_t authlen;
size_t hdrsiz;
size_t maxpad;
-
+
/*%%%% this needs to change - no sav in ipsecrequest any more */
sav = isr->sav;
-
- if (isr->saidx.proto != IPPROTO_ESP)
+
+ if (isr->saidx.proto != IPPROTO_ESP) {
panic("unsupported mode passed to esp_hdrsiz");
-
- if (sav == NULL)
+ }
+
+ if (sav == NULL) {
goto estimate;
+ }
if (sav->state != SADB_SASTATE_MATURE
- && sav->state != SADB_SASTATE_DYING)
+ && sav->state != SADB_SASTATE_DYING) {
goto estimate;
-
+ }
+
/* we need transport mode ESP. */
algo = esp_algorithm_lookup(sav->alg_enc);
- if (!algo)
+ if (!algo) {
goto estimate;
+ }
ivlen = sav->ivlen;
- if (ivlen < 0)
+ if (ivlen < 0) {
goto estimate;
-
- if (algo->padbound)
+ }
+
+ if (algo->padbound) {
maxpad = algo->padbound;
- else
+ } else {
maxpad = 4;
+ }
maxpad += 1; /* maximum 'extendsiz' is padbound + 1, see esp_output */
-
+
if (sav->flags & SADB_X_EXT_OLD) {
/* RFC 1827 */
hdrsiz = sizeof(struct esp) + ivlen + maxpad;
} else {
/* RFC 2406 */
aalgo = ah_algorithm_lookup(sav->alg_auth);
- if (aalgo && sav->replay && sav->key_auth)
+ if (aalgo && sav->replay[0] != NULL && sav->key_auth) {
authlen = (aalgo->sumsiz)(sav);
- else
+ } else {
authlen = 0;
+ }
hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen;
}
-
+
/*
* If the security association indicates that NATT is required,
* add the size of the NATT encapsulation header:
*/
- if ((sav->flags & SADB_X_EXT_NATT) != 0) hdrsiz += sizeof(struct udphdr) + 4;
-
+ if ((sav->flags & SADB_X_EXT_NATT) != 0) {
+ hdrsiz += sizeof(struct udphdr) + 4;
+ }
+
lck_mtx_unlock(sadb_mutex);
return hdrsiz;
}
estimate:
- lck_mtx_unlock(sadb_mutex);
+ lck_mtx_unlock(sadb_mutex);
#endif
/*
* ASSUMING:
* <-----------------> espoff
*/
static int
-esp_output(m, nexthdrp, md, af, sav)
- struct mbuf *m;
- u_char *nexthdrp;
- struct mbuf *md;
- int af;
- struct secasvar *sav;
+esp_output(
+ struct mbuf *m,
+ u_char *nexthdrp,
+ struct mbuf *md,
+ int af,
+ struct secasvar *sav)
{
struct mbuf *n;
struct mbuf *mprev;
struct esp *esp;
struct esptail *esptail;
const struct esp_algorithm *algo;
+ struct tcphdr th = {};
u_int32_t spi;
+ u_int32_t seq = 0;
+ u_int32_t inner_payload_len = 0;
+ u_int8_t inner_protocol = 0;
u_int8_t nxt = 0;
- size_t plen; /*payload length to be encrypted*/
+ size_t plen; /*payload length to be encrypted*/
size_t espoff;
+ size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */
int ivlen;
int afnumber;
size_t extendsiz;
int error = 0;
struct ipsecstat *stat;
struct udphdr *udp = NULL;
- int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && af == AF_INET &&
- (esp_udp_encap_port & 0xFFFF) != 0);
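+ /*
+ * Use UDP encapsulation (NAT traversal) when the SA carries
+ * SADB_X_EXT_NATT and either the global encapsulation port or a
+ * per-SA encapsulated source port is known; both IPv4 and IPv6
+ * are eligible.
+ */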
+ int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) &&
+ ((esp_udp_encap_port & 0xFFFF) != 0 || sav->natt_encapsulated_src_port != 0));
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen,0,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0);
switch (af) {
#if INET
case AF_INET:
#endif
default:
ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1,0,0,0,0);
- return 0; /* no change at all */
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, 0, 0, 0, 0);
+ return 0; /* no change at all */
+ }
+
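+ /*
+ * If the SA was negotiated with per-traffic-class sequence numbers
+ * (SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS), derive the traffic class
+ * from the packet's DSCP value; sav->replay[] is indexed by that
+ * class below.
+ */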
+ mbuf_traffic_class_t traffic_class = 0;
+ if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
+ SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
+ u_int8_t dscp = 0;
+ switch (af) {
+#if INET
+ case AF_INET:
+ {
+ struct ip *ip = mtod(m, struct ip *);
+ dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
+ break;
+ }
+#endif /*INET*/
+#if INET6
+ case AF_INET6:
+ {
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
+ break;
+ }
+#endif /*INET6*/
+ default:
+ panic("esp_output: should not reach here");
+ }
+ traffic_class = rfc4594_dscp_to_tc(dscp);
}
/* some sanity check */
- if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) {
+ if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] == NULL) {
switch (af) {
#if INET
case AF_INET:
- {
+ {
struct ip *ip;
ip = mtod(m, struct ip *);
ipseclog((LOG_DEBUG, "esp4_output: internal error: "
- "sav->replay is null: %x->%x, SPI=%u\n",
- (u_int32_t)ntohl(ip->ip_src.s_addr),
- (u_int32_t)ntohl(ip->ip_dst.s_addr),
- (u_int32_t)ntohl(sav->spi)));
+ "sav->replay is null: %x->%x, SPI=%u\n",
+ (u_int32_t)ntohl(ip->ip_src.s_addr),
+ (u_int32_t)ntohl(ip->ip_dst.s_addr),
+ (u_int32_t)ntohl(sav->spi)));
IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
break;
- }
+ }
#endif /*INET*/
#if INET6
case AF_INET6:
ipseclog((LOG_DEBUG, "esp6_output: internal error: "
- "sav->replay is null: SPI=%u\n",
- (u_int32_t)ntohl(sav->spi)));
+ "sav->replay is null: SPI=%u\n",
+ (u_int32_t)ntohl(sav->spi)));
IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
break;
#endif /*INET6*/
panic("esp_output: should not reach here");
}
m_freem(m);
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2,0,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2, 0, 0, 0, 0);
return EINVAL;
}
ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
"SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
m_freem(m);
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3,0,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3, 0, 0, 0, 0);
return EINVAL;
}
spi = sav->spi;
panic("invalid ivlen");
}
- {
- /*
- * insert ESP header.
- * XXX inserts ESP header right after IPv4 header. should
- * chase the header chain.
- * XXX sequential number
- */
+ {
+ /*
+ * insert ESP header.
+ * XXX inserts ESP header right after IPv4 header. should
+ * chase the header chain.
+ * XXX sequential number
+ */
#if INET
- struct ip *ip = NULL;
+ struct ip *ip = NULL;
#endif
#if INET6
- struct ip6_hdr *ip6 = NULL;
+ struct ip6_hdr *ip6 = NULL;
#endif
- size_t esplen; /* sizeof(struct esp/newesp) */
- size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */
- size_t hlen = 0; /* ip header len */
+ size_t esplen; /* sizeof(struct esp/newesp) */
+ size_t hlen = 0; /* ip header len */
- if (sav->flags & SADB_X_EXT_OLD) {
- /* RFC 1827 */
- esplen = sizeof(struct esp);
- } else {
- /* RFC 2406 */
- if (sav->flags & SADB_X_EXT_DERIV)
+ if (sav->flags & SADB_X_EXT_OLD) {
+ /* RFC 1827 */
esplen = sizeof(struct esp);
- else
- esplen = sizeof(struct newesp);
- }
- esphlen = esplen + ivlen;
+ } else {
+ /* RFC 2406 */
+ if (sav->flags & SADB_X_EXT_DERIV) {
+ esplen = sizeof(struct esp);
+ } else {
+ esplen = sizeof(struct newesp);
+ }
+ }
+ esphlen = esplen + ivlen;
- for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next)
- ;
- if (mprev == NULL || mprev->m_next != md) {
- ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
- afnumber));
- m_freem(m);
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4,0,0,0,0);
- return EINVAL;
- }
+ for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) {
+ ;
+ }
+ if (mprev == NULL || mprev->m_next != md) {
+ ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
+ afnumber));
+ m_freem(m);
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4, 0, 0, 0, 0);
+ return EINVAL;
+ }
- plen = 0;
- for (n = md; n; n = n->m_next)
- plen += n->m_len;
+ plen = 0;
+ for (n = md; n; n = n->m_next) {
+ plen += n->m_len;
+ }
- switch (af) {
+ switch (af) {
#if INET
- case AF_INET:
- ip = mtod(m, struct ip *);
+ case AF_INET:
+ ip = mtod(m, struct ip *);
#ifdef _IP_VHL
- hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
- hlen = ip->ip_hl << 2;
+ hlen = ip->ip_hl << 2;
#endif
- break;
+ break;
#endif
#if INET6
- case AF_INET6:
- ip6 = mtod(m, struct ip6_hdr *);
- hlen = sizeof(*ip6);
- break;
+ case AF_INET6:
+ ip6 = mtod(m, struct ip6_hdr *);
+ hlen = sizeof(*ip6);
+ break;
#endif
- }
+ }
- /* make the packet over-writable */
- mprev->m_next = NULL;
- if ((md = ipsec_copypkt(md)) == NULL) {
- m_freem(m);
- error = ENOBUFS;
- goto fail;
- }
- mprev->m_next = md;
-
- /*
- * Translate UDP source port back to its original value.
- * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transort mode.
- */
- if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
- /* if not UDP - drop it */
- if (ip->ip_p != IPPROTO_UDP) {
- IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
+ /* grab info for packet logging */
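+ /*
+ * With multi-layer packet logging (IFXF_MPK_LOG) enabled on the
+ * ipsec interface, capture the inner TCP ports, seq/ack, flags and
+ * payload length before encryption; they are logged together with
+ * the ESP SPI and sequence number via MPKL_ESP_OUTPUT_TCP below.
+ */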
+ struct secashead *sah = sav->sah;
+ if (net_mpklog_enabled &&
+ sah != NULL && sah->ipsec_if != NULL) {
+ ifnet_t ifp = sah->ipsec_if;
+
+ if ((ifp->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
+ size_t iphlen = 0;
+
+ if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
+ struct ip *inner_ip = mtod(md, struct ip *);
+ if (IP_VHL_V(inner_ip->ip_vhl) == IPVERSION) {
+#ifdef _IP_VHL
+ iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
+#else
+ iphlen = inner_ip->ip_hl << 2;
+#endif
+ inner_protocol = inner_ip->ip_p;
+ } else if (IP_VHL_V(inner_ip->ip_vhl) == IPV6_VERSION) {
+ struct ip6_hdr *inner_ip6 = mtod(md, struct ip6_hdr *);
+ iphlen = sizeof(struct ip6_hdr);
+ inner_protocol = inner_ip6->ip6_nxt;
+ }
+
+ if (inner_protocol == IPPROTO_TCP) {
+ if ((int)(iphlen + sizeof(th)) <=
+ (m->m_pkthdr.len - m->m_len)) {
+ m_copydata(md, iphlen, sizeof(th), (u_int8_t *)&th);
+ }
+
+ inner_payload_len = m->m_pkthdr.len - m->m_len - iphlen - (th.th_off << 2);
+ }
+ } else {
+ iphlen = hlen;
+ if (af == AF_INET) {
+ inner_protocol = ip->ip_p;
+ } else if (af == AF_INET6) {
+ inner_protocol = ip6->ip6_nxt;
+ }
+
+ if (inner_protocol == IPPROTO_TCP) {
+ if ((int)(iphlen + sizeof(th)) <=
+ m->m_pkthdr.len) {
+ m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
+ }
+
+ inner_payload_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
+ }
+ }
+ }
+ }
+
+ /* make the packet over-writable */
+ mprev->m_next = NULL;
+ if ((md = ipsec_copypkt(md)) == NULL) {
m_freem(m);
- error = EINVAL;
+ error = ENOBUFS;
goto fail;
- }
-
- udp = mtod(md, struct udphdr *);
+ }
+ mprev->m_next = md;
- /* if src port not set in sav - find it */
- if (sav->natt_encapsulated_src_port == 0)
- if (key_natt_get_translated_port(sav) == 0) {
+ /*
+ * Translate UDP source port back to its original value.
+ * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transport mode.
+ */
+ if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
+ /* if not UDP - drop it */
+ if (ip->ip_p != IPPROTO_UDP) {
+ IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
m_freem(m);
error = EINVAL;
goto fail;
}
- if (sav->remote_ike_port == htons(udp->uh_dport)) {
- /* translate UDP port */
- udp->uh_dport = sav->natt_encapsulated_src_port;
- udp->uh_sum = 0; /* don't need checksum with ESP auth */
- } else {
- /* drop the packet - can't translate the port */
- IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
- m_freem(m);
- error = EINVAL;
- goto fail;
- }
- }
-
- espoff = m->m_pkthdr.len - plen;
-
- if (udp_encapsulate) {
- esphlen += sizeof(struct udphdr);
- espoff += sizeof(struct udphdr);
- }
+ udp = mtod(md, struct udphdr *);
- /*
- * grow the mbuf to accomodate ESP header.
- * before: IP ... payload
- * after: IP ... [UDP] ESP IV payload
- */
- if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
- MGET(n, M_DONTWAIT, MT_DATA);
- if (!n) {
- m_freem(m);
- error = ENOBUFS;
- goto fail;
+ /* if src port not set in sav - find it */
+ if (sav->natt_encapsulated_src_port == 0) {
+ if (key_natt_get_translated_port(sav) == 0) {
+ m_freem(m);
+ error = EINVAL;
+ goto fail;
+ }
+ }
+ if (sav->remote_ike_port == htons(udp->uh_dport)) {
+ /* translate UDP port */
+ udp->uh_dport = sav->natt_encapsulated_src_port;
+ udp->uh_sum = 0; /* don't need checksum with ESP auth */
+ } else {
+ /* drop the packet - can't translate the port */
+ IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
+ m_freem(m);
+ error = EINVAL;
+ goto fail;
+ }
}
- n->m_len = esphlen;
- mprev->m_next = n;
- n->m_next = md;
- m->m_pkthdr.len += esphlen;
+
+
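+ /*
+ * espoff is where the new header block will be inserted, i.e. the
+ * end of the outer headers (total length minus payload length); it
+ * is advanced past the UDP header below when encapsulating.
+ */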
+ espoff = m->m_pkthdr.len - plen;
+
if (udp_encapsulate) {
- udp = mtod(n, struct udphdr *);
- esp = (struct esp *)((caddr_t)udp + sizeof(struct udphdr));
- } else {
- esp = mtod(n, struct esp *);
+ esphlen += sizeof(struct udphdr);
+ espoff += sizeof(struct udphdr);
}
- } else {
- md->m_len += esphlen;
- md->m_data -= esphlen;
- m->m_pkthdr.len += esphlen;
- esp = mtod(md, struct esp *);
- if (udp_encapsulate) {
- udp = mtod(md, struct udphdr *);
- esp = (struct esp *)((caddr_t)udp + sizeof(struct udphdr));
+
+ /*
+ * grow the mbuf to accommodate the ESP header.
+ * before: IP ... payload
+ * after: IP ... [UDP] ESP IV payload
+ */
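+ /*
+ * If md has no leading space for the new headers, or is backed by
+ * an external cluster that may be shared, prepend a fresh mbuf for
+ * the [UDP] ESP IV block; otherwise just extend md's data area
+ * backwards.
+ */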
+ if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
+ MGET(n, M_DONTWAIT, MT_DATA);
+ if (!n) {
+ m_freem(m);
+ error = ENOBUFS;
+ goto fail;
+ }
+ n->m_len = esphlen;
+ mprev->m_next = n;
+ n->m_next = md;
+ m->m_pkthdr.len += esphlen;
+ if (udp_encapsulate) {
+ udp = mtod(n, struct udphdr *);
+ esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
+ } else {
+ esp = mtod(n, struct esp *);
+ }
} else {
+ md->m_len += esphlen;
+ md->m_data -= esphlen;
+ m->m_pkthdr.len += esphlen;
esp = mtod(md, struct esp *);
+ if (udp_encapsulate) {
+ udp = mtod(md, struct udphdr *);
+ esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
+ } else {
+ esp = mtod(md, struct esp *);
+ }
}
- }
-
- switch (af) {
+
+ switch (af) {
#if INET
- case AF_INET:
- if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len)))
- ip->ip_len = htons(ntohs(ip->ip_len) + esphlen);
- else {
- ipseclog((LOG_ERR,
- "IPv4 ESP output: size exceeds limit\n"));
- IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
- m_freem(m);
- error = EMSGSIZE;
- goto fail;
- }
- break;
+ case AF_INET:
+ if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) {
+ ip->ip_len = htons(ntohs(ip->ip_len) + esphlen);
+ } else {
+ ipseclog((LOG_ERR,
+ "IPv4 ESP output: size exceeds limit\n"));
+ IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
+ m_freem(m);
+ error = EMSGSIZE;
+ goto fail;
+ }
+ break;
#endif
#if INET6
- case AF_INET6:
- /* total packet length will be computed in ip6_output() */
- break;
+ case AF_INET6:
+ /* total packet length will be computed in ip6_output() */
+ break;
#endif
+ }
}
- }
/* initialize esp header. */
esp->esp_spi = spi;
if ((sav->flags & SADB_X_EXT_OLD) == 0) {
struct newesp *nesp;
nesp = (struct newesp *)esp;
- if (sav->replay->count == ~0) {
+ if (sav->replay[traffic_class]->count == sav->replay[traffic_class]->lastseq) {
if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
/* XXX Is it noisy ? */
ipseclog((LOG_WARNING,
ipsec_logsastr(sav)));
IPSEC_STAT_INCREMENT(stat->out_inval);
m_freem(m);
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5,0,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5, 0, 0, 0, 0);
return EINVAL;
}
}
lck_mtx_lock(sadb_mutex);
- sav->replay->count++;
+ sav->replay[traffic_class]->count++;
lck_mtx_unlock(sadb_mutex);
/*
* XXX sequence number must not be cycled, if the SA is
* installed by IKE daemon.
*/
- nesp->esp_seq = htonl(sav->replay->count);
+ nesp->esp_seq = htonl(sav->replay[traffic_class]->count);
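+ /* stash the sequence number for the packet-logging call at the end */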
+ seq = sav->replay[traffic_class]->count;
}
- {
- /*
- * find the last mbuf. make some room for ESP trailer.
- */
+ {
+ /*
+ * find the last mbuf. make some room for ESP trailer.
+ */
#if INET
- struct ip *ip = NULL;
+ struct ip *ip = NULL;
#endif
- size_t padbound;
- u_char *extend;
- int i;
- int randpadmax;
-
- if (algo->padbound)
- padbound = algo->padbound;
- else
- padbound = 4;
- /* ESP packet, including nxthdr field, must be length of 4n */
- if (padbound < 4)
- padbound = 4;
-
- extendsiz = padbound - (plen % padbound);
- if (extendsiz == 1)
- extendsiz = padbound + 1;
-
- /* random padding */
- switch (af) {
+ size_t padbound;
+ u_char *extend;
+ int i;
+ int randpadmax;
+
+ if (algo->padbound) {
+ padbound = algo->padbound;
+ } else {
+ padbound = 4;
+ }
+ /* ESP packet, including nxthdr field, must be length of 4n */
+ if (padbound < 4) {
+ padbound = 4;
+ }
+
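+ /*
+ * extendsiz is the ESP trailer: pad bytes plus the two fixed octets
+ * (pad length and next header), sized so that plen + extendsiz is a
+ * multiple of padbound; if only one byte would remain, use a full
+ * extra pad block so both trailer octets still fit.
+ */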
+ extendsiz = padbound - (plen % padbound);
+ if (extendsiz == 1) {
+ extendsiz = padbound + 1;
+ }
+
+ /* random padding */
+ switch (af) {
#if INET
- case AF_INET:
- randpadmax = ip4_esp_randpad;
- break;
+ case AF_INET:
+ randpadmax = ip4_esp_randpad;
+ break;
#endif
#if INET6
- case AF_INET6:
- randpadmax = ip6_esp_randpad;
- break;
+ case AF_INET6:
+ randpadmax = ip6_esp_randpad;
+ break;
#endif
- default:
- randpadmax = -1;
- break;
- }
- if (randpadmax < 0 || plen + extendsiz >= randpadmax)
- ;
- else {
- int pad;
+ default:
+ randpadmax = -1;
+ break;
+ }
+ if (randpadmax < 0 || plen + extendsiz >= randpadmax) {
+ ;
+ } else {
+ int pad;
- /* round */
- randpadmax = (randpadmax / padbound) * padbound;
- pad = (randpadmax - plen + extendsiz) / padbound;
+ /* round */
+ randpadmax = (randpadmax / padbound) * padbound;
+ pad = (randpadmax - plen + extendsiz) / padbound;
- if (pad > 0)
- pad = (random() % pad) * padbound;
- else
- pad = 0;
+ if (pad > 0) {
+ pad = (random() % pad) * padbound;
+ } else {
+ pad = 0;
+ }
- /*
- * make sure we do not pad too much.
- * MLEN limitation comes from the trailer attachment
- * code below.
- * 256 limitation comes from sequential padding.
- * also, the 1-octet length field in ESP trailer imposes
- * limitation (but is less strict than sequential padding
- * as length field do not count the last 2 octets).
- */
- if (extendsiz + pad <= MLEN && extendsiz + pad < 256)
- extendsiz += pad;
- }
+ /*
+ * make sure we do not pad too much.
+ * MLEN limitation comes from the trailer attachment
+ * code below.
+ * 256 limitation comes from sequential padding.
+ * also, the 1-octet length field in the ESP trailer imposes a
+ * limitation (but it is less strict than sequential padding,
+ * as the length field does not count the last 2 octets).
+ */
+ if (extendsiz + pad <= MLEN && extendsiz + pad < 256) {
+ extendsiz += pad;
+ }
+ }
#if DIAGNOSTIC
- if (extendsiz > MLEN || extendsiz >= 256)
- panic("extendsiz too big in esp_output");
+ if (extendsiz > MLEN || extendsiz >= 256) {
+ panic("extendsiz too big in esp_output");
+ }
#endif
- n = m;
- while (n->m_next)
- n = n->m_next;
+ n = m;
+ while (n->m_next) {
+ n = n->m_next;
+ }
- /*
- * if M_EXT, the external mbuf data may be shared among
- * two consequtive TCP packets, and it may be unsafe to use the
- * trailing space.
- */
- if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) {
- extend = mtod(n, u_char *) + n->m_len;
- n->m_len += extendsiz;
- m->m_pkthdr.len += extendsiz;
- } else {
- struct mbuf *nn;
+ /*
+ * if M_EXT, the external mbuf data may be shared among
+ * two consecutive TCP packets, and it may be unsafe to use the
+ * trailing space.
+ */
+ if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) {
+ extend = mtod(n, u_char *) + n->m_len;
+ n->m_len += extendsiz;
+ m->m_pkthdr.len += extendsiz;
+ } else {
+ struct mbuf *nn;
- MGET(nn, M_DONTWAIT, MT_DATA);
- if (!nn) {
- ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf",
- afnumber));
- m_freem(m);
- error = ENOBUFS;
- goto fail;
+ MGET(nn, M_DONTWAIT, MT_DATA);
+ if (!nn) {
+ ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf",
+ afnumber));
+ m_freem(m);
+ error = ENOBUFS;
+ goto fail;
+ }
+ extend = mtod(nn, u_char *);
+ nn->m_len = extendsiz;
+ nn->m_next = NULL;
+ n->m_next = nn;
+ n = nn;
+ m->m_pkthdr.len += extendsiz;
+ }
+ switch (sav->flags & SADB_X_EXT_PMASK) {
+ case SADB_X_EXT_PRAND:
+ key_randomfill(extend, extendsiz);
+ break;
+ case SADB_X_EXT_PZERO:
+ bzero(extend, extendsiz);
+ break;
+ case SADB_X_EXT_PSEQ:
+ for (i = 0; i < extendsiz; i++) {
+ extend[i] = (i + 1) & 0xff;
+ }
+ break;
}
- extend = mtod(nn, u_char *);
- nn->m_len = extendsiz;
- nn->m_next = NULL;
- n->m_next = nn;
- n = nn;
- m->m_pkthdr.len += extendsiz;
- }
- switch (sav->flags & SADB_X_EXT_PMASK) {
- case SADB_X_EXT_PRAND:
- key_randomfill(extend, extendsiz);
- break;
- case SADB_X_EXT_PZERO:
- bzero(extend, extendsiz);
- break;
- case SADB_X_EXT_PSEQ:
- for (i = 0; i < extendsiz; i++)
- extend[i] = (i + 1) & 0xff;
- break;
- }
-
- nxt = *nexthdrp;
- if (udp_encapsulate) {
- *nexthdrp = IPPROTO_UDP;
-
- /* Fill out the UDP header */
- udp->uh_sport = ntohs((u_short)esp_udp_encap_port);
- udp->uh_dport = ntohs(sav->remote_ike_port);
-// udp->uh_len set later, after all length tweaks are complete
- udp->uh_sum = 0;
-
- /* Update last sent so we know if we need to send keepalive */
- sav->natt_last_activity = natt_now;
- } else {
- *nexthdrp = IPPROTO_ESP;
- }
- /* initialize esp trailer. */
- esptail = (struct esptail *)
- (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail));
- esptail->esp_nxt = nxt;
- esptail->esp_padlen = extendsiz - 2;
+ nxt = *nexthdrp;
+ if (udp_encapsulate) {
+ *nexthdrp = IPPROTO_UDP;
- /* modify IP header (for ESP header part only) */
- switch (af) {
-#if INET
- case AF_INET:
- ip = mtod(m, struct ip *);
- if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len)))
- ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz);
- else {
- ipseclog((LOG_ERR,
- "IPv4 ESP output: size exceeds limit\n"));
- IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
- m_freem(m);
- error = EMSGSIZE;
- goto fail;
+ /* Fill out the UDP header */
+ if (sav->natt_encapsulated_src_port != 0) {
+ udp->uh_sport = (u_short)sav->natt_encapsulated_src_port;
+ } else {
+ udp->uh_sport = htons((u_short)esp_udp_encap_port);
+ }
+ udp->uh_dport = htons(sav->remote_ike_port);
+ // udp->uh_ulen set later, after all length tweaks are complete
+ udp->uh_sum = 0;
+
+ /* Update last sent so we know if we need to send keepalive */
+ sav->natt_last_activity = natt_now;
+ } else {
+ *nexthdrp = IPPROTO_ESP;
}
- break;
+
+ /* initialize esp trailer. */
+ esptail = (struct esptail *)
+ (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail));
+ esptail->esp_nxt = nxt;
+ esptail->esp_padlen = extendsiz - 2;
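+ /*
+ * esp_padlen counts only the pad bytes; the two fixed trailer
+ * octets are not included, hence extendsiz - 2.
+ */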
+
+ /* modify IP header (for ESP header part only) */
+ switch (af) {
+#if INET
+ case AF_INET:
+ ip = mtod(m, struct ip *);
+ if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
+ ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz);
+ } else {
+ ipseclog((LOG_ERR,
+ "IPv4 ESP output: size exceeds limit\n"));
+ IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
+ m_freem(m);
+ error = EMSGSIZE;
+ goto fail;
+ }
+ break;
#endif
#if INET6
- case AF_INET6:
- /* total packet length will be computed in ip6_output() */
- break;
+ case AF_INET6:
+ /* total packet length will be computed in ip6_output() */
+ break;
#endif
+ }
}
- }
/*
* pre-compute and cache intermediate key
* encrypt the packet, based on security association
* and the algorithm specified.
*/
- if (!algo->encrypt)
+ if (!algo->encrypt) {
panic("internal error: no encrypt function");
- KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0,0,0,0,0);
+ }
+ KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) {
/* m is already freed */
ipseclog((LOG_ERR, "packet encryption failure\n"));
IPSEC_STAT_INCREMENT(stat->out_inval);
error = EINVAL;
- KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1,error,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
goto fail;
}
- KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2,0,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
/*
* calculate ICV if required.
*/
- if (!sav->replay)
+ size_t siz = 0;
+ u_char authbuf[AH_MAXSUMSIZE] __attribute__((aligned(4)));
+
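+ /*
+ * Combined-mode ciphers produce the ICV as part of encryption;
+ * fetch it with finalizeencrypt and jump to fill_icv, skipping the
+ * separate esp_auth computation below.
+ */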
+ if (algo->finalizeencrypt) {
+ siz = algo->icvlen;
+ if ((*algo->finalizeencrypt)(sav, authbuf, siz)) {
+ ipseclog((LOG_ERR, "packet encryption ICV failure\n"));
+ IPSEC_STAT_INCREMENT(stat->out_inval);
+ error = EINVAL;
+ KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
+ goto fail;
+ }
+ goto fill_icv;
+ }
+
+ if (!sav->replay[traffic_class]) {
goto noantireplay;
- if (!sav->key_auth)
+ }
+ if (!sav->key_auth) {
goto noantireplay;
- if (sav->key_auth == SADB_AALG_NONE)
+ }
+ if (sav->key_auth == SADB_AALG_NONE) {
goto noantireplay;
+ }
- {
+ {
const struct ah_algorithm *aalgo;
- u_char authbuf[AH_MAXSUMSIZE];
- u_char *p;
- size_t siz;
- #if INET
- struct ip *ip;
- #endif
-
+
aalgo = ah_algorithm_lookup(sav->alg_auth);
- if (!aalgo)
+ if (!aalgo) {
goto noantireplay;
+ }
siz = ((aalgo->sumsiz)(sav) + 3) & ~(4 - 1);
- if (AH_MAXSUMSIZE < siz)
+ if (AH_MAXSUMSIZE < siz) {
panic("assertion failed for AH_MAXSUMSIZE");
-
+ }
+
if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) {
ipseclog((LOG_ERR, "ESP checksum generation failure\n"));
m_freem(m);
IPSEC_STAT_INCREMENT(stat->out_inval);
goto fail;
}
-
+ }
+
+fill_icv:
+ {
+ struct ip *ip;
+ u_char *p;
+
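+ /* append the siz-byte ICV from authbuf to the tail of the mbuf chain */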
n = m;
- while (n->m_next)
+ while (n->m_next) {
n = n->m_next;
-
+ }
+
if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /* XXX */
n->m_len += siz;
m->m_pkthdr.len += siz;
p = mtod(n, u_char *) + n->m_len - siz;
} else {
struct mbuf *nn;
-
+
MGET(nn, M_DONTWAIT, MT_DATA);
if (!nn) {
ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output",
- afnumber));
+ afnumber));
m_freem(m);
error = ENOBUFS;
goto fail;
p = mtod(nn, u_char *);
}
bcopy(authbuf, p, siz);
-
+
/* modify IP header (for ESP header part only) */
switch (af) {
#if INET
case AF_INET:
ip = mtod(m, struct ip *);
- if (siz < (IP_MAXPACKET - ntohs(ip->ip_len)))
+ if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
ip->ip_len = htons(ntohs(ip->ip_len) + siz);
- else {
+ } else {
ipseclog((LOG_ERR,
- "IPv4 ESP output: size exceeds limit\n"));
+ "IPv4 ESP output: size exceeds limit\n"));
IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
m_freem(m);
error = EMSGSIZE;
break;
#endif
}
- }
-
+ }
+
if (udp_encapsulate) {
struct ip *ip;
- ip = mtod(m, struct ip *);
- udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2));
- }
+ struct ip6_hdr *ip6;
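+ /*
+ * Set the UDP length now that all length adjustments are done.
+ * IPv4 leaves uh_sum at 0; IPv6 requires a UDP checksum, so seed
+ * the pseudo-header sum and mark the mbuf for checksum
+ * finalization via csum_flags/csum_data.
+ */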
+ switch (af) {
+ case AF_INET:
+ ip = mtod(m, struct ip *);
+ udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2));
+ break;
+ case AF_INET6:
+ ip6 = mtod(m, struct ip6_hdr *);
+ udp->uh_ulen = htons(plen + siz + extendsiz + esphlen);
+ udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP));
+ m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
+ m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
+ break;
+ }
+ }
noantireplay:
+ if (net_mpklog_enabled && sav->sah != NULL &&
+ sav->sah->ipsec_if != NULL &&
+ (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) &&
+ inner_protocol == IPPROTO_TCP) {
+ MPKL_ESP_OUTPUT_TCP(esp_mpkl_log_object,
+ ntohl(spi), seq,
+ ntohs(th.th_sport), ntohs(th.th_dport),
+ ntohl(th.th_seq), ntohl(th.th_ack),
+ th.th_flags, inner_payload_len);
+ }
+
lck_mtx_lock(sadb_mutex);
if (!m) {
ipseclog((LOG_ERR,
"NULL mbuf after encryption in esp%d_output", afnumber));
- } else
+ } else {
stat->out_success++;
+ }
stat->out_esphist[sav->alg_enc]++;
lck_mtx_unlock(sadb_mutex);
key_sa_recordxfer(sav, m);
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6,0,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6, 0, 0, 0, 0);
return 0;
fail:
#if 1
- KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7,error,0,0,0);
+ KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, error, 0, 0, 0);
return error;
#else
panic("something bad in esp_output");
#if INET
int
-esp4_output(m, sav)
- struct mbuf *m;
- struct secasvar *sav;
+esp4_output(
+ struct mbuf *m,
+ struct secasvar *sav)
{
struct ip *ip;
if (m->m_len < sizeof(struct ip)) {
#if INET6
int
-esp6_output(m, nexthdrp, md, sav)
- struct mbuf *m;
- u_char *nexthdrp;
- struct mbuf *md;
- struct secasvar *sav;
+esp6_output(
+ struct mbuf *m,
+ u_char *nexthdrp,
+ struct mbuf *md,
+ struct secasvar *sav)
{
if (m->m_len < sizeof(struct ip6_hdr)) {
ipseclog((LOG_DEBUG, "esp6_output: first mbuf too short\n"));