bsd/netinet6/ipsec.c (from apple/xnu, xnu-3248.60.10)
1 /*
2 * Copyright (c) 2008-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30 /* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * IPsec controller part.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/errno.h>
75 #include <sys/time.h>
76 #include <sys/kernel.h>
77 #include <sys/syslog.h>
78 #include <sys/sysctl.h>
79 #include <kern/locks.h>
80 #include <sys/kauth.h>
81 #include <libkern/OSAtomic.h>
82
83 #include <net/if.h>
84 #include <net/route.h>
85 #include <net/if_ipsec.h>
86
87 #include <netinet/in.h>
88 #include <netinet/in_systm.h>
89 #include <netinet/ip.h>
90 #include <netinet/ip_var.h>
91 #include <netinet/in_var.h>
92 #include <netinet/udp.h>
93 #include <netinet/udp_var.h>
94 #include <netinet/ip_ecn.h>
95 #if INET6
96 #include <netinet6/ip6_ecn.h>
97 #endif
98 #include <netinet/tcp.h>
99 #include <netinet/udp.h>
100
101 #include <netinet/ip6.h>
102 #if INET6
103 #include <netinet6/ip6_var.h>
104 #endif
105 #include <netinet/in_pcb.h>
106 #if INET6
107 #include <netinet/icmp6.h>
108 #endif
109
110 #include <netinet6/ipsec.h>
111 #if INET6
112 #include <netinet6/ipsec6.h>
113 #endif
114 #include <netinet6/ah.h>
115 #if INET6
116 #include <netinet6/ah6.h>
117 #endif
118 #if IPSEC_ESP
119 #include <netinet6/esp.h>
120 #if INET6
121 #include <netinet6/esp6.h>
122 #endif
123 #endif
124 #include <netinet6/ipcomp.h>
125 #if INET6
126 #include <netinet6/ipcomp6.h>
127 #endif
128 #include <netkey/key.h>
129 #include <netkey/keydb.h>
130 #include <netkey/key_debug.h>
131
132 #include <net/net_osdep.h>
133
134 #if IPSEC_DEBUG
135 int ipsec_debug = 1;
136 #else
137 int ipsec_debug = 0;
138 #endif
139
140 #include <sys/kdebug.h>
141 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
142 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
143 #define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
144 #define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
145 #define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
146
147 extern lck_mtx_t *sadb_mutex;
148
149 struct ipsecstat ipsecstat;
150 int ip4_ah_cleartos = 1;
151 int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
152 int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
153 int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
154 int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
155 int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
156 int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
157 struct secpolicy ip4_def_policy;
158 int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
159 int ip4_esp_randpad = -1;
160 int esp_udp_encap_port = 0;
161 static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
162 extern int natt_keepalive_interval;
163 extern u_int32_t natt_now;
164
165 struct ipsec_tag;
166
167 SYSCTL_DECL(_net_inet_ipsec);
168 #if INET6
169 SYSCTL_DECL(_net_inet6_ipsec6);
170 #endif
171 /* net.inet.ipsec */
172 SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
173 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
174 SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
175 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
176 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
177 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
178 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
179 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
180 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
181 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
182 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
183 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
184 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
185 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
186 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
187 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
188 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
189 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
190 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
191 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
192 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
193 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
194 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
195 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
196
197 /* for performance, we bypass ipsec until a security policy is set */
198 int ipsec_bypass = 1;
199 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass,0, "");
200
201 /*
202 * NAT Traversal requires a UDP port for encapsulation;
203 * esp_udp_encap_port controls which port is used. Racoon
204 * must set this to the port it is using locally for
205 * NAT traversal.
206 */
207 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
208 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
209
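/*
 * Illustrative sketch (not part of the kernel build): a user-space IKE
 * daemon such as racoon could publish its local NAT-T port through the
 * net.inet.ipsec.esp_port sysctl declared above.  The helper name and the
 * port value 4500 are hypothetical; sysctlbyname(3) is the standard
 * userland interface.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_natt_encap_port(int port)		/* e.g. port = 4500 */
{
	/* writes esp_udp_encap_port in the kernel */
	return sysctlbyname("net.inet.ipsec.esp_port",
	    NULL, NULL, &port, sizeof(port));
}
#endif
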
210 #if INET6
211 struct ipsecstat ipsec6stat;
212 int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
213 int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
214 int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
215 int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
216 struct secpolicy ip6_def_policy;
217 int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
218 int ip6_esp_randpad = -1;
219
220 /* net.inet6.ipsec6 */
221 SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
222 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
223 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
224 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
225 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
226 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
227 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
228 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
229 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
231 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
232 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
233 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
234 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
235 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
236 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
237 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
238 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
239 #endif /* INET6 */
240
241 static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *,
242 int, int, int);
243 static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int,
244 struct mbuf *, int);
245 static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
246 #if INET6
247 static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
248 #endif
249 static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
250 static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
251 static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
252 #if INET6
253 static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
254 static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
255 #endif
256 static struct inpcbpolicy *ipsec_newpcbpolicy(void);
257 static void ipsec_delpcbpolicy(struct inpcbpolicy *);
258 static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
259 static int ipsec_set_policy(struct secpolicy **pcb_sp,
260 int optname, caddr_t request, size_t len, int priv);
261 static void vshiftl(unsigned char *, int, int);
262 static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
263 #if INET6
264 static int ipsec64_encapsulate(struct mbuf *, struct secasvar *);
265 static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
266 static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
267 #endif
268 static struct ipsec_tag *ipsec_addaux(struct mbuf *);
269 static struct ipsec_tag *ipsec_findaux(struct mbuf *);
270 static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
271 int ipsec_send_natt_keepalive(struct secasvar *sav);
272 bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
273
274 static int
275 sysctl_def_policy SYSCTL_HANDLER_ARGS
276 {
277 int old_policy = ip4_def_policy.policy;
278 int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
279
280 #pragma unused(arg1, arg2)
281
282 if (ip4_def_policy.policy != IPSEC_POLICY_NONE &&
283 ip4_def_policy.policy != IPSEC_POLICY_DISCARD) {
284 ip4_def_policy.policy = old_policy;
285 return EINVAL;
286 }
287
288 /* Turn off the bypass if the default security policy changes */
289 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE)
290 ipsec_bypass = 0;
291
292 return error;
293 }
294
295 /*
296 * For an OUTBOUND packet that has a socket. Search the SPD for the packet
297 * and return a pointer to the SP.
298 * OUT: NULL: no appropriate SP found; one of the following values is set in *error:
299 * 0 : bypass
300 * EACCES : discard packet.
301 * ENOENT : ipsec_acquire() in progress, maybe.
302 * others : error occurred.
303 * non-NULL: a pointer to the SP.
304 *
305 * NOTE: the IPv6 mapped address concern is handled here.
306 */
307 struct secpolicy *
308 ipsec4_getpolicybysock(struct mbuf *m,
309 u_int dir,
310 struct socket *so,
311 int *error)
312 {
313 struct inpcbpolicy *pcbsp = NULL;
314 struct secpolicy *currsp = NULL; /* policy on socket */
315 struct secpolicy *kernsp = NULL; /* policy on kernel */
316
317 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
318 /* sanity check */
319 if (m == NULL || so == NULL || error == NULL)
320 panic("ipsec4_getpolicybysock: NULL pointer was passed.\n");
321
322 if (so->so_pcb == NULL) {
323 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
324 return ipsec4_getpolicybyaddr(m, dir, 0, error);
325 }
326
327 switch (SOCK_DOM(so)) {
328 case PF_INET:
329 pcbsp = sotoinpcb(so)->inp_sp;
330 break;
331 #if INET6
332 case PF_INET6:
333 pcbsp = sotoin6pcb(so)->in6p_sp;
334 break;
335 #endif
336 }
337
338 if (!pcbsp){
339 /* Socket has not specified an IPSEC policy */
340 return ipsec4_getpolicybyaddr(m, dir, 0, error);
341 }
342
343 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0,0,0,0,0);
344
345 switch (SOCK_DOM(so)) {
346 case PF_INET:
347 /* set spidx in pcb */
348 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
349 break;
350 #if INET6
351 case PF_INET6:
352 /* set spidx in pcb */
353 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
354 break;
355 #endif
356 default:
357 panic("ipsec4_getpolicybysock: unsupported address family\n");
358 }
359 if (*error) {
360 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1,*error,0,0,0);
361 return NULL;
362 }
363
364 /* sanity check */
365 if (pcbsp == NULL)
366 panic("ipsec4_getpolicybysock: pcbsp is NULL.\n");
367
368 switch (dir) {
369 case IPSEC_DIR_INBOUND:
370 currsp = pcbsp->sp_in;
371 break;
372 case IPSEC_DIR_OUTBOUND:
373 currsp = pcbsp->sp_out;
374 break;
375 default:
376 panic("ipsec4_getpolicybysock: illegal direction.\n");
377 }
378
379 /* sanity check */
380 if (currsp == NULL)
381 panic("ipsec4_getpolicybysock: currsp is NULL.\n");
382
383 /* when privileged socket */
384 if (pcbsp->priv) {
385 switch (currsp->policy) {
386 case IPSEC_POLICY_BYPASS:
387 lck_mtx_lock(sadb_mutex);
388 currsp->refcnt++;
389 lck_mtx_unlock(sadb_mutex);
390 *error = 0;
391 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2,*error,0,0,0);
392 return currsp;
393
394 case IPSEC_POLICY_ENTRUST:
395 /* look for a policy in SPD */
396 kernsp = key_allocsp(&currsp->spidx, dir);
397
398 /* SP found */
399 if (kernsp != NULL) {
400 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
401 printf("DP ipsec4_getpolicybysock called "
402 "to allocate SP:0x%llx\n",
403 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
404 *error = 0;
405 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3,*error,0,0,0);
406 return kernsp;
407 }
408
409 /* no SP found */
410 lck_mtx_lock(sadb_mutex);
411 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
412 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
413 ipseclog((LOG_INFO,
414 "fixed system default policy: %d->%d\n",
415 ip4_def_policy.policy, IPSEC_POLICY_NONE));
416 ip4_def_policy.policy = IPSEC_POLICY_NONE;
417 }
418 ip4_def_policy.refcnt++;
419 lck_mtx_unlock(sadb_mutex);
420 *error = 0;
421 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4,*error,0,0,0);
422 return &ip4_def_policy;
423
424 case IPSEC_POLICY_IPSEC:
425 lck_mtx_lock(sadb_mutex);
426 currsp->refcnt++;
427 lck_mtx_unlock(sadb_mutex);
428 *error = 0;
429 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5,*error,0,0,0);
430 return currsp;
431
432 default:
433 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
434 "Invalid policy for PCB %d\n", currsp->policy));
435 *error = EINVAL;
436 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6,*error,0,0,0);
437 return NULL;
438 }
439 /* NOTREACHED */
440 }
441
442 /* when non-privileged socket */
443 /* look for a policy in SPD */
444 kernsp = key_allocsp(&currsp->spidx, dir);
445
446 /* SP found */
447 if (kernsp != NULL) {
448 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
449 printf("DP ipsec4_getpolicybysock called "
450 "to allocate SP:0x%llx\n",
451 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
452 *error = 0;
453 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7,*error,0,0,0);
454 return kernsp;
455 }
456
457 /* no SP found */
458 switch (currsp->policy) {
459 case IPSEC_POLICY_BYPASS:
460 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
461 "Illegal policy for non-priviliged defined %d\n",
462 currsp->policy));
463 *error = EINVAL;
464 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8,*error,0,0,0);
465 return NULL;
466
467 case IPSEC_POLICY_ENTRUST:
468 lck_mtx_lock(sadb_mutex);
469 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
470 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
471 ipseclog((LOG_INFO,
472 "fixed system default policy: %d->%d\n",
473 ip4_def_policy.policy, IPSEC_POLICY_NONE));
474 ip4_def_policy.policy = IPSEC_POLICY_NONE;
475 }
476 ip4_def_policy.refcnt++;
477 lck_mtx_unlock(sadb_mutex);
478 *error = 0;
479 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9,*error,0,0,0);
480 return &ip4_def_policy;
481
482 case IPSEC_POLICY_IPSEC:
483 lck_mtx_lock(sadb_mutex);
484 currsp->refcnt++;
485 lck_mtx_unlock(sadb_mutex);
486 *error = 0;
487 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10,*error,0,0,0);
488 return currsp;
489
490 default:
491 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
492 "Invalid policy for PCB %d\n", currsp->policy));
493 *error = EINVAL;
494 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11,*error,0,0,0);
495 return NULL;
496 }
497 /* NOTREACHED */
498 }
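
/*
 * Illustrative caller sketch (not compiled): a successful lookup through
 * ipsec4_getpolicybysock() returns a referenced SP that the caller must
 * release with key_freesp(), just as the real callers in this file do.
 * The helper name below is hypothetical.
 */
#if 0
static int
example_check_outbound_policy(struct mbuf *m, struct socket *so)
{
	struct secpolicy *sp;
	int error = 0;

	sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
	if (sp == NULL)
		return error;		/* 0 means bypass, otherwise an errno */

	/* ... examine sp->policy and sp->req here ... */

	key_freesp(sp, KEY_SADB_UNLOCKED);	/* drop the reference */
	return 0;
}
#endif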
499
500 /*
501 * For a FORWARDING packet or an OUTBOUND packet without a socket. Search the
502 * SPD for the packet and return a pointer to the SP.
503 * OUT: non-NULL: a pointer to the matching security policy leaf entry.
504 * NULL: no appropriate SP found; one of the following values is set in *error:
505 * 0 : bypass
506 * EACCES : discard packet.
507 * ENOENT : ipsec_acquire() in progress, maybe.
508 * others : error occurred.
509 */
510 struct secpolicy *
511 ipsec4_getpolicybyaddr(struct mbuf *m,
512 u_int dir,
513 int flag,
514 int *error)
515 {
516 struct secpolicy *sp = NULL;
517
518 if (ipsec_bypass != 0)
519 return 0;
520
521 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
522
523 /* sanity check */
524 if (m == NULL || error == NULL)
525 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n");
526 {
527 struct secpolicyindex spidx;
528
529 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
530 bzero(&spidx, sizeof(spidx));
531
532 /* make an index to look for a policy */
533 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
534 (flag & IP_FORWARDING) ? 0 : 1);
535
536 if (*error != 0) {
537 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,*error,0,0,0);
538 return NULL;
539 }
540
541 sp = key_allocsp(&spidx, dir);
542 }
543
544 /* SP found */
545 if (sp != NULL) {
546 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
547 printf("DP ipsec4_getpolicybyaddr called "
548 "to allocate SP:0x%llx\n",
549 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
550 *error = 0;
551 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0);
552 return sp;
553 }
554
555 /* no SP found */
556 lck_mtx_lock(sadb_mutex);
557 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
558 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
559 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
560 ip4_def_policy.policy,
561 IPSEC_POLICY_NONE));
562 ip4_def_policy.policy = IPSEC_POLICY_NONE;
563 }
564 ip4_def_policy.refcnt++;
565 lck_mtx_unlock(sadb_mutex);
566 *error = 0;
567 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3,*error,0,0,0);
568 return &ip4_def_policy;
569 }
570
571 /* Match against the bound interface rather than the source address.
572 * Unlike getpolicybyaddr, do not set the default policy.
573 * Return 0 if processing should continue, or -1 if the packet
574 * should be dropped.
575 */
576 int
577 ipsec4_getpolicybyinterface(struct mbuf *m,
578 u_int dir,
579 int *flags,
580 struct ip_out_args *ipoa,
581 struct secpolicy **sp)
582 {
583 struct secpolicyindex spidx;
584 int error = 0;
585
586 if (ipsec_bypass != 0)
587 return 0;
588
589 /* Sanity check */
590 if (m == NULL || ipoa == NULL || sp == NULL)
591 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n");
592
593 if (ipoa->ipoa_boundif == IFSCOPE_NONE)
594 return 0;
595
596 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
597 bzero(&spidx, sizeof(spidx));
598
599 /* make an index to look for a policy */
600 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
601 ipoa->ipoa_boundif, 4);
602
603 if (error != 0) {
604 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
605 return 0;
606 }
607
608 *sp = key_allocsp(&spidx, dir);
609
610 /* Return SP, whether NULL or not */
611 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
612 if ((*sp)->ipsec_if == NULL) {
613 /* Invalid to capture on an interface without redirect */
614 key_freesp(*sp, KEY_SADB_UNLOCKED);
615 *sp = NULL;
616 return -1;
617 } else if ((*sp)->disabled) {
618 /* Disabled policies go in the clear */
619 key_freesp(*sp, KEY_SADB_UNLOCKED);
620 *sp = NULL;
621 *flags |= IP_NOIPSEC; /* Avoid later IPSec check */
622 } else {
623 /* If policy is enabled, redirect to ipsec interface */
624 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
625 }
626 }
627
628 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0);
629
630 return 0;
631 }
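
/*
 * Illustrative caller sketch (not compiled, helper name hypothetical): an
 * output path with a scoped route would consult the interface-based lookup
 * before the ordinary policy checks.  A -1 return means the packet must be
 * dropped; the errno chosen for that case here is only an example.
 */
#if 0
static int
example_scoped_policy_check(struct mbuf *m, struct ip_out_args *ipoa, int *flags)
{
	struct secpolicy *sp = NULL;

	if (ipsec4_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND, flags,
	    ipoa, &sp) != 0)
		return EHOSTUNREACH;	/* lookup said: drop */

	if (sp != NULL)
		key_freesp(sp, KEY_SADB_UNLOCKED);
	return 0;
}
#endif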
632
633
634 #if INET6
635 /*
636 * For an OUTBOUND packet that has a socket. Search the SPD for the packet
637 * and return a pointer to the SP.
638 * OUT: NULL: no appropriate SP found; one of the following values is set in *error:
639 * 0 : bypass
640 * EACCES : discard packet.
641 * ENOENT : ipsec_acquire() in progress, maybe.
642 * others : error occurred.
643 * non-NULL: a pointer to the SP.
644 */
645 struct secpolicy *
646 ipsec6_getpolicybysock(struct mbuf *m,
647 u_int dir,
648 struct socket *so,
649 int *error)
650 {
651 struct inpcbpolicy *pcbsp = NULL;
652 struct secpolicy *currsp = NULL; /* policy on socket */
653 struct secpolicy *kernsp = NULL; /* policy on kernel */
654
655 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
656
657 /* sanity check */
658 if (m == NULL || so == NULL || error == NULL)
659 panic("ipsec6_getpolicybysock: NULL pointer was passed.\n");
660
661 #if DIAGNOSTIC
662 if (SOCK_DOM(so) != PF_INET6)
663 panic("ipsec6_getpolicybysock: socket domain != inet6\n");
664 #endif
665
666 pcbsp = sotoin6pcb(so)->in6p_sp;
667
668 if (!pcbsp){
669 return ipsec6_getpolicybyaddr(m, dir, 0, error);
670 }
671
672 /* set spidx in pcb */
673 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
674
675 /* sanity check */
676 if (pcbsp == NULL)
677 panic("ipsec6_getpolicybysock: pcbsp is NULL.\n");
678
679 switch (dir) {
680 case IPSEC_DIR_INBOUND:
681 currsp = pcbsp->sp_in;
682 break;
683 case IPSEC_DIR_OUTBOUND:
684 currsp = pcbsp->sp_out;
685 break;
686 default:
687 panic("ipsec6_getpolicybysock: illegal direction.\n");
688 }
689
690 /* sanity check */
691 if (currsp == NULL)
692 panic("ipsec6_getpolicybysock: currsp is NULL.\n");
693
694 /* when privileged socket */
695 if (pcbsp->priv) {
696 switch (currsp->policy) {
697 case IPSEC_POLICY_BYPASS:
698 lck_mtx_lock(sadb_mutex);
699 currsp->refcnt++;
700 lck_mtx_unlock(sadb_mutex);
701 *error = 0;
702 return currsp;
703
704 case IPSEC_POLICY_ENTRUST:
705 /* look for a policy in SPD */
706 kernsp = key_allocsp(&currsp->spidx, dir);
707
708 /* SP found */
709 if (kernsp != NULL) {
710 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
711 printf("DP ipsec6_getpolicybysock called "
712 "to allocate SP:0x%llx\n",
713 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
714 *error = 0;
715 return kernsp;
716 }
717
718 /* no SP found */
719 lck_mtx_lock(sadb_mutex);
720 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
721 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
722 ipseclog((LOG_INFO,
723 "fixed system default policy: %d->%d\n",
724 ip6_def_policy.policy, IPSEC_POLICY_NONE));
725 ip6_def_policy.policy = IPSEC_POLICY_NONE;
726 }
727 ip6_def_policy.refcnt++;
728 lck_mtx_unlock(sadb_mutex);
729 *error = 0;
730 return &ip6_def_policy;
731
732 case IPSEC_POLICY_IPSEC:
733 lck_mtx_lock(sadb_mutex);
734 currsp->refcnt++;
735 lck_mtx_unlock(sadb_mutex);
736 *error = 0;
737 return currsp;
738
739 default:
740 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
741 "Invalid policy for PCB %d\n", currsp->policy));
742 *error = EINVAL;
743 return NULL;
744 }
745 /* NOTREACHED */
746 }
747
748 /* when non-privileged socket */
749 /* look for a policy in SPD */
750 kernsp = key_allocsp(&currsp->spidx, dir);
751
752 /* SP found */
753 if (kernsp != NULL) {
754 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
755 printf("DP ipsec6_getpolicybysock called "
756 "to allocate SP:0x%llx\n",
757 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
758 *error = 0;
759 return kernsp;
760 }
761
762 /* no SP found */
763 switch (currsp->policy) {
764 case IPSEC_POLICY_BYPASS:
765 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
766 "Illegal policy for non-priviliged defined %d\n",
767 currsp->policy));
768 *error = EINVAL;
769 return NULL;
770
771 case IPSEC_POLICY_ENTRUST:
772 lck_mtx_lock(sadb_mutex);
773 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
774 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
775 ipseclog((LOG_INFO,
776 "fixed system default policy: %d->%d\n",
777 ip6_def_policy.policy, IPSEC_POLICY_NONE));
778 ip6_def_policy.policy = IPSEC_POLICY_NONE;
779 }
780 ip6_def_policy.refcnt++;
781 lck_mtx_unlock(sadb_mutex);
782 *error = 0;
783 return &ip6_def_policy;
784
785 case IPSEC_POLICY_IPSEC:
786 lck_mtx_lock(sadb_mutex);
787 currsp->refcnt++;
788 lck_mtx_unlock(sadb_mutex);
789 *error = 0;
790 return currsp;
791
792 default:
793 ipseclog((LOG_ERR,
794 "ipsec6_policybysock: Invalid policy for PCB %d\n",
795 currsp->policy));
796 *error = EINVAL;
797 return NULL;
798 }
799 /* NOTREACHED */
800 }
801
802 /*
803 * For a FORWARDING packet or an OUTBOUND packet without a socket. Search the
804 * SPD for the packet and return a pointer to the SP.
805 * `flag' indicates whether the packet is being forwarded.
806 * flag = 1: forward
807 * OUT: non-NULL: a pointer to the matching security policy leaf entry.
808 * NULL: no appropriate SP found; one of the following values is set in *error:
809 * 0 : bypass
810 * EACCES : discard packet.
811 * ENOENT : ipsec_acquire() in progress, maybe.
812 * others : error occurred.
813 */
814 #ifndef IP_FORWARDING
815 #define IP_FORWARDING 1
816 #endif
817
818 struct secpolicy *
819 ipsec6_getpolicybyaddr(struct mbuf *m,
820 u_int dir,
821 int flag,
822 int *error)
823 {
824 struct secpolicy *sp = NULL;
825
826 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
827
828 /* sanity check */
829 if (m == NULL || error == NULL)
830 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n");
831
832 {
833 struct secpolicyindex spidx;
834
835 bzero(&spidx, sizeof(spidx));
836
837 /* make an index to look for a policy */
838 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
839 (flag & IP_FORWARDING) ? 0 : 1);
840
841 if (*error != 0)
842 return NULL;
843
844 sp = key_allocsp(&spidx, dir);
845 }
846
847 /* SP found */
848 if (sp != NULL) {
849 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
850 printf("DP ipsec6_getpolicybyaddr called "
851 "to allocate SP:0x%llx\n",
852 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
853 *error = 0;
854 return sp;
855 }
856
857 /* no SP found */
858 lck_mtx_lock(sadb_mutex);
859 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
860 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
861 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
862 ip6_def_policy.policy, IPSEC_POLICY_NONE));
863 ip6_def_policy.policy = IPSEC_POLICY_NONE;
864 }
865 ip6_def_policy.refcnt++;
866 lck_mtx_unlock(sadb_mutex);
867 *error = 0;
868 return &ip6_def_policy;
869 }
870
871 /* Match against the bound interface rather than the source address.
872 * Unlike getpolicybyaddr, do not set the default policy.
873 * Return 0 if processing should continue, or -1 if the packet
874 * should be dropped.
875 */
876 int
877 ipsec6_getpolicybyinterface(struct mbuf *m,
878 u_int dir,
879 int flag,
880 struct ip6_out_args *ip6oap,
881 int *noipsec,
882 struct secpolicy **sp)
883 {
884 struct secpolicyindex spidx;
885 int error = 0;
886
887 if (ipsec_bypass != 0)
888 return 0;
889
890 /* Sanity check */
891 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL)
892 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n");
893
894 *noipsec = 0;
895
896 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE)
897 return 0;
898
899 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
900 bzero(&spidx, sizeof(spidx));
901
902 /* make an index to look for a policy */
903 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
904 ip6oap->ip6oa_boundif, 6);
905
906 if (error != 0) {
907 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
908 return 0;
909 }
910
911 *sp = key_allocsp(&spidx, dir);
912
913 /* Return SP, whether NULL or not */
914 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
915 if ((*sp)->ipsec_if == NULL) {
916 /* Invalid to capture on an interface without redirect */
917 key_freesp(*sp, KEY_SADB_UNLOCKED);
918 *sp = NULL;
919 return -1;
920 } else if ((*sp)->disabled) {
921 /* Disabled policies go in the clear */
922 key_freesp(*sp, KEY_SADB_UNLOCKED);
923 *sp = NULL;
924 *noipsec = 1; /* Avoid later IPSec check */
925 } else {
926 /* If policy is enabled, redirect to ipsec interface */
927 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
928 }
929 }
930
931 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0);
932
933 return 0;
934 }
935 #endif /* INET6 */
936
937 /*
938 * Set the IP addresses into spidx from the mbuf.
939 * This function is used when forwarding packets and for ICMP echo replies.
940 *
941 * IN: the following are taken from the mbuf:
942 * protocol family, src, dst, next protocol
943 * OUT:
944 * 0: success.
945 * other: failure; errno is set.
946 */
947 static int
948 ipsec_setspidx_mbuf(
949 struct secpolicyindex *spidx,
950 u_int dir,
951 __unused u_int family,
952 struct mbuf *m,
953 int needport)
954 {
955 int error;
956
957 /* sanity check */
958 if (spidx == NULL || m == NULL)
959 panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n");
960
961 bzero(spidx, sizeof(*spidx));
962
963 error = ipsec_setspidx(m, spidx, needport, 0);
964 if (error)
965 goto bad;
966 spidx->dir = dir;
967
968 return 0;
969
970 bad:
971 /* XXX initialize */
972 bzero(spidx, sizeof(*spidx));
973 return EINVAL;
974 }
975
976 static int
977 ipsec_setspidx_interface(
978 struct secpolicyindex *spidx,
979 u_int dir,
980 struct mbuf *m,
981 int needport,
982 int ifindex,
983 int ip_version)
984 {
985 int error;
986
987 /* sanity check */
988 if (spidx == NULL || m == NULL)
989 panic("ipsec_setspidx_interface: NULL pointer was passed.\n");
990
991 bzero(spidx, sizeof(*spidx));
992
993 error = ipsec_setspidx(m, spidx, needport, ip_version);
994 if (error)
995 goto bad;
996 spidx->dir = dir;
997
998 if (ifindex != 0) {
999 ifnet_head_lock_shared();
1000 spidx->internal_if = ifindex2ifnet[ifindex];
1001 ifnet_head_done();
1002 } else {
1003 spidx->internal_if = NULL;
1004 }
1005
1006 return 0;
1007
1008 bad:
1009 return EINVAL;
1010 }
1011
1012 static int
1013 ipsec4_setspidx_inpcb(m, pcb)
1014 struct mbuf *m;
1015 struct inpcb *pcb;
1016 {
1017 struct secpolicyindex *spidx;
1018 int error;
1019
1020 if (ipsec_bypass != 0)
1021 return 0;
1022
1023 /* sanity check */
1024 if (pcb == NULL)
1025 panic("ipsec4_setspidx_inpcb: no PCB found.\n");
1026 if (pcb->inp_sp == NULL)
1027 panic("ipsec4_setspidx_inpcb: no inp_sp found.\n");
1028 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL)
1029 panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n");
1030
1031 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1032 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1033
1034 spidx = &pcb->inp_sp->sp_in->spidx;
1035 error = ipsec_setspidx(m, spidx, 1, 0);
1036 if (error)
1037 goto bad;
1038 spidx->dir = IPSEC_DIR_INBOUND;
1039
1040 spidx = &pcb->inp_sp->sp_out->spidx;
1041 error = ipsec_setspidx(m, spidx, 1, 0);
1042 if (error)
1043 goto bad;
1044 spidx->dir = IPSEC_DIR_OUTBOUND;
1045
1046 return 0;
1047
1048 bad:
1049 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1050 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1051 return error;
1052 }
1053
1054 #if INET6
1055 static int
1056 ipsec6_setspidx_in6pcb(m, pcb)
1057 struct mbuf *m;
1058 struct in6pcb *pcb;
1059 {
1060 struct secpolicyindex *spidx;
1061 int error;
1062
1063 /* sanity check */
1064 if (pcb == NULL)
1065 panic("ipsec6_setspidx_in6pcb: no PCB found.\n");
1066 if (pcb->in6p_sp == NULL)
1067 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n");
1068 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL)
1069 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n");
1070
1071 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1072 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1073
1074 spidx = &pcb->in6p_sp->sp_in->spidx;
1075 error = ipsec_setspidx(m, spidx, 1, 0);
1076 if (error)
1077 goto bad;
1078 spidx->dir = IPSEC_DIR_INBOUND;
1079
1080 spidx = &pcb->in6p_sp->sp_out->spidx;
1081 error = ipsec_setspidx(m, spidx, 1, 0);
1082 if (error)
1083 goto bad;
1084 spidx->dir = IPSEC_DIR_OUTBOUND;
1085
1086 return 0;
1087
1088 bad:
1089 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1090 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1091 return error;
1092 }
1093 #endif
1094
1095 /*
1096 * configure security policy index (src/dst/proto/sport/dport)
1097 * by looking at the content of mbuf.
1098 * the caller is responsible for error recovery (like clearing up spidx).
1099 */
1100 static int
1101 ipsec_setspidx(struct mbuf *m,
1102 struct secpolicyindex *spidx,
1103 int needport,
1104 int force_ip_version)
1105 {
1106 struct ip *ip = NULL;
1107 struct ip ipbuf;
1108 u_int v;
1109 struct mbuf *n;
1110 int len;
1111 int error;
1112
1113 if (m == NULL)
1114 panic("ipsec_setspidx: m == 0 passed.\n");
1115
1116 /*
1117 * validate m->m_pkthdr.len. we see an incorrect length if we
1118 * mistakenly call this function with an inconsistent mbuf chain
1119 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1120 */
1121 len = 0;
1122 for (n = m; n; n = n->m_next)
1123 len += n->m_len;
1124 if (m->m_pkthdr.len != len) {
1125 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1126 printf("ipsec_setspidx: "
1127 "total of m_len(%d) != pkthdr.len(%d), "
1128 "ignored.\n",
1129 len, m->m_pkthdr.len));
1130 return EINVAL;
1131 }
1132
1133 if (m->m_pkthdr.len < sizeof(struct ip)) {
1134 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1135 printf("ipsec_setspidx: "
1136 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1137 m->m_pkthdr.len));
1138 return EINVAL;
1139 }
1140
1141 if (m->m_len >= sizeof(*ip))
1142 ip = mtod(m, struct ip *);
1143 else {
1144 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1145 ip = &ipbuf;
1146 }
1147
1148 if (force_ip_version) {
1149 v = force_ip_version;
1150 } else {
1151 #ifdef _IP_VHL
1152 v = _IP_VHL_V(ip->ip_vhl);
1153 #else
1154 v = ip->ip_v;
1155 #endif
1156 }
1157 switch (v) {
1158 case 4:
1159 error = ipsec4_setspidx_ipaddr(m, spidx);
1160 if (error)
1161 return error;
1162 ipsec4_get_ulp(m, spidx, needport);
1163 return 0;
1164 #if INET6
1165 case 6:
1166 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1167 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1168 printf("ipsec_setspidx: "
1169 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1170 "ignored.\n", m->m_pkthdr.len));
1171 return EINVAL;
1172 }
1173 error = ipsec6_setspidx_ipaddr(m, spidx);
1174 if (error)
1175 return error;
1176 ipsec6_get_ulp(m, spidx, needport);
1177 return 0;
1178 #endif
1179 default:
1180 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1181 printf("ipsec_setspidx: "
1182 "unknown IP version %u, ignored.\n", v));
1183 return EINVAL;
1184 }
1185 }
1186
1187 static void
1188 ipsec4_get_ulp(m, spidx, needport)
1189 struct mbuf *m;
1190 struct secpolicyindex *spidx;
1191 int needport;
1192 {
1193 struct ip ip;
1194 struct ip6_ext ip6e;
1195 u_int8_t nxt;
1196 int off;
1197 struct tcphdr th;
1198 struct udphdr uh;
1199
1200 /* sanity check */
1201 if (m == NULL)
1202 panic("ipsec4_get_ulp: NULL pointer was passed.\n");
1203 if (m->m_pkthdr.len < sizeof(ip))
1204 panic("ipsec4_get_ulp: too short\n");
1205
1206 /* set default */
1207 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1208 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1209 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1210
1211 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1212 /* ip_input() flips it into host endian XXX need more checking */
1213 if (ip.ip_off & (IP_MF | IP_OFFMASK))
1214 return;
1215
1216 nxt = ip.ip_p;
1217 #ifdef _IP_VHL
1218 off = _IP_VHL_HL(ip.ip_vhl) << 2;
1219 #else
1220 off = ip.ip_hl << 2;
1221 #endif
1222 while (off < m->m_pkthdr.len) {
1223 switch (nxt) {
1224 case IPPROTO_TCP:
1225 spidx->ul_proto = nxt;
1226 if (!needport)
1227 return;
1228 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1229 return;
1230 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1231 ((struct sockaddr_in *)&spidx->src)->sin_port =
1232 th.th_sport;
1233 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1234 th.th_dport;
1235 return;
1236 case IPPROTO_UDP:
1237 spidx->ul_proto = nxt;
1238 if (!needport)
1239 return;
1240 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1241 return;
1242 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1243 ((struct sockaddr_in *)&spidx->src)->sin_port =
1244 uh.uh_sport;
1245 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1246 uh.uh_dport;
1247 return;
1248 case IPPROTO_AH:
1249 if (off + sizeof(ip6e) > m->m_pkthdr.len)
1250 return;
1251 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1252 off += (ip6e.ip6e_len + 2) << 2;
1253 nxt = ip6e.ip6e_nxt;
1254 break;
1255 case IPPROTO_ICMP:
1256 default:
1257 /* XXX intermediate headers??? */
1258 spidx->ul_proto = nxt;
1259 return;
1260 }
1261 }
1262 }
1263
1264 /* assumes that m is sane */
1265 static int
1266 ipsec4_setspidx_ipaddr(m, spidx)
1267 struct mbuf *m;
1268 struct secpolicyindex *spidx;
1269 {
1270 struct ip *ip = NULL;
1271 struct ip ipbuf;
1272 struct sockaddr_in *sin;
1273
1274 if (m->m_len >= sizeof(*ip))
1275 ip = mtod(m, struct ip *);
1276 else {
1277 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1278 ip = &ipbuf;
1279 }
1280
1281 sin = (struct sockaddr_in *)&spidx->src;
1282 bzero(sin, sizeof(*sin));
1283 sin->sin_family = AF_INET;
1284 sin->sin_len = sizeof(struct sockaddr_in);
1285 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1286 spidx->prefs = sizeof(struct in_addr) << 3;
1287
1288 sin = (struct sockaddr_in *)&spidx->dst;
1289 bzero(sin, sizeof(*sin));
1290 sin->sin_family = AF_INET;
1291 sin->sin_len = sizeof(struct sockaddr_in);
1292 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1293 spidx->prefd = sizeof(struct in_addr) << 3;
1294
1295 return 0;
1296 }
1297
1298 #if INET6
1299 static void
1300 ipsec6_get_ulp(struct mbuf *m,
1301 struct secpolicyindex *spidx,
1302 int needport)
1303 {
1304 int off, nxt;
1305 struct tcphdr th;
1306 struct udphdr uh;
1307
1308 /* sanity check */
1309 if (m == NULL)
1310 panic("ipsec6_get_ulp: NULL pointer was passed.\n");
1311
1312 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1313 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1314
1315 /* set default */
1316 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1317 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1318 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1319
1320 nxt = -1;
1321 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1322 if (off < 0 || m->m_pkthdr.len < off)
1323 return;
1324
1325 switch (nxt) {
1326 case IPPROTO_TCP:
1327 spidx->ul_proto = nxt;
1328 if (!needport)
1329 break;
1330 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1331 break;
1332 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1333 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1334 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1335 break;
1336 case IPPROTO_UDP:
1337 spidx->ul_proto = nxt;
1338 if (!needport)
1339 break;
1340 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1341 break;
1342 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1343 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1344 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1345 break;
1346 case IPPROTO_ICMPV6:
1347 default:
1348 /* XXX intermediate headers??? */
1349 spidx->ul_proto = nxt;
1350 break;
1351 }
1352 }
1353
1354 /* assumes that m is sane */
1355 static int
1356 ipsec6_setspidx_ipaddr(struct mbuf *m,
1357 struct secpolicyindex *spidx)
1358 {
1359 struct ip6_hdr *ip6 = NULL;
1360 struct ip6_hdr ip6buf;
1361 struct sockaddr_in6 *sin6;
1362
1363 if (m->m_len >= sizeof(*ip6))
1364 ip6 = mtod(m, struct ip6_hdr *);
1365 else {
1366 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1367 ip6 = &ip6buf;
1368 }
1369
1370 sin6 = (struct sockaddr_in6 *)&spidx->src;
1371 bzero(sin6, sizeof(*sin6));
1372 sin6->sin6_family = AF_INET6;
1373 sin6->sin6_len = sizeof(struct sockaddr_in6);
1374 bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1375 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1376 sin6->sin6_addr.s6_addr16[1] = 0;
1377 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1378 }
1379 spidx->prefs = sizeof(struct in6_addr) << 3;
1380
1381 sin6 = (struct sockaddr_in6 *)&spidx->dst;
1382 bzero(sin6, sizeof(*sin6));
1383 sin6->sin6_family = AF_INET6;
1384 sin6->sin6_len = sizeof(struct sockaddr_in6);
1385 bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1386 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1387 sin6->sin6_addr.s6_addr16[1] = 0;
1388 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1389 }
1390 spidx->prefd = sizeof(struct in6_addr) << 3;
1391
1392 return 0;
1393 }
1394 #endif
1395
1396 static struct inpcbpolicy *
1397 ipsec_newpcbpolicy()
1398 {
1399 struct inpcbpolicy *p;
1400
1401 p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK);
1402 return p;
1403 }
1404
1405 static void
1406 ipsec_delpcbpolicy(struct inpcbpolicy *p)
1407 {
1408 FREE(p, M_SECA);
1409 }
1410
1411 /* initialize policy in PCB */
1412 int
1413 ipsec_init_policy(struct socket *so,
1414 struct inpcbpolicy **pcb_sp)
1415 {
1416 struct inpcbpolicy *new;
1417
1418 /* sanity check. */
1419 if (so == NULL || pcb_sp == NULL)
1420 panic("ipsec_init_policy: NULL pointer was passed.\n");
1421
1422 new = ipsec_newpcbpolicy();
1423 if (new == NULL) {
1424 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1425 return ENOBUFS;
1426 }
1427 bzero(new, sizeof(*new));
1428
1429 #ifdef __APPLE__
1430 if (kauth_cred_issuser(so->so_cred))
1431 #else
1432 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1433 #endif
1434 new->priv = 1;
1435 else
1436 new->priv = 0;
1437
1438 if ((new->sp_in = key_newsp()) == NULL) {
1439 ipsec_delpcbpolicy(new);
1440 return ENOBUFS;
1441 }
1442 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1443 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1444
1445 if ((new->sp_out = key_newsp()) == NULL) {
1446 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1447 ipsec_delpcbpolicy(new);
1448 return ENOBUFS;
1449 }
1450 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1451 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1452
1453 *pcb_sp = new;
1454
1455 return 0;
1456 }
1457
1458 /* copy old ipsec policy into new */
1459 int
1460 ipsec_copy_policy(struct inpcbpolicy *old,
1461 struct inpcbpolicy *new)
1462 {
1463 struct secpolicy *sp;
1464
1465 if (ipsec_bypass != 0)
1466 return 0;
1467
1468 sp = ipsec_deepcopy_policy(old->sp_in);
1469 if (sp) {
1470 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1471 new->sp_in = sp;
1472 } else
1473 return ENOBUFS;
1474
1475 sp = ipsec_deepcopy_policy(old->sp_out);
1476 if (sp) {
1477 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1478 new->sp_out = sp;
1479 } else
1480 return ENOBUFS;
1481
1482 new->priv = old->priv;
1483
1484 return 0;
1485 }
1486
1487 /* deep-copy a policy in PCB */
1488 static struct secpolicy *
1489 ipsec_deepcopy_policy(struct secpolicy *src)
1490 {
1491 struct ipsecrequest *newchain = NULL;
1492 struct ipsecrequest *p;
1493 struct ipsecrequest **q;
1494 struct ipsecrequest *r;
1495 struct secpolicy *dst;
1496
1497 if (src == NULL)
1498 return NULL;
1499 dst = key_newsp();
1500 if (dst == NULL)
1501 return NULL;
1502
1503 /*
1504 * deep-copy IPsec request chain. This is required since struct
1505 * ipsecrequest is not reference counted.
1506 */
1507 q = &newchain;
1508 for (p = src->req; p; p = p->next) {
1509 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1510 M_SECA, M_WAITOK | M_ZERO);
1511 if (*q == NULL)
1512 goto fail;
1513 (*q)->next = NULL;
1514
1515 (*q)->saidx.proto = p->saidx.proto;
1516 (*q)->saidx.mode = p->saidx.mode;
1517 (*q)->level = p->level;
1518 (*q)->saidx.reqid = p->saidx.reqid;
1519
1520 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1521 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1522
1523 (*q)->sp = dst;
1524
1525 q = &((*q)->next);
1526 }
1527
1528 dst->req = newchain;
1529 dst->state = src->state;
1530 dst->policy = src->policy;
1531 /* do not touch the refcnt fields */
1532
1533 return dst;
1534
1535 fail:
1536 for (p = newchain; p; p = r) {
1537 r = p->next;
1538 FREE(p, M_SECA);
1539 p = NULL;
1540 }
1541 key_freesp(dst, KEY_SADB_UNLOCKED);
1542 return NULL;
1543 }
1544
1545 /* set policy and ipsec request if present. */
1546 static int
1547 ipsec_set_policy(struct secpolicy **pcb_sp,
1548 __unused int optname,
1549 caddr_t request,
1550 size_t len,
1551 int priv)
1552 {
1553 struct sadb_x_policy *xpl;
1554 struct secpolicy *newsp = NULL;
1555 int error;
1556
1557 /* sanity check. */
1558 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL)
1559 return EINVAL;
1560 if (len < sizeof(*xpl))
1561 return EINVAL;
1562 xpl = (struct sadb_x_policy *)(void *)request;
1563
1564 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1565 printf("ipsec_set_policy: passed policy\n");
1566 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1567
1568 /* check policy type */
1569 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1570 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1571 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE)
1572 return EINVAL;
1573
1574 /* check privileged socket */
1575 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS)
1576 return EACCES;
1577
1578 /* allocation new SP entry */
1579 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL)
1580 return error;
1581
1582 newsp->state = IPSEC_SPSTATE_ALIVE;
1583
1584 /* clear old SP and set new SP */
1585 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1586 *pcb_sp = newsp;
1587 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1588 printf("ipsec_set_policy: new policy\n");
1589 kdebug_secpolicy(newsp));
1590
1591 return 0;
1592 }
1593
1594 int
1595 ipsec4_set_policy(struct inpcb *inp,
1596 int optname,
1597 caddr_t request,
1598 size_t len,
1599 int priv)
1600 {
1601 struct sadb_x_policy *xpl;
1602 struct secpolicy **pcb_sp;
1603 int error = 0;
1604 struct sadb_x_policy xpl_aligned_buf;
1605 u_int8_t *xpl_unaligned;
1606
1607 /* sanity check. */
1608 if (inp == NULL || request == NULL)
1609 return EINVAL;
1610 if (len < sizeof(*xpl))
1611 return EINVAL;
1612 xpl = (struct sadb_x_policy *)(void *)request;
1613
1614 /* This is a new mbuf allocated by soopt_getm() */
1615 if (IPSEC_IS_P2ALIGNED(xpl)) {
1616 xpl_unaligned = NULL;
1617 } else {
1618 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1619 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1620 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1621 }
1622
1623 if (inp->inp_sp == NULL) {
1624 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1625 if (error)
1626 return error;
1627 }
1628
1629 /* select direction */
1630 switch (xpl->sadb_x_policy_dir) {
1631 case IPSEC_DIR_INBOUND:
1632 pcb_sp = &inp->inp_sp->sp_in;
1633 break;
1634 case IPSEC_DIR_OUTBOUND:
1635 pcb_sp = &inp->inp_sp->sp_out;
1636 break;
1637 default:
1638 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1639 xpl->sadb_x_policy_dir));
1640 return EINVAL;
1641 }
1642
1643 /* turn bypass off */
1644 if (ipsec_bypass != 0)
1645 ipsec_bypass = 0;
1646
1647 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1648 }
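
/*
 * Illustrative user-space counterpart (a sketch, not part of this file):
 * a per-socket policy typically reaches ipsec4_set_policy() via
 * setsockopt(IP_IPSEC_POLICY) with a buffer produced by libipsec's
 * ipsec_set_policy(3).  The policy string and error handling are
 * abbreviated examples.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet6/ipsec.h>
#include <stdlib.h>
#include <string.h>

static int
example_require_transport_esp(int s)
{
	const char *req = "out ipsec esp/transport//require";
	char *buf = ipsec_set_policy((char *)req, strlen(req));
	int ret;

	if (buf == NULL)
		return -1;
	ret = setsockopt(s, IPPROTO_IP, IP_IPSEC_POLICY,
	    buf, ipsec_get_policylen(buf));
	free(buf);
	return ret;
}
#endif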
1649
1650 /* delete policy in PCB */
1651 int
1652 ipsec4_delete_pcbpolicy(struct inpcb *inp)
1653 {
1654
1655 /* sanity check. */
1656 if (inp == NULL)
1657 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n");
1658
1659 if (inp->inp_sp == NULL)
1660 return 0;
1661
1662 if (inp->inp_sp->sp_in != NULL) {
1663 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1664 inp->inp_sp->sp_in = NULL;
1665 }
1666
1667 if (inp->inp_sp->sp_out != NULL) {
1668 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1669 inp->inp_sp->sp_out = NULL;
1670 }
1671
1672 ipsec_delpcbpolicy(inp->inp_sp);
1673 inp->inp_sp = NULL;
1674
1675 return 0;
1676 }
1677
1678 #if INET6
1679 int
1680 ipsec6_set_policy(struct in6pcb *in6p,
1681 int optname,
1682 caddr_t request,
1683 size_t len,
1684 int priv)
1685 {
1686 struct sadb_x_policy *xpl;
1687 struct secpolicy **pcb_sp;
1688 int error = 0;
1689 struct sadb_x_policy xpl_aligned_buf;
1690 u_int8_t *xpl_unaligned;
1691
1692 /* sanity check. */
1693 if (in6p == NULL || request == NULL)
1694 return EINVAL;
1695 if (len < sizeof(*xpl))
1696 return EINVAL;
1697 xpl = (struct sadb_x_policy *)(void *)request;
1698
1699 /* This is a new mbuf allocated by soopt_getm() */
1700 if (IPSEC_IS_P2ALIGNED(xpl)) {
1701 xpl_unaligned = NULL;
1702 } else {
1703 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1704 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1705 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1706 }
1707
1708 if (in6p->in6p_sp == NULL) {
1709 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1710 if (error)
1711 return error;
1712 }
1713
1714 /* select direction */
1715 switch (xpl->sadb_x_policy_dir) {
1716 case IPSEC_DIR_INBOUND:
1717 pcb_sp = &in6p->in6p_sp->sp_in;
1718 break;
1719 case IPSEC_DIR_OUTBOUND:
1720 pcb_sp = &in6p->in6p_sp->sp_out;
1721 break;
1722 default:
1723 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1724 xpl->sadb_x_policy_dir));
1725 return EINVAL;
1726 }
1727
1728 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1729 }
1730
1731 int
1732 ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1733 {
1734
1735 /* sanity check. */
1736 if (in6p == NULL)
1737 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n");
1738
1739 if (in6p->in6p_sp == NULL)
1740 return 0;
1741
1742 if (in6p->in6p_sp->sp_in != NULL) {
1743 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1744 in6p->in6p_sp->sp_in = NULL;
1745 }
1746
1747 if (in6p->in6p_sp->sp_out != NULL) {
1748 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1749 in6p->in6p_sp->sp_out = NULL;
1750 }
1751
1752 ipsec_delpcbpolicy(in6p->in6p_sp);
1753 in6p->in6p_sp = NULL;
1754
1755 return 0;
1756 }
1757 #endif
1758
1759 /*
1760 * Return the current level.
1761 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE is always returned.
1762 */
1763 u_int
1764 ipsec_get_reqlevel(isr)
1765 struct ipsecrequest *isr;
1766 {
1767 u_int level = 0;
1768 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1769
1770 /* sanity check */
1771 if (isr == NULL || isr->sp == NULL)
1772 panic("ipsec_get_reqlevel: NULL pointer is passed.\n");
1773 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1774 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family)
1775 panic("ipsec_get_reqlevel: family mismatched.\n");
1776
1777 /* XXX note that we have ipseclog() expanded here - code sync issue */
1778 #define IPSEC_CHECK_DEFAULT(lev) \
1779 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1780 && (lev) != IPSEC_LEVEL_UNIQUE) \
1781 ? (ipsec_debug \
1782 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1783 (lev), IPSEC_LEVEL_REQUIRE) \
1784 : (void)0), \
1785 (lev) = IPSEC_LEVEL_REQUIRE, \
1786 (lev) \
1787 : (lev))
1788
1789 /* set default level */
1790 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1791 #if INET
1792 case AF_INET:
1793 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1794 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1795 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1796 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1797 break;
1798 #endif
1799 #if INET6
1800 case AF_INET6:
1801 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1802 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1803 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1804 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1805 break;
1806 #endif /* INET6 */
1807 default:
1808 panic("key_get_reqlevel: Unknown family. %d\n",
1809 ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1810 }
1811
1812 #undef IPSEC_CHECK_DEFAULT
1813
1814 /* set level */
1815 switch (isr->level) {
1816 case IPSEC_LEVEL_DEFAULT:
1817 switch (isr->saidx.proto) {
1818 case IPPROTO_ESP:
1819 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1820 level = esp_net_deflev;
1821 else
1822 level = esp_trans_deflev;
1823 break;
1824 case IPPROTO_AH:
1825 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1826 level = ah_net_deflev;
1827 else
1828 level = ah_trans_deflev;
1829 break;
1830 case IPPROTO_IPCOMP:
1831 /*
1832 * we don't really care; the IPComp document says that
1833 * we shouldn't compress small packets
1834 */
1835 level = IPSEC_LEVEL_USE;
1836 break;
1837 default:
1838 panic("ipsec_get_reqlevel: "
1839 "Illegal protocol defined %u\n",
1840 isr->saidx.proto);
1841 }
1842 break;
1843
1844 case IPSEC_LEVEL_USE:
1845 case IPSEC_LEVEL_REQUIRE:
1846 level = isr->level;
1847 break;
1848 case IPSEC_LEVEL_UNIQUE:
1849 level = IPSEC_LEVEL_REQUIRE;
1850 break;
1851
1852 default:
1853 panic("ipsec_get_reqlevel: Illegal IPsec level %u\n",
1854 isr->level);
1855 }
1856
1857 return level;
1858 }
1859
1860 /*
1861 * Check AH/ESP integrity.
1862 * OUT:
1863 * 0: valid
1864 * 1: invalid
1865 */
1866 static int
1867 ipsec_in_reject(sp, m)
1868 struct secpolicy *sp;
1869 struct mbuf *m;
1870 {
1871 struct ipsecrequest *isr;
1872 u_int level;
1873 int need_auth, need_conf, need_icv;
1874
1875 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1876 printf("ipsec_in_reject: using SP\n");
1877 kdebug_secpolicy(sp));
1878
1879 /* check policy */
1880 switch (sp->policy) {
1881 case IPSEC_POLICY_DISCARD:
1882 case IPSEC_POLICY_GENERATE:
1883 return 1;
1884 case IPSEC_POLICY_BYPASS:
1885 case IPSEC_POLICY_NONE:
1886 return 0;
1887
1888 case IPSEC_POLICY_IPSEC:
1889 break;
1890
1891 case IPSEC_POLICY_ENTRUST:
1892 default:
1893 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
1894 }
1895
1896 need_auth = 0;
1897 need_conf = 0;
1898 need_icv = 0;
1899
1900 /* XXX should compare policy against ipsec header history */
1901
1902 for (isr = sp->req; isr != NULL; isr = isr->next) {
1903
1904 /* get current level */
1905 level = ipsec_get_reqlevel(isr);
1906
1907 switch (isr->saidx.proto) {
1908 case IPPROTO_ESP:
1909 if (level == IPSEC_LEVEL_REQUIRE) {
1910 need_conf++;
1911
1912 #if 0
1913 /* this won't work with multiple input threads - isr->sav would change
1914 * with every packet and is not necessarily related to the current packet
1915 * being processed. If ESP processing is required - the esp code should
1916 * make sure that the integrity check is present and correct. I don't see
1917 * why it would be necessary to check for the presence of the integrity
1918 * check value here. I think this is just wrong.
1919 * isr->sav has been removed.
1920 * %%%%%% this needs to be re-worked at some point but I think the code below can
1921 * be ignored for now.
1922 */
1923 if (isr->sav != NULL
1924 && isr->sav->flags == SADB_X_EXT_NONE
1925 && isr->sav->alg_auth != SADB_AALG_NONE)
1926 need_icv++;
1927 #endif
1928 }
1929 break;
1930 case IPPROTO_AH:
1931 if (level == IPSEC_LEVEL_REQUIRE) {
1932 need_auth++;
1933 need_icv++;
1934 }
1935 break;
1936 case IPPROTO_IPCOMP:
1937 /*
1938 * we don't really care; the IPComp document says that
1939 * we shouldn't compress small packets, and an IPComp policy
1940 * should always be treated as being at the "use" level.
1941 */
1942 break;
1943 }
1944 }
1945
1946 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1947 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
1948 need_auth, need_conf, need_icv, m->m_flags));
1949
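/*
 * Descriptive note on the check below: the packet is rejected if any
 * protection the policy requires was not actually applied on input --
 * confidentiality required but the mbuf was never decrypted
 * (M_DECRYPTED), an ESP integrity check value required but not
 * verified (M_AUTHIPDGM), or AH required but no authenticated header
 * seen (M_AUTHIPHDR).
 */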
1950 if ((need_conf && !(m->m_flags & M_DECRYPTED))
1951 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
1952 || (need_auth && !(m->m_flags & M_AUTHIPHDR)))
1953 return 1;
1954
1955 return 0;
1956 }
1957
1958 /*
1959 * Check AH/ESP integrity.
1960 * This function is called from tcp_input(), udp_input(),
1961 * and {ah,esp}4_input for tunnel mode
1962 */
1963 int
1964 ipsec4_in_reject_so(m, so)
1965 struct mbuf *m;
1966 struct socket *so;
1967 {
1968 struct secpolicy *sp = NULL;
1969 int error;
1970 int result;
1971
1972 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
1973 /* sanity check */
1974 if (m == NULL)
1975 return 0; /* XXX should be panic ? */
1976
1977 /* get SP for this packet.
1978 * When we are called from ip_forward(), we call
1979 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
1980 */
1981 if (so == NULL)
1982 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
1983 else
1984 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
1985
1986 if (sp == NULL)
1987 return 0; /* XXX should be panic ?
1988 * -> No, there may be an error. */
1989
1990 result = ipsec_in_reject(sp, m);
1991 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1992 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
1993 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
1994 key_freesp(sp, KEY_SADB_UNLOCKED);
1995
1996 return result;
1997 }
1998
1999 int
2000 ipsec4_in_reject(m, inp)
2001 struct mbuf *m;
2002 struct inpcb *inp;
2003 {
2004
2005 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2006 if (inp == NULL)
2007 return ipsec4_in_reject_so(m, NULL);
2008 if (inp->inp_socket)
2009 return ipsec4_in_reject_so(m, inp->inp_socket);
2010 else
2011 panic("ipsec4_in_reject: invalid inpcb/socket");
2012
2013 /* NOTREACHED */
2014 return 0;
2015 }
2016
2017 #if INET6
2018 /*
2019 * Check AH/ESP integrity.
2020 * This function is called from tcp6_input(), udp6_input(),
2021 * and {ah,esp}6_input for tunnel mode
2022 */
2023 int
2024 ipsec6_in_reject_so(m, so)
2025 struct mbuf *m;
2026 struct socket *so;
2027 {
2028 struct secpolicy *sp = NULL;
2029 int error;
2030 int result;
2031
2032 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2033 /* sanity check */
2034 if (m == NULL)
2035 return 0; /* XXX should be panic ? */
2036
2037 /* get SP for this packet.
2038 * When we are called from ip_forward(), we call
2039 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2040 */
2041 if (so == NULL)
2042 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2043 else
2044 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2045
2046 if (sp == NULL)
2047 return 0; /* XXX should be panic ? */
2048
2049 result = ipsec_in_reject(sp, m);
2050 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2051 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2052 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2053 key_freesp(sp, KEY_SADB_UNLOCKED);
2054
2055 return result;
2056 }
2057
2058 int
2059 ipsec6_in_reject(m, in6p)
2060 struct mbuf *m;
2061 struct in6pcb *in6p;
2062 {
2063
2064 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2065 if (in6p == NULL)
2066 return ipsec6_in_reject_so(m, NULL);
2067 if (in6p->in6p_socket)
2068 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2069 else
2070 panic("ipsec6_in_reject: invalid in6p/socket");
2071
2072 /* NOTREACHED */
2073 return 0;
2074 }
2075 #endif
2076
2077 /*
2078 * Compute the byte size to be occupied by the IPsec header(s).
2079 * In tunnel mode, this includes the size of the outer IP header.
2080 * NOTE: the SP passed in is not freed here; the caller must free it.
2081 */
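/*
 * Illustrative example: for a single ESP request in tunnel mode over
 * IPv4, the estimate below works out to esp_hdrsiz(isr) plus
 * sizeof(struct ip) for the outer header; AH and IPComp requests
 * contribute ah_hdrsiz(isr) and sizeof(struct ipcomp) respectively.
 */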
2082 size_t
2083 ipsec_hdrsiz(sp)
2084 struct secpolicy *sp;
2085 {
2086 struct ipsecrequest *isr;
2087 size_t siz, clen;
2088
2089 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2090 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2091 printf("ipsec_hdrsiz: using SP\n");
2092 kdebug_secpolicy(sp));
2093
2094 /* check policy */
2095 switch (sp->policy) {
2096 case IPSEC_POLICY_DISCARD:
2097 case IPSEC_POLICY_GENERATE:
2098 case IPSEC_POLICY_BYPASS:
2099 case IPSEC_POLICY_NONE:
2100 return 0;
2101
2102 case IPSEC_POLICY_IPSEC:
2103 break;
2104
2105 case IPSEC_POLICY_ENTRUST:
2106 default:
2107 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
2108 }
2109
2110 siz = 0;
2111
2112 for (isr = sp->req; isr != NULL; isr = isr->next) {
2113
2114 clen = 0;
2115
2116 switch (isr->saidx.proto) {
2117 case IPPROTO_ESP:
2118 #if IPSEC_ESP
2119 clen = esp_hdrsiz(isr);
2120 #else
2121 clen = 0; /*XXX*/
2122 #endif
2123 break;
2124 case IPPROTO_AH:
2125 clen = ah_hdrsiz(isr);
2126 break;
2127 case IPPROTO_IPCOMP:
2128 clen = sizeof(struct ipcomp);
2129 break;
2130 }
2131
2132 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2133 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2134 case AF_INET:
2135 clen += sizeof(struct ip);
2136 break;
2137 #if INET6
2138 case AF_INET6:
2139 clen += sizeof(struct ip6_hdr);
2140 break;
2141 #endif
2142 default:
2143 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2144 "unknown AF %d in IPsec tunnel SA\n",
2145 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2146 break;
2147 }
2148 }
2149 siz += clen;
2150 }
2151
2152 return siz;
2153 }
2154
2155 /* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2156 size_t
2157 ipsec4_hdrsiz(m, dir, inp)
2158 struct mbuf *m;
2159 u_int dir;
2160 struct inpcb *inp;
2161 {
2162 struct secpolicy *sp = NULL;
2163 int error;
2164 size_t size;
2165
2166 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2167 /* sanity check */
2168 if (m == NULL)
2169 return 0; /* XXX should be panic ? */
2170 if (inp != NULL && inp->inp_socket == NULL)
2171 panic("ipsec4_hdrsiz: why is socket NULL but there is PCB.");
2172
2173 /* get SP for this packet.
2174 * When we are called from ip_forward(), we call
2175 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2176 */
2177 if (inp == NULL)
2178 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2179 else
2180 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2181
2182 if (sp == NULL)
2183 return 0; /* XXX should be panic ? */
2184
2185 size = ipsec_hdrsiz(sp);
2186 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2187 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2188 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2189 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2190 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2191 key_freesp(sp, KEY_SADB_UNLOCKED);
2192
2193 return size;
2194 }
2195
2196 #if INET6
2197 /* This function is called from ipsec6_hdrsize_tcp(),
2198 * and maybe from ip6_forward().
2199 */
2200 size_t
2201 ipsec6_hdrsiz(m, dir, in6p)
2202 struct mbuf *m;
2203 u_int dir;
2204 struct in6pcb *in6p;
2205 {
2206 struct secpolicy *sp = NULL;
2207 int error;
2208 size_t size;
2209
2210 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2211 /* sanity check */
2212 if (m == NULL)
2213 return 0; /* XXX should be panic ? */
2214 if (in6p != NULL && in6p->in6p_socket == NULL)
2215 panic("ipsec6_hdrsiz: why is socket NULL but there is PCB.");
2216
2217 /* get SP for this packet */
2218 /* XXX Is it right to call with IP_FORWARDING. */
2219 if (in6p == NULL)
2220 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2221 else
2222 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2223
2224 if (sp == NULL)
2225 return 0;
2226 size = ipsec_hdrsiz(sp);
2227 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2228 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2229 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2230 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2231 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2232 key_freesp(sp, KEY_SADB_UNLOCKED);
2233
2234 return size;
2235 }
2236 #endif /*INET6*/
2237
2238 #if INET
2239 /*
2240 * encapsulate for ipsec tunnel.
2241 * ip->ip_src must be fixed later on.
2242 */
2243 int
2244 ipsec4_encapsulate(m, sav)
2245 struct mbuf *m;
2246 struct secasvar *sav;
2247 {
2248 struct ip *oip;
2249 struct ip *ip;
2250 size_t hlen;
2251 size_t plen;
2252
2253 /* can't tunnel between different AFs */
2254 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2255 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2256 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2257 m_freem(m);
2258 return EINVAL;
2259 }
2260 #if 0
2261 /* XXX if the dst is myself, perform nothing. */
2262 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2263 m_freem(m);
2264 return EINVAL;
2265 }
2266 #endif
2267
2268 if (m->m_len < sizeof(*ip))
2269 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2270
2271 ip = mtod(m, struct ip *);
2272 #ifdef _IP_VHL
2273 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2274 #else
2275 hlen = ip->ip_hl << 2;
2276 #endif
2277
2278 if (m->m_len != hlen)
2279 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2280
2281 /* generate header checksum */
2282 ip->ip_sum = 0;
2284 ip->ip_sum = in_cksum(m, hlen);
2288
2289 plen = m->m_pkthdr.len;
2290
2291 /*
2292 * grow the mbuf to accommodate the new IPv4 header.
2293 * NOTE: IPv4 options will never be copied.
2294 */
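/*
 * The code below moves the inner IPv4 header (hlen bytes) to the front
 * of the second mbuf (either a freshly allocated one or the existing
 * m->m_next) and trims the first mbuf to sizeof(struct ip), which then
 * holds the new outer header constructed further down.
 */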
2295 if (M_LEADINGSPACE(m->m_next) < hlen) {
2296 struct mbuf *n;
2297 MGET(n, M_DONTWAIT, MT_DATA);
2298 if (!n) {
2299 m_freem(m);
2300 return ENOBUFS;
2301 }
2302 n->m_len = hlen;
2303 n->m_next = m->m_next;
2304 m->m_next = n;
2305 m->m_pkthdr.len += hlen;
2306 oip = mtod(n, struct ip *);
2307 } else {
2308 m->m_next->m_len += hlen;
2309 m->m_next->m_data -= hlen;
2310 m->m_pkthdr.len += hlen;
2311 oip = mtod(m->m_next, struct ip *);
2312 }
2313 ip = mtod(m, struct ip *);
2314 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2315 m->m_len = sizeof(struct ip);
2316 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2317
2318 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2319 /* ECN consideration. */
2320 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2321 #ifdef _IP_VHL
2322 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2323 #else
2324 ip->ip_hl = sizeof(struct ip) >> 2;
2325 #endif
2326 ip->ip_off &= htons(~IP_OFFMASK);
2327 ip->ip_off &= htons(~IP_MF);
2328 switch (ip4_ipsec_dfbit) {
2329 case 0: /* clear DF bit */
2330 ip->ip_off &= htons(~IP_DF);
2331 break;
2332 case 1: /* set DF bit */
2333 ip->ip_off |= htons(IP_DF);
2334 break;
2335 default: /* copy DF bit */
2336 break;
2337 }
2338 ip->ip_p = IPPROTO_IPIP;
2339 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2340 ip->ip_len = htons(plen + sizeof(struct ip));
2341 else {
2342 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2343 "leave ip_len as is (invalid packet)\n"));
2344 }
2345 ip->ip_id = ip_randomid();
2346 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2347 &ip->ip_src, sizeof(ip->ip_src));
2348 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2349 &ip->ip_dst, sizeof(ip->ip_dst));
2350 ip->ip_ttl = IPDEFTTL;
2351
2352 /* XXX Should ip_src be updated later ? */
2353
2354 return 0;
2355 }
2356
2357 /*
2358 * encapsulate for ipsec tunnel.
2359 * ip->ip_src must be fixed later on.
2360 */
2361 int
2362 ipsec4_encapsulate_utun_esp_keepalive(m_ptr, sav)
2363 struct mbuf **m_ptr;
2364 struct secasvar *sav;
2365 {
2366 struct ip *ip;
2367 size_t plen;
2368 struct mbuf *m = *m_ptr;
2369
2370 /* can't tunnel between different AFs */
2371 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2372 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2373 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2374 m_freem(m);
2375 *m_ptr = NULL;
2376 return EINVAL;
2377 }
2378
2379 plen = m->m_pkthdr.len;
2380
2381 /*
2382 * grow the mbuf to accommodate the new IPv4 header.
2383 * NOTE: IPv4 options will never be copied.
2384 */
2385 {
2386 struct mbuf *n;
2387 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
2388 if (!n) {
2389 m_freem(m);
2390 *m_ptr = NULL;
2391 return ENOBUFS;
2392 }
2393 if (m->m_flags & M_PKTHDR) {
2394 M_COPY_PKTHDR(n, m);
2395 m->m_flags &= ~M_PKTHDR;
2396 }
2397 MH_ALIGN(n, sizeof(*ip));
2398 n->m_len = sizeof(*ip);
2399 n->m_next = m;
2400 n->m_pkthdr.len = (plen + n->m_len);
2401 m_fixhdr(m);
2402 m = n;
2403 *m_ptr = m;
2404 plen = m->m_pkthdr.len;
2405 }
2406 ip = mtod(m, __typeof__(ip));
2407
2408 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2409 // ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2410 #ifdef _IP_VHL
2411 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(*ip) >> 2);
2412 #else
2413 ip->ip_hl = sizeof(*ip) >> 2;
2414 #endif
2415 ip->ip_off &= htons(~IP_OFFMASK);
2416 ip->ip_off &= htons(~IP_MF);
2417 switch (ip4_ipsec_dfbit) {
2418 case 0: /* clear DF bit */
2419 ip->ip_off &= htons(~IP_DF);
2420 break;
2421 case 1: /* set DF bit */
2422 ip->ip_off |= htons(IP_DF);
2423 break;
2424 default: /* copy DF bit */
2425 break;
2426 }
2427 ip->ip_p = IPPROTO_IPIP;
2428 if (plen < IP_MAXPACKET)
2429 ip->ip_len = htons(plen);
2430 else {
2431 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2432 "leave ip_len as is (invalid packet)\n"));
2433 }
2434 ip->ip_id = ip_randomid();
2435 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2436 &ip->ip_src, sizeof(ip->ip_src));
2437 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2438 &ip->ip_dst, sizeof(ip->ip_dst));
2439 ip->ip_ttl = IPDEFTTL;
2440
2441 /* XXX Should ip_src be updated later ? */
2442
2443 return 0;
2444 }
2445 #endif /*INET*/
2446
2447 #if INET6
2448 int
2449 ipsec6_encapsulate(m, sav)
2450 struct mbuf *m;
2451 struct secasvar *sav;
2452 {
2453 struct ip6_hdr *oip6;
2454 struct ip6_hdr *ip6;
2455 size_t plen;
2456
2457 /* can't tunnel between different AFs */
2458 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2459 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2460 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2461 m_freem(m);
2462 return EINVAL;
2463 }
2464 #if 0
2465 /* XXX if the dst is myself, perform nothing. */
2466 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2467 m_freem(m);
2468 return EINVAL;
2469 }
2470 #endif
2471
2472 plen = m->m_pkthdr.len;
2473
2474 /*
2475 * grow the mbuf to accommodate the new IPv6 header.
2476 */
2477 if (m->m_len != sizeof(struct ip6_hdr))
2478 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2479 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2480 struct mbuf *n;
2481 MGET(n, M_DONTWAIT, MT_DATA);
2482 if (!n) {
2483 m_freem(m);
2484 return ENOBUFS;
2485 }
2486 n->m_len = sizeof(struct ip6_hdr);
2487 n->m_next = m->m_next;
2488 m->m_next = n;
2489 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2490 oip6 = mtod(n, struct ip6_hdr *);
2491 } else {
2492 m->m_next->m_len += sizeof(struct ip6_hdr);
2493 m->m_next->m_data -= sizeof(struct ip6_hdr);
2494 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2495 oip6 = mtod(m->m_next, struct ip6_hdr *);
2496 }
2497 ip6 = mtod(m, struct ip6_hdr *);
2498 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2499
2500 /* Clear the KAME-embedded scope id from link-local addresses in the inner header */
2501 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src))
2502 oip6->ip6_src.s6_addr16[1] = 0;
2503 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst))
2504 oip6->ip6_dst.s6_addr16[1] = 0;
2505
2506 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2507 /* ECN consideration. */
2508 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2509 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2510 ip6->ip6_plen = htons(plen);
2511 else {
2512 /* ip6->ip6_plen will be updated in ip6_output() */
2513 }
2514 ip6->ip6_nxt = IPPROTO_IPV6;
2515 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2516 &ip6->ip6_src, sizeof(ip6->ip6_src));
2517 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2518 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2519 ip6->ip6_hlim = IPV6_DEFHLIM;
2520
2521 /* XXX Should ip6_src be updated later ? */
2522
2523 return 0;
2524 }
2525
2526 static int
2527 ipsec64_encapsulate(m, sav)
2528 struct mbuf *m;
2529 struct secasvar *sav;
2530 {
2531 struct ip6_hdr *ip6, *ip6i;
2532 struct ip *ip;
2533 size_t plen;
2534 u_int8_t hlim;
2535
2536 /* tunneling over IPv4 */
2537 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2538 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2539 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2540 m_freem(m);
2541 return EINVAL;
2542 }
2543 #if 0
2544 /* XXX if the dst is myself, perform nothing. */
2545 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2546 m_freem(m);
2547 return EINVAL;
2548 }
2549 #endif
2550
2551 plen = m->m_pkthdr.len;
2552 ip6 = mtod(m, struct ip6_hdr *);
2553 hlim = ip6->ip6_hlim;
2554 /*
2555 * grow the mbuf to accommodate the new IPv4 header.
2556 */
2557 if (m->m_len != sizeof(struct ip6_hdr))
2558 panic("ipsec64_encapsulate: assumption failed (first mbuf length)");
2559 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2560 struct mbuf *n;
2561 MGET(n, M_DONTWAIT, MT_DATA);
2562 if (!n) {
2563 m_freem(m);
2564 return ENOBUFS;
2565 }
2566 n->m_len = sizeof(struct ip6_hdr);
2567 n->m_next = m->m_next;
2568 m->m_next = n;
2569 m->m_pkthdr.len += sizeof(struct ip);
2570 ip6i = mtod(n, struct ip6_hdr *);
2571 } else {
2572 m->m_next->m_len += sizeof(struct ip6_hdr);
2573 m->m_next->m_data -= sizeof(struct ip6_hdr);
2574 m->m_pkthdr.len += sizeof(struct ip);
2575 ip6i = mtod(m->m_next, struct ip6_hdr *);
2576 }
2577
2578 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2579 ip = mtod(m, struct ip *);
2580 m->m_len = sizeof(struct ip);
2581 /*
2582 * Fill in some of the IPv4 fields - we don't need all of them
2583 * because the rest will be filled in by ip_output
2584 */
2585 ip->ip_v = IPVERSION;
2586 ip->ip_hl = sizeof(struct ip) >> 2;
2587 ip->ip_id = 0;
2588 ip->ip_sum = 0;
2589 ip->ip_tos = 0;
2590 ip->ip_off = 0;
2591 ip->ip_ttl = hlim;
2592 ip->ip_p = IPPROTO_IPV6;
2593
2594 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2595 /* ECN consideration. */
2596 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2597
2598 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2599 ip->ip_len = htons(plen + sizeof(struct ip));
2600 else {
2601 ip->ip_len = htons(plen);
2602 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2603 "leave ip_len as is (invalid packet)\n"));
2604 }
2605 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2606 &ip->ip_src, sizeof(ip->ip_src));
2607 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2608 &ip->ip_dst, sizeof(ip->ip_dst));
2609
2610 return 0;
2611 }
2612
2613 int
2614 ipsec6_encapsulate_utun_esp_keepalive(m_ptr, sav)
2615 struct mbuf **m_ptr;
2616 struct secasvar *sav;
2617 {
2618 struct ip6_hdr *ip6;
2619 size_t plen;
2620 struct mbuf *m = *m_ptr;
2621
2622 /* can't tunnel between different AFs */
2623 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2624 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2625 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2626 m_freem(m);
2627 *m_ptr = NULL;
2628 return EINVAL;
2629 }
2630
2631 plen = m->m_pkthdr.len;
2632
2633 /*
2634 * grow the mbuf to accommodate the new IPv6 header.
2635 */
2636 {
2637 struct mbuf *n;
2638 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
2639 if (!n) {
2640 m_freem(m);
2641 *m_ptr = NULL;
2642 return ENOBUFS;
2643 }
2644 if (m->m_flags & M_PKTHDR) {
2645 M_COPY_PKTHDR(n, m);
2646 m->m_flags &= ~M_PKTHDR;
2647 }
2648 MH_ALIGN(n, sizeof(*ip6));
2649 n->m_len = sizeof(*ip6);
2650 n->m_next = m;
2651 n->m_pkthdr.len = (plen + n->m_len);
2652 m_fixhdr(m);
2653 m = n;
2654 *m_ptr = m;
2655 plen = m->m_pkthdr.len;
2656 }
2657 ip6 = mtod(m, __typeof__(ip6));
2658
2659 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2660 if (plen < IPV6_MAXPACKET)
2661 ip6->ip6_plen = htons(plen);
2662 else {
2663 /* ip6->ip6_plen will be updated in ip6_output() */
2664 }
2665 ip6->ip6_nxt = IPPROTO_IPV6;
2666 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2667 &ip6->ip6_src, sizeof(ip6->ip6_src));
2668 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2669 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2670 ip6->ip6_hlim = IPV6_DEFHLIM;
2671
2672 /* XXX Should ip6_src be updated later ? */
2673
2674 return 0;
2675 }
2676
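/*
 * Refresh the SA's cached IPv6 route for the tunnel destination,
 * apply the outbound transform (ESP or AH) to the already-encapsulated
 * packet, and hand it to ip6_output() on behalf of the ipsec
 * interface, honoring any flow-control advisory that comes back.
 */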
2677 int
2678 ipsec6_update_routecache_and_output(state, sav)
2679 struct ipsec_output_state *state;
2680 struct secasvar *sav;
2681 {
2682 struct sockaddr_in6* dst6;
2683 struct route *ro6;
2684 struct ip6_hdr *ip6;
2685 errno_t error = 0;
2686
2687 int plen;
2688 struct ip6_out_args ip6oa;
2689 struct route_in6 ro6_new;
2690 struct flowadv *adv = NULL;
2691
2692 if (!state->m) {
2693 return EINVAL;
2694 }
2695 ip6 = mtod(state->m, struct ip6_hdr *);
2696
2697 // grab sadb_mutex, before updating sah's route cache
2698 lck_mtx_lock(sadb_mutex);
2699 ro6 = &sav->sah->sa_route;
2700 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2701 if (ro6->ro_rt) {
2702 RT_LOCK(ro6->ro_rt);
2703 }
2704 if (ROUTE_UNUSABLE(ro6) ||
2705 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2706 if (ro6->ro_rt != NULL)
2707 RT_UNLOCK(ro6->ro_rt);
2708 ROUTE_RELEASE(ro6);
2709 }
2710 if (ro6->ro_rt == 0) {
2711 bzero(dst6, sizeof(*dst6));
2712 dst6->sin6_family = AF_INET6;
2713 dst6->sin6_len = sizeof(*dst6);
2714 dst6->sin6_addr = ip6->ip6_dst;
2715 rtalloc(ro6);
2716 if (ro6->ro_rt) {
2717 RT_LOCK(ro6->ro_rt);
2718 }
2719 }
2720 if (ro6->ro_rt == 0) {
2721 ip6stat.ip6s_noroute++;
2722 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2723 error = EHOSTUNREACH;
2724 // release sadb_mutex, after updating sah's route cache
2725 lck_mtx_unlock(sadb_mutex);
2726 return error;
2727 }
2728
2729 /*
2730 * adjust state->dst if tunnel endpoint is offlink
2731 *
2732 * XXX: caching rt_gateway value in the state is
2733 * not really good, since it may point elsewhere
2734 * when the gateway gets modified to a larger
2735 * sockaddr via rt_setgate(). This is currently
2736 * addressed by SA_SIZE roundup in that routine.
2737 */
2738 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
2739 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2740 RT_UNLOCK(ro6->ro_rt);
2741 ROUTE_RELEASE(&state->ro);
2742 route_copyout(&state->ro, ro6, sizeof(state->ro));
2743 state->dst = (struct sockaddr *)dst6;
2744 state->tunneled = 6;
2745 // release sadb_mutex, after updating sah's route cache
2746 lck_mtx_unlock(sadb_mutex);
2747
2748 state->m = ipsec6_splithdr(state->m);
2749 if (!state->m) {
2750 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2751 error = ENOMEM;
2752 return error;
2753 }
2754
2755 ip6 = mtod(state->m, struct ip6_hdr *);
2756 switch (sav->sah->saidx.proto) {
2757 case IPPROTO_ESP:
2758 #if IPSEC_ESP
2759 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2760 #else
2761 m_freem(state->m);
2762 error = EINVAL;
2763 #endif
2764 break;
2765 case IPPROTO_AH:
2766 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2767 break;
2768 case IPPROTO_IPCOMP:
2769 /* XXX code should be here */
2770 /*FALLTHROUGH*/
2771 default:
2772 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2773 m_freem(state->m);
2774 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2775 error = EINVAL;
2776 break;
2777 }
2778 if (error) {
2779 // If error, packet already freed by above output routines
2780 state->m = NULL;
2781 return error;
2782 }
2783
2784 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2785 if (plen > IPV6_MAXPACKET) {
2786 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2787 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2788 error = EINVAL;/*XXX*/
2789 return error;
2790 }
2791 ip6 = mtod(state->m, struct ip6_hdr *);
2792 ip6->ip6_plen = htons(plen);
2793
2794 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2795
2796 /* Increment statistics */
2797 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, mbuf_pkthdr_len(state->m), 0);
2798
2799 /* Send to ip6_output */
2800 bzero(&ro6_new, sizeof(ro6_new));
2801 bzero(&ip6oa, sizeof(ip6oa));
2802 ip6oa.ip6oa_flowadv.code = 0;
2803 ip6oa.ip6oa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR;
2804 if (state->outgoing_if) {
2805 ip6oa.ip6oa_boundif = state->outgoing_if;
2806 ip6oa.ip6oa_flags |= IPOAF_BOUND_IF;
2807 }
2808
2809 adv = &ip6oa.ip6oa_flowadv;
2810 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2811
2812 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2813 error = ENOBUFS;
2814 ifnet_disable_output(sav->sah->ipsec_if);
2815 return error;
2816 }
2817
2818 return 0;
2819 }
2820
2821 int
2822 ipsec46_encapsulate(state, sav)
2823 struct secasvar *sav;
2824 struct ipsec_output_state *state;
2825 {
2826 struct mbuf *m;
2827 struct ip6_hdr *ip6;
2828 struct ip *oip;
2829 struct ip *ip;
2830 size_t hlen;
2831 size_t plen;
2832
2833 m = state->m;
2834 if (!m) {
2835 return EINVAL;
2836 }
2837
2838 /* can't tunnel between different AFs */
2839 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2840 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2841 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2842 m_freem(m);
2843 return EINVAL;
2844 }
2845 #if 0
2846 /* XXX if the dst is myself, perform nothing. */
2847 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2848 m_freem(m);
2849 return EINVAL;
2850 }
2851 #endif
2852
2853 if (m->m_len < sizeof(*ip)) {
2854 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2855 return EINVAL;
2856 }
2857
2858 ip = mtod(m, struct ip *);
2859 #ifdef _IP_VHL
2860 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2861 #else
2862 hlen = ip->ip_hl << 2;
2863 #endif
2864
2865 if (m->m_len != hlen) {
2866 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2867 return EINVAL;
2868 }
2869
2870 /* generate header checksum */
2871 ip->ip_sum = 0;
2873 ip->ip_sum = in_cksum(m, hlen);
2877
2878 plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len
2879
2880 /*
2881 * First move the IPv4 header to the second mbuf in the chain
2882 */
2883 if (M_LEADINGSPACE(m->m_next) < hlen) {
2884 struct mbuf *n;
2885 MGET(n, M_DONTWAIT, MT_DATA);
2886 if (!n) {
2887 m_freem(m);
2888 return ENOBUFS;
2889 }
2890 n->m_len = hlen;
2891 n->m_next = m->m_next;
2892 m->m_next = n;
2893 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2894 oip = mtod(n, struct ip *);
2895 } else {
2896 m->m_next->m_len += hlen;
2897 m->m_next->m_data -= hlen;
2898 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2899 oip = mtod(m->m_next, struct ip *);
2900 }
2901 ip = mtod(m, struct ip *);
2902 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2903
2904 /*
2905 * Grow the first mbuf to accommodate the new IPv6 header.
2906 */
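/*
 * If there is not enough leading space, a new packet-header mbuf is
 * allocated for the IPv6 header and the old first mbuf (whose inner
 * IPv4 header was already copied to the second mbuf above) is freed;
 * otherwise the first mbuf is simply expanded in place by
 * sizeof(struct ip6_hdr) - hlen bytes.
 */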
2907 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2908 struct mbuf *n;
2909 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2910 if (!n) {
2911 m_freem(m);
2912 return ENOBUFS;
2913 }
2914 M_COPY_PKTHDR(n, m);
2915 MH_ALIGN(n, sizeof(struct ip6_hdr));
2916 n->m_len = sizeof(struct ip6_hdr);
2917 n->m_next = m->m_next;
2918 m->m_next = NULL;
2919 m_freem(m);
2920 state->m = n;
2921 m = state->m;
2922 } else {
2923 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2924 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2925 }
2926 ip6 = mtod(m, struct ip6_hdr *);
2927 ip6->ip6_flow = 0;
2928 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2929 ip6->ip6_vfc |= IPV6_VERSION;
2930
2931 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2932 /* ECN consideration. */
2933 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
2934 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2935 ip6->ip6_plen = htons(plen);
2936 else {
2937 /* ip6->ip6_plen will be updated in ip6_output() */
2938 }
2939
2940 ip6->ip6_nxt = IPPROTO_IPV4;
2941 ip6->ip6_hlim = IPV6_DEFHLIM;
2942
2943 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2944 &ip6->ip6_src, sizeof(ip6->ip6_src));
2945 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2946 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2947
2948 return 0;
2949 }
2950
2951 #endif /*INET6*/
2952
2953 /*
2954 * Check the variable replay window.
2955 * ipsec_chkreplay() performs replay check before ICV verification.
2956 * ipsec_updatereplay() updates replay bitmap. This must be called after
2957 * ICV verification (it also performs replay check, which is usually done
2958 * beforehand).
2959 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2960 *
2961 * based on RFC 2401.
2962 */
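/*
 * Window layout used by both functions below: replay->bitmap is
 * replay->wsize bytes, so the window covers wsizeb = wsize * 8
 * sequence numbers ending at replay->lastseq.  For a packet with
 * seq <= lastseq, diff = lastseq - seq selects byte
 * fr = (wsize - 1) - diff / 8 and bit (diff % 8); a set bit means
 * that sequence number has already been accepted.
 */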
2963 int
2964 ipsec_chkreplay(seq, sav)
2965 u_int32_t seq;
2966 struct secasvar *sav;
2967 {
2968 const struct secreplay *replay;
2969 u_int32_t diff;
2970 int fr;
2971 u_int32_t wsizeb; /* constant: bits of window size */
2972 int frlast; /* constant: last frame */
2973
2974
2975 /* sanity check */
2976 if (sav == NULL)
2977 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2978
2979 lck_mtx_lock(sadb_mutex);
2980 replay = sav->replay;
2981
2982 if (replay->wsize == 0) {
2983 lck_mtx_unlock(sadb_mutex);
2984 return 1; /* no need to check replay. */
2985 }
2986
2987 /* constant */
2988 frlast = replay->wsize - 1;
2989 wsizeb = replay->wsize << 3;
2990
2991 /* sequence number of 0 is invalid */
2992 if (seq == 0) {
2993 lck_mtx_unlock(sadb_mutex);
2994 return 0;
2995 }
2996
2997 /* first time is always okay */
2998 if (replay->count == 0) {
2999 lck_mtx_unlock(sadb_mutex);
3000 return 1;
3001 }
3002
3003 if (seq > replay->lastseq) {
3004 /* larger sequences are okay */
3005 lck_mtx_unlock(sadb_mutex);
3006 return 1;
3007 } else {
3008 /* seq is equal or less than lastseq. */
3009 diff = replay->lastseq - seq;
3010
3011 /* over range to check, i.e. too old or wrapped */
3012 if (diff >= wsizeb) {
3013 lck_mtx_unlock(sadb_mutex);
3014 return 0;
3015 }
3016
3017 fr = frlast - diff / 8;
3018
3019 /* this packet already seen ? */
3020 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
3021 lck_mtx_unlock(sadb_mutex);
3022 return 0;
3023 }
3024
3025 /* out of order but good */
3026 lck_mtx_unlock(sadb_mutex);
3027 return 1;
3028 }
3029 }
3030
3031 /*
3032 * Check the replay counter and update the window if the packet is acceptable.
3033 * OUT: 0: OK (packet accepted)
3034 * 1: NG (packet rejected)
3035 */
3036 int
3037 ipsec_updatereplay(seq, sav)
3038 u_int32_t seq;
3039 struct secasvar *sav;
3040 {
3041 struct secreplay *replay;
3042 u_int32_t diff;
3043 int fr;
3044 u_int32_t wsizeb; /* constant: bits of window size */
3045 int frlast; /* constant: last frame */
3046
3047 /* sanity check */
3048 if (sav == NULL)
3049 panic("ipsec_updatereplay: NULL pointer was passed.\n");
3050
3051 lck_mtx_lock(sadb_mutex);
3052 replay = sav->replay;
3053
3054 if (replay->wsize == 0)
3055 goto ok; /* no need to check replay. */
3056
3057 /* constant */
3058 frlast = replay->wsize - 1;
3059 wsizeb = replay->wsize << 3;
3060
3061 /* sequence number of 0 is invalid */
3062 if (seq == 0) {
lck_mtx_unlock(sadb_mutex);
3063 return 1;
}
3064
3065 /* first time */
3066 if (replay->count == 0) {
3067 replay->lastseq = seq;
3068 bzero(replay->bitmap, replay->wsize);
3069 (replay->bitmap)[frlast] = 1;
3070 goto ok;
3071 }
3072
3073 if (seq > replay->lastseq) {
3074 /* seq is larger than lastseq. */
3075 diff = seq - replay->lastseq;
3076
3077 /* new larger sequence number */
3078 if (diff < wsizeb) {
3079 /* In window */
3080 /* set bit for this packet */
3081 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
3082 (replay->bitmap)[frlast] |= 1;
3083 } else {
3084 /* this packet has a "way larger" sequence number; restart the window */
3085 bzero(replay->bitmap, replay->wsize);
3086 (replay->bitmap)[frlast] = 1;
3087 }
3088 replay->lastseq = seq;
3089
3090 /* larger is good */
3091 } else {
3092 /* seq is equal or less than lastseq. */
3093 diff = replay->lastseq - seq;
3094
3095 /* over range to check, i.e. too old or wrapped */
3096 if (diff >= wsizeb) {
3097 lck_mtx_unlock(sadb_mutex);
3098 return 1;
3099 }
3100
3101 fr = frlast - diff / 8;
3102
3103 /* this packet already seen ? */
3104 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
3105 lck_mtx_unlock(sadb_mutex);
3106 return 1;
3107 }
3108
3109 /* mark as seen */
3110 (replay->bitmap)[fr] |= (1 << (diff % 8));
3111
3112 /* out of order but good */
3113 }
3114
3115 ok:
3116 if (replay->count == ~0) {
3117
3118 /* the 32-bit sequence counter is about to wrap; record another cycle */
3119 replay->overflow++;
3120
3121 /* don't increment, no more packets accepted */
3122 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
3123 lck_mtx_unlock(sadb_mutex);
3124 return 1;
3125 }
3126
3127 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
3128 replay->overflow, ipsec_logsastr(sav)));
3129 }
3130
3131 replay->count++;
3132
3133 lck_mtx_unlock(sadb_mutex);
3134 return 0;
3135 }
3136
3137 /*
3138 * shift variable length buffer to left.
3139 * IN: bitmap: pointer to the buffer
3140 * nbit: the number of bits to shift.
3141 * wsize: buffer size (bytes).
3142 */
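/*
 * The shift is done byte by byte: bitmap[0] holds the oldest end of
 * the window, bits shifted out of each bitmap[i] carry over into
 * bitmap[i-1], and the oldest bits fall off the top of bitmap[0].
 */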
3143 static void
3144 vshiftl(bitmap, nbit, wsize)
3145 unsigned char *bitmap;
3146 int nbit, wsize;
3147 {
3148 int s, j, i;
3149 unsigned char over;
3150
3151 for (j = 0; j < nbit; j += 8) {
3152 s = (nbit - j < 8) ? (nbit - j): 8;
3153 bitmap[0] <<= s;
3154 for (i = 1; i < wsize; i++) {
3155 over = (bitmap[i] >> (8 - s));
3156 bitmap[i] <<= s;
3157 bitmap[i-1] |= over;
3158 }
3159 }
3160
3161 return;
3162 }
3163
3164 const char *
3165 ipsec4_logpacketstr(ip, spi)
3166 struct ip *ip;
3167 u_int32_t spi;
3168 {
3169 static char buf[256] __attribute__((aligned(4)));
3170 char *p;
3171 u_int8_t *s, *d;
3172
3173 s = (u_int8_t *)(&ip->ip_src);
3174 d = (u_int8_t *)(&ip->ip_dst);
3175
3176 p = buf;
3177 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3178 while (p && *p)
3179 p++;
3180 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
3181 s[0], s[1], s[2], s[3]);
3182 while (p && *p)
3183 p++;
3184 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
3185 d[0], d[1], d[2], d[3]);
3186 while (p && *p)
3187 p++;
3188 snprintf(p, sizeof(buf) - (p - buf), ")");
3189
3190 return buf;
3191 }
3192
3193 #if INET6
3194 const char *
3195 ipsec6_logpacketstr(ip6, spi)
3196 struct ip6_hdr *ip6;
3197 u_int32_t spi;
3198 {
3199 static char buf[256] __attribute__((aligned(4)));
3200 char *p;
3201
3202 p = buf;
3203 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3204 while (p && *p)
3205 p++;
3206 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3207 ip6_sprintf(&ip6->ip6_src));
3208 while (p && *p)
3209 p++;
3210 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3211 ip6_sprintf(&ip6->ip6_dst));
3212 while (p && *p)
3213 p++;
3214 snprintf(p, sizeof(buf) - (p - buf), ")");
3215
3216 return buf;
3217 }
3218 #endif /*INET6*/
3219
3220 const char *
3221 ipsec_logsastr(sav)
3222 struct secasvar *sav;
3223 {
3224 static char buf[256] __attribute__((aligned(4)));
3225 char *p;
3226 struct secasindex *saidx = &sav->sah->saidx;
3227
3228 /* validity check */
3229 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3230 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family)
3231 panic("ipsec_logsastr: family mismatched.\n");
3232
3233 p = buf;
3234 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3235 while (p && *p)
3236 p++;
3237 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3238 u_int8_t *s, *d;
3239 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3240 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3241 snprintf(p, sizeof(buf) - (p - buf),
3242 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3243 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3244 }
3245 #if INET6
3246 else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3247 snprintf(p, sizeof(buf) - (p - buf),
3248 "src=%s",
3249 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3250 while (p && *p)
3251 p++;
3252 snprintf(p, sizeof(buf) - (p - buf),
3253 " dst=%s",
3254 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3255 }
3256 #endif
3257 while (p && *p)
3258 p++;
3259 snprintf(p, sizeof(buf) - (p - buf), ")");
3260
3261 return buf;
3262 }
3263
3264 void
3265 ipsec_dumpmbuf(m)
3266 struct mbuf *m;
3267 {
3268 int totlen;
3269 int i;
3270 u_char *p;
3271
3272 totlen = 0;
3273 printf("---\n");
3274 while (m) {
3275 p = mtod(m, u_char *);
3276 for (i = 0; i < m->m_len; i++) {
3277 printf("%02x ", p[i]);
3278 totlen++;
3279 if (totlen % 16 == 0)
3280 printf("\n");
3281 }
3282 m = m->m_next;
3283 }
3284 if (totlen % 16 != 0)
3285 printf("\n");
3286 printf("---\n");
3287 }
3288
3289 #if INET
3290 /*
3291 * IPsec output logic for IPv4.
3292 */
3293 static int
3294 ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3295 {
3296 struct ip *ip = NULL;
3297 int error = 0;
3298 struct sockaddr_in *dst4;
3299 struct route *ro4;
3300
3301 /* validity check */
3302 if (sav == NULL || sav->sah == NULL) {
3303 error = EINVAL;
3304 goto bad;
3305 }
3306
3307 /*
3308 * If there is no valid SA, we give up processing any
3309 * further. In such a case, the SA's status is changed
3310 * from DYING to DEAD after allocation. If a packet is
3311 * sent to the receiver with a dead SA, the receiver
3312 * cannot decode it because the SA is already dead.
3313 */
3314 if (sav->state != SADB_SASTATE_MATURE
3315 && sav->state != SADB_SASTATE_DYING) {
3316 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3317 error = EINVAL;
3318 goto bad;
3319 }
3320
3321 state->outgoing_if = sav->sah->outgoing_if;
3322
3323 /*
3324 * The SA status may change while we are referring to it.
3325 * (The original KAME code raised splsoftnet() here.)
3326 */
3327
3328 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3329 /*
3330 * build IPsec tunnel.
3331 */
3332 state->m = ipsec4_splithdr(state->m);
3333 if (!state->m) {
3334 error = ENOMEM;
3335 goto bad;
3336 }
3337
3338 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3339 error = ipsec46_encapsulate(state, sav);
3340 if (error) {
3341 // packet already freed by encapsulation error handling
3342 state->m = NULL;
3343 return error;
3344 }
3345
3346 error = ipsec6_update_routecache_and_output(state, sav);
3347 return error;
3348
3349 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3350 error = ipsec4_encapsulate(state->m, sav);
3351 if (error) {
3352 state->m = NULL;
3353 goto bad;
3354 }
3355 ip = mtod(state->m, struct ip *);
3356
3357 // grab sadb_mutex, before updating sah's route cache
3358 lck_mtx_lock(sadb_mutex);
3359 ro4= &sav->sah->sa_route;
3360 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3361 if (ro4->ro_rt != NULL) {
3362 RT_LOCK(ro4->ro_rt);
3363 }
3364 if (ROUTE_UNUSABLE(ro4) ||
3365 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3366 if (ro4->ro_rt != NULL)
3367 RT_UNLOCK(ro4->ro_rt);
3368 ROUTE_RELEASE(ro4);
3369 }
3370 if (ro4->ro_rt == 0) {
3371 dst4->sin_family = AF_INET;
3372 dst4->sin_len = sizeof(*dst4);
3373 dst4->sin_addr = ip->ip_dst;
3374 rtalloc(ro4);
3375 if (ro4->ro_rt == 0) {
3376 OSAddAtomic(1, &ipstat.ips_noroute);
3377 error = EHOSTUNREACH;
3378 // release sadb_mutex, after updating sah's route cache
3379 lck_mtx_unlock(sadb_mutex);
3380 goto bad;
3381 }
3382 RT_LOCK(ro4->ro_rt);
3383 }
3384
3385 /*
3386 * adjust state->dst if tunnel endpoint is offlink
3387 *
3388 * XXX: caching rt_gateway value in the state is
3389 * not really good, since it may point elsewhere
3390 * when the gateway gets modified to a larger
3391 * sockaddr via rt_setgate(). This is currently
3392 * addressed by SA_SIZE roundup in that routine.
3393 */
3394 if (ro4->ro_rt->rt_flags & RTF_GATEWAY)
3395 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3396 RT_UNLOCK(ro4->ro_rt);
3397 ROUTE_RELEASE(&state->ro);
3398 route_copyout(&state->ro, ro4, sizeof(state->ro));
3399 state->dst = (struct sockaddr *)dst4;
3400 state->tunneled = 4;
3401 // release sadb_mutex, after updating sah's route cache
3402 lck_mtx_unlock(sadb_mutex);
3403 } else {
3404 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3405 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3406 error = EAFNOSUPPORT;
3407 goto bad;
3408 }
3409 }
3410
3411 state->m = ipsec4_splithdr(state->m);
3412 if (!state->m) {
3413 error = ENOMEM;
3414 goto bad;
3415 }
3416 switch (sav->sah->saidx.proto) {
3417 case IPPROTO_ESP:
3418 #if IPSEC_ESP
3419 if ((error = esp4_output(state->m, sav)) != 0) {
3420 state->m = NULL;
3421 goto bad;
3422 }
3423 break;
3424 #else
3425 m_freem(state->m);
3426 state->m = NULL;
3427 error = EINVAL;
3428 goto bad;
3429 #endif
3430 case IPPROTO_AH:
3431 if ((error = ah4_output(state->m, sav)) != 0) {
3432 state->m = NULL;
3433 goto bad;
3434 }
3435 break;
3436 case IPPROTO_IPCOMP:
3437 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3438 state->m = NULL;
3439 goto bad;
3440 }
3441 break;
3442 default:
3443 ipseclog((LOG_ERR,
3444 "ipsec4_output: unknown ipsec protocol %d\n",
3445 sav->sah->saidx.proto));
3446 m_freem(state->m);
3447 state->m = NULL;
3448 error = EINVAL;
3449 goto bad;
3450 }
3451
3452 if (state->m == 0) {
3453 error = ENOMEM;
3454 goto bad;
3455 }
3456
3457 return 0;
3458
3459 bad:
3460 return error;
3461 }
3462
3463 int
3464 ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3465 {
3466 int error = 0;
3467 struct secasvar *sav = NULL;
3468
3469 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3470
3471 if (!state)
3472 panic("state == NULL in ipsec4_interface_output");
3473 if (!state->m)
3474 panic("state->m == NULL in ipsec4_interface_output");
3475 if (!state->dst)
3476 panic("state->dst == NULL in ipsec4_interface_output");
3477
3478 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET);
3479 if (sav == NULL) {
3480 goto bad;
3481 }
3482
3483 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3484 goto bad;
3485 }
3486
3487 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3488 if (sav)
3489 key_freesav(sav, KEY_SADB_UNLOCKED);
3490 return 0;
3491
3492 bad:
3493 if (sav)
3494 key_freesav(sav, KEY_SADB_UNLOCKED);
3495 m_freem(state->m);
3496 state->m = NULL;
3497 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3498 return error;
3499 }
3500
3501 int
3502 ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3503 {
3504 struct ip *ip = NULL;
3505 struct ipsecrequest *isr = NULL;
3506 struct secasindex saidx;
3507 struct secasvar *sav = NULL;
3508 int error = 0;
3509 struct sockaddr_in *sin;
3510
3511 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3512
3513 if (!state)
3514 panic("state == NULL in ipsec4_output");
3515 if (!state->m)
3516 panic("state->m == NULL in ipsec4_output");
3517 if (!state->dst)
3518 panic("state->dst == NULL in ipsec4_output");
3519
3520 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0,0,0,0,0);
3521
3522 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3523 printf("ipsec4_output: applied SP\n");
3524 kdebug_secpolicy(sp));
3525
3526 for (isr = sp->req; isr != NULL; isr = isr->next) {
3527 /* make SA index for search proper SA */
3528 ip = mtod(state->m, struct ip *);
3529 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3530 saidx.mode = isr->saidx.mode;
3531 saidx.reqid = isr->saidx.reqid;
3532 sin = (struct sockaddr_in *)&saidx.src;
3533 if (sin->sin_len == 0) {
3534 sin->sin_len = sizeof(*sin);
3535 sin->sin_family = AF_INET;
3536 sin->sin_port = IPSEC_PORT_ANY;
3537 bcopy(&ip->ip_src, &sin->sin_addr,
3538 sizeof(sin->sin_addr));
3539 }
3540 sin = (struct sockaddr_in *)&saidx.dst;
3541 if (sin->sin_len == 0) {
3542 sin->sin_len = sizeof(*sin);
3543 sin->sin_family = AF_INET;
3544 sin->sin_port = IPSEC_PORT_ANY;
3545 /*
3546 * Get the port from the packet if the upper layer is UDP,
3547 * NAT traversal is enabled, and the SA is in transport mode.
3548 */
3549
3550 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3551 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3552
3553 if (ip->ip_p == IPPROTO_UDP) {
3554 struct udphdr *udp;
3555 size_t hlen;
3556 #ifdef _IP_VHL
3557 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3558 #else
3559 hlen = ip->ip_hl << 2;
3560 #endif
3561 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3562 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3563 if (!state->m) {
3564 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3565 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3566 goto bad;
3567 }
3568 ip = mtod(state->m, struct ip *);
3569 }
3570 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3571 sin->sin_port = udp->uh_dport;
3572 }
3573 }
3574
3575 bcopy(&ip->ip_dst, &sin->sin_addr,
3576 sizeof(sin->sin_addr));
3577 }
3578
3579 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3580 /*
3581 * IPsec processing is required, but no SA found.
3582 * I assume that key_acquire() has been called
3583 * to get/establish the SA. Here I discard
3584 * this packet because it is the responsibility of the
3585 * upper layer to retransmit the packet.
3586 */
3587 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3588 goto bad;
3589 }
3590
3591 /* validity check */
3592 if (sav == NULL) {
3593 switch (ipsec_get_reqlevel(isr)) {
3594 case IPSEC_LEVEL_USE:
3595 continue;
3596 case IPSEC_LEVEL_REQUIRE:
3597 /* must not be reached here. */
3598 panic("ipsec4_output: no SA found, but required.");
3599 }
3600 }
3601
3602 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3603 goto bad;
3604 }
3605 }
3606
3607 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3608 if (sav)
3609 key_freesav(sav, KEY_SADB_UNLOCKED);
3610 return 0;
3611
3612 bad:
3613 if (sav)
3614 key_freesav(sav, KEY_SADB_UNLOCKED);
3615 m_freem(state->m);
3616 state->m = NULL;
3617 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3618 return error;
3619 }
3620
3621 #endif
3622
3623 #if INET6
3624 /*
3625 * IPsec output logic for IPv6, transport mode.
3626 */
3627 static int
3628 ipsec6_output_trans_internal(
3629 struct ipsec_output_state *state,
3630 struct secasvar *sav,
3631 u_char *nexthdrp,
3632 struct mbuf *mprev)
3633 {
3634 struct ip6_hdr *ip6;
3635 int error = 0;
3636 int plen;
3637
3638 /* validity check */
3639 if (sav == NULL || sav->sah == NULL) {
3640 error = EINVAL;
3641 goto bad;
3642 }
3643
3644 /*
3645 * If there is no valid SA, we give up processing.
3646 * see same place at ipsec4_output().
3647 */
3648 if (sav->state != SADB_SASTATE_MATURE
3649 && sav->state != SADB_SASTATE_DYING) {
3650 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3651 error = EINVAL;
3652 goto bad;
3653 }
3654
3655 state->outgoing_if = sav->sah->outgoing_if;
3656
3657 switch (sav->sah->saidx.proto) {
3658 case IPPROTO_ESP:
3659 #if IPSEC_ESP
3660 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3661 #else
3662 m_freem(state->m);
3663 error = EINVAL;
3664 #endif
3665 break;
3666 case IPPROTO_AH:
3667 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3668 break;
3669 case IPPROTO_IPCOMP:
3670 error = ipcomp6_output(state->m, nexthdrp, mprev->m_next, sav);
3671 break;
3672 default:
3673 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3674 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3675 m_freem(state->m);
3676 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3677 error = EINVAL;
3678 break;
3679 }
3680 if (error) {
3681 state->m = NULL;
3682 goto bad;
3683 }
3684 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3685 if (plen > IPV6_MAXPACKET) {
3686 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3687 "IPsec with IPv6 jumbogram is not supported\n"));
3688 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3689 error = EINVAL; /*XXX*/
3690 goto bad;
3691 }
3692 ip6 = mtod(state->m, struct ip6_hdr *);
3693 ip6->ip6_plen = htons(plen);
3694
3695 return 0;
3696 bad:
3697 return error;
3698 }
3699
3700 int
3701 ipsec6_output_trans(
3702 struct ipsec_output_state *state,
3703 u_char *nexthdrp,
3704 struct mbuf *mprev,
3705 struct secpolicy *sp,
3706 __unused int flags,
3707 int *tun)
3708 {
3709 struct ip6_hdr *ip6;
3710 struct ipsecrequest *isr = NULL;
3711 struct secasindex saidx;
3712 int error = 0;
3713 struct sockaddr_in6 *sin6;
3714 struct secasvar *sav = NULL;
3715
3716 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3717
3718 if (!state)
3719 panic("state == NULL in ipsec6_output_trans");
3720 if (!state->m)
3721 panic("state->m == NULL in ipsec6_output_trans");
3722 if (!nexthdrp)
3723 panic("nexthdrp == NULL in ipsec6_output_trans");
3724 if (!mprev)
3725 panic("mprev == NULL in ipsec6_output_trans");
3726 if (!sp)
3727 panic("sp == NULL in ipsec6_output_trans");
3728 if (!tun)
3729 panic("tun == NULL in ipsec6_output_trans");
3730
3731 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3732 printf("ipsec6_output_trans: applied SP\n");
3733 kdebug_secpolicy(sp));
3734
3735 *tun = 0;
3736 for (isr = sp->req; isr; isr = isr->next) {
3737 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3738 /* the rest will be handled by ipsec6_output_tunnel() */
3739 break;
3740 }
3741
3742 /* make SA index for search proper SA */
3743 ip6 = mtod(state->m, struct ip6_hdr *);
3744 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3745 saidx.mode = isr->saidx.mode;
3746 saidx.reqid = isr->saidx.reqid;
3747 sin6 = (struct sockaddr_in6 *)&saidx.src;
3748 if (sin6->sin6_len == 0) {
3749 sin6->sin6_len = sizeof(*sin6);
3750 sin6->sin6_family = AF_INET6;
3751 sin6->sin6_port = IPSEC_PORT_ANY;
3752 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3753 sizeof(ip6->ip6_src));
3754 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3755 /* fix scope id for comparing SPD */
3756 sin6->sin6_addr.s6_addr16[1] = 0;
3757 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3758 }
3759 }
3760 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3761 if (sin6->sin6_len == 0) {
3762 sin6->sin6_len = sizeof(*sin6);
3763 sin6->sin6_family = AF_INET6;
3764 sin6->sin6_port = IPSEC_PORT_ANY;
3765 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3766 sizeof(ip6->ip6_dst));
3767 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3768 /* fix scope id for comparing SPD */
3769 sin6->sin6_addr.s6_addr16[1] = 0;
3770 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3771 }
3772 }
3773
3774 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3775 /*
3776 * IPsec processing is required, but no SA found.
3777 * I assume that key_acquire() has been called
3778 * to get/establish the SA. Here I discard
3779 * this packet because it is the responsibility of the
3780 * upper layer to retransmit the packet.
3781 */
3782 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3783 error = ENOENT;
3784
3785 /*
3786 * Notify ourselves that the packet was discarded;
3787 * this is better than silently dropping it.
3788 * (jinmei@kame.net)
3789 * XXX: should we restrict the error to TCP packets?
3790 * XXX: should we directly notify sockets via
3791 * pfctlinputs?
3792 */
3793 icmp6_error(state->m, ICMP6_DST_UNREACH,
3794 ICMP6_DST_UNREACH_ADMIN, 0);
3795 state->m = NULL; /* icmp6_error freed the mbuf */
3796 goto bad;
3797 }
3798
3799 /* validity check */
3800 if (sav == NULL) {
3801 switch (ipsec_get_reqlevel(isr)) {
3802 case IPSEC_LEVEL_USE:
3803 continue;
3804 case IPSEC_LEVEL_REQUIRE:
3805 /* must not be reached here. */
3806 panic("ipsec6_output_trans: no SA found, but required.");
3807 }
3808 }
3809
3810 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
3811 goto bad;
3812 }
3813 }
3814
3815 /* if we have more to go, we need a tunnel mode processing */
3816 if (isr != NULL)
3817 *tun = 1;
3818
3819 if (sav)
3820 key_freesav(sav, KEY_SADB_UNLOCKED);
3821 return 0;
3822
3823 bad:
3824 if (sav)
3825 key_freesav(sav, KEY_SADB_UNLOCKED);
3826 m_freem(state->m);
3827 state->m = NULL;
3828 return error;
3829 }
3830
3831 /*
3832 * IPsec output logic for IPv6, tunnel mode.
3833 */
3834 static int
3835 ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3836 {
3837 struct ip6_hdr *ip6;
3838 int error = 0;
3839 int plen;
3840 struct sockaddr_in6* dst6;
3841 struct route *ro6;
3842
3843 /* validity check */
3844 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3845 error = EINVAL;
3846 goto bad;
3847 }
3848
3849 /*
3850 * If there is no valid SA, we give up processing.
3851 * see same place at ipsec4_output().
3852 */
3853 if (sav->state != SADB_SASTATE_MATURE
3854 && sav->state != SADB_SASTATE_DYING) {
3855 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3856 error = EINVAL;
3857 goto bad;
3858 }
3859
3860 state->outgoing_if = sav->sah->outgoing_if;
3861
3862 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3863 /*
3864 * build IPsec tunnel.
3865 */
3866 state->m = ipsec6_splithdr(state->m);
3867 if (!state->m) {
3868 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3869 error = ENOMEM;
3870 goto bad;
3871 }
3872
3873 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3874 error = ipsec6_encapsulate(state->m, sav);
3875 if (error) {
3876 state->m = 0;
3877 goto bad;
3878 }
3879 ip6 = mtod(state->m, struct ip6_hdr *);
3880 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3881
3882 struct ip *ip;
3883 struct sockaddr_in* dst4;
3884 struct route *ro4 = NULL;
3885 struct route ro4_copy;
3886 struct ip_out_args ipoa = { IFSCOPE_NONE, { 0 },
3887 IPOAF_SELECT_SRCIF, 0 };
3888
3889 if (must_be_last)
3890 *must_be_last = 1;
3891
3892 state->tunneled = 4; /* must not process any further in ip6_output */
3893 error = ipsec64_encapsulate(state->m, sav);
3894 if (error) {
3895 state->m = 0;
3896 goto bad;
3897 }
3898 /* Now we have an IPv4 packet */
3899 ip = mtod(state->m, struct ip *);
3900
3901 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3902 lck_mtx_lock(sadb_mutex);
3903 ro4 = &sav->sah->sa_route;
3904 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3905 if (ro4->ro_rt) {
3906 RT_LOCK(ro4->ro_rt);
3907 }
3908 if (ROUTE_UNUSABLE(ro4) ||
3909 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3910 if (ro4->ro_rt != NULL)
3911 RT_UNLOCK(ro4->ro_rt);
3912 ROUTE_RELEASE(ro4);
3913 }
3914 if (ro4->ro_rt == NULL) {
3915 dst4->sin_family = AF_INET;
3916 dst4->sin_len = sizeof(*dst4);
3917 dst4->sin_addr = ip->ip_dst;
3918 } else {
3919 RT_UNLOCK(ro4->ro_rt);
3920 }
3921 route_copyout(&ro4_copy, ro4, sizeof(ro4_copy));
3922 // release sadb_mutex, after updating sah's route cache and getting a local copy
3923 lck_mtx_unlock(sadb_mutex);
3924 state->m = ipsec4_splithdr(state->m);
3925 if (!state->m) {
3926 error = ENOMEM;
3927 ROUTE_RELEASE(&ro4_copy);
3928 goto bad;
3929 }
3930 switch (sav->sah->saidx.proto) {
3931 case IPPROTO_ESP:
3932 #if IPSEC_ESP
3933 if ((error = esp4_output(state->m, sav)) != 0) {
3934 state->m = NULL;
3935 ROUTE_RELEASE(&ro4_copy);
3936 goto bad;
3937 }
3938 break;
3939
3940 #else
3941 m_freem(state->m);
3942 state->m = NULL;
3943 error = EINVAL;
3944 ROUTE_RELEASE(&ro4_copy);
3945 goto bad;
3946 #endif
3947 case IPPROTO_AH:
3948 if ((error = ah4_output(state->m, sav)) != 0) {
3949 state->m = NULL;
3950 ROUTE_RELEASE(&ro4_copy);
3951 goto bad;
3952 }
3953 break;
3954 case IPPROTO_IPCOMP:
3955 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3956 state->m = NULL;
3957 ROUTE_RELEASE(&ro4_copy);
3958 goto bad;
3959 }
3960 break;
3961 default:
3962 ipseclog((LOG_ERR,
3963 "ipsec4_output: unknown ipsec protocol %d\n",
3964 sav->sah->saidx.proto));
3965 m_freem(state->m);
3966 state->m = NULL;
3967 error = EINVAL;
3968 ROUTE_RELEASE(&ro4_copy);
3969 goto bad;
3970 }
3971
3972 if (state->m == 0) {
3973 error = ENOMEM;
3974 ROUTE_RELEASE(&ro4_copy);
3975 goto bad;
3976 }
3977 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3978 ip = mtod(state->m, struct ip *);
3979 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3980 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3981 state->m = NULL;
3982 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3983 lck_mtx_lock(sadb_mutex);
3984 route_copyin(&ro4_copy, ro4, sizeof(ro4_copy));
3985 lck_mtx_unlock(sadb_mutex);
3986 if (error != 0)
3987 goto bad;
3988 goto done;
3989 } else {
3990 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3991 "unsupported inner family, spi=%u\n",
3992 (u_int32_t)ntohl(sav->spi)));
3993 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3994 error = EAFNOSUPPORT;
3995 goto bad;
3996 }
3997
3998 // grab sadb_mutex, before updating sah's route cache
3999 lck_mtx_lock(sadb_mutex);
4000 ro6 = &sav->sah->sa_route;
4001 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
4002 if (ro6->ro_rt) {
4003 RT_LOCK(ro6->ro_rt);
4004 }
4005 if (ROUTE_UNUSABLE(ro6) ||
4006 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
4007 if (ro6->ro_rt != NULL)
4008 RT_UNLOCK(ro6->ro_rt);
4009 ROUTE_RELEASE(ro6);
4010 }
4011 if (ro6->ro_rt == 0) {
4012 bzero(dst6, sizeof(*dst6));
4013 dst6->sin6_family = AF_INET6;
4014 dst6->sin6_len = sizeof(*dst6);
4015 dst6->sin6_addr = ip6->ip6_dst;
4016 rtalloc(ro6);
4017 if (ro6->ro_rt) {
4018 RT_LOCK(ro6->ro_rt);
4019 }
4020 }
4021 if (ro6->ro_rt == 0) {
4022 ip6stat.ip6s_noroute++;
4023 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
4024 error = EHOSTUNREACH;
4025 // release sadb_mutex, after updating sah's route cache
4026 lck_mtx_unlock(sadb_mutex);
4027 goto bad;
4028 }
4029
4030 /*
4031 * adjust state->dst if tunnel endpoint is offlink
4032 *
4033 * XXX: caching rt_gateway value in the state is
4034 * not really good, since it may point elsewhere
4035 * when the gateway gets modified to a larger
4036 * sockaddr via rt_setgate(). This is currently
4037 * addressed by SA_SIZE roundup in that routine.
4038 */
4039 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
4040 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
4041 RT_UNLOCK(ro6->ro_rt);
4042 ROUTE_RELEASE(&state->ro);
4043 route_copyout(&state->ro, ro6, sizeof(state->ro));
4044 state->dst = (struct sockaddr *)dst6;
4045 state->tunneled = 6;
4046 // release sadb_mutex, after updating sah's route cache
4047 lck_mtx_unlock(sadb_mutex);
4048 }
4049
4050 state->m = ipsec6_splithdr(state->m);
4051 if (!state->m) {
4052 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
4053 error = ENOMEM;
4054 goto bad;
4055 }
4056 ip6 = mtod(state->m, struct ip6_hdr *);
4057 switch (sav->sah->saidx.proto) {
4058 case IPPROTO_ESP:
4059 #if IPSEC_ESP
4060 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4061 #else
4062 m_freem(state->m);
4063 error = EINVAL;
4064 #endif
4065 break;
4066 case IPPROTO_AH:
4067 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4068 break;
4069 case IPPROTO_IPCOMP:
4070 /* XXX code should be here */
4071 /*FALLTHROUGH*/
4072 default:
4073 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4074 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
4075 m_freem(state->m);
4076 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4077 error = EINVAL;
4078 break;
4079 }
4080 if (error) {
4081 state->m = NULL;
4082 goto bad;
4083 }
4084 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
4085 if (plen > IPV6_MAXPACKET) {
4086 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4087 "IPsec with IPv6 jumbogram is not supported\n"));
4088 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4089 error = EINVAL; /*XXX*/
4090 goto bad;
4091 }
4092 ip6 = mtod(state->m, struct ip6_hdr *);
4093 ip6->ip6_plen = htons(plen);
4094 done:
4095 return 0;
4096
4097 bad:
4098 return error;
4099 }
4100
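/*
 * Tunnel-mode output for IPv6: walk the policy's ipsecrequest chain,
 * skipping transport-mode entries (already handled by ipsec6_output_trans),
 * look up an SA for each tunnel-mode request, and encapsulate/transform
 * the packet once per SA via ipsec6_output_tunnel_internal.
 */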
4101 int
4102 ipsec6_output_tunnel(
4103 struct ipsec_output_state *state,
4104 struct secpolicy *sp,
4105 __unused int flags)
4106 {
4107 struct ip6_hdr *ip6;
4108 struct ipsecrequest *isr = NULL;
4109 struct secasindex saidx;
4110 struct secasvar *sav = NULL;
4111 int error = 0;
4112
4113 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4114
4115 if (!state)
4116 panic("state == NULL in ipsec6_output_tunnel");
4117 if (!state->m)
4118 panic("state->m == NULL in ipsec6_output_tunnel");
4119 if (!sp)
4120 panic("sp == NULL in ipsec6_output_tunnel");
4121
4122 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
4123 printf("ipsec6_output_tunnel: applied SP\n");
4124 kdebug_secpolicy(sp));
4125
4126 /*
4127 * transport mode ipsec (before the 1st tunnel mode) is already
4128 * processed by ipsec6_output_trans().
4129 */
4130 for (isr = sp->req; isr; isr = isr->next) {
4131 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
4132 break;
4133 }
4134
4135 for (/* already initialized */; isr; isr = isr->next) {
4136 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4137 /* When tunnel mode, SA peers must be specified. */
4138 bcopy(&isr->saidx, &saidx, sizeof(saidx));
4139 } else {
4140 /* make SA index to look for a proper SA */
4141 struct sockaddr_in6 *sin6;
4142
4143 bzero(&saidx, sizeof(saidx));
4144 saidx.proto = isr->saidx.proto;
4145 saidx.mode = isr->saidx.mode;
4146 saidx.reqid = isr->saidx.reqid;
4147
4148 ip6 = mtod(state->m, struct ip6_hdr *);
4149 sin6 = (struct sockaddr_in6 *)&saidx.src;
4150 if (sin6->sin6_len == 0) {
4151 sin6->sin6_len = sizeof(*sin6);
4152 sin6->sin6_family = AF_INET6;
4153 sin6->sin6_port = IPSEC_PORT_ANY;
4154 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
4155 sizeof(ip6->ip6_src));
4156 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
4157 /* fix scope id for comparing SPD */
4158 sin6->sin6_addr.s6_addr16[1] = 0;
4159 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
4160 }
4161 }
4162 sin6 = (struct sockaddr_in6 *)&saidx.dst;
4163 if (sin6->sin6_len == 0) {
4164 sin6->sin6_len = sizeof(*sin6);
4165 sin6->sin6_family = AF_INET6;
4166 sin6->sin6_port = IPSEC_PORT_ANY;
4167 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
4168 sizeof(ip6->ip6_dst));
4169 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
4170 /* fix scope id for comparing SPD */
4171 sin6->sin6_addr.s6_addr16[1] = 0;
4172 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
4173 }
4174 }
4175 }
4176
4177 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
4178 /*
4179 * IPsec processing is required, but no SA found.
4180 * I assume that key_acquire() has been called
4181 * to get/establish the SA. Here I discard
4182 * this packet because it is the responsibility of the
4183 * upper layer to retransmit the packet.
4184 */
4185 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4186 error = ENOENT;
4187 goto bad;
4188 }
4189
4190 /* validity check */
4191 if (sav == NULL) {
4192 switch (ipsec_get_reqlevel(isr)) {
4193 case IPSEC_LEVEL_USE:
4194 continue;
4195 case IPSEC_LEVEL_REQUIRE:
4196 /* must not be reached here. */
4197 panic("ipsec6_output_tunnel: no SA found, but required.");
4198 }
4199 }
4200
4201 /*
4202 * If there is no valid SA, we give up processing;
4203 * see the same check in ipsec4_output().
4204 */
4205 if (sav->state != SADB_SASTATE_MATURE
4206 && sav->state != SADB_SASTATE_DYING) {
4207 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4208 error = EINVAL;
4209 goto bad;
4210 }
4211
4212 int must_be_last = 0;
4213
4214 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4215 goto bad;
4216 }
4217
4218 if (must_be_last && isr->next) {
4219 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4220 "IPv4 must be outer layer, spi=%u\n",
4221 (u_int32_t)ntohl(sav->spi)));
4222 error = EINVAL;
4223 goto bad;
4224 }
4225 }
4226
4227 if (sav)
4228 key_freesav(sav, KEY_SADB_UNLOCKED);
4229 return 0;
4230
4231 bad:
4232 if (sav)
4233 key_freesav(sav, KEY_SADB_UNLOCKED);
4234 if (state->m)
4235 m_freem(state->m);
4236 state->m = NULL;
4237 return error;
4238 }
4239
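/*
 * Interface-based (ipsec/utun) output: look up the outbound SA bound to
 * the given interface and apply tunnel- or transport-mode processing
 * depending on the SA's mode.
 */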
4240 int
4241 ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4242 {
4243 int error = 0;
4244 struct secasvar *sav = NULL;
4245
4246 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4247
4248 if (!state)
4249 panic("state == NULL in ipsec6_interface_output");
4250 if (!state->m)
4251 panic("state->m == NULL in ipsec6_interface_output");
4252 if (!nexthdrp)
4253 panic("nexthdrp == NULL in ipsec6_interface_output");
4254 if (!mprev)
4255 panic("mprev == NULL in ipsec6_interface_output");
4256
4257 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6);
4258 if (sav == NULL) {
4259 goto bad;
4260 }
4261
4262 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4263 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4264 goto bad;
4265 }
4266 }
4267 else {
4268 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4269 goto bad;
4270 }
4271 }
4272
4273 if (sav)
4274 key_freesav(sav, KEY_SADB_UNLOCKED);
4275 return 0;
4276
4277 bad:
4278 if (sav)
4279 key_freesav(sav, KEY_SADB_UNLOCKED);
4280 m_freem(state->m);
4281 state->m = NULL;
4282 return error;
4283 }
4284 #endif /*INET6*/
4285
4286 #if INET
4287 /*
4288 * Chop the IP header and options off from the payload.
4289 */
4290 struct mbuf *
4291 ipsec4_splithdr(
4292 struct mbuf *m)
4293 {
4294 struct mbuf *mh;
4295 struct ip *ip;
4296 int hlen;
4297
4298 if (m->m_len < sizeof(struct ip))
4299 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4300 ip = mtod(m, struct ip *);
4301 #ifdef _IP_VHL
4302 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4303 #else
4304 hlen = ip->ip_hl << 2;
4305 #endif
4306 if (m->m_len > hlen) {
4307 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4308 if (!mh) {
4309 m_freem(m);
4310 return NULL;
4311 }
4312 M_COPY_PKTHDR(mh, m);
4313 MH_ALIGN(mh, hlen);
4314 m->m_flags &= ~M_PKTHDR;
4315 m_mchtype(m, MT_DATA);
4316 m->m_len -= hlen;
4317 m->m_data += hlen;
4318 mh->m_next = m;
4319 m = mh;
4320 m->m_len = hlen;
4321 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4322 } else if (m->m_len < hlen) {
4323 m = m_pullup(m, hlen);
4324 if (!m)
4325 return NULL;
4326 }
4327 return m;
4328 }
4329 #endif
4330
4331 #if INET6
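/*
 * Chop the IPv6 header off from the payload (IPv6 counterpart of
 * ipsec4_splithdr).
 */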
4332 struct mbuf *
4333 ipsec6_splithdr(
4334 struct mbuf *m)
4335 {
4336 struct mbuf *mh;
4337 struct ip6_hdr *ip6;
4338 int hlen;
4339
4340 if (m->m_len < sizeof(struct ip6_hdr))
4341 panic("ipsec6_splithdr: first mbuf too short");
4342 ip6 = mtod(m, struct ip6_hdr *);
4343 hlen = sizeof(struct ip6_hdr);
4344 if (m->m_len > hlen) {
4345 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4346 if (!mh) {
4347 m_freem(m);
4348 return NULL;
4349 }
4350 M_COPY_PKTHDR(mh, m);
4351 MH_ALIGN(mh, hlen);
4352 m->m_flags &= ~M_PKTHDR;
4353 m_mchtype(m, MT_DATA);
4354 m->m_len -= hlen;
4355 m->m_data += hlen;
4356 mh->m_next = m;
4357 m = mh;
4358 m->m_len = hlen;
4359 bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
4360 } else if (m->m_len < hlen) {
4361 m = m_pullup(m, hlen);
4362 if (!m)
4363 return NULL;
4364 }
4365 return m;
4366 }
4367 #endif
4368
4369 /* validate inbound IPsec tunnel packet. */
4370 int
4371 ipsec4_tunnel_validate(
4372 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4373 int off,
4374 u_int nxt0,
4375 struct secasvar *sav,
4376 sa_family_t *ifamily)
4377 {
4378 u_int8_t nxt = nxt0 & 0xff;
4379 struct sockaddr_in *sin;
4380 struct sockaddr_in osrc, odst, i4src, i4dst;
4381 struct sockaddr_in6 i6src, i6dst;
4382 int hlen;
4383 struct secpolicy *sp;
4384 struct ip *oip;
4385
4386 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4387
4388 #if DIAGNOSTIC
4389 if (m->m_len < sizeof(struct ip))
4390 panic("too short mbuf on ipsec4_tunnel_validate");
4391 #endif
4392 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4393 return 0;
4394 if (m->m_pkthdr.len < off + sizeof(struct ip))
4395 return 0;
4396 /* do not decapsulate if the SA is for transport mode only */
4397 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4398 return 0;
4399
4400 oip = mtod(m, struct ip *);
4401 #ifdef _IP_VHL
4402 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4403 #else
4404 hlen = oip->ip_hl << 2;
4405 #endif
4406 if (hlen != sizeof(struct ip))
4407 return 0;
4408
4409 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4410 if (sin->sin_family != AF_INET)
4411 return 0;
4412 if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0)
4413 return 0;
4414
4415 if (sav->utun_in_fn ||
4416 sav->sah->ipsec_if != NULL) {
4417 // the ipsec/utun interface SAs don't have policies.
4418 if (nxt == IPPROTO_IPV4) {
4419 *ifamily = AF_INET;
4420 } else if (nxt == IPPROTO_IPV6) {
4421 *ifamily = AF_INET6;
4422 } else {
4423 return 0;
4424 }
4425 return 1;
4426 }
4427
4428 /* XXX slow */
4429 bzero(&osrc, sizeof(osrc));
4430 bzero(&odst, sizeof(odst));
4431 osrc.sin_family = odst.sin_family = AF_INET;
4432 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4433 osrc.sin_addr = oip->ip_src;
4434 odst.sin_addr = oip->ip_dst;
4435 /*
4436 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
4437 * - if the inner destination is a multicast address, there can be
4438 * multiple permissible inner source addresses. an implementation
4439 * may want to skip verification of the inner source address against
4440 * the SPD selector.
4441 * - if the inner protocol is ICMP, the packet may be an error report
4442 * from routers on the other side of the VPN cloud (R in the
4443 * following diagram). in this case, we cannot verify the inner source
4444 * address against the SPD selector.
4445 * me -- gw === gw -- R -- you
4446 *
4447 * we consider the first bullet to be the user's responsibility for SPD
4448 * entry configuration (if you need to encrypt multicast traffic, set
4449 * the source range of the SPD selector to 0.0.0.0/0, or have explicit
4450 * address ranges for possible senders).
4451 * the second bullet is not taken care of (yet).
4452 *
4453 * therefore, we do not do anything special about the inner source.
4454 */
4455 if (nxt == IPPROTO_IPV4) {
4456 bzero(&i4src, sizeof(struct sockaddr_in));
4457 bzero(&i4dst, sizeof(struct sockaddr_in));
4458 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4459 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4460 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4461 (caddr_t)&i4src.sin_addr);
4462 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4463 (caddr_t)&i4dst.sin_addr);
4464 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4465 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4466 } else if (nxt == IPPROTO_IPV6) {
4467 bzero(&i6src, sizeof(struct sockaddr_in6));
4468 bzero(&i6dst, sizeof(struct sockaddr_in6));
4469 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4470 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4471 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4472 (caddr_t)&i6src.sin6_addr);
4473 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4474 (caddr_t)&i6dst.sin6_addr);
4475 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4476 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4477 } else
4478 return 0; /* unsupported family */
4479
4480 if (!sp)
4481 return 0;
4482
4483 key_freesp(sp, KEY_SADB_UNLOCKED);
4484
4485 return 1;
4486 }
4487
4488 #if INET6
4489 /* validate inbound IPsec tunnel packet. */
4490 int
4491 ipsec6_tunnel_validate(
4492 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4493 int off,
4494 u_int nxt0,
4495 struct secasvar *sav,
4496 sa_family_t *ifamily)
4497 {
4498 u_int8_t nxt = nxt0 & 0xff;
4499 struct sockaddr_in6 *sin6;
4500 struct sockaddr_in i4src, i4dst;
4501 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4502 struct secpolicy *sp;
4503 struct ip6_hdr *oip6;
4504
4505 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4506
4507 #if DIAGNOSTIC
4508 if (m->m_len < sizeof(struct ip6_hdr))
4509 panic("too short mbuf on ipsec6_tunnel_validate");
4510 #endif
4511 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4512 return 0;
4513
4514 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr))
4515 return 0;
4516 /* do not decapsulate if the SA is for transport mode only */
4517 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4518 return 0;
4519
4520 oip6 = mtod(m, struct ip6_hdr *);
4521 /* AF_INET should be supported, but at this moment we don't support it. */
4522 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4523 if (sin6->sin6_family != AF_INET6)
4524 return 0;
4525 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr))
4526 return 0;
4527
4528 if (sav->utun_in_fn ||
4529 sav->sah->ipsec_if != NULL) {
4530 // the ipsec/utun interface SAs don't have policies.
4531 if (nxt == IPPROTO_IPV4) {
4532 *ifamily = AF_INET;
4533 } else if (nxt == IPPROTO_IPV6) {
4534 *ifamily = AF_INET6;
4535 } else {
4536 return 0;
4537 }
4538 return 1;
4539 }
4540
4541 /* XXX slow */
4542 bzero(&osrc, sizeof(osrc));
4543 bzero(&odst, sizeof(odst));
4544 osrc.sin6_family = odst.sin6_family = AF_INET6;
4545 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4546 osrc.sin6_addr = oip6->ip6_src;
4547 odst.sin6_addr = oip6->ip6_dst;
4548
4549 /*
4550 * regarding inner source address validation, see the long comment
4551 * in ipsec4_tunnel_validate.
4552 */
4553
4554 if (nxt == IPPROTO_IPV4) {
4555 bzero(&i4src, sizeof(struct sockaddr_in));
4556 bzero(&i4dst, sizeof(struct sockaddr_in));
4557 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4558 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4559 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4560 (caddr_t)&i4src.sin_addr);
4561 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4562 (caddr_t)&i4dst.sin_addr);
4563 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4564 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4565 } else if (nxt == IPPROTO_IPV6) {
4566 bzero(&i6src, sizeof(struct sockaddr_in6));
4567 bzero(&i6dst, sizeof(struct sockaddr_in6));
4568 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4569 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4570 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4571 (caddr_t)&i6src.sin6_addr);
4572 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4573 (caddr_t)&i6dst.sin6_addr);
4574 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4575 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4576 } else
4577 return 0; /* unsupported family */
4578 /*
4579 * when there is no suitable inbound policy for an ipsec tunnel-mode
4580 * packet, the kernel never decapsulates the tunneled packet
4581 * as ipsec tunnel mode, even when the system-wide policy is "none".
4582 * instead, the kernel leaves the packet to the generic tunnel module
4583 * to process. if there is no matching rule in the generic tunnel, the
4584 * packet is rejected and the statistics are counted up.
4585 */
4586 if (!sp)
4587 return 0;
4588 key_freesp(sp, KEY_SADB_UNLOCKED);
4589
4590 return 1;
4591 }
4592 #endif
4593
4594 /*
4595 * Make an mbuf chain for encryption.
4596 * If the original mbuf chain contains an mbuf with a cluster,
4597 * allocate a new cluster and copy the data to the new cluster.
4598 * XXX: this hack is inefficient, but is necessary to handle cases
4599 * of TCP retransmission...
4600 */
4601 struct mbuf *
4602 ipsec_copypkt(
4603 struct mbuf *m)
4604 {
4605 struct mbuf *n, **mpp, *mnew;
4606
4607 for (n = m, mpp = &m; n; n = n->m_next) {
4608 if (n->m_flags & M_EXT) {
4609 /*
4610 * Make a copy only if there is more than one reference
4611 * to the cluster.
4612 * XXX: is this approach effective?
4613 */
4614 if (
4615 n->m_ext.ext_free ||
4616 m_mclhasreference(n)
4617 )
4618 {
4619 int remain, copied;
4620 struct mbuf *mm;
4621
4622 if (n->m_flags & M_PKTHDR) {
4623 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4624 if (mnew == NULL)
4625 goto fail;
4626 M_COPY_PKTHDR(mnew, n);
4627 }
4628 else {
4629 MGET(mnew, M_DONTWAIT, MT_DATA);
4630 if (mnew == NULL)
4631 goto fail;
4632 }
4633 mnew->m_len = 0;
4634 mm = mnew;
4635
4636 /*
4637 * Copy data. If we don't have enough space to
4638 * store the whole data, allocate a cluster
4639 * or additional mbufs.
4640 * XXX: we don't use m_copyback(), since the
4641 * function does not use clusters and thus is
4642 * inefficient.
4643 */
4644 remain = n->m_len;
4645 copied = 0;
4646 while (1) {
4647 int len;
4648 struct mbuf *mn;
4649
4650 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN))
4651 len = remain;
4652 else { /* allocate a cluster */
4653 MCLGET(mm, M_DONTWAIT);
4654 if (!(mm->m_flags & M_EXT)) {
4655 m_free(mm);
4656 goto fail;
4657 }
4658 len = remain < MCLBYTES ?
4659 remain : MCLBYTES;
4660 }
4661
4662 bcopy(n->m_data + copied, mm->m_data,
4663 len);
4664
4665 copied += len;
4666 remain -= len;
4667 mm->m_len = len;
4668
4669 if (remain <= 0) /* completed? */
4670 break;
4671
4672 /* need another mbuf */
4673 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4674 if (mn == NULL)
4675 goto fail;
4676 mn->m_pkthdr.rcvif = NULL;
4677 mm->m_next = mn;
4678 mm = mn;
4679 }
4680
4681 /* adjust chain */
4682 mm->m_next = m_free(n);
4683 n = mm;
4684 *mpp = mnew;
4685 mpp = &n->m_next;
4686
4687 continue;
4688 }
4689 }
4690 *mpp = n;
4691 mpp = &n->m_next;
4692 }
4693
4694 return(m);
4695 fail:
4696 m_freem(m);
4697 return(NULL);
4698 }
4699
4700 /*
4701 * Tags are allocated as mbufs for now; since our minimum size is MLEN, we
4702 * should make use of up to that much space.
4703 */
4704 #define IPSEC_TAG_HEADER \
4705
4706 struct ipsec_tag {
4707 struct socket *socket;
4708 u_int32_t history_count;
4709 struct ipsec_history history[];
4710 };
4711
4712 #define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
4713 #define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
4714 #define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
4715 sizeof(struct ipsec_history))
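/*
 * Sizing sketch: a tag's payload is one mbuf's MLEN bytes minus the m_tag
 * header (IPSEC_TAG_SIZE); after the socket pointer and history_count
 * fields (IPSEC_TAG_HDR_SIZE), the remainder holds up to IPSEC_HISTORY_MAX
 * (proto, SPI) history entries.
 */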
4716
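/*
 * Find the mbuf's IPsec m_tag, creating an empty one if none exists yet;
 * returns a pointer to the ipsec_tag payload, or NULL on allocation failure.
 */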
4717 static struct ipsec_tag *
4718 ipsec_addaux(
4719 struct mbuf *m)
4720 {
4721 struct m_tag *tag;
4722
4723 /* Check if the tag already exists */
4724 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4725
4726 if (tag == NULL) {
4727 struct ipsec_tag *itag;
4728
4729 /* Allocate a tag */
4730 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4731 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4732
4733 if (tag) {
4734 itag = (struct ipsec_tag*)(tag + 1);
4735 itag->socket = 0;
4736 itag->history_count = 0;
4737
4738 m_tag_prepend(m, tag);
4739 }
4740 }
4741
4742 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4743 }
4744
4745 static struct ipsec_tag *
4746 ipsec_findaux(
4747 struct mbuf *m)
4748 {
4749 struct m_tag *tag;
4750
4751 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4752
4753 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4754 }
4755
4756 void
4757 ipsec_delaux(
4758 struct mbuf *m)
4759 {
4760 struct m_tag *tag;
4761
4762 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4763
4764 if (tag) {
4765 m_tag_delete(m, tag);
4766 }
4767 }
4768
4769 /* if the aux buffer is unnecessary, nuke it. */
4770 static void
4771 ipsec_optaux(
4772 struct mbuf *m,
4773 struct ipsec_tag *itag)
4774 {
4775 if (itag && itag->socket == NULL && itag->history_count == 0) {
4776 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4777 }
4778 }
4779
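/*
 * Associate (or clear) a socket pointer in the mbuf's IPsec aux tag.
 * The tag is created only when a non-NULL socket is supplied, and is
 * dropped again if it ends up carrying no data.
 */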
4780 int
4781 ipsec_setsocket(struct mbuf *m, struct socket *so)
4782 {
4783 struct ipsec_tag *tag;
4784
4785 /* if so == NULL, don't insist on getting the aux mbuf */
4786 if (so) {
4787 tag = ipsec_addaux(m);
4788 if (!tag)
4789 return ENOBUFS;
4790 } else
4791 tag = ipsec_findaux(m);
4792 if (tag) {
4793 tag->socket = so;
4794 ipsec_optaux(m, tag);
4795 }
4796 return 0;
4797 }
4798
4799 struct socket *
4800 ipsec_getsocket(struct mbuf *m)
4801 {
4802 struct ipsec_tag *itag;
4803
4804 itag = ipsec_findaux(m);
4805 if (itag)
4806 return itag->socket;
4807 else
4808 return NULL;
4809 }
4810
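/*
 * Append a (protocol, SPI) record to the mbuf's IPsec history tag;
 * returns ENOBUFS if no tag can be allocated and ENOSPC when the
 * per-packet history is full.
 */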
4811 int
4812 ipsec_addhist(
4813 struct mbuf *m,
4814 int proto,
4815 u_int32_t spi)
4816 {
4817 struct ipsec_tag *itag;
4818 struct ipsec_history *p;
4819 itag = ipsec_addaux(m);
4820 if (!itag)
4821 return ENOBUFS;
4822 if (itag->history_count == IPSEC_HISTORY_MAX)
4823 return ENOSPC; /* XXX */
4824
4825 p = &itag->history[itag->history_count];
4826 itag->history_count++;
4827
4828 bzero(p, sizeof(*p));
4829 p->ih_proto = proto;
4830 p->ih_spi = spi;
4831
4832 return 0;
4833 }
4834
4835 struct ipsec_history *
4836 ipsec_gethist(
4837 struct mbuf *m,
4838 int *lenp)
4839 {
4840 struct ipsec_tag *itag;
4841
4842 itag = ipsec_findaux(m);
4843 if (!itag)
4844 return NULL;
4845 if (itag->history_count == 0)
4846 return NULL;
4847 if (lenp)
4848 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4849 return itag->history;
4850 }
4851
4852 void
4853 ipsec_clearhist(
4854 struct mbuf *m)
4855 {
4856 struct ipsec_tag *itag;
4857
4858 itag = ipsec_findaux(m);
4859 if (itag) {
4860 itag->history_count = 0;
4861 }
4862 ipsec_optaux(m, itag);
4863 }
4864
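/*
 * Build and send a NAT-T keepalive for this SA: a one-byte 0xFF UDP
 * payload from esp_udp_encap_port to the peer's IKE port, transmitted
 * via ip_output() using the SAH's cached route. Returns TRUE and updates
 * natt_last_activity on success, FALSE otherwise.
 */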
4865 __private_extern__ int
4866 ipsec_send_natt_keepalive(
4867 struct secasvar *sav)
4868 {
4869 struct mbuf *m;
4870 struct ip *ip;
4871 int error;
4872 struct ip_out_args ipoa =
4873 { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0 };
4874 struct route ro;
4875 int keepalive_interval = natt_keepalive_interval;
4876
4877 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4878
4879 if ((esp_udp_encap_port & 0xFFFF) == 0 || sav->remote_ike_port == 0) return FALSE;
4880
4881 if (sav->natt_interval != 0) {
4882 keepalive_interval = (int)sav->natt_interval;
4883 }
4884
4885 // natt timestamp may have changed... reverify
4886 if ((natt_now - sav->natt_last_activity) < keepalive_interval) return FALSE;
4887
4888 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) return FALSE; // don't send these from the kernel
4889
4890 m = m_gethdr(M_NOWAIT, MT_DATA);
4891 if (m == NULL) return FALSE;
4892
4893 ip = (__typeof__(ip))m_mtod(m);
4894
4895 // this sends one type of NAT-T keepalive (Type 1, ESP keepalives, aren't sent by the kernel)
4896 if ((sav->flags & SADB_X_EXT_ESP_KEEPALIVE) == 0) {
4897 struct udphdr *uh;
4898
4899 /*
4900 * Type 2: a UDP packet complete with IP header.
4901 * We must do this because UDP output requires
4902 * an inpcb, which we don't have. The UDP packet
4903 * contains a one-byte payload. The byte is set
4904 * to 0xFF.
4905 */
4906 uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4907 m->m_len = sizeof(struct udpiphdr) + 1;
4908 bzero(m_mtod(m), m->m_len);
4909 m->m_pkthdr.len = m->m_len;
4910
4911 ip->ip_len = m->m_len;
4912 ip->ip_ttl = ip_defttl;
4913 ip->ip_p = IPPROTO_UDP;
4914 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4915 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4916 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4917 } else {
4918 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4919 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4920 }
4921 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4922 uh->uh_dport = htons(sav->remote_ike_port);
4923 uh->uh_ulen = htons(1 + sizeof(*uh));
4924 uh->uh_sum = 0;
4925 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4926 }
4927
4928 // grab sadb_mutex, to get a local copy of sah's route cache
4929 lck_mtx_lock(sadb_mutex);
4930 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4931 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET)
4932 ROUTE_RELEASE(&sav->sah->sa_route);
4933
4934 route_copyout(&ro, &sav->sah->sa_route, sizeof(ro));
4935 lck_mtx_unlock(sadb_mutex);
4936
4937 necp_mark_packet_as_keepalive(m, TRUE);
4938
4939 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4940
4941 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
4942 lck_mtx_lock(sadb_mutex);
4943 route_copyin(&ro, &sav->sah->sa_route, sizeof(ro));
4944 lck_mtx_unlock(sadb_mutex);
4945 if (error == 0) {
4946 sav->natt_last_activity = natt_now;
4947 return TRUE;
4948 }
4949 return FALSE;
4950 }
4951
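/*
 * Populate an ifnet keepalive-offload frame with a prebuilt IPv4/UDP
 * NAT-T keepalive (one-byte 0xFF payload) so the interface can transmit
 * it periodically on behalf of this SA; returns FALSE when the SA is not
 * eligible for offload or the frame buffer is too small.
 */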
4952 __private_extern__ bool
4953 ipsec_fill_offload_frame(ifnet_t ifp,
4954 struct secasvar *sav,
4955 struct ifnet_keepalive_offload_frame *frame,
4956 size_t frame_data_offset)
4957 {
4958 u_int8_t *data = NULL;
4959 struct ip *ip = NULL;
4960 struct udphdr *uh = NULL;
4961
4962 if (sav == NULL || sav->sah == NULL || frame == NULL ||
4963 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
4964 sav->sah->saidx.dst.ss_family != AF_INET ||
4965 !(sav->flags & SADB_X_EXT_NATT) ||
4966 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
4967 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
4968 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
4969 (esp_udp_encap_port & 0xFFFF) == 0 ||
4970 sav->remote_ike_port == 0 ||
4971 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
4972 /* SA is not eligible for keepalive offload on this interface */
4973 return (FALSE);
4974 }
4975
4976 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
4977 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4978 /* Not enough room in this data frame */
4979 return (FALSE);
4980 }
4981
4982 data = frame->data;
4983 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
4984 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
4985
4986 frame->length = frame_data_offset + sizeof(struct udpiphdr) + 1;
4987 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
4988 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
4989
4990 bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
4991
4992 ip->ip_v = IPVERSION;
4993 ip->ip_hl = sizeof(struct ip) >> 2;
4994 ip->ip_off &= htons(~IP_OFFMASK);
4995 ip->ip_off &= htons(~IP_MF);
4996 switch (ip4_ipsec_dfbit) {
4997 case 0: /* clear DF bit */
4998 ip->ip_off &= htons(~IP_DF);
4999 break;
5000 case 1: /* set DF bit */
5001 ip->ip_off |= htons(IP_DF);
5002 break;
5003 default: /* copy DF bit */
5004 break;
5005 }
5006 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
5007 ip->ip_id = ip_randomid();
5008 ip->ip_ttl = ip_defttl;
5009 ip->ip_p = IPPROTO_UDP;
5010 ip->ip_sum = 0;
5011 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5012 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5013 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5014 } else {
5015 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5016 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5017 }
5018 ip->ip_sum = in_cksum_hdr_opt(ip);
5019 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5020 uh->uh_dport = htons(sav->remote_ike_port);
5021 uh->uh_ulen = htons(1 + sizeof(*uh));
5022 uh->uh_sum = 0;
5023 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
5024
5025 if (sav->natt_offload_interval != 0) {
5026 frame->interval = sav->natt_offload_interval;
5027 } else if (sav->natt_interval != 0) {
5028 frame->interval = sav->natt_interval;
5029 } else {
5030 frame->interval = natt_keepalive_interval;
5031 }
5032 return (TRUE);
5033 }