bsd/netinet6/ipsec.c (apple/xnu, tag xnu-6153.41.3)
1 /*
2 * Copyright (c) 2008-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30 /* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * IPsec controller part.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/errno.h>
75 #include <sys/time.h>
76 #include <sys/kernel.h>
77 #include <sys/syslog.h>
78 #include <sys/sysctl.h>
79 #include <sys/priv.h>
80 #include <kern/locks.h>
81 #include <sys/kauth.h>
82 #include <sys/bitstring.h>
83
84 #include <libkern/OSAtomic.h>
85
86 #include <net/if.h>
87 #include <net/route.h>
88 #include <net/if_ipsec.h>
89 #include <net/if_ports_used.h>
90
91 #include <netinet/in.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/ip.h>
94 #include <netinet/ip_var.h>
95 #include <netinet/in_var.h>
96 #include <netinet/udp.h>
97 #include <netinet/udp_var.h>
98 #include <netinet/ip_ecn.h>
99 #if INET6
100 #include <netinet6/ip6_ecn.h>
101 #endif
102 #include <netinet/tcp.h>
103 #include <netinet/udp.h>
104
105 #include <netinet/ip6.h>
106 #if INET6
107 #include <netinet6/ip6_var.h>
108 #endif
109 #include <netinet/in_pcb.h>
110 #if INET6
111 #include <netinet/icmp6.h>
112 #endif
113
114 #include <netinet6/ipsec.h>
115 #if INET6
116 #include <netinet6/ipsec6.h>
117 #endif
118 #include <netinet6/ah.h>
119 #if INET6
120 #include <netinet6/ah6.h>
121 #endif
122 #if IPSEC_ESP
123 #include <netinet6/esp.h>
124 #if INET6
125 #include <netinet6/esp6.h>
126 #endif
127 #endif
128 #include <netkey/key.h>
129 #include <netkey/keydb.h>
130 #include <netkey/key_debug.h>
131
132 #include <net/net_osdep.h>
133
134 #include <IOKit/pwr_mgt/IOPM.h>
135
136 #include <os/log_private.h>
137
138 #if IPSEC_DEBUG
139 int ipsec_debug = 1;
140 #else
141 int ipsec_debug = 0;
142 #endif
143
144 #include <sys/kdebug.h>
145 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
146 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
147 #define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
148 #define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
149 #define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
150
151 extern lck_mtx_t *sadb_mutex;
152
153 struct ipsecstat ipsecstat;
154 int ip4_ah_cleartos = 1;
155 int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
156 int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
157 int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
158 int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
159 int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
160 int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
161 struct secpolicy ip4_def_policy;
162 int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
163 int ip4_esp_randpad = -1;
164 int esp_udp_encap_port = 0;
165 static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
166 extern int natt_keepalive_interval;
167 extern u_int64_t natt_now;
168
169 struct ipsec_tag;
170
171 void *sleep_wake_handle = NULL;
172 bool ipsec_save_wake_pkt = false;
173
174 SYSCTL_DECL(_net_inet_ipsec);
175 #if INET6
176 SYSCTL_DECL(_net_inet6_ipsec6);
177 #endif
178 /* net.inet.ipsec */
179 SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
180 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
181 SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
182 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
183 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
184 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
185 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
186 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
187 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
188 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
189 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
190 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
191 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
192 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
193 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
194 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
195 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
196 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
197 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
198 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
199 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
200 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
201 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
202 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
203
204 /* for performance, we bypass ipsec until a security policy is set */
205 int ipsec_bypass = 1;
206 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass, 0, "");
207
208 /*
209 * NAT Traversal requires a UDP port for encapsulation,
210 * esp_udp_encap_port controls which port is used. Racoon
211 * must set this port to the port racoon is using locally
212  * for NAT Traversal.
213 */
214 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
215 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
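/*
 * Minimal userland sketch (illustrative only, not part of this file): how an
 * IKE daemon such as racoon might publish the NAT-T port it listens on
 * through the sysctl declared above. The sysctl name
 * "net.inet.ipsec.esp_port" follows from the OID; the helper name and the
 * port value 4500 are hypothetical.
 *
 *	#include <sys/sysctl.h>
 *
 *	static int
 *	set_esp_encap_port(int port)	// e.g. 4500, the IKE NAT-T float port
 *	{
 *		return sysctlbyname("net.inet.ipsec.esp_port",
 *		    NULL, NULL, &port, sizeof(port));
 *	}
 */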
216
217 #if INET6
218 struct ipsecstat ipsec6stat;
219 int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
220 int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
221 int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
222 int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
223 struct secpolicy ip6_def_policy;
224 int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
225 int ip6_esp_randpad = -1;
226
227 /* net.inet6.ipsec6 */
228 SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
229 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
230 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
231 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
232 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
233 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
234 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
235 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
236 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
237 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
238 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
239 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
240 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
241 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
242 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
243 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
244 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
245 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
246 #endif /* INET6 */
247
248 SYSCTL_DECL(_net_link_generic_system);
249
250 struct ipsec_wake_pkt_info ipsec_wake_pkt;
251
252 static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *,
253 int, int, int);
254 static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int,
255 struct mbuf *, int);
256 static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
257 #if INET6
258 static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
259 #endif
260 static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
261 static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
262 static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
263 #if INET6
264 static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
265 static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
266 #endif
267 static struct inpcbpolicy *ipsec_newpcbpolicy(void);
268 static void ipsec_delpcbpolicy(struct inpcbpolicy *);
269 static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
270 static int ipsec_set_policy(struct secpolicy **pcb_sp,
271 int optname, caddr_t request, size_t len, int priv);
272 static void vshiftl(unsigned char *, int, int);
273 static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
274 #if INET6
275 static int ipsec64_encapsulate(struct mbuf *, struct secasvar *);
276 static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
277 static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
278 #endif
279 static struct ipsec_tag *ipsec_addaux(struct mbuf *);
280 static struct ipsec_tag *ipsec_findaux(struct mbuf *);
281 static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
282 int ipsec_send_natt_keepalive(struct secasvar *sav);
283 bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
284
285 extern bool IOPMCopySleepWakeUUIDKey(char *, size_t);
286 extern void *registerSleepWakeInterest(void *, void *, void *);
287
288 static int
289 sysctl_def_policy SYSCTL_HANDLER_ARGS
290 {
291 int new_policy = ip4_def_policy.policy;
292 int error = sysctl_handle_int(oidp, &new_policy, 0, req);
293
294 #pragma unused(arg1, arg2)
295 if (error == 0) {
296 if (new_policy != IPSEC_POLICY_NONE &&
297 new_policy != IPSEC_POLICY_DISCARD) {
298 return EINVAL;
299 }
300 ip4_def_policy.policy = new_policy;
301
302 /* Turn off the bypass if the default security policy changes */
303 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
304 ipsec_bypass = 0;
305 }
306 }
307
308 return error;
309 }
310
311 /*
312  * For an OUTBOUND packet that has a socket. Search the SPD for the packet
313  * and return a pointer to the SP.
314  * OUT: NULL: no appropriate SP found, the following value is set to error.
315 * 0 : bypass
316 * EACCES : discard packet.
317 * ENOENT : ipsec_acquire() in progress, maybe.
318 * others : error occurred.
319 * others: a pointer to SP
320 *
321  * NOTE: IPv6 mapped address concerns are handled here.
322 */
323 struct secpolicy *
324 ipsec4_getpolicybysock(struct mbuf *m,
325 u_int dir,
326 struct socket *so,
327 int *error)
328 {
329 struct inpcbpolicy *pcbsp = NULL;
330 struct secpolicy *currsp = NULL; /* policy on socket */
331 struct secpolicy *kernsp = NULL; /* policy on kernel */
332
333 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
334 /* sanity check */
335 if (m == NULL || so == NULL || error == NULL) {
336 panic("ipsec4_getpolicybysock: NULL pointer was passed.\n");
337 }
338
339 if (so->so_pcb == NULL) {
340 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
341 return ipsec4_getpolicybyaddr(m, dir, 0, error);
342 }
343
344 switch (SOCK_DOM(so)) {
345 case PF_INET:
346 pcbsp = sotoinpcb(so)->inp_sp;
347 break;
348 #if INET6
349 case PF_INET6:
350 pcbsp = sotoin6pcb(so)->in6p_sp;
351 break;
352 #endif
353 }
354
355 if (!pcbsp) {
356 /* Socket has not specified an IPSEC policy */
357 return ipsec4_getpolicybyaddr(m, dir, 0, error);
358 }
359
360 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0, 0, 0, 0, 0);
361
362 switch (SOCK_DOM(so)) {
363 case PF_INET:
364 /* set spidx in pcb */
365 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
366 break;
367 #if INET6
368 case PF_INET6:
369 /* set spidx in pcb */
370 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
371 break;
372 #endif
373 default:
374 panic("ipsec4_getpolicybysock: unsupported address family\n");
375 }
376 if (*error) {
377 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1, *error, 0, 0, 0);
378 return NULL;
379 }
380
381 /* sanity check */
382 if (pcbsp == NULL) {
383 panic("ipsec4_getpolicybysock: pcbsp is NULL.\n");
384 }
385
386 switch (dir) {
387 case IPSEC_DIR_INBOUND:
388 currsp = pcbsp->sp_in;
389 break;
390 case IPSEC_DIR_OUTBOUND:
391 currsp = pcbsp->sp_out;
392 break;
393 default:
394 panic("ipsec4_getpolicybysock: illegal direction.\n");
395 }
396
397 /* sanity check */
398 if (currsp == NULL) {
399 panic("ipsec4_getpolicybysock: currsp is NULL.\n");
400 }
401
402 /* when privileged socket */
403 if (pcbsp->priv) {
404 switch (currsp->policy) {
405 case IPSEC_POLICY_BYPASS:
406 lck_mtx_lock(sadb_mutex);
407 currsp->refcnt++;
408 lck_mtx_unlock(sadb_mutex);
409 *error = 0;
410 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2, *error, 0, 0, 0);
411 return currsp;
412
413 case IPSEC_POLICY_ENTRUST:
414 /* look for a policy in SPD */
415 kernsp = key_allocsp(&currsp->spidx, dir);
416
417 /* SP found */
418 if (kernsp != NULL) {
419 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
420 printf("DP ipsec4_getpolicybysock called "
421 "to allocate SP:0x%llx\n",
422 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
423 *error = 0;
424 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3, *error, 0, 0, 0);
425 return kernsp;
426 }
427
428 /* no SP found */
429 lck_mtx_lock(sadb_mutex);
430 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
431 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
432 ipseclog((LOG_INFO,
433 "fixed system default policy: %d->%d\n",
434 ip4_def_policy.policy, IPSEC_POLICY_NONE));
435 ip4_def_policy.policy = IPSEC_POLICY_NONE;
436 }
437 ip4_def_policy.refcnt++;
438 lck_mtx_unlock(sadb_mutex);
439 *error = 0;
440 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4, *error, 0, 0, 0);
441 return &ip4_def_policy;
442
443 case IPSEC_POLICY_IPSEC:
444 lck_mtx_lock(sadb_mutex);
445 currsp->refcnt++;
446 lck_mtx_unlock(sadb_mutex);
447 *error = 0;
448 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5, *error, 0, 0, 0);
449 return currsp;
450
451 default:
452 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
453 "Invalid policy for PCB %d\n", currsp->policy));
454 *error = EINVAL;
455 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6, *error, 0, 0, 0);
456 return NULL;
457 }
458 /* NOTREACHED */
459 }
460
461 /* when non-privileged socket */
462 /* look for a policy in SPD */
463 kernsp = key_allocsp(&currsp->spidx, dir);
464
465 /* SP found */
466 if (kernsp != NULL) {
467 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
468 printf("DP ipsec4_getpolicybysock called "
469 "to allocate SP:0x%llx\n",
470 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
471 *error = 0;
472 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7, *error, 0, 0, 0);
473 return kernsp;
474 }
475
476 /* no SP found */
477 switch (currsp->policy) {
478 case IPSEC_POLICY_BYPASS:
479 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
480 "Illegal policy for non-privileged defined %d\n",
481 currsp->policy));
482 *error = EINVAL;
483 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8, *error, 0, 0, 0);
484 return NULL;
485
486 case IPSEC_POLICY_ENTRUST:
487 lck_mtx_lock(sadb_mutex);
488 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
489 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
490 ipseclog((LOG_INFO,
491 "fixed system default policy: %d->%d\n",
492 ip4_def_policy.policy, IPSEC_POLICY_NONE));
493 ip4_def_policy.policy = IPSEC_POLICY_NONE;
494 }
495 ip4_def_policy.refcnt++;
496 lck_mtx_unlock(sadb_mutex);
497 *error = 0;
498 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9, *error, 0, 0, 0);
499 return &ip4_def_policy;
500
501 case IPSEC_POLICY_IPSEC:
502 lck_mtx_lock(sadb_mutex);
503 currsp->refcnt++;
504 lck_mtx_unlock(sadb_mutex);
505 *error = 0;
506 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10, *error, 0, 0, 0);
507 return currsp;
508
509 default:
510 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
511 "Invalid policy for PCB %d\n", currsp->policy));
512 *error = EINVAL;
513 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11, *error, 0, 0, 0);
514 return NULL;
515 }
516 /* NOTREACHED */
517 }
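/*
 * Caller-side sketch (illustrative only, not part of this file) of the
 * return/error convention documented above: a NULL SP with *error == 0 means
 * bypass, a NULL SP with a non-zero error means drop or defer, and a non-NULL
 * SP carries a reference the caller must release with key_freesp(). The
 * "drop" label is hypothetical.
 *
 *	int error = 0;
 *	struct secpolicy *sp =
 *	    ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
 *	if (sp == NULL) {
 *		if (error != 0) {
 *			// EACCES: discard; ENOENT: SA acquire in progress; ...
 *			goto drop;
 *		}
 *		// bypass: send in the clear
 *	} else {
 *		// consult sp->policy / sp->req, then drop the reference
 *		key_freesp(sp, KEY_SADB_UNLOCKED);
 *	}
 */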
518
519 /*
520  * For a FORWARDING packet or an OUTBOUND packet without a socket. Search the
521  * SPD for the packet and return a pointer to the SP.
522  * OUT: non-NULL: a pointer to the matched security policy leaf entry.
523  * NULL: no appropriate SP found, the following value is set to error.
524 * 0 : bypass
525 * EACCES : discard packet.
526 * ENOENT : ipsec_acquire() in progress, maybe.
527 * others : error occurred.
528 */
529 struct secpolicy *
530 ipsec4_getpolicybyaddr(struct mbuf *m,
531 u_int dir,
532 int flag,
533 int *error)
534 {
535 struct secpolicy *sp = NULL;
536
537 if (ipsec_bypass != 0) {
538 return 0;
539 }
540
541 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
542
543 /* sanity check */
544 if (m == NULL || error == NULL) {
545 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n");
546 }
547 {
548 struct secpolicyindex spidx;
549
550 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
551 bzero(&spidx, sizeof(spidx));
552
553 /* make an index to look for a policy */
554 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
555 (flag & IP_FORWARDING) ? 0 : 1);
556
557 if (*error != 0) {
558 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, *error, 0, 0, 0);
559 return NULL;
560 }
561
562 sp = key_allocsp(&spidx, dir);
563 }
564
565 /* SP found */
566 if (sp != NULL) {
567 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
568 printf("DP ipsec4_getpolicybyaddr called "
569 "to allocate SP:0x%llx\n",
570 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
571 *error = 0;
572 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
573 return sp;
574 }
575
576 /* no SP found */
577 lck_mtx_lock(sadb_mutex);
578 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
579 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
580 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
581 ip4_def_policy.policy,
582 IPSEC_POLICY_NONE));
583 ip4_def_policy.policy = IPSEC_POLICY_NONE;
584 }
585 ip4_def_policy.refcnt++;
586 lck_mtx_unlock(sadb_mutex);
587 *error = 0;
588 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3, *error, 0, 0, 0);
589 return &ip4_def_policy;
590 }
591
592 /* Match with bound interface rather than src addr.
593 * Unlike getpolicybyaddr, do not set the default policy.
594  * Return 0 if processing should continue, or -1 if the packet
595 * should be dropped.
596 */
597 int
598 ipsec4_getpolicybyinterface(struct mbuf *m,
599 u_int dir,
600 int *flags,
601 struct ip_out_args *ipoa,
602 struct secpolicy **sp)
603 {
604 struct secpolicyindex spidx;
605 int error = 0;
606
607 if (ipsec_bypass != 0) {
608 return 0;
609 }
610
611 /* Sanity check */
612 if (m == NULL || ipoa == NULL || sp == NULL) {
613 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n");
614 }
615
616 if (ipoa->ipoa_boundif == IFSCOPE_NONE) {
617 return 0;
618 }
619
620 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
621 bzero(&spidx, sizeof(spidx));
622
623 /* make an index to look for a policy */
624 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
625 ipoa->ipoa_boundif, 4);
626
627 if (error != 0) {
628 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
629 return 0;
630 }
631
632 *sp = key_allocsp(&spidx, dir);
633
634 /* Return SP, whether NULL or not */
635 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
636 if ((*sp)->ipsec_if == NULL) {
637 /* Invalid to capture on an interface without redirect */
638 key_freesp(*sp, KEY_SADB_UNLOCKED);
639 *sp = NULL;
640 return -1;
641 } else if ((*sp)->disabled) {
642 /* Disabled policies go in the clear */
643 key_freesp(*sp, KEY_SADB_UNLOCKED);
644 *sp = NULL;
645 *flags |= IP_NOIPSEC; /* Avoid later IPsec check */
646 } else {
647 /* If policy is enabled, redirect to ipsec interface */
648 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
649 }
650 }
651
652 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
653
654 return 0;
655 }
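/*
 * Illustrative caller sketch (not part of this file): a caller in the IPv4
 * output path reacts to the three outcomes above - drop (-1), "go in the
 * clear" (IP_NOIPSEC added to *flags), or redirection of ipoa_boundif to the
 * ipsec interface. The locals flags/ipoa and the "done" label are hypothetical.
 *
 *	struct secpolicy *sp = NULL;
 *	if (ipsec4_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND, &flags,
 *	    &ipoa, &sp) != 0) {
 *		m_freem(m);		// policy says drop
 *		goto done;
 *	}
 *	// flags may now include IP_NOIPSEC, or ipoa.ipoa_boundif may have been
 *	// rescoped to an ipsec interface; a non-NULL sp still holds a
 *	// reference that is released later with key_freesp().
 */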
656
657
658 #if INET6
659 /*
660  * For an OUTBOUND packet that has a socket. Search the SPD for the packet
661  * and return a pointer to the SP.
662  * OUT: NULL: no appropriate SP found, the following value is set to error.
663 * 0 : bypass
664 * EACCES : discard packet.
665 * ENOENT : ipsec_acquire() in progress, maybe.
666 * others : error occurred.
667 * others: a pointer to SP
668 */
669 struct secpolicy *
670 ipsec6_getpolicybysock(struct mbuf *m,
671 u_int dir,
672 struct socket *so,
673 int *error)
674 {
675 struct inpcbpolicy *pcbsp = NULL;
676 struct secpolicy *currsp = NULL; /* policy on socket */
677 struct secpolicy *kernsp = NULL; /* policy on kernel */
678
679 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
680
681 /* sanity check */
682 if (m == NULL || so == NULL || error == NULL) {
683 panic("ipsec6_getpolicybysock: NULL pointer was passed.\n");
684 }
685
686 #if DIAGNOSTIC
687 if (SOCK_DOM(so) != PF_INET6) {
688 panic("ipsec6_getpolicybysock: socket domain != inet6\n");
689 }
690 #endif
691
692 pcbsp = sotoin6pcb(so)->in6p_sp;
693
694 if (!pcbsp) {
695 return ipsec6_getpolicybyaddr(m, dir, 0, error);
696 }
697
698 /* set spidx in pcb */
699 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
700
701 /* sanity check */
702 if (pcbsp == NULL) {
703 panic("ipsec6_getpolicybysock: pcbsp is NULL.\n");
704 }
705
706 switch (dir) {
707 case IPSEC_DIR_INBOUND:
708 currsp = pcbsp->sp_in;
709 break;
710 case IPSEC_DIR_OUTBOUND:
711 currsp = pcbsp->sp_out;
712 break;
713 default:
714 panic("ipsec6_getpolicybysock: illegal direction.\n");
715 }
716
717 /* sanity check */
718 if (currsp == NULL) {
719 panic("ipsec6_getpolicybysock: currsp is NULL.\n");
720 }
721
722 /* when privileged socket */
723 if (pcbsp->priv) {
724 switch (currsp->policy) {
725 case IPSEC_POLICY_BYPASS:
726 lck_mtx_lock(sadb_mutex);
727 currsp->refcnt++;
728 lck_mtx_unlock(sadb_mutex);
729 *error = 0;
730 return currsp;
731
732 case IPSEC_POLICY_ENTRUST:
733 /* look for a policy in SPD */
734 kernsp = key_allocsp(&currsp->spidx, dir);
735
736 /* SP found */
737 if (kernsp != NULL) {
738 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
739 printf("DP ipsec6_getpolicybysock called "
740 "to allocate SP:0x%llx\n",
741 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
742 *error = 0;
743 return kernsp;
744 }
745
746 /* no SP found */
747 lck_mtx_lock(sadb_mutex);
748 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
749 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
750 ipseclog((LOG_INFO,
751 "fixed system default policy: %d->%d\n",
752 ip6_def_policy.policy, IPSEC_POLICY_NONE));
753 ip6_def_policy.policy = IPSEC_POLICY_NONE;
754 }
755 ip6_def_policy.refcnt++;
756 lck_mtx_unlock(sadb_mutex);
757 *error = 0;
758 return &ip6_def_policy;
759
760 case IPSEC_POLICY_IPSEC:
761 lck_mtx_lock(sadb_mutex);
762 currsp->refcnt++;
763 lck_mtx_unlock(sadb_mutex);
764 *error = 0;
765 return currsp;
766
767 default:
768 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
769 "Invalid policy for PCB %d\n", currsp->policy));
770 *error = EINVAL;
771 return NULL;
772 }
773 /* NOTREACHED */
774 }
775
776 /* when non-privileged socket */
777 /* look for a policy in SPD */
778 kernsp = key_allocsp(&currsp->spidx, dir);
779
780 /* SP found */
781 if (kernsp != NULL) {
782 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
783 printf("DP ipsec6_getpolicybysock called "
784 "to allocate SP:0x%llx\n",
785 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
786 *error = 0;
787 return kernsp;
788 }
789
790 /* no SP found */
791 switch (currsp->policy) {
792 case IPSEC_POLICY_BYPASS:
793 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
794 "Illegal policy for non-privileged defined %d\n",
795 currsp->policy));
796 *error = EINVAL;
797 return NULL;
798
799 case IPSEC_POLICY_ENTRUST:
800 lck_mtx_lock(sadb_mutex);
801 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
802 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
803 ipseclog((LOG_INFO,
804 "fixed system default policy: %d->%d\n",
805 ip6_def_policy.policy, IPSEC_POLICY_NONE));
806 ip6_def_policy.policy = IPSEC_POLICY_NONE;
807 }
808 ip6_def_policy.refcnt++;
809 lck_mtx_unlock(sadb_mutex);
810 *error = 0;
811 return &ip6_def_policy;
812
813 case IPSEC_POLICY_IPSEC:
814 lck_mtx_lock(sadb_mutex);
815 currsp->refcnt++;
816 lck_mtx_unlock(sadb_mutex);
817 *error = 0;
818 return currsp;
819
820 default:
821 ipseclog((LOG_ERR,
822 "ipsec6_getpolicybysock: Invalid policy for PCB %d\n",
823 currsp->policy));
824 *error = EINVAL;
825 return NULL;
826 }
827 /* NOTREACHED */
828 }
829
830 /*
831  * For a FORWARDING packet or an OUTBOUND packet without a socket. Search the
832  * SPD for the packet and return a pointer to the SP.
833  * `flag' indicates whether the packet is being forwarded.
834  * flag = 1: forward
835  * OUT: non-NULL: a pointer to the matched security policy leaf entry.
836  * NULL: no appropriate SP found, the following value is set to error.
837 * 0 : bypass
838 * EACCES : discard packet.
839 * ENOENT : ipsec_acquire() in progress, maybe.
840 * others : error occurred.
841 */
842 #ifndef IP_FORWARDING
843 #define IP_FORWARDING 1
844 #endif
845
846 struct secpolicy *
847 ipsec6_getpolicybyaddr(struct mbuf *m,
848 u_int dir,
849 int flag,
850 int *error)
851 {
852 struct secpolicy *sp = NULL;
853
854 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
855
856 /* sanity check */
857 if (m == NULL || error == NULL) {
858 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n");
859 }
860
861 {
862 struct secpolicyindex spidx;
863
864 bzero(&spidx, sizeof(spidx));
865
866 /* make an index to look for a policy */
867 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
868 (flag & IP_FORWARDING) ? 0 : 1);
869
870 if (*error != 0) {
871 return NULL;
872 }
873
874 sp = key_allocsp(&spidx, dir);
875 }
876
877 /* SP found */
878 if (sp != NULL) {
879 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
880 printf("DP ipsec6_getpolicybyaddr called "
881 "to allocate SP:0x%llx\n",
882 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
883 *error = 0;
884 return sp;
885 }
886
887 /* no SP found */
888 lck_mtx_lock(sadb_mutex);
889 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
890 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
891 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
892 ip6_def_policy.policy, IPSEC_POLICY_NONE));
893 ip6_def_policy.policy = IPSEC_POLICY_NONE;
894 }
895 ip6_def_policy.refcnt++;
896 lck_mtx_unlock(sadb_mutex);
897 *error = 0;
898 return &ip6_def_policy;
899 }
900
901 /* Match with bound interface rather than src addr.
902 * Unlike getpolicybyaddr, do not set the default policy.
903  * Return 0 if processing should continue, or -1 if the packet
904 * should be dropped.
905 */
906 int
907 ipsec6_getpolicybyinterface(struct mbuf *m,
908 u_int dir,
909 int flag,
910 struct ip6_out_args *ip6oap,
911 int *noipsec,
912 struct secpolicy **sp)
913 {
914 struct secpolicyindex spidx;
915 int error = 0;
916
917 if (ipsec_bypass != 0) {
918 return 0;
919 }
920
921 /* Sanity check */
922 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) {
923 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n");
924 }
925
926 *noipsec = 0;
927
928 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) {
929 return 0;
930 }
931
932 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
933 bzero(&spidx, sizeof(spidx));
934
935 /* make an index to look for a policy */
936 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
937 ip6oap->ip6oa_boundif, 6);
938
939 if (error != 0) {
940 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
941 return 0;
942 }
943
944 *sp = key_allocsp(&spidx, dir);
945
946 /* Return SP, whether NULL or not */
947 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
948 if ((*sp)->ipsec_if == NULL) {
949 /* Invalid to capture on an interface without redirect */
950 key_freesp(*sp, KEY_SADB_UNLOCKED);
951 *sp = NULL;
952 return -1;
953 } else if ((*sp)->disabled) {
954 /* Disabled policies go in the clear */
955 key_freesp(*sp, KEY_SADB_UNLOCKED);
956 *sp = NULL;
957 *noipsec = 1; /* Avoid later IPsec check */
958 } else {
959 /* If policy is enabled, redirect to ipsec interface */
960 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
961 }
962 }
963
964 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
965
966 return 0;
967 }
968 #endif /* INET6 */
969
970 /*
971 * set IP address into spidx from mbuf.
972  * This function is used when forwarding a packet and for ICMP echo replies.
973  *
974  * IN: get the following from the mbuf:
975 * protocol family, src, dst, next protocol
976 * OUT:
977 * 0: success.
978 * other: failure, and set errno.
979 */
980 static int
981 ipsec_setspidx_mbuf(
982 struct secpolicyindex *spidx,
983 u_int dir,
984 __unused u_int family,
985 struct mbuf *m,
986 int needport)
987 {
988 int error;
989
990 /* sanity check */
991 if (spidx == NULL || m == NULL) {
992 panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n");
993 }
994
995 bzero(spidx, sizeof(*spidx));
996
997 error = ipsec_setspidx(m, spidx, needport, 0);
998 if (error) {
999 goto bad;
1000 }
1001 spidx->dir = dir;
1002
1003 return 0;
1004
1005 bad:
1006 /* XXX initialize */
1007 bzero(spidx, sizeof(*spidx));
1008 return EINVAL;
1009 }
1010
1011 static int
1012 ipsec_setspidx_interface(
1013 struct secpolicyindex *spidx,
1014 u_int dir,
1015 struct mbuf *m,
1016 int needport,
1017 int ifindex,
1018 int ip_version)
1019 {
1020 int error;
1021
1022 /* sanity check */
1023 if (spidx == NULL || m == NULL) {
1024 panic("ipsec_setspidx_interface: NULL pointer was passed.\n");
1025 }
1026
1027 bzero(spidx, sizeof(*spidx));
1028
1029 error = ipsec_setspidx(m, spidx, needport, ip_version);
1030 if (error) {
1031 goto bad;
1032 }
1033 spidx->dir = dir;
1034
1035 if (ifindex != 0) {
1036 ifnet_head_lock_shared();
1037 spidx->internal_if = ifindex2ifnet[ifindex];
1038 ifnet_head_done();
1039 } else {
1040 spidx->internal_if = NULL;
1041 }
1042
1043 return 0;
1044
1045 bad:
1046 return EINVAL;
1047 }
1048
1049 static int
1050 ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1051 {
1052 struct secpolicyindex *spidx;
1053 int error;
1054
1055 if (ipsec_bypass != 0) {
1056 return 0;
1057 }
1058
1059 /* sanity check */
1060 if (pcb == NULL) {
1061 panic("ipsec4_setspidx_inpcb: no PCB found.\n");
1062 }
1063 if (pcb->inp_sp == NULL) {
1064 panic("ipsec4_setspidx_inpcb: no inp_sp found.\n");
1065 }
1066 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) {
1067 panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n");
1068 }
1069
1070 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1071 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1072
1073 spidx = &pcb->inp_sp->sp_in->spidx;
1074 error = ipsec_setspidx(m, spidx, 1, 0);
1075 if (error) {
1076 goto bad;
1077 }
1078 spidx->dir = IPSEC_DIR_INBOUND;
1079
1080 spidx = &pcb->inp_sp->sp_out->spidx;
1081 error = ipsec_setspidx(m, spidx, 1, 0);
1082 if (error) {
1083 goto bad;
1084 }
1085 spidx->dir = IPSEC_DIR_OUTBOUND;
1086
1087 return 0;
1088
1089 bad:
1090 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1091 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1092 return error;
1093 }
1094
1095 #if INET6
1096 static int
1097 ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1098 {
1099 struct secpolicyindex *spidx;
1100 int error;
1101
1102 /* sanity check */
1103 if (pcb == NULL) {
1104 panic("ipsec6_setspidx_in6pcb: no PCB found.\n");
1105 }
1106 if (pcb->in6p_sp == NULL) {
1107 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n");
1108 }
1109 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) {
1110 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n");
1111 }
1112
1113 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1114 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1115
1116 spidx = &pcb->in6p_sp->sp_in->spidx;
1117 error = ipsec_setspidx(m, spidx, 1, 0);
1118 if (error) {
1119 goto bad;
1120 }
1121 spidx->dir = IPSEC_DIR_INBOUND;
1122
1123 spidx = &pcb->in6p_sp->sp_out->spidx;
1124 error = ipsec_setspidx(m, spidx, 1, 0);
1125 if (error) {
1126 goto bad;
1127 }
1128 spidx->dir = IPSEC_DIR_OUTBOUND;
1129
1130 return 0;
1131
1132 bad:
1133 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1134 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1135 return error;
1136 }
1137 #endif
1138
1139 /*
1140 * configure security policy index (src/dst/proto/sport/dport)
1141 * by looking at the content of mbuf.
1142 * the caller is responsible for error recovery (like clearing up spidx).
1143 */
1144 static int
1145 ipsec_setspidx(struct mbuf *m,
1146 struct secpolicyindex *spidx,
1147 int needport,
1148 int force_ip_version)
1149 {
1150 struct ip *ip = NULL;
1151 struct ip ipbuf;
1152 u_int v;
1153 struct mbuf *n;
1154 int len;
1155 int error;
1156
1157 if (m == NULL) {
1158 panic("ipsec_setspidx: m == 0 passed.\n");
1159 }
1160
1161 /*
1162 * validate m->m_pkthdr.len. we see incorrect length if we
1163 * mistakenly call this function with inconsistent mbuf chain
1164 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1165 */
1166 len = 0;
1167 for (n = m; n; n = n->m_next) {
1168 len += n->m_len;
1169 }
1170 if (m->m_pkthdr.len != len) {
1171 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1172 printf("ipsec_setspidx: "
1173 "total of m_len(%d) != pkthdr.len(%d), "
1174 "ignored.\n",
1175 len, m->m_pkthdr.len));
1176 return EINVAL;
1177 }
1178
1179 if (m->m_pkthdr.len < sizeof(struct ip)) {
1180 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1181 printf("ipsec_setspidx: "
1182 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1183 m->m_pkthdr.len));
1184 return EINVAL;
1185 }
1186
1187 if (m->m_len >= sizeof(*ip)) {
1188 ip = mtod(m, struct ip *);
1189 } else {
1190 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1191 ip = &ipbuf;
1192 }
1193
1194 if (force_ip_version) {
1195 v = force_ip_version;
1196 } else {
1197 #ifdef _IP_VHL
1198 v = _IP_VHL_V(ip->ip_vhl);
1199 #else
1200 v = ip->ip_v;
1201 #endif
1202 }
1203 switch (v) {
1204 case 4:
1205 error = ipsec4_setspidx_ipaddr(m, spidx);
1206 if (error) {
1207 return error;
1208 }
1209 ipsec4_get_ulp(m, spidx, needport);
1210 return 0;
1211 #if INET6
1212 case 6:
1213 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1214 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1215 printf("ipsec_setspidx: "
1216 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1217 "ignored.\n", m->m_pkthdr.len));
1218 return EINVAL;
1219 }
1220 error = ipsec6_setspidx_ipaddr(m, spidx);
1221 if (error) {
1222 return error;
1223 }
1224 ipsec6_get_ulp(m, spidx, needport);
1225 return 0;
1226 #endif
1227 default:
1228 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1229 printf("ipsec_setspidx: "
1230 "unknown IP version %u, ignored.\n", v));
1231 return EINVAL;
1232 }
1233 }
1234
1235 static void
1236 ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1237 {
1238 struct ip ip;
1239 struct ip6_ext ip6e;
1240 u_int8_t nxt;
1241 int off;
1242 struct tcphdr th;
1243 struct udphdr uh;
1244
1245 /* sanity check */
1246 if (m == NULL) {
1247 panic("ipsec4_get_ulp: NULL pointer was passed.\n");
1248 }
1249 if (m->m_pkthdr.len < sizeof(ip)) {
1250 panic("ipsec4_get_ulp: too short\n");
1251 }
1252
1253 /* set default */
1254 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1255 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1256 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1257
1258 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1259 /* ip_input() flips it into host endian XXX need more checking */
1260 if (ip.ip_off & (IP_MF | IP_OFFMASK)) {
1261 return;
1262 }
1263
1264 nxt = ip.ip_p;
1265 #ifdef _IP_VHL
1266 off = _IP_VHL_HL(ip.ip_vhl) << 2;
1267 #else
1268 off = ip.ip_hl << 2;
1269 #endif
1270 while (off < m->m_pkthdr.len) {
1271 switch (nxt) {
1272 case IPPROTO_TCP:
1273 spidx->ul_proto = nxt;
1274 if (!needport) {
1275 return;
1276 }
1277 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1278 return;
1279 }
1280 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1281 ((struct sockaddr_in *)&spidx->src)->sin_port =
1282 th.th_sport;
1283 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1284 th.th_dport;
1285 return;
1286 case IPPROTO_UDP:
1287 spidx->ul_proto = nxt;
1288 if (!needport) {
1289 return;
1290 }
1291 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1292 return;
1293 }
1294 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1295 ((struct sockaddr_in *)&spidx->src)->sin_port =
1296 uh.uh_sport;
1297 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1298 uh.uh_dport;
1299 return;
1300 case IPPROTO_AH:
1301 if (off + sizeof(ip6e) > m->m_pkthdr.len) {
1302 return;
1303 }
1304 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1305 off += (ip6e.ip6e_len + 2) << 2;
1306 nxt = ip6e.ip6e_nxt;
1307 break;
1308 case IPPROTO_ICMP:
1309 default:
1310 /* XXX intermediate headers??? */
1311 spidx->ul_proto = nxt;
1312 return;
1313 }
1314 }
1315 }
1316
1317 /* assumes that m is sane */
1318 static int
1319 ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1320 {
1321 struct ip *ip = NULL;
1322 struct ip ipbuf;
1323 struct sockaddr_in *sin;
1324
1325 if (m->m_len >= sizeof(*ip)) {
1326 ip = mtod(m, struct ip *);
1327 } else {
1328 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1329 ip = &ipbuf;
1330 }
1331
1332 sin = (struct sockaddr_in *)&spidx->src;
1333 bzero(sin, sizeof(*sin));
1334 sin->sin_family = AF_INET;
1335 sin->sin_len = sizeof(struct sockaddr_in);
1336 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1337 spidx->prefs = sizeof(struct in_addr) << 3;
1338
1339 sin = (struct sockaddr_in *)&spidx->dst;
1340 bzero(sin, sizeof(*sin));
1341 sin->sin_family = AF_INET;
1342 sin->sin_len = sizeof(struct sockaddr_in);
1343 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1344 spidx->prefd = sizeof(struct in_addr) << 3;
1345
1346 return 0;
1347 }
1348
1349 #if INET6
1350 static void
1351 ipsec6_get_ulp(struct mbuf *m,
1352 struct secpolicyindex *spidx,
1353 int needport)
1354 {
1355 int off, nxt;
1356 struct tcphdr th;
1357 struct udphdr uh;
1358
1359 /* sanity check */
1360 if (m == NULL) {
1361 panic("ipsec6_get_ulp: NULL pointer was passed.\n");
1362 }
1363
1364 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1365 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1366
1367 /* set default */
1368 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1369 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1370 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1371
1372 nxt = -1;
1373 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1374 if (off < 0 || m->m_pkthdr.len < off) {
1375 return;
1376 }
1377
1378 switch (nxt) {
1379 case IPPROTO_TCP:
1380 spidx->ul_proto = nxt;
1381 if (!needport) {
1382 break;
1383 }
1384 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1385 break;
1386 }
1387 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1388 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1389 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1390 break;
1391 case IPPROTO_UDP:
1392 spidx->ul_proto = nxt;
1393 if (!needport) {
1394 break;
1395 }
1396 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1397 break;
1398 }
1399 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1400 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1401 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1402 break;
1403 case IPPROTO_ICMPV6:
1404 default:
1405 /* XXX intermediate headers??? */
1406 spidx->ul_proto = nxt;
1407 break;
1408 }
1409 }
1410
1411 /* assumes that m is sane */
1412 static int
1413 ipsec6_setspidx_ipaddr(struct mbuf *m,
1414 struct secpolicyindex *spidx)
1415 {
1416 struct ip6_hdr *ip6 = NULL;
1417 struct ip6_hdr ip6buf;
1418 struct sockaddr_in6 *sin6;
1419
1420 if (m->m_len >= sizeof(*ip6)) {
1421 ip6 = mtod(m, struct ip6_hdr *);
1422 } else {
1423 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1424 ip6 = &ip6buf;
1425 }
1426
1427 sin6 = (struct sockaddr_in6 *)&spidx->src;
1428 bzero(sin6, sizeof(*sin6));
1429 sin6->sin6_family = AF_INET6;
1430 sin6->sin6_len = sizeof(struct sockaddr_in6);
1431 bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1432 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1433 sin6->sin6_addr.s6_addr16[1] = 0;
1434 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1435 }
1436 spidx->prefs = sizeof(struct in6_addr) << 3;
1437
1438 sin6 = (struct sockaddr_in6 *)&spidx->dst;
1439 bzero(sin6, sizeof(*sin6));
1440 sin6->sin6_family = AF_INET6;
1441 sin6->sin6_len = sizeof(struct sockaddr_in6);
1442 bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1443 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1444 sin6->sin6_addr.s6_addr16[1] = 0;
1445 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1446 }
1447 spidx->prefd = sizeof(struct in6_addr) << 3;
1448
1449 return 0;
1450 }
1451 #endif
1452
1453 static struct inpcbpolicy *
1454 ipsec_newpcbpolicy(void)
1455 {
1456 struct inpcbpolicy *p;
1457
1458 p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK);
1459 return p;
1460 }
1461
1462 static void
1463 ipsec_delpcbpolicy(struct inpcbpolicy *p)
1464 {
1465 FREE(p, M_SECA);
1466 }
1467
1468 /* initialize policy in PCB */
1469 int
1470 ipsec_init_policy(struct socket *so,
1471 struct inpcbpolicy **pcb_sp)
1472 {
1473 struct inpcbpolicy *new;
1474
1475 /* sanity check. */
1476 if (so == NULL || pcb_sp == NULL) {
1477 panic("ipsec_init_policy: NULL pointer was passed.\n");
1478 }
1479
1480 new = ipsec_newpcbpolicy();
1481 if (new == NULL) {
1482 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1483 return ENOBUFS;
1484 }
1485 bzero(new, sizeof(*new));
1486
1487 #ifdef __APPLE__
1488 if (kauth_cred_issuser(so->so_cred))
1489 #else
1490 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1491 #endif
1492 { new->priv = 1;} else {
1493 new->priv = 0;
1494 }
1495
1496 if ((new->sp_in = key_newsp()) == NULL) {
1497 ipsec_delpcbpolicy(new);
1498 return ENOBUFS;
1499 }
1500 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1501 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1502
1503 if ((new->sp_out = key_newsp()) == NULL) {
1504 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1505 ipsec_delpcbpolicy(new);
1506 return ENOBUFS;
1507 }
1508 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1509 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1510
1511 *pcb_sp = new;
1512
1513 return 0;
1514 }
1515
1516 /* copy old ipsec policy into new */
1517 int
1518 ipsec_copy_policy(struct inpcbpolicy *old,
1519 struct inpcbpolicy *new)
1520 {
1521 struct secpolicy *sp;
1522
1523 if (ipsec_bypass != 0) {
1524 return 0;
1525 }
1526
1527 sp = ipsec_deepcopy_policy(old->sp_in);
1528 if (sp) {
1529 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1530 new->sp_in = sp;
1531 } else {
1532 return ENOBUFS;
1533 }
1534
1535 sp = ipsec_deepcopy_policy(old->sp_out);
1536 if (sp) {
1537 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1538 new->sp_out = sp;
1539 } else {
1540 return ENOBUFS;
1541 }
1542
1543 new->priv = old->priv;
1544
1545 return 0;
1546 }
1547
1548 /* deep-copy a policy in PCB */
1549 static struct secpolicy *
1550 ipsec_deepcopy_policy(struct secpolicy *src)
1551 {
1552 struct ipsecrequest *newchain = NULL;
1553 struct ipsecrequest *p;
1554 struct ipsecrequest **q;
1555 struct ipsecrequest *r;
1556 struct secpolicy *dst;
1557
1558 if (src == NULL) {
1559 return NULL;
1560 }
1561 dst = key_newsp();
1562 if (dst == NULL) {
1563 return NULL;
1564 }
1565
1566 /*
1567 * deep-copy IPsec request chain. This is required since struct
1568 * ipsecrequest is not reference counted.
1569 */
1570 q = &newchain;
1571 for (p = src->req; p; p = p->next) {
1572 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1573 M_SECA, M_WAITOK | M_ZERO);
1574 if (*q == NULL) {
1575 goto fail;
1576 }
1577 (*q)->next = NULL;
1578
1579 (*q)->saidx.proto = p->saidx.proto;
1580 (*q)->saidx.mode = p->saidx.mode;
1581 (*q)->level = p->level;
1582 (*q)->saidx.reqid = p->saidx.reqid;
1583
1584 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1585 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1586
1587 (*q)->sp = dst;
1588
1589 q = &((*q)->next);
1590 }
1591
1592 dst->req = newchain;
1593 dst->state = src->state;
1594 dst->policy = src->policy;
1595 /* do not touch the refcnt fields */
1596
1597 return dst;
1598
1599 fail:
1600 for (p = newchain; p; p = r) {
1601 r = p->next;
1602 FREE(p, M_SECA);
1603 p = NULL;
1604 }
1605 key_freesp(dst, KEY_SADB_UNLOCKED);
1606 return NULL;
1607 }
1608
1609 /* set policy and ipsec request if present. */
1610 static int
1611 ipsec_set_policy(struct secpolicy **pcb_sp,
1612 __unused int optname,
1613 caddr_t request,
1614 size_t len,
1615 int priv)
1616 {
1617 struct sadb_x_policy *xpl;
1618 struct secpolicy *newsp = NULL;
1619 int error;
1620
1621 /* sanity check. */
1622 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) {
1623 return EINVAL;
1624 }
1625 if (len < sizeof(*xpl)) {
1626 return EINVAL;
1627 }
1628 xpl = (struct sadb_x_policy *)(void *)request;
1629
1630 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1631 printf("ipsec_set_policy: passed policy\n");
1632 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1633
1634 /* check policy type */
1635 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1636 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1637 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) {
1638 return EINVAL;
1639 }
1640
1641 /* check privileged socket */
1642 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
1643 return EACCES;
1644 }
1645
1646 /* allocate a new SP entry */
1647 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) {
1648 return error;
1649 }
1650
1651 newsp->state = IPSEC_SPSTATE_ALIVE;
1652
1653 /* clear old SP and set new SP */
1654 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1655 *pcb_sp = newsp;
1656 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1657 printf("ipsec_set_policy: new policy\n");
1658 kdebug_secpolicy(newsp));
1659
1660 return 0;
1661 }
1662
1663 int
1664 ipsec4_set_policy(struct inpcb *inp,
1665 int optname,
1666 caddr_t request,
1667 size_t len,
1668 int priv)
1669 {
1670 struct sadb_x_policy *xpl;
1671 struct secpolicy **pcb_sp;
1672 int error = 0;
1673 struct sadb_x_policy xpl_aligned_buf;
1674 u_int8_t *xpl_unaligned;
1675
1676 /* sanity check. */
1677 if (inp == NULL || request == NULL) {
1678 return EINVAL;
1679 }
1680 if (len < sizeof(*xpl)) {
1681 return EINVAL;
1682 }
1683 xpl = (struct sadb_x_policy *)(void *)request;
1684
1685 /* This is a new mbuf allocated by soopt_getm() */
1686 if (IPSEC_IS_P2ALIGNED(xpl)) {
1687 xpl_unaligned = NULL;
1688 } else {
1689 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1690 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1691 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1692 }
1693
1694 if (inp->inp_sp == NULL) {
1695 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1696 if (error) {
1697 return error;
1698 }
1699 }
1700
1701 /* select direction */
1702 switch (xpl->sadb_x_policy_dir) {
1703 case IPSEC_DIR_INBOUND:
1704 pcb_sp = &inp->inp_sp->sp_in;
1705 break;
1706 case IPSEC_DIR_OUTBOUND:
1707 pcb_sp = &inp->inp_sp->sp_out;
1708 break;
1709 default:
1710 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1711 xpl->sadb_x_policy_dir));
1712 return EINVAL;
1713 }
1714
1715 /* turn bypass off */
1716 if (ipsec_bypass != 0) {
1717 ipsec_bypass = 0;
1718 }
1719
1720 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1721 }
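/*
 * Minimal userland sketch (illustrative only, not part of this file):
 * per-socket policies reach ipsec4_set_policy() via
 * setsockopt(IP_IPSEC_POLICY) with a struct sadb_x_policy blob, typically
 * built with libipsec's ipsec_set_policy(3). The socket s and the policy
 * string below are hypothetical examples.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet6/ipsec.h>
 *
 *	char spec[] = "out ipsec esp/transport//require";
 *	char *buf = ipsec_set_policy(spec, strlen(spec));
 *	if (buf != NULL) {
 *		(void)setsockopt(s, IPPROTO_IP, IP_IPSEC_POLICY,
 *		    buf, ipsec_get_policylen(buf));
 *		free(buf);
 *	}
 */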
1722
1723 /* delete policy in PCB */
1724 int
1725 ipsec4_delete_pcbpolicy(struct inpcb *inp)
1726 {
1727 /* sanity check. */
1728 if (inp == NULL) {
1729 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n");
1730 }
1731
1732 if (inp->inp_sp == NULL) {
1733 return 0;
1734 }
1735
1736 if (inp->inp_sp->sp_in != NULL) {
1737 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1738 inp->inp_sp->sp_in = NULL;
1739 }
1740
1741 if (inp->inp_sp->sp_out != NULL) {
1742 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1743 inp->inp_sp->sp_out = NULL;
1744 }
1745
1746 ipsec_delpcbpolicy(inp->inp_sp);
1747 inp->inp_sp = NULL;
1748
1749 return 0;
1750 }
1751
1752 #if INET6
1753 int
1754 ipsec6_set_policy(struct in6pcb *in6p,
1755 int optname,
1756 caddr_t request,
1757 size_t len,
1758 int priv)
1759 {
1760 struct sadb_x_policy *xpl;
1761 struct secpolicy **pcb_sp;
1762 int error = 0;
1763 struct sadb_x_policy xpl_aligned_buf;
1764 u_int8_t *xpl_unaligned;
1765
1766 /* sanity check. */
1767 if (in6p == NULL || request == NULL) {
1768 return EINVAL;
1769 }
1770 if (len < sizeof(*xpl)) {
1771 return EINVAL;
1772 }
1773 xpl = (struct sadb_x_policy *)(void *)request;
1774
1775 /* This is a new mbuf allocated by soopt_getm() */
1776 if (IPSEC_IS_P2ALIGNED(xpl)) {
1777 xpl_unaligned = NULL;
1778 } else {
1779 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1780 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1781 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1782 }
1783
1784 if (in6p->in6p_sp == NULL) {
1785 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1786 if (error) {
1787 return error;
1788 }
1789 }
1790
1791 /* select direction */
1792 switch (xpl->sadb_x_policy_dir) {
1793 case IPSEC_DIR_INBOUND:
1794 pcb_sp = &in6p->in6p_sp->sp_in;
1795 break;
1796 case IPSEC_DIR_OUTBOUND:
1797 pcb_sp = &in6p->in6p_sp->sp_out;
1798 break;
1799 default:
1800 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1801 xpl->sadb_x_policy_dir));
1802 return EINVAL;
1803 }
1804
1805 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1806 }
1807
1808 int
1809 ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1810 {
1811 /* sanity check. */
1812 if (in6p == NULL) {
1813 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n");
1814 }
1815
1816 if (in6p->in6p_sp == NULL) {
1817 return 0;
1818 }
1819
1820 if (in6p->in6p_sp->sp_in != NULL) {
1821 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1822 in6p->in6p_sp->sp_in = NULL;
1823 }
1824
1825 if (in6p->in6p_sp->sp_out != NULL) {
1826 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1827 in6p->in6p_sp->sp_out = NULL;
1828 }
1829
1830 ipsec_delpcbpolicy(in6p->in6p_sp);
1831 in6p->in6p_sp = NULL;
1832
1833 return 0;
1834 }
1835 #endif
1836
1837 /*
1838 * return current level.
1839 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned.
1840 */
1841 u_int
1842 ipsec_get_reqlevel(struct ipsecrequest *isr)
1843 {
1844 u_int level = 0;
1845 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1846
1847 /* sanity check */
1848 if (isr == NULL || isr->sp == NULL) {
1849 panic("ipsec_get_reqlevel: NULL pointer is passed.\n");
1850 }
1851 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1852 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) {
1853 panic("ipsec_get_reqlevel: family mismatched.\n");
1854 }
1855
1856 /* XXX note that we have ipseclog() expanded here - code sync issue */
1857 #define IPSEC_CHECK_DEFAULT(lev) \
1858 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1859 && (lev) != IPSEC_LEVEL_UNIQUE) \
1860 ? (ipsec_debug \
1861 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1862 (lev), IPSEC_LEVEL_REQUIRE) \
1863 : (void)0), \
1864 (lev) = IPSEC_LEVEL_REQUIRE, \
1865 (lev) \
1866 : (lev))
1867
1868 /* set default level */
1869 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1870 #if INET
1871 case AF_INET:
1872 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1873 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1874 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1875 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1876 break;
1877 #endif
1878 #if INET6
1879 case AF_INET6:
1880 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1881 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1882 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1883 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1884 break;
1885 #endif /* INET6 */
1886 default:
1887 panic("ipsec_get_reqlevel: Unknown family. %d\n",
1888 ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1889 }
1890
1891 #undef IPSEC_CHECK_DEFAULT
1892
1893 /* set level */
1894 switch (isr->level) {
1895 case IPSEC_LEVEL_DEFAULT:
1896 switch (isr->saidx.proto) {
1897 case IPPROTO_ESP:
1898 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1899 level = esp_net_deflev;
1900 } else {
1901 level = esp_trans_deflev;
1902 }
1903 break;
1904 case IPPROTO_AH:
1905 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1906 level = ah_net_deflev;
1907 } else {
1908 level = ah_trans_deflev;
1909 }
1910 break;
1911 case IPPROTO_IPCOMP:
1912 ipseclog((LOG_ERR, "ipsec_get_reqlevel: "
1913 "still got IPCOMP - exiting\n"));
1914 break;
1915 default:
1916 panic("ipsec_get_reqlevel: "
1917 "Illegal protocol defined %u\n",
1918 isr->saidx.proto);
1919 }
1920 break;
1921
1922 case IPSEC_LEVEL_USE:
1923 case IPSEC_LEVEL_REQUIRE:
1924 level = isr->level;
1925 break;
1926 case IPSEC_LEVEL_UNIQUE:
1927 level = IPSEC_LEVEL_REQUIRE;
1928 break;
1929
1930 default:
1931 panic("ipsec_get_reqlevel: Illegal IPsec level %u\n",
1932 isr->level);
1933 }
1934
1935 return level;
1936 }
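/*
 * Worked example of the resolution above (illustrative): for an AF_INET
 * request with isr->level == IPSEC_LEVEL_DEFAULT, proto IPPROTO_ESP and mode
 * IPSEC_MODE_TUNNEL, the result is ip4_esp_net_deflev - IPSEC_LEVEL_USE unless
 * net.inet.ipsec.esp_net_deflev was changed (invalid sysctl values are reset
 * to IPSEC_LEVEL_REQUIRE by IPSEC_CHECK_DEFAULT). IPSEC_LEVEL_UNIQUE is always
 * reported to callers as IPSEC_LEVEL_REQUIRE.
 */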
1937
1938 /*
1939 * Check AH/ESP integrity.
1940 * OUT:
1941 * 0: valid
1942 * 1: invalid
1943 */
1944 static int
1945 ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1946 {
1947 struct ipsecrequest *isr;
1948 u_int level;
1949 int need_auth, need_conf, need_icv;
1950
1951 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1952 printf("ipsec_in_reject: using SP\n");
1953 kdebug_secpolicy(sp));
1954
1955 /* check policy */
1956 switch (sp->policy) {
1957 case IPSEC_POLICY_DISCARD:
1958 case IPSEC_POLICY_GENERATE:
1959 return 1;
1960 case IPSEC_POLICY_BYPASS:
1961 case IPSEC_POLICY_NONE:
1962 return 0;
1963
1964 case IPSEC_POLICY_IPSEC:
1965 break;
1966
1967 case IPSEC_POLICY_ENTRUST:
1968 default:
1969 panic("ipsec_in_reject: Invalid policy found. %d\n", sp->policy);
1970 }
1971
1972 need_auth = 0;
1973 need_conf = 0;
1974 need_icv = 0;
1975
1976 /* XXX should compare policy against ipsec header history */
1977
1978 for (isr = sp->req; isr != NULL; isr = isr->next) {
1979 /* get current level */
1980 level = ipsec_get_reqlevel(isr);
1981
1982 switch (isr->saidx.proto) {
1983 case IPPROTO_ESP:
1984 if (level == IPSEC_LEVEL_REQUIRE) {
1985 need_conf++;
1986
1987 #if 0
1988 /* this won't work with multiple input threads - isr->sav would change
1989 * with every packet and is not necessarily related to the current packet
1990 * being processed. If ESP processing is required - the esp code should
1991 * make sure that the integrity check is present and correct. I don't see
1992 * why it would be necessary to check for the presence of the integrity
1993 * check value here. I think this is just wrong.
1994 * isr->sav has been removed.
1995 * %%%%%% this needs to be re-worked at some point but I think the code below can
1996 * be ignored for now.
1997 */
1998 if (isr->sav != NULL
1999 && isr->sav->flags == SADB_X_EXT_NONE
2000 && isr->sav->alg_auth != SADB_AALG_NONE) {
2001 need_icv++;
2002 }
2003 #endif
2004 }
2005 break;
2006 case IPPROTO_AH:
2007 if (level == IPSEC_LEVEL_REQUIRE) {
2008 need_auth++;
2009 need_icv++;
2010 }
2011 break;
2012 case IPPROTO_IPCOMP:
2013 /*
2014 * We don't really care: since the IPComp document says that
2015 * small packets should not be compressed, IPComp policy
2016 * should always be treated as being at the "use" level.
2017 */
2018 break;
2019 }
2020 }
2021
2022 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
2023 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
2024 need_auth, need_conf, need_icv, m->m_flags));
2025
2026 if ((need_conf && !(m->m_flags & M_DECRYPTED))
2027 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
2028 || (need_auth && !(m->m_flags & M_AUTHIPHDR))) {
2029 return 1;
2030 }
2031
2032 return 0;
2033 }
2034
2035 /*
2036 * Check AH/ESP integrity.
2037 * This function is called from tcp_input(), udp_input(),
2038 * and {ah,esp}4_input for tunnel mode
2039 */
2040 int
2041 ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
2042 {
2043 struct secpolicy *sp = NULL;
2044 int error;
2045 int result;
2046
2047 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2048 /* sanity check */
2049 if (m == NULL) {
2050 return 0; /* XXX should be panic ? */
2051 }
2052 /* get SP for this packet.
2053 * When we are called from ip_forward(), we call
2054 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2055 */
2056 if (so == NULL) {
2057 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2058 } else {
2059 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2060 }
2061
2062 if (sp == NULL) {
2063 return 0; /* XXX should be panic ?
2064 * -> No, there may be error. */
2065 }
2066 result = ipsec_in_reject(sp, m);
2067 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2068 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
2069 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2070 key_freesp(sp, KEY_SADB_UNLOCKED);
2071
2072 return result;
2073 }
2074
2075 int
2076 ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
2077 {
2078 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2079 if (inp == NULL) {
2080 return ipsec4_in_reject_so(m, NULL);
2081 }
2082 if (inp->inp_socket) {
2083 return ipsec4_in_reject_so(m, inp->inp_socket);
2084 } else {
2085 panic("ipsec4_in_reject: invalid inpcb/socket");
2086 }
2087
2088 /* NOTREACHED */
2089 return 0;
2090 }
2091
2092 #if INET6
2093 /*
2094 * Check AH/ESP integrity.
2095 * This function is called from tcp6_input(), udp6_input(),
2096 * and {ah,esp}6_input for tunnel mode
2097 */
2098 int
2099 ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2100 {
2101 struct secpolicy *sp = NULL;
2102 int error;
2103 int result;
2104
2105 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2106 /* sanity check */
2107 if (m == NULL) {
2108 return 0; /* XXX should be panic ? */
2109 }
2110 /* get SP for this packet.
2111 * When we are called from ip_forward(), we call
2112 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2113 */
2114 if (so == NULL) {
2115 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2116 } else {
2117 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2118 }
2119
2120 if (sp == NULL) {
2121 return 0; /* XXX should be panic ? */
2122 }
2123 result = ipsec_in_reject(sp, m);
2124 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2125 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2126 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2127 key_freesp(sp, KEY_SADB_UNLOCKED);
2128
2129 return result;
2130 }
2131
2132 int
2133 ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2134 {
2135 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2136 if (in6p == NULL) {
2137 return ipsec6_in_reject_so(m, NULL);
2138 }
2139 if (in6p->in6p_socket) {
2140 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2141 } else {
2142 panic("ipsec6_in_reject: invalid in6p/socket");
2143 }
2144
2145 /* NOTREACHED */
2146 return 0;
2147 }
2148 #endif
2149
2150 /*
2151 * Compute the byte size to be occupied by the IPsec header.
2152 * In tunnel mode this includes the size of the outer IP header.
2153 * NOTE: the SP passed in is not freed here; the caller must free it.
2154 */
2155 size_t
2156 ipsec_hdrsiz(struct secpolicy *sp)
2157 {
2158 struct ipsecrequest *isr;
2159 size_t siz, clen;
2160
2161 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2162 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2163 printf("ipsec_hdrsiz: using SP\n");
2164 kdebug_secpolicy(sp));
2165
2166 /* check policy */
2167 switch (sp->policy) {
2168 case IPSEC_POLICY_DISCARD:
2169 case IPSEC_POLICY_GENERATE:
2170 case IPSEC_POLICY_BYPASS:
2171 case IPSEC_POLICY_NONE:
2172 return 0;
2173
2174 case IPSEC_POLICY_IPSEC:
2175 break;
2176
2177 case IPSEC_POLICY_ENTRUST:
2178 default:
2179 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
2180 }
2181
2182 siz = 0;
2183
2184 for (isr = sp->req; isr != NULL; isr = isr->next) {
2185 clen = 0;
2186
2187 switch (isr->saidx.proto) {
2188 case IPPROTO_ESP:
2189 #if IPSEC_ESP
2190 clen = esp_hdrsiz(isr);
2191 #else
2192 clen = 0; /*XXX*/
2193 #endif
2194 break;
2195 case IPPROTO_AH:
2196 clen = ah_hdrsiz(isr);
2197 break;
2198 default:
2199 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2200 "unknown protocol %u\n",
2201 isr->saidx.proto));
2202 break;
2203 }
2204
2205 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2206 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2207 case AF_INET:
2208 clen += sizeof(struct ip);
2209 break;
2210 #if INET6
2211 case AF_INET6:
2212 clen += sizeof(struct ip6_hdr);
2213 break;
2214 #endif
2215 default:
2216 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2217 "unknown AF %d in IPsec tunnel SA\n",
2218 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2219 break;
2220 }
2221 }
2222 siz += clen;
2223 }
2224
2225 return siz;
2226 }
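/*
 * Rough worked example of the accounting above (illustrative only):
 * a single tunnel-mode ESP request to an IPv4 peer contributes
 * esp_hdrsiz(isr) + sizeof(struct ip) bytes, while a transport-mode
 * AH request contributes only ah_hdrsiz(isr); the per-request sizes
 * are accumulated into siz over the sp->req chain.
 */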
2227
2228 /* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2229 size_t
2230 ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp)
2231 {
2232 struct secpolicy *sp = NULL;
2233 int error;
2234 size_t size;
2235
2236 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2237 /* sanity check */
2238 if (m == NULL) {
2239 return 0; /* XXX should be panic ? */
2240 }
2241 if (inp != NULL && inp->inp_socket == NULL) {
2242 panic("ipsec4_hdrsize: why is socket NULL but there is PCB.");
2243 }
2244
2245 /* get SP for this packet.
2246 * When we are called from ip_forward(), we call
2247 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2248 */
2249 if (inp == NULL) {
2250 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2251 } else {
2252 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2253 }
2254
2255 if (sp == NULL) {
2256 return 0; /* XXX should be panic ? */
2257 }
2258 size = ipsec_hdrsiz(sp);
2259 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2260 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2261 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2262 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2263 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2264 key_freesp(sp, KEY_SADB_UNLOCKED);
2265
2266 return size;
2267 }
2268
2269 #if INET6
2270 /* This function is called from ipsec6_hdrsize_tcp(),
2271 * and maybe from ip6_forward().
2272 */
2273 size_t
2274 ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p)
2275 {
2276 struct secpolicy *sp = NULL;
2277 int error;
2278 size_t size;
2279
2280 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2281 /* sanity check */
2282 if (m == NULL) {
2283 return 0; /* XXX should be panic ? */
2284 }
2285 if (in6p != NULL && in6p->in6p_socket == NULL) {
2286 panic("ipsec6_hdrsize: why is socket NULL but there is PCB.");
2287 }
2288
2289 /* get SP for this packet */
2290 /* XXX Is it right to call with IP_FORWARDING? */
2291 if (in6p == NULL) {
2292 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2293 } else {
2294 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2295 }
2296
2297 if (sp == NULL) {
2298 return 0;
2299 }
2300 size = ipsec_hdrsiz(sp);
2301 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2302 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2303 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2304 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2305 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2306 key_freesp(sp, KEY_SADB_UNLOCKED);
2307
2308 return size;
2309 }
2310 #endif /*INET6*/
2311
2312 #if INET
2313 /*
2314 * encapsulate for ipsec tunnel.
2315 * ip->ip_src must be fixed later on.
2316 */
2317 int
2318 ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
2319 {
2320 struct ip *oip;
2321 struct ip *ip;
2322 size_t hlen;
2323 size_t plen;
2324
2325 /* can't tunnel between different AFs */
2326 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2327 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2328 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2329 m_freem(m);
2330 return EINVAL;
2331 }
2332 #if 0
2333 /* XXX if the dst is myself, perform nothing. */
2334 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2335 m_freem(m);
2336 return EINVAL;
2337 }
2338 #endif
2339
2340 if (m->m_len < sizeof(*ip)) {
2341 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2342 }
2343
2344 ip = mtod(m, struct ip *);
2345 #ifdef _IP_VHL
2346 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2347 #else
2348 hlen = ip->ip_hl << 2;
2349 #endif
2350
2351 if (m->m_len != hlen) {
2352 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2353 }
2354
2355 /* generate header checksum */
2356 ip->ip_sum = 0;
2357 #ifdef _IP_VHL
2358 ip->ip_sum = in_cksum(m, hlen);
2359 #else
2360 ip->ip_sum = in_cksum(m, hlen);
2361 #endif
2362
2363 plen = m->m_pkthdr.len;
2364
2365 /*
2366 * grow the mbuf to accommodate the new IPv4 header.
2367 * NOTE: IPv4 options will never be copied.
2368 */
2369 if (M_LEADINGSPACE(m->m_next) < hlen) {
2370 struct mbuf *n;
2371 MGET(n, M_DONTWAIT, MT_DATA);
2372 if (!n) {
2373 m_freem(m);
2374 return ENOBUFS;
2375 }
2376 n->m_len = hlen;
2377 n->m_next = m->m_next;
2378 m->m_next = n;
2379 m->m_pkthdr.len += hlen;
2380 oip = mtod(n, struct ip *);
2381 } else {
2382 m->m_next->m_len += hlen;
2383 m->m_next->m_data -= hlen;
2384 m->m_pkthdr.len += hlen;
2385 oip = mtod(m->m_next, struct ip *);
2386 }
2387 ip = mtod(m, struct ip *);
2388 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2389 m->m_len = sizeof(struct ip);
2390 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2391
2392 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2393 /* ECN consideration. */
2394 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2395 #ifdef _IP_VHL
2396 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2397 #else
2398 ip->ip_hl = sizeof(struct ip) >> 2;
2399 #endif
2400 ip->ip_off &= htons(~IP_OFFMASK);
2401 ip->ip_off &= htons(~IP_MF);
2402 switch (ip4_ipsec_dfbit) {
2403 case 0: /* clear DF bit */
2404 ip->ip_off &= htons(~IP_DF);
2405 break;
2406 case 1: /* set DF bit */
2407 ip->ip_off |= htons(IP_DF);
2408 break;
2409 default: /* copy DF bit */
2410 break;
2411 }
2412 ip->ip_p = IPPROTO_IPIP;
2413 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2414 ip->ip_len = htons(plen + sizeof(struct ip));
2415 } else {
2416 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2417 "leave ip_len as is (invalid packet)\n"));
2418 }
2419 if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) {
2420 ip->ip_id = 0;
2421 } else {
2422 ip->ip_id = ip_randomid();
2423 }
2424 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2425 &ip->ip_src, sizeof(ip->ip_src));
2426 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2427 &ip->ip_dst, sizeof(ip->ip_dst));
2428 ip->ip_ttl = IPDEFTTL;
2429
2430 /* XXX Should ip_src be updated later ? */
2431
2432 return 0;
2433 }
2434
2435 #endif /*INET*/
2436
2437 #if INET6
2438 int
2439 ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
2440 {
2441 struct ip6_hdr *oip6;
2442 struct ip6_hdr *ip6;
2443 size_t plen;
2444
2445 /* can't tunnel between different AFs */
2446 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2447 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2448 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2449 m_freem(m);
2450 return EINVAL;
2451 }
2452 #if 0
2453 /* XXX if the dst is myself, perform nothing. */
2454 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2455 m_freem(m);
2456 return EINVAL;
2457 }
2458 #endif
2459
2460 plen = m->m_pkthdr.len;
2461
2462 /*
2463 * grow the mbuf to accommodate the new IPv6 header.
2464 */
2465 if (m->m_len != sizeof(struct ip6_hdr)) {
2466 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2467 }
2468 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2469 struct mbuf *n;
2470 MGET(n, M_DONTWAIT, MT_DATA);
2471 if (!n) {
2472 m_freem(m);
2473 return ENOBUFS;
2474 }
2475 n->m_len = sizeof(struct ip6_hdr);
2476 n->m_next = m->m_next;
2477 m->m_next = n;
2478 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2479 oip6 = mtod(n, struct ip6_hdr *);
2480 } else {
2481 m->m_next->m_len += sizeof(struct ip6_hdr);
2482 m->m_next->m_data -= sizeof(struct ip6_hdr);
2483 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2484 oip6 = mtod(m->m_next, struct ip6_hdr *);
2485 }
2486 ip6 = mtod(m, struct ip6_hdr *);
2487 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2488
2489 /* Fake link-local scope-class addresses */
2490 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) {
2491 oip6->ip6_src.s6_addr16[1] = 0;
2492 }
2493 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) {
2494 oip6->ip6_dst.s6_addr16[1] = 0;
2495 }
2496
2497 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2498 /* ECN consideration. */
2499 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2500 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2501 ip6->ip6_plen = htons(plen);
2502 } else {
2503 /* ip6->ip6_plen will be updated in ip6_output() */
2504 }
2505 ip6->ip6_nxt = IPPROTO_IPV6;
2506 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2507 &ip6->ip6_src, sizeof(ip6->ip6_src));
2508 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2509 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2510 ip6->ip6_hlim = IPV6_DEFHLIM;
2511
2512 /* XXX Should ip6_src be updated later ? */
2513
2514 return 0;
2515 }
2516
2517 static int
2518 ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav)
2519 {
2520 struct ip6_hdr *ip6, *ip6i;
2521 struct ip *ip;
2522 size_t plen;
2523 u_int8_t hlim;
2524
2525 /* tunneling over IPv4 */
2526 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2527 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2528 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2529 m_freem(m);
2530 return EINVAL;
2531 }
2532 #if 0
2533 /* XXX if the dst is myself, perform nothing. */
2534 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2535 m_freem(m);
2536 return EINVAL;
2537 }
2538 #endif
2539
2540 plen = m->m_pkthdr.len;
2541 ip6 = mtod(m, struct ip6_hdr *);
2542 hlim = ip6->ip6_hlim;
2543 /*
2544 * grow the mbuf to accommodate the new IPv4 header.
2545 */
2546 if (m->m_len != sizeof(struct ip6_hdr)) {
2547 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2548 }
2549 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2550 struct mbuf *n;
2551 MGET(n, M_DONTWAIT, MT_DATA);
2552 if (!n) {
2553 m_freem(m);
2554 return ENOBUFS;
2555 }
2556 n->m_len = sizeof(struct ip6_hdr);
2557 n->m_next = m->m_next;
2558 m->m_next = n;
2559 m->m_pkthdr.len += sizeof(struct ip);
2560 ip6i = mtod(n, struct ip6_hdr *);
2561 } else {
2562 m->m_next->m_len += sizeof(struct ip6_hdr);
2563 m->m_next->m_data -= sizeof(struct ip6_hdr);
2564 m->m_pkthdr.len += sizeof(struct ip);
2565 ip6i = mtod(m->m_next, struct ip6_hdr *);
2566 }
2567
2568 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2569 ip = mtod(m, struct ip *);
2570 m->m_len = sizeof(struct ip);
2571 /*
2572 * Fill in some of the IPv4 fields - we don't need all of them
2573 * because the rest will be filled in by ip_output
2574 */
2575 ip->ip_v = IPVERSION;
2576 ip->ip_hl = sizeof(struct ip) >> 2;
2577 ip->ip_id = 0;
2578 ip->ip_sum = 0;
2579 ip->ip_tos = 0;
2580 ip->ip_off = 0;
2581 ip->ip_ttl = hlim;
2582 ip->ip_p = IPPROTO_IPV6;
2583
2584 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2585 /* ECN consideration. */
2586 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2587
2588 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2589 ip->ip_len = htons(plen + sizeof(struct ip));
2590 } else {
2591 ip->ip_len = htons(plen);
2592 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2593 "leave ip_len as is (invalid packet)\n"));
2594 }
2595 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2596 &ip->ip_src, sizeof(ip->ip_src));
2597 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2598 &ip->ip_dst, sizeof(ip->ip_dst));
2599
2600 return 0;
2601 }
2602
2603 int
2604 ipsec6_update_routecache_and_output(
2605 struct ipsec_output_state *state,
2606 struct secasvar *sav)
2607 {
2608 struct sockaddr_in6* dst6;
2609 struct route_in6 *ro6;
2610 struct ip6_hdr *ip6;
2611 errno_t error = 0;
2612
2613 int plen;
2614 struct ip6_out_args ip6oa;
2615 struct route_in6 ro6_new;
2616 struct flowadv *adv = NULL;
2617
2618 if (!state->m) {
2619 return EINVAL;
2620 }
2621 ip6 = mtod(state->m, struct ip6_hdr *);
2622
2623 // grab sadb_mutex, before updating sah's route cache
2624 lck_mtx_lock(sadb_mutex);
2625 ro6 = &sav->sah->sa_route;
2626 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2627 if (ro6->ro_rt) {
2628 RT_LOCK(ro6->ro_rt);
2629 }
2630 if (ROUTE_UNUSABLE(ro6) ||
2631 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2632 if (ro6->ro_rt != NULL) {
2633 RT_UNLOCK(ro6->ro_rt);
2634 }
2635 ROUTE_RELEASE(ro6);
2636 }
2637 if (ro6->ro_rt == 0) {
2638 bzero(dst6, sizeof(*dst6));
2639 dst6->sin6_family = AF_INET6;
2640 dst6->sin6_len = sizeof(*dst6);
2641 dst6->sin6_addr = ip6->ip6_dst;
2642 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
2643 if (ro6->ro_rt) {
2644 RT_LOCK(ro6->ro_rt);
2645 }
2646 }
2647 if (ro6->ro_rt == 0) {
2648 ip6stat.ip6s_noroute++;
2649 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2650 error = EHOSTUNREACH;
2651 // release sadb_mutex, after updating sah's route cache
2652 lck_mtx_unlock(sadb_mutex);
2653 return error;
2654 }
2655
2656 /*
2657 * adjust state->dst if tunnel endpoint is offlink
2658 *
2659 * XXX: caching rt_gateway value in the state is
2660 * not really good, since it may point elsewhere
2661 * when the gateway gets modified to a larger
2662 * sockaddr via rt_setgate(). This is currently
2663 * addressed by SA_SIZE roundup in that routine.
2664 */
2665 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
2666 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2667 }
2668 RT_UNLOCK(ro6->ro_rt);
2669 ROUTE_RELEASE(&state->ro);
2670 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
2671 state->dst = (struct sockaddr *)dst6;
2672 state->tunneled = 6;
2673 // release sadb_mutex, after updating sah's route cache
2674 lck_mtx_unlock(sadb_mutex);
2675
2676 state->m = ipsec6_splithdr(state->m);
2677 if (!state->m) {
2678 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2679 error = ENOMEM;
2680 return error;
2681 }
2682
2683 ip6 = mtod(state->m, struct ip6_hdr *);
2684 switch (sav->sah->saidx.proto) {
2685 case IPPROTO_ESP:
2686 #if IPSEC_ESP
2687 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2688 #else
2689 m_freem(state->m);
2690 error = EINVAL;
2691 #endif
2692 break;
2693 case IPPROTO_AH:
2694 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2695 break;
2696 default:
2697 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2698 m_freem(state->m);
2699 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2700 error = EINVAL;
2701 break;
2702 }
2703 if (error) {
2704 // If error, packet already freed by above output routines
2705 state->m = NULL;
2706 return error;
2707 }
2708
2709 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2710 if (plen > IPV6_MAXPACKET) {
2711 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2712 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2713 error = EINVAL;/*XXX*/
2714 return error;
2715 }
2716 ip6 = mtod(state->m, struct ip6_hdr *);
2717 ip6->ip6_plen = htons(plen);
2718
2719 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2720 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2721
2722 /* Increment statistics */
2723 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, mbuf_pkthdr_len(state->m), 0);
2724
2725 /* Send to ip6_output */
2726 bzero(&ro6_new, sizeof(ro6_new));
2727 bzero(&ip6oa, sizeof(ip6oa));
2728 ip6oa.ip6oa_flowadv.code = 0;
2729 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2730 if (state->outgoing_if) {
2731 ip6oa.ip6oa_boundif = state->outgoing_if;
2732 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2733 }
2734
2735 adv = &ip6oa.ip6oa_flowadv;
2736 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2737 state->m = NULL;
2738
2739 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2740 error = ENOBUFS;
2741 ifnet_disable_output(sav->sah->ipsec_if);
2742 return error;
2743 }
2744
2745 return 0;
2746 }
2747
2748 int
2749 ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
2750 {
2751 struct mbuf *m;
2752 struct ip6_hdr *ip6;
2753 struct ip *oip;
2754 struct ip *ip;
2755 size_t hlen;
2756 size_t plen;
2757
2758 m = state->m;
2759 if (!m) {
2760 return EINVAL;
2761 }
2762
2763 /* can't tunnel between different AFs */
2764 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2765 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2766 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2767 m_freem(m);
2768 return EINVAL;
2769 }
2770 #if 0
2771 /* XXX if the dst is myself, perform nothing. */
2772 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2773 m_freem(m);
2774 return EINVAL;
2775 }
2776 #endif
2777
2778 if (m->m_len < sizeof(*ip)) {
2779 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2780 return EINVAL;
2781 }
2782
2783 ip = mtod(m, struct ip *);
2784 #ifdef _IP_VHL
2785 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2786 #else
2787 hlen = ip->ip_hl << 2;
2788 #endif
2789
2790 if (m->m_len != hlen) {
2791 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2792 return EINVAL;
2793 }
2794
2795 /* generate header checksum */
2796 ip->ip_sum = 0;
2797 #ifdef _IP_VHL
2798 ip->ip_sum = in_cksum(m, hlen);
2799 #else
2800 ip->ip_sum = in_cksum(m, hlen);
2801 #endif
2802
2803 plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len
2804
2805 /*
2806 * First move the IPv4 header to the second mbuf in the chain
2807 */
2808 if (M_LEADINGSPACE(m->m_next) < hlen) {
2809 struct mbuf *n;
2810 MGET(n, M_DONTWAIT, MT_DATA);
2811 if (!n) {
2812 m_freem(m);
2813 return ENOBUFS;
2814 }
2815 n->m_len = hlen;
2816 n->m_next = m->m_next;
2817 m->m_next = n;
2818 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2819 oip = mtod(n, struct ip *);
2820 } else {
2821 m->m_next->m_len += hlen;
2822 m->m_next->m_data -= hlen;
2823 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2824 oip = mtod(m->m_next, struct ip *);
2825 }
2826 ip = mtod(m, struct ip *);
2827 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2828
2829 /*
2830 * Grow the first mbuf to accommodate the new IPv6 header.
2831 */
2832 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2833 struct mbuf *n;
2834 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2835 if (!n) {
2836 m_freem(m);
2837 return ENOBUFS;
2838 }
2839 M_COPY_PKTHDR(n, m);
2840 MH_ALIGN(n, sizeof(struct ip6_hdr));
2841 n->m_len = sizeof(struct ip6_hdr);
2842 n->m_next = m->m_next;
2843 m->m_next = NULL;
2844 m_freem(m);
2845 state->m = n;
2846 m = state->m;
2847 } else {
2848 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2849 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2850 }
2851 ip6 = mtod(m, struct ip6_hdr *);
2852 ip6->ip6_flow = 0;
2853 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2854 ip6->ip6_vfc |= IPV6_VERSION;
2855
2856 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2857 /* ECN consideration. */
2858 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
2859 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2860 ip6->ip6_plen = htons(plen);
2861 } else {
2862 /* ip6->ip6_plen will be updated in ip6_output() */
2863 }
2864
2865 ip6->ip6_nxt = IPPROTO_IPV4;
2866 ip6->ip6_hlim = IPV6_DEFHLIM;
2867
2868 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2869 &ip6->ip6_src, sizeof(ip6->ip6_src));
2870 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2871 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2872
2873 return 0;
2874 }
2875
2876 #endif /*INET6*/
2877
2878 /*
2879 * Check the variable replay window.
2880 * ipsec_chkreplay() performs replay check before ICV verification.
2881 * ipsec_updatereplay() updates replay bitmap. This must be called after
2882 * ICV verification (it also performs replay check, which is usually done
2883 * beforehand).
2884 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2885 *
2886 * based on RFC 2401.
2887 */
2888 int
2889 ipsec_chkreplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2890 {
2891 const struct secreplay *replay;
2892 u_int32_t diff;
2893 int fr;
2894 u_int32_t wsizeb; /* constant: bits of window size */
2895 int frlast; /* constant: last frame */
2896
2897
2898 /* sanity check */
2899 if (sav == NULL) {
2900 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2901 }
2902
2903 lck_mtx_lock(sadb_mutex);
2904 replay = sav->replay[replay_index];
2905
2906 if (replay->wsize == 0) {
2907 lck_mtx_unlock(sadb_mutex);
2908 return 1; /* no need to check replay. */
2909 }
2910
2911 /* constant */
2912 frlast = replay->wsize - 1;
2913 wsizeb = replay->wsize << 3;
2914
2915 /* sequence number of 0 is invalid */
2916 if (seq == 0) {
2917 lck_mtx_unlock(sadb_mutex);
2918 return 0;
2919 }
2920
2921 /* first time is always okay */
2922 if (replay->count == 0) {
2923 lck_mtx_unlock(sadb_mutex);
2924 return 1;
2925 }
2926
2927 if (seq > replay->lastseq) {
2928 /* larger sequences are okay */
2929 lck_mtx_unlock(sadb_mutex);
2930 return 1;
2931 } else {
2932 /* seq is equal or less than lastseq. */
2933 diff = replay->lastseq - seq;
2934
2935 /* over range to check, i.e. too old or wrapped */
2936 if (diff >= wsizeb) {
2937 lck_mtx_unlock(sadb_mutex);
2938 return 0;
2939 }
2940
2941 fr = frlast - diff / 8;
2942
2943 /* this packet already seen ? */
2944 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2945 lck_mtx_unlock(sadb_mutex);
2946 return 0;
2947 }
2948
2949 /* out of order but good */
2950 lck_mtx_unlock(sadb_mutex);
2951 return 1;
2952 }
2953 }
2954
2955 /*
2956 * Check the replay counter and update it if the packet is acceptable.
2957 * OUT: 0: OK
2958 * 1: NG
2959 */
2960 int
2961 ipsec_updatereplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2962 {
2963 struct secreplay *replay;
2964 u_int32_t diff;
2965 int fr;
2966 u_int32_t wsizeb; /* constant: bits of window size */
2967 int frlast; /* constant: last frame */
2968
2969 /* sanity check */
2970 if (sav == NULL) {
2971 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2972 }
2973
2974 lck_mtx_lock(sadb_mutex);
2975 replay = sav->replay[replay_index];
2976
2977 if (replay->wsize == 0) {
2978 goto ok; /* no need to check replay. */
2979 }
2980 /* constant */
2981 frlast = replay->wsize - 1;
2982 wsizeb = replay->wsize << 3;
2983
2984 /* sequence number of 0 is invalid */
2985 if (seq == 0) {
2986 lck_mtx_unlock(sadb_mutex);
2987 return 1;
2988 }
2989
2990 /* first time */
2991 if (replay->count == 0) {
2992 replay->lastseq = seq;
2993 bzero(replay->bitmap, replay->wsize);
2994 (replay->bitmap)[frlast] = 1;
2995 goto ok;
2996 }
2997
2998 if (seq > replay->lastseq) {
2999 /* seq is larger than lastseq. */
3000 diff = seq - replay->lastseq;
3001
3002 /* new larger sequence number */
3003 if (diff < wsizeb) {
3004 /* In window */
3005 /* set bit for this packet */
3006 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
3007 (replay->bitmap)[frlast] |= 1;
3008 } else {
3009 /* this packet is "way larger" than the window; reset the bitmap */
3010 bzero(replay->bitmap, replay->wsize);
3011 (replay->bitmap)[frlast] = 1;
3012 }
3013 replay->lastseq = seq;
3014
3015 /* larger is good */
3016 } else {
3017 /* seq is equal or less than lastseq. */
3018 diff = replay->lastseq - seq;
3019
3020 /* over range to check, i.e. too old or wrapped */
3021 if (diff >= wsizeb) {
3022 lck_mtx_unlock(sadb_mutex);
3023 return 1;
3024 }
3025
3026 fr = frlast - diff / 8;
3027
3028 /* this packet already seen ? */
3029 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
3030 lck_mtx_unlock(sadb_mutex);
3031 return 1;
3032 }
3033
3034 /* mark as seen */
3035 (replay->bitmap)[fr] |= (1 << (diff % 8));
3036
3037 /* out of order but good */
3038 }
3039
3040 ok:
3041 if (replay->count == ~0) {
3042 /* set overflow flag */
3043 replay->overflow++;
3044
3045 /* don't increment, no more packets accepted */
3046 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
3047 lck_mtx_unlock(sadb_mutex);
3048 return 1;
3049 }
3050
3051 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
3052 replay->overflow, ipsec_logsastr(sav)));
3053 }
3054
3055 replay->count++;
3056
3057 lck_mtx_unlock(sadb_mutex);
3058 return 0;
3059 }
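/*
 * A minimal, self-contained user-space sketch of the sliding-window
 * logic implemented by ipsec_chkreplay()/ipsec_updatereplay() above,
 * assuming a fixed 4-byte window and the same bitmap layout
 * (bitmap[wsize - 1] tracks the most recent sequence numbers).  The
 * toy_* names are hypothetical, and the locking, statistics and
 * SADB_X_EXT_CYCSEQ overflow handling are intentionally omitted.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define TOY_RWIN 4                      /* replay window size in bytes */

struct toy_replay {
	uint32_t lastseq;               /* highest sequence number seen */
	uint32_t count;                 /* packets accepted so far */
	unsigned char bitmap[TOY_RWIN]; /* one bit per sequence number */
};

/* shift the bitmap left by nbit bits, carrying bits toward bitmap[0] */
static void
toy_vshiftl(unsigned char *bitmap, int nbit, int wsize)
{
	for (int j = 0; j < nbit; j += 8) {
		int s = (nbit - j < 8) ? (nbit - j) : 8;
		bitmap[0] <<= s;
		for (int i = 1; i < wsize; i++) {
			unsigned char over = bitmap[i] >> (8 - s);
			bitmap[i] <<= s;
			bitmap[i - 1] |= over;
		}
	}
}

/* returns 1 if seq is acceptable and records it, 0 if it is a replay */
static int
toy_replay_update(struct toy_replay *rp, uint32_t seq)
{
	const int frlast = TOY_RWIN - 1;        /* last byte of the bitmap */
	const uint32_t wsizeb = TOY_RWIN << 3;  /* window size in bits */
	uint32_t diff;

	if (seq == 0) {
		return 0;                       /* sequence number 0 is invalid */
	}
	if (rp->count == 0) {                   /* first packet is always okay */
		rp->lastseq = seq;
		memset(rp->bitmap, 0, TOY_RWIN);
		rp->bitmap[frlast] = 1;
		rp->count++;
		return 1;
	}
	if (seq > rp->lastseq) {                /* window slides forward */
		diff = seq - rp->lastseq;
		if (diff < wsizeb) {
			toy_vshiftl(rp->bitmap, (int)diff, TOY_RWIN);
			rp->bitmap[frlast] |= 1;
		} else {                        /* jumped past the window */
			memset(rp->bitmap, 0, TOY_RWIN);
			rp->bitmap[frlast] = 1;
		}
		rp->lastseq = seq;
	} else {                                /* seq falls inside or behind the window */
		diff = rp->lastseq - seq;
		if (diff >= wsizeb) {
			return 0;               /* too old, or sequence wrapped */
		}
		int fr = frlast - (int)(diff / 8);
		if (rp->bitmap[fr] & (1 << (diff % 8))) {
			return 0;               /* already seen: replay */
		}
		rp->bitmap[fr] |= (1 << (diff % 8));
	}
	rp->count++;
	return 1;
}
#endif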
3060
3061 /*
3062 * shift variable length buffer to left.
3063 * IN: bitmap: pointer to the buffer
3064 * nbit: the number of bits to shift.
3065 * wsize: buffer size (bytes).
3066 */
3067 static void
3068 vshiftl(unsigned char *bitmap, int nbit, int wsize)
3069 {
3070 int s, j, i;
3071 unsigned char over;
3072
3073 for (j = 0; j < nbit; j += 8) {
3074 s = (nbit - j < 8) ? (nbit - j): 8;
3075 bitmap[0] <<= s;
3076 for (i = 1; i < wsize; i++) {
3077 over = (bitmap[i] >> (8 - s));
3078 bitmap[i] <<= s;
3079 bitmap[i - 1] |= over;
3080 }
3081 }
3082
3083 return;
3084 }
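/*
 * Worked example for vshiftl(): with wsize = 2 and nbit = 1, the bitmap
 * {0x01, 0x80} becomes {0x03, 0x00}; the top bit of bitmap[1] carries
 * into bitmap[0], and any bits shifted out of bitmap[0] are dropped.
 */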
3085
3086 const char *
3087 ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
3088 {
3089 static char buf[256] __attribute__((aligned(4)));
3090 char *p;
3091 u_int8_t *s, *d;
3092
3093 s = (u_int8_t *)(&ip->ip_src);
3094 d = (u_int8_t *)(&ip->ip_dst);
3095
3096 p = buf;
3097 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3098 while (p && *p) {
3099 p++;
3100 }
3101 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
3102 s[0], s[1], s[2], s[3]);
3103 while (p && *p) {
3104 p++;
3105 }
3106 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
3107 d[0], d[1], d[2], d[3]);
3108 while (p && *p) {
3109 p++;
3110 }
3111 snprintf(p, sizeof(buf) - (p - buf), ")");
3112
3113 return buf;
3114 }
3115
3116 #if INET6
3117 const char *
3118 ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3119 {
3120 static char buf[256] __attribute__((aligned(4)));
3121 char *p;
3122
3123 p = buf;
3124 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3125 while (p && *p) {
3126 p++;
3127 }
3128 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3129 ip6_sprintf(&ip6->ip6_src));
3130 while (p && *p) {
3131 p++;
3132 }
3133 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3134 ip6_sprintf(&ip6->ip6_dst));
3135 while (p && *p) {
3136 p++;
3137 }
3138 snprintf(p, sizeof(buf) - (p - buf), ")");
3139
3140 return buf;
3141 }
3142 #endif /*INET6*/
3143
3144 const char *
3145 ipsec_logsastr(struct secasvar *sav)
3146 {
3147 static char buf[256] __attribute__((aligned(4)));
3148 char *p;
3149 struct secasindex *saidx = &sav->sah->saidx;
3150
3151 /* validity check */
3152 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3153 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) {
3154 panic("ipsec_logsastr: family mismatched.\n");
3155 }
3156
3157 p = buf;
3158 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3159 while (p && *p) {
3160 p++;
3161 }
3162 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3163 u_int8_t *s, *d;
3164 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3165 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3166 snprintf(p, sizeof(buf) - (p - buf),
3167 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3168 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3169 }
3170 #if INET6
3171 else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3172 snprintf(p, sizeof(buf) - (p - buf),
3173 "src=%s",
3174 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3175 while (p && *p) {
3176 p++;
3177 }
3178 snprintf(p, sizeof(buf) - (p - buf),
3179 " dst=%s",
3180 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3181 }
3182 #endif
3183 while (p && *p) {
3184 p++;
3185 }
3186 snprintf(p, sizeof(buf) - (p - buf), ")");
3187
3188 return buf;
3189 }
3190
3191 void
3192 ipsec_dumpmbuf(struct mbuf *m)
3193 {
3194 int totlen;
3195 int i;
3196 u_char *p;
3197
3198 totlen = 0;
3199 printf("---\n");
3200 while (m) {
3201 p = mtod(m, u_char *);
3202 for (i = 0; i < m->m_len; i++) {
3203 printf("%02x ", p[i]);
3204 totlen++;
3205 if (totlen % 16 == 0) {
3206 printf("\n");
3207 }
3208 }
3209 m = m->m_next;
3210 }
3211 if (totlen % 16 != 0) {
3212 printf("\n");
3213 }
3214 printf("---\n");
3215 }
3216
3217 #if INET
3218 /*
3219 * IPsec output logic for IPv4.
3220 */
3221 static int
3222 ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3223 {
3224 struct ip *ip = NULL;
3225 int error = 0;
3226 struct sockaddr_in *dst4;
3227 struct route *ro4;
3228
3229 /* validity check */
3230 if (sav == NULL || sav->sah == NULL) {
3231 error = EINVAL;
3232 goto bad;
3233 }
3234
3235 /*
3236 * If there is no valid SA, we give up processing any
3237 * further. In such a case, the SA's status changes
3238 * from DYING to DEAD after allocation. If a packet
3239 * were sent to the receiver under a dead SA, the receiver
3240 * could not decode it because the SA has expired.
3241 */
3242 if (sav->state != SADB_SASTATE_MATURE
3243 && sav->state != SADB_SASTATE_DYING) {
3244 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3245 error = EINVAL;
3246 goto bad;
3247 }
3248
3249 state->outgoing_if = sav->sah->outgoing_if;
3250
3251 /*
3252 * There may be a case where the SA status is changed while
3253 * we are referring to it. So calling splsoftnet().
3254 */
3255
3256 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3257 /*
3258 * build IPsec tunnel.
3259 */
3260 state->m = ipsec4_splithdr(state->m);
3261 if (!state->m) {
3262 error = ENOMEM;
3263 goto bad;
3264 }
3265
3266 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3267 error = ipsec46_encapsulate(state, sav);
3268 if (error) {
3269 // packet already freed by encapsulation error handling
3270 state->m = NULL;
3271 return error;
3272 }
3273
3274 error = ipsec6_update_routecache_and_output(state, sav);
3275 return error;
3276 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3277 error = ipsec4_encapsulate(state->m, sav);
3278 if (error) {
3279 state->m = NULL;
3280 goto bad;
3281 }
3282 ip = mtod(state->m, struct ip *);
3283
3284 // grab sadb_mutex, before updating sah's route cache
3285 lck_mtx_lock(sadb_mutex);
3286 ro4 = (struct route *)&sav->sah->sa_route;
3287 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3288 if (ro4->ro_rt != NULL) {
3289 RT_LOCK(ro4->ro_rt);
3290 }
3291 if (ROUTE_UNUSABLE(ro4) ||
3292 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3293 if (ro4->ro_rt != NULL) {
3294 RT_UNLOCK(ro4->ro_rt);
3295 }
3296 ROUTE_RELEASE(ro4);
3297 }
3298 if (ro4->ro_rt == 0) {
3299 dst4->sin_family = AF_INET;
3300 dst4->sin_len = sizeof(*dst4);
3301 dst4->sin_addr = ip->ip_dst;
3302 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3303 if (ro4->ro_rt == 0) {
3304 OSAddAtomic(1, &ipstat.ips_noroute);
3305 error = EHOSTUNREACH;
3306 // release sadb_mutex, after updating sah's route cache
3307 lck_mtx_unlock(sadb_mutex);
3308 goto bad;
3309 }
3310 RT_LOCK(ro4->ro_rt);
3311 }
3312
3313 /*
3314 * adjust state->dst if tunnel endpoint is offlink
3315 *
3316 * XXX: caching rt_gateway value in the state is
3317 * not really good, since it may point elsewhere
3318 * when the gateway gets modified to a larger
3319 * sockaddr via rt_setgate(). This is currently
3320 * addressed by SA_SIZE roundup in that routine.
3321 */
3322 if (ro4->ro_rt->rt_flags & RTF_GATEWAY) {
3323 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3324 }
3325 RT_UNLOCK(ro4->ro_rt);
3326 ROUTE_RELEASE(&state->ro);
3327 route_copyout((struct route *)&state->ro, ro4, sizeof(struct route));
3328 state->dst = (struct sockaddr *)dst4;
3329 state->tunneled = 4;
3330 // release sadb_mutex, after updating sah's route cache
3331 lck_mtx_unlock(sadb_mutex);
3332 } else {
3333 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3334 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3335 error = EAFNOSUPPORT;
3336 goto bad;
3337 }
3338 }
3339
3340 state->m = ipsec4_splithdr(state->m);
3341 if (!state->m) {
3342 error = ENOMEM;
3343 goto bad;
3344 }
3345 switch (sav->sah->saidx.proto) {
3346 case IPPROTO_ESP:
3347 #if IPSEC_ESP
3348 if ((error = esp4_output(state->m, sav)) != 0) {
3349 state->m = NULL;
3350 goto bad;
3351 }
3352 break;
3353 #else
3354 m_freem(state->m);
3355 state->m = NULL;
3356 error = EINVAL;
3357 goto bad;
3358 #endif
3359 case IPPROTO_AH:
3360 if ((error = ah4_output(state->m, sav)) != 0) {
3361 state->m = NULL;
3362 goto bad;
3363 }
3364 break;
3365 default:
3366 ipseclog((LOG_ERR,
3367 "ipsec4_output: unknown ipsec protocol %d\n",
3368 sav->sah->saidx.proto));
3369 m_freem(state->m);
3370 state->m = NULL;
3371 error = EPROTONOSUPPORT;
3372 goto bad;
3373 }
3374
3375 if (state->m == 0) {
3376 error = ENOMEM;
3377 goto bad;
3378 }
3379
3380 return 0;
3381
3382 bad:
3383 return error;
3384 }
3385
3386 int
3387 ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3388 {
3389 int error = 0;
3390 struct secasvar *sav = NULL;
3391
3392 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3393
3394 if (state == NULL) {
3395 panic("state == NULL in ipsec4_output");
3396 }
3397 if (state->m == NULL) {
3398 panic("state->m == NULL in ipsec4_output");
3399 }
3400 if (state->dst == NULL) {
3401 panic("state->dst == NULL in ipsec4_output");
3402 }
3403
3404 struct ip *ip = mtod(state->m, struct ip *);
3405
3406 struct sockaddr_in src = {};
3407 src.sin_family = AF_INET;
3408 src.sin_len = sizeof(src);
3409 memcpy(&src.sin_addr, &ip->ip_src, sizeof(src.sin_addr));
3410
3411 struct sockaddr_in dst = {};
3412 dst.sin_family = AF_INET;
3413 dst.sin_len = sizeof(dst);
3414 memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr));
3415
3416 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
3417 (struct sockaddr *)&src,
3418 (struct sockaddr *)&dst);
3419 if (sav == NULL) {
3420 goto bad;
3421 }
3422
3423 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3424 goto bad;
3425 }
3426
3427 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3428 if (sav) {
3429 key_freesav(sav, KEY_SADB_UNLOCKED);
3430 }
3431 return 0;
3432
3433 bad:
3434 if (sav) {
3435 key_freesav(sav, KEY_SADB_UNLOCKED);
3436 }
3437 m_freem(state->m);
3438 state->m = NULL;
3439 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3440 return error;
3441 }
3442
3443 int
3444 ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3445 {
3446 struct ip *ip = NULL;
3447 struct ipsecrequest *isr = NULL;
3448 struct secasindex saidx;
3449 struct secasvar *sav = NULL;
3450 int error = 0;
3451 struct sockaddr_in *sin;
3452
3453 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3454
3455 if (!state) {
3456 panic("state == NULL in ipsec4_output");
3457 }
3458 if (!state->m) {
3459 panic("state->m == NULL in ipsec4_output");
3460 }
3461 if (!state->dst) {
3462 panic("state->dst == NULL in ipsec4_output");
3463 }
3464
3465 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
3466
3467 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3468 printf("ipsec4_output: applied SP\n");
3469 kdebug_secpolicy(sp));
3470
3471 for (isr = sp->req; isr != NULL; isr = isr->next) {
3472 /* make SA index for search proper SA */
3473 ip = mtod(state->m, struct ip *);
3474 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3475 saidx.mode = isr->saidx.mode;
3476 saidx.reqid = isr->saidx.reqid;
3477 sin = (struct sockaddr_in *)&saidx.src;
3478 if (sin->sin_len == 0) {
3479 sin->sin_len = sizeof(*sin);
3480 sin->sin_family = AF_INET;
3481 sin->sin_port = IPSEC_PORT_ANY;
3482 bcopy(&ip->ip_src, &sin->sin_addr,
3483 sizeof(sin->sin_addr));
3484 }
3485 sin = (struct sockaddr_in *)&saidx.dst;
3486 if (sin->sin_len == 0) {
3487 sin->sin_len = sizeof(*sin);
3488 sin->sin_family = AF_INET;
3489 sin->sin_port = IPSEC_PORT_ANY;
3490 /*
3491 * Get the port from the packet if the upper layer is UDP,
3492 * NAT traversal is enabled, and we are in transport mode.
3493 */
3494
3495 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3496 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3497 if (ip->ip_p == IPPROTO_UDP) {
3498 struct udphdr *udp;
3499 size_t hlen;
3500 #ifdef _IP_VHL
3501 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3502 #else
3503 hlen = ip->ip_hl << 2;
3504 #endif
3505 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3506 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3507 if (!state->m) {
3508 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3509 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3510 goto bad;
3511 }
3512 ip = mtod(state->m, struct ip *);
3513 }
3514 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3515 sin->sin_port = udp->uh_dport;
3516 }
3517 }
3518
3519 bcopy(&ip->ip_dst, &sin->sin_addr,
3520 sizeof(sin->sin_addr));
3521 }
3522
3523 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3524 /*
3525 * IPsec processing is required, but no SA found.
3526 * I assume that key_acquire() has been called
3527 * to get/establish the SA. Here I discard
3528 * this packet because it is the responsibility of the
3529 * upper layer to retransmit it.
3530 */
3531 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3532 goto bad;
3533 }
3534
3535 /* validity check */
3536 if (sav == NULL) {
3537 switch (ipsec_get_reqlevel(isr)) {
3538 case IPSEC_LEVEL_USE:
3539 continue;
3540 case IPSEC_LEVEL_REQUIRE:
3541 /* must not be reached here. */
3542 panic("ipsec4_output: no SA found, but required.");
3543 }
3544 }
3545
3546 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3547 goto bad;
3548 }
3549 }
3550
3551 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3552 if (sav) {
3553 key_freesav(sav, KEY_SADB_UNLOCKED);
3554 }
3555 return 0;
3556
3557 bad:
3558 if (sav) {
3559 key_freesav(sav, KEY_SADB_UNLOCKED);
3560 }
3561 m_freem(state->m);
3562 state->m = NULL;
3563 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3564 return error;
3565 }
3566
3567 #endif
3568
3569 #if INET6
3570 /*
3571 * IPsec output logic for IPv6, transport mode.
3572 */
3573 static int
3574 ipsec6_output_trans_internal(
3575 struct ipsec_output_state *state,
3576 struct secasvar *sav,
3577 u_char *nexthdrp,
3578 struct mbuf *mprev)
3579 {
3580 struct ip6_hdr *ip6;
3581 int error = 0;
3582 int plen;
3583
3584 /* validity check */
3585 if (sav == NULL || sav->sah == NULL) {
3586 error = EINVAL;
3587 goto bad;
3588 }
3589
3590 /*
3591 * If there is no valid SA, we give up processing;
3592 * see the same place in ipsec4_output().
3593 */
3594 if (sav->state != SADB_SASTATE_MATURE
3595 && sav->state != SADB_SASTATE_DYING) {
3596 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3597 error = EINVAL;
3598 goto bad;
3599 }
3600
3601 state->outgoing_if = sav->sah->outgoing_if;
3602
3603 switch (sav->sah->saidx.proto) {
3604 case IPPROTO_ESP:
3605 #if IPSEC_ESP
3606 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3607 #else
3608 m_freem(state->m);
3609 error = EINVAL;
3610 #endif
3611 break;
3612 case IPPROTO_AH:
3613 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3614 break;
3615 default:
3616 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3617 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3618 m_freem(state->m);
3619 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3620 error = EPROTONOSUPPORT;
3621 break;
3622 }
3623 if (error) {
3624 state->m = NULL;
3625 goto bad;
3626 }
3627 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3628 if (plen > IPV6_MAXPACKET) {
3629 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3630 "IPsec with IPv6 jumbogram is not supported\n"));
3631 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3632 error = EINVAL; /*XXX*/
3633 goto bad;
3634 }
3635 ip6 = mtod(state->m, struct ip6_hdr *);
3636 ip6->ip6_plen = htons(plen);
3637
3638 return 0;
3639 bad:
3640 return error;
3641 }
3642
3643 int
3644 ipsec6_output_trans(
3645 struct ipsec_output_state *state,
3646 u_char *nexthdrp,
3647 struct mbuf *mprev,
3648 struct secpolicy *sp,
3649 __unused int flags,
3650 int *tun)
3651 {
3652 struct ip6_hdr *ip6;
3653 struct ipsecrequest *isr = NULL;
3654 struct secasindex saidx;
3655 int error = 0;
3656 struct sockaddr_in6 *sin6;
3657 struct secasvar *sav = NULL;
3658
3659 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3660
3661 if (!state) {
3662 panic("state == NULL in ipsec6_output_trans");
3663 }
3664 if (!state->m) {
3665 panic("state->m == NULL in ipsec6_output_trans");
3666 }
3667 if (!nexthdrp) {
3668 panic("nexthdrp == NULL in ipsec6_output_trans");
3669 }
3670 if (!mprev) {
3671 panic("mprev == NULL in ipsec6_output_trans");
3672 }
3673 if (!sp) {
3674 panic("sp == NULL in ipsec6_output_trans");
3675 }
3676 if (!tun) {
3677 panic("tun == NULL in ipsec6_output_trans");
3678 }
3679
3680 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3681 printf("ipsec6_output_trans: applyed SP\n");
3682 kdebug_secpolicy(sp));
3683
3684 *tun = 0;
3685 for (isr = sp->req; isr; isr = isr->next) {
3686 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3687 /* the rest will be handled by ipsec6_output_tunnel() */
3688 break;
3689 }
3690
3691 /* make SA index for search proper SA */
3692 ip6 = mtod(state->m, struct ip6_hdr *);
3693 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3694 saidx.mode = isr->saidx.mode;
3695 saidx.reqid = isr->saidx.reqid;
3696 sin6 = (struct sockaddr_in6 *)&saidx.src;
3697 if (sin6->sin6_len == 0) {
3698 sin6->sin6_len = sizeof(*sin6);
3699 sin6->sin6_family = AF_INET6;
3700 sin6->sin6_port = IPSEC_PORT_ANY;
3701 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3702 sizeof(ip6->ip6_src));
3703 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3704 /* fix scope id for comparing SPD */
3705 sin6->sin6_addr.s6_addr16[1] = 0;
3706 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3707 }
3708 }
3709 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3710 if (sin6->sin6_len == 0) {
3711 sin6->sin6_len = sizeof(*sin6);
3712 sin6->sin6_family = AF_INET6;
3713 sin6->sin6_port = IPSEC_PORT_ANY;
3714 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3715 sizeof(ip6->ip6_dst));
3716 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3717 /* fix scope id for comparing SPD */
3718 sin6->sin6_addr.s6_addr16[1] = 0;
3719 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3720 }
3721 }
3722
3723 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3724 /*
3725 * IPsec processing is required, but no SA found.
3726 * I assume that key_acquire() has been called
3727 * to get/establish the SA. Here I discard
3728 * this packet because it is the responsibility of the
3729 * upper layer to retransmit it.
3730 */
3731 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3732 error = ENOENT;
3733
3734 /*
3735 * Notify ourselves that the packet has been
3736 * discarded. I believe this is better than
3737 * just silently discarding it. (jinmei@kame.net)
3738 * XXX: should we restrict the error to TCP packets?
3739 * XXX: should we directly notify sockets via
3740 * pfctlinputs?
3741 */
3742 icmp6_error(state->m, ICMP6_DST_UNREACH,
3743 ICMP6_DST_UNREACH_ADMIN, 0);
3744 state->m = NULL; /* icmp6_error freed the mbuf */
3745 goto bad;
3746 }
3747
3748 /* validity check */
3749 if (sav == NULL) {
3750 switch (ipsec_get_reqlevel(isr)) {
3751 case IPSEC_LEVEL_USE:
3752 continue;
3753 case IPSEC_LEVEL_REQUIRE:
3754 /* must not be reached here. */
3755 panic("ipsec6_output_trans: no SA found, but required.");
3756 }
3757 }
3758
3759 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
3760 goto bad;
3761 }
3762 }
3763
3764 /* if we have more to go, we need a tunnel mode processing */
3765 if (isr != NULL) {
3766 *tun = 1;
3767 }
3768
3769 if (sav) {
3770 key_freesav(sav, KEY_SADB_UNLOCKED);
3771 }
3772 return 0;
3773
3774 bad:
3775 if (sav) {
3776 key_freesav(sav, KEY_SADB_UNLOCKED);
3777 }
3778 m_freem(state->m);
3779 state->m = NULL;
3780 return error;
3781 }
3782
3783 /*
3784 * IPsec output logic for IPv6, tunnel mode.
3785 */
3786 static int
3787 ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3788 {
3789 struct ip6_hdr *ip6;
3790 int error = 0;
3791 int plen;
3792 struct sockaddr_in6* dst6;
3793 struct route_in6 *ro6;
3794
3795 /* validity check */
3796 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3797 error = EINVAL;
3798 goto bad;
3799 }
3800
3801 /*
3802 * If there is no valid SA, we give up processing;
3803 * see the same place in ipsec4_output().
3804 */
3805 if (sav->state != SADB_SASTATE_MATURE
3806 && sav->state != SADB_SASTATE_DYING) {
3807 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3808 error = EINVAL;
3809 goto bad;
3810 }
3811
3812 state->outgoing_if = sav->sah->outgoing_if;
3813
3814 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3815 /*
3816 * build IPsec tunnel.
3817 */
3818 state->m = ipsec6_splithdr(state->m);
3819 if (!state->m) {
3820 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3821 error = ENOMEM;
3822 goto bad;
3823 }
3824
3825 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3826 error = ipsec6_encapsulate(state->m, sav);
3827 if (error) {
3828 state->m = 0;
3829 goto bad;
3830 }
3831 ip6 = mtod(state->m, struct ip6_hdr *);
3832 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3833 struct ip *ip;
3834 struct sockaddr_in* dst4;
3835 struct route *ro4 = NULL;
3836 struct route ro4_copy;
3837 struct ip_out_args ipoa;
3838
3839 bzero(&ipoa, sizeof(ipoa));
3840 ipoa.ipoa_boundif = IFSCOPE_NONE;
3841 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
3842 ipoa.ipoa_sotc = SO_TC_UNSPEC;
3843 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3844
3845 if (must_be_last) {
3846 *must_be_last = 1;
3847 }
3848
3849 state->tunneled = 4; /* must not process any further in ip6_output */
3850 error = ipsec64_encapsulate(state->m, sav);
3851 if (error) {
3852 state->m = 0;
3853 goto bad;
3854 }
3855 /* Now we have an IPv4 packet */
3856 ip = mtod(state->m, struct ip *);
3857
3858 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3859 lck_mtx_lock(sadb_mutex);
3860 ro4 = (struct route *)&sav->sah->sa_route;
3861 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3862 if (ro4->ro_rt) {
3863 RT_LOCK(ro4->ro_rt);
3864 }
3865 if (ROUTE_UNUSABLE(ro4) ||
3866 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3867 if (ro4->ro_rt != NULL) {
3868 RT_UNLOCK(ro4->ro_rt);
3869 }
3870 ROUTE_RELEASE(ro4);
3871 }
3872 if (ro4->ro_rt == NULL) {
3873 dst4->sin_family = AF_INET;
3874 dst4->sin_len = sizeof(*dst4);
3875 dst4->sin_addr = ip->ip_dst;
3876 } else {
3877 RT_UNLOCK(ro4->ro_rt);
3878 }
3879 route_copyout(&ro4_copy, ro4, sizeof(struct route));
3880 // release sadb_mutex, after updating sah's route cache and getting a local copy
3881 lck_mtx_unlock(sadb_mutex);
3882 state->m = ipsec4_splithdr(state->m);
3883 if (!state->m) {
3884 error = ENOMEM;
3885 ROUTE_RELEASE(&ro4_copy);
3886 goto bad;
3887 }
3888 switch (sav->sah->saidx.proto) {
3889 case IPPROTO_ESP:
3890 #if IPSEC_ESP
3891 if ((error = esp4_output(state->m, sav)) != 0) {
3892 state->m = NULL;
3893 ROUTE_RELEASE(&ro4_copy);
3894 goto bad;
3895 }
3896 break;
3897
3898 #else
3899 m_freem(state->m);
3900 state->m = NULL;
3901 error = EINVAL;
3902 ROUTE_RELEASE(&ro4_copy);
3903 goto bad;
3904 #endif
3905 case IPPROTO_AH:
3906 if ((error = ah4_output(state->m, sav)) != 0) {
3907 state->m = NULL;
3908 ROUTE_RELEASE(&ro4_copy);
3909 goto bad;
3910 }
3911 break;
3912 default:
3913 ipseclog((LOG_ERR,
3914 "ipsec4_output: unknown ipsec protocol %d\n",
3915 sav->sah->saidx.proto));
3916 m_freem(state->m);
3917 state->m = NULL;
3918 error = EPROTONOSUPPORT;
3919 ROUTE_RELEASE(&ro4_copy);
3920 goto bad;
3921 }
3922
3923 if (state->m == 0) {
3924 error = ENOMEM;
3925 ROUTE_RELEASE(&ro4_copy);
3926 goto bad;
3927 }
3928 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3929 ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);
3930
3931 ip = mtod(state->m, struct ip *);
3932 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3933 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3934 state->m = NULL;
3935 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3936 lck_mtx_lock(sadb_mutex);
3937 route_copyin(&ro4_copy, ro4, sizeof(struct route));
3938 lck_mtx_unlock(sadb_mutex);
3939 if (error != 0) {
3940 goto bad;
3941 }
3942 goto done;
3943 } else {
3944 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3945 "unsupported inner family, spi=%u\n",
3946 (u_int32_t)ntohl(sav->spi)));
3947 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3948 error = EAFNOSUPPORT;
3949 goto bad;
3950 }
3951
3952 // grab sadb_mutex, before updating sah's route cache
3953 lck_mtx_lock(sadb_mutex);
3954 ro6 = &sav->sah->sa_route;
3955 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
3956 if (ro6->ro_rt) {
3957 RT_LOCK(ro6->ro_rt);
3958 }
3959 if (ROUTE_UNUSABLE(ro6) ||
3960 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
3961 if (ro6->ro_rt != NULL) {
3962 RT_UNLOCK(ro6->ro_rt);
3963 }
3964 ROUTE_RELEASE(ro6);
3965 }
3966 if (ro6->ro_rt == 0) {
3967 bzero(dst6, sizeof(*dst6));
3968 dst6->sin6_family = AF_INET6;
3969 dst6->sin6_len = sizeof(*dst6);
3970 dst6->sin6_addr = ip6->ip6_dst;
3971 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
3972 if (ro6->ro_rt) {
3973 RT_LOCK(ro6->ro_rt);
3974 }
3975 }
3976 if (ro6->ro_rt == 0) {
3977 ip6stat.ip6s_noroute++;
3978 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
3979 error = EHOSTUNREACH;
3980 // release sadb_mutex, after updating sah's route cache
3981 lck_mtx_unlock(sadb_mutex);
3982 goto bad;
3983 }
3984
3985 /*
3986 * adjust state->dst if tunnel endpoint is offlink
3987 *
3988 * XXX: caching rt_gateway value in the state is
3989 * not really good, since it may point elsewhere
3990 * when the gateway gets modified to a larger
3991 * sockaddr via rt_setgate(). This is currently
3992 * addressed by SA_SIZE roundup in that routine.
3993 */
3994 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
3995 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
3996 }
3997 RT_UNLOCK(ro6->ro_rt);
3998 ROUTE_RELEASE(&state->ro);
3999 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
4000 state->dst = (struct sockaddr *)dst6;
4001 state->tunneled = 6;
4002 // release sadb_mutex, after updating sah's route cache
4003 lck_mtx_unlock(sadb_mutex);
4004 }
4005
4006 state->m = ipsec6_splithdr(state->m);
4007 if (!state->m) {
4008 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
4009 error = ENOMEM;
4010 goto bad;
4011 }
4012 ip6 = mtod(state->m, struct ip6_hdr *);
4013 switch (sav->sah->saidx.proto) {
4014 case IPPROTO_ESP:
4015 #if IPSEC_ESP
4016 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4017 #else
4018 m_freem(state->m);
4019 error = EINVAL;
4020 #endif
4021 break;
4022 case IPPROTO_AH:
4023 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4024 break;
4025 default:
4026 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4027 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
4028 m_freem(state->m);
4029 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4030 error = EINVAL;
4031 break;
4032 }
4033 if (error) {
4034 state->m = NULL;
4035 goto bad;
4036 }
4037 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
4038 if (plen > IPV6_MAXPACKET) {
4039 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4040 "IPsec with IPv6 jumbogram is not supported\n"));
4041 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4042 error = EINVAL; /*XXX*/
4043 goto bad;
4044 }
4045 ip6 = mtod(state->m, struct ip6_hdr *);
4046 ip6->ip6_plen = htons(plen);
4047 done:
4048 return 0;
4049
4050 bad:
4051 return error;
4052 }
4053
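/*
 * Illustrative sketch (editorial addition, not used by the code above): the
 * tunnel output path follows a copy-out/copy-in pattern for the SAH route
 * cache -- snapshot the cached route under sadb_mutex, transmit without
 * holding the mutex, then merge the (possibly refreshed) route back.  A
 * condensed version of that pattern, using the hypothetical helper name
 * ipsec_output_with_cached_route, might look like this:
 */
#if 0 /* illustrative sketch only */
static int
ipsec_output_with_cached_route(struct secasvar *sav, struct mbuf *m,
    struct ip_out_args *ipoa)
{
    struct route ro_copy = {};
    int error;

    /* snapshot the SAH route cache under sadb_mutex */
    lck_mtx_lock(sadb_mutex);
    route_copyout(&ro_copy, (struct route *)&sav->sah->sa_route,
        sizeof(struct route));
    lck_mtx_unlock(sadb_mutex);

    /* transmit without holding the mutex */
    error = ip_output(m, NULL, &ro_copy, IP_OUTARGS, NULL, ipoa);

    /* merge the (possibly refreshed) route back into the cache */
    lck_mtx_lock(sadb_mutex);
    route_copyin(&ro_copy, (struct route *)&sav->sah->sa_route,
        sizeof(struct route));
    lck_mtx_unlock(sadb_mutex);
    return error;
}
#endif
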
4054 int
4055 ipsec6_output_tunnel(
4056 struct ipsec_output_state *state,
4057 struct secpolicy *sp,
4058 __unused int flags)
4059 {
4060 struct ip6_hdr *ip6;
4061 struct ipsecrequest *isr = NULL;
4062 struct secasindex saidx;
4063 struct secasvar *sav = NULL;
4064 int error = 0;
4065
4066 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4067
4068 if (!state) {
4069 panic("state == NULL in ipsec6_output_tunnel");
4070 }
4071 if (!state->m) {
4072 panic("state->m == NULL in ipsec6_output_tunnel");
4073 }
4074 if (!sp) {
4075 panic("sp == NULL in ipsec6_output_tunnel");
4076 }
4077
4078 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
4079 printf("ipsec6_output_tunnel: applied SP\n");
4080 kdebug_secpolicy(sp));
4081
4082 /*
4083 * transport mode ipsec (before the 1st tunnel mode) is already
4084 * processed by ipsec6_output_trans().
4085 */
4086 for (isr = sp->req; isr; isr = isr->next) {
4087 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4088 break;
4089 }
4090 }
4091
4092 for (/* already initialized */; isr; isr = isr->next) {
4093 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4094 /* In tunnel mode, the SA peers must be specified. */
4095 bcopy(&isr->saidx, &saidx, sizeof(saidx));
4096 } else {
4097 /* make SA index to look for a proper SA */
4098 struct sockaddr_in6 *sin6;
4099
4100 bzero(&saidx, sizeof(saidx));
4101 saidx.proto = isr->saidx.proto;
4102 saidx.mode = isr->saidx.mode;
4103 saidx.reqid = isr->saidx.reqid;
4104
4105 ip6 = mtod(state->m, struct ip6_hdr *);
4106 sin6 = (struct sockaddr_in6 *)&saidx.src;
4107 if (sin6->sin6_len == 0) {
4108 sin6->sin6_len = sizeof(*sin6);
4109 sin6->sin6_family = AF_INET6;
4110 sin6->sin6_port = IPSEC_PORT_ANY;
4111 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
4112 sizeof(ip6->ip6_src));
4113 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
4114 /* fix scope id for comparing SPD */
4115 sin6->sin6_addr.s6_addr16[1] = 0;
4116 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
4117 }
4118 }
4119 sin6 = (struct sockaddr_in6 *)&saidx.dst;
4120 if (sin6->sin6_len == 0) {
4121 sin6->sin6_len = sizeof(*sin6);
4122 sin6->sin6_family = AF_INET6;
4123 sin6->sin6_port = IPSEC_PORT_ANY;
4124 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
4125 sizeof(ip6->ip6_dst));
4126 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
4127 /* fix scope id for comparing SPD */
4128 sin6->sin6_addr.s6_addr16[1] = 0;
4129 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
4130 }
4131 }
4132 }
4133
4134 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
4135 /*
4136 * IPsec processing is required, but no SA was found.
4137 * We assume that key_acquire() has been called
4138 * to get/establish the SA.  Discard this packet
4139 * here, because it is the upper layer's
4140 * responsibility to retransmit it.
4141 */
4142 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4143 error = ENOENT;
4144 goto bad;
4145 }
4146
4147 /* validity check */
4148 if (sav == NULL) {
4149 switch (ipsec_get_reqlevel(isr)) {
4150 case IPSEC_LEVEL_USE:
4151 continue;
4152 case IPSEC_LEVEL_REQUIRE:
4153 /* must not be reached here. */
4154 panic("ipsec6_output_tunnel: no SA found, but required.");
4155 }
4156 }
4157
4158 /*
4159 * If there is no valid SA, we give up on processing;
4160 * see the corresponding check in ipsec4_output().
4161 */
4162 if (sav->state != SADB_SASTATE_MATURE
4163 && sav->state != SADB_SASTATE_DYING) {
4164 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4165 error = EINVAL;
4166 goto bad;
4167 }
4168
4169 int must_be_last = 0;
4170
4171 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4172 goto bad;
4173 }
4174
4175 if (must_be_last && isr->next) {
4176 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4177 "IPv4 must be outer layer, spi=%u\n",
4178 (u_int32_t)ntohl(sav->spi)));
4179 error = EINVAL;
4180 goto bad;
4181 }
4182 }
4183
4184 if (sav) {
4185 key_freesav(sav, KEY_SADB_UNLOCKED);
4186 }
4187 return 0;
4188
4189 bad:
4190 if (sav) {
4191 key_freesav(sav, KEY_SADB_UNLOCKED);
4192 }
4193 if (state->m) {
4194 m_freem(state->m);
4195 }
4196 state->m = NULL;
4197 return error;
4198 }
4199
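/*
 * Illustrative sketch (editorial addition): the "fix scope id" steps above
 * follow the KAME convention in which a link-local address copied straight
 * from the packet carries the interface index embedded in s6_addr16[1].
 * For SPD/SA-index comparison that word is cleared and moved into
 * sin6_scope_id.  A condensed version of that conversion, using the
 * hypothetical helper name ipsec_kame_scope_to_sin6:
 */
#if 0 /* illustrative sketch only */
static void
ipsec_kame_scope_to_sin6(const struct in6_addr *addr, struct sockaddr_in6 *sin6)
{
    sin6->sin6_len = sizeof(*sin6);
    sin6->sin6_family = AF_INET6;
    sin6->sin6_port = IPSEC_PORT_ANY;
    bcopy(addr, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
    if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
        /* recover the embedded interface index, then clear it */
        sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
        sin6->sin6_addr.s6_addr16[1] = 0;
    }
}
#endif
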
4200 int
4201 ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4202 {
4203 int error = 0;
4204 struct secasvar *sav = NULL;
4205
4206 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4207
4208 if (state == NULL) {
4209 panic("state == NULL in ipsec6_interface_output");
4210 }
4211 if (state->m == NULL) {
4212 panic("state->m == NULL in ipsec6_interface_output");
4213 }
4214 if (nexthdrp == NULL) {
4215 panic("nexthdrp == NULL in ipsec6_interface_output");
4216 }
4217 if (mprev == NULL) {
4218 panic("mprev == NULL in ipsec6_interface_output");
4219 }
4220
4221 struct ip6_hdr *ip6 = mtod(state->m, struct ip6_hdr *);
4222
4223 struct sockaddr_in6 src = {};
4224 src.sin6_family = AF_INET6;
4225 src.sin6_len = sizeof(src);
4226 memcpy(&src.sin6_addr, &ip6->ip6_src, sizeof(src.sin6_addr));
4227
4228 struct sockaddr_in6 dst = {};
4229 dst.sin6_family = AF_INET6;
4230 dst.sin6_len = sizeof(dst);
4231 memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr));
4232
4233 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
4234 (struct sockaddr *)&src,
4235 (struct sockaddr *)&dst);
4236 if (sav == NULL) {
4237 goto bad;
4238 }
4239
4240 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4241 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4242 goto bad;
4243 }
4244 } else {
4245 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4246 goto bad;
4247 }
4248 }
4249
4250 if (sav) {
4251 key_freesav(sav, KEY_SADB_UNLOCKED);
4252 }
4253 return 0;
4254
4255 bad:
4256 if (sav) {
4257 key_freesav(sav, KEY_SADB_UNLOCKED);
4258 }
4259 m_freem(state->m);
4260 state->m = NULL;
4261 return error;
4262 }
4263 #endif /*INET6*/
4264
4265 #if INET
4266 /*
4267 * Chop the IP header and options off from the payload.
4268 */
4269 struct mbuf *
4270 ipsec4_splithdr(struct mbuf *m)
4271 {
4272 struct mbuf *mh;
4273 struct ip *ip;
4274 int hlen;
4275
4276 if (m->m_len < sizeof(struct ip)) {
4277 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4278 }
4279 ip = mtod(m, struct ip *);
4280 #ifdef _IP_VHL
4281 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4282 #else
4283 hlen = ip->ip_hl << 2;
4284 #endif
4285 if (m->m_len > hlen) {
4286 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4287 if (!mh) {
4288 m_freem(m);
4289 return NULL;
4290 }
4291 M_COPY_PKTHDR(mh, m);
4292 MH_ALIGN(mh, hlen);
4293 m->m_flags &= ~M_PKTHDR;
4294 m_mchtype(m, MT_DATA);
4295 m->m_len -= hlen;
4296 m->m_data += hlen;
4297 mh->m_next = m;
4298 m = mh;
4299 m->m_len = hlen;
4300 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4301 } else if (m->m_len < hlen) {
4302 m = m_pullup(m, hlen);
4303 if (!m) {
4304 return NULL;
4305 }
4306 }
4307 return m;
4308 }
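
/*
 * Illustrative sketch (editorial addition): after ipsec4_splithdr()
 * succeeds, the full IP header (including options) is contiguous in the
 * first mbuf; when the first mbuf originally carried payload beyond the
 * header, the header has been split into a header-only mbuf of its own.
 * A caller could check that invariant along these lines; the helper name
 * ipsec4_splithdr_check is hypothetical.
 */
#if 0 /* illustrative sketch only */
static int
ipsec4_splithdr_check(struct mbuf *m)
{
    struct ip *ip = mtod(m, struct ip *);
    int hlen;
#ifdef _IP_VHL
    hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
#else
    hlen = ip->ip_hl << 2;
#endif
    /* the entire IP header must be contiguous in the first mbuf */
    return m->m_len >= hlen;
}
#endif /* illustrative sketch */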
4309 #endif
4310
4311 #if INET6
4312 struct mbuf *
4313 ipsec6_splithdr(struct mbuf *m)
4314 {
4315 struct mbuf *mh;
4316 struct ip6_hdr *ip6;
4317 int hlen;
4318
4319 if (m->m_len < sizeof(struct ip6_hdr)) {
4320 panic("ipsec6_splithdr: first mbuf too short");
4321 }
4322 ip6 = mtod(m, struct ip6_hdr *);
4323 hlen = sizeof(struct ip6_hdr);
4324 if (m->m_len > hlen) {
4325 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4326 if (!mh) {
4327 m_freem(m);
4328 return NULL;
4329 }
4330 M_COPY_PKTHDR(mh, m);
4331 MH_ALIGN(mh, hlen);
4332 m->m_flags &= ~M_PKTHDR;
4333 m_mchtype(m, MT_DATA);
4334 m->m_len -= hlen;
4335 m->m_data += hlen;
4336 mh->m_next = m;
4337 m = mh;
4338 m->m_len = hlen;
4339 bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
4340 } else if (m->m_len < hlen) {
4341 m = m_pullup(m, hlen);
4342 if (!m) {
4343 return NULL;
4344 }
4345 }
4346 return m;
4347 }
4348 #endif
4349
4350 /* validate inbound IPsec tunnel packet. */
4351 int
4352 ipsec4_tunnel_validate(
4353 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4354 int off,
4355 u_int nxt0,
4356 struct secasvar *sav,
4357 sa_family_t *ifamily)
4358 {
4359 u_int8_t nxt = nxt0 & 0xff;
4360 struct sockaddr_in *sin;
4361 struct sockaddr_in osrc, odst, i4src, i4dst;
4362 struct sockaddr_in6 i6src, i6dst;
4363 int hlen;
4364 struct secpolicy *sp;
4365 struct ip *oip;
4366
4367 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4368
4369 #if DIAGNOSTIC
4370 if (m->m_len < sizeof(struct ip)) {
4371 panic("too short mbuf on ipsec4_tunnel_validate");
4372 }
4373 #endif
4374 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
4375 return 0;
4376 }
4377 if (m->m_pkthdr.len < off + sizeof(struct ip)) {
4378 return 0;
4379 }
4380 /* do not decapsulate if the SA is for transport mode only */
4381 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4382 return 0;
4383 }
4384
4385 oip = mtod(m, struct ip *);
4386 #ifdef _IP_VHL
4387 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4388 #else
4389 hlen = oip->ip_hl << 2;
4390 #endif
4391 if (hlen != sizeof(struct ip)) {
4392 return 0;
4393 }
4394
4395 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4396 if (sin->sin_family != AF_INET) {
4397 return 0;
4398 }
4399 if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0) {
4400 return 0;
4401 }
4402
4403 if (sav->sah->ipsec_if != NULL) {
4404 // ipsec interface SAs don't have policies.
4405 if (nxt == IPPROTO_IPV4) {
4406 *ifamily = AF_INET;
4407 } else if (nxt == IPPROTO_IPV6) {
4408 *ifamily = AF_INET6;
4409 } else {
4410 return 0;
4411 }
4412 return 1;
4413 }
4414
4415 /* XXX slow */
4416 bzero(&osrc, sizeof(osrc));
4417 bzero(&odst, sizeof(odst));
4418 osrc.sin_family = odst.sin_family = AF_INET;
4419 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4420 osrc.sin_addr = oip->ip_src;
4421 odst.sin_addr = oip->ip_dst;
4422 /*
4423 * RFC 2401 5.2.1 (b) (assuming tunnel mode):
4424 * - If the inner destination is a multicast address, there can be
4425 * multiple permissible inner source addresses.  An implementation
4426 * may want to skip verification of the inner source address against
4427 * the SPD selector.
4428 * - If the inner protocol is ICMP, the packet may be an error report
4429 * from routers on the other side of the VPN cloud (R in the
4430 * following diagram).  In this case, we cannot verify the inner source
4431 * address against the SPD selector.
4432 * me -- gw === gw -- R -- you
4433 *
4434 * We consider the first bullet to be the user's responsibility for SPD
4435 * entry configuration (to encrypt multicast traffic, either set
4436 * the source range of the SPD selector to 0.0.0.0/0 or list explicit
4437 * address ranges for the possible senders).
4438 * The second bullet is not taken care of (yet).
4439 *
4440 * Therefore, we do nothing special about the inner source address.
4441 */
4442 if (nxt == IPPROTO_IPV4) {
4443 bzero(&i4src, sizeof(struct sockaddr_in));
4444 bzero(&i4dst, sizeof(struct sockaddr_in));
4445 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4446 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4447 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4448 (caddr_t)&i4src.sin_addr);
4449 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4450 (caddr_t)&i4dst.sin_addr);
4451 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4452 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4453 } else if (nxt == IPPROTO_IPV6) {
4454 bzero(&i6src, sizeof(struct sockaddr_in6));
4455 bzero(&i6dst, sizeof(struct sockaddr_in6));
4456 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4457 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4458 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4459 (caddr_t)&i6src.sin6_addr);
4460 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4461 (caddr_t)&i6dst.sin6_addr);
4462 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4463 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4464 } else {
4465 return 0; /* unsupported family */
4466 }
4467 if (!sp) {
4468 return 0;
4469 }
4470
4471 key_freesp(sp, KEY_SADB_UNLOCKED);
4472
4473 return 1;
4474 }
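
/*
 * Illustrative sketch (editorial addition): both tunnel validators map the
 * inner protocol number to an address family the same way when the SA
 * belongs to an ipsec interface.  That shared step could be expressed as a
 * small helper; the name ipsec_nxt_to_af is hypothetical.
 */
#if 0 /* illustrative sketch only */
static int
ipsec_nxt_to_af(u_int8_t nxt, sa_family_t *ifamily)
{
    switch (nxt) {
    case IPPROTO_IPV4:
        *ifamily = AF_INET;
        return 1;
    case IPPROTO_IPV6:
        *ifamily = AF_INET6;
        return 1;
    default:
        return 0;
    }
}
#endif /* illustrative sketch */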
4475
4476 #if INET6
4477 /* validate inbound IPsec tunnel packet. */
4478 int
4479 ipsec6_tunnel_validate(
4480 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4481 int off,
4482 u_int nxt0,
4483 struct secasvar *sav,
4484 sa_family_t *ifamily)
4485 {
4486 u_int8_t nxt = nxt0 & 0xff;
4487 struct sockaddr_in6 *sin6;
4488 struct sockaddr_in i4src, i4dst;
4489 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4490 struct secpolicy *sp;
4491 struct ip6_hdr *oip6;
4492
4493 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4494
4495 #if DIAGNOSTIC
4496 if (m->m_len < sizeof(struct ip6_hdr)) {
4497 panic("too short mbuf on ipsec6_tunnel_validate");
4498 }
4499 #endif
4500 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
4501 return 0;
4502 }
4503
4504 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
4505 return 0;
4506 }
4507 /* do not decapsulate if the SA is for transport mode only */
4508 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4509 return 0;
4510 }
4511
4512 oip6 = mtod(m, struct ip6_hdr *);
4513 /* AF_INET should be supported, but at this moment it is not. */
4514 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4515 if (sin6->sin6_family != AF_INET6) {
4516 return 0;
4517 }
4518 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr)) {
4519 return 0;
4520 }
4521
4522 if (sav->sah->ipsec_if != NULL) {
4523 // ipsec interface SAs don't have policies.
4524 if (nxt == IPPROTO_IPV4) {
4525 *ifamily = AF_INET;
4526 } else if (nxt == IPPROTO_IPV6) {
4527 *ifamily = AF_INET6;
4528 } else {
4529 return 0;
4530 }
4531 return 1;
4532 }
4533
4534 /* XXX slow */
4535 bzero(&osrc, sizeof(osrc));
4536 bzero(&odst, sizeof(odst));
4537 osrc.sin6_family = odst.sin6_family = AF_INET6;
4538 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4539 osrc.sin6_addr = oip6->ip6_src;
4540 odst.sin6_addr = oip6->ip6_dst;
4541
4542 /*
4543 * Regarding inner source address validation, see the long comment
4544 * in ipsec4_tunnel_validate().
4545 */
4546
4547 if (nxt == IPPROTO_IPV4) {
4548 bzero(&i4src, sizeof(struct sockaddr_in));
4549 bzero(&i4dst, sizeof(struct sockaddr_in));
4550 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4551 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4552 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4553 (caddr_t)&i4src.sin_addr);
4554 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4555 (caddr_t)&i4dst.sin_addr);
4556 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4557 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4558 } else if (nxt == IPPROTO_IPV6) {
4559 bzero(&i6src, sizeof(struct sockaddr_in6));
4560 bzero(&i6dst, sizeof(struct sockaddr_in6));
4561 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4562 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4563 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4564 (caddr_t)&i6src.sin6_addr);
4565 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4566 (caddr_t)&i6dst.sin6_addr);
4567 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4568 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4569 } else {
4570 return 0; /* unsupported family */
4571 }
4572 /*
4573 * When there is no suitable inbound policy for an IPsec tunnel-mode
4574 * packet, the kernel never decapsulates the tunneled packet as IPsec
4575 * tunnel mode, even when the system-wide policy is "none".  Instead,
4576 * the kernel leaves the packet to the generic tunnel module.  If no
4577 * generic tunnel rule matches, the packet is rejected and the
4578 * statistics are counted up.
4579 */
4580 if (!sp) {
4581 return 0;
4582 }
4583 key_freesp(sp, KEY_SADB_UNLOCKED);
4584
4585 return 1;
4586 }
4587 #endif
4588
4589 /*
4590 * Make a mbuf chain for encryption.
4591 * If the original mbuf chain contains a mbuf with a cluster,
4592 * allocate a new cluster and copy the data to the new cluster.
4593 * XXX: this hack is inefficient, but is necessary to handle cases
4594 * of TCP retransmission...
4595 */
4596 struct mbuf *
4597 ipsec_copypkt(struct mbuf *m)
4598 {
4599 struct mbuf *n, **mpp, *mnew;
4600
4601 for (n = m, mpp = &m; n; n = n->m_next) {
4602 if (n->m_flags & M_EXT) {
4603 /*
4604 * Make a copy only if there is more than one reference
4605 * to the cluster.
4606 * XXX: is this approach effective?
4607 */
4608 if (
4609 m_get_ext_free(n) != NULL ||
4610 m_mclhasreference(n)
4611 ) {
4612 int remain, copied;
4613 struct mbuf *mm;
4614
4615 if (n->m_flags & M_PKTHDR) {
4616 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4617 if (mnew == NULL) {
4618 goto fail;
4619 }
4620 M_COPY_PKTHDR(mnew, n);
4621 } else {
4622 MGET(mnew, M_DONTWAIT, MT_DATA);
4623 if (mnew == NULL) {
4624 goto fail;
4625 }
4626 }
4627 mnew->m_len = 0;
4628 mm = mnew;
4629
4630 /*
4631 * Copy data. If we don't have enough space to
4632 * store the whole data, allocate a cluster
4633 * or additional mbufs.
4634 * XXX: we don't use m_copyback(), since the
4635 * function does not use clusters and thus is
4636 * inefficient.
4637 */
4638 remain = n->m_len;
4639 copied = 0;
4640 while (1) {
4641 int len;
4642 struct mbuf *mn;
4643
4644 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) {
4645 len = remain;
4646 } else { /* allocate a cluster */
4647 MCLGET(mm, M_DONTWAIT);
4648 if (!(mm->m_flags & M_EXT)) {
4649 m_free(mm);
4650 goto fail;
4651 }
4652 len = remain < MCLBYTES ?
4653 remain : MCLBYTES;
4654 }
4655
4656 bcopy(n->m_data + copied, mm->m_data,
4657 len);
4658
4659 copied += len;
4660 remain -= len;
4661 mm->m_len = len;
4662
4663 if (remain <= 0) { /* completed? */
4664 break;
4665 }
4666
4667 /* need another mbuf */
4668 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4669 if (mn == NULL) {
4670 goto fail;
4671 }
4672 mn->m_pkthdr.rcvif = NULL;
4673 mm->m_next = mn;
4674 mm = mn;
4675 }
4676
4677 /* adjust chain */
4678 mm->m_next = m_free(n);
4679 n = mm;
4680 *mpp = mnew;
4681 mpp = &n->m_next;
4682
4683 continue;
4684 }
4685 }
4686 *mpp = n;
4687 mpp = &n->m_next;
4688 }
4689
4690 return m;
4691 fail:
4692 m_freem(m);
4693 return NULL;
4694 }
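
/*
 * Illustrative sketch (editorial addition): the per-mbuf decision above --
 * copy a cluster only when it is shared or has an external free routine --
 * can be read as the following predicate.  The helper name
 * ipsec_mbuf_needs_copy is hypothetical.
 */
#if 0 /* illustrative sketch only */
static int
ipsec_mbuf_needs_copy(struct mbuf *n)
{
    return (n->m_flags & M_EXT) &&
        (m_get_ext_free(n) != NULL || m_mclhasreference(n));
}
#endif /* illustrative sketch */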
4695
4696 /*
4697 * Tags are allocated as mbufs for now; since our minimum size is MLEN,
4698 * we should make use of up to that much space.
4699 */
4700 #define IPSEC_TAG_HEADER \
4701
4702 struct ipsec_tag {
4703 struct socket *socket;
4704 u_int32_t history_count;
4705 struct ipsec_history history[];
4706 #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
4707 /* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
4708 * are 32-bit:
4709 * Aligning to 64-bit since we cast to m_tag, which is 64-bit aligned.
4710 */
4711 } __attribute__ ((aligned(8)));
4712 #else
4713 };
4714 #endif
4715
4716 #define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
4717 #define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
4718 #define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
4719 sizeof(struct ipsec_history))
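
/*
 * Illustrative sketch (editorial addition): the tag payload is whatever is
 * left of an MLEN-sized mbuf after the m_tag header, i.e.
 *   IPSEC_TAG_SIZE    = MLEN - sizeof(struct m_tag)
 *   IPSEC_HISTORY_MAX = (IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE)
 *                         / sizeof(struct ipsec_history)
 * A compile-time check of that sizing could look like the following
 * (assuming a C11 _Static_assert is available in this build):
 */
#if 0 /* illustrative sketch only */
_Static_assert(IPSEC_HISTORY_MAX >= 1,
    "an ipsec_tag must hold at least one ipsec_history record");
#endif /* illustrative sketch */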
4720
4721 static struct ipsec_tag *
4722 ipsec_addaux(
4723 struct mbuf *m)
4724 {
4725 struct m_tag *tag;
4726
4727 /* Check if the tag already exists */
4728 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4729
4730 if (tag == NULL) {
4731 struct ipsec_tag *itag;
4732
4733 /* Allocate a tag */
4734 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4735 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4736
4737 if (tag) {
4738 itag = (struct ipsec_tag*)(tag + 1);
4739 itag->socket = 0;
4740 itag->history_count = 0;
4741
4742 m_tag_prepend(m, tag);
4743 }
4744 }
4745
4746 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4747 }
4748
4749 static struct ipsec_tag *
4750 ipsec_findaux(
4751 struct mbuf *m)
4752 {
4753 struct m_tag *tag;
4754
4755 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4756
4757 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4758 }
4759
4760 void
4761 ipsec_delaux(
4762 struct mbuf *m)
4763 {
4764 struct m_tag *tag;
4765
4766 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4767
4768 if (tag) {
4769 m_tag_delete(m, tag);
4770 }
4771 }
4772
4773 /* if the aux buffer is unnecessary, nuke it. */
4774 static void
4775 ipsec_optaux(
4776 struct mbuf *m,
4777 struct ipsec_tag *itag)
4778 {
4779 if (itag && itag->socket == NULL && itag->history_count == 0) {
4780 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4781 }
4782 }
4783
4784 int
4785 ipsec_setsocket(struct mbuf *m, struct socket *so)
4786 {
4787 struct ipsec_tag *tag;
4788
4789 /* if so == NULL, don't insist on getting the aux mbuf */
4790 if (so) {
4791 tag = ipsec_addaux(m);
4792 if (!tag) {
4793 return ENOBUFS;
4794 }
4795 } else {
4796 tag = ipsec_findaux(m);
4797 }
4798 if (tag) {
4799 tag->socket = so;
4800 ipsec_optaux(m, tag);
4801 }
4802 return 0;
4803 }
4804
4805 struct socket *
4806 ipsec_getsocket(struct mbuf *m)
4807 {
4808 struct ipsec_tag *itag;
4809
4810 itag = ipsec_findaux(m);
4811 if (itag) {
4812 return itag->socket;
4813 } else {
4814 return NULL;
4815 }
4816 }
4817
4818 int
4819 ipsec_addhist(
4820 struct mbuf *m,
4821 int proto,
4822 u_int32_t spi)
4823 {
4824 struct ipsec_tag *itag;
4825 struct ipsec_history *p;
4826 itag = ipsec_addaux(m);
4827 if (!itag) {
4828 return ENOBUFS;
4829 }
4830 if (itag->history_count == IPSEC_HISTORY_MAX) {
4831 return ENOSPC; /* XXX */
4832 }
4833 p = &itag->history[itag->history_count];
4834 itag->history_count++;
4835
4836 bzero(p, sizeof(*p));
4837 p->ih_proto = proto;
4838 p->ih_spi = spi;
4839
4840 return 0;
4841 }
4842
4843 struct ipsec_history *
4844 ipsec_gethist(
4845 struct mbuf *m,
4846 int *lenp)
4847 {
4848 struct ipsec_tag *itag;
4849
4850 itag = ipsec_findaux(m);
4851 if (!itag) {
4852 return NULL;
4853 }
4854 if (itag->history_count == 0) {
4855 return NULL;
4856 }
4857 if (lenp) {
4858 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4859 }
4860 return itag->history;
4861 }
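
/*
 * Illustrative sketch (editorial addition): a consumer of the history tag
 * records one entry per IPsec header processed (via ipsec_addhist) and can
 * later scan the records, e.g. to ask whether ESP was applied.  The helper
 * name ipsec_history_has_proto is hypothetical.
 */
#if 0 /* illustrative sketch only */
static int
ipsec_history_has_proto(struct mbuf *m, int proto)
{
    struct ipsec_history *h;
    int len = 0;

    h = ipsec_gethist(m, &len);
    if (h == NULL) {
        return 0;
    }
    for (int i = 0; i < (int)(len / sizeof(*h)); i++) {
        if (h[i].ih_proto == proto) {
            return 1;
        }
    }
    return 0;
}
#endif /* illustrative sketch */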
4862
4863 void
4864 ipsec_clearhist(
4865 struct mbuf *m)
4866 {
4867 struct ipsec_tag *itag;
4868
4869 itag = ipsec_findaux(m);
4870 if (itag) {
4871 itag->history_count = 0;
4872 }
4873 ipsec_optaux(m, itag);
4874 }
4875
4876 __private_extern__ boolean_t
4877 ipsec_send_natt_keepalive(
4878 struct secasvar *sav)
4879 {
4880 struct mbuf *m = NULL;
4881 int error = 0;
4882 int keepalive_interval = natt_keepalive_interval;
4883
4884 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4885 lck_mtx_lock(sadb_mutex);
4886
4887 if (((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) || sav->remote_ike_port == 0) {
4888 lck_mtx_unlock(sadb_mutex);
4889 return FALSE;
4890 }
4891
4892 if (sav->natt_interval != 0) {
4893 keepalive_interval = (int)sav->natt_interval;
4894 }
4895
4896 // natt timestamp may have changed... reverify
4897 if ((natt_now - sav->natt_last_activity) < keepalive_interval) {
4898 lck_mtx_unlock(sadb_mutex);
4899 return FALSE;
4900 }
4901
4902 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) {
4903 lck_mtx_unlock(sadb_mutex);
4904 return FALSE; // don't send these from the kernel
4905 }
4906
4907 lck_mtx_unlock(sadb_mutex);
4908
4909 m = m_gethdr(M_NOWAIT, MT_DATA);
4910 if (m == NULL) {
4911 return FALSE;
4912 }
4913
4914 lck_mtx_lock(sadb_mutex);
4915 if (sav->sah->saidx.dst.ss_family == AF_INET) {
4916 struct ip_out_args ipoa = {};
4917 struct route ro = {};
4918
4919 ipoa.ipoa_boundif = IFSCOPE_NONE;
4920 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
4921 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4922 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4923
4924 struct ip *ip = (__typeof__(ip))m_mtod(m);
4925
4926 /*
4927 * Type 2: a UDP packet complete with IP header.
4928 * We must do this because UDP output requires
4929 * an inpcb, which we don't have.  The UDP
4930 * packet carries a one-byte payload, and the
4931 * byte is set to 0xFF.
4932 */
4933 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4934 m->m_len = sizeof(struct udpiphdr) + 1;
4935 bzero(m_mtod(m), m->m_len);
4936 m->m_pkthdr.len = m->m_len;
4937
4938 ip->ip_len = m->m_len;
4939 ip->ip_ttl = ip_defttl;
4940 ip->ip_p = IPPROTO_UDP;
4941 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4942 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4943 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4944 } else {
4945 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4946 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4947 }
4948 if (sav->natt_encapsulated_src_port != 0) {
4949 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
4950 } else {
4951 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4952 }
4954 uh->uh_dport = htons(sav->remote_ike_port);
4955 uh->uh_ulen = htons(1 + sizeof(*uh));
4956 uh->uh_sum = 0;
4957 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4958
4959 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4960 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) {
4961 ROUTE_RELEASE(&sav->sah->sa_route);
4962 }
4963
4964 route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4965 lck_mtx_unlock(sadb_mutex);
4966
4967 necp_mark_packet_as_keepalive(m, TRUE);
4968 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4969
4970 lck_mtx_lock(sadb_mutex);
4971 route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4972 } else if (sav->sah->saidx.dst.ss_family == AF_INET6) {
4973 struct ip6_out_args ip6oa = {};
4974 struct route_in6 ro6 = {};
4975
4976 ip6oa.ip6oa_flowadv.code = 0;
4977 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
4978 if (sav->sah->outgoing_if) {
4979 ip6oa.ip6oa_boundif = sav->sah->outgoing_if;
4980 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
4981 }
4982
4983 struct ip6_hdr *ip6 = (__typeof__(ip6))m_mtod(m);
4984
4985 /*
4986 * Type 2: a UDP packet complete with IPv6 header.
4987 * We must do this because UDP output requires
4988 * an inpcb, which we don't have.  The UDP
4989 * packet carries a one-byte payload, and the
4990 * byte is set to 0xFF.
4991 */
4992 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip6));
4993 m->m_len = sizeof(struct udphdr) + sizeof(struct ip6_hdr) + 1;
4994 bzero(m_mtod(m), m->m_len);
4995 m->m_pkthdr.len = m->m_len;
4996
4997 ip6->ip6_flow = 0;
4998 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
4999 ip6->ip6_vfc |= IPV6_VERSION;
5000 ip6->ip6_nxt = IPPROTO_UDP;
5001 ip6->ip6_hlim = ip6_defhlim;
5002 ip6->ip6_plen = htons(sizeof(struct udphdr) + 1);
5003 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5004 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
5005 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
5006 } else {
5007 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
5008 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
5009 }
5010
5011 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
5012 ip6->ip6_src.s6_addr16[1] = 0;
5013 }
5014 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
5015 ip6->ip6_dst.s6_addr16[1] = 0;
5016 }
5017
5018 if (sav->natt_encapsulated_src_port != 0) {
5019 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5020 } else {
5021 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5022 }
5023 uh->uh_dport = htons(sav->remote_ike_port);
5024 uh->uh_ulen = htons(1 + sizeof(*uh));
5025 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip6) + sizeof(*uh)) = 0xFF;
5026 uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(uh->uh_ulen) + IPPROTO_UDP));
5027 m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
5028 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
5029
5030 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
5031 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET6) {
5032 ROUTE_RELEASE(&sav->sah->sa_route);
5033 }
5034
5035 route_copyout((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5036 lck_mtx_unlock(sadb_mutex);
5037
5038 necp_mark_packet_as_keepalive(m, TRUE);
5039 error = ip6_output(m, NULL, &ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa);
5040
5041 lck_mtx_lock(sadb_mutex);
5042 route_copyin((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5043 } else {
5044 ipseclog((LOG_ERR, "nat keepalive: invalid address family %u\n", sav->sah->saidx.dst.ss_family));
5045 lck_mtx_unlock(sadb_mutex);
5046 m_freem(m);
5047 return FALSE;
5048 }
5049
5050 if (error == 0) {
5051 sav->natt_last_activity = natt_now;
5052 lck_mtx_unlock(sadb_mutex);
5053 return TRUE;
5054 }
5055
5056 lck_mtx_unlock(sadb_mutex);
5057 return FALSE;
5058 }
5059
5060 __private_extern__ bool
5061 ipsec_fill_offload_frame(ifnet_t ifp,
5062 struct secasvar *sav,
5063 struct ifnet_keepalive_offload_frame *frame,
5064 size_t frame_data_offset)
5065 {
5066 u_int8_t *data = NULL;
5067 struct ip *ip = NULL;
5068 struct udphdr *uh = NULL;
5069
5070 if (sav == NULL || sav->sah == NULL || frame == NULL ||
5071 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
5072 sav->sah->saidx.dst.ss_family != AF_INET ||
5073 !(sav->flags & SADB_X_EXT_NATT) ||
5074 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
5075 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
5076 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
5077 ((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) ||
5078 sav->remote_ike_port == 0 ||
5079 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
5080 /* SA is not eligible for keepalive offload on this interface */
5081 return FALSE;
5082 }
5083
5084 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
5085 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5086 /* Not enough room in this data frame */
5087 return FALSE;
5088 }
5089
5090 data = frame->data;
5091 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
5092 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
5093
5094 frame->length = frame_data_offset + sizeof(struct udpiphdr) + 1;
5095 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
5096 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
5097
5098 bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
5099
5100 ip->ip_v = IPVERSION;
5101 ip->ip_hl = sizeof(struct ip) >> 2;
5102 ip->ip_off &= htons(~IP_OFFMASK);
5103 ip->ip_off &= htons(~IP_MF);
5104 switch (ip4_ipsec_dfbit) {
5105 case 0: /* clear DF bit */
5106 ip->ip_off &= htons(~IP_DF);
5107 break;
5108 case 1: /* set DF bit */
5109 ip->ip_off |= htons(IP_DF);
5110 break;
5111 default: /* copy DF bit */
5112 break;
5113 }
5114 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
5115 if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) {
5116 ip->ip_id = 0;
5117 } else {
5118 ip->ip_id = ip_randomid();
5119 }
5120 ip->ip_ttl = ip_defttl;
5121 ip->ip_p = IPPROTO_UDP;
5122 ip->ip_sum = 0;
5123 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5124 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5125 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5126 } else {
5127 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5128 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5129 }
5130 ip->ip_sum = in_cksum_hdr_opt(ip);
5131 /* Fill out the UDP header */
5132 if (sav->natt_encapsulated_src_port != 0) {
5133 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5134 } else {
5135 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5136 }
5137 uh->uh_dport = htons(sav->remote_ike_port);
5138 uh->uh_ulen = htons(1 + sizeof(*uh));
5139 uh->uh_sum = 0;
5140 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
5141
5142 if (sav->natt_offload_interval != 0) {
5143 frame->interval = sav->natt_offload_interval;
5144 } else if (sav->natt_interval != 0) {
5145 frame->interval = sav->natt_interval;
5146 } else {
5147 frame->interval = natt_keepalive_interval;
5148 }
5149 return TRUE;
5150 }
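
/*
 * Illustrative sketch (editorial addition): the NAT-T keepalive paths above
 * (IPv4, IPv6 and the offload frame) all build the same UDP header and
 * one-byte 0xFF payload -- source port from natt_encapsulated_src_port if
 * set, otherwise esp_udp_encap_port; destination port remote_ike_port.
 * That shared step, with the hypothetical helper name
 * ipsec_natt_fill_keepalive_udp, could be written as:
 */
#if 0 /* illustrative sketch only */
static void
ipsec_natt_fill_keepalive_udp(struct secasvar *sav, struct udphdr *uh,
    u_int8_t *payload)
{
    if (sav->natt_encapsulated_src_port != 0) {
        uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
    } else {
        uh->uh_sport = htons((u_short)esp_udp_encap_port);
    }
    uh->uh_dport = htons(sav->remote_ike_port);
    uh->uh_ulen = htons(1 + sizeof(*uh));
    uh->uh_sum = 0; /* the IPv6 path recomputes this as a pseudo-header sum */
    *payload = 0xFF;
}
#endif /* illustrative sketch */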
5151
5152 static int
5153 sysctl_ipsec_wake_packet SYSCTL_HANDLER_ARGS
5154 {
5155 #pragma unused(oidp, arg1, arg2)
5156 if (req->newptr != USER_ADDR_NULL) {
5157 ipseclog((LOG_ERR, "ipsec: invalid parameters"));
5158 return EINVAL;
5159 }
5160
5161 struct proc *p = current_proc();
5162 if (p != NULL) {
5163 uid_t uid = kauth_cred_getuid(proc_ucred(p));
5164 if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET, 0) != 0) {
5165 ipseclog((LOG_ERR, "process does not hold necessary entitlement to get ipsec wake packet"));
5166 return EPERM;
5167 }
5168
5169 int result = sysctl_io_opaque(req, &ipsec_wake_pkt, sizeof(ipsec_wake_pkt), NULL);
5170
5171 ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u result %d",
5172 __func__,
5173 ipsec_wake_pkt.wake_uuid,
5174 ipsec_wake_pkt.wake_pkt_spi,
5175 ipsec_wake_pkt.wake_pkt_seq,
5176 ipsec_wake_pkt.wake_pkt_len,
5177 result));
5178
5179 return result;
5180 }
5181
5182 return EINVAL;
5183 }
5184
5185 SYSCTL_PROC(_net_link_generic_system, OID_AUTO, ipsec_wake_pkt, CTLTYPE_STRUCT | CTLFLAG_RD |
5186 CTLFLAG_LOCKED, 0, 0, &sysctl_ipsec_wake_packet, "S,ipsec wake packet", "");
5187
5188 void
5189 ipsec_save_wake_packet(struct mbuf *wake_mbuf, u_int32_t spi, u_int32_t seq)
5190 {
5191 if (wake_mbuf == NULL) {
5192 ipseclog((LOG_ERR, "ipsec: bad wake packet"));
5193 return;
5194 }
5195
5196 lck_mtx_lock(sadb_mutex);
5197 if (__probable(!ipsec_save_wake_pkt)) {
5198 goto done;
5199 }
5200
5201 u_int16_t max_len = (wake_mbuf->m_pkthdr.len > IPSEC_MAX_WAKE_PKT_LEN) ? IPSEC_MAX_WAKE_PKT_LEN : wake_mbuf->m_pkthdr.len;
5202 m_copydata(wake_mbuf, 0, max_len, (void *)ipsec_wake_pkt.wake_pkt);
5203 ipsec_wake_pkt.wake_pkt_len = max_len;
5204
5205 ipsec_wake_pkt.wake_pkt_spi = spi;
5206 ipsec_wake_pkt.wake_pkt_seq = seq;
5207
5208 ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u",
5209 __func__,
5210 ipsec_wake_pkt.wake_uuid,
5211 ipsec_wake_pkt.wake_pkt_spi,
5212 ipsec_wake_pkt.wake_pkt_seq,
5213 ipsec_wake_pkt.wake_pkt_len));
5214
5215 struct kev_msg ev_msg = { 0 };
5216 ev_msg.vendor_code = KEV_VENDOR_APPLE;
5217 ev_msg.kev_class = KEV_NETWORK_CLASS;
5218 ev_msg.kev_subclass = KEV_IPSEC_SUBCLASS;
5219 ev_msg.event_code = KEV_IPSEC_WAKE_PACKET;
5220 int result = kev_post_msg(&ev_msg);
5221 if (result != 0) {
5222 os_log_error(OS_LOG_DEFAULT, "%s: kev_post_msg() failed with error %d for wake uuid %s",
5223 __func__, result, ipsec_wake_pkt.wake_uuid);
5224 }
5225
5226 ipsec_save_wake_pkt = false;
5227 done:
5228 lck_mtx_unlock(sadb_mutex);
5229 return;
5230 }
5231
5232 static void
5233 ipsec_get_local_ports(void)
5234 {
5235 errno_t error;
5236 ifnet_t *ifp_list;
5237 uint32_t count, i;
5238 static uint8_t port_bitmap[bitstr_size(IP_PORTRANGE_SIZE)];
5239
5240 error = ifnet_list_get_all(IFNET_FAMILY_IPSEC, &ifp_list, &count);
5241 if (error != 0) {
5242 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_list_get_all() failed %d",
5243 __func__, error);
5244 return;
5245 }
5246 for (i = 0; i < count; i++) {
5247 ifnet_t ifp = ifp_list[i];
5248
5249 /*
5250 * Get all the TCP and UDP ports for IPv4 and IPv6
5251 */
5252 error = ifnet_get_local_ports_extended(ifp, PF_UNSPEC,
5253 IFNET_GET_LOCAL_PORTS_WILDCARDOK |
5254 IFNET_GET_LOCAL_PORTS_NOWAKEUPOK |
5255 IFNET_GET_LOCAL_PORTS_ANYTCPSTATEOK,
5256 port_bitmap);
5257 if (error != 0) {
5258 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_get_local_ports_extended(%s) failed %d",
5259 __func__, if_name(ifp), error);
5260 }
5261 }
5262 ifnet_list_free(ifp_list);
5263 }
5264
5265 static IOReturn
5266 ipsec_sleep_wake_handler(void *target, void *refCon, UInt32 messageType,
5267 void *provider, void *messageArgument, vm_size_t argSize)
5268 {
5269 #pragma unused(target, refCon, provider, messageArgument, argSize)
5270 switch (messageType) {
5271 case kIOMessageSystemWillSleep:
5272 ipsec_get_local_ports();
5273 memset(&ipsec_wake_pkt, 0, sizeof(ipsec_wake_pkt));
5274 IOPMCopySleepWakeUUIDKey(ipsec_wake_pkt.wake_uuid,
5275 sizeof(ipsec_wake_pkt.wake_uuid));
5276 ipseclog((LOG_NOTICE,
5277 "ipsec: system will sleep, uuid: %s", ipsec_wake_pkt.wake_uuid));
5278 break;
5279 case kIOMessageSystemWillPowerOn:
5280 ipsec_save_wake_pkt = true;
5281 ipseclog((LOG_NOTICE,
5282 "ipsec: system will powered on, uuid: %s", ipsec_wake_pkt.wake_uuid));
5283 break;
5284 default:
5285 break;
5286 }
5287
5288 return IOPMAckImplied;
5289 }
5290
5291 void
5292 ipsec_monitor_sleep_wake(void)
5293 {
5294 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
5295
5296 if (sleep_wake_handle == NULL) {
5297 sleep_wake_handle = registerSleepWakeInterest(ipsec_sleep_wake_handler,
5298 NULL, NULL);
5299 if (sleep_wake_handle != NULL) {
5300 ipseclog((LOG_INFO,
5301 "ipsec: monitoring sleep wake"));
5302 }
5303 }
5304 }