]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet6/ipsec.c
2e11f4515eafe4e78b258daeab7a690c6a2118bc
[apple/xnu.git] / bsd / netinet6 / ipsec.c
1 /*
2 * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30 /* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * IPsec controller part.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/errno.h>
75 #include <sys/time.h>
76 #include <sys/kernel.h>
77 #include <sys/syslog.h>
78 #include <sys/sysctl.h>
79 #include <sys/priv.h>
80 #include <kern/locks.h>
81 #include <sys/kauth.h>
82 #include <sys/bitstring.h>
83
84 #include <libkern/OSAtomic.h>
85 #include <libkern/sysctl.h>
86
87 #include <net/if.h>
88 #include <net/route.h>
89 #include <net/if_ipsec.h>
90 #include <net/if_ports_used.h>
91
92 #include <netinet/in.h>
93 #include <netinet/in_systm.h>
94 #include <netinet/ip.h>
95 #include <netinet/ip_var.h>
96 #include <netinet/in_var.h>
97 #include <netinet/udp.h>
98 #include <netinet/udp_var.h>
99 #include <netinet/ip_ecn.h>
100 #include <netinet6/ip6_ecn.h>
101 #include <netinet/tcp.h>
102 #include <netinet/udp.h>
103
104 #include <netinet/ip6.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet/in_pcb.h>
107 #include <netinet/icmp6.h>
108
109 #include <netinet6/ipsec.h>
110 #include <netinet6/ipsec6.h>
111 #include <netinet6/ah.h>
112 #include <netinet6/ah6.h>
113 #if IPSEC_ESP
114 #include <netinet6/esp.h>
115 #include <netinet6/esp6.h>
116 #endif
117 #include <netkey/key.h>
118 #include <netkey/keydb.h>
119 #include <netkey/key_debug.h>
120
121 #include <net/net_osdep.h>
122
123 #include <IOKit/pwr_mgt/IOPM.h>
124
125 #include <os/log_private.h>
126
127 #if IPSEC_DEBUG
128 int ipsec_debug = 1;
129 #else
130 int ipsec_debug = 0;
131 #endif
132
133 #include <sys/kdebug.h>
134 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
135 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
136 #define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
137 #define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
138 #define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
139
140 extern lck_mtx_t *sadb_mutex;
141
142 struct ipsecstat ipsecstat;
143 int ip4_ah_cleartos = 1;
144 int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
145 int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
146 int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
147 int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
148 int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
149 int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
150 struct secpolicy ip4_def_policy;
151 int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
152 int ip4_esp_randpad = -1;
153 int esp_udp_encap_port = 0;
154 static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
155 extern int natt_keepalive_interval;
156 extern u_int64_t natt_now;
157
158 struct ipsec_tag;
159
160 void *sleep_wake_handle = NULL;
161 bool ipsec_save_wake_pkt = false;
162
163 SYSCTL_DECL(_net_inet_ipsec);
164 SYSCTL_DECL(_net_inet6_ipsec6);
165 /* net.inet.ipsec */
166 SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
167 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
168 SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
169 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
170 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
171 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
172 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
173 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
174 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
175 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
176 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
177 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
178 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
179 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
180 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
181 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
182 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
183 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
184 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
185 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
186 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
187 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
188 SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
189 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
190
191 /* for performance, we bypass ipsec until a security policy is set */
192 int ipsec_bypass = 1;
193 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass, 0, "");
194
195 /*
196 * NAT Traversal requires a UDP port for encapsulation,
197 * esp_udp_encap_port controls which port is used. Racoon
198 * must set this port to the port racoon is using locally
199 * for nat traversal.
200 */
201 SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
202 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
203
204 struct ipsecstat ipsec6stat;
205 int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
206 int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
207 int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
208 int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
209 struct secpolicy ip6_def_policy;
210 int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
211 int ip6_esp_randpad = -1;
212
213 /* net.inet6.ipsec6 */
214 SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
215 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
216 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
217 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
218 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
219 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
220 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
221 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
222 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
223 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
224 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
225 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
226 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
227 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
228 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
229 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
230 SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
231 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
232
233 SYSCTL_DECL(_net_link_generic_system);
234
235 struct ipsec_wake_pkt_info ipsec_wake_pkt;
236
237 static int ipsec_setspidx_interface(struct secpolicyindex *, u_int8_t, struct mbuf *,
238 int, int, int);
239 static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int8_t, u_int,
240 struct mbuf *, int);
241 static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
242 static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
243 static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
244 static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
245 static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
246 static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
247 static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
248 static struct inpcbpolicy *ipsec_newpcbpolicy(void);
249 static void ipsec_delpcbpolicy(struct inpcbpolicy *);
250 static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
251 static int ipsec_set_policy(struct secpolicy **pcb_sp,
252 int optname, caddr_t request, size_t len, int priv);
253 static void vshiftl(unsigned char *, int, size_t);
254 static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
255 static int ipsec64_encapsulate(struct mbuf *, struct secasvar *);
256 static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
257 static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
258 static struct ipsec_tag *ipsec_addaux(struct mbuf *);
259 static struct ipsec_tag *ipsec_findaux(struct mbuf *);
260 static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
261 int ipsec_send_natt_keepalive(struct secasvar *sav);
262 bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
263
264 extern bool IOPMCopySleepWakeUUIDKey(char *, size_t);
265
266 typedef IOReturn (*IOServiceInterestHandler)( void * target, void * refCon,
267 UInt32 messageType, void * provider,
268 void * messageArgument, vm_size_t argSize );
269 extern void *registerSleepWakeInterest(IOServiceInterestHandler, void *, void *);
270
271 static int
272 sysctl_def_policy SYSCTL_HANDLER_ARGS
273 {
274 int new_policy = ip4_def_policy.policy;
275 int error = sysctl_handle_int(oidp, &new_policy, 0, req);
276
277 #pragma unused(arg1, arg2)
278 if (error == 0) {
279 if (new_policy != IPSEC_POLICY_NONE &&
280 new_policy != IPSEC_POLICY_DISCARD) {
281 return EINVAL;
282 }
283 ip4_def_policy.policy = new_policy;
284
285 /* Turn off the bypass if the default security policy changes */
286 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
287 ipsec_bypass = 0;
288 }
289 }
290
291 return error;
292 }
293
294 /*
295 * For OUTBOUND packet having a socket. Searching SPD for packet,
296 * and return a pointer to SP.
297 * OUT: NULL: no apropreate SP found, the following value is set to error.
298 * 0 : bypass
299 * EACCES : discard packet.
300 * ENOENT : ipsec_acquire() in progress, maybe.
301 * others : error occurred.
302 * others: a pointer to SP
303 *
304 * NOTE: IPv6 mapped adddress concern is implemented here.
305 */
306 struct secpolicy *
307 ipsec4_getpolicybysock(struct mbuf *m,
308 u_int8_t dir,
309 struct socket *so,
310 int *error)
311 {
312 struct inpcbpolicy *pcbsp = NULL;
313 struct secpolicy *currsp = NULL; /* policy on socket */
314 struct secpolicy *kernsp = NULL; /* policy on kernel */
315
316 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
317 /* sanity check */
318 if (m == NULL || so == NULL || error == NULL) {
319 panic("ipsec4_getpolicybysock: NULL pointer was passed.\n");
320 }
321
322 if (so->so_pcb == NULL) {
323 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
324 return ipsec4_getpolicybyaddr(m, dir, 0, error);
325 }
326
327 switch (SOCK_DOM(so)) {
328 case PF_INET:
329 pcbsp = sotoinpcb(so)->inp_sp;
330 break;
331 case PF_INET6:
332 pcbsp = sotoin6pcb(so)->in6p_sp;
333 break;
334 }
335
336 if (!pcbsp) {
337 /* Socket has not specified an IPSEC policy */
338 return ipsec4_getpolicybyaddr(m, dir, 0, error);
339 }
340
341 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0, 0, 0, 0, 0);
342
343 switch (SOCK_DOM(so)) {
344 case PF_INET:
345 /* set spidx in pcb */
346 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
347 break;
348 case PF_INET6:
349 /* set spidx in pcb */
350 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
351 break;
352 default:
353 panic("ipsec4_getpolicybysock: unsupported address family\n");
354 }
355 if (*error) {
356 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1, *error, 0, 0, 0);
357 return NULL;
358 }
359
360 /* sanity check */
361 if (pcbsp == NULL) {
362 panic("ipsec4_getpolicybysock: pcbsp is NULL.\n");
363 }
364
365 switch (dir) {
366 case IPSEC_DIR_INBOUND:
367 currsp = pcbsp->sp_in;
368 break;
369 case IPSEC_DIR_OUTBOUND:
370 currsp = pcbsp->sp_out;
371 break;
372 default:
373 panic("ipsec4_getpolicybysock: illegal direction.\n");
374 }
375
376 /* sanity check */
377 if (currsp == NULL) {
378 panic("ipsec4_getpolicybysock: currsp is NULL.\n");
379 }
380
381 /* when privilieged socket */
382 if (pcbsp->priv) {
383 switch (currsp->policy) {
384 case IPSEC_POLICY_BYPASS:
385 lck_mtx_lock(sadb_mutex);
386 currsp->refcnt++;
387 lck_mtx_unlock(sadb_mutex);
388 *error = 0;
389 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2, *error, 0, 0, 0);
390 return currsp;
391
392 case IPSEC_POLICY_ENTRUST:
393 /* look for a policy in SPD */
394 kernsp = key_allocsp(&currsp->spidx, dir);
395
396 /* SP found */
397 if (kernsp != NULL) {
398 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
399 printf("DP ipsec4_getpolicybysock called "
400 "to allocate SP:0x%llx\n",
401 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
402 *error = 0;
403 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3, *error, 0, 0, 0);
404 return kernsp;
405 }
406
407 /* no SP found */
408 lck_mtx_lock(sadb_mutex);
409 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
410 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
411 ipseclog((LOG_INFO,
412 "fixed system default policy: %d->%d\n",
413 ip4_def_policy.policy, IPSEC_POLICY_NONE));
414 ip4_def_policy.policy = IPSEC_POLICY_NONE;
415 }
416 ip4_def_policy.refcnt++;
417 lck_mtx_unlock(sadb_mutex);
418 *error = 0;
419 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4, *error, 0, 0, 0);
420 return &ip4_def_policy;
421
422 case IPSEC_POLICY_IPSEC:
423 lck_mtx_lock(sadb_mutex);
424 currsp->refcnt++;
425 lck_mtx_unlock(sadb_mutex);
426 *error = 0;
427 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5, *error, 0, 0, 0);
428 return currsp;
429
430 default:
431 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
432 "Invalid policy for PCB %d\n", currsp->policy));
433 *error = EINVAL;
434 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6, *error, 0, 0, 0);
435 return NULL;
436 }
437 /* NOTREACHED */
438 }
439
440 /* when non-privilieged socket */
441 /* look for a policy in SPD */
442 kernsp = key_allocsp(&currsp->spidx, dir);
443
444 /* SP found */
445 if (kernsp != NULL) {
446 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
447 printf("DP ipsec4_getpolicybysock called "
448 "to allocate SP:0x%llx\n",
449 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
450 *error = 0;
451 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7, *error, 0, 0, 0);
452 return kernsp;
453 }
454
455 /* no SP found */
456 switch (currsp->policy) {
457 case IPSEC_POLICY_BYPASS:
458 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
459 "Illegal policy for non-priviliged defined %d\n",
460 currsp->policy));
461 *error = EINVAL;
462 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8, *error, 0, 0, 0);
463 return NULL;
464
465 case IPSEC_POLICY_ENTRUST:
466 lck_mtx_lock(sadb_mutex);
467 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
468 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
469 ipseclog((LOG_INFO,
470 "fixed system default policy: %d->%d\n",
471 ip4_def_policy.policy, IPSEC_POLICY_NONE));
472 ip4_def_policy.policy = IPSEC_POLICY_NONE;
473 }
474 ip4_def_policy.refcnt++;
475 lck_mtx_unlock(sadb_mutex);
476 *error = 0;
477 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9, *error, 0, 0, 0);
478 return &ip4_def_policy;
479
480 case IPSEC_POLICY_IPSEC:
481 lck_mtx_lock(sadb_mutex);
482 currsp->refcnt++;
483 lck_mtx_unlock(sadb_mutex);
484 *error = 0;
485 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10, *error, 0, 0, 0);
486 return currsp;
487
488 default:
489 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
490 "Invalid policy for PCB %d\n", currsp->policy));
491 *error = EINVAL;
492 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11, *error, 0, 0, 0);
493 return NULL;
494 }
495 /* NOTREACHED */
496 }
497
498 /*
499 * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet,
500 * and return a pointer to SP.
501 * OUT: positive: a pointer to the entry for security policy leaf matched.
502 * NULL: no apropreate SP found, the following value is set to error.
503 * 0 : bypass
504 * EACCES : discard packet.
505 * ENOENT : ipsec_acquire() in progress, maybe.
506 * others : error occurred.
507 */
508 struct secpolicy *
509 ipsec4_getpolicybyaddr(struct mbuf *m,
510 u_int8_t dir,
511 int flag,
512 int *error)
513 {
514 struct secpolicy *sp = NULL;
515
516 if (ipsec_bypass != 0) {
517 return 0;
518 }
519
520 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
521
522 /* sanity check */
523 if (m == NULL || error == NULL) {
524 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n");
525 }
526 {
527 struct secpolicyindex spidx;
528
529 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
530 bzero(&spidx, sizeof(spidx));
531
532 /* make a index to look for a policy */
533 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
534 (flag & IP_FORWARDING) ? 0 : 1);
535
536 if (*error != 0) {
537 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, *error, 0, 0, 0);
538 return NULL;
539 }
540
541 sp = key_allocsp(&spidx, dir);
542 }
543
544 /* SP found */
545 if (sp != NULL) {
546 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
547 printf("DP ipsec4_getpolicybyaddr called "
548 "to allocate SP:0x%llx\n",
549 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
550 *error = 0;
551 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
552 return sp;
553 }
554
555 /* no SP found */
556 lck_mtx_lock(sadb_mutex);
557 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
558 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
559 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
560 ip4_def_policy.policy,
561 IPSEC_POLICY_NONE));
562 ip4_def_policy.policy = IPSEC_POLICY_NONE;
563 }
564 ip4_def_policy.refcnt++;
565 lck_mtx_unlock(sadb_mutex);
566 *error = 0;
567 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3, *error, 0, 0, 0);
568 return &ip4_def_policy;
569 }
570
571 /* Match with bound interface rather than src addr.
572 * Unlike getpolicybyaddr, do not set the default policy.
573 * Return 0 if should continue processing, or -1 if packet
574 * should be dropped.
575 */
576 int
577 ipsec4_getpolicybyinterface(struct mbuf *m,
578 u_int8_t dir,
579 int *flags,
580 struct ip_out_args *ipoa,
581 struct secpolicy **sp)
582 {
583 struct secpolicyindex spidx;
584 int error = 0;
585
586 if (ipsec_bypass != 0) {
587 return 0;
588 }
589
590 /* Sanity check */
591 if (m == NULL || ipoa == NULL || sp == NULL) {
592 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n");
593 }
594
595 if (ipoa->ipoa_boundif == IFSCOPE_NONE) {
596 return 0;
597 }
598
599 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
600 bzero(&spidx, sizeof(spidx));
601
602 /* make a index to look for a policy */
603 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
604 ipoa->ipoa_boundif, 4);
605
606 if (error != 0) {
607 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
608 return 0;
609 }
610
611 *sp = key_allocsp(&spidx, dir);
612
613 /* Return SP, whether NULL or not */
614 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
615 if ((*sp)->ipsec_if == NULL) {
616 /* Invalid to capture on an interface without redirect */
617 key_freesp(*sp, KEY_SADB_UNLOCKED);
618 *sp = NULL;
619 return -1;
620 } else if ((*sp)->disabled) {
621 /* Disabled policies go in the clear */
622 key_freesp(*sp, KEY_SADB_UNLOCKED);
623 *sp = NULL;
624 *flags |= IP_NOIPSEC; /* Avoid later IPsec check */
625 } else {
626 /* If policy is enabled, redirect to ipsec interface */
627 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
628 }
629 }
630
631 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
632
633 return 0;
634 }
635
636
637 /*
638 * For OUTBOUND packet having a socket. Searching SPD for packet,
639 * and return a pointer to SP.
640 * OUT: NULL: no apropreate SP found, the following value is set to error.
641 * 0 : bypass
642 * EACCES : discard packet.
643 * ENOENT : ipsec_acquire() in progress, maybe.
644 * others : error occurred.
645 * others: a pointer to SP
646 */
647 struct secpolicy *
648 ipsec6_getpolicybysock(struct mbuf *m,
649 u_int8_t dir,
650 struct socket *so,
651 int *error)
652 {
653 struct inpcbpolicy *pcbsp = NULL;
654 struct secpolicy *currsp = NULL; /* policy on socket */
655 struct secpolicy *kernsp = NULL; /* policy on kernel */
656
657 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
658
659 /* sanity check */
660 if (m == NULL || so == NULL || error == NULL) {
661 panic("ipsec6_getpolicybysock: NULL pointer was passed.\n");
662 }
663
664 #if DIAGNOSTIC
665 if (SOCK_DOM(so) != PF_INET6) {
666 panic("ipsec6_getpolicybysock: socket domain != inet6\n");
667 }
668 #endif
669
670 pcbsp = sotoin6pcb(so)->in6p_sp;
671
672 if (!pcbsp) {
673 return ipsec6_getpolicybyaddr(m, dir, 0, error);
674 }
675
676 /* set spidx in pcb */
677 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
678
679 /* sanity check */
680 if (pcbsp == NULL) {
681 panic("ipsec6_getpolicybysock: pcbsp is NULL.\n");
682 }
683
684 switch (dir) {
685 case IPSEC_DIR_INBOUND:
686 currsp = pcbsp->sp_in;
687 break;
688 case IPSEC_DIR_OUTBOUND:
689 currsp = pcbsp->sp_out;
690 break;
691 default:
692 panic("ipsec6_getpolicybysock: illegal direction.\n");
693 }
694
695 /* sanity check */
696 if (currsp == NULL) {
697 panic("ipsec6_getpolicybysock: currsp is NULL.\n");
698 }
699
700 /* when privilieged socket */
701 if (pcbsp->priv) {
702 switch (currsp->policy) {
703 case IPSEC_POLICY_BYPASS:
704 lck_mtx_lock(sadb_mutex);
705 currsp->refcnt++;
706 lck_mtx_unlock(sadb_mutex);
707 *error = 0;
708 return currsp;
709
710 case IPSEC_POLICY_ENTRUST:
711 /* look for a policy in SPD */
712 kernsp = key_allocsp(&currsp->spidx, dir);
713
714 /* SP found */
715 if (kernsp != NULL) {
716 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
717 printf("DP ipsec6_getpolicybysock called "
718 "to allocate SP:0x%llx\n",
719 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
720 *error = 0;
721 return kernsp;
722 }
723
724 /* no SP found */
725 lck_mtx_lock(sadb_mutex);
726 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
727 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
728 ipseclog((LOG_INFO,
729 "fixed system default policy: %d->%d\n",
730 ip6_def_policy.policy, IPSEC_POLICY_NONE));
731 ip6_def_policy.policy = IPSEC_POLICY_NONE;
732 }
733 ip6_def_policy.refcnt++;
734 lck_mtx_unlock(sadb_mutex);
735 *error = 0;
736 return &ip6_def_policy;
737
738 case IPSEC_POLICY_IPSEC:
739 lck_mtx_lock(sadb_mutex);
740 currsp->refcnt++;
741 lck_mtx_unlock(sadb_mutex);
742 *error = 0;
743 return currsp;
744
745 default:
746 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
747 "Invalid policy for PCB %d\n", currsp->policy));
748 *error = EINVAL;
749 return NULL;
750 }
751 /* NOTREACHED */
752 }
753
754 /* when non-privilieged socket */
755 /* look for a policy in SPD */
756 kernsp = key_allocsp(&currsp->spidx, dir);
757
758 /* SP found */
759 if (kernsp != NULL) {
760 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
761 printf("DP ipsec6_getpolicybysock called "
762 "to allocate SP:0x%llx\n",
763 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
764 *error = 0;
765 return kernsp;
766 }
767
768 /* no SP found */
769 switch (currsp->policy) {
770 case IPSEC_POLICY_BYPASS:
771 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
772 "Illegal policy for non-priviliged defined %d\n",
773 currsp->policy));
774 *error = EINVAL;
775 return NULL;
776
777 case IPSEC_POLICY_ENTRUST:
778 lck_mtx_lock(sadb_mutex);
779 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
780 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
781 ipseclog((LOG_INFO,
782 "fixed system default policy: %d->%d\n",
783 ip6_def_policy.policy, IPSEC_POLICY_NONE));
784 ip6_def_policy.policy = IPSEC_POLICY_NONE;
785 }
786 ip6_def_policy.refcnt++;
787 lck_mtx_unlock(sadb_mutex);
788 *error = 0;
789 return &ip6_def_policy;
790
791 case IPSEC_POLICY_IPSEC:
792 lck_mtx_lock(sadb_mutex);
793 currsp->refcnt++;
794 lck_mtx_unlock(sadb_mutex);
795 *error = 0;
796 return currsp;
797
798 default:
799 ipseclog((LOG_ERR,
800 "ipsec6_policybysock: Invalid policy for PCB %d\n",
801 currsp->policy));
802 *error = EINVAL;
803 return NULL;
804 }
805 /* NOTREACHED */
806 }
807
808 /*
809 * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet,
810 * and return a pointer to SP.
811 * `flag' means that packet is to be forwarded whether or not.
812 * flag = 1: forwad
813 * OUT: positive: a pointer to the entry for security policy leaf matched.
814 * NULL: no apropreate SP found, the following value is set to error.
815 * 0 : bypass
816 * EACCES : discard packet.
817 * ENOENT : ipsec_acquire() in progress, maybe.
818 * others : error occurred.
819 */
820 #ifndef IP_FORWARDING
821 #define IP_FORWARDING 1
822 #endif
823
824 struct secpolicy *
825 ipsec6_getpolicybyaddr(struct mbuf *m,
826 u_int8_t dir,
827 int flag,
828 int *error)
829 {
830 struct secpolicy *sp = NULL;
831
832 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
833
834 /* sanity check */
835 if (m == NULL || error == NULL) {
836 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n");
837 }
838
839 {
840 struct secpolicyindex spidx;
841
842 bzero(&spidx, sizeof(spidx));
843
844 /* make a index to look for a policy */
845 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
846 (flag & IP_FORWARDING) ? 0 : 1);
847
848 if (*error != 0) {
849 return NULL;
850 }
851
852 sp = key_allocsp(&spidx, dir);
853 }
854
855 /* SP found */
856 if (sp != NULL) {
857 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
858 printf("DP ipsec6_getpolicybyaddr called "
859 "to allocate SP:0x%llx\n",
860 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
861 *error = 0;
862 return sp;
863 }
864
865 /* no SP found */
866 lck_mtx_lock(sadb_mutex);
867 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
868 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
869 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
870 ip6_def_policy.policy, IPSEC_POLICY_NONE));
871 ip6_def_policy.policy = IPSEC_POLICY_NONE;
872 }
873 ip6_def_policy.refcnt++;
874 lck_mtx_unlock(sadb_mutex);
875 *error = 0;
876 return &ip6_def_policy;
877 }
878
879 /* Match with bound interface rather than src addr.
880 * Unlike getpolicybyaddr, do not set the default policy.
881 * Return 0 if should continue processing, or -1 if packet
882 * should be dropped.
883 */
884 int
885 ipsec6_getpolicybyinterface(struct mbuf *m,
886 u_int8_t dir,
887 int flag,
888 struct ip6_out_args *ip6oap,
889 int *noipsec,
890 struct secpolicy **sp)
891 {
892 struct secpolicyindex spidx;
893 int error = 0;
894
895 if (ipsec_bypass != 0) {
896 return 0;
897 }
898
899 /* Sanity check */
900 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) {
901 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n");
902 }
903
904 *noipsec = 0;
905
906 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) {
907 return 0;
908 }
909
910 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
911 bzero(&spidx, sizeof(spidx));
912
913 /* make a index to look for a policy */
914 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
915 ip6oap->ip6oa_boundif, 6);
916
917 if (error != 0) {
918 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
919 return 0;
920 }
921
922 *sp = key_allocsp(&spidx, dir);
923
924 /* Return SP, whether NULL or not */
925 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
926 if ((*sp)->ipsec_if == NULL) {
927 /* Invalid to capture on an interface without redirect */
928 key_freesp(*sp, KEY_SADB_UNLOCKED);
929 *sp = NULL;
930 return -1;
931 } else if ((*sp)->disabled) {
932 /* Disabled policies go in the clear */
933 key_freesp(*sp, KEY_SADB_UNLOCKED);
934 *sp = NULL;
935 *noipsec = 1; /* Avoid later IPsec check */
936 } else {
937 /* If policy is enabled, redirect to ipsec interface */
938 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
939 }
940 }
941
942 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
943
944 return 0;
945 }
946
947 /*
948 * set IP address into spidx from mbuf.
949 * When Forwarding packet and ICMP echo reply, this function is used.
950 *
951 * IN: get the followings from mbuf.
952 * protocol family, src, dst, next protocol
953 * OUT:
954 * 0: success.
955 * other: failure, and set errno.
956 */
957 static int
958 ipsec_setspidx_mbuf(
959 struct secpolicyindex *spidx,
960 u_int8_t dir,
961 __unused u_int family,
962 struct mbuf *m,
963 int needport)
964 {
965 int error;
966
967 /* sanity check */
968 if (spidx == NULL || m == NULL) {
969 panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n");
970 }
971
972 bzero(spidx, sizeof(*spidx));
973
974 error = ipsec_setspidx(m, spidx, needport, 0);
975 if (error) {
976 goto bad;
977 }
978 spidx->dir = dir;
979
980 return 0;
981
982 bad:
983 /* XXX initialize */
984 bzero(spidx, sizeof(*spidx));
985 return EINVAL;
986 }
987
988 static int
989 ipsec_setspidx_interface(
990 struct secpolicyindex *spidx,
991 u_int8_t dir,
992 struct mbuf *m,
993 int needport,
994 int ifindex,
995 int ip_version)
996 {
997 int error;
998
999 /* sanity check */
1000 if (spidx == NULL || m == NULL) {
1001 panic("ipsec_setspidx_interface: NULL pointer was passed.\n");
1002 }
1003
1004 bzero(spidx, sizeof(*spidx));
1005
1006 error = ipsec_setspidx(m, spidx, needport, ip_version);
1007 if (error) {
1008 goto bad;
1009 }
1010 spidx->dir = dir;
1011
1012 if (ifindex != 0) {
1013 ifnet_head_lock_shared();
1014 spidx->internal_if = ifindex2ifnet[ifindex];
1015 ifnet_head_done();
1016 } else {
1017 spidx->internal_if = NULL;
1018 }
1019
1020 return 0;
1021
1022 bad:
1023 return EINVAL;
1024 }
1025
1026 static int
1027 ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1028 {
1029 struct secpolicyindex *spidx;
1030 int error;
1031
1032 if (ipsec_bypass != 0) {
1033 return 0;
1034 }
1035
1036 /* sanity check */
1037 if (pcb == NULL) {
1038 panic("ipsec4_setspidx_inpcb: no PCB found.\n");
1039 }
1040 if (pcb->inp_sp == NULL) {
1041 panic("ipsec4_setspidx_inpcb: no inp_sp found.\n");
1042 }
1043 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) {
1044 panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n");
1045 }
1046
1047 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1048 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1049
1050 spidx = &pcb->inp_sp->sp_in->spidx;
1051 error = ipsec_setspidx(m, spidx, 1, 0);
1052 if (error) {
1053 goto bad;
1054 }
1055 spidx->dir = IPSEC_DIR_INBOUND;
1056
1057 spidx = &pcb->inp_sp->sp_out->spidx;
1058 error = ipsec_setspidx(m, spidx, 1, 0);
1059 if (error) {
1060 goto bad;
1061 }
1062 spidx->dir = IPSEC_DIR_OUTBOUND;
1063
1064 return 0;
1065
1066 bad:
1067 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1068 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1069 return error;
1070 }
1071
1072 static int
1073 ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1074 {
1075 struct secpolicyindex *spidx;
1076 int error;
1077
1078 /* sanity check */
1079 if (pcb == NULL) {
1080 panic("ipsec6_setspidx_in6pcb: no PCB found.\n");
1081 }
1082 if (pcb->in6p_sp == NULL) {
1083 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n");
1084 }
1085 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) {
1086 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n");
1087 }
1088
1089 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1090 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1091
1092 spidx = &pcb->in6p_sp->sp_in->spidx;
1093 error = ipsec_setspidx(m, spidx, 1, 0);
1094 if (error) {
1095 goto bad;
1096 }
1097 spidx->dir = IPSEC_DIR_INBOUND;
1098
1099 spidx = &pcb->in6p_sp->sp_out->spidx;
1100 error = ipsec_setspidx(m, spidx, 1, 0);
1101 if (error) {
1102 goto bad;
1103 }
1104 spidx->dir = IPSEC_DIR_OUTBOUND;
1105
1106 return 0;
1107
1108 bad:
1109 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1110 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1111 return error;
1112 }
1113
1114 /*
1115 * configure security policy index (src/dst/proto/sport/dport)
1116 * by looking at the content of mbuf.
1117 * the caller is responsible for error recovery (like clearing up spidx).
1118 */
1119 static int
1120 ipsec_setspidx(struct mbuf *m,
1121 struct secpolicyindex *spidx,
1122 int needport,
1123 int force_ip_version)
1124 {
1125 struct ip *ip = NULL;
1126 struct ip ipbuf;
1127 u_int v;
1128 struct mbuf *n;
1129 int len;
1130 int error;
1131
1132 if (m == NULL) {
1133 panic("ipsec_setspidx: m == 0 passed.\n");
1134 }
1135
1136 /*
1137 * validate m->m_pkthdr.len. we see incorrect length if we
1138 * mistakenly call this function with inconsistent mbuf chain
1139 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1140 */
1141 len = 0;
1142 for (n = m; n; n = n->m_next) {
1143 len += n->m_len;
1144 }
1145 if (m->m_pkthdr.len != len) {
1146 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1147 printf("ipsec_setspidx: "
1148 "total of m_len(%d) != pkthdr.len(%d), "
1149 "ignored.\n",
1150 len, m->m_pkthdr.len));
1151 return EINVAL;
1152 }
1153
1154 if (m->m_pkthdr.len < sizeof(struct ip)) {
1155 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1156 printf("ipsec_setspidx: "
1157 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1158 m->m_pkthdr.len));
1159 return EINVAL;
1160 }
1161
1162 if (m->m_len >= sizeof(*ip)) {
1163 ip = mtod(m, struct ip *);
1164 } else {
1165 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1166 ip = &ipbuf;
1167 }
1168
1169 if (force_ip_version) {
1170 v = force_ip_version;
1171 } else {
1172 #ifdef _IP_VHL
1173 v = _IP_VHL_V(ip->ip_vhl);
1174 #else
1175 v = ip->ip_v;
1176 #endif
1177 }
1178 switch (v) {
1179 case 4:
1180 error = ipsec4_setspidx_ipaddr(m, spidx);
1181 if (error) {
1182 return error;
1183 }
1184 ipsec4_get_ulp(m, spidx, needport);
1185 return 0;
1186 case 6:
1187 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1188 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1189 printf("ipsec_setspidx: "
1190 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1191 "ignored.\n", m->m_pkthdr.len));
1192 return EINVAL;
1193 }
1194 error = ipsec6_setspidx_ipaddr(m, spidx);
1195 if (error) {
1196 return error;
1197 }
1198 ipsec6_get_ulp(m, spidx, needport);
1199 return 0;
1200 default:
1201 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1202 printf("ipsec_setspidx: "
1203 "unknown IP version %u, ignored.\n", v));
1204 return EINVAL;
1205 }
1206 }
1207
1208 static void
1209 ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1210 {
1211 struct ip ip;
1212 struct ip6_ext ip6e;
1213 u_int8_t nxt;
1214 int off;
1215 struct tcphdr th;
1216 struct udphdr uh;
1217
1218 /* sanity check */
1219 if (m == NULL) {
1220 panic("ipsec4_get_ulp: NULL pointer was passed.\n");
1221 }
1222 if (m->m_pkthdr.len < sizeof(ip)) {
1223 panic("ipsec4_get_ulp: too short\n");
1224 }
1225
1226 /* set default */
1227 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1228 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1229 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1230
1231 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1232 /* ip_input() flips it into host endian XXX need more checking */
1233 if (ip.ip_off & (IP_MF | IP_OFFMASK)) {
1234 return;
1235 }
1236
1237 nxt = ip.ip_p;
1238 #ifdef _IP_VHL
1239 off = _IP_VHL_HL(ip->ip_vhl) << 2;
1240 #else
1241 off = ip.ip_hl << 2;
1242 #endif
1243 while (off < m->m_pkthdr.len) {
1244 switch (nxt) {
1245 case IPPROTO_TCP:
1246 spidx->ul_proto = nxt;
1247 if (!needport) {
1248 return;
1249 }
1250 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1251 return;
1252 }
1253 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1254 ((struct sockaddr_in *)&spidx->src)->sin_port =
1255 th.th_sport;
1256 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1257 th.th_dport;
1258 return;
1259 case IPPROTO_UDP:
1260 spidx->ul_proto = nxt;
1261 if (!needport) {
1262 return;
1263 }
1264 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1265 return;
1266 }
1267 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1268 ((struct sockaddr_in *)&spidx->src)->sin_port =
1269 uh.uh_sport;
1270 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1271 uh.uh_dport;
1272 return;
1273 case IPPROTO_AH:
1274 if (off + sizeof(ip6e) > m->m_pkthdr.len) {
1275 return;
1276 }
1277 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1278 off += (ip6e.ip6e_len + 2) << 2;
1279 nxt = ip6e.ip6e_nxt;
1280 break;
1281 case IPPROTO_ICMP:
1282 default:
1283 /* XXX intermediate headers??? */
1284 spidx->ul_proto = nxt;
1285 return;
1286 }
1287 }
1288 }
1289
1290 /* assumes that m is sane */
1291 static int
1292 ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1293 {
1294 struct ip *ip = NULL;
1295 struct ip ipbuf;
1296 struct sockaddr_in *sin;
1297
1298 if (m->m_len >= sizeof(*ip)) {
1299 ip = mtod(m, struct ip *);
1300 } else {
1301 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1302 ip = &ipbuf;
1303 }
1304
1305 sin = (struct sockaddr_in *)&spidx->src;
1306 bzero(sin, sizeof(*sin));
1307 sin->sin_family = AF_INET;
1308 sin->sin_len = sizeof(struct sockaddr_in);
1309 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1310 spidx->prefs = sizeof(struct in_addr) << 3;
1311
1312 sin = (struct sockaddr_in *)&spidx->dst;
1313 bzero(sin, sizeof(*sin));
1314 sin->sin_family = AF_INET;
1315 sin->sin_len = sizeof(struct sockaddr_in);
1316 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1317 spidx->prefd = sizeof(struct in_addr) << 3;
1318
1319 return 0;
1320 }
1321
1322 static void
1323 ipsec6_get_ulp(struct mbuf *m,
1324 struct secpolicyindex *spidx,
1325 int needport)
1326 {
1327 int off, nxt;
1328 struct tcphdr th;
1329 struct udphdr uh;
1330
1331 /* sanity check */
1332 if (m == NULL) {
1333 panic("ipsec6_get_ulp: NULL pointer was passed.\n");
1334 }
1335
1336 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1337 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1338
1339 /* set default */
1340 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1341 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1342 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1343
1344 nxt = -1;
1345 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1346 if (off < 0 || m->m_pkthdr.len < off) {
1347 return;
1348 }
1349
1350 VERIFY(nxt <= UINT8_MAX);
1351 switch (nxt) {
1352 case IPPROTO_TCP:
1353 spidx->ul_proto = (u_int8_t)nxt;
1354 if (!needport) {
1355 break;
1356 }
1357 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1358 break;
1359 }
1360 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1361 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1362 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1363 break;
1364 case IPPROTO_UDP:
1365 spidx->ul_proto = (u_int8_t)nxt;
1366 if (!needport) {
1367 break;
1368 }
1369 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1370 break;
1371 }
1372 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1373 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1374 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1375 break;
1376 case IPPROTO_ICMPV6:
1377 default:
1378 /* XXX intermediate headers??? */
1379 spidx->ul_proto = (u_int8_t)nxt;
1380 break;
1381 }
1382 }
1383
1384 /* assumes that m is sane */
1385 static int
1386 ipsec6_setspidx_ipaddr(struct mbuf *m,
1387 struct secpolicyindex *spidx)
1388 {
1389 struct ip6_hdr *ip6 = NULL;
1390 struct ip6_hdr ip6buf;
1391 struct sockaddr_in6 *sin6;
1392
1393 if (m->m_len >= sizeof(*ip6)) {
1394 ip6 = mtod(m, struct ip6_hdr *);
1395 } else {
1396 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1397 ip6 = &ip6buf;
1398 }
1399
1400 sin6 = (struct sockaddr_in6 *)&spidx->src;
1401 bzero(sin6, sizeof(*sin6));
1402 sin6->sin6_family = AF_INET6;
1403 sin6->sin6_len = sizeof(struct sockaddr_in6);
1404 bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1405 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1406 sin6->sin6_addr.s6_addr16[1] = 0;
1407 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1408 }
1409 spidx->prefs = sizeof(struct in6_addr) << 3;
1410
1411 sin6 = (struct sockaddr_in6 *)&spidx->dst;
1412 bzero(sin6, sizeof(*sin6));
1413 sin6->sin6_family = AF_INET6;
1414 sin6->sin6_len = sizeof(struct sockaddr_in6);
1415 bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1416 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1417 sin6->sin6_addr.s6_addr16[1] = 0;
1418 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1419 }
1420 spidx->prefd = sizeof(struct in6_addr) << 3;
1421
1422 return 0;
1423 }
1424
1425 static struct inpcbpolicy *
1426 ipsec_newpcbpolicy(void)
1427 {
1428 struct inpcbpolicy *p;
1429
1430 p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK);
1431 return p;
1432 }
1433
1434 static void
1435 ipsec_delpcbpolicy(struct inpcbpolicy *p)
1436 {
1437 FREE(p, M_SECA);
1438 }
1439
1440 /* initialize policy in PCB */
1441 int
1442 ipsec_init_policy(struct socket *so,
1443 struct inpcbpolicy **pcb_sp)
1444 {
1445 struct inpcbpolicy *new;
1446
1447 /* sanity check. */
1448 if (so == NULL || pcb_sp == NULL) {
1449 panic("ipsec_init_policy: NULL pointer was passed.\n");
1450 }
1451
1452 new = ipsec_newpcbpolicy();
1453 if (new == NULL) {
1454 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1455 return ENOBUFS;
1456 }
1457 bzero(new, sizeof(*new));
1458
1459 #ifdef __APPLE__
1460 if (kauth_cred_issuser(so->so_cred))
1461 #else
1462 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1463 #endif
1464 { new->priv = 1;} else {
1465 new->priv = 0;
1466 }
1467
1468 if ((new->sp_in = key_newsp()) == NULL) {
1469 ipsec_delpcbpolicy(new);
1470 return ENOBUFS;
1471 }
1472 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1473 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1474
1475 if ((new->sp_out = key_newsp()) == NULL) {
1476 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1477 ipsec_delpcbpolicy(new);
1478 return ENOBUFS;
1479 }
1480 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1481 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1482
1483 *pcb_sp = new;
1484
1485 return 0;
1486 }
1487
1488 /* copy old ipsec policy into new */
1489 int
1490 ipsec_copy_policy(struct inpcbpolicy *old,
1491 struct inpcbpolicy *new)
1492 {
1493 struct secpolicy *sp;
1494
1495 if (ipsec_bypass != 0) {
1496 return 0;
1497 }
1498
1499 sp = ipsec_deepcopy_policy(old->sp_in);
1500 if (sp) {
1501 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1502 new->sp_in = sp;
1503 } else {
1504 return ENOBUFS;
1505 }
1506
1507 sp = ipsec_deepcopy_policy(old->sp_out);
1508 if (sp) {
1509 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1510 new->sp_out = sp;
1511 } else {
1512 return ENOBUFS;
1513 }
1514
1515 new->priv = old->priv;
1516
1517 return 0;
1518 }
1519
1520 /* deep-copy a policy in PCB */
1521 static struct secpolicy *
1522 ipsec_deepcopy_policy(struct secpolicy *src)
1523 {
1524 struct ipsecrequest *newchain = NULL;
1525 struct ipsecrequest *p;
1526 struct ipsecrequest **q;
1527 struct ipsecrequest *r;
1528 struct secpolicy *dst;
1529
1530 if (src == NULL) {
1531 return NULL;
1532 }
1533 dst = key_newsp();
1534 if (dst == NULL) {
1535 return NULL;
1536 }
1537
1538 /*
1539 * deep-copy IPsec request chain. This is required since struct
1540 * ipsecrequest is not reference counted.
1541 */
1542 q = &newchain;
1543 for (p = src->req; p; p = p->next) {
1544 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1545 M_SECA, M_WAITOK | M_ZERO);
1546 if (*q == NULL) {
1547 goto fail;
1548 }
1549 (*q)->next = NULL;
1550
1551 (*q)->saidx.proto = p->saidx.proto;
1552 (*q)->saidx.mode = p->saidx.mode;
1553 (*q)->level = p->level;
1554 (*q)->saidx.reqid = p->saidx.reqid;
1555
1556 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1557 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1558
1559 (*q)->sp = dst;
1560
1561 q = &((*q)->next);
1562 }
1563
1564 dst->req = newchain;
1565 dst->state = src->state;
1566 dst->policy = src->policy;
1567 /* do not touch the refcnt fields */
1568
1569 return dst;
1570
1571 fail:
1572 for (p = newchain; p; p = r) {
1573 r = p->next;
1574 FREE(p, M_SECA);
1575 p = NULL;
1576 }
1577 key_freesp(dst, KEY_SADB_UNLOCKED);
1578 return NULL;
1579 }
1580
1581 /* set policy and ipsec request if present. */
1582 static int
1583 ipsec_set_policy(struct secpolicy **pcb_sp,
1584 __unused int optname,
1585 caddr_t request,
1586 size_t len,
1587 int priv)
1588 {
1589 struct sadb_x_policy *xpl;
1590 struct secpolicy *newsp = NULL;
1591 int error;
1592
1593 /* sanity check. */
1594 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) {
1595 return EINVAL;
1596 }
1597 if (len < sizeof(*xpl)) {
1598 return EINVAL;
1599 }
1600 xpl = (struct sadb_x_policy *)(void *)request;
1601
1602 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1603 printf("ipsec_set_policy: passed policy\n");
1604 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1605
1606 /* check policy type */
1607 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1608 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1609 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) {
1610 return EINVAL;
1611 }
1612
1613 /* check privileged socket */
1614 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
1615 return EACCES;
1616 }
1617
1618 /* allocation new SP entry */
1619 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) {
1620 return error;
1621 }
1622
1623 newsp->state = IPSEC_SPSTATE_ALIVE;
1624
1625 /* clear old SP and set new SP */
1626 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1627 *pcb_sp = newsp;
1628 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1629 printf("ipsec_set_policy: new policy\n");
1630 kdebug_secpolicy(newsp));
1631
1632 return 0;
1633 }
1634
1635 int
1636 ipsec4_set_policy(struct inpcb *inp,
1637 int optname,
1638 caddr_t request,
1639 size_t len,
1640 int priv)
1641 {
1642 struct sadb_x_policy *xpl;
1643 struct secpolicy **pcb_sp;
1644 int error = 0;
1645 struct sadb_x_policy xpl_aligned_buf;
1646 u_int8_t *xpl_unaligned;
1647
1648 /* sanity check. */
1649 if (inp == NULL || request == NULL) {
1650 return EINVAL;
1651 }
1652 if (len < sizeof(*xpl)) {
1653 return EINVAL;
1654 }
1655 xpl = (struct sadb_x_policy *)(void *)request;
1656
1657 /* This is a new mbuf allocated by soopt_getm() */
1658 if (IPSEC_IS_P2ALIGNED(xpl)) {
1659 xpl_unaligned = NULL;
1660 } else {
1661 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1662 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1663 xpl = (__typeof__(xpl)) & xpl_aligned_buf;
1664 }
1665
1666 if (inp->inp_sp == NULL) {
1667 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1668 if (error) {
1669 return error;
1670 }
1671 }
1672
1673 /* select direction */
1674 switch (xpl->sadb_x_policy_dir) {
1675 case IPSEC_DIR_INBOUND:
1676 pcb_sp = &inp->inp_sp->sp_in;
1677 break;
1678 case IPSEC_DIR_OUTBOUND:
1679 pcb_sp = &inp->inp_sp->sp_out;
1680 break;
1681 default:
1682 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1683 xpl->sadb_x_policy_dir));
1684 return EINVAL;
1685 }
1686
1687 /* turn bypass off */
1688 if (ipsec_bypass != 0) {
1689 ipsec_bypass = 0;
1690 }
1691
1692 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1693 }
1694
1695 /* delete policy in PCB */
1696 int
1697 ipsec4_delete_pcbpolicy(struct inpcb *inp)
1698 {
1699 /* sanity check. */
1700 if (inp == NULL) {
1701 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n");
1702 }
1703
1704 if (inp->inp_sp == NULL) {
1705 return 0;
1706 }
1707
1708 if (inp->inp_sp->sp_in != NULL) {
1709 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1710 inp->inp_sp->sp_in = NULL;
1711 }
1712
1713 if (inp->inp_sp->sp_out != NULL) {
1714 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1715 inp->inp_sp->sp_out = NULL;
1716 }
1717
1718 ipsec_delpcbpolicy(inp->inp_sp);
1719 inp->inp_sp = NULL;
1720
1721 return 0;
1722 }
1723
1724 int
1725 ipsec6_set_policy(struct in6pcb *in6p,
1726 int optname,
1727 caddr_t request,
1728 size_t len,
1729 int priv)
1730 {
1731 struct sadb_x_policy *xpl;
1732 struct secpolicy **pcb_sp;
1733 int error = 0;
1734 struct sadb_x_policy xpl_aligned_buf;
1735 u_int8_t *xpl_unaligned;
1736
1737 /* sanity check. */
1738 if (in6p == NULL || request == NULL) {
1739 return EINVAL;
1740 }
1741 if (len < sizeof(*xpl)) {
1742 return EINVAL;
1743 }
1744 xpl = (struct sadb_x_policy *)(void *)request;
1745
1746 /* This is a new mbuf allocated by soopt_getm() */
1747 if (IPSEC_IS_P2ALIGNED(xpl)) {
1748 xpl_unaligned = NULL;
1749 } else {
1750 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1751 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1752 xpl = (__typeof__(xpl)) & xpl_aligned_buf;
1753 }
1754
1755 if (in6p->in6p_sp == NULL) {
1756 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1757 if (error) {
1758 return error;
1759 }
1760 }
1761
1762 /* select direction */
1763 switch (xpl->sadb_x_policy_dir) {
1764 case IPSEC_DIR_INBOUND:
1765 pcb_sp = &in6p->in6p_sp->sp_in;
1766 break;
1767 case IPSEC_DIR_OUTBOUND:
1768 pcb_sp = &in6p->in6p_sp->sp_out;
1769 break;
1770 default:
1771 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1772 xpl->sadb_x_policy_dir));
1773 return EINVAL;
1774 }
1775
1776 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1777 }
1778
1779 int
1780 ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1781 {
1782 /* sanity check. */
1783 if (in6p == NULL) {
1784 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n");
1785 }
1786
1787 if (in6p->in6p_sp == NULL) {
1788 return 0;
1789 }
1790
1791 if (in6p->in6p_sp->sp_in != NULL) {
1792 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1793 in6p->in6p_sp->sp_in = NULL;
1794 }
1795
1796 if (in6p->in6p_sp->sp_out != NULL) {
1797 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1798 in6p->in6p_sp->sp_out = NULL;
1799 }
1800
1801 ipsec_delpcbpolicy(in6p->in6p_sp);
1802 in6p->in6p_sp = NULL;
1803
1804 return 0;
1805 }
1806
1807 /*
1808 * return current level.
1809 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned.
1810 */
1811 u_int
1812 ipsec_get_reqlevel(struct ipsecrequest *isr)
1813 {
1814 u_int level = 0;
1815 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1816
1817 /* sanity check */
1818 if (isr == NULL || isr->sp == NULL) {
1819 panic("ipsec_get_reqlevel: NULL pointer is passed.\n");
1820 }
1821 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1822 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) {
1823 panic("ipsec_get_reqlevel: family mismatched.\n");
1824 }
1825
1826 /* XXX note that we have ipseclog() expanded here - code sync issue */
1827 #define IPSEC_CHECK_DEFAULT(lev) \
1828 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1829 && (lev) != IPSEC_LEVEL_UNIQUE) \
1830 ? (ipsec_debug \
1831 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1832 (lev), IPSEC_LEVEL_REQUIRE) \
1833 : (void)0), \
1834 (lev) = IPSEC_LEVEL_REQUIRE, \
1835 (lev) \
1836 : (lev))
1837
1838 /* set default level */
1839 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1840 case AF_INET:
1841 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1842 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1843 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1844 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1845 break;
1846 case AF_INET6:
1847 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1848 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1849 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1850 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1851 break;
1852 default:
1853 panic("key_get_reqlevel: Unknown family. %d\n",
1854 ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1855 }
1856
1857 #undef IPSEC_CHECK_DEFAULT
1858
1859 /* set level */
1860 switch (isr->level) {
1861 case IPSEC_LEVEL_DEFAULT:
1862 switch (isr->saidx.proto) {
1863 case IPPROTO_ESP:
1864 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1865 level = esp_net_deflev;
1866 } else {
1867 level = esp_trans_deflev;
1868 }
1869 break;
1870 case IPPROTO_AH:
1871 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1872 level = ah_net_deflev;
1873 } else {
1874 level = ah_trans_deflev;
1875 }
1876 break;
1877 case IPPROTO_IPCOMP:
1878 ipseclog((LOG_ERR, "ipsec_get_reqlevel: "
1879 "still got IPCOMP - exiting\n"));
1880 break;
1881 default:
1882 panic("ipsec_get_reqlevel: "
1883 "Illegal protocol defined %u\n",
1884 isr->saidx.proto);
1885 }
1886 break;
1887
1888 case IPSEC_LEVEL_USE:
1889 case IPSEC_LEVEL_REQUIRE:
1890 level = isr->level;
1891 break;
1892 case IPSEC_LEVEL_UNIQUE:
1893 level = IPSEC_LEVEL_REQUIRE;
1894 break;
1895
1896 default:
1897 panic("ipsec_get_reqlevel: Illegal IPsec level %u\n",
1898 isr->level);
1899 }
1900
1901 return level;
1902 }
1903
1904 /*
1905 * Check AH/ESP integrity.
1906 * OUT:
1907 * 0: valid
1908 * 1: invalid
1909 */
1910 static int
1911 ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1912 {
1913 struct ipsecrequest *isr;
1914 u_int level;
1915 int need_auth, need_conf, need_icv;
1916
1917 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1918 printf("ipsec_in_reject: using SP\n");
1919 kdebug_secpolicy(sp));
1920
1921 /* check policy */
1922 switch (sp->policy) {
1923 case IPSEC_POLICY_DISCARD:
1924 case IPSEC_POLICY_GENERATE:
1925 return 1;
1926 case IPSEC_POLICY_BYPASS:
1927 case IPSEC_POLICY_NONE:
1928 return 0;
1929
1930 case IPSEC_POLICY_IPSEC:
1931 break;
1932
1933 case IPSEC_POLICY_ENTRUST:
1934 default:
1935 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
1936 }
1937
1938 need_auth = 0;
1939 need_conf = 0;
1940 need_icv = 0;
1941
1942 /* XXX should compare policy against ipsec header history */
1943
1944 for (isr = sp->req; isr != NULL; isr = isr->next) {
1945 /* get current level */
1946 level = ipsec_get_reqlevel(isr);
1947
1948 switch (isr->saidx.proto) {
1949 case IPPROTO_ESP:
1950 if (level == IPSEC_LEVEL_REQUIRE) {
1951 need_conf++;
1952
1953 #if 0
1954 /* this won't work with multiple input threads - isr->sav would change
1955 * with every packet and is not necessarily related to the current packet
1956 * being processed. If ESP processing is required - the esp code should
1957 * make sure that the integrity check is present and correct. I don't see
1958 * why it would be necessary to check for the presence of the integrity
1959 * check value here. I think this is just wrong.
1960 * isr->sav has been removed.
1961 * %%%%%% this needs to be re-worked at some point but I think the code below can
1962 * be ignored for now.
1963 */
1964 if (isr->sav != NULL
1965 && isr->sav->flags == SADB_X_EXT_NONE
1966 && isr->sav->alg_auth != SADB_AALG_NONE) {
1967 need_icv++;
1968 }
1969 #endif
1970 }
1971 break;
1972 case IPPROTO_AH:
1973 if (level == IPSEC_LEVEL_REQUIRE) {
1974 need_auth++;
1975 need_icv++;
1976 }
1977 break;
1978 case IPPROTO_IPCOMP:
1979 /*
1980 * we don't really care, as IPcomp document says that
1981 * we shouldn't compress small packets, IPComp policy
1982 * should always be treated as being in "use" level.
1983 */
1984 break;
1985 }
1986 }
1987
1988 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1989 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
1990 need_auth, need_conf, need_icv, m->m_flags));
1991
1992 if ((need_conf && !(m->m_flags & M_DECRYPTED))
1993 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
1994 || (need_auth && !(m->m_flags & M_AUTHIPHDR))) {
1995 return 1;
1996 }
1997
1998 return 0;
1999 }
2000
2001 /*
2002 * Check AH/ESP integrity.
2003 * This function is called from tcp_input(), udp_input(),
2004 * and {ah,esp}4_input for tunnel mode
2005 */
2006 int
2007 ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
2008 {
2009 struct secpolicy *sp = NULL;
2010 int error;
2011 int result;
2012
2013 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2014 /* sanity check */
2015 if (m == NULL) {
2016 return 0; /* XXX should be panic ? */
2017 }
2018 /* get SP for this packet.
2019 * When we are called from ip_forward(), we call
2020 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2021 */
2022 if (so == NULL) {
2023 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2024 } else {
2025 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2026 }
2027
2028 if (sp == NULL) {
2029 return 0; /* XXX should be panic ?
2030 * -> No, there may be error. */
2031 }
2032 result = ipsec_in_reject(sp, m);
2033 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2034 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
2035 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2036 key_freesp(sp, KEY_SADB_UNLOCKED);
2037
2038 return result;
2039 }
2040
2041 int
2042 ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
2043 {
2044 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2045 if (inp == NULL) {
2046 return ipsec4_in_reject_so(m, NULL);
2047 }
2048 if (inp->inp_socket) {
2049 return ipsec4_in_reject_so(m, inp->inp_socket);
2050 } else {
2051 panic("ipsec4_in_reject: invalid inpcb/socket");
2052 }
2053
2054 /* NOTREACHED */
2055 return 0;
2056 }
2057
2058 /*
2059 * Check AH/ESP integrity.
2060 * This function is called from tcp6_input(), udp6_input(),
2061 * and {ah,esp}6_input for tunnel mode
2062 */
2063 int
2064 ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2065 {
2066 struct secpolicy *sp = NULL;
2067 int error;
2068 int result;
2069
2070 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2071 /* sanity check */
2072 if (m == NULL) {
2073 return 0; /* XXX should be panic ? */
2074 }
2075 /* get SP for this packet.
2076 * When we are called from ip_forward(), we call
2077 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2078 */
2079 if (so == NULL) {
2080 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2081 } else {
2082 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2083 }
2084
2085 if (sp == NULL) {
2086 return 0; /* XXX should be panic ? */
2087 }
2088 result = ipsec_in_reject(sp, m);
2089 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2090 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2091 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2092 key_freesp(sp, KEY_SADB_UNLOCKED);
2093
2094 return result;
2095 }
2096
2097 int
2098 ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2099 {
2100 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2101 if (in6p == NULL) {
2102 return ipsec6_in_reject_so(m, NULL);
2103 }
2104 if (in6p->in6p_socket) {
2105 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2106 } else {
2107 panic("ipsec6_in_reject: invalid in6p/socket");
2108 }
2109
2110 /* NOTREACHED */
2111 return 0;
2112 }
2113
2114 /*
2115 * compute the byte size to be occupied by IPsec header.
2116 * in case it is tunneled, it includes the size of outer IP header.
2117 * NOTE: SP passed is free in this function.
2118 */
2119 size_t
2120 ipsec_hdrsiz(struct secpolicy *sp)
2121 {
2122 struct ipsecrequest *isr;
2123 size_t siz, clen;
2124
2125 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2126 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2127 printf("ipsec_hdrsiz: using SP\n");
2128 kdebug_secpolicy(sp));
2129
2130 /* check policy */
2131 switch (sp->policy) {
2132 case IPSEC_POLICY_DISCARD:
2133 case IPSEC_POLICY_GENERATE:
2134 case IPSEC_POLICY_BYPASS:
2135 case IPSEC_POLICY_NONE:
2136 return 0;
2137
2138 case IPSEC_POLICY_IPSEC:
2139 break;
2140
2141 case IPSEC_POLICY_ENTRUST:
2142 default:
2143 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
2144 }
2145
2146 siz = 0;
2147
2148 for (isr = sp->req; isr != NULL; isr = isr->next) {
2149 clen = 0;
2150
2151 switch (isr->saidx.proto) {
2152 case IPPROTO_ESP:
2153 #if IPSEC_ESP
2154 clen = esp_hdrsiz(isr);
2155 #else
2156 clen = 0; /*XXX*/
2157 #endif
2158 break;
2159 case IPPROTO_AH:
2160 clen = ah_hdrsiz(isr);
2161 break;
2162 default:
2163 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2164 "unknown protocol %u\n",
2165 isr->saidx.proto));
2166 break;
2167 }
2168
2169 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2170 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2171 case AF_INET:
2172 clen += sizeof(struct ip);
2173 break;
2174 case AF_INET6:
2175 clen += sizeof(struct ip6_hdr);
2176 break;
2177 default:
2178 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2179 "unknown AF %d in IPsec tunnel SA\n",
2180 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2181 break;
2182 }
2183 }
2184 siz += clen;
2185 }
2186
2187 return siz;
2188 }
2189
2190 /* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2191 size_t
2192 ipsec4_hdrsiz(struct mbuf *m, u_int8_t dir, struct inpcb *inp)
2193 {
2194 struct secpolicy *sp = NULL;
2195 int error;
2196 size_t size;
2197
2198 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2199 /* sanity check */
2200 if (m == NULL) {
2201 return 0; /* XXX should be panic ? */
2202 }
2203 if (inp != NULL && inp->inp_socket == NULL) {
2204 panic("ipsec4_hdrsize: why is socket NULL but there is PCB.");
2205 }
2206
2207 /* get SP for this packet.
2208 * When we are called from ip_forward(), we call
2209 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2210 */
2211 if (inp == NULL) {
2212 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2213 } else {
2214 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2215 }
2216
2217 if (sp == NULL) {
2218 return 0; /* XXX should be panic ? */
2219 }
2220 size = ipsec_hdrsiz(sp);
2221 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2222 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2223 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2224 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2225 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2226 key_freesp(sp, KEY_SADB_UNLOCKED);
2227
2228 return size;
2229 }
2230
2231 /* This function is called from ipsec6_hdrsize_tcp(),
2232 * and maybe from ip6_forward.()
2233 */
2234 size_t
2235 ipsec6_hdrsiz(struct mbuf *m, u_int8_t dir, struct in6pcb *in6p)
2236 {
2237 struct secpolicy *sp = NULL;
2238 int error;
2239 size_t size;
2240
2241 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2242 /* sanity check */
2243 if (m == NULL) {
2244 return 0; /* XXX shoud be panic ? */
2245 }
2246 if (in6p != NULL && in6p->in6p_socket == NULL) {
2247 panic("ipsec6_hdrsize: why is socket NULL but there is PCB.");
2248 }
2249
2250 /* get SP for this packet */
2251 /* XXX Is it right to call with IP_FORWARDING. */
2252 if (in6p == NULL) {
2253 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2254 } else {
2255 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2256 }
2257
2258 if (sp == NULL) {
2259 return 0;
2260 }
2261 size = ipsec_hdrsiz(sp);
2262 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2263 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2264 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2265 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2266 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2267 key_freesp(sp, KEY_SADB_UNLOCKED);
2268
2269 return size;
2270 }
2271
2272 /*
2273 * encapsulate for ipsec tunnel.
2274 * ip->ip_src must be fixed later on.
2275 */
2276 int
2277 ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
2278 {
2279 struct ip *oip;
2280 struct ip *ip;
2281 size_t plen;
2282 u_int32_t hlen;
2283
2284 /* can't tunnel between different AFs */
2285 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2286 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2287 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2288 m_freem(m);
2289 return EINVAL;
2290 }
2291
2292 if (m->m_len < sizeof(*ip)) {
2293 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2294 }
2295
2296 ip = mtod(m, struct ip *);
2297 #ifdef _IP_VHL
2298 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2299 #else
2300 hlen = ip->ip_hl << 2;
2301 #endif
2302
2303 if (m->m_len != hlen) {
2304 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2305 }
2306
2307 /* generate header checksum */
2308 ip->ip_sum = 0;
2309 #ifdef _IP_VHL
2310 ip->ip_sum = in_cksum(m, hlen);
2311 #else
2312 ip->ip_sum = in_cksum(m, hlen);
2313 #endif
2314
2315 plen = m->m_pkthdr.len;
2316
2317 /*
2318 * grow the mbuf to accomodate the new IPv4 header.
2319 * NOTE: IPv4 options will never be copied.
2320 */
2321 if (M_LEADINGSPACE(m->m_next) < hlen) {
2322 struct mbuf *n;
2323 MGET(n, M_DONTWAIT, MT_DATA);
2324 if (!n) {
2325 m_freem(m);
2326 return ENOBUFS;
2327 }
2328 n->m_len = hlen;
2329 n->m_next = m->m_next;
2330 m->m_next = n;
2331 m->m_pkthdr.len += hlen;
2332 oip = mtod(n, struct ip *);
2333 } else {
2334 m->m_next->m_len += hlen;
2335 m->m_next->m_data -= hlen;
2336 m->m_pkthdr.len += hlen;
2337 oip = mtod(m->m_next, struct ip *);
2338 }
2339 ip = mtod(m, struct ip *);
2340 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2341 m->m_len = sizeof(struct ip);
2342 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2343
2344 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2345 /* ECN consideration. */
2346 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2347 #ifdef _IP_VHL
2348 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2349 #else
2350 ip->ip_hl = sizeof(struct ip) >> 2;
2351 #endif
2352 ip->ip_off &= htons(~IP_OFFMASK);
2353 ip->ip_off &= htons(~IP_MF);
2354 switch (ip4_ipsec_dfbit) {
2355 case 0: /* clear DF bit */
2356 ip->ip_off &= htons(~IP_DF);
2357 break;
2358 case 1: /* set DF bit */
2359 ip->ip_off |= htons(IP_DF);
2360 break;
2361 default: /* copy DF bit */
2362 break;
2363 }
2364 ip->ip_p = IPPROTO_IPIP;
2365 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2366 ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
2367 } else {
2368 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2369 "leave ip_len as is (invalid packet)\n"));
2370 }
2371 if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) {
2372 ip->ip_id = 0;
2373 } else {
2374 ip->ip_id = ip_randomid();
2375 }
2376 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2377 &ip->ip_src, sizeof(ip->ip_src));
2378 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2379 &ip->ip_dst, sizeof(ip->ip_dst));
2380 ip->ip_ttl = IPDEFTTL;
2381
2382 /* XXX Should ip_src be updated later ? */
2383
2384 return 0;
2385 }
2386
2387
2388 int
2389 ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
2390 {
2391 struct ip6_hdr *oip6;
2392 struct ip6_hdr *ip6;
2393 size_t plen;
2394
2395 /* can't tunnel between different AFs */
2396 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2397 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2398 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2399 m_freem(m);
2400 return EINVAL;
2401 }
2402
2403 plen = m->m_pkthdr.len;
2404
2405 /*
2406 * grow the mbuf to accomodate the new IPv6 header.
2407 */
2408 if (m->m_len != sizeof(struct ip6_hdr)) {
2409 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2410 }
2411 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2412 struct mbuf *n;
2413 MGET(n, M_DONTWAIT, MT_DATA);
2414 if (!n) {
2415 m_freem(m);
2416 return ENOBUFS;
2417 }
2418 n->m_len = sizeof(struct ip6_hdr);
2419 n->m_next = m->m_next;
2420 m->m_next = n;
2421 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2422 oip6 = mtod(n, struct ip6_hdr *);
2423 } else {
2424 m->m_next->m_len += sizeof(struct ip6_hdr);
2425 m->m_next->m_data -= sizeof(struct ip6_hdr);
2426 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2427 oip6 = mtod(m->m_next, struct ip6_hdr *);
2428 }
2429 ip6 = mtod(m, struct ip6_hdr *);
2430 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2431
2432 /* Fake link-local scope-class addresses */
2433 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) {
2434 oip6->ip6_src.s6_addr16[1] = 0;
2435 }
2436 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) {
2437 oip6->ip6_dst.s6_addr16[1] = 0;
2438 }
2439
2440 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2441 /* ECN consideration. */
2442 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2443 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2444 ip6->ip6_plen = htons((u_int16_t)plen);
2445 } else {
2446 /* ip6->ip6_plen will be updated in ip6_output() */
2447 }
2448 ip6->ip6_nxt = IPPROTO_IPV6;
2449 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2450 &ip6->ip6_src, sizeof(ip6->ip6_src));
2451 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2452 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2453 ip6->ip6_hlim = IPV6_DEFHLIM;
2454
2455 /* XXX Should ip6_src be updated later ? */
2456
2457 return 0;
2458 }
2459
2460 static int
2461 ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav)
2462 {
2463 struct ip6_hdr *ip6, *ip6i;
2464 struct ip *ip;
2465 size_t plen;
2466 u_int8_t hlim;
2467
2468 /* tunneling over IPv4 */
2469 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2470 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2471 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2472 m_freem(m);
2473 return EINVAL;
2474 }
2475
2476 plen = m->m_pkthdr.len;
2477 ip6 = mtod(m, struct ip6_hdr *);
2478 hlim = ip6->ip6_hlim;
2479 /*
2480 * grow the mbuf to accomodate the new IPv4 header.
2481 */
2482 if (m->m_len != sizeof(struct ip6_hdr)) {
2483 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2484 }
2485 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2486 struct mbuf *n;
2487 MGET(n, M_DONTWAIT, MT_DATA);
2488 if (!n) {
2489 m_freem(m);
2490 return ENOBUFS;
2491 }
2492 n->m_len = sizeof(struct ip6_hdr);
2493 n->m_next = m->m_next;
2494 m->m_next = n;
2495 m->m_pkthdr.len += sizeof(struct ip);
2496 ip6i = mtod(n, struct ip6_hdr *);
2497 } else {
2498 m->m_next->m_len += sizeof(struct ip6_hdr);
2499 m->m_next->m_data -= sizeof(struct ip6_hdr);
2500 m->m_pkthdr.len += sizeof(struct ip);
2501 ip6i = mtod(m->m_next, struct ip6_hdr *);
2502 }
2503
2504 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2505 ip = mtod(m, struct ip *);
2506 m->m_len = sizeof(struct ip);
2507 /*
2508 * Fill in some of the IPv4 fields - we don't need all of them
2509 * because the rest will be filled in by ip_output
2510 */
2511 ip->ip_v = IPVERSION;
2512 ip->ip_hl = sizeof(struct ip) >> 2;
2513 ip->ip_id = 0;
2514 ip->ip_sum = 0;
2515 ip->ip_tos = 0;
2516 ip->ip_off = 0;
2517 ip->ip_ttl = hlim;
2518 ip->ip_p = IPPROTO_IPV6;
2519
2520 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2521 /* ECN consideration. */
2522 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow);
2523
2524 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2525 ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
2526 } else {
2527 ip->ip_len = htons((u_int16_t)plen);
2528 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2529 "leave ip_len as is (invalid packet)\n"));
2530 }
2531 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2532 &ip->ip_src, sizeof(ip->ip_src));
2533 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2534 &ip->ip_dst, sizeof(ip->ip_dst));
2535
2536 return 0;
2537 }
2538
2539 int
2540 ipsec6_update_routecache_and_output(
2541 struct ipsec_output_state *state,
2542 struct secasvar *sav)
2543 {
2544 struct sockaddr_in6* dst6;
2545 struct route_in6 *ro6;
2546 struct ip6_hdr *ip6;
2547 errno_t error = 0;
2548
2549 int plen;
2550 struct ip6_out_args ip6oa;
2551 struct route_in6 ro6_new;
2552 struct flowadv *adv = NULL;
2553
2554 if (!state->m) {
2555 return EINVAL;
2556 }
2557 ip6 = mtod(state->m, struct ip6_hdr *);
2558
2559 // grab sadb_mutex, before updating sah's route cache
2560 lck_mtx_lock(sadb_mutex);
2561 ro6 = &sav->sah->sa_route;
2562 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2563 if (ro6->ro_rt) {
2564 RT_LOCK(ro6->ro_rt);
2565 }
2566 if (ROUTE_UNUSABLE(ro6) ||
2567 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2568 if (ro6->ro_rt != NULL) {
2569 RT_UNLOCK(ro6->ro_rt);
2570 }
2571 ROUTE_RELEASE(ro6);
2572 }
2573 if (ro6->ro_rt == 0) {
2574 bzero(dst6, sizeof(*dst6));
2575 dst6->sin6_family = AF_INET6;
2576 dst6->sin6_len = sizeof(*dst6);
2577 dst6->sin6_addr = ip6->ip6_dst;
2578 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
2579 if (ro6->ro_rt) {
2580 RT_LOCK(ro6->ro_rt);
2581 }
2582 }
2583 if (ro6->ro_rt == 0) {
2584 ip6stat.ip6s_noroute++;
2585 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2586 error = EHOSTUNREACH;
2587 // release sadb_mutex, after updating sah's route cache
2588 lck_mtx_unlock(sadb_mutex);
2589 return error;
2590 }
2591
2592 /*
2593 * adjust state->dst if tunnel endpoint is offlink
2594 *
2595 * XXX: caching rt_gateway value in the state is
2596 * not really good, since it may point elsewhere
2597 * when the gateway gets modified to a larger
2598 * sockaddr via rt_setgate(). This is currently
2599 * addressed by SA_SIZE roundup in that routine.
2600 */
2601 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
2602 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2603 }
2604 RT_UNLOCK(ro6->ro_rt);
2605 ROUTE_RELEASE(&state->ro);
2606 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
2607 state->dst = (struct sockaddr *)dst6;
2608 state->tunneled = 6;
2609 // release sadb_mutex, after updating sah's route cache
2610 lck_mtx_unlock(sadb_mutex);
2611
2612 state->m = ipsec6_splithdr(state->m);
2613 if (!state->m) {
2614 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2615 error = ENOMEM;
2616 return error;
2617 }
2618
2619 ip6 = mtod(state->m, struct ip6_hdr *);
2620 switch (sav->sah->saidx.proto) {
2621 case IPPROTO_ESP:
2622 #if IPSEC_ESP
2623 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2624 #else
2625 m_freem(state->m);
2626 error = EINVAL;
2627 #endif
2628 break;
2629 case IPPROTO_AH:
2630 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2631 break;
2632 default:
2633 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2634 m_freem(state->m);
2635 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2636 error = EINVAL;
2637 break;
2638 }
2639 if (error) {
2640 // If error, packet already freed by above output routines
2641 state->m = NULL;
2642 return error;
2643 }
2644
2645 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2646 if (plen > IPV6_MAXPACKET) {
2647 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2648 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2649 error = EINVAL;/*XXX*/
2650 return error;
2651 }
2652 ip6 = mtod(state->m, struct ip6_hdr *);
2653 ip6->ip6_plen = htons((u_int16_t)plen);
2654
2655 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2656 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2657
2658 /* Increment statistics */
2659 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, (u_int32_t)mbuf_pkthdr_len(state->m), 0);
2660
2661 /* Send to ip6_output */
2662 bzero(&ro6_new, sizeof(ro6_new));
2663 bzero(&ip6oa, sizeof(ip6oa));
2664 ip6oa.ip6oa_flowadv.code = 0;
2665 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2666 if (state->outgoing_if) {
2667 ip6oa.ip6oa_boundif = state->outgoing_if;
2668 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2669 }
2670
2671 adv = &ip6oa.ip6oa_flowadv;
2672 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2673 state->m = NULL;
2674
2675 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2676 error = ENOBUFS;
2677 ifnet_disable_output(sav->sah->ipsec_if);
2678 return error;
2679 }
2680
2681 return 0;
2682 }
2683
2684 int
2685 ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
2686 {
2687 struct mbuf *m;
2688 struct ip6_hdr *ip6;
2689 struct ip *oip;
2690 struct ip *ip;
2691 size_t plen;
2692 u_int32_t hlen;
2693
2694 m = state->m;
2695 if (!m) {
2696 return EINVAL;
2697 }
2698
2699 /* can't tunnel between different AFs */
2700 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2701 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2702 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2703 m_freem(m);
2704 return EINVAL;
2705 }
2706
2707 if (m->m_len < sizeof(*ip)) {
2708 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2709 return EINVAL;
2710 }
2711
2712 ip = mtod(m, struct ip *);
2713 #ifdef _IP_VHL
2714 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2715 #else
2716 hlen = ip->ip_hl << 2;
2717 #endif
2718
2719 if (m->m_len != hlen) {
2720 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2721 return EINVAL;
2722 }
2723
2724 /* generate header checksum */
2725 ip->ip_sum = 0;
2726 #ifdef _IP_VHL
2727 ip->ip_sum = in_cksum(m, hlen);
2728 #else
2729 ip->ip_sum = in_cksum(m, hlen);
2730 #endif
2731
2732 plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len
2733
2734 /*
2735 * First move the IPv4 header to the second mbuf in the chain
2736 */
2737 if (M_LEADINGSPACE(m->m_next) < hlen) {
2738 struct mbuf *n;
2739 MGET(n, M_DONTWAIT, MT_DATA);
2740 if (!n) {
2741 m_freem(m);
2742 return ENOBUFS;
2743 }
2744 n->m_len = hlen;
2745 n->m_next = m->m_next;
2746 m->m_next = n;
2747 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2748 oip = mtod(n, struct ip *);
2749 } else {
2750 m->m_next->m_len += hlen;
2751 m->m_next->m_data -= hlen;
2752 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2753 oip = mtod(m->m_next, struct ip *);
2754 }
2755 ip = mtod(m, struct ip *);
2756 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2757
2758 /*
2759 * Grow the first mbuf to accomodate the new IPv6 header.
2760 */
2761 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2762 struct mbuf *n;
2763 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2764 if (!n) {
2765 m_freem(m);
2766 return ENOBUFS;
2767 }
2768 M_COPY_PKTHDR(n, m);
2769 MH_ALIGN(n, sizeof(struct ip6_hdr));
2770 n->m_len = sizeof(struct ip6_hdr);
2771 n->m_next = m->m_next;
2772 m->m_next = NULL;
2773 m_freem(m);
2774 state->m = n;
2775 m = state->m;
2776 } else {
2777 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2778 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2779 }
2780 ip6 = mtod(m, struct ip6_hdr *);
2781 ip6->ip6_flow = 0;
2782 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2783 ip6->ip6_vfc |= IPV6_VERSION;
2784
2785 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2786 /* ECN consideration. */
2787 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos);
2788 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2789 ip6->ip6_plen = htons((u_int16_t)plen);
2790 } else {
2791 /* ip6->ip6_plen will be updated in ip6_output() */
2792 }
2793
2794 ip6->ip6_nxt = IPPROTO_IPV4;
2795 ip6->ip6_hlim = IPV6_DEFHLIM;
2796
2797 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2798 &ip6->ip6_src, sizeof(ip6->ip6_src));
2799 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2800 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2801
2802 return 0;
2803 }
2804
2805 /*
2806 * Check the variable replay window.
2807 * ipsec_chkreplay() performs replay check before ICV verification.
2808 * ipsec_updatereplay() updates replay bitmap. This must be called after
2809 * ICV verification (it also performs replay check, which is usually done
2810 * beforehand).
2811 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2812 *
2813 * based on RFC 2401.
2814 */
2815 int
2816 ipsec_chkreplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2817 {
2818 const struct secreplay *replay;
2819 u_int32_t diff;
2820 size_t fr;
2821 size_t wsizeb; /* constant: bits of window size */
2822 size_t frlast; /* constant: last frame */
2823
2824
2825 /* sanity check */
2826 if (sav == NULL) {
2827 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2828 }
2829
2830 lck_mtx_lock(sadb_mutex);
2831 replay = sav->replay[replay_index];
2832
2833 if (replay->wsize == 0) {
2834 lck_mtx_unlock(sadb_mutex);
2835 return 1; /* no need to check replay. */
2836 }
2837
2838 /* constant */
2839 frlast = replay->wsize - 1;
2840 wsizeb = replay->wsize << 3;
2841
2842 /* sequence number of 0 is invalid */
2843 if (seq == 0) {
2844 lck_mtx_unlock(sadb_mutex);
2845 return 0;
2846 }
2847
2848 /* first time is always okay */
2849 if (replay->count == 0) {
2850 lck_mtx_unlock(sadb_mutex);
2851 return 1;
2852 }
2853
2854 if (seq > replay->lastseq) {
2855 /* larger sequences are okay */
2856 lck_mtx_unlock(sadb_mutex);
2857 return 1;
2858 } else {
2859 /* seq is equal or less than lastseq. */
2860 diff = replay->lastseq - seq;
2861
2862 /* over range to check, i.e. too old or wrapped */
2863 if (diff >= wsizeb) {
2864 lck_mtx_unlock(sadb_mutex);
2865 return 0;
2866 }
2867
2868 fr = frlast - diff / 8;
2869
2870 /* this packet already seen ? */
2871 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2872 lck_mtx_unlock(sadb_mutex);
2873 return 0;
2874 }
2875
2876 /* out of order but good */
2877 lck_mtx_unlock(sadb_mutex);
2878 return 1;
2879 }
2880 }
2881
2882 /*
2883 * check replay counter whether to update or not.
2884 * OUT: 0: OK
2885 * 1: NG
2886 */
2887 int
2888 ipsec_updatereplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2889 {
2890 struct secreplay *replay;
2891 u_int32_t diff;
2892 size_t fr;
2893 size_t wsizeb; /* constant: bits of window size */
2894 size_t frlast; /* constant: last frame */
2895
2896 /* sanity check */
2897 if (sav == NULL) {
2898 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2899 }
2900
2901 lck_mtx_lock(sadb_mutex);
2902 replay = sav->replay[replay_index];
2903
2904 if (replay->wsize == 0) {
2905 goto ok; /* no need to check replay. */
2906 }
2907 /* constant */
2908 frlast = replay->wsize - 1;
2909 wsizeb = replay->wsize << 3;
2910
2911 /* sequence number of 0 is invalid */
2912 if (seq == 0) {
2913 lck_mtx_unlock(sadb_mutex);
2914 return 1;
2915 }
2916
2917 /* first time */
2918 if (replay->count == 0) {
2919 replay->lastseq = seq;
2920 bzero(replay->bitmap, replay->wsize);
2921 (replay->bitmap)[frlast] = 1;
2922 goto ok;
2923 }
2924
2925 if (seq > replay->lastseq) {
2926 /* seq is larger than lastseq. */
2927 diff = seq - replay->lastseq;
2928
2929 /* new larger sequence number */
2930 if (diff < wsizeb) {
2931 /* In window */
2932 /* set bit for this packet */
2933 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
2934 (replay->bitmap)[frlast] |= 1;
2935 } else {
2936 /* this packet has a "way larger" */
2937 bzero(replay->bitmap, replay->wsize);
2938 (replay->bitmap)[frlast] = 1;
2939 }
2940 replay->lastseq = seq;
2941
2942 /* larger is good */
2943 } else {
2944 /* seq is equal or less than lastseq. */
2945 diff = replay->lastseq - seq;
2946
2947 /* over range to check, i.e. too old or wrapped */
2948 if (diff >= wsizeb) {
2949 lck_mtx_unlock(sadb_mutex);
2950 return 1;
2951 }
2952
2953 fr = frlast - diff / 8;
2954
2955 /* this packet already seen ? */
2956 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2957 lck_mtx_unlock(sadb_mutex);
2958 return 1;
2959 }
2960
2961 /* mark as seen */
2962 (replay->bitmap)[fr] |= (1 << (diff % 8));
2963
2964 /* out of order but good */
2965 }
2966
2967 ok:
2968 if (replay->count == ~0) {
2969 /* set overflow flag */
2970 replay->overflow++;
2971
2972 /* don't increment, no more packets accepted */
2973 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
2974 lck_mtx_unlock(sadb_mutex);
2975 return 1;
2976 }
2977
2978 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
2979 replay->overflow, ipsec_logsastr(sav)));
2980 }
2981
2982 replay->count++;
2983
2984 lck_mtx_unlock(sadb_mutex);
2985 return 0;
2986 }
2987
2988 /*
2989 * shift variable length buffer to left.
2990 * IN: bitmap: pointer to the buffer
2991 * nbit: the number of to shift.
2992 * wsize: buffer size (bytes).
2993 */
2994 static void
2995 vshiftl(unsigned char *bitmap, int nbit, size_t wsize)
2996 {
2997 size_t i;
2998 int s, j;
2999 unsigned char over;
3000
3001 for (j = 0; j < nbit; j += 8) {
3002 s = (nbit - j < 8) ? (nbit - j): 8;
3003 bitmap[0] <<= s;
3004 for (i = 1; i < wsize; i++) {
3005 over = (bitmap[i] >> (8 - s));
3006 bitmap[i] <<= s;
3007 bitmap[i - 1] |= over;
3008 }
3009 }
3010
3011 return;
3012 }
3013
3014 const char *
3015 ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
3016 {
3017 static char buf[256] __attribute__((aligned(4)));
3018 char *p;
3019 u_int8_t *s, *d;
3020
3021 s = (u_int8_t *)(&ip->ip_src);
3022 d = (u_int8_t *)(&ip->ip_dst);
3023
3024 p = buf;
3025 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3026 while (p && *p) {
3027 p++;
3028 }
3029 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
3030 s[0], s[1], s[2], s[3]);
3031 while (p && *p) {
3032 p++;
3033 }
3034 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
3035 d[0], d[1], d[2], d[3]);
3036 while (p && *p) {
3037 p++;
3038 }
3039 snprintf(p, sizeof(buf) - (p - buf), ")");
3040
3041 return buf;
3042 }
3043
3044 const char *
3045 ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3046 {
3047 static char buf[256] __attribute__((aligned(4)));
3048 char *p;
3049
3050 p = buf;
3051 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3052 while (p && *p) {
3053 p++;
3054 }
3055 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3056 ip6_sprintf(&ip6->ip6_src));
3057 while (p && *p) {
3058 p++;
3059 }
3060 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3061 ip6_sprintf(&ip6->ip6_dst));
3062 while (p && *p) {
3063 p++;
3064 }
3065 snprintf(p, sizeof(buf) - (p - buf), ")");
3066
3067 return buf;
3068 }
3069
3070 const char *
3071 ipsec_logsastr(struct secasvar *sav)
3072 {
3073 static char buf[256] __attribute__((aligned(4)));
3074 char *p;
3075 struct secasindex *saidx = &sav->sah->saidx;
3076
3077 /* validity check */
3078 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3079 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) {
3080 panic("ipsec_logsastr: family mismatched.\n");
3081 }
3082
3083 p = buf;
3084 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3085 while (p && *p) {
3086 p++;
3087 }
3088 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3089 u_int8_t *s, *d;
3090 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3091 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3092 snprintf(p, sizeof(buf) - (p - buf),
3093 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3094 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3095 } else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3096 snprintf(p, sizeof(buf) - (p - buf),
3097 "src=%s",
3098 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3099 while (p && *p) {
3100 p++;
3101 }
3102 snprintf(p, sizeof(buf) - (p - buf),
3103 " dst=%s",
3104 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3105 }
3106 while (p && *p) {
3107 p++;
3108 }
3109 snprintf(p, sizeof(buf) - (p - buf), ")");
3110
3111 return buf;
3112 }
3113
3114 void
3115 ipsec_dumpmbuf(struct mbuf *m)
3116 {
3117 int totlen;
3118 int i;
3119 u_char *p;
3120
3121 totlen = 0;
3122 printf("---\n");
3123 while (m) {
3124 p = mtod(m, u_char *);
3125 for (i = 0; i < m->m_len; i++) {
3126 printf("%02x ", p[i]);
3127 totlen++;
3128 if (totlen % 16 == 0) {
3129 printf("\n");
3130 }
3131 }
3132 m = m->m_next;
3133 }
3134 if (totlen % 16 != 0) {
3135 printf("\n");
3136 }
3137 printf("---\n");
3138 }
3139
3140 #if INET
3141 /*
3142 * IPsec output logic for IPv4.
3143 */
3144 static int
3145 ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3146 {
3147 struct ip *ip = NULL;
3148 int error = 0;
3149 struct sockaddr_in *dst4;
3150 struct route *ro4;
3151
3152 /* validity check */
3153 if (sav == NULL || sav->sah == NULL) {
3154 error = EINVAL;
3155 goto bad;
3156 }
3157
3158 /*
3159 * If there is no valid SA, we give up to process any
3160 * more. In such a case, the SA's status is changed
3161 * from DYING to DEAD after allocating. If a packet
3162 * send to the receiver by dead SA, the receiver can
3163 * not decode a packet because SA has been dead.
3164 */
3165 if (sav->state != SADB_SASTATE_MATURE
3166 && sav->state != SADB_SASTATE_DYING) {
3167 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3168 error = EINVAL;
3169 goto bad;
3170 }
3171
3172 state->outgoing_if = sav->sah->outgoing_if;
3173
3174 /*
3175 * There may be the case that SA status will be changed when
3176 * we are refering to one. So calling splsoftnet().
3177 */
3178
3179 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3180 /*
3181 * build IPsec tunnel.
3182 */
3183 state->m = ipsec4_splithdr(state->m);
3184 if (!state->m) {
3185 error = ENOMEM;
3186 goto bad;
3187 }
3188
3189 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3190 error = ipsec46_encapsulate(state, sav);
3191 if (error) {
3192 // packet already freed by encapsulation error handling
3193 state->m = NULL;
3194 return error;
3195 }
3196
3197 error = ipsec6_update_routecache_and_output(state, sav);
3198 return error;
3199 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3200 error = ipsec4_encapsulate(state->m, sav);
3201 if (error) {
3202 state->m = NULL;
3203 goto bad;
3204 }
3205 ip = mtod(state->m, struct ip *);
3206
3207 // grab sadb_mutex, before updating sah's route cache
3208 lck_mtx_lock(sadb_mutex);
3209 ro4 = (struct route *)&sav->sah->sa_route;
3210 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3211 if (ro4->ro_rt != NULL) {
3212 RT_LOCK(ro4->ro_rt);
3213 }
3214 if (ROUTE_UNUSABLE(ro4) ||
3215 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3216 if (ro4->ro_rt != NULL) {
3217 RT_UNLOCK(ro4->ro_rt);
3218 }
3219 ROUTE_RELEASE(ro4);
3220 }
3221 if (ro4->ro_rt == 0) {
3222 dst4->sin_family = AF_INET;
3223 dst4->sin_len = sizeof(*dst4);
3224 dst4->sin_addr = ip->ip_dst;
3225 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3226 if (ro4->ro_rt == 0) {
3227 OSAddAtomic(1, &ipstat.ips_noroute);
3228 error = EHOSTUNREACH;
3229 // release sadb_mutex, after updating sah's route cache
3230 lck_mtx_unlock(sadb_mutex);
3231 goto bad;
3232 }
3233 RT_LOCK(ro4->ro_rt);
3234 }
3235
3236 /*
3237 * adjust state->dst if tunnel endpoint is offlink
3238 *
3239 * XXX: caching rt_gateway value in the state is
3240 * not really good, since it may point elsewhere
3241 * when the gateway gets modified to a larger
3242 * sockaddr via rt_setgate(). This is currently
3243 * addressed by SA_SIZE roundup in that routine.
3244 */
3245 if (ro4->ro_rt->rt_flags & RTF_GATEWAY) {
3246 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3247 }
3248 RT_UNLOCK(ro4->ro_rt);
3249 ROUTE_RELEASE(&state->ro);
3250 route_copyout((struct route *)&state->ro, ro4, sizeof(struct route));
3251 state->dst = (struct sockaddr *)dst4;
3252 state->tunneled = 4;
3253 // release sadb_mutex, after updating sah's route cache
3254 lck_mtx_unlock(sadb_mutex);
3255 } else {
3256 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3257 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3258 error = EAFNOSUPPORT;
3259 goto bad;
3260 }
3261 }
3262
3263 state->m = ipsec4_splithdr(state->m);
3264 if (!state->m) {
3265 error = ENOMEM;
3266 goto bad;
3267 }
3268 switch (sav->sah->saidx.proto) {
3269 case IPPROTO_ESP:
3270 #if IPSEC_ESP
3271 if ((error = esp4_output(state->m, sav)) != 0) {
3272 state->m = NULL;
3273 goto bad;
3274 }
3275 break;
3276 #else
3277 m_freem(state->m);
3278 state->m = NULL;
3279 error = EINVAL;
3280 goto bad;
3281 #endif
3282 case IPPROTO_AH:
3283 if ((error = ah4_output(state->m, sav)) != 0) {
3284 state->m = NULL;
3285 goto bad;
3286 }
3287 break;
3288 default:
3289 ipseclog((LOG_ERR,
3290 "ipsec4_output: unknown ipsec protocol %d\n",
3291 sav->sah->saidx.proto));
3292 m_freem(state->m);
3293 state->m = NULL;
3294 error = EPROTONOSUPPORT;
3295 goto bad;
3296 }
3297
3298 if (state->m == 0) {
3299 error = ENOMEM;
3300 goto bad;
3301 }
3302
3303 return 0;
3304
3305 bad:
3306 return error;
3307 }
3308
3309 int
3310 ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3311 {
3312 int error = 0;
3313 struct secasvar *sav = NULL;
3314
3315 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3316
3317 if (state == NULL) {
3318 panic("state == NULL in ipsec4_output");
3319 }
3320 if (state->m == NULL) {
3321 panic("state->m == NULL in ipsec4_output");
3322 }
3323 if (state->dst == NULL) {
3324 panic("state->dst == NULL in ipsec4_output");
3325 }
3326
3327 struct ip *ip = mtod(state->m, struct ip *);
3328
3329 struct sockaddr_in src = {};
3330 src.sin_family = AF_INET;
3331 src.sin_len = sizeof(src);
3332 memcpy(&src.sin_addr, &ip->ip_src, sizeof(src.sin_addr));
3333
3334 struct sockaddr_in dst = {};
3335 dst.sin_family = AF_INET;
3336 dst.sin_len = sizeof(dst);
3337 memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr));
3338
3339 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
3340 (struct sockaddr *)&src,
3341 (struct sockaddr *)&dst);
3342 if (sav == NULL) {
3343 goto bad;
3344 }
3345
3346 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3347 goto bad;
3348 }
3349
3350 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3351 if (sav) {
3352 key_freesav(sav, KEY_SADB_UNLOCKED);
3353 }
3354 return 0;
3355
3356 bad:
3357 if (sav) {
3358 key_freesav(sav, KEY_SADB_UNLOCKED);
3359 }
3360 m_freem(state->m);
3361 state->m = NULL;
3362 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3363 return error;
3364 }
3365
3366 int
3367 ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3368 {
3369 struct ip *ip = NULL;
3370 struct ipsecrequest *isr = NULL;
3371 struct secasindex saidx;
3372 struct secasvar *sav = NULL;
3373 int error = 0;
3374 struct sockaddr_in *sin;
3375
3376 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3377
3378 if (!state) {
3379 panic("state == NULL in ipsec4_output");
3380 }
3381 if (!state->m) {
3382 panic("state->m == NULL in ipsec4_output");
3383 }
3384 if (!state->dst) {
3385 panic("state->dst == NULL in ipsec4_output");
3386 }
3387
3388 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
3389
3390 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3391 printf("ipsec4_output: applied SP\n");
3392 kdebug_secpolicy(sp));
3393
3394 for (isr = sp->req; isr != NULL; isr = isr->next) {
3395 /* make SA index for search proper SA */
3396 ip = mtod(state->m, struct ip *);
3397 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3398 saidx.mode = isr->saidx.mode;
3399 saidx.reqid = isr->saidx.reqid;
3400 sin = (struct sockaddr_in *)&saidx.src;
3401 if (sin->sin_len == 0) {
3402 sin->sin_len = sizeof(*sin);
3403 sin->sin_family = AF_INET;
3404 sin->sin_port = IPSEC_PORT_ANY;
3405 bcopy(&ip->ip_src, &sin->sin_addr,
3406 sizeof(sin->sin_addr));
3407 }
3408 sin = (struct sockaddr_in *)&saidx.dst;
3409 if (sin->sin_len == 0) {
3410 sin->sin_len = sizeof(*sin);
3411 sin->sin_family = AF_INET;
3412 sin->sin_port = IPSEC_PORT_ANY;
3413 /*
3414 * Get port from packet if upper layer is UDP and nat traversal
3415 * is enabled and transport mode.
3416 */
3417
3418 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3419 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3420 if (ip->ip_p == IPPROTO_UDP) {
3421 struct udphdr *udp;
3422 u_int32_t hlen;
3423 #ifdef _IP_VHL
3424 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3425 #else
3426 hlen = ip->ip_hl << 2;
3427 #endif
3428 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3429 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3430 if (!state->m) {
3431 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3432 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3433 goto bad;
3434 }
3435 ip = mtod(state->m, struct ip *);
3436 }
3437 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3438 sin->sin_port = udp->uh_dport;
3439 }
3440 }
3441
3442 bcopy(&ip->ip_dst, &sin->sin_addr,
3443 sizeof(sin->sin_addr));
3444 }
3445
3446 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3447 /*
3448 * IPsec processing is required, but no SA found.
3449 * I assume that key_acquire() had been called
3450 * to get/establish the SA. Here I discard
3451 * this packet because it is responsibility for
3452 * upper layer to retransmit the packet.
3453 */
3454 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3455 goto bad;
3456 }
3457
3458 /* validity check */
3459 if (sav == NULL) {
3460 switch (ipsec_get_reqlevel(isr)) {
3461 case IPSEC_LEVEL_USE:
3462 continue;
3463 case IPSEC_LEVEL_REQUIRE:
3464 /* must be not reached here. */
3465 panic("ipsec4_output: no SA found, but required.");
3466 }
3467 }
3468
3469 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3470 goto bad;
3471 }
3472 }
3473
3474 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3475 if (sav) {
3476 key_freesav(sav, KEY_SADB_UNLOCKED);
3477 }
3478 return 0;
3479
3480 bad:
3481 if (sav) {
3482 key_freesav(sav, KEY_SADB_UNLOCKED);
3483 }
3484 m_freem(state->m);
3485 state->m = NULL;
3486 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3487 return error;
3488 }
3489
3490 #endif
3491
3492 /*
3493 * IPsec output logic for IPv6, transport mode.
3494 */
3495 static int
3496 ipsec6_output_trans_internal(
3497 struct ipsec_output_state *state,
3498 struct secasvar *sav,
3499 u_char *nexthdrp,
3500 struct mbuf *mprev)
3501 {
3502 struct ip6_hdr *ip6;
3503 size_t plen;
3504 int error = 0;
3505
3506 /* validity check */
3507 if (sav == NULL || sav->sah == NULL) {
3508 error = EINVAL;
3509 goto bad;
3510 }
3511
3512 /*
3513 * If there is no valid SA, we give up to process.
3514 * see same place at ipsec4_output().
3515 */
3516 if (sav->state != SADB_SASTATE_MATURE
3517 && sav->state != SADB_SASTATE_DYING) {
3518 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3519 error = EINVAL;
3520 goto bad;
3521 }
3522
3523 state->outgoing_if = sav->sah->outgoing_if;
3524
3525 switch (sav->sah->saidx.proto) {
3526 case IPPROTO_ESP:
3527 #if IPSEC_ESP
3528 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3529 #else
3530 m_freem(state->m);
3531 error = EINVAL;
3532 #endif
3533 break;
3534 case IPPROTO_AH:
3535 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3536 break;
3537 default:
3538 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3539 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3540 m_freem(state->m);
3541 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3542 error = EPROTONOSUPPORT;
3543 break;
3544 }
3545 if (error) {
3546 state->m = NULL;
3547 goto bad;
3548 }
3549 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3550 if (plen > IPV6_MAXPACKET) {
3551 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3552 "IPsec with IPv6 jumbogram is not supported\n"));
3553 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3554 error = EINVAL; /*XXX*/
3555 goto bad;
3556 }
3557 ip6 = mtod(state->m, struct ip6_hdr *);
3558 ip6->ip6_plen = htons((u_int16_t)plen);
3559
3560 return 0;
3561 bad:
3562 return error;
3563 }
3564
3565 int
3566 ipsec6_output_trans(
3567 struct ipsec_output_state *state,
3568 u_char *nexthdrp,
3569 struct mbuf *mprev,
3570 struct secpolicy *sp,
3571 __unused int flags,
3572 int *tun)
3573 {
3574 struct ip6_hdr *ip6;
3575 struct ipsecrequest *isr = NULL;
3576 struct secasindex saidx;
3577 int error = 0;
3578 struct sockaddr_in6 *sin6;
3579 struct secasvar *sav = NULL;
3580
3581 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3582
3583 if (!state) {
3584 panic("state == NULL in ipsec6_output_trans");
3585 }
3586 if (!state->m) {
3587 panic("state->m == NULL in ipsec6_output_trans");
3588 }
3589 if (!nexthdrp) {
3590 panic("nexthdrp == NULL in ipsec6_output_trans");
3591 }
3592 if (!mprev) {
3593 panic("mprev == NULL in ipsec6_output_trans");
3594 }
3595 if (!sp) {
3596 panic("sp == NULL in ipsec6_output_trans");
3597 }
3598 if (!tun) {
3599 panic("tun == NULL in ipsec6_output_trans");
3600 }
3601
3602 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3603 printf("ipsec6_output_trans: applyed SP\n");
3604 kdebug_secpolicy(sp));
3605
3606 *tun = 0;
3607 for (isr = sp->req; isr; isr = isr->next) {
3608 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3609 /* the rest will be handled by ipsec6_output_tunnel() */
3610 break;
3611 }
3612
3613 /* make SA index for search proper SA */
3614 ip6 = mtod(state->m, struct ip6_hdr *);
3615 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3616 saidx.mode = isr->saidx.mode;
3617 saidx.reqid = isr->saidx.reqid;
3618 sin6 = (struct sockaddr_in6 *)&saidx.src;
3619 if (sin6->sin6_len == 0) {
3620 sin6->sin6_len = sizeof(*sin6);
3621 sin6->sin6_family = AF_INET6;
3622 sin6->sin6_port = IPSEC_PORT_ANY;
3623 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3624 sizeof(ip6->ip6_src));
3625 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3626 /* fix scope id for comparing SPD */
3627 sin6->sin6_addr.s6_addr16[1] = 0;
3628 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3629 }
3630 }
3631 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3632 if (sin6->sin6_len == 0) {
3633 sin6->sin6_len = sizeof(*sin6);
3634 sin6->sin6_family = AF_INET6;
3635 sin6->sin6_port = IPSEC_PORT_ANY;
3636 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3637 sizeof(ip6->ip6_dst));
3638 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3639 /* fix scope id for comparing SPD */
3640 sin6->sin6_addr.s6_addr16[1] = 0;
3641 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3642 }
3643 }
3644
3645 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3646 /*
3647 * IPsec processing is required, but no SA found.
3648 * I assume that key_acquire() had been called
3649 * to get/establish the SA. Here I discard
3650 * this packet because it is responsibility for
3651 * upper layer to retransmit the packet.
3652 */
3653 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3654 error = ENOENT;
3655
3656 /*
3657 * Notify the fact that the packet is discarded
3658 * to ourselves. I believe this is better than
3659 * just silently discarding. (jinmei@kame.net)
3660 * XXX: should we restrict the error to TCP packets?
3661 * XXX: should we directly notify sockets via
3662 * pfctlinputs?
3663 */
3664 icmp6_error(state->m, ICMP6_DST_UNREACH,
3665 ICMP6_DST_UNREACH_ADMIN, 0);
3666 state->m = NULL; /* icmp6_error freed the mbuf */
3667 goto bad;
3668 }
3669
3670 /* validity check */
3671 if (sav == NULL) {
3672 switch (ipsec_get_reqlevel(isr)) {
3673 case IPSEC_LEVEL_USE:
3674 continue;
3675 case IPSEC_LEVEL_REQUIRE:
3676 /* must be not reached here. */
3677 panic("ipsec6_output_trans: no SA found, but required.");
3678 }
3679 }
3680
3681 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
3682 goto bad;
3683 }
3684 }
3685
3686 /* if we have more to go, we need a tunnel mode processing */
3687 if (isr != NULL) {
3688 *tun = 1;
3689 }
3690
3691 if (sav) {
3692 key_freesav(sav, KEY_SADB_UNLOCKED);
3693 }
3694 return 0;
3695
3696 bad:
3697 if (sav) {
3698 key_freesav(sav, KEY_SADB_UNLOCKED);
3699 }
3700 m_freem(state->m);
3701 state->m = NULL;
3702 return error;
3703 }
3704
3705 /*
3706 * IPsec output logic for IPv6, tunnel mode.
3707 */
3708 static int
3709 ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3710 {
3711 struct ip6_hdr *ip6;
3712 struct sockaddr_in6* dst6;
3713 struct route_in6 *ro6;
3714 size_t plen;
3715 int error = 0;
3716
3717 /* validity check */
3718 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3719 error = EINVAL;
3720 goto bad;
3721 }
3722
3723 /*
3724 * If there is no valid SA, we give up to process.
3725 * see same place at ipsec4_output().
3726 */
3727 if (sav->state != SADB_SASTATE_MATURE
3728 && sav->state != SADB_SASTATE_DYING) {
3729 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3730 error = EINVAL;
3731 goto bad;
3732 }
3733
3734 state->outgoing_if = sav->sah->outgoing_if;
3735
3736 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3737 /*
3738 * build IPsec tunnel.
3739 */
3740 state->m = ipsec6_splithdr(state->m);
3741 if (!state->m) {
3742 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3743 error = ENOMEM;
3744 goto bad;
3745 }
3746
3747 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3748 error = ipsec6_encapsulate(state->m, sav);
3749 if (error) {
3750 state->m = 0;
3751 goto bad;
3752 }
3753 ip6 = mtod(state->m, struct ip6_hdr *);
3754 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3755 struct ip *ip;
3756 struct sockaddr_in* dst4;
3757 struct route *ro4 = NULL;
3758 struct route ro4_copy;
3759 struct ip_out_args ipoa;
3760
3761 bzero(&ipoa, sizeof(ipoa));
3762 ipoa.ipoa_boundif = IFSCOPE_NONE;
3763 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
3764 ipoa.ipoa_sotc = SO_TC_UNSPEC;
3765 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3766
3767 if (must_be_last) {
3768 *must_be_last = 1;
3769 }
3770
3771 state->tunneled = 4; /* must not process any further in ip6_output */
3772 error = ipsec64_encapsulate(state->m, sav);
3773 if (error) {
3774 state->m = 0;
3775 goto bad;
3776 }
3777 /* Now we have an IPv4 packet */
3778 ip = mtod(state->m, struct ip *);
3779
3780 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3781 lck_mtx_lock(sadb_mutex);
3782 ro4 = (struct route *)&sav->sah->sa_route;
3783 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3784 if (ro4->ro_rt) {
3785 RT_LOCK(ro4->ro_rt);
3786 }
3787 if (ROUTE_UNUSABLE(ro4) ||
3788 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3789 if (ro4->ro_rt != NULL) {
3790 RT_UNLOCK(ro4->ro_rt);
3791 }
3792 ROUTE_RELEASE(ro4);
3793 }
3794 if (ro4->ro_rt == NULL) {
3795 dst4->sin_family = AF_INET;
3796 dst4->sin_len = sizeof(*dst4);
3797 dst4->sin_addr = ip->ip_dst;
3798 } else {
3799 RT_UNLOCK(ro4->ro_rt);
3800 }
3801 route_copyout(&ro4_copy, ro4, sizeof(struct route));
3802 // release sadb_mutex, after updating sah's route cache and getting a local copy
3803 lck_mtx_unlock(sadb_mutex);
3804 state->m = ipsec4_splithdr(state->m);
3805 if (!state->m) {
3806 error = ENOMEM;
3807 ROUTE_RELEASE(&ro4_copy);
3808 goto bad;
3809 }
3810 switch (sav->sah->saidx.proto) {
3811 case IPPROTO_ESP:
3812 #if IPSEC_ESP
3813 if ((error = esp4_output(state->m, sav)) != 0) {
3814 state->m = NULL;
3815 ROUTE_RELEASE(&ro4_copy);
3816 goto bad;
3817 }
3818 break;
3819
3820 #else
3821 m_freem(state->m);
3822 state->m = NULL;
3823 error = EINVAL;
3824 ROUTE_RELEASE(&ro4_copy);
3825 goto bad;
3826 #endif
3827 case IPPROTO_AH:
3828 if ((error = ah4_output(state->m, sav)) != 0) {
3829 state->m = NULL;
3830 ROUTE_RELEASE(&ro4_copy);
3831 goto bad;
3832 }
3833 break;
3834 default:
3835 ipseclog((LOG_ERR,
3836 "ipsec4_output: unknown ipsec protocol %d\n",
3837 sav->sah->saidx.proto));
3838 m_freem(state->m);
3839 state->m = NULL;
3840 error = EPROTONOSUPPORT;
3841 ROUTE_RELEASE(&ro4_copy);
3842 goto bad;
3843 }
3844
3845 if (state->m == 0) {
3846 error = ENOMEM;
3847 ROUTE_RELEASE(&ro4_copy);
3848 goto bad;
3849 }
3850 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3851 ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);
3852
3853 ip = mtod(state->m, struct ip *);
3854 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3855 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3856 state->m = NULL;
3857 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3858 lck_mtx_lock(sadb_mutex);
3859 route_copyin(&ro4_copy, ro4, sizeof(struct route));
3860 lck_mtx_unlock(sadb_mutex);
3861 if (error != 0) {
3862 goto bad;
3863 }
3864 goto done;
3865 } else {
3866 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3867 "unsupported inner family, spi=%u\n",
3868 (u_int32_t)ntohl(sav->spi)));
3869 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3870 error = EAFNOSUPPORT;
3871 goto bad;
3872 }
3873
3874 // grab sadb_mutex, before updating sah's route cache
3875 lck_mtx_lock(sadb_mutex);
3876 ro6 = &sav->sah->sa_route;
3877 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
3878 if (ro6->ro_rt) {
3879 RT_LOCK(ro6->ro_rt);
3880 }
3881 if (ROUTE_UNUSABLE(ro6) ||
3882 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
3883 if (ro6->ro_rt != NULL) {
3884 RT_UNLOCK(ro6->ro_rt);
3885 }
3886 ROUTE_RELEASE(ro6);
3887 }
3888 if (ro6->ro_rt == 0) {
3889 bzero(dst6, sizeof(*dst6));
3890 dst6->sin6_family = AF_INET6;
3891 dst6->sin6_len = sizeof(*dst6);
3892 dst6->sin6_addr = ip6->ip6_dst;
3893 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
3894 if (ro6->ro_rt) {
3895 RT_LOCK(ro6->ro_rt);
3896 }
3897 }
3898 if (ro6->ro_rt == 0) {
3899 ip6stat.ip6s_noroute++;
3900 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
3901 error = EHOSTUNREACH;
3902 // release sadb_mutex, after updating sah's route cache
3903 lck_mtx_unlock(sadb_mutex);
3904 goto bad;
3905 }
3906
3907 /*
3908 * adjust state->dst if tunnel endpoint is offlink
3909 *
3910 * XXX: caching rt_gateway value in the state is
3911 * not really good, since it may point elsewhere
3912 * when the gateway gets modified to a larger
3913 * sockaddr via rt_setgate(). This is currently
3914 * addressed by SA_SIZE roundup in that routine.
3915 */
3916 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
3917 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
3918 }
3919 RT_UNLOCK(ro6->ro_rt);
3920 ROUTE_RELEASE(&state->ro);
3921 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
3922 state->dst = (struct sockaddr *)dst6;
3923 state->tunneled = 6;
3924 // release sadb_mutex, after updating sah's route cache
3925 lck_mtx_unlock(sadb_mutex);
3926 }
3927
3928 state->m = ipsec6_splithdr(state->m);
3929 if (!state->m) {
3930 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3931 error = ENOMEM;
3932 goto bad;
3933 }
3934 ip6 = mtod(state->m, struct ip6_hdr *);
3935 switch (sav->sah->saidx.proto) {
3936 case IPPROTO_ESP:
3937 #if IPSEC_ESP
3938 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3939 #else
3940 m_freem(state->m);
3941 error = EINVAL;
3942 #endif
3943 break;
3944 case IPPROTO_AH:
3945 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3946 break;
3947 default:
3948 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3949 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3950 m_freem(state->m);
3951 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3952 error = EINVAL;
3953 break;
3954 }
3955 if (error) {
3956 state->m = NULL;
3957 goto bad;
3958 }
3959 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3960 if (plen > IPV6_MAXPACKET) {
3961 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3962 "IPsec with IPv6 jumbogram is not supported\n"));
3963 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3964 error = EINVAL; /*XXX*/
3965 goto bad;
3966 }
3967 ip6 = mtod(state->m, struct ip6_hdr *);
3968 ip6->ip6_plen = htons((u_int16_t)plen);
3969 done:
3970 return 0;
3971
3972 bad:
3973 return error;
3974 }
3975
3976 int
3977 ipsec6_output_tunnel(
3978 struct ipsec_output_state *state,
3979 struct secpolicy *sp,
3980 __unused int flags)
3981 {
3982 struct ip6_hdr *ip6;
3983 struct ipsecrequest *isr = NULL;
3984 struct secasindex saidx;
3985 struct secasvar *sav = NULL;
3986 int error = 0;
3987
3988 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3989
3990 if (!state) {
3991 panic("state == NULL in ipsec6_output_tunnel");
3992 }
3993 if (!state->m) {
3994 panic("state->m == NULL in ipsec6_output_tunnel");
3995 }
3996 if (!sp) {
3997 panic("sp == NULL in ipsec6_output_tunnel");
3998 }
3999
4000 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
4001 printf("ipsec6_output_tunnel: applyed SP\n");
4002 kdebug_secpolicy(sp));
4003
4004 /*
4005 * transport mode ipsec (before the 1st tunnel mode) is already
4006 * processed by ipsec6_output_trans().
4007 */
4008 for (isr = sp->req; isr; isr = isr->next) {
4009 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4010 break;
4011 }
4012 }
4013
4014 for (/* already initialized */; isr; isr = isr->next) {
4015 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4016 /* When tunnel mode, SA peers must be specified. */
4017 bcopy(&isr->saidx, &saidx, sizeof(saidx));
4018 } else {
4019 /* make SA index to look for a proper SA */
4020 struct sockaddr_in6 *sin6;
4021
4022 bzero(&saidx, sizeof(saidx));
4023 saidx.proto = isr->saidx.proto;
4024 saidx.mode = isr->saidx.mode;
4025 saidx.reqid = isr->saidx.reqid;
4026
4027 ip6 = mtod(state->m, struct ip6_hdr *);
4028 sin6 = (struct sockaddr_in6 *)&saidx.src;
4029 if (sin6->sin6_len == 0) {
4030 sin6->sin6_len = sizeof(*sin6);
4031 sin6->sin6_family = AF_INET6;
4032 sin6->sin6_port = IPSEC_PORT_ANY;
4033 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
4034 sizeof(ip6->ip6_src));
4035 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
4036 /* fix scope id for comparing SPD */
4037 sin6->sin6_addr.s6_addr16[1] = 0;
4038 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
4039 }
4040 }
4041 sin6 = (struct sockaddr_in6 *)&saidx.dst;
4042 if (sin6->sin6_len == 0) {
4043 sin6->sin6_len = sizeof(*sin6);
4044 sin6->sin6_family = AF_INET6;
4045 sin6->sin6_port = IPSEC_PORT_ANY;
4046 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
4047 sizeof(ip6->ip6_dst));
4048 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
4049 /* fix scope id for comparing SPD */
4050 sin6->sin6_addr.s6_addr16[1] = 0;
4051 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
4052 }
4053 }
4054 }
4055
4056 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
4057 /*
4058 * IPsec processing is required, but no SA found.
4059 * I assume that key_acquire() had been called
4060 * to get/establish the SA. Here I discard
4061 * this packet because it is responsibility for
4062 * upper layer to retransmit the packet.
4063 */
4064 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4065 error = ENOENT;
4066 goto bad;
4067 }
4068
4069 /* validity check */
4070 if (sav == NULL) {
4071 switch (ipsec_get_reqlevel(isr)) {
4072 case IPSEC_LEVEL_USE:
4073 continue;
4074 case IPSEC_LEVEL_REQUIRE:
4075 /* must be not reached here. */
4076 panic("ipsec6_output_tunnel: no SA found, but required.");
4077 }
4078 }
4079
4080 /*
4081 * If there is no valid SA, we give up to process.
4082 * see same place at ipsec4_output().
4083 */
4084 if (sav->state != SADB_SASTATE_MATURE
4085 && sav->state != SADB_SASTATE_DYING) {
4086 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4087 error = EINVAL;
4088 goto bad;
4089 }
4090
4091 int must_be_last = 0;
4092
4093 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4094 goto bad;
4095 }
4096
4097 if (must_be_last && isr->next) {
4098 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4099 "IPv4 must be outer layer, spi=%u\n",
4100 (u_int32_t)ntohl(sav->spi)));
4101 error = EINVAL;
4102 goto bad;
4103 }
4104 }
4105
4106 if (sav) {
4107 key_freesav(sav, KEY_SADB_UNLOCKED);
4108 }
4109 return 0;
4110
4111 bad:
4112 if (sav) {
4113 key_freesav(sav, KEY_SADB_UNLOCKED);
4114 }
4115 if (state->m) {
4116 m_freem(state->m);
4117 }
4118 state->m = NULL;
4119 return error;
4120 }
4121
4122 int
4123 ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4124 {
4125 int error = 0;
4126 struct secasvar *sav = NULL;
4127
4128 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4129
4130 if (state == NULL) {
4131 panic("state == NULL in ipsec6_output");
4132 }
4133 if (state->m == NULL) {
4134 panic("state->m == NULL in ipsec6_output");
4135 }
4136 if (nexthdrp == NULL) {
4137 panic("nexthdrp == NULL in ipsec6_output");
4138 }
4139 if (mprev == NULL) {
4140 panic("mprev == NULL in ipsec6_output");
4141 }
4142
4143 struct ip6_hdr *ip6 = mtod(state->m, struct ip6_hdr *);
4144
4145 struct sockaddr_in6 src = {};
4146 src.sin6_family = AF_INET6;
4147 src.sin6_len = sizeof(src);
4148 memcpy(&src.sin6_addr, &ip6->ip6_src, sizeof(src.sin6_addr));
4149
4150 struct sockaddr_in6 dst = {};
4151 dst.sin6_family = AF_INET6;
4152 dst.sin6_len = sizeof(dst);
4153 memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr));
4154
4155 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
4156 (struct sockaddr *)&src,
4157 (struct sockaddr *)&dst);
4158 if (sav == NULL) {
4159 goto bad;
4160 }
4161
4162 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4163 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4164 goto bad;
4165 }
4166 } else {
4167 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4168 goto bad;
4169 }
4170 }
4171
4172 if (sav) {
4173 key_freesav(sav, KEY_SADB_UNLOCKED);
4174 }
4175 return 0;
4176
4177 bad:
4178 if (sav) {
4179 key_freesav(sav, KEY_SADB_UNLOCKED);
4180 }
4181 m_freem(state->m);
4182 state->m = NULL;
4183 return error;
4184 }
4185
4186 #if INET
4187 /*
4188 * Chop IP header and option off from the payload.
4189 */
4190 struct mbuf *
4191 ipsec4_splithdr(struct mbuf *m)
4192 {
4193 struct mbuf *mh;
4194 struct ip *ip;
4195 int hlen;
4196
4197 if (m->m_len < sizeof(struct ip)) {
4198 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4199 }
4200 ip = mtod(m, struct ip *);
4201 #ifdef _IP_VHL
4202 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4203 #else
4204 hlen = ip->ip_hl << 2;
4205 #endif
4206 if (m->m_len > hlen) {
4207 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4208 if (!mh) {
4209 m_freem(m);
4210 return NULL;
4211 }
4212 M_COPY_PKTHDR(mh, m);
4213 MH_ALIGN(mh, hlen);
4214 m->m_flags &= ~M_PKTHDR;
4215 m_mchtype(m, MT_DATA);
4216 m->m_len -= hlen;
4217 m->m_data += hlen;
4218 mh->m_next = m;
4219 m = mh;
4220 m->m_len = hlen;
4221 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4222 } else if (m->m_len < hlen) {
4223 m = m_pullup(m, hlen);
4224 if (!m) {
4225 return NULL;
4226 }
4227 }
4228 return m;
4229 }
4230 #endif
4231
4232 struct mbuf *
4233 ipsec6_splithdr(struct mbuf *m)
4234 {
4235 struct mbuf *mh;
4236 struct ip6_hdr *ip6;
4237 int hlen;
4238
4239 if (m->m_len < sizeof(struct ip6_hdr)) {
4240 panic("ipsec6_splithdr: first mbuf too short");
4241 }
4242 ip6 = mtod(m, struct ip6_hdr *);
4243 hlen = sizeof(struct ip6_hdr);
4244 if (m->m_len > hlen) {
4245 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4246 if (!mh) {
4247 m_freem(m);
4248 return NULL;
4249 }
4250 M_COPY_PKTHDR(mh, m);
4251 MH_ALIGN(mh, hlen);
4252 m->m_flags &= ~M_PKTHDR;
4253 m_mchtype(m, MT_DATA);
4254 m->m_len -= hlen;
4255 m->m_data += hlen;
4256 mh->m_next = m;
4257 m = mh;
4258 m->m_len = hlen;
4259 bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
4260 } else if (m->m_len < hlen) {
4261 m = m_pullup(m, hlen);
4262 if (!m) {
4263 return NULL;
4264 }
4265 }
4266 return m;
4267 }
4268
4269 /* validate inbound IPsec tunnel packet. */
4270 int
4271 ipsec4_tunnel_validate(
4272 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4273 int off,
4274 u_int nxt0,
4275 struct secasvar *sav,
4276 sa_family_t *ifamily)
4277 {
4278 u_int8_t nxt = nxt0 & 0xff;
4279 struct sockaddr_in *sin;
4280 struct sockaddr_in osrc, odst, i4src, i4dst;
4281 struct sockaddr_in6 i6src, i6dst;
4282 int hlen;
4283 struct secpolicy *sp;
4284 struct ip *oip;
4285
4286 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4287
4288 #if DIAGNOSTIC
4289 if (m->m_len < sizeof(struct ip)) {
4290 panic("too short mbuf on ipsec4_tunnel_validate");
4291 }
4292 #endif
4293 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
4294 return 0;
4295 }
4296 if (m->m_pkthdr.len < off + sizeof(struct ip)) {
4297 return 0;
4298 }
4299 /* do not decapsulate if the SA is for transport mode only */
4300 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4301 return 0;
4302 }
4303
4304 oip = mtod(m, struct ip *);
4305 #ifdef _IP_VHL
4306 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4307 #else
4308 hlen = oip->ip_hl << 2;
4309 #endif
4310 if (hlen != sizeof(struct ip)) {
4311 return 0;
4312 }
4313
4314 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4315 if (sin->sin_family != AF_INET) {
4316 return 0;
4317 }
4318 if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0) {
4319 return 0;
4320 }
4321
4322 if (sav->sah->ipsec_if != NULL) {
4323 // the ipsec interface SAs don't have a policies.
4324 if (nxt == IPPROTO_IPV4) {
4325 *ifamily = AF_INET;
4326 } else if (nxt == IPPROTO_IPV6) {
4327 *ifamily = AF_INET6;
4328 } else {
4329 return 0;
4330 }
4331 return 1;
4332 }
4333
4334 /* XXX slow */
4335 bzero(&osrc, sizeof(osrc));
4336 bzero(&odst, sizeof(odst));
4337 osrc.sin_family = odst.sin_family = AF_INET;
4338 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4339 osrc.sin_addr = oip->ip_src;
4340 odst.sin_addr = oip->ip_dst;
4341 /*
4342 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
4343 * - if the inner destination is multicast address, there can be
4344 * multiple permissible inner source address. implementation
4345 * may want to skip verification of inner source address against
4346 * SPD selector.
4347 * - if the inner protocol is ICMP, the packet may be an error report
4348 * from routers on the other side of the VPN cloud (R in the
4349 * following diagram). in this case, we cannot verify inner source
4350 * address against SPD selector.
4351 * me -- gw === gw -- R -- you
4352 *
4353 * we consider the first bullet to be users responsibility on SPD entry
4354 * configuration (if you need to encrypt multicast traffic, set
4355 * the source range of SPD selector to 0.0.0.0/0, or have explicit
4356 * address ranges for possible senders).
4357 * the second bullet is not taken care of (yet).
4358 *
4359 * therefore, we do not do anything special about inner source.
4360 */
4361 if (nxt == IPPROTO_IPV4) {
4362 bzero(&i4src, sizeof(struct sockaddr_in));
4363 bzero(&i4dst, sizeof(struct sockaddr_in));
4364 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4365 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4366 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4367 (caddr_t)&i4src.sin_addr);
4368 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4369 (caddr_t)&i4dst.sin_addr);
4370 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4371 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4372 } else if (nxt == IPPROTO_IPV6) {
4373 bzero(&i6src, sizeof(struct sockaddr_in6));
4374 bzero(&i6dst, sizeof(struct sockaddr_in6));
4375 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4376 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4377 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4378 (caddr_t)&i6src.sin6_addr);
4379 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4380 (caddr_t)&i6dst.sin6_addr);
4381 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4382 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4383 } else {
4384 return 0; /* unsupported family */
4385 }
4386 if (!sp) {
4387 return 0;
4388 }
4389
4390 key_freesp(sp, KEY_SADB_UNLOCKED);
4391
4392 return 1;
4393 }
4394
4395 /* validate inbound IPsec tunnel packet. */
4396 int
4397 ipsec6_tunnel_validate(
4398 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4399 int off,
4400 u_int nxt0,
4401 struct secasvar *sav,
4402 sa_family_t *ifamily)
4403 {
4404 u_int8_t nxt = nxt0 & 0xff;
4405 struct sockaddr_in6 *sin6;
4406 struct sockaddr_in i4src, i4dst;
4407 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4408 struct secpolicy *sp;
4409 struct ip6_hdr *oip6;
4410
4411 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4412
4413 #if DIAGNOSTIC
4414 if (m->m_len < sizeof(struct ip6_hdr)) {
4415 panic("too short mbuf on ipsec6_tunnel_validate");
4416 }
4417 #endif
4418 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
4419 return 0;
4420 }
4421
4422 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
4423 return 0;
4424 }
4425 /* do not decapsulate if the SA is for transport mode only */
4426 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4427 return 0;
4428 }
4429
4430 oip6 = mtod(m, struct ip6_hdr *);
4431 /* AF_INET should be supported, but at this moment we don't. */
4432 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4433 if (sin6->sin6_family != AF_INET6) {
4434 return 0;
4435 }
4436 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr)) {
4437 return 0;
4438 }
4439
4440 if (sav->sah->ipsec_if != NULL) {
4441 // the ipsec interface SAs don't have a policies.
4442 if (nxt == IPPROTO_IPV4) {
4443 *ifamily = AF_INET;
4444 } else if (nxt == IPPROTO_IPV6) {
4445 *ifamily = AF_INET6;
4446 } else {
4447 return 0;
4448 }
4449 return 1;
4450 }
4451
4452 /* XXX slow */
4453 bzero(&osrc, sizeof(osrc));
4454 bzero(&odst, sizeof(odst));
4455 osrc.sin6_family = odst.sin6_family = AF_INET6;
4456 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4457 osrc.sin6_addr = oip6->ip6_src;
4458 odst.sin6_addr = oip6->ip6_dst;
4459
4460 /*
4461 * regarding to inner source address validation, see a long comment
4462 * in ipsec4_tunnel_validate.
4463 */
4464
4465 if (nxt == IPPROTO_IPV4) {
4466 bzero(&i4src, sizeof(struct sockaddr_in));
4467 bzero(&i4dst, sizeof(struct sockaddr_in));
4468 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4469 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4470 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4471 (caddr_t)&i4src.sin_addr);
4472 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4473 (caddr_t)&i4dst.sin_addr);
4474 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4475 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4476 } else if (nxt == IPPROTO_IPV6) {
4477 bzero(&i6src, sizeof(struct sockaddr_in6));
4478 bzero(&i6dst, sizeof(struct sockaddr_in6));
4479 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4480 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4481 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4482 (caddr_t)&i6src.sin6_addr);
4483 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4484 (caddr_t)&i6dst.sin6_addr);
4485 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4486 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4487 } else {
4488 return 0; /* unsupported family */
4489 }
4490 /*
4491 * when there is no suitable inbound policy for the packet of the ipsec
4492 * tunnel mode, the kernel never decapsulate the tunneled packet
4493 * as the ipsec tunnel mode even when the system wide policy is "none".
4494 * then the kernel leaves the generic tunnel module to process this
4495 * packet. if there is no rule of the generic tunnel, the packet
4496 * is rejected and the statistics will be counted up.
4497 */
4498 if (!sp) {
4499 return 0;
4500 }
4501 key_freesp(sp, KEY_SADB_UNLOCKED);
4502
4503 return 1;
4504 }
4505
4506 /*
4507 * Make a mbuf chain for encryption.
4508 * If the original mbuf chain contains a mbuf with a cluster,
4509 * allocate a new cluster and copy the data to the new cluster.
4510 * XXX: this hack is inefficient, but is necessary to handle cases
4511 * of TCP retransmission...
4512 */
4513 struct mbuf *
4514 ipsec_copypkt(struct mbuf *m)
4515 {
4516 struct mbuf *n, **mpp, *mnew;
4517
4518 for (n = m, mpp = &m; n; n = n->m_next) {
4519 if (n->m_flags & M_EXT) {
4520 /*
4521 * Make a copy only if there are more than one references
4522 * to the cluster.
4523 * XXX: is this approach effective?
4524 */
4525 if (
4526 m_get_ext_free(n) != NULL ||
4527 m_mclhasreference(n)
4528 ) {
4529 int remain, copied;
4530 struct mbuf *mm;
4531
4532 if (n->m_flags & M_PKTHDR) {
4533 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4534 if (mnew == NULL) {
4535 goto fail;
4536 }
4537 M_COPY_PKTHDR(mnew, n);
4538 } else {
4539 MGET(mnew, M_DONTWAIT, MT_DATA);
4540 if (mnew == NULL) {
4541 goto fail;
4542 }
4543 }
4544 mnew->m_len = 0;
4545 mm = mnew;
4546
4547 /*
4548 * Copy data. If we don't have enough space to
4549 * store the whole data, allocate a cluster
4550 * or additional mbufs.
4551 * XXX: we don't use m_copyback(), since the
4552 * function does not use clusters and thus is
4553 * inefficient.
4554 */
4555 remain = n->m_len;
4556 copied = 0;
4557 while (1) {
4558 int len;
4559 struct mbuf *mn;
4560
4561 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) {
4562 len = remain;
4563 } else { /* allocate a cluster */
4564 MCLGET(mm, M_DONTWAIT);
4565 if (!(mm->m_flags & M_EXT)) {
4566 m_free(mm);
4567 goto fail;
4568 }
4569 len = remain < MCLBYTES ?
4570 remain : MCLBYTES;
4571 }
4572
4573 bcopy(n->m_data + copied, mm->m_data,
4574 len);
4575
4576 copied += len;
4577 remain -= len;
4578 mm->m_len = len;
4579
4580 if (remain <= 0) { /* completed? */
4581 break;
4582 }
4583
4584 /* need another mbuf */
4585 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4586 if (mn == NULL) {
4587 goto fail;
4588 }
4589 mn->m_pkthdr.rcvif = NULL;
4590 mm->m_next = mn;
4591 mm = mn;
4592 }
4593
4594 /* adjust chain */
4595 mm->m_next = m_free(n);
4596 n = mm;
4597 *mpp = mnew;
4598 mpp = &n->m_next;
4599
4600 continue;
4601 }
4602 }
4603 *mpp = n;
4604 mpp = &n->m_next;
4605 }
4606
4607 return m;
4608 fail:
4609 m_freem(m);
4610 return NULL;
4611 }
4612
4613 /*
4614 * Tags are allocated as mbufs for now, since our minimum size is MLEN, we
4615 * should make use of up to that much space.
4616 */
4617 #define IPSEC_TAG_HEADER \
4618
4619 struct ipsec_tag {
4620 struct socket *socket;
4621 u_int32_t history_count;
4622 struct ipsec_history history[];
4623 #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
4624 /* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
4625 * are 32-bit:
4626 * Aligning to 64-bit since we case to m_tag which is 64-bit aligned.
4627 */
4628 } __attribute__ ((aligned(8)));
4629 #else
4630 };
4631 #endif
4632
4633 #define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
4634 #define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
4635 #define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
4636 sizeof(struct ipsec_history))
4637
4638 static struct ipsec_tag *
4639 ipsec_addaux(
4640 struct mbuf *m)
4641 {
4642 struct m_tag *tag;
4643
4644 /* Check if the tag already exists */
4645 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4646
4647 if (tag == NULL) {
4648 struct ipsec_tag *itag;
4649
4650 /* Allocate a tag */
4651 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4652 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4653
4654 if (tag) {
4655 itag = (struct ipsec_tag*)(tag + 1);
4656 itag->socket = 0;
4657 itag->history_count = 0;
4658
4659 m_tag_prepend(m, tag);
4660 }
4661 }
4662
4663 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4664 }
4665
4666 static struct ipsec_tag *
4667 ipsec_findaux(
4668 struct mbuf *m)
4669 {
4670 struct m_tag *tag;
4671
4672 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4673
4674 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4675 }
4676
4677 void
4678 ipsec_delaux(
4679 struct mbuf *m)
4680 {
4681 struct m_tag *tag;
4682
4683 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4684
4685 if (tag) {
4686 m_tag_delete(m, tag);
4687 }
4688 }
4689
4690 /* if the aux buffer is unnecessary, nuke it. */
4691 static void
4692 ipsec_optaux(
4693 struct mbuf *m,
4694 struct ipsec_tag *itag)
4695 {
4696 if (itag && itag->socket == NULL && itag->history_count == 0) {
4697 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4698 }
4699 }
4700
4701 int
4702 ipsec_setsocket(struct mbuf *m, struct socket *so)
4703 {
4704 struct ipsec_tag *tag;
4705
4706 /* if so == NULL, don't insist on getting the aux mbuf */
4707 if (so) {
4708 tag = ipsec_addaux(m);
4709 if (!tag) {
4710 return ENOBUFS;
4711 }
4712 } else {
4713 tag = ipsec_findaux(m);
4714 }
4715 if (tag) {
4716 tag->socket = so;
4717 ipsec_optaux(m, tag);
4718 }
4719 return 0;
4720 }
4721
4722 struct socket *
4723 ipsec_getsocket(struct mbuf *m)
4724 {
4725 struct ipsec_tag *itag;
4726
4727 itag = ipsec_findaux(m);
4728 if (itag) {
4729 return itag->socket;
4730 } else {
4731 return NULL;
4732 }
4733 }
4734
4735 int
4736 ipsec_addhist(
4737 struct mbuf *m,
4738 int proto,
4739 u_int32_t spi)
4740 {
4741 struct ipsec_tag *itag;
4742 struct ipsec_history *p;
4743 itag = ipsec_addaux(m);
4744 if (!itag) {
4745 return ENOBUFS;
4746 }
4747 if (itag->history_count == IPSEC_HISTORY_MAX) {
4748 return ENOSPC; /* XXX */
4749 }
4750 p = &itag->history[itag->history_count];
4751 itag->history_count++;
4752
4753 bzero(p, sizeof(*p));
4754 p->ih_proto = proto;
4755 p->ih_spi = spi;
4756
4757 return 0;
4758 }
4759
4760 struct ipsec_history *
4761 ipsec_gethist(
4762 struct mbuf *m,
4763 int *lenp)
4764 {
4765 struct ipsec_tag *itag;
4766
4767 itag = ipsec_findaux(m);
4768 if (!itag) {
4769 return NULL;
4770 }
4771 if (itag->history_count == 0) {
4772 return NULL;
4773 }
4774 if (lenp) {
4775 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4776 }
4777 return itag->history;
4778 }
4779
4780 void
4781 ipsec_clearhist(
4782 struct mbuf *m)
4783 {
4784 struct ipsec_tag *itag;
4785
4786 itag = ipsec_findaux(m);
4787 if (itag) {
4788 itag->history_count = 0;
4789 }
4790 ipsec_optaux(m, itag);
4791 }
4792
4793 __private_extern__ boolean_t
4794 ipsec_send_natt_keepalive(
4795 struct secasvar *sav)
4796 {
4797 struct mbuf *m = NULL;
4798 int error = 0;
4799 int keepalive_interval = natt_keepalive_interval;
4800
4801 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4802 lck_mtx_lock(sadb_mutex);
4803
4804 if (((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) || sav->remote_ike_port == 0) {
4805 lck_mtx_unlock(sadb_mutex);
4806 return FALSE;
4807 }
4808
4809 if (sav->natt_interval != 0) {
4810 keepalive_interval = (int)sav->natt_interval;
4811 }
4812
4813 // natt timestamp may have changed... reverify
4814 if ((natt_now - sav->natt_last_activity) < keepalive_interval) {
4815 lck_mtx_unlock(sadb_mutex);
4816 return FALSE;
4817 }
4818
4819 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) {
4820 lck_mtx_unlock(sadb_mutex);
4821 return FALSE; // don't send these from the kernel
4822 }
4823
4824 lck_mtx_unlock(sadb_mutex);
4825
4826 m = m_gethdr(M_NOWAIT, MT_DATA);
4827 if (m == NULL) {
4828 return FALSE;
4829 }
4830
4831 lck_mtx_lock(sadb_mutex);
4832 if (sav->sah->saidx.dst.ss_family == AF_INET) {
4833 struct ip_out_args ipoa = {};
4834 struct route ro = {};
4835
4836 ipoa.ipoa_boundif = IFSCOPE_NONE;
4837 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
4838 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4839 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4840
4841 struct ip *ip = (__typeof__(ip))m_mtod(m);
4842
4843 /*
4844 * Type 2: a UDP packet complete with IP header.
4845 * We must do this because UDP output requires
4846 * an inpcb which we don't have. UDP packet
4847 * contains one byte payload. The byte is set
4848 * to 0xFF.
4849 */
4850 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4851 m->m_len = sizeof(struct udpiphdr) + 1;
4852 bzero(m_mtod(m), m->m_len);
4853 m->m_pkthdr.len = m->m_len;
4854
4855 ip->ip_len = (u_short)m->m_len;
4856 ip->ip_ttl = (u_char)ip_defttl;
4857 ip->ip_p = IPPROTO_UDP;
4858 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4859 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4860 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4861 } else {
4862 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4863 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4864 }
4865 if (sav->natt_encapsulated_src_port != 0) {
4866 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
4867 } else {
4868 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4869 }
4870 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4871 uh->uh_dport = htons(sav->remote_ike_port);
4872 uh->uh_ulen = htons(1 + sizeof(*uh));
4873 uh->uh_sum = 0;
4874 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4875
4876 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4877 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) {
4878 ROUTE_RELEASE(&sav->sah->sa_route);
4879 }
4880
4881 route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4882 lck_mtx_unlock(sadb_mutex);
4883
4884 necp_mark_packet_as_keepalive(m, TRUE);
4885 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4886
4887 lck_mtx_lock(sadb_mutex);
4888 route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4889 } else if (sav->sah->saidx.dst.ss_family == AF_INET6) {
4890 struct ip6_out_args ip6oa = {};
4891 struct route_in6 ro6 = {};
4892
4893 ip6oa.ip6oa_flowadv.code = 0;
4894 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
4895 if (sav->sah->outgoing_if) {
4896 ip6oa.ip6oa_boundif = sav->sah->outgoing_if;
4897 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
4898 }
4899
4900 struct ip6_hdr *ip6 = (__typeof__(ip6))m_mtod(m);
4901
4902 /*
4903 * Type 2: a UDP packet complete with IPv6 header.
4904 * We must do this because UDP output requires
4905 * an inpcb which we don't have. UDP packet
4906 * contains one byte payload. The byte is set
4907 * to 0xFF.
4908 */
4909 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip6));
4910 m->m_len = sizeof(struct udphdr) + sizeof(struct ip6_hdr) + 1;
4911 bzero(m_mtod(m), m->m_len);
4912 m->m_pkthdr.len = m->m_len;
4913
4914 ip6->ip6_flow = 0;
4915 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
4916 ip6->ip6_vfc |= IPV6_VERSION;
4917 ip6->ip6_nxt = IPPROTO_UDP;
4918 ip6->ip6_hlim = (u_int8_t)ip6_defhlim;
4919 ip6->ip6_plen = htons(sizeof(struct udphdr) + 1);
4920 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4921 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
4922 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
4923 } else {
4924 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
4925 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
4926 }
4927
4928 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
4929 ip6->ip6_src.s6_addr16[1] = 0;
4930 }
4931 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
4932 ip6->ip6_dst.s6_addr16[1] = 0;
4933 }
4934
4935 if (sav->natt_encapsulated_src_port != 0) {
4936 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
4937 } else {
4938 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4939 }
4940 uh->uh_dport = htons(sav->remote_ike_port);
4941 uh->uh_ulen = htons(1 + sizeof(*uh));
4942 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip6) + sizeof(*uh)) = 0xFF;
4943 uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(uh->uh_ulen) + IPPROTO_UDP));
4944 m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
4945 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
4946
4947 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4948 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET6) {
4949 ROUTE_RELEASE(&sav->sah->sa_route);
4950 }
4951
4952 route_copyout((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
4953 lck_mtx_unlock(sadb_mutex);
4954
4955 necp_mark_packet_as_keepalive(m, TRUE);
4956 error = ip6_output(m, NULL, &ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa);
4957
4958 lck_mtx_lock(sadb_mutex);
4959 route_copyin((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
4960 } else {
4961 ipseclog((LOG_ERR, "nat keepalive: invalid address family %u\n", sav->sah->saidx.dst.ss_family));
4962 lck_mtx_unlock(sadb_mutex);
4963 m_freem(m);
4964 return FALSE;
4965 }
4966
4967 if (error == 0) {
4968 sav->natt_last_activity = natt_now;
4969 lck_mtx_unlock(sadb_mutex);
4970 return TRUE;
4971 }
4972
4973 lck_mtx_unlock(sadb_mutex);
4974 return FALSE;
4975 }
4976
4977 __private_extern__ bool
4978 ipsec_fill_offload_frame(ifnet_t ifp,
4979 struct secasvar *sav,
4980 struct ifnet_keepalive_offload_frame *frame,
4981 size_t frame_data_offset)
4982 {
4983 u_int8_t *data = NULL;
4984 struct ip *ip = NULL;
4985 struct udphdr *uh = NULL;
4986
4987 if (sav == NULL || sav->sah == NULL || frame == NULL ||
4988 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
4989 sav->sah->saidx.dst.ss_family != AF_INET ||
4990 !(sav->flags & SADB_X_EXT_NATT) ||
4991 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
4992 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
4993 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
4994 ((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) ||
4995 sav->remote_ike_port == 0 ||
4996 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
4997 /* SA is not eligible for keepalive offload on this interface */
4998 return FALSE;
4999 }
5000
5001 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
5002 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5003 /* Not enough room in this data frame */
5004 return FALSE;
5005 }
5006
5007 data = frame->data;
5008 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
5009 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
5010
5011 frame->length = (u_int8_t)(frame_data_offset + sizeof(struct udpiphdr) + 1);
5012 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
5013 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
5014
5015 bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
5016
5017 ip->ip_v = IPVERSION;
5018 ip->ip_hl = sizeof(struct ip) >> 2;
5019 ip->ip_off &= htons(~IP_OFFMASK);
5020 ip->ip_off &= htons(~IP_MF);
5021 switch (ip4_ipsec_dfbit) {
5022 case 0: /* clear DF bit */
5023 ip->ip_off &= htons(~IP_DF);
5024 break;
5025 case 1: /* set DF bit */
5026 ip->ip_off |= htons(IP_DF);
5027 break;
5028 default: /* copy DF bit */
5029 break;
5030 }
5031 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
5032 if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) {
5033 ip->ip_id = 0;
5034 } else {
5035 ip->ip_id = ip_randomid();
5036 }
5037 ip->ip_ttl = (u_char)ip_defttl;
5038 ip->ip_p = IPPROTO_UDP;
5039 ip->ip_sum = 0;
5040 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5041 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5042 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5043 } else {
5044 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5045 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5046 }
5047 ip->ip_sum = in_cksum_hdr_opt(ip);
5048 /* Fill out the UDP header */
5049 if (sav->natt_encapsulated_src_port != 0) {
5050 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5051 } else {
5052 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5053 }
5054 uh->uh_dport = htons(sav->remote_ike_port);
5055 uh->uh_ulen = htons(1 + sizeof(*uh));
5056 uh->uh_sum = 0;
5057 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
5058
5059 if (sav->natt_offload_interval != 0) {
5060 frame->interval = sav->natt_offload_interval;
5061 } else if (sav->natt_interval != 0) {
5062 frame->interval = sav->natt_interval;
5063 } else {
5064 frame->interval = (u_int16_t)natt_keepalive_interval;
5065 }
5066 return TRUE;
5067 }
5068
5069 static int
5070 sysctl_ipsec_wake_packet SYSCTL_HANDLER_ARGS
5071 {
5072 #pragma unused(oidp, arg1, arg2)
5073 if (req->newptr != USER_ADDR_NULL) {
5074 ipseclog((LOG_ERR, "ipsec: invalid parameters"));
5075 return EINVAL;
5076 }
5077
5078 struct proc *p = current_proc();
5079 if (p != NULL) {
5080 uid_t uid = kauth_cred_getuid(proc_ucred(p));
5081 if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET, 0) != 0) {
5082 ipseclog((LOG_ERR, "process does not hold necessary entitlement to get ipsec wake packet"));
5083 return EPERM;
5084 }
5085
5086 int result = sysctl_io_opaque(req, &ipsec_wake_pkt, sizeof(ipsec_wake_pkt), NULL);
5087
5088 ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u result %d",
5089 __func__,
5090 ipsec_wake_pkt.wake_uuid,
5091 ipsec_wake_pkt.wake_pkt_spi,
5092 ipsec_wake_pkt.wake_pkt_seq,
5093 ipsec_wake_pkt.wake_pkt_len,
5094 result));
5095
5096 return result;
5097 }
5098
5099 return EINVAL;
5100 }
5101
5102 SYSCTL_PROC(_net_link_generic_system, OID_AUTO, ipsec_wake_pkt, CTLTYPE_STRUCT | CTLFLAG_RD |
5103 CTLFLAG_LOCKED, 0, 0, &sysctl_ipsec_wake_packet, "S,ipsec wake packet", "");
5104
5105 void
5106 ipsec_save_wake_packet(struct mbuf *wake_mbuf, u_int32_t spi, u_int32_t seq)
5107 {
5108 if (wake_mbuf == NULL) {
5109 ipseclog((LOG_ERR, "ipsec: bad wake packet"));
5110 return;
5111 }
5112
5113 lck_mtx_lock(sadb_mutex);
5114 if (__probable(!ipsec_save_wake_pkt)) {
5115 goto done;
5116 }
5117
5118 u_int16_t max_len = (wake_mbuf->m_pkthdr.len > IPSEC_MAX_WAKE_PKT_LEN) ? IPSEC_MAX_WAKE_PKT_LEN : (u_int16_t)wake_mbuf->m_pkthdr.len;
5119 m_copydata(wake_mbuf, 0, max_len, (void *)ipsec_wake_pkt.wake_pkt);
5120 ipsec_wake_pkt.wake_pkt_len = max_len;
5121
5122 ipsec_wake_pkt.wake_pkt_spi = spi;
5123 ipsec_wake_pkt.wake_pkt_seq = seq;
5124
5125 ipseclog((LOG_NOTICE, "%s: uuid %s spi %u seq %u len %u",
5126 __func__,
5127 ipsec_wake_pkt.wake_uuid,
5128 ipsec_wake_pkt.wake_pkt_spi,
5129 ipsec_wake_pkt.wake_pkt_seq,
5130 ipsec_wake_pkt.wake_pkt_len));
5131
5132 struct kev_msg ev_msg;
5133 bzero(&ev_msg, sizeof(ev_msg));
5134
5135 ev_msg.vendor_code = KEV_VENDOR_APPLE;
5136 ev_msg.kev_class = KEV_NETWORK_CLASS;
5137 ev_msg.kev_subclass = KEV_IPSEC_SUBCLASS;
5138 ev_msg.event_code = KEV_IPSEC_WAKE_PACKET;
5139
5140 struct ipsec_wake_pkt_event_data event_data;
5141 strlcpy(event_data.wake_uuid, ipsec_wake_pkt.wake_uuid, sizeof(event_data.wake_uuid));
5142 ev_msg.dv[0].data_ptr = &event_data;
5143 ev_msg.dv[0].data_length = sizeof(event_data);
5144
5145 int result = kev_post_msg(&ev_msg);
5146 if (result != 0) {
5147 os_log_error(OS_LOG_DEFAULT, "%s: kev_post_msg() failed with error %d for wake uuid %s",
5148 __func__, result, ipsec_wake_pkt.wake_uuid);
5149 }
5150
5151 ipsec_save_wake_pkt = false;
5152 done:
5153 lck_mtx_unlock(sadb_mutex);
5154 return;
5155 }
5156
5157 static void
5158 ipsec_get_local_ports(void)
5159 {
5160 errno_t error;
5161 ifnet_t *ifp_list;
5162 uint32_t count, i;
5163 static uint8_t port_bitmap[bitstr_size(IP_PORTRANGE_SIZE)];
5164
5165 error = ifnet_list_get_all(IFNET_FAMILY_IPSEC, &ifp_list, &count);
5166 if (error != 0) {
5167 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_list_get_all() failed %d",
5168 __func__, error);
5169 return;
5170 }
5171 for (i = 0; i < count; i++) {
5172 ifnet_t ifp = ifp_list[i];
5173
5174 /*
5175 * Get all the TCP and UDP ports for IPv4 and IPv6
5176 */
5177 error = ifnet_get_local_ports_extended(ifp, PF_UNSPEC,
5178 IFNET_GET_LOCAL_PORTS_WILDCARDOK |
5179 IFNET_GET_LOCAL_PORTS_NOWAKEUPOK |
5180 IFNET_GET_LOCAL_PORTS_ANYTCPSTATEOK,
5181 port_bitmap);
5182 if (error != 0) {
5183 os_log_error(OS_LOG_DEFAULT, "%s: ifnet_get_local_ports_extended(%s) failed %d",
5184 __func__, if_name(ifp), error);
5185 }
5186 }
5187 ifnet_list_free(ifp_list);
5188 }
5189
5190 static IOReturn
5191 ipsec_sleep_wake_handler(void *target, void *refCon, UInt32 messageType,
5192 void *provider, void *messageArgument, vm_size_t argSize)
5193 {
5194 #pragma unused(target, refCon, provider, messageArgument, argSize)
5195 switch (messageType) {
5196 case kIOMessageSystemWillSleep:
5197 {
5198 ipsec_get_local_ports();
5199 ipsec_save_wake_pkt = false;
5200 memset(&ipsec_wake_pkt, 0, sizeof(ipsec_wake_pkt));
5201 IOPMCopySleepWakeUUIDKey(ipsec_wake_pkt.wake_uuid,
5202 sizeof(ipsec_wake_pkt.wake_uuid));
5203 ipseclog((LOG_NOTICE,
5204 "ipsec: system will sleep, uuid: %s", ipsec_wake_pkt.wake_uuid));
5205 break;
5206 }
5207 case kIOMessageSystemHasPoweredOn:
5208 {
5209 char wake_reason[128] = {0};
5210 size_t size = sizeof(wake_reason);
5211 if (kernel_sysctlbyname("kern.wakereason", wake_reason, &size, NULL, 0) == 0) {
5212 if (strnstr(wake_reason, "wlan", size) == 0 ||
5213 strnstr(wake_reason, "WL.OutboxNotEmpty", size) == 0 ||
5214 strnstr(wake_reason, "baseband", size) == 0 ||
5215 strnstr(wake_reason, "bluetooth", size) == 0 ||
5216 strnstr(wake_reason, "BT.OutboxNotEmpty", size) == 0) {
5217 ipsec_save_wake_pkt = true;
5218 ipseclog((LOG_NOTICE,
5219 "ipsec: system has powered on, uuid: %s reason %s", ipsec_wake_pkt.wake_uuid, wake_reason));
5220 }
5221 }
5222 break;
5223 }
5224 default:
5225 break;
5226 }
5227
5228 return IOPMAckImplied;
5229 }
5230
5231 void
5232 ipsec_monitor_sleep_wake(void)
5233 {
5234 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
5235
5236 if (sleep_wake_handle == NULL) {
5237 sleep_wake_handle = registerSleepWakeInterest(ipsec_sleep_wake_handler,
5238 NULL, NULL);
5239 if (sleep_wake_handle != NULL) {
5240 ipseclog((LOG_INFO,
5241 "ipsec: monitoring sleep wake"));
5242 }
5243 }
5244 }