apple/xnu (tag xnu-6153.11.26): bsd/netinet6/ipsec.c
1/*
2 * Copyright (c) 2008-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30/* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61/*
62 * IPsec controller part.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/mbuf.h>
69#include <sys/mcache.h>
70#include <sys/domain.h>
71#include <sys/protosw.h>
72#include <sys/socket.h>
73#include <sys/socketvar.h>
74#include <sys/errno.h>
75#include <sys/time.h>
76#include <sys/kernel.h>
77#include <sys/syslog.h>
78#include <sys/sysctl.h>
79#include <sys/priv.h>
80#include <kern/locks.h>
81#include <sys/kauth.h>
82#include <libkern/OSAtomic.h>
83
84#include <net/if.h>
85#include <net/route.h>
86#include <net/if_ipsec.h>
87
88#include <netinet/in.h>
89#include <netinet/in_systm.h>
90#include <netinet/ip.h>
91#include <netinet/ip_var.h>
92#include <netinet/in_var.h>
93#include <netinet/udp.h>
94#include <netinet/udp_var.h>
95#include <netinet/ip_ecn.h>
96#if INET6
97#include <netinet6/ip6_ecn.h>
98#endif
99#include <netinet/tcp.h>
100#include <netinet/udp.h>
101
102#include <netinet/ip6.h>
103#if INET6
104#include <netinet6/ip6_var.h>
105#endif
106#include <netinet/in_pcb.h>
107#if INET6
108#include <netinet/icmp6.h>
109#endif
110
111#include <netinet6/ipsec.h>
112#if INET6
113#include <netinet6/ipsec6.h>
114#endif
115#include <netinet6/ah.h>
116#if INET6
117#include <netinet6/ah6.h>
118#endif
119#if IPSEC_ESP
120#include <netinet6/esp.h>
121#if INET6
122#include <netinet6/esp6.h>
123#endif
124#endif
125#include <netkey/key.h>
126#include <netkey/keydb.h>
127#include <netkey/key_debug.h>
128
129#include <net/net_osdep.h>
130
131#include <IOKit/pwr_mgt/IOPM.h>
132
133#if IPSEC_DEBUG
134int ipsec_debug = 1;
135#else
136int ipsec_debug = 0;
137#endif
138
139#include <sys/kdebug.h>
140#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
141#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
142#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
143#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
144#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
145
146extern lck_mtx_t *sadb_mutex;
147
148struct ipsecstat ipsecstat;
149int ip4_ah_cleartos = 1;
150int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
151int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
152int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
153int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
154int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
155int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
156struct secpolicy ip4_def_policy;
157int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
158int ip4_esp_randpad = -1;
159int esp_udp_encap_port = 0;
160static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
161extern int natt_keepalive_interval;
162extern u_int64_t natt_now;
163
164struct ipsec_tag;
165
166void *sleep_wake_handle = NULL;
167bool ipsec_save_wake_pkt = false;
168
169SYSCTL_DECL(_net_inet_ipsec);
170#if INET6
171SYSCTL_DECL(_net_inet6_ipsec6);
172#endif
173/* net.inet.ipsec */
174SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
175 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
176SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
177 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
178SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
179 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
180SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
181 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
182SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
183 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
184SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
185 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
186SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
187 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
188SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
189 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
190SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
191 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
192SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
193 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
194SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
195 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
196SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
197 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
198
199/* for performance, we bypass ipsec until a security policy is set */
200int ipsec_bypass = 1;
201SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass, 0, "");
202
203/*
204 * NAT Traversal requires a UDP port for encapsulation,
205 * esp_udp_encap_port controls which port is used. Racoon
206 * must set this port to the port racoon is using locally
207 * for nat traversal.
208 */
209SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
210 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
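/*
 * Example (minimal userland sketch, assuming a privileged caller): an IKE
 * daemon such as racoon can publish the local UDP port it uses for NAT
 * traversal through the net.inet.ipsec.esp_port sysctl declared above.
 * The value 4500 is only the conventional IKE NAT-T port, not a requirement.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
publish_natt_port(void)
{
	int port = 4500;        /* local UDP port used for ESP-in-UDP encapsulation */

	/* Writes esp_udp_encap_port; writing requires privilege. */
	return sysctlbyname("net.inet.ipsec.esp_port", NULL, NULL,
	    &port, sizeof(port));
}
#endif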
211
212#if INET6
213struct ipsecstat ipsec6stat;
214int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
215int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
216int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
217int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
218struct secpolicy ip6_def_policy;
219int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
220int ip6_esp_randpad = -1;
221
222/* net.inet6.ipsec6 */
223SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
224 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
225SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
226 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
227SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
228 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
229SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
231SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
232 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
233SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
234 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
235SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
236 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
237SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
238 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
239SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
240 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
241#endif /* INET6 */
242
243SYSCTL_DECL(_net_link_generic_system);
244
245struct ipsec_wake_pkt_info ipsec_wake_pkt;
246
247static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *,
248 int, int, int);
249static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int,
250 struct mbuf *, int);
251static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
252#if INET6
253static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
254#endif
255static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
256static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
257static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
258#if INET6
259static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
260static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
261#endif
262static struct inpcbpolicy *ipsec_newpcbpolicy(void);
263static void ipsec_delpcbpolicy(struct inpcbpolicy *);
264static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
265static int ipsec_set_policy(struct secpolicy **pcb_sp,
266 int optname, caddr_t request, size_t len, int priv);
267static void vshiftl(unsigned char *, int, int);
268static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
269#if INET6
270static int ipsec64_encapsulate(struct mbuf *, struct secasvar *);
271static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
272static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
273#endif
274static struct ipsec_tag *ipsec_addaux(struct mbuf *);
275static struct ipsec_tag *ipsec_findaux(struct mbuf *);
276static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
277int ipsec_send_natt_keepalive(struct secasvar *sav);
278bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
279
280extern bool IOPMCopySleepWakeUUIDKey(char *, size_t);
281extern void *registerSleepWakeInterest(void *, void *, void *);
282
283static int
284sysctl_def_policy SYSCTL_HANDLER_ARGS
285{
286 int new_policy = ip4_def_policy.policy;
287 int error = sysctl_handle_int(oidp, &new_policy, 0, req);
288
289#pragma unused(arg1, arg2)
290 if (error == 0) {
291 if (new_policy != IPSEC_POLICY_NONE &&
292 new_policy != IPSEC_POLICY_DISCARD) {
293 return EINVAL;
294 }
295 ip4_def_policy.policy = new_policy;
296
297 /* Turn off the bypass if the default security policy changes */
298 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
299 ipsec_bypass = 0;
300 }
301 }
302
303 return error;
304}
305
306/*
307 * For OUTBOUND packet having a socket. Searching SPD for packet,
308 * and return a pointer to SP.
309 * OUT: NULL: no appropriate SP found, the following value is set to error.
310 * 0 : bypass
311 * EACCES : discard packet.
312 * ENOENT : ipsec_acquire() in progress, maybe.
313 * others : error occurred.
314 * others: a pointer to SP
315 *
316 * NOTE: IPv6 mapped address concern is implemented here.
317 */
318struct secpolicy *
319ipsec4_getpolicybysock(struct mbuf *m,
320 u_int dir,
321 struct socket *so,
322 int *error)
323{
324 struct inpcbpolicy *pcbsp = NULL;
325 struct secpolicy *currsp = NULL; /* policy on socket */
326 struct secpolicy *kernsp = NULL; /* policy on kernel */
327
328 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
329 /* sanity check */
330 if (m == NULL || so == NULL || error == NULL) {
331 panic("ipsec4_getpolicybysock: NULL pointer was passed.\n");
332 }
333
334 if (so->so_pcb == NULL) {
335 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
336 return ipsec4_getpolicybyaddr(m, dir, 0, error);
337 }
338
339 switch (SOCK_DOM(so)) {
340 case PF_INET:
341 pcbsp = sotoinpcb(so)->inp_sp;
342 break;
343#if INET6
344 case PF_INET6:
345 pcbsp = sotoin6pcb(so)->in6p_sp;
346 break;
347#endif
348 }
349
350 if (!pcbsp) {
351 /* Socket has not specified an IPSEC policy */
352 return ipsec4_getpolicybyaddr(m, dir, 0, error);
353 }
354
355 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0, 0, 0, 0, 0);
356
357 switch (SOCK_DOM(so)) {
358 case PF_INET:
359 /* set spidx in pcb */
360 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
361 break;
362#if INET6
363 case PF_INET6:
364 /* set spidx in pcb */
365 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
366 break;
367#endif
368 default:
369 panic("ipsec4_getpolicybysock: unsupported address family\n");
370 }
371 if (*error) {
372 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1, *error, 0, 0, 0);
373 return NULL;
374 }
375
376 /* sanity check */
377 if (pcbsp == NULL) {
378 panic("ipsec4_getpolicybysock: pcbsp is NULL.\n");
379 }
380
381 switch (dir) {
382 case IPSEC_DIR_INBOUND:
383 currsp = pcbsp->sp_in;
384 break;
385 case IPSEC_DIR_OUTBOUND:
386 currsp = pcbsp->sp_out;
387 break;
388 default:
389 panic("ipsec4_getpolicybysock: illegal direction.\n");
390 }
391
392 /* sanity check */
393 if (currsp == NULL) {
394 panic("ipsec4_getpolicybysock: currsp is NULL.\n");
395 }
396
397 /* when privileged socket */
398 if (pcbsp->priv) {
399 switch (currsp->policy) {
400 case IPSEC_POLICY_BYPASS:
401 lck_mtx_lock(sadb_mutex);
402 currsp->refcnt++;
403 lck_mtx_unlock(sadb_mutex);
404 *error = 0;
405 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2, *error, 0, 0, 0);
406 return currsp;
407
408 case IPSEC_POLICY_ENTRUST:
409 /* look for a policy in SPD */
410 kernsp = key_allocsp(&currsp->spidx, dir);
411
412 /* SP found */
413 if (kernsp != NULL) {
414 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
415 printf("DP ipsec4_getpolicybysock called "
416 "to allocate SP:0x%llx\n",
417 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
418 *error = 0;
419 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3, *error, 0, 0, 0);
420 return kernsp;
421 }
422
423 /* no SP found */
424 lck_mtx_lock(sadb_mutex);
425 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
426 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
427 ipseclog((LOG_INFO,
428 "fixed system default policy: %d->%d\n",
429 ip4_def_policy.policy, IPSEC_POLICY_NONE));
430 ip4_def_policy.policy = IPSEC_POLICY_NONE;
431 }
432 ip4_def_policy.refcnt++;
433 lck_mtx_unlock(sadb_mutex);
434 *error = 0;
435 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4, *error, 0, 0, 0);
436 return &ip4_def_policy;
437
438 case IPSEC_POLICY_IPSEC:
439 lck_mtx_lock(sadb_mutex);
440 currsp->refcnt++;
441 lck_mtx_unlock(sadb_mutex);
442 *error = 0;
443 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5, *error, 0, 0, 0);
444 return currsp;
445
446 default:
447 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
448 "Invalid policy for PCB %d\n", currsp->policy));
449 *error = EINVAL;
450 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6, *error, 0, 0, 0);
451 return NULL;
452 }
453 /* NOTREACHED */
454 }
455
456 /* when non-privileged socket */
457 /* look for a policy in SPD */
458 kernsp = key_allocsp(&currsp->spidx, dir);
459
460 /* SP found */
461 if (kernsp != NULL) {
462 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
463 printf("DP ipsec4_getpolicybysock called "
464 "to allocate SP:0x%llx\n",
465 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
466 *error = 0;
467 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7, *error, 0, 0, 0);
468 return kernsp;
469 }
470
471 /* no SP found */
472 switch (currsp->policy) {
473 case IPSEC_POLICY_BYPASS:
474 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
475 "Illegal policy for non-priviliged defined %d\n",
476 currsp->policy));
477 *error = EINVAL;
478 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8, *error, 0, 0, 0);
479 return NULL;
480
481 case IPSEC_POLICY_ENTRUST:
482 lck_mtx_lock(sadb_mutex);
483 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
484 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
485 ipseclog((LOG_INFO,
486 "fixed system default policy: %d->%d\n",
487 ip4_def_policy.policy, IPSEC_POLICY_NONE));
488 ip4_def_policy.policy = IPSEC_POLICY_NONE;
489 }
490 ip4_def_policy.refcnt++;
491 lck_mtx_unlock(sadb_mutex);
492 *error = 0;
493 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9, *error, 0, 0, 0);
494 return &ip4_def_policy;
495
496 case IPSEC_POLICY_IPSEC:
497 lck_mtx_lock(sadb_mutex);
498 currsp->refcnt++;
499 lck_mtx_unlock(sadb_mutex);
500 *error = 0;
501 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10, *error, 0, 0, 0);
502 return currsp;
503
504 default:
505 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
506 "Invalid policy for PCB %d\n", currsp->policy));
507 *error = EINVAL;
508 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11, *error, 0, 0, 0);
509 return NULL;
510 }
511 /* NOTREACHED */
512}
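/*
 * Example (minimal caller sketch following the contract documented above):
 * a NULL return is disambiguated through *error (0 = bypass, EACCES =
 * discard, ENOENT = SA negotiation still in progress), and a non-NULL SP
 * carries a reference that must be dropped with key_freesp() when done,
 * as the callers later in this file do.  Error handling is abbreviated.
 */
#if 0
static int
example_outbound_policy_check(struct mbuf *m, struct socket *so)
{
	int error = 0;
	struct secpolicy *sp;

	sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
	if (sp == NULL) {
		return error;   /* 0: send in the clear; EACCES/ENOENT/...: caller handles */
	}

	/* ... act on sp->policy and sp->req ... */

	key_freesp(sp, KEY_SADB_UNLOCKED);      /* release the reference taken above */
	return 0;
}
#endif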
513
514/*
515 * For FORWARDING packet or OUTBOUND without a socket. Searching SPD for packet,
516 * and return a pointer to SP.
517 * OUT: positive: a pointer to the entry for security policy leaf matched.
518 * NULL: no appropriate SP found, the following value is set to error.
519 * 0 : bypass
520 * EACCES : discard packet.
521 * ENOENT : ipsec_acquire() in progress, maybe.
522 * others : error occurred.
523 */
524struct secpolicy *
525ipsec4_getpolicybyaddr(struct mbuf *m,
526 u_int dir,
527 int flag,
528 int *error)
529{
530 struct secpolicy *sp = NULL;
531
532 if (ipsec_bypass != 0) {
533 return 0;
534 }
535
536 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
537
538 /* sanity check */
539 if (m == NULL || error == NULL) {
540 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n");
541 }
542 {
543 struct secpolicyindex spidx;
544
545 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
546 bzero(&spidx, sizeof(spidx));
547
548 /* make an index to look for a policy */
549 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
550 (flag & IP_FORWARDING) ? 0 : 1);
551
552 if (*error != 0) {
553 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, *error, 0, 0, 0);
554 return NULL;
555 }
556
557 sp = key_allocsp(&spidx, dir);
558 }
559
560 /* SP found */
561 if (sp != NULL) {
562 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
563 printf("DP ipsec4_getpolicybyaddr called "
564 "to allocate SP:0x%llx\n",
565 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
566 *error = 0;
567 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0);
568 return sp;
569 }
570
571 /* no SP found */
572 lck_mtx_lock(sadb_mutex);
573 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
574 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
575 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
576 ip4_def_policy.policy,
577 IPSEC_POLICY_NONE));
578 ip4_def_policy.policy = IPSEC_POLICY_NONE;
579 }
580 ip4_def_policy.refcnt++;
581 lck_mtx_unlock(sadb_mutex);
582 *error = 0;
583 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3, *error, 0, 0, 0);
584 return &ip4_def_policy;
585}
586
587/* Match with bound interface rather than src addr.
588 * Unlike getpolicybyaddr, do not set the default policy.
589 * Return 0 if should continue processing, or -1 if packet
590 * should be dropped.
591 */
592int
593ipsec4_getpolicybyinterface(struct mbuf *m,
594 u_int dir,
595 int *flags,
596 struct ip_out_args *ipoa,
597 struct secpolicy **sp)
598{
599 struct secpolicyindex spidx;
600 int error = 0;
601
602 if (ipsec_bypass != 0) {
603 return 0;
604 }
605
606 /* Sanity check */
607 if (m == NULL || ipoa == NULL || sp == NULL) {
608 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n");
609 }
610
611 if (ipoa->ipoa_boundif == IFSCOPE_NONE) {
612 return 0;
613 }
614
615 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
616 bzero(&spidx, sizeof(spidx));
617
618 /* make an index to look for a policy */
619 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
620 ipoa->ipoa_boundif, 4);
621
622 if (error != 0) {
623 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
624 return 0;
625 }
626
627 *sp = key_allocsp(&spidx, dir);
628
629 /* Return SP, whether NULL or not */
630 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
631 if ((*sp)->ipsec_if == NULL) {
632 /* Invalid to capture on an interface without redirect */
633 key_freesp(*sp, KEY_SADB_UNLOCKED);
634 *sp = NULL;
635 return -1;
636 } else if ((*sp)->disabled) {
637 /* Disabled policies go in the clear */
638 key_freesp(*sp, KEY_SADB_UNLOCKED);
639 *sp = NULL;
640 *flags |= IP_NOIPSEC; /* Avoid later IPsec check */
641 } else {
642 /* If policy is enabled, redirect to ipsec interface */
643 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
644 }
645 }
646
647 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
648
649 return 0;
650}
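/*
 * Example (minimal caller sketch for the interface-scoped lookup above):
 * a -1 return means the packet must be dropped; otherwise *sp is either
 * NULL (no matching policy, or a disabled one that set IP_NOIPSEC in
 * *flags) or an enabled policy whose ipsec interface index has already
 * been written into ipoa->ipoa_boundif.  Error handling is abbreviated
 * and the errno choice is illustrative only.
 */
#if 0
static int
example_scoped_output_check(struct mbuf *m, int *flags,
    struct ip_out_args *ipoa, struct secpolicy **sp)
{
	if (ipsec4_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND, flags,
	    ipoa, sp) != 0) {
		m_freem(m);             /* capture without redirect: drop */
		return EHOSTUNREACH;    /* illustrative error choice */
	}
	return 0;                       /* continue output processing */
}
#endif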
651
652
653#if INET6
654/*
655 * For OUTBOUND packet having a socket. Searching SPD for packet,
656 * and return a pointer to SP.
657 * OUT: NULL: no appropriate SP found, the following value is set to error.
658 * 0 : bypass
659 * EACCES : discard packet.
660 * ENOENT : ipsec_acquire() in progress, maybe.
661 * others : error occurred.
662 * others: a pointer to SP
663 */
664struct secpolicy *
665ipsec6_getpolicybysock(struct mbuf *m,
666 u_int dir,
667 struct socket *so,
668 int *error)
669{
670 struct inpcbpolicy *pcbsp = NULL;
671 struct secpolicy *currsp = NULL; /* policy on socket */
672 struct secpolicy *kernsp = NULL; /* policy on kernel */
673
674 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
675
676 /* sanity check */
677 if (m == NULL || so == NULL || error == NULL) {
678 panic("ipsec6_getpolicybysock: NULL pointer was passed.\n");
679 }
680
681#if DIAGNOSTIC
682 if (SOCK_DOM(so) != PF_INET6) {
683 panic("ipsec6_getpolicybysock: socket domain != inet6\n");
684 }
685#endif
686
687 pcbsp = sotoin6pcb(so)->in6p_sp;
688
689 if (!pcbsp) {
690 return ipsec6_getpolicybyaddr(m, dir, 0, error);
691 }
692
693 /* set spidx in pcb */
694 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
695
696 /* sanity check */
697 if (pcbsp == NULL) {
698 panic("ipsec6_getpolicybysock: pcbsp is NULL.\n");
699 }
700
701 switch (dir) {
702 case IPSEC_DIR_INBOUND:
703 currsp = pcbsp->sp_in;
704 break;
705 case IPSEC_DIR_OUTBOUND:
706 currsp = pcbsp->sp_out;
707 break;
708 default:
709 panic("ipsec6_getpolicybysock: illegal direction.\n");
710 }
711
712 /* sanity check */
713 if (currsp == NULL) {
714 panic("ipsec6_getpolicybysock: currsp is NULL.\n");
715 }
716
717 /* when privileged socket */
718 if (pcbsp->priv) {
719 switch (currsp->policy) {
720 case IPSEC_POLICY_BYPASS:
721 lck_mtx_lock(sadb_mutex);
722 currsp->refcnt++;
723 lck_mtx_unlock(sadb_mutex);
724 *error = 0;
725 return currsp;
726
727 case IPSEC_POLICY_ENTRUST:
728 /* look for a policy in SPD */
729 kernsp = key_allocsp(&currsp->spidx, dir);
730
731 /* SP found */
732 if (kernsp != NULL) {
733 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
734 printf("DP ipsec6_getpolicybysock called "
735 "to allocate SP:0x%llx\n",
736 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
737 *error = 0;
738 return kernsp;
739 }
740
741 /* no SP found */
742 lck_mtx_lock(sadb_mutex);
743 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
744 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
745 ipseclog((LOG_INFO,
746 "fixed system default policy: %d->%d\n",
747 ip6_def_policy.policy, IPSEC_POLICY_NONE));
748 ip6_def_policy.policy = IPSEC_POLICY_NONE;
749 }
750 ip6_def_policy.refcnt++;
751 lck_mtx_unlock(sadb_mutex);
752 *error = 0;
753 return &ip6_def_policy;
754
755 case IPSEC_POLICY_IPSEC:
756 lck_mtx_lock(sadb_mutex);
757 currsp->refcnt++;
758 lck_mtx_unlock(sadb_mutex);
759 *error = 0;
760 return currsp;
761
762 default:
763 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
764 "Invalid policy for PCB %d\n", currsp->policy));
765 *error = EINVAL;
766 return NULL;
767 }
768 /* NOTREACHED */
769 }
770
771 /* when non-privileged socket */
772 /* look for a policy in SPD */
773 kernsp = key_allocsp(&currsp->spidx, dir);
774
775 /* SP found */
776 if (kernsp != NULL) {
777 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
778 printf("DP ipsec6_getpolicybysock called "
779 "to allocate SP:0x%llx\n",
780 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
781 *error = 0;
782 return kernsp;
783 }
784
785 /* no SP found */
786 switch (currsp->policy) {
787 case IPSEC_POLICY_BYPASS:
788 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
789 "Illegal policy for non-priviliged defined %d\n",
790 currsp->policy));
791 *error = EINVAL;
792 return NULL;
793
794 case IPSEC_POLICY_ENTRUST:
795 lck_mtx_lock(sadb_mutex);
796 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
797 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
798 ipseclog((LOG_INFO,
799 "fixed system default policy: %d->%d\n",
800 ip6_def_policy.policy, IPSEC_POLICY_NONE));
801 ip6_def_policy.policy = IPSEC_POLICY_NONE;
802 }
803 ip6_def_policy.refcnt++;
804 lck_mtx_unlock(sadb_mutex);
805 *error = 0;
806 return &ip6_def_policy;
807
808 case IPSEC_POLICY_IPSEC:
809 lck_mtx_lock(sadb_mutex);
810 currsp->refcnt++;
811 lck_mtx_unlock(sadb_mutex);
812 *error = 0;
813 return currsp;
814
815 default:
816 ipseclog((LOG_ERR,
817 "ipsec6_policybysock: Invalid policy for PCB %d\n",
818 currsp->policy));
819 *error = EINVAL;
820 return NULL;
821 }
822 /* NOTREACHED */
823}
824
825/*
826 * For FORWARDING packet or OUTBOUND without a socket. Searching SPD for packet,
827 * and return a pointer to SP.
828 * `flag' means whether or not the packet is to be forwarded.
829 * flag = 1: forward
830 * OUT: positive: a pointer to the entry for security policy leaf matched.
831 * NULL: no appropriate SP found, the following value is set to error.
832 * 0 : bypass
833 * EACCES : discard packet.
834 * ENOENT : ipsec_acquire() in progress, maybe.
835 * others : error occurred.
836 */
837#ifndef IP_FORWARDING
838#define IP_FORWARDING 1
839#endif
840
841struct secpolicy *
842ipsec6_getpolicybyaddr(struct mbuf *m,
843 u_int dir,
844 int flag,
845 int *error)
846{
847 struct secpolicy *sp = NULL;
848
849 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
850
851 /* sanity check */
852 if (m == NULL || error == NULL) {
853 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n");
854 }
855
856 {
857 struct secpolicyindex spidx;
858
859 bzero(&spidx, sizeof(spidx));
860
861 /* make an index to look for a policy */
862 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
863 (flag & IP_FORWARDING) ? 0 : 1);
864
865 if (*error != 0) {
866 return NULL;
867 }
868
869 sp = key_allocsp(&spidx, dir);
870 }
871
872 /* SP found */
873 if (sp != NULL) {
874 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
875 printf("DP ipsec6_getpolicybyaddr called "
876 "to allocate SP:0x%llx\n",
877 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
878 *error = 0;
879 return sp;
880 }
881
882 /* no SP found */
883 lck_mtx_lock(sadb_mutex);
884 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
885 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
886 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
887 ip6_def_policy.policy, IPSEC_POLICY_NONE));
888 ip6_def_policy.policy = IPSEC_POLICY_NONE;
889 }
890 ip6_def_policy.refcnt++;
891 lck_mtx_unlock(sadb_mutex);
892 *error = 0;
893 return &ip6_def_policy;
894}
895
896/* Match with bound interface rather than src addr.
897 * Unlike getpolicybyaddr, do not set the default policy.
898 * Return 0 if should continue processing, or -1 if packet
899 * should be dropped.
900 */
901int
902ipsec6_getpolicybyinterface(struct mbuf *m,
903 u_int dir,
904 int flag,
905 struct ip6_out_args *ip6oap,
906 int *noipsec,
907 struct secpolicy **sp)
908{
909 struct secpolicyindex spidx;
910 int error = 0;
911
912 if (ipsec_bypass != 0) {
913 return 0;
914 }
915
916 /* Sanity check */
917 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) {
918 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n");
919 }
920
921 *noipsec = 0;
922
923 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) {
924 return 0;
925 }
926
927 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0);
928 bzero(&spidx, sizeof(spidx));
929
930 /* make an index to look for a policy */
931 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
932 ip6oap->ip6oa_boundif, 6);
933
934 if (error != 0) {
935 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0);
936 return 0;
937 }
938
939 *sp = key_allocsp(&spidx, dir);
940
941 /* Return SP, whether NULL or not */
942 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
943 if ((*sp)->ipsec_if == NULL) {
944 /* Invalid to capture on an interface without redirect */
945 key_freesp(*sp, KEY_SADB_UNLOCKED);
946 *sp = NULL;
947 return -1;
948 } else if ((*sp)->disabled) {
949 /* Disabled policies go in the clear */
950 key_freesp(*sp, KEY_SADB_UNLOCKED);
951 *sp = NULL;
952 *noipsec = 1; /* Avoid later IPsec check */
953 } else {
954 /* If policy is enabled, redirect to ipsec interface */
955 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
956 }
957 }
958
959 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0);
960
961 return 0;
962}
963#endif /* INET6 */
964
965/*
966 * set IP address into spidx from mbuf.
967 * This function is used when forwarding packets and for ICMP echo replies.
968 *
969 * IN: get the following from the mbuf:
970 * protocol family, src, dst, next protocol
971 * OUT:
972 * 0: success.
973 * other: failure, and set errno.
974 */
975static int
976ipsec_setspidx_mbuf(
977 struct secpolicyindex *spidx,
978 u_int dir,
979 __unused u_int family,
980 struct mbuf *m,
981 int needport)
982{
983 int error;
984
985 /* sanity check */
986 if (spidx == NULL || m == NULL) {
987 panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n");
988 }
989
990 bzero(spidx, sizeof(*spidx));
991
992 error = ipsec_setspidx(m, spidx, needport, 0);
993 if (error) {
994 goto bad;
995 }
996 spidx->dir = dir;
997
998 return 0;
999
1000bad:
1001 /* XXX initialize */
1002 bzero(spidx, sizeof(*spidx));
1003 return EINVAL;
1004}
1005
1006static int
1007ipsec_setspidx_interface(
1008 struct secpolicyindex *spidx,
1009 u_int dir,
1010 struct mbuf *m,
1011 int needport,
1012 int ifindex,
1013 int ip_version)
1014{
1015 int error;
1016
1017 /* sanity check */
1018 if (spidx == NULL || m == NULL) {
1019 panic("ipsec_setspidx_interface: NULL pointer was passed.\n");
1020 }
1021
1022 bzero(spidx, sizeof(*spidx));
1023
1024 error = ipsec_setspidx(m, spidx, needport, ip_version);
1025 if (error) {
1026 goto bad;
1027 }
1028 spidx->dir = dir;
1029
1030 if (ifindex != 0) {
1031 ifnet_head_lock_shared();
1032 spidx->internal_if = ifindex2ifnet[ifindex];
1033 ifnet_head_done();
1034 } else {
1035 spidx->internal_if = NULL;
1036 }
1037
1038 return 0;
1039
1040bad:
1041 return EINVAL;
1042}
1043
1044static int
1045ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1046{
1047 struct secpolicyindex *spidx;
1048 int error;
1049
1050 if (ipsec_bypass != 0) {
1051 return 0;
1052 }
1053
1054 /* sanity check */
1055 if (pcb == NULL) {
1056 panic("ipsec4_setspidx_inpcb: no PCB found.\n");
1057 }
1058 if (pcb->inp_sp == NULL) {
1059 panic("ipsec4_setspidx_inpcb: no inp_sp found.\n");
1060 }
1061 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) {
1062 panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n");
1063 }
1064
1065 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1066 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1067
1068 spidx = &pcb->inp_sp->sp_in->spidx;
1069 error = ipsec_setspidx(m, spidx, 1, 0);
1070 if (error) {
1071 goto bad;
1072 }
1073 spidx->dir = IPSEC_DIR_INBOUND;
1074
1075 spidx = &pcb->inp_sp->sp_out->spidx;
1076 error = ipsec_setspidx(m, spidx, 1, 0);
1077 if (error) {
1078 goto bad;
1079 }
1080 spidx->dir = IPSEC_DIR_OUTBOUND;
1081
1082 return 0;
1083
1084bad:
1085 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1086 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1087 return error;
1088}
1089
1090#if INET6
1091static int
1092ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1093{
1094 struct secpolicyindex *spidx;
1095 int error;
1096
1097 /* sanity check */
1098 if (pcb == NULL) {
1099 panic("ipsec6_setspidx_in6pcb: no PCB found.\n");
1100 }
1101 if (pcb->in6p_sp == NULL) {
1102 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n");
1103 }
1104 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) {
1105 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n");
1106 }
1107
1108 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1109 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1110
1111 spidx = &pcb->in6p_sp->sp_in->spidx;
1112 error = ipsec_setspidx(m, spidx, 1, 0);
1113 if (error) {
1114 goto bad;
1115 }
1116 spidx->dir = IPSEC_DIR_INBOUND;
1117
1118 spidx = &pcb->in6p_sp->sp_out->spidx;
1119 error = ipsec_setspidx(m, spidx, 1, 0);
1120 if (error) {
1121 goto bad;
1122 }
1123 spidx->dir = IPSEC_DIR_OUTBOUND;
1124
1125 return 0;
1126
1127bad:
1128 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1129 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1130 return error;
1131}
1132#endif
1133
1134/*
1135 * configure security policy index (src/dst/proto/sport/dport)
1136 * by looking at the content of mbuf.
1137 * the caller is responsible for error recovery (like clearing up spidx).
1138 */
1139static int
1140ipsec_setspidx(struct mbuf *m,
1141 struct secpolicyindex *spidx,
1142 int needport,
1143 int force_ip_version)
1144{
1145 struct ip *ip = NULL;
1146 struct ip ipbuf;
1147 u_int v;
1148 struct mbuf *n;
1149 int len;
1150 int error;
1151
1152 if (m == NULL) {
1153 panic("ipsec_setspidx: m == 0 passed.\n");
1154 }
1155
1156 /*
1157 * validate m->m_pkthdr.len. we see incorrect length if we
1158 * mistakenly call this function with inconsistent mbuf chain
1159 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1160 */
1161 len = 0;
1162 for (n = m; n; n = n->m_next) {
1163 len += n->m_len;
1164 }
1165 if (m->m_pkthdr.len != len) {
1166 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1167 printf("ipsec_setspidx: "
1168 "total of m_len(%d) != pkthdr.len(%d), "
1169 "ignored.\n",
1170 len, m->m_pkthdr.len));
1171 return EINVAL;
1172 }
1173
1174 if (m->m_pkthdr.len < sizeof(struct ip)) {
1175 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1176 printf("ipsec_setspidx: "
1177 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1178 m->m_pkthdr.len));
1179 return EINVAL;
1180 }
1181
1182 if (m->m_len >= sizeof(*ip)) {
1183 ip = mtod(m, struct ip *);
1184 } else {
1185 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1186 ip = &ipbuf;
1187 }
1188
1189 if (force_ip_version) {
1190 v = force_ip_version;
1191 } else {
1192#ifdef _IP_VHL
1193 v = _IP_VHL_V(ip->ip_vhl);
1194#else
1195 v = ip->ip_v;
1196#endif
1197 }
1198 switch (v) {
1199 case 4:
1200 error = ipsec4_setspidx_ipaddr(m, spidx);
1201 if (error) {
1202 return error;
1203 }
1204 ipsec4_get_ulp(m, spidx, needport);
1205 return 0;
1206#if INET6
1207 case 6:
1208 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1209 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1210 printf("ipsec_setspidx: "
1211 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1212 "ignored.\n", m->m_pkthdr.len));
1213 return EINVAL;
1214 }
1215 error = ipsec6_setspidx_ipaddr(m, spidx);
1216 if (error) {
1217 return error;
1218 }
1219 ipsec6_get_ulp(m, spidx, needport);
1220 return 0;
1221#endif
1222 default:
1223 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1224 printf("ipsec_setspidx: "
1225 "unknown IP version %u, ignored.\n", v));
1226 return EINVAL;
1227 }
1228}
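/*
 * Worked example for the selector extraction above: for an unfragmented
 * IPv4 TCP segment, ipsec4_setspidx_ipaddr() below fills spidx->src/dst
 * with sockaddr_in copies of ip_src/ip_dst and full /32 prefixes
 * (prefs = prefd = 32), and ipsec4_get_ulp() sets ul_proto = IPPROTO_TCP,
 * copying the TCP ports into the selector only when needport is set.
 */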
1229
1230static void
1231ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1232{
1233 struct ip ip;
1234 struct ip6_ext ip6e;
1235 u_int8_t nxt;
1236 int off;
1237 struct tcphdr th;
1238 struct udphdr uh;
1239
1240 /* sanity check */
1241 if (m == NULL) {
1242 panic("ipsec4_get_ulp: NULL pointer was passed.\n");
1243 }
1244 if (m->m_pkthdr.len < sizeof(ip)) {
1245 panic("ipsec4_get_ulp: too short\n");
1246 }
1247
1248 /* set default */
1249 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1250 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1251 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1252
1253 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1254 /* ip_input() flips it into host endian XXX need more checking */
1255 if (ip.ip_off & (IP_MF | IP_OFFMASK)) {
1256 return;
1257 }
1258
1259 nxt = ip.ip_p;
1260#ifdef _IP_VHL
1261 off = _IP_VHL_HL(ip.ip_vhl) << 2;
1262#else
1263 off = ip.ip_hl << 2;
1264#endif
1265 while (off < m->m_pkthdr.len) {
1266 switch (nxt) {
1267 case IPPROTO_TCP:
1268 spidx->ul_proto = nxt;
1269 if (!needport) {
1270 return;
1271 }
1272 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1273 return;
1274 }
1275 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1276 ((struct sockaddr_in *)&spidx->src)->sin_port =
1277 th.th_sport;
1278 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1279 th.th_dport;
1280 return;
1281 case IPPROTO_UDP:
1282 spidx->ul_proto = nxt;
1283 if (!needport) {
1284 return;
1285 }
1286 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1287 return;
1288 }
1289 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1290 ((struct sockaddr_in *)&spidx->src)->sin_port =
1291 uh.uh_sport;
1292 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1293 uh.uh_dport;
1294 return;
1295 case IPPROTO_AH:
1296 if (off + sizeof(ip6e) > m->m_pkthdr.len) {
1297 return;
1298 }
1299 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1300 off += (ip6e.ip6e_len + 2) << 2;
1301 nxt = ip6e.ip6e_nxt;
1302 break;
1303 case IPPROTO_ICMP:
1304 default:
1305 /* XXX intermediate headers??? */
1306 spidx->ul_proto = nxt;
1307 return;
1308 }
1309 }
1310}
1311
1312/* assumes that m is sane */
1313static int
1314ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1315{
1316 struct ip *ip = NULL;
1317 struct ip ipbuf;
1318 struct sockaddr_in *sin;
1319
1320 if (m->m_len >= sizeof(*ip)) {
1321 ip = mtod(m, struct ip *);
1322 } else {
1323 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1324 ip = &ipbuf;
1325 }
1326
1327 sin = (struct sockaddr_in *)&spidx->src;
1328 bzero(sin, sizeof(*sin));
1329 sin->sin_family = AF_INET;
1330 sin->sin_len = sizeof(struct sockaddr_in);
1331 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1332 spidx->prefs = sizeof(struct in_addr) << 3;
1333
1334 sin = (struct sockaddr_in *)&spidx->dst;
1335 bzero(sin, sizeof(*sin));
1336 sin->sin_family = AF_INET;
1337 sin->sin_len = sizeof(struct sockaddr_in);
1338 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1339 spidx->prefd = sizeof(struct in_addr) << 3;
1340
1341 return 0;
1342}
1343
1344#if INET6
1345static void
1346ipsec6_get_ulp(struct mbuf *m,
1347 struct secpolicyindex *spidx,
1348 int needport)
1349{
1350 int off, nxt;
1351 struct tcphdr th;
1352 struct udphdr uh;
1353
1354 /* sanity check */
1355 if (m == NULL) {
1356 panic("ipsec6_get_ulp: NULL pointer was passed.\n");
1357 }
1358
1359 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1360 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1361
1362 /* set default */
1363 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1364 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1365 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1366
1367 nxt = -1;
1368 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1369 if (off < 0 || m->m_pkthdr.len < off) {
1370 return;
1371 }
1372
1373 switch (nxt) {
1374 case IPPROTO_TCP:
1375 spidx->ul_proto = nxt;
1376 if (!needport) {
1377 break;
1378 }
1379 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) {
1380 break;
1381 }
1382 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1383 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1384 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1385 break;
1386 case IPPROTO_UDP:
1387 spidx->ul_proto = nxt;
1388 if (!needport) {
1389 break;
1390 }
1391 if (off + sizeof(struct udphdr) > m->m_pkthdr.len) {
1392 break;
1393 }
1394 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1395 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1396 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1397 break;
1398 case IPPROTO_ICMPV6:
1399 default:
1400 /* XXX intermediate headers??? */
1401 spidx->ul_proto = nxt;
1402 break;
1403 }
1404}
1405
1406/* assumes that m is sane */
1407static int
1408ipsec6_setspidx_ipaddr(struct mbuf *m,
1409 struct secpolicyindex *spidx)
1410{
1411 struct ip6_hdr *ip6 = NULL;
1412 struct ip6_hdr ip6buf;
1413 struct sockaddr_in6 *sin6;
1414
1415 if (m->m_len >= sizeof(*ip6)) {
1416 ip6 = mtod(m, struct ip6_hdr *);
1417 } else {
1418 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1419 ip6 = &ip6buf;
1420 }
1421
1422 sin6 = (struct sockaddr_in6 *)&spidx->src;
1423 bzero(sin6, sizeof(*sin6));
1424 sin6->sin6_family = AF_INET6;
1425 sin6->sin6_len = sizeof(struct sockaddr_in6);
1426 bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1427 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1428 sin6->sin6_addr.s6_addr16[1] = 0;
1429 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1430 }
1431 spidx->prefs = sizeof(struct in6_addr) << 3;
1432
1433 sin6 = (struct sockaddr_in6 *)&spidx->dst;
1434 bzero(sin6, sizeof(*sin6));
1435 sin6->sin6_family = AF_INET6;
1436 sin6->sin6_len = sizeof(struct sockaddr_in6);
1437 bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1438 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1439 sin6->sin6_addr.s6_addr16[1] = 0;
1440 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1441 }
1442 spidx->prefd = sizeof(struct in6_addr) << 3;
1443
1444 return 0;
1445}
1446#endif
1447
1448static struct inpcbpolicy *
1449ipsec_newpcbpolicy(void)
1450{
1451 struct inpcbpolicy *p;
1452
1453 p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK);
1454 return p;
1455}
1456
1457static void
1458ipsec_delpcbpolicy(struct inpcbpolicy *p)
1459{
1460 FREE(p, M_SECA);
1461}
1462
1463/* initialize policy in PCB */
1464int
1465ipsec_init_policy(struct socket *so,
1466 struct inpcbpolicy **pcb_sp)
1467{
1468 struct inpcbpolicy *new;
1469
1470 /* sanity check. */
1471 if (so == NULL || pcb_sp == NULL) {
1472 panic("ipsec_init_policy: NULL pointer was passed.\n");
1473 }
1474
1475 new = ipsec_newpcbpolicy();
1476 if (new == NULL) {
1477 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1478 return ENOBUFS;
1479 }
1480 bzero(new, sizeof(*new));
1481
1482#ifdef __APPLE__
1483 if (kauth_cred_issuser(so->so_cred))
1484#else
1485 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1486#endif
1487 { new->priv = 1;} else {
1488 new->priv = 0;
1489 }
1490
1491 if ((new->sp_in = key_newsp()) == NULL) {
1492 ipsec_delpcbpolicy(new);
1493 return ENOBUFS;
1494 }
1495 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1496 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1497
1498 if ((new->sp_out = key_newsp()) == NULL) {
1499 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1500 ipsec_delpcbpolicy(new);
1501 return ENOBUFS;
1502 }
1503 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1504 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1505
1506 *pcb_sp = new;
1507
1508 return 0;
1509}
1510
1511/* copy old ipsec policy into new */
1512int
1513ipsec_copy_policy(struct inpcbpolicy *old,
1514 struct inpcbpolicy *new)
1515{
1516 struct secpolicy *sp;
1517
1518 if (ipsec_bypass != 0) {
1519 return 0;
1520 }
1521
1522 sp = ipsec_deepcopy_policy(old->sp_in);
1523 if (sp) {
1524 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1525 new->sp_in = sp;
1526 } else {
1527 return ENOBUFS;
1528 }
1529
1530 sp = ipsec_deepcopy_policy(old->sp_out);
1531 if (sp) {
1532 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1533 new->sp_out = sp;
1534 } else {
1535 return ENOBUFS;
1536 }
1537
1538 new->priv = old->priv;
1539
1540 return 0;
1541}
1542
1543/* deep-copy a policy in PCB */
1544static struct secpolicy *
1545ipsec_deepcopy_policy(struct secpolicy *src)
1546{
1547 struct ipsecrequest *newchain = NULL;
1548 struct ipsecrequest *p;
1549 struct ipsecrequest **q;
1550 struct ipsecrequest *r;
1551 struct secpolicy *dst;
1552
1553 if (src == NULL) {
1554 return NULL;
1555 }
1556 dst = key_newsp();
1557 if (dst == NULL) {
1558 return NULL;
1559 }
1560
1561 /*
1562 * deep-copy IPsec request chain. This is required since struct
1563 * ipsecrequest is not reference counted.
1564 */
1565 q = &newchain;
1566 for (p = src->req; p; p = p->next) {
1567 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1568 M_SECA, M_WAITOK | M_ZERO);
1569 if (*q == NULL) {
1570 goto fail;
1571 }
1572 (*q)->next = NULL;
1573
1574 (*q)->saidx.proto = p->saidx.proto;
1575 (*q)->saidx.mode = p->saidx.mode;
1576 (*q)->level = p->level;
1577 (*q)->saidx.reqid = p->saidx.reqid;
1578
1579 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1580 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1581
1582 (*q)->sp = dst;
1583
1584 q = &((*q)->next);
1585 }
1586
1587 dst->req = newchain;
1588 dst->state = src->state;
1589 dst->policy = src->policy;
1590 /* do not touch the refcnt fields */
1591
1592 return dst;
1593
1594fail:
1595 for (p = newchain; p; p = r) {
1596 r = p->next;
1597 FREE(p, M_SECA);
1598 p = NULL;
1599 }
1600 key_freesp(dst, KEY_SADB_UNLOCKED);
1601 return NULL;
1602}
1603
1604/* set policy and ipsec request if present. */
1605static int
1606ipsec_set_policy(struct secpolicy **pcb_sp,
1607 __unused int optname,
1608 caddr_t request,
1609 size_t len,
1610 int priv)
1611{
1612 struct sadb_x_policy *xpl;
1613 struct secpolicy *newsp = NULL;
1614 int error;
1615
1616 /* sanity check. */
1617 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) {
1618 return EINVAL;
1619 }
1620 if (len < sizeof(*xpl)) {
1621 return EINVAL;
1622 }
1623 xpl = (struct sadb_x_policy *)(void *)request;
1624
1625 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1626 printf("ipsec_set_policy: passed policy\n");
1627 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1628
1629 /* check policy type */
1630 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1631 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1632 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) {
1633 return EINVAL;
1634 }
1635
1636 /* check privileged socket */
1637 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
1638 return EACCES;
1639 }
1640
1641 /* allocate a new SP entry */
1642 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) {
1643 return error;
1644 }
1645
1646 newsp->state = IPSEC_SPSTATE_ALIVE;
1647
1648 /* clear old SP and set new SP */
1649 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1650 *pcb_sp = newsp;
1651 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1652 printf("ipsec_set_policy: new policy\n");
1653 kdebug_secpolicy(newsp));
1654
1655 return 0;
1656}
1657
1658int
1659ipsec4_set_policy(struct inpcb *inp,
1660 int optname,
1661 caddr_t request,
1662 size_t len,
1663 int priv)
1664{
1665 struct sadb_x_policy *xpl;
1666 struct secpolicy **pcb_sp;
1667 int error = 0;
1668 struct sadb_x_policy xpl_aligned_buf;
1669 u_int8_t *xpl_unaligned;
1670
1671 /* sanity check. */
1672 if (inp == NULL || request == NULL) {
1673 return EINVAL;
1674 }
1675 if (len < sizeof(*xpl)) {
1676 return EINVAL;
1677 }
1678 xpl = (struct sadb_x_policy *)(void *)request;
1679
1680 /* This is a new mbuf allocated by soopt_getm() */
1681 if (IPSEC_IS_P2ALIGNED(xpl)) {
1682 xpl_unaligned = NULL;
1683 } else {
1684 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1685 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1686 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1687 }
1688
1689 if (inp->inp_sp == NULL) {
1690 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1691 if (error) {
1692 return error;
1693 }
1694 }
1695
1696 /* select direction */
1697 switch (xpl->sadb_x_policy_dir) {
1698 case IPSEC_DIR_INBOUND:
1699 pcb_sp = &inp->inp_sp->sp_in;
1700 break;
1701 case IPSEC_DIR_OUTBOUND:
1702 pcb_sp = &inp->inp_sp->sp_out;
1703 break;
1704 default:
1705 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1706 xpl->sadb_x_policy_dir));
1707 return EINVAL;
1708 }
1709
1710 /* turn bypass off */
1711 if (ipsec_bypass != 0) {
1712 ipsec_bypass = 0;
1713 }
1714
1715 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1716}
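/*
 * Example (minimal userland sketch, assuming the IP_IPSEC_POLICY socket
 * option is routed through ip_ctloutput() to ipsec4_set_policy() as on
 * other KAME-derived stacks, and that the pfkeyv2/ipsec headers shown are
 * available to the caller): building the struct sadb_x_policy request
 * this function parses.  A BYPASS request like this one additionally needs
 * a privileged socket (see the priv check in ipsec_set_policy()).
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/pfkeyv2.h>
#include <netinet6/ipsec.h>

static int
set_outbound_bypass(int s)
{
	struct sadb_x_policy xpl;

	memset(&xpl, 0, sizeof(xpl));
	xpl.sadb_x_policy_len     = sizeof(xpl) >> 3;   /* PF_KEY lengths are in 8-byte units */
	xpl.sadb_x_policy_exttype = SADB_X_EXT_POLICY;
	xpl.sadb_x_policy_type    = IPSEC_POLICY_BYPASS;
	xpl.sadb_x_policy_dir     = IPSEC_DIR_OUTBOUND;

	return setsockopt(s, IPPROTO_IP, IP_IPSEC_POLICY, &xpl, sizeof(xpl));
}
#endif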
1717
1718/* delete policy in PCB */
1719int
1720ipsec4_delete_pcbpolicy(struct inpcb *inp)
1721{
1722 /* sanity check. */
1723 if (inp == NULL) {
1724 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n");
1725 }
1726
1727 if (inp->inp_sp == NULL) {
1728 return 0;
1729 }
1730
1731 if (inp->inp_sp->sp_in != NULL) {
1732 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1733 inp->inp_sp->sp_in = NULL;
1734 }
1735
1736 if (inp->inp_sp->sp_out != NULL) {
1737 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1738 inp->inp_sp->sp_out = NULL;
1739 }
1740
1741 ipsec_delpcbpolicy(inp->inp_sp);
1742 inp->inp_sp = NULL;
1743
1744 return 0;
1745}
1746
1747#if INET6
1748int
1749ipsec6_set_policy(struct in6pcb *in6p,
1750 int optname,
1751 caddr_t request,
1752 size_t len,
1753 int priv)
1754{
1755 struct sadb_x_policy *xpl;
1756 struct secpolicy **pcb_sp;
1757 int error = 0;
1758 struct sadb_x_policy xpl_aligned_buf;
1759 u_int8_t *xpl_unaligned;
1760
1761 /* sanity check. */
1762 if (in6p == NULL || request == NULL) {
1763 return EINVAL;
1764 }
1765 if (len < sizeof(*xpl)) {
1766 return EINVAL;
1767 }
1768 xpl = (struct sadb_x_policy *)(void *)request;
1769
1770 /* This is a new mbuf allocated by soopt_getm() */
1771 if (IPSEC_IS_P2ALIGNED(xpl)) {
1772 xpl_unaligned = NULL;
1773 } else {
1774 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1775 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1776 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1777 }
1778
1779 if (in6p->in6p_sp == NULL) {
1780 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1781 if (error) {
1782 return error;
1783 }
1784 }
1785
1786 /* select direction */
1787 switch (xpl->sadb_x_policy_dir) {
1788 case IPSEC_DIR_INBOUND:
1789 pcb_sp = &in6p->in6p_sp->sp_in;
1790 break;
1791 case IPSEC_DIR_OUTBOUND:
1792 pcb_sp = &in6p->in6p_sp->sp_out;
1793 break;
1794 default:
1795 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1796 xpl->sadb_x_policy_dir));
1797 return EINVAL;
1798 }
1799
1800 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1801}
1802
1803int
1804ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1805{
1806 /* sanity check. */
1807 if (in6p == NULL) {
1808 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n");
1809 }
1810
1811 if (in6p->in6p_sp == NULL) {
1812 return 0;
1813 }
1814
1815 if (in6p->in6p_sp->sp_in != NULL) {
1816 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1817 in6p->in6p_sp->sp_in = NULL;
1818 }
1819
1820 if (in6p->in6p_sp->sp_out != NULL) {
1821 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1822 in6p->in6p_sp->sp_out = NULL;
1823 }
1824
1825 ipsec_delpcbpolicy(in6p->in6p_sp);
1826 in6p->in6p_sp = NULL;
1827
1828 return 0;
1829}
1830#endif
1831
1832/*
1833 * return current level.
1834 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned.
1835 */
1836u_int
1837ipsec_get_reqlevel(struct ipsecrequest *isr)
1838{
1839 u_int level = 0;
1840 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1841
1842 /* sanity check */
1843 if (isr == NULL || isr->sp == NULL) {
1844 panic("ipsec_get_reqlevel: NULL pointer is passed.\n");
1845 }
1846 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1847 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) {
1848 panic("ipsec_get_reqlevel: family mismatched.\n");
1849 }
1850
1851/* XXX note that we have ipseclog() expanded here - code sync issue */
1852#define IPSEC_CHECK_DEFAULT(lev) \
1853 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1854 && (lev) != IPSEC_LEVEL_UNIQUE) \
1855 ? (ipsec_debug \
1856 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1857 (lev), IPSEC_LEVEL_REQUIRE) \
1858 : (void)0), \
1859 (lev) = IPSEC_LEVEL_REQUIRE, \
1860 (lev) \
1861 : (lev))
1862
1863 /* set default level */
1864 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1865#if INET
1866 case AF_INET:
1867 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1868 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1869 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1870 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1871 break;
1872#endif
1873#if INET6
1874 case AF_INET6:
1875 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1876 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1877 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1878 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1879 break;
1880#endif /* INET6 */
1881 default:
1882 panic("key_get_reqlevel: Unknown family. %d\n",
1883 ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1884 }
1885
1886#undef IPSEC_CHECK_DEFAULT
1887
1888 /* set level */
1889 switch (isr->level) {
1890 case IPSEC_LEVEL_DEFAULT:
1891 switch (isr->saidx.proto) {
1892 case IPPROTO_ESP:
1893 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1894 level = esp_net_deflev;
1895 } else {
1896 level = esp_trans_deflev;
1897 }
1898 break;
1899 case IPPROTO_AH:
1900 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
1901 level = ah_net_deflev;
1902 } else {
1903 level = ah_trans_deflev;
1904 }
1905 break;
1906 case IPPROTO_IPCOMP:
1907 ipseclog((LOG_ERR, "ipsec_get_reqlevel: "
1908 "still got IPCOMP - exiting\n"));
1909 break;
1910 default:
1911 panic("ipsec_get_reqlevel: "
1912 "Illegal protocol defined %u\n",
1913 isr->saidx.proto);
1914 }
1915 break;
1916
1917 case IPSEC_LEVEL_USE:
1918 case IPSEC_LEVEL_REQUIRE:
1919 level = isr->level;
1920 break;
1921 case IPSEC_LEVEL_UNIQUE:
1922 level = IPSEC_LEVEL_REQUIRE;
1923 break;
1924
1925 default:
1926 panic("ipsec_get_reqlevel: Illegal IPsec level %u\n",
1927 isr->level);
1928 }
1929
1930 return level;
1931}
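/*
 * Worked example for the level resolution above: with the default sysctls
 * earlier in this file (all IPSEC_LEVEL_USE), an ESP transport-mode request
 * left at IPSEC_LEVEL_DEFAULT resolves to IPSEC_LEVEL_USE, while a request
 * marked IPSEC_LEVEL_UNIQUE always resolves to IPSEC_LEVEL_REQUIRE.
 */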
1932
1933/*
1934 * Check AH/ESP integrity.
1935 * OUT:
1936 * 0: valid
1937 * 1: invalid
1938 */
1939static int
1940ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1941{
1942 struct ipsecrequest *isr;
1943 u_int level;
1944 int need_auth, need_conf, need_icv;
1945
1946 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1947 printf("ipsec_in_reject: using SP\n");
1948 kdebug_secpolicy(sp));
1949
1950 /* check policy */
1951 switch (sp->policy) {
1952 case IPSEC_POLICY_DISCARD:
1953 case IPSEC_POLICY_GENERATE:
1954 return 1;
1955 case IPSEC_POLICY_BYPASS:
1956 case IPSEC_POLICY_NONE:
1957 return 0;
1958
1959 case IPSEC_POLICY_IPSEC:
1960 break;
1961
1962 case IPSEC_POLICY_ENTRUST:
1963 default:
1964 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
1965 }
1966
1967 need_auth = 0;
1968 need_conf = 0;
1969 need_icv = 0;
1970
1971 /* XXX should compare policy against ipsec header history */
1972
1973 for (isr = sp->req; isr != NULL; isr = isr->next) {
1974 /* get current level */
1975 level = ipsec_get_reqlevel(isr);
1976
1977 switch (isr->saidx.proto) {
1978 case IPPROTO_ESP:
1979 if (level == IPSEC_LEVEL_REQUIRE) {
1980 need_conf++;
1981
1982#if 0
1983 /* this won't work with multiple input threads - isr->sav would change
1984 * with every packet and is not necessarily related to the current packet
1985 * being processed. If ESP processing is required - the esp code should
1986 * make sure that the integrity check is present and correct. I don't see
1987 * why it would be necessary to check for the presence of the integrity
1988 * check value here. I think this is just wrong.
1989 * isr->sav has been removed.
1990 * %%%%%% this needs to be re-worked at some point but I think the code below can
1991 * be ignored for now.
1992 */
1993 if (isr->sav != NULL
1994 && isr->sav->flags == SADB_X_EXT_NONE
1995 && isr->sav->alg_auth != SADB_AALG_NONE) {
1996 need_icv++;
1997 }
1998#endif
1999 }
2000 break;
2001 case IPPROTO_AH:
2002 if (level == IPSEC_LEVEL_REQUIRE) {
2003 need_auth++;
2004 need_icv++;
2005 }
2006 break;
2007 case IPPROTO_IPCOMP:
2008 /*
2009 * We don't really care: since the IPComp document says that
2010 * small packets should not be compressed, IPComp policy
2011 * should always be treated as being at "use" level.
2012 */
2013 break;
2014 }
2015 }
2016
2017 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
2018 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
2019 need_auth, need_conf, need_icv, m->m_flags));
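/*
 * The mbuf flags tested below are set by the AH/ESP input paths as each
 * protected header is processed: reject the packet if confidentiality is
 * required but it was not decrypted (M_DECRYPTED), if an ICV is required
 * without AH but the datagram was not authenticated (M_AUTHIPDGM), or if
 * AH authentication is required but the header was not authenticated
 * (M_AUTHIPHDR).
 */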
2020
2021 if ((need_conf && !(m->m_flags & M_DECRYPTED))
2022 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
2023 || (need_auth && !(m->m_flags & M_AUTHIPHDR))) {
2024 return 1;
2025 }
2026
2027 return 0;
2028}
2029
2030/*
2031 * Check AH/ESP integrity.
2032 * This function is called from tcp_input(), udp_input(),
2033 * and {ah,esp}4_input for tunnel mode
2034 */
2035int
2036ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
2037{
2038 struct secpolicy *sp = NULL;
2039 int error;
2040 int result;
2041
2042 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2043 /* sanity check */
2044 if (m == NULL) {
2045 return 0; /* XXX should be panic ? */
2046 }
2047 /* get SP for this packet.
2048 * When we are called from ip_forward(), we call
2049 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2050 */
2051 if (so == NULL) {
2052 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2053 } else {
2054 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2055 }
2056
2057 if (sp == NULL) {
2058 return 0; /* XXX should be panic ?
2059 * -> No, there may be an error. */
2060 }
2061 result = ipsec_in_reject(sp, m);
2062 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2063 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
2064 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2065 key_freesp(sp, KEY_SADB_UNLOCKED);
2066
2067 return result;
2068}
2069
2070int
2071ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
2072{
2073 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2074 if (inp == NULL) {
2075 return ipsec4_in_reject_so(m, NULL);
2076 }
2077 if (inp->inp_socket) {
2078 return ipsec4_in_reject_so(m, inp->inp_socket);
2079 } else {
2080 panic("ipsec4_in_reject: invalid inpcb/socket");
2081 }
2082
2083 /* NOTREACHED */
2084 return 0;
2085}
2086
2087#if INET6
2088/*
2089 * Check AH/ESP integrity.
2090 * This function is called from tcp6_input(), udp6_input(),
2091 * and {ah,esp}6_input for tunnel mode
2092 */
2093int
2094ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2095{
2096 struct secpolicy *sp = NULL;
2097 int error;
2098 int result;
2099
2100 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2101 /* sanity check */
2102 if (m == NULL) {
2103 return 0; /* XXX should be panic ? */
2104 }
2105 /* get SP for this packet.
2106 * When we are called from ip_forward(), we call
2107 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2108 */
2109 if (so == NULL) {
2110 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2111 } else {
2112 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2113 }
2114
2115 if (sp == NULL) {
2116 return 0; /* XXX should be panic ? */
2117 }
2118 result = ipsec_in_reject(sp, m);
2119 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2120 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2121 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2122 key_freesp(sp, KEY_SADB_UNLOCKED);
2123
2124 return result;
2125}
2126
2127int
2128ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2129{
2130 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2131 if (in6p == NULL) {
2132 return ipsec6_in_reject_so(m, NULL);
2133 }
2134 if (in6p->in6p_socket) {
2135 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2136 } else {
2137 panic("ipsec6_in_reject: invalid in6p/socket");
2138 }
2139
2140 /* NOTREACHED */
2141 return 0;
2142}
2143#endif
2144
2145/*
2146 * Compute the byte size to be occupied by the IPsec header(s).
2147 * In case of tunnel mode, this includes the size of the outer IP header.
2148 * NOTE: the caller is responsible for freeing the SP passed in.
2149 */
2150size_t
2151ipsec_hdrsiz(struct secpolicy *sp)
2152{
2153 struct ipsecrequest *isr;
2154 size_t siz, clen;
2155
2156 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2157 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2158 printf("ipsec_hdrsiz: using SP\n");
2159 kdebug_secpolicy(sp));
2160
2161 /* check policy */
2162 switch (sp->policy) {
2163 case IPSEC_POLICY_DISCARD:
2164 case IPSEC_POLICY_GENERATE:
2165 case IPSEC_POLICY_BYPASS:
2166 case IPSEC_POLICY_NONE:
2167 return 0;
2168
2169 case IPSEC_POLICY_IPSEC:
2170 break;
2171
2172 case IPSEC_POLICY_ENTRUST:
2173 default:
2174 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
2175 }
2176
2177 siz = 0;
2178
2179 for (isr = sp->req; isr != NULL; isr = isr->next) {
2180 clen = 0;
2181
2182 switch (isr->saidx.proto) {
2183 case IPPROTO_ESP:
2184#if IPSEC_ESP
2185 clen = esp_hdrsiz(isr);
2186#else
2187 clen = 0; /*XXX*/
2188#endif
2189 break;
2190 case IPPROTO_AH:
2191 clen = ah_hdrsiz(isr);
2192 break;
2193 default:
2194 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2195 "unknown protocol %u\n",
2196 isr->saidx.proto));
2197 break;
2198 }
2199
2200 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2201 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2202 case AF_INET:
2203 clen += sizeof(struct ip);
2204 break;
2205#if INET6
2206 case AF_INET6:
2207 clen += sizeof(struct ip6_hdr);
2208 break;
2209#endif
2210 default:
2211 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2212 "unknown AF %d in IPsec tunnel SA\n",
2213 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2214 break;
2215 }
2216 }
2217 siz += clen;
2218 }
2219
2220 return siz;
2221}
2222
2223 /* This function is called from ip_forward() and ipsec4_hdrsiz_tcp(). */
2224size_t
2225ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp)
2226{
2227 struct secpolicy *sp = NULL;
2228 int error;
2229 size_t size;
2230
2231 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2232 /* sanity check */
2233 if (m == NULL) {
2234 return 0; /* XXX should be panic ? */
2235 }
2236 if (inp != NULL && inp->inp_socket == NULL) {
2237 panic("ipsec4_hdrsize: why is socket NULL but there is PCB.");
2238 }
2239
2240 /* get SP for this packet.
2241 * When we are called from ip_forward(), we call
2242 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2243 */
2244 if (inp == NULL) {
2245 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2246 } else {
2247 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2248 }
2249
2250 if (sp == NULL) {
2251 return 0; /* XXX should be panic ? */
2252 }
2253 size = ipsec_hdrsiz(sp);
2254 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2255 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2256 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2257 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2258 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2259 key_freesp(sp, KEY_SADB_UNLOCKED);
2260
2261 return size;
2262}
2263
2264#if INET6
2265 /* This function is called from ipsec6_hdrsiz_tcp(),
2266 * and maybe from ip6_forward().
2267 */
2268size_t
2269ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p)
2270{
2271 struct secpolicy *sp = NULL;
2272 int error;
2273 size_t size;
2274
2275 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2276 /* sanity check */
2277 if (m == NULL) {
2278 return 0; /* XXX should be panic ? */
2279 }
2280 if (in6p != NULL && in6p->in6p_socket == NULL) {
2281 panic("ipsec6_hdrsize: why is socket NULL but there is PCB.");
2282 }
2283
2284 /* get SP for this packet */
2285 /* XXX Is it right to call with IP_FORWARDING. */
2286 if (in6p == NULL) {
2287 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2288 } else {
2289 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2290 }
2291
2292 if (sp == NULL) {
2293 return 0;
2294 }
2295 size = ipsec_hdrsiz(sp);
2296 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2297 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2298 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2299 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2300 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2301 key_freesp(sp, KEY_SADB_UNLOCKED);
2302
2303 return size;
2304}
2305#endif /*INET6*/
2306
2307#if INET
2308/*
2309 * encapsulate for ipsec tunnel.
2310 * ip->ip_src must be fixed later on.
2311 */
2312int
2313ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
2314{
2315 struct ip *oip;
2316 struct ip *ip;
2317 size_t hlen;
2318 size_t plen;
2319
2320 /* can't tunnel between different AFs */
2321 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2322 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2323 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2324 m_freem(m);
2325 return EINVAL;
2326 }
2327#if 0
2328 /* XXX if the dst is myself, perform nothing. */
2329 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2330 m_freem(m);
2331 return EINVAL;
2332 }
2333#endif
2334
2335 if (m->m_len < sizeof(*ip)) {
2336 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2337 }
2338
2339 ip = mtod(m, struct ip *);
2340#ifdef _IP_VHL
2341 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2342#else
2343 hlen = ip->ip_hl << 2;
2344#endif
2345
2346 if (m->m_len != hlen) {
2347 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2348 }
2349
2350 /* generate header checksum */
2351 ip->ip_sum = 0;
2352#ifdef _IP_VHL
2353 ip->ip_sum = in_cksum(m, hlen);
2354#else
2355 ip->ip_sum = in_cksum(m, hlen);
2356#endif
2357
2358 plen = m->m_pkthdr.len;
2359
2360 /*
2361 * grow the mbuf to accommodate the new IPv4 header.
2362 * NOTE: IPv4 options will never be copied.
2363 */
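/*
 * Layout change performed below: the original (inner) IPv4 header, which
 * currently occupies the first mbuf, is copied into leading space of the
 * second mbuf (or into a freshly allocated mbuf linked in after the
 * first), and the first mbuf is then shrunk to sizeof(struct ip) so it
 * can hold the new outer IPv4 header constructed further down.
 */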
2364 if (M_LEADINGSPACE(m->m_next) < hlen) {
2365 struct mbuf *n;
2366 MGET(n, M_DONTWAIT, MT_DATA);
2367 if (!n) {
2368 m_freem(m);
2369 return ENOBUFS;
2370 }
2371 n->m_len = hlen;
2372 n->m_next = m->m_next;
2373 m->m_next = n;
2374 m->m_pkthdr.len += hlen;
2375 oip = mtod(n, struct ip *);
2376 } else {
2377 m->m_next->m_len += hlen;
2378 m->m_next->m_data -= hlen;
2379 m->m_pkthdr.len += hlen;
2380 oip = mtod(m->m_next, struct ip *);
2381 }
2382 ip = mtod(m, struct ip *);
2383 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2384 m->m_len = sizeof(struct ip);
2385 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2386
2387 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2388 /* ECN consideration. */
2389 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2390#ifdef _IP_VHL
2391 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2392#else
2393 ip->ip_hl = sizeof(struct ip) >> 2;
2394#endif
2395 ip->ip_off &= htons(~IP_OFFMASK);
2396 ip->ip_off &= htons(~IP_MF);
2397 switch (ip4_ipsec_dfbit) {
2398 case 0: /* clear DF bit */
2399 ip->ip_off &= htons(~IP_DF);
2400 break;
2401 case 1: /* set DF bit */
2402 ip->ip_off |= htons(IP_DF);
2403 break;
2404 default: /* copy DF bit */
2405 break;
2406 }
2407 ip->ip_p = IPPROTO_IPIP;
2408 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2409 ip->ip_len = htons(plen + sizeof(struct ip));
2410 } else {
2411 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2412 "leave ip_len as is (invalid packet)\n"));
2413 }
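/*
 * Per RFC 6864, an atomic datagram (DF set, not a fragment) need not
 * carry a meaningful IPv4 Identification, so it is set to zero here;
 * otherwise a random ID is generated for the outer header.
 */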
2414 if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) {
2415 ip->ip_id = 0;
2416 } else {
2417 ip->ip_id = ip_randomid();
2418 }
2419 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2420 &ip->ip_src, sizeof(ip->ip_src));
2421 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2422 &ip->ip_dst, sizeof(ip->ip_dst));
2423 ip->ip_ttl = IPDEFTTL;
2424
2425 /* XXX Should ip_src be updated later ? */
2426
2427 return 0;
2428}
2429
2430#endif /*INET*/
2431
2432#if INET6
2433int
2434ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
2435{
2436 struct ip6_hdr *oip6;
2437 struct ip6_hdr *ip6;
2438 size_t plen;
2439
2440 /* can't tunnel between different AFs */
2441 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2442 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2443 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2444 m_freem(m);
2445 return EINVAL;
2446 }
2447#if 0
2448 /* XXX if the dst is myself, perform nothing. */
2449 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2450 m_freem(m);
2451 return EINVAL;
2452 }
2453#endif
2454
2455 plen = m->m_pkthdr.len;
2456
2457 /*
2458 * grow the mbuf to accommodate the new IPv6 header.
2459 */
2460 if (m->m_len != sizeof(struct ip6_hdr)) {
2461 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2462 }
2463 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2464 struct mbuf *n;
2465 MGET(n, M_DONTWAIT, MT_DATA);
2466 if (!n) {
2467 m_freem(m);
2468 return ENOBUFS;
2469 }
2470 n->m_len = sizeof(struct ip6_hdr);
2471 n->m_next = m->m_next;
2472 m->m_next = n;
2473 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2474 oip6 = mtod(n, struct ip6_hdr *);
2475 } else {
2476 m->m_next->m_len += sizeof(struct ip6_hdr);
2477 m->m_next->m_data -= sizeof(struct ip6_hdr);
2478 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2479 oip6 = mtod(m->m_next, struct ip6_hdr *);
2480 }
2481 ip6 = mtod(m, struct ip6_hdr *);
2482 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2483
2484 /* Fake link-local scope-class addresses */
2485 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) {
2486 oip6->ip6_src.s6_addr16[1] = 0;
2487 }
2488 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) {
2489 oip6->ip6_dst.s6_addr16[1] = 0;
2490 }
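/*
 * (The two stores above clear the KAME-style embedded scope identifier,
 * kept in s6_addr16[1] of link-local addresses, from the inner header so
 * that it does not appear on the wire.)
 */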
2491
2492 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2493 /* ECN consideration. */
2494 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2495 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2496 ip6->ip6_plen = htons(plen);
2497 } else {
2498 /* ip6->ip6_plen will be updated in ip6_output() */
2499 }
2500 ip6->ip6_nxt = IPPROTO_IPV6;
2501 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2502 &ip6->ip6_src, sizeof(ip6->ip6_src));
2503 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2504 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2505 ip6->ip6_hlim = IPV6_DEFHLIM;
2506
2507 /* XXX Should ip6_src be updated later ? */
2508
2509 return 0;
2510}
2511
2512static int
2513ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav)
2514{
2515 struct ip6_hdr *ip6, *ip6i;
2516 struct ip *ip;
2517 size_t plen;
2518 u_int8_t hlim;
2519
2520 /* tunneling over IPv4 */
2521 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2522 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2523 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2524 m_freem(m);
2525 return EINVAL;
2526 }
2527#if 0
2528 /* XXX if the dst is myself, perform nothing. */
2529 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2530 m_freem(m);
2531 return EINVAL;
2532 }
2533#endif
2534
2535 plen = m->m_pkthdr.len;
2536 ip6 = mtod(m, struct ip6_hdr *);
2537 hlim = ip6->ip6_hlim;
2538 /*
2539 * grow the mbuf to accommodate the new IPv4 header.
2540 */
2541 if (m->m_len != sizeof(struct ip6_hdr)) {
2542 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2543 }
2544 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2545 struct mbuf *n;
2546 MGET(n, M_DONTWAIT, MT_DATA);
2547 if (!n) {
2548 m_freem(m);
2549 return ENOBUFS;
2550 }
2551 n->m_len = sizeof(struct ip6_hdr);
2552 n->m_next = m->m_next;
2553 m->m_next = n;
2554 m->m_pkthdr.len += sizeof(struct ip);
2555 ip6i = mtod(n, struct ip6_hdr *);
2556 } else {
2557 m->m_next->m_len += sizeof(struct ip6_hdr);
2558 m->m_next->m_data -= sizeof(struct ip6_hdr);
2559 m->m_pkthdr.len += sizeof(struct ip);
2560 ip6i = mtod(m->m_next, struct ip6_hdr *);
2561 }
2562
2563 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2564 ip = mtod(m, struct ip *);
2565 m->m_len = sizeof(struct ip);
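/*
 * Accounting note: the inner IPv6 header (40 bytes) was moved into the
 * second mbuf and the first mbuf now holds only the new outer IPv4
 * header (20 bytes), so the packet grows by sizeof(struct ip) overall,
 * matching the m_pkthdr.len adjustment made above.
 */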
2566 /*
2567 * Fill in some of the IPv4 fields - we don't need all of them
2568 * because the rest will be filled in by ip_output
2569 */
2570 ip->ip_v = IPVERSION;
2571 ip->ip_hl = sizeof(struct ip) >> 2;
2572 ip->ip_id = 0;
2573 ip->ip_sum = 0;
2574 ip->ip_tos = 0;
2575 ip->ip_off = 0;
2576 ip->ip_ttl = hlim;
2577 ip->ip_p = IPPROTO_IPV6;
2578
2579 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2580 /* ECN consideration. */
2581 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2582
2583 if (plen + sizeof(struct ip) < IP_MAXPACKET) {
2584 ip->ip_len = htons(plen + sizeof(struct ip));
2585 } else {
2586 ip->ip_len = htons(plen);
2587 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2588 "leave ip_len as is (invalid packet)\n"));
2589 }
2590 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2591 &ip->ip_src, sizeof(ip->ip_src));
2592 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2593 &ip->ip_dst, sizeof(ip->ip_dst));
2594
2595 return 0;
2596}
2597
2598int
2599ipsec6_update_routecache_and_output(
2600 struct ipsec_output_state *state,
2601 struct secasvar *sav)
2602{
2603 struct sockaddr_in6* dst6;
2604 struct route_in6 *ro6;
2605 struct ip6_hdr *ip6;
2606 errno_t error = 0;
2607
2608 int plen;
2609 struct ip6_out_args ip6oa;
2610 struct route_in6 ro6_new;
2611 struct flowadv *adv = NULL;
2612
2613 if (!state->m) {
2614 return EINVAL;
2615 }
2616 ip6 = mtod(state->m, struct ip6_hdr *);
2617
2618 // grab sadb_mutex, before updating sah's route cache
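// The sequence below (shared with the IPv4 output path) validates the
// route cached in the SA header: if it is unusable or points at a
// different destination, it is released and re-allocated scoped to the
// SA's outgoing interface before being copied into the output state.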
2619 lck_mtx_lock(sadb_mutex);
2620 ro6 = &sav->sah->sa_route;
2621 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2622 if (ro6->ro_rt) {
2623 RT_LOCK(ro6->ro_rt);
2624 }
2625 if (ROUTE_UNUSABLE(ro6) ||
2626 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2627 if (ro6->ro_rt != NULL) {
2628 RT_UNLOCK(ro6->ro_rt);
2629 }
2630 ROUTE_RELEASE(ro6);
2631 }
2632 if (ro6->ro_rt == 0) {
2633 bzero(dst6, sizeof(*dst6));
2634 dst6->sin6_family = AF_INET6;
2635 dst6->sin6_len = sizeof(*dst6);
2636 dst6->sin6_addr = ip6->ip6_dst;
2637 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
2638 if (ro6->ro_rt) {
2639 RT_LOCK(ro6->ro_rt);
2640 }
2641 }
2642 if (ro6->ro_rt == 0) {
2643 ip6stat.ip6s_noroute++;
2644 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2645 error = EHOSTUNREACH;
2646 // release sadb_mutex, after updating sah's route cache
2647 lck_mtx_unlock(sadb_mutex);
2648 return error;
2649 }
2650
2651 /*
2652 * adjust state->dst if tunnel endpoint is offlink
2653 *
2654 * XXX: caching rt_gateway value in the state is
2655 * not really good, since it may point elsewhere
2656 * when the gateway gets modified to a larger
2657 * sockaddr via rt_setgate(). This is currently
2658 * addressed by SA_SIZE roundup in that routine.
2659 */
2660 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
2661 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2662 }
2663 RT_UNLOCK(ro6->ro_rt);
2664 ROUTE_RELEASE(&state->ro);
2665 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
2666 state->dst = (struct sockaddr *)dst6;
2667 state->tunneled = 6;
2668 // release sadb_mutex, after updating sah's route cache
2669 lck_mtx_unlock(sadb_mutex);
2670
2671 state->m = ipsec6_splithdr(state->m);
2672 if (!state->m) {
2673 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2674 error = ENOMEM;
2675 return error;
2676 }
2677
2678 ip6 = mtod(state->m, struct ip6_hdr *);
2679 switch (sav->sah->saidx.proto) {
2680 case IPPROTO_ESP:
2681#if IPSEC_ESP
2682 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2683#else
2684 m_freem(state->m);
2685 error = EINVAL;
2686#endif
2687 break;
2688 case IPPROTO_AH:
2689 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2690 break;
2691 default:
2692 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2693 m_freem(state->m);
2694 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2695 error = EINVAL;
2696 break;
2697 }
2698 if (error) {
2699 // If error, packet already freed by above output routines
2700 state->m = NULL;
2701 return error;
2702 }
2703
2704 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2705 if (plen > IPV6_MAXPACKET) {
2706 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2707 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2708 error = EINVAL;/*XXX*/
2709 return error;
2710 }
2711 ip6 = mtod(state->m, struct ip6_hdr *);
2712 ip6->ip6_plen = htons(plen);
2713
2714 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2715 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2716
2717 /* Increment statistics */
2718 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, mbuf_pkthdr_len(state->m), 0);
2719
2720 /* Send to ip6_output */
2721 bzero(&ro6_new, sizeof(ro6_new));
2722 bzero(&ip6oa, sizeof(ip6oa));
2723 ip6oa.ip6oa_flowadv.code = 0;
2724 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2725 if (state->outgoing_if) {
2726 ip6oa.ip6oa_boundif = state->outgoing_if;
2727 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2728 }
2729
2730 adv = &ip6oa.ip6oa_flowadv;
2731 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2732 state->m = NULL;
2733
2734 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2735 error = ENOBUFS;
2736 ifnet_disable_output(sav->sah->ipsec_if);
2737 return error;
2738 }
2739
2740 return 0;
2741}
2742
2743int
2744ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
2745{
2746 struct mbuf *m;
2747 struct ip6_hdr *ip6;
2748 struct ip *oip;
2749 struct ip *ip;
2750 size_t hlen;
2751 size_t plen;
2752
2753 m = state->m;
2754 if (!m) {
2755 return EINVAL;
2756 }
2757
2758 /* can't tunnel between different AFs */
2759 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2760 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2761 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2762 m_freem(m);
2763 return EINVAL;
2764 }
2765#if 0
2766 /* XXX if the dst is myself, perform nothing. */
2767 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2768 m_freem(m);
2769 return EINVAL;
2770 }
2771#endif
2772
2773 if (m->m_len < sizeof(*ip)) {
2774 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2775 return EINVAL;
2776 }
2777
2778 ip = mtod(m, struct ip *);
2779#ifdef _IP_VHL
2780 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2781#else
2782 hlen = ip->ip_hl << 2;
2783#endif
2784
2785 if (m->m_len != hlen) {
2786 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2787 return EINVAL;
2788 }
2789
2790 /* generate header checksum */
2791 ip->ip_sum = 0;
2792#ifdef _IP_VHL
2793 ip->ip_sum = in_cksum(m, hlen);
2794#else
2795 ip->ip_sum = in_cksum(m, hlen);
2796#endif
2797
2798 plen = m->m_pkthdr.len; // save the original IPv4 packet length; this will be the IPv6 payload length
2799
2800 /*
2801 * First move the IPv4 header to the second mbuf in the chain
2802 */
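/*
 * Note: m_pkthdr.len is grown by sizeof(struct ip6_hdr) up front; the
 * inner IPv4 header (hlen bytes) is copied into the second mbuf here,
 * and the first mbuf is later resized to hold the new outer IPv6
 * header, for a net growth of sizeof(struct ip6_hdr).
 */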
2803 if (M_LEADINGSPACE(m->m_next) < hlen) {
2804 struct mbuf *n;
2805 MGET(n, M_DONTWAIT, MT_DATA);
2806 if (!n) {
2807 m_freem(m);
2808 return ENOBUFS;
2809 }
2810 n->m_len = hlen;
2811 n->m_next = m->m_next;
2812 m->m_next = n;
2813 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2814 oip = mtod(n, struct ip *);
2815 } else {
2816 m->m_next->m_len += hlen;
2817 m->m_next->m_data -= hlen;
2818 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2819 oip = mtod(m->m_next, struct ip *);
2820 }
2821 ip = mtod(m, struct ip *);
2822 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2823
2824 /*
2825 * Grow the first mbuf to accommodate the new IPv6 header.
2826 */
2827 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2828 struct mbuf *n;
2829 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2830 if (!n) {
2831 m_freem(m);
2832 return ENOBUFS;
2833 }
2834 M_COPY_PKTHDR(n, m);
2835 MH_ALIGN(n, sizeof(struct ip6_hdr));
2836 n->m_len = sizeof(struct ip6_hdr);
2837 n->m_next = m->m_next;
2838 m->m_next = NULL;
2839 m_freem(m);
2840 state->m = n;
2841 m = state->m;
2842 } else {
2843 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2844 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2845 }
2846 ip6 = mtod(m, struct ip6_hdr *);
2847 ip6->ip6_flow = 0;
2848 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2849 ip6->ip6_vfc |= IPV6_VERSION;
2850
2851 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2852 /* ECN consideration. */
2853 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
2854 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
2855 ip6->ip6_plen = htons(plen);
2856 } else {
2857 /* ip6->ip6_plen will be updated in ip6_output() */
2858 }
2859
2860 ip6->ip6_nxt = IPPROTO_IPV4;
2861 ip6->ip6_hlim = IPV6_DEFHLIM;
2862
2863 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2864 &ip6->ip6_src, sizeof(ip6->ip6_src));
2865 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2866 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2867
2868 return 0;
2869}
2870
2871#endif /*INET6*/
2872
2873/*
2874 * Check the variable replay window.
2875 * ipsec_chkreplay() performs replay check before ICV verification.
2876 * ipsec_updatereplay() updates replay bitmap. This must be called after
2877 * ICV verification (it also performs replay check, which is usually done
2878 * beforehand).
2879 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2880 *
2881 * based on RFC 2401.
2882 */
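/*
 * Worked example (illustrative): with replay->wsize == 4 the window
 * covers wsizeb == 32 sequence numbers ending at replay->lastseq.  A
 * received seq <= lastseq with diff = lastseq - seq < 32 maps to bit
 * (diff % 8) of byte (frlast - diff / 8) in the bitmap; if that bit is
 * already set the packet is a replay, otherwise it is out of order but
 * acceptable.
 */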
2883int
2884ipsec_chkreplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2885{
2886 const struct secreplay *replay;
2887 u_int32_t diff;
2888 int fr;
2889 u_int32_t wsizeb; /* constant: bits of window size */
2890 int frlast; /* constant: last frame */
2891
2892
2893 /* sanity check */
2894 if (sav == NULL) {
2895 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2896 }
2897
2898 lck_mtx_lock(sadb_mutex);
2899 replay = sav->replay[replay_index];
2900
2901 if (replay->wsize == 0) {
2902 lck_mtx_unlock(sadb_mutex);
2903 return 1; /* no need to check replay. */
2904 }
2905
2906 /* constant */
2907 frlast = replay->wsize - 1;
2908 wsizeb = replay->wsize << 3;
2909
2910 /* sequence number of 0 is invalid */
2911 if (seq == 0) {
2912 lck_mtx_unlock(sadb_mutex);
2913 return 0;
2914 }
2915
2916 /* first time is always okay */
2917 if (replay->count == 0) {
2918 lck_mtx_unlock(sadb_mutex);
2919 return 1;
2920 }
2921
2922 if (seq > replay->lastseq) {
2923 /* larger sequences are okay */
2924 lck_mtx_unlock(sadb_mutex);
2925 return 1;
2926 } else {
2927 /* seq is equal to or less than lastseq. */
2928 diff = replay->lastseq - seq;
2929
2930 /* over range to check, i.e. too old or wrapped */
2931 if (diff >= wsizeb) {
2932 lck_mtx_unlock(sadb_mutex);
2933 return 0;
2934 }
2935
2936 fr = frlast - diff / 8;
2937
2938 /* this packet already seen ? */
2939 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2940 lck_mtx_unlock(sadb_mutex);
2941 return 0;
2942 }
2943
2944 /* out of order but good */
2945 lck_mtx_unlock(sadb_mutex);
2946 return 1;
2947 }
2948}
2949
2950/*
2951 * Check the replay counter and update the replay window.
2952 * OUT: 0: OK (packet accepted)
2953 *      1: NG (packet rejected)
2954 */
2955int
2956ipsec_updatereplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index)
2957{
2958 struct secreplay *replay;
2959 u_int32_t diff;
2960 int fr;
2961 u_int32_t wsizeb; /* constant: bits of window size */
2962 int frlast; /* constant: last frame */
2963
2964 /* sanity check */
2965 if (sav == NULL) {
2966 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2967 }
2968
2969 lck_mtx_lock(sadb_mutex);
2970 replay = sav->replay[replay_index];
2971
2972 if (replay->wsize == 0) {
2973 goto ok; /* no need to check replay. */
2974 }
2975 /* constant */
2976 frlast = replay->wsize - 1;
2977 wsizeb = replay->wsize << 3;
2978
2979 /* sequence number of 0 is invalid */
2980 if (seq == 0) {
2981 lck_mtx_unlock(sadb_mutex);
2982 return 1;
2983 }
2984
2985 /* first time */
2986 if (replay->count == 0) {
2987 replay->lastseq = seq;
2988 bzero(replay->bitmap, replay->wsize);
2989 (replay->bitmap)[frlast] = 1;
2990 goto ok;
2991 }
2992
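/*
 * Two cases follow: a newer sequence number advances the window (the
 * bitmap is shifted left by the difference, or reset when the jump
 * exceeds the window), while an older-but-in-window sequence number only
 * has its bit marked, after the same replay check as ipsec_chkreplay().
 */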
2993 if (seq > replay->lastseq) {
2994 /* seq is larger than lastseq. */
2995 diff = seq - replay->lastseq;
2996
2997 /* new larger sequence number */
2998 if (diff < wsizeb) {
2999 /* In window */
3000 /* set bit for this packet */
3001 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
3002 (replay->bitmap)[frlast] |= 1;
3003 } else {
3004 /* this packet has a "way larger" sequence number; reset the window */
3005 bzero(replay->bitmap, replay->wsize);
3006 (replay->bitmap)[frlast] = 1;
3007 }
3008 replay->lastseq = seq;
3009
3010 /* larger is good */
3011 } else {
3012 /* seq is equal to or less than lastseq. */
3013 diff = replay->lastseq - seq;
3014
3015 /* over range to check, i.e. too old or wrapped */
3016 if (diff >= wsizeb) {
3017 lck_mtx_unlock(sadb_mutex);
3018 return 1;
3019 }
3020
3021 fr = frlast - diff / 8;
3022
3023 /* this packet already seen ? */
3024 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
3025 lck_mtx_unlock(sadb_mutex);
3026 return 1;
3027 }
3028
3029 /* mark as seen */
3030 (replay->bitmap)[fr] |= (1 << (diff % 8));
3031
3032 /* out of order but good */
3033 }
3034
3035ok:
3036 if (replay->count == ~0) {
3037 /* set overflow flag */
3038 replay->overflow++;
3039
3040 /* don't increment, no more packets accepted */
3041 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
3042 lck_mtx_unlock(sadb_mutex);
3043 return 1;
3044 }
3045
3046 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
3047 replay->overflow, ipsec_logsastr(sav)));
3048 }
3049
3050 replay->count++;
3051
3052 lck_mtx_unlock(sadb_mutex);
3053 return 0;
3054}
3055
3056/*
3057 * shift variable length buffer to left.
3058 * IN: bitmap: pointer to the buffer
3059 * nbit: the number of bits to shift.
3060 * wsize: buffer size (bytes).
3061 */
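/*
 * Illustrative example: with wsize == 2, vshiftl({0x00, 0x80}, 1, 2)
 * carries the high bit of bitmap[1] into the low bit of bitmap[0],
 * yielding {0x01, 0x00}; bits shifted out of bitmap[0] (the oldest end
 * of the window) are discarded.
 */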
3062static void
3063vshiftl(unsigned char *bitmap, int nbit, int wsize)
3064{
3065 int s, j, i;
3066 unsigned char over;
3067
3068 for (j = 0; j < nbit; j += 8) {
3069 s = (nbit - j < 8) ? (nbit - j): 8;
3070 bitmap[0] <<= s;
3071 for (i = 1; i < wsize; i++) {
3072 over = (bitmap[i] >> (8 - s));
3073 bitmap[i] <<= s;
3074 bitmap[i - 1] |= over;
3075 }
3076 }
3077
3078 return;
3079}
3080
3081const char *
3082ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
3083{
3084 static char buf[256] __attribute__((aligned(4)));
3085 char *p;
3086 u_int8_t *s, *d;
3087
3088 s = (u_int8_t *)(&ip->ip_src);
3089 d = (u_int8_t *)(&ip->ip_dst);
3090
3091 p = buf;
3092 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3093 while (p && *p) {
3094 p++;
3095 }
3096 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
3097 s[0], s[1], s[2], s[3]);
3098 while (p && *p) {
3099 p++;
3100 }
3101 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
3102 d[0], d[1], d[2], d[3]);
3103 while (p && *p) {
3104 p++;
3105 }
3106 snprintf(p, sizeof(buf) - (p - buf), ")");
3107
3108 return buf;
3109}
3110
3111#if INET6
3112const char *
3113ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3114{
3115 static char buf[256] __attribute__((aligned(4)));
3116 char *p;
3117
3118 p = buf;
3119 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3120 while (p && *p) {
3121 p++;
3122 }
3123 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3124 ip6_sprintf(&ip6->ip6_src));
3125 while (p && *p) {
3126 p++;
3127 }
3128 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3129 ip6_sprintf(&ip6->ip6_dst));
3130 while (p && *p) {
3131 p++;
3132 }
3133 snprintf(p, sizeof(buf) - (p - buf), ")");
3134
3135 return buf;
3136}
3137#endif /*INET6*/
3138
3139const char *
3140ipsec_logsastr(struct secasvar *sav)
3141{
3142 static char buf[256] __attribute__((aligned(4)));
3143 char *p;
3144 struct secasindex *saidx = &sav->sah->saidx;
3145
3146 /* validity check */
3147 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3148 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) {
3149 panic("ipsec_logsastr: family mismatched.\n");
3150 }
3151
3152 p = buf;
3153 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3154 while (p && *p) {
3155 p++;
3156 }
3157 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3158 u_int8_t *s, *d;
3159 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3160 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3161 snprintf(p, sizeof(buf) - (p - buf),
3162 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3163 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3164 }
3165#if INET6
3166 else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3167 snprintf(p, sizeof(buf) - (p - buf),
3168 "src=%s",
3169 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3170 while (p && *p) {
3171 p++;
3172 }
3173 snprintf(p, sizeof(buf) - (p - buf),
3174 " dst=%s",
3175 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3176 }
3177#endif
3178 while (p && *p) {
3179 p++;
3180 }
3181 snprintf(p, sizeof(buf) - (p - buf), ")");
3182
3183 return buf;
3184}
3185
3186void
3187ipsec_dumpmbuf(struct mbuf *m)
3188{
3189 int totlen;
3190 int i;
3191 u_char *p;
3192
3193 totlen = 0;
3194 printf("---\n");
3195 while (m) {
3196 p = mtod(m, u_char *);
3197 for (i = 0; i < m->m_len; i++) {
3198 printf("%02x ", p[i]);
3199 totlen++;
3200 if (totlen % 16 == 0) {
3201 printf("\n");
3202 }
3203 }
3204 m = m->m_next;
3205 }
3206 if (totlen % 16 != 0) {
3207 printf("\n");
3208 }
3209 printf("---\n");
3210}
3211
3212#if INET
3213/*
3214 * IPsec output logic for IPv4.
3215 */
3216static int
3217ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3218{
3219 struct ip *ip = NULL;
3220 int error = 0;
3221 struct sockaddr_in *dst4;
3222 struct route *ro4;
3223
3224 /* validity check */
3225 if (sav == NULL || sav->sah == NULL) {
3226 error = EINVAL;
3227 goto bad;
3228 }
3229
3230 /*
3231 * If there is no valid SA, we give up on processing any
3232 * further.  In such a case, the SA's status has changed
3233 * from DYING to DEAD after allocation.  If a packet is
3234 * sent to the receiver over a dead SA, the receiver
3235 * cannot decode it because the SA is already dead.
3236 */
3237 if (sav->state != SADB_SASTATE_MATURE
3238 && sav->state != SADB_SASTATE_DYING) {
3239 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3240 error = EINVAL;
3241 goto bad;
3242 }
3243
3244 state->outgoing_if = sav->sah->outgoing_if;
3245
3246 /*
3247 * There may be a case where the SA status changes while
3248 * we are referring to it.
3249 */
3250
3251 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3252 /*
3253 * build IPsec tunnel.
3254 */
3255 state->m = ipsec4_splithdr(state->m);
3256 if (!state->m) {
3257 error = ENOMEM;
3258 goto bad;
3259 }
3260
3261 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3262 error = ipsec46_encapsulate(state, sav);
3263 if (error) {
3264 // packet already freed by encapsulation error handling
3265 state->m = NULL;
3266 return error;
3267 }
3268
3269 error = ipsec6_update_routecache_and_output(state, sav);
3270 return error;
3271 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3272 error = ipsec4_encapsulate(state->m, sav);
3273 if (error) {
3274 state->m = NULL;
3275 goto bad;
3276 }
3277 ip = mtod(state->m, struct ip *);
3278
3279 // grab sadb_mutex, before updating sah's route cache
3280 lck_mtx_lock(sadb_mutex);
3281 ro4 = (struct route *)&sav->sah->sa_route;
3282 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3283 if (ro4->ro_rt != NULL) {
3284 RT_LOCK(ro4->ro_rt);
3285 }
3286 if (ROUTE_UNUSABLE(ro4) ||
3287 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3288 if (ro4->ro_rt != NULL) {
3289 RT_UNLOCK(ro4->ro_rt);
3290 }
3291 ROUTE_RELEASE(ro4);
3292 }
3293 if (ro4->ro_rt == 0) {
3294 dst4->sin_family = AF_INET;
3295 dst4->sin_len = sizeof(*dst4);
3296 dst4->sin_addr = ip->ip_dst;
3297 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3298 if (ro4->ro_rt == 0) {
3299 OSAddAtomic(1, &ipstat.ips_noroute);
3300 error = EHOSTUNREACH;
3301 // release sadb_mutex, after updating sah's route cache
3302 lck_mtx_unlock(sadb_mutex);
3303 goto bad;
3304 }
3305 RT_LOCK(ro4->ro_rt);
3306 }
3307
3308 /*
3309 * adjust state->dst if tunnel endpoint is offlink
3310 *
3311 * XXX: caching rt_gateway value in the state is
3312 * not really good, since it may point elsewhere
3313 * when the gateway gets modified to a larger
3314 * sockaddr via rt_setgate(). This is currently
3315 * addressed by SA_SIZE roundup in that routine.
3316 */
3317 if (ro4->ro_rt->rt_flags & RTF_GATEWAY) {
3318 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3319 }
3320 RT_UNLOCK(ro4->ro_rt);
3321 ROUTE_RELEASE(&state->ro);
3322 route_copyout((struct route *)&state->ro, ro4, sizeof(struct route));
3323 state->dst = (struct sockaddr *)dst4;
3324 state->tunneled = 4;
3325 // release sadb_mutex, after updating sah's route cache
3326 lck_mtx_unlock(sadb_mutex);
3327 } else {
3328 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3329 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3330 error = EAFNOSUPPORT;
3331 goto bad;
3332 }
3333 }
3334
3335 state->m = ipsec4_splithdr(state->m);
3336 if (!state->m) {
3337 error = ENOMEM;
3338 goto bad;
3339 }
3340 switch (sav->sah->saidx.proto) {
3341 case IPPROTO_ESP:
3342#if IPSEC_ESP
3343 if ((error = esp4_output(state->m, sav)) != 0) {
3344 state->m = NULL;
3345 goto bad;
3346 }
3347 break;
3348#else
3349 m_freem(state->m);
3350 state->m = NULL;
3351 error = EINVAL;
3352 goto bad;
3353#endif
3354 case IPPROTO_AH:
3355 if ((error = ah4_output(state->m, sav)) != 0) {
3356 state->m = NULL;
3357 goto bad;
3358 }
3359 break;
3360 default:
3361 ipseclog((LOG_ERR,
3362 "ipsec4_output: unknown ipsec protocol %d\n",
3363 sav->sah->saidx.proto));
3364 m_freem(state->m);
3365 state->m = NULL;
3366 error = EPROTONOSUPPORT;
3367 goto bad;
3368 }
3369
3370 if (state->m == 0) {
3371 error = ENOMEM;
3372 goto bad;
3373 }
3374
3375 return 0;
3376
3377bad:
3378 return error;
3379}
3380
3381int
3382ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3383{
3384 int error = 0;
3385 struct secasvar *sav = NULL;
3386
3387 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3388
3389 if (state == NULL) {
3390 panic("state == NULL in ipsec4_output");
3391 }
3392 if (state->m == NULL) {
3393 panic("state->m == NULL in ipsec4_output");
3394 }
3395 if (state->dst == NULL) {
3396 panic("state->dst == NULL in ipsec4_output");
3397 }
3398
3399 struct ip *ip = mtod(state->m, struct ip *);
3400
3401 struct sockaddr_in src = {};
3402 src.sin_family = AF_INET;
3403 src.sin_len = sizeof(src);
3404 memcpy(&src.sin_addr, &ip->ip_src, sizeof(src.sin_addr));
3405
3406 struct sockaddr_in dst = {};
3407 dst.sin_family = AF_INET;
3408 dst.sin_len = sizeof(dst);
3409 memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr));
3410
3411 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET,
3412 (struct sockaddr *)&src,
3413 (struct sockaddr *)&dst);
3414 if (sav == NULL) {
3415 goto bad;
3416 }
3417
3418 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3419 goto bad;
3420 }
3421
3422 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3423 if (sav) {
3424 key_freesav(sav, KEY_SADB_UNLOCKED);
3425 }
3426 return 0;
3427
3428bad:
3429 if (sav) {
3430 key_freesav(sav, KEY_SADB_UNLOCKED);
3431 }
3432 m_freem(state->m);
3433 state->m = NULL;
3434 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3435 return error;
3436}
3437
3438int
3439ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3440{
3441 struct ip *ip = NULL;
3442 struct ipsecrequest *isr = NULL;
3443 struct secasindex saidx;
3444 struct secasvar *sav = NULL;
3445 int error = 0;
3446 struct sockaddr_in *sin;
3447
3448 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3449
3450 if (!state) {
3451 panic("state == NULL in ipsec4_output");
3452 }
3453 if (!state->m) {
3454 panic("state->m == NULL in ipsec4_output");
3455 }
3456 if (!state->dst) {
3457 panic("state->dst == NULL in ipsec4_output");
3458 }
3459
3460 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
3461
3462 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3463 printf("ipsec4_output: applied SP\n");
3464 kdebug_secpolicy(sp));
3465
3466 for (isr = sp->req; isr != NULL; isr = isr->next) {
3467 /* make SA index for search proper SA */
3468 ip = mtod(state->m, struct ip *);
3469 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3470 saidx.mode = isr->saidx.mode;
3471 saidx.reqid = isr->saidx.reqid;
3472 sin = (struct sockaddr_in *)&saidx.src;
3473 if (sin->sin_len == 0) {
3474 sin->sin_len = sizeof(*sin);
3475 sin->sin_family = AF_INET;
3476 sin->sin_port = IPSEC_PORT_ANY;
3477 bcopy(&ip->ip_src, &sin->sin_addr,
3478 sizeof(sin->sin_addr));
3479 }
3480 sin = (struct sockaddr_in *)&saidx.dst;
3481 if (sin->sin_len == 0) {
3482 sin->sin_len = sizeof(*sin);
3483 sin->sin_family = AF_INET;
3484 sin->sin_port = IPSEC_PORT_ANY;
3485 /*
3486 * Get the port from the packet if the upper layer is UDP,
3487 * NAT traversal is enabled, and the mode is transport.
3488 */
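/*
 * (With the destination port filled in below, key_checkrequest() can
 * match a UDP-encapsulated ESP SA that is keyed on the peer's NAT-T
 * port rather than on the address alone.)
 */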
3489
3490 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3491 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3492 if (ip->ip_p == IPPROTO_UDP) {
3493 struct udphdr *udp;
3494 size_t hlen;
3495#ifdef _IP_VHL
3496 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3497#else
3498 hlen = ip->ip_hl << 2;
3499#endif
3500 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3501 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3502 if (!state->m) {
3503 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3504 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3505 goto bad;
3506 }
3507 ip = mtod(state->m, struct ip *);
3508 }
3509 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3510 sin->sin_port = udp->uh_dport;
3511 }
3512 }
3513
3514 bcopy(&ip->ip_dst, &sin->sin_addr,
3515 sizeof(sin->sin_addr));
3516 }
3517
3518 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3519 /*
3520 * IPsec processing is required, but no SA was found.
3521 * I assume that key_acquire() has been called
3522 * to get/establish the SA.  Here I discard
3523 * this packet because it is the responsibility of the
3524 * upper layer to retransmit it.
3525 */
3526 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3527 goto bad;
3528 }
3529
3530 /* validity check */
3531 if (sav == NULL) {
3532 switch (ipsec_get_reqlevel(isr)) {
3533 case IPSEC_LEVEL_USE:
3534 continue;
3535 case IPSEC_LEVEL_REQUIRE:
3536 /* must not be reached here. */
3537 panic("ipsec4_output: no SA found, but required.");
3538 }
3539 }
3540
3541 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3542 goto bad;
3543 }
3544 }
3545
3546 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
3547 if (sav) {
3548 key_freesav(sav, KEY_SADB_UNLOCKED);
3549 }
3550 return 0;
3551
3552bad:
3553 if (sav) {
3554 key_freesav(sav, KEY_SADB_UNLOCKED);
3555 }
3556 m_freem(state->m);
3557 state->m = NULL;
3558 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0);
3559 return error;
3560}
3561
3562#endif
3563
3564#if INET6
3565/*
3566 * IPsec output logic for IPv6, transport mode.
3567 */
3568static int
3569ipsec6_output_trans_internal(
3570 struct ipsec_output_state *state,
3571 struct secasvar *sav,
3572 u_char *nexthdrp,
3573 struct mbuf *mprev)
3574{
3575 struct ip6_hdr *ip6;
3576 int error = 0;
3577 int plen;
3578
3579 /* validity check */
3580 if (sav == NULL || sav->sah == NULL) {
3581 error = EINVAL;
3582 goto bad;
3583 }
3584
3585 /*
3586 * If there is no valid SA, we give up on processing.
3587 * see same place at ipsec4_output().
3588 */
3589 if (sav->state != SADB_SASTATE_MATURE
3590 && sav->state != SADB_SASTATE_DYING) {
3591 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3592 error = EINVAL;
3593 goto bad;
3594 }
3595
3596 state->outgoing_if = sav->sah->outgoing_if;
3597
3598 switch (sav->sah->saidx.proto) {
3599 case IPPROTO_ESP:
3600#if IPSEC_ESP
3601 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3602#else
3603 m_freem(state->m);
3604 error = EINVAL;
3605#endif
3606 break;
3607 case IPPROTO_AH:
3608 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3609 break;
3610 default:
3611 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3612 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3613 m_freem(state->m);
3614 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3615 error = EPROTONOSUPPORT;
3616 break;
3617 }
3618 if (error) {
3619 state->m = NULL;
3620 goto bad;
3621 }
3622 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3623 if (plen > IPV6_MAXPACKET) {
3624 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3625 "IPsec with IPv6 jumbogram is not supported\n"));
3626 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3627 error = EINVAL; /*XXX*/
3628 goto bad;
3629 }
3630 ip6 = mtod(state->m, struct ip6_hdr *);
3631 ip6->ip6_plen = htons(plen);
3632
3633 return 0;
3634bad:
3635 return error;
3636}
3637
3638int
3639ipsec6_output_trans(
3640 struct ipsec_output_state *state,
3641 u_char *nexthdrp,
3642 struct mbuf *mprev,
3643 struct secpolicy *sp,
3644 __unused int flags,
3645 int *tun)
3646{
3647 struct ip6_hdr *ip6;
3648 struct ipsecrequest *isr = NULL;
3649 struct secasindex saidx;
3650 int error = 0;
3651 struct sockaddr_in6 *sin6;
3652 struct secasvar *sav = NULL;
3653
3654 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3655
3656 if (!state) {
3657 panic("state == NULL in ipsec6_output_trans");
3658 }
3659 if (!state->m) {
3660 panic("state->m == NULL in ipsec6_output_trans");
3661 }
3662 if (!nexthdrp) {
3663 panic("nexthdrp == NULL in ipsec6_output_trans");
3664 }
3665 if (!mprev) {
3666 panic("mprev == NULL in ipsec6_output_trans");
3667 }
3668 if (!sp) {
3669 panic("sp == NULL in ipsec6_output_trans");
3670 }
3671 if (!tun) {
3672 panic("tun == NULL in ipsec6_output_trans");
3673 }
3674
3675 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3676 printf("ipsec6_output_trans: applyed SP\n");
3677 kdebug_secpolicy(sp));
3678
3679 *tun = 0;
3680 for (isr = sp->req; isr; isr = isr->next) {
3681 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3682 /* the rest will be handled by ipsec6_output_tunnel() */
3683 break;
3684 }
3685
3686 /* make SA index for search proper SA */
3687 ip6 = mtod(state->m, struct ip6_hdr *);
3688 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3689 saidx.mode = isr->saidx.mode;
3690 saidx.reqid = isr->saidx.reqid;
3691 sin6 = (struct sockaddr_in6 *)&saidx.src;
3692 if (sin6->sin6_len == 0) {
3693 sin6->sin6_len = sizeof(*sin6);
3694 sin6->sin6_family = AF_INET6;
3695 sin6->sin6_port = IPSEC_PORT_ANY;
3696 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3697 sizeof(ip6->ip6_src));
3698 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3699 /* fix scope id for comparing SPD */
3700 sin6->sin6_addr.s6_addr16[1] = 0;
3701 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3702 }
3703 }
3704 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3705 if (sin6->sin6_len == 0) {
3706 sin6->sin6_len = sizeof(*sin6);
3707 sin6->sin6_family = AF_INET6;
3708 sin6->sin6_port = IPSEC_PORT_ANY;
3709 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3710 sizeof(ip6->ip6_dst));
3711 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3712 /* fix scope id for comparing SPD */
3713 sin6->sin6_addr.s6_addr16[1] = 0;
3714 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3715 }
3716 }
3717
3718 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3719 /*
3720 * IPsec processing is required, but no SA was found.
3721 * I assume that key_acquire() has been called
3722 * to get/establish the SA.  Here I discard
3723 * this packet because it is the responsibility of the
3724 * upper layer to retransmit it.
3725 */
3726 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3727 error = ENOENT;
3728
3729 /*
3730 * Notify the fact that the packet is discarded
3731 * to ourselves. I believe this is better than
3732 * just silently discarding. (jinmei@kame.net)
3733 * XXX: should we restrict the error to TCP packets?
3734 * XXX: should we directly notify sockets via
3735 * pfctlinputs?
3736 */
3737 icmp6_error(state->m, ICMP6_DST_UNREACH,
3738 ICMP6_DST_UNREACH_ADMIN, 0);
3739 state->m = NULL; /* icmp6_error freed the mbuf */
3740 goto bad;
3741 }
3742
3743 /* validity check */
3744 if (sav == NULL) {
3745 switch (ipsec_get_reqlevel(isr)) {
3746 case IPSEC_LEVEL_USE:
3747 continue;
3748 case IPSEC_LEVEL_REQUIRE:
3749 /* must not be reached here. */
3750 panic("ipsec6_output_trans: no SA found, but required.");
3751 }
3752 }
3753
3754 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
3755 goto bad;
3756 }
3757 }
3758
3759 /* if we have more to go, we need tunnel mode processing */
3760 if (isr != NULL) {
3761 *tun = 1;
3762 }
3763
3764 if (sav) {
3765 key_freesav(sav, KEY_SADB_UNLOCKED);
3766 }
3767 return 0;
3768
3769bad:
3770 if (sav) {
3771 key_freesav(sav, KEY_SADB_UNLOCKED);
3772 }
3773 m_freem(state->m);
3774 state->m = NULL;
3775 return error;
3776}
3777
3778/*
3779 * IPsec output logic for IPv6, tunnel mode.
3780 */
3781static int
3782ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3783{
3784 struct ip6_hdr *ip6;
3785 int error = 0;
3786 int plen;
3787 struct sockaddr_in6* dst6;
3788 struct route_in6 *ro6;
3789
3790 /* validity check */
3791 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3792 error = EINVAL;
3793 goto bad;
3794 }
3795
3796 /*
3797 * If there is no valid SA, we give up on processing.
3798 * see same place at ipsec4_output().
3799 */
3800 if (sav->state != SADB_SASTATE_MATURE
3801 && sav->state != SADB_SASTATE_DYING) {
3802 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3803 error = EINVAL;
3804 goto bad;
3805 }
3806
3807 state->outgoing_if = sav->sah->outgoing_if;
3808
3809 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3810 /*
3811 * build IPsec tunnel.
3812 */
3813 state->m = ipsec6_splithdr(state->m);
3814 if (!state->m) {
3815 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3816 error = ENOMEM;
3817 goto bad;
3818 }
3819
3820 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3821 error = ipsec6_encapsulate(state->m, sav);
3822 if (error) {
3823 state->m = 0;
3824 goto bad;
3825 }
3826 ip6 = mtod(state->m, struct ip6_hdr *);
3827 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3828 struct ip *ip;
3829 struct sockaddr_in* dst4;
3830 struct route *ro4 = NULL;
3831 struct route ro4_copy;
3832 struct ip_out_args ipoa;
3833
3834 bzero(&ipoa, sizeof(ipoa));
3835 ipoa.ipoa_boundif = IFSCOPE_NONE;
3836 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
3837 ipoa.ipoa_sotc = SO_TC_UNSPEC;
3838 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
3839
3840 if (must_be_last) {
3841 *must_be_last = 1;
3842 }
3843
3844 state->tunneled = 4; /* must not process any further in ip6_output */
3845 error = ipsec64_encapsulate(state->m, sav);
3846 if (error) {
3847 state->m = 0;
3848 goto bad;
3849 }
3850 /* Now we have an IPv4 packet */
3851 ip = mtod(state->m, struct ip *);
3852
3853 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3854 lck_mtx_lock(sadb_mutex);
3855 ro4 = (struct route *)&sav->sah->sa_route;
3856 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3857 if (ro4->ro_rt) {
3858 RT_LOCK(ro4->ro_rt);
3859 }
3860 if (ROUTE_UNUSABLE(ro4) ||
3861 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3862 if (ro4->ro_rt != NULL) {
3863 RT_UNLOCK(ro4->ro_rt);
3864 }
3865 ROUTE_RELEASE(ro4);
3866 }
3867 if (ro4->ro_rt == NULL) {
3868 dst4->sin_family = AF_INET;
3869 dst4->sin_len = sizeof(*dst4);
3870 dst4->sin_addr = ip->ip_dst;
3871 } else {
3872 RT_UNLOCK(ro4->ro_rt);
3873 }
3874 route_copyout(&ro4_copy, ro4, sizeof(struct route));
3875 // release sadb_mutex, after updating sah's route cache and getting a local copy
3876 lck_mtx_unlock(sadb_mutex);
3877 state->m = ipsec4_splithdr(state->m);
3878 if (!state->m) {
3879 error = ENOMEM;
3880 ROUTE_RELEASE(&ro4_copy);
3881 goto bad;
3882 }
3883 switch (sav->sah->saidx.proto) {
3884 case IPPROTO_ESP:
3885#if IPSEC_ESP
3886 if ((error = esp4_output(state->m, sav)) != 0) {
3887 state->m = NULL;
3888 ROUTE_RELEASE(&ro4_copy);
3889 goto bad;
3890 }
3891 break;
3892
3893#else
3894 m_freem(state->m);
3895 state->m = NULL;
3896 error = EINVAL;
3897 ROUTE_RELEASE(&ro4_copy);
3898 goto bad;
3899#endif
3900 case IPPROTO_AH:
3901 if ((error = ah4_output(state->m, sav)) != 0) {
3902 state->m = NULL;
3903 ROUTE_RELEASE(&ro4_copy);
3904 goto bad;
3905 }
3906 break;
3907 default:
3908 ipseclog((LOG_ERR,
3909 "ipsec4_output: unknown ipsec protocol %d\n",
3910 sav->sah->saidx.proto));
3911 m_freem(state->m);
3912 state->m = NULL;
3913 error = EPROTONOSUPPORT;
3914 ROUTE_RELEASE(&ro4_copy);
3915 goto bad;
3916 }
3917
3918 if (state->m == 0) {
3919 error = ENOMEM;
3920 ROUTE_RELEASE(&ro4_copy);
3921 goto bad;
3922 }
3923 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3924 ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);
3925
3926 ip = mtod(state->m, struct ip *);
3927 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3928 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3929 state->m = NULL;
3930 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3931 lck_mtx_lock(sadb_mutex);
3932 route_copyin(&ro4_copy, ro4, sizeof(struct route));
3933 lck_mtx_unlock(sadb_mutex);
3934 if (error != 0) {
3935 goto bad;
3936 }
3937 goto done;
3938 } else {
3939 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3940 "unsupported inner family, spi=%u\n",
3941 (u_int32_t)ntohl(sav->spi)));
3942 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3943 error = EAFNOSUPPORT;
3944 goto bad;
3945 }
3946
3947 // grab sadb_mutex, before updating sah's route cache
3948 lck_mtx_lock(sadb_mutex);
3949 ro6 = &sav->sah->sa_route;
3950 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
3951 if (ro6->ro_rt) {
3952 RT_LOCK(ro6->ro_rt);
3953 }
3954 if (ROUTE_UNUSABLE(ro6) ||
3955 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
3956 if (ro6->ro_rt != NULL) {
3957 RT_UNLOCK(ro6->ro_rt);
3958 }
3959 ROUTE_RELEASE(ro6);
3960 }
3961 if (ro6->ro_rt == 0) {
3962 bzero(dst6, sizeof(*dst6));
3963 dst6->sin6_family = AF_INET6;
3964 dst6->sin6_len = sizeof(*dst6);
3965 dst6->sin6_addr = ip6->ip6_dst;
3966 rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if);
3967 if (ro6->ro_rt) {
3968 RT_LOCK(ro6->ro_rt);
3969 }
3970 }
3971 if (ro6->ro_rt == 0) {
3972 ip6stat.ip6s_noroute++;
3973 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
3974 error = EHOSTUNREACH;
3975 // release sadb_mutex, after updating sah's route cache
3976 lck_mtx_unlock(sadb_mutex);
3977 goto bad;
3978 }
3979
3980 /*
3981 * adjust state->dst if tunnel endpoint is offlink
3982 *
3983 * XXX: caching rt_gateway value in the state is
3984 * not really good, since it may point elsewhere
3985 * when the gateway gets modified to a larger
3986 * sockaddr via rt_setgate(). This is currently
3987 * addressed by SA_SIZE roundup in that routine.
3988 */
3989 if (ro6->ro_rt->rt_flags & RTF_GATEWAY) {
3990 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
3991 }
3992 RT_UNLOCK(ro6->ro_rt);
3993 ROUTE_RELEASE(&state->ro);
3994 route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6));
3995 state->dst = (struct sockaddr *)dst6;
3996 state->tunneled = 6;
3997 // release sadb_mutex, after updating sah's route cache
3998 lck_mtx_unlock(sadb_mutex);
3999 }
4000
4001 state->m = ipsec6_splithdr(state->m);
4002 if (!state->m) {
4003 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
4004 error = ENOMEM;
4005 goto bad;
4006 }
4007 ip6 = mtod(state->m, struct ip6_hdr *);
4008 switch (sav->sah->saidx.proto) {
4009 case IPPROTO_ESP:
4010#if IPSEC_ESP
4011 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4012#else
4013 m_freem(state->m);
4014 error = EINVAL;
4015#endif
4016 break;
4017 case IPPROTO_AH:
4018 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
4019 break;
4020 default:
4021 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4022 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
4023 m_freem(state->m);
4024 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4025 error = EINVAL;
4026 break;
4027 }
4028 if (error) {
4029 state->m = NULL;
4030 goto bad;
4031 }
4032 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
4033 if (plen > IPV6_MAXPACKET) {
4034 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4035 "IPsec with IPv6 jumbogram is not supported\n"));
4036 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
4037 error = EINVAL; /*XXX*/
4038 goto bad;
4039 }
4040 ip6 = mtod(state->m, struct ip6_hdr *);
4041 ip6->ip6_plen = htons(plen);
4042done:
4043 return 0;
4044
4045bad:
4046 return error;
4047}
4048
4049int
4050ipsec6_output_tunnel(
4051 struct ipsec_output_state *state,
4052 struct secpolicy *sp,
4053 __unused int flags)
4054{
4055 struct ip6_hdr *ip6;
4056 struct ipsecrequest *isr = NULL;
4057 struct secasindex saidx;
4058 struct secasvar *sav = NULL;
4059 int error = 0;
4060
4061 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4062
4063 if (!state) {
4064 panic("state == NULL in ipsec6_output_tunnel");
4065 }
4066 if (!state->m) {
4067 panic("state->m == NULL in ipsec6_output_tunnel");
4068 }
4069 if (!sp) {
4070 panic("sp == NULL in ipsec6_output_tunnel");
4071 }
4072
4073 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
4074 printf("ipsec6_output_tunnel: applied SP\n");
4075 kdebug_secpolicy(sp));
4076
4077 /*
4078 * transport mode ipsec (before the 1st tunnel mode) is already
4079 * processed by ipsec6_output_trans().
4080 */
4081 for (isr = sp->req; isr; isr = isr->next) {
4082 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4083 break;
4084 }
4085 }
4086
4087 for (/* already initialized */; isr; isr = isr->next) {
4088 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
4089 /* When tunnel mode, SA peers must be specified. */
4090 bcopy(&isr->saidx, &saidx, sizeof(saidx));
4091 } else {
4092 /* make SA index to look for a proper SA */
4093 struct sockaddr_in6 *sin6;
4094
4095 bzero(&saidx, sizeof(saidx));
4096 saidx.proto = isr->saidx.proto;
4097 saidx.mode = isr->saidx.mode;
4098 saidx.reqid = isr->saidx.reqid;
4099
4100 ip6 = mtod(state->m, struct ip6_hdr *);
4101 sin6 = (struct sockaddr_in6 *)&saidx.src;
4102 if (sin6->sin6_len == 0) {
4103 sin6->sin6_len = sizeof(*sin6);
4104 sin6->sin6_family = AF_INET6;
4105 sin6->sin6_port = IPSEC_PORT_ANY;
4106 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
4107 sizeof(ip6->ip6_src));
4108 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
4109 /* fix scope id for comparing SPD */
4110 sin6->sin6_addr.s6_addr16[1] = 0;
4111 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
4112 }
4113 }
4114 sin6 = (struct sockaddr_in6 *)&saidx.dst;
4115 if (sin6->sin6_len == 0) {
4116 sin6->sin6_len = sizeof(*sin6);
4117 sin6->sin6_family = AF_INET6;
4118 sin6->sin6_port = IPSEC_PORT_ANY;
4119 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
4120 sizeof(ip6->ip6_dst));
4121 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
4122 /* fix scope id for comparing SPD */
4123 sin6->sin6_addr.s6_addr16[1] = 0;
4124 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
4125 }
4126 }
4127 }
4128
4129 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
4130 /*
4131 * IPsec processing is required, but no SA was found.
4132 * We assume that key_acquire() has already been called
4133 * to get/establish the SA. Discard this packet here,
4134 * because it is the responsibility of the upper
4135 * layer to retransmit it.
4136 */
4137 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4138 error = ENOENT;
4139 goto bad;
4140 }
4141
4142 /* validity check */
4143 if (sav == NULL) {
4144 switch (ipsec_get_reqlevel(isr)) {
4145 case IPSEC_LEVEL_USE:
4146 continue;
4147 case IPSEC_LEVEL_REQUIRE:
4148 /* must not be reached here. */
4149 panic("ipsec6_output_tunnel: no SA found, but required.");
4150 }
4151 }
4152
4153 /*
4154 * If there is no valid SA, we give up processing.
4155 * see the same check in ipsec4_output().
4156 */
4157 if (sav->state != SADB_SASTATE_MATURE
4158 && sav->state != SADB_SASTATE_DYING) {
4159 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4160 error = EINVAL;
4161 goto bad;
4162 }
4163
4164 int must_be_last = 0;
4165
4166 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4167 goto bad;
4168 }
4169
4170 if (must_be_last && isr->next) {
4171 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4172 "IPv4 must be outer layer, spi=%u\n",
4173 (u_int32_t)ntohl(sav->spi)));
4174 error = EINVAL;
4175 goto bad;
4176 }
4177 }
4178
4179 if (sav) {
4180 key_freesav(sav, KEY_SADB_UNLOCKED);
4181 }
4182 return 0;
4183
4184bad:
4185 if (sav) {
4186 key_freesav(sav, KEY_SADB_UNLOCKED);
4187 }
4188 if (state->m) {
4189 m_freem(state->m);
4190 }
4191 state->m = NULL;
4192 return error;
4193}
4194
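/*
 * Apply IPsec for an ipsec interface: look up an outbound SA for this
 * interface and the packet's IPv6 addresses, then run either tunnel-mode
 * or transport-mode output depending on the SA's mode.
 */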
4195int
4196ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4197{
4198 int error = 0;
4199 struct secasvar *sav = NULL;
4200
4201 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4202
4203 if (state == NULL) {
4204 panic("state == NULL in ipsec6_interface_output");
4205 }
4206 if (state->m == NULL) {
4207 panic("state->m == NULL in ipsec6_interface_output");
4208 }
4209 if (nexthdrp == NULL) {
4210 panic("nexthdrp == NULL in ipsec6_interface_output");
4211 }
4212 if (mprev == NULL) {
4213 panic("mprev == NULL in ipsec6_interface_output");
4214 }
4215
4216 struct ip6_hdr *ip6 = mtod(state->m, struct ip6_hdr *);
4217
4218 struct sockaddr_in6 src = {};
4219 src.sin6_family = AF_INET6;
4220 src.sin6_len = sizeof(src);
4221 memcpy(&src.sin6_addr, &ip6->ip6_src, sizeof(src.sin6_addr));
4222
4223 struct sockaddr_in6 dst = {};
4224 dst.sin6_family = AF_INET6;
4225 dst.sin6_len = sizeof(dst);
4226 memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr));
4227
4228 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6,
4229 (struct sockaddr *)&src,
4230 (struct sockaddr *)&dst);
4231 if (sav == NULL) {
4232 goto bad;
4233 }
4234
4235 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4236 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4237 goto bad;
4238 }
4239 } else {
4240 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4241 goto bad;
4242 }
4243 }
4244
4245 if (sav) {
4246 key_freesav(sav, KEY_SADB_UNLOCKED);
4247 }
4248 return 0;
4249
4250bad:
4251 if (sav) {
4252 key_freesav(sav, KEY_SADB_UNLOCKED);
4253 }
4254 m_freem(state->m);
4255 state->m = NULL;
4256 return error;
4257}
4258#endif /*INET6*/
4259
4260#if INET
4261/*
4262 * Chop IP header and option off from the payload.
4263 */
4264struct mbuf *
4265ipsec4_splithdr(struct mbuf *m)
4266{
4267 struct mbuf *mh;
4268 struct ip *ip;
4269 int hlen;
4270
4271 if (m->m_len < sizeof(struct ip)) {
4272 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4273 }
4274 ip = mtod(m, struct ip *);
4275#ifdef _IP_VHL
4276 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4277#else
4278 hlen = ip->ip_hl << 2;
4279#endif
4280 if (m->m_len > hlen) {
4281 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4282 if (!mh) {
4283 m_freem(m);
4284 return NULL;
4285 }
4286 M_COPY_PKTHDR(mh, m);
4287 MH_ALIGN(mh, hlen);
4288 m->m_flags &= ~M_PKTHDR;
4289 m_mchtype(m, MT_DATA);
4290 m->m_len -= hlen;
4291 m->m_data += hlen;
4292 mh->m_next = m;
4293 m = mh;
4294 m->m_len = hlen;
4295 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4296 } else if (m->m_len < hlen) {
4297 m = m_pullup(m, hlen);
4298 if (!m) {
4299 return NULL;
4300 }
4301 }
4302 return m;
4303}
4304#endif
4305
4306#if INET6
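/*
 * Chop the IPv6 header off from the payload (IPv6 analogue of
 * ipsec4_splithdr above).
 */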
4307struct mbuf *
4308ipsec6_splithdr(struct mbuf *m)
4309{
4310 struct mbuf *mh;
4311 struct ip6_hdr *ip6;
4312 int hlen;
4313
4314 if (m->m_len < sizeof(struct ip6_hdr)) {
4315 panic("ipsec6_splithdr: first mbuf too short");
4316 }
4317 ip6 = mtod(m, struct ip6_hdr *);
4318 hlen = sizeof(struct ip6_hdr);
4319 if (m->m_len > hlen) {
4320 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4321 if (!mh) {
4322 m_freem(m);
4323 return NULL;
4324 }
4325 M_COPY_PKTHDR(mh, m);
4326 MH_ALIGN(mh, hlen);
4327 m->m_flags &= ~M_PKTHDR;
4328 m_mchtype(m, MT_DATA);
4329 m->m_len -= hlen;
4330 m->m_data += hlen;
4331 mh->m_next = m;
4332 m = mh;
4333 m->m_len = hlen;
4334 bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
4335 } else if (m->m_len < hlen) {
4336 m = m_pullup(m, hlen);
4337 if (!m) {
4338 return NULL;
4339 }
4340 }
4341 return m;
4342}
4343#endif
4344
4345/* validate inbound IPsec tunnel packet. */
4346int
4347ipsec4_tunnel_validate(
4348 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4349 int off,
4350 u_int nxt0,
4351 struct secasvar *sav,
4352 sa_family_t *ifamily)
4353{
4354 u_int8_t nxt = nxt0 & 0xff;
4355 struct sockaddr_in *sin;
4356 struct sockaddr_in osrc, odst, i4src, i4dst;
4357 struct sockaddr_in6 i6src, i6dst;
4358 int hlen;
4359 struct secpolicy *sp;
4360 struct ip *oip;
4361
4362 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4363
4364#if DIAGNOSTIC
4365 if (m->m_len < sizeof(struct ip)) {
4366 panic("too short mbuf on ipsec4_tunnel_validate");
4367 }
4368#endif
4369 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
4370 return 0;
4371 }
4372 if (m->m_pkthdr.len < off + sizeof(struct ip)) {
4373 return 0;
4374 }
4375 /* do not decapsulate if the SA is for transport mode only */
4376 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4377 return 0;
4378 }
4379
4380 oip = mtod(m, struct ip *);
4381#ifdef _IP_VHL
4382 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4383#else
4384 hlen = oip->ip_hl << 2;
4385#endif
4386 if (hlen != sizeof(struct ip)) {
4387 return 0;
4388 }
4389
4390 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4391 if (sin->sin_family != AF_INET) {
4392 return 0;
4393 }
4394 if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0) {
4395 return 0;
4396 }
4397
4398 if (sav->sah->ipsec_if != NULL) {
4399 // the ipsec interface SAs don't have policies.
4400 if (nxt == IPPROTO_IPV4) {
4401 *ifamily = AF_INET;
4402 } else if (nxt == IPPROTO_IPV6) {
4403 *ifamily = AF_INET6;
4404 } else {
4405 return 0;
4406 }
4407 return 1;
4408 }
4409
4410 /* XXX slow */
4411 bzero(&osrc, sizeof(osrc));
4412 bzero(&odst, sizeof(odst));
4413 osrc.sin_family = odst.sin_family = AF_INET;
4414 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4415 osrc.sin_addr = oip->ip_src;
4416 odst.sin_addr = oip->ip_dst;
4417 /*
4418 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
4419 * - if the inner destination is a multicast address, there can be
4420 * multiple permissible inner source addresses. an implementation
4421 * may want to skip verification of the inner source address against
4422 * the SPD selector.
4423 * - if the inner protocol is ICMP, the packet may be an error report
4424 * from routers on the other side of the VPN cloud (R in the
4425 * following diagram). in this case, we cannot verify the inner source
4426 * address against the SPD selector.
4427 * me -- gw === gw -- R -- you
4428 *
4429 * we consider the first bullet to be the user's responsibility on SPD
4430 * entry configuration (if you need to encrypt multicast traffic, set
4431 * the source range of the SPD selector to 0.0.0.0/0, or have explicit
4432 * address ranges for possible senders).
4433 * the second bullet is not taken care of (yet).
4434 *
4435 * therefore, we do not do anything special about the inner source.
4436 */
4437 if (nxt == IPPROTO_IPV4) {
4438 bzero(&i4src, sizeof(struct sockaddr_in));
4439 bzero(&i4dst, sizeof(struct sockaddr_in));
4440 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4441 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4442 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4443 (caddr_t)&i4src.sin_addr);
4444 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4445 (caddr_t)&i4dst.sin_addr);
4446 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4447 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4448 } else if (nxt == IPPROTO_IPV6) {
4449 bzero(&i6src, sizeof(struct sockaddr_in6));
4450 bzero(&i6dst, sizeof(struct sockaddr_in6));
4451 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4452 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4453 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4454 (caddr_t)&i6src.sin6_addr);
4455 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4456 (caddr_t)&i6dst.sin6_addr);
4457 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4458 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4459 } else {
4460 return 0; /* unsupported family */
4461 }
4462 if (!sp) {
4463 return 0;
4464 }
4465
4466 key_freesp(sp, KEY_SADB_UNLOCKED);
4467
4468 return 1;
4469}
4470
4471#if INET6
4472/* validate inbound IPsec tunnel packet. */
4473int
4474ipsec6_tunnel_validate(
4475 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4476 int off,
4477 u_int nxt0,
4478 struct secasvar *sav,
4479 sa_family_t *ifamily)
4480{
4481 u_int8_t nxt = nxt0 & 0xff;
4482 struct sockaddr_in6 *sin6;
4483 struct sockaddr_in i4src, i4dst;
4484 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4485 struct secpolicy *sp;
4486 struct ip6_hdr *oip6;
4487
4488 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4489
4490#if DIAGNOSTIC
4491 if (m->m_len < sizeof(struct ip6_hdr)) {
4492 panic("too short mbuf on ipsec6_tunnel_validate");
4493 }
4494#endif
4495 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
4496 return 0;
4497 }
4498
4499 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
4500 return 0;
4501 }
4502 /* do not decapsulate if the SA is for transport mode only */
4503 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
4504 return 0;
4505 }
4506
4507 oip6 = mtod(m, struct ip6_hdr *);
4508 /* AF_INET should be supported, but at this moment it is not. */
4509 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4510 if (sin6->sin6_family != AF_INET6) {
4511 return 0;
4512 }
4513 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr)) {
4514 return 0;
4515 }
4516
4517 if (sav->sah->ipsec_if != NULL) {
4518 // the ipsec interface SAs don't have policies.
4519 if (nxt == IPPROTO_IPV4) {
4520 *ifamily = AF_INET;
4521 } else if (nxt == IPPROTO_IPV6) {
4522 *ifamily = AF_INET6;
4523 } else {
4524 return 0;
4525 }
4526 return 1;
4527 }
4528
4529 /* XXX slow */
4530 bzero(&osrc, sizeof(osrc));
4531 bzero(&odst, sizeof(odst));
4532 osrc.sin6_family = odst.sin6_family = AF_INET6;
4533 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4534 osrc.sin6_addr = oip6->ip6_src;
4535 odst.sin6_addr = oip6->ip6_dst;
4536
4537 /*
4538 * regarding inner source address validation, see the long comment
4539 * in ipsec4_tunnel_validate.
4540 */
4541
4542 if (nxt == IPPROTO_IPV4) {
4543 bzero(&i4src, sizeof(struct sockaddr_in));
4544 bzero(&i4dst, sizeof(struct sockaddr_in));
4545 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4546 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4547 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4548 (caddr_t)&i4src.sin_addr);
4549 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4550 (caddr_t)&i4dst.sin_addr);
4551 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4552 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4553 } else if (nxt == IPPROTO_IPV6) {
4554 bzero(&i6src, sizeof(struct sockaddr_in6));
4555 bzero(&i6dst, sizeof(struct sockaddr_in6));
4556 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4557 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4558 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4559 (caddr_t)&i6src.sin6_addr);
4560 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4561 (caddr_t)&i6dst.sin6_addr);
4562 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4563 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4564 } else {
4565 return 0; /* unsupported family */
4566 }
4567 /*
4568 * when there is no suitable inbound policy for an ipsec tunnel-mode
4569 * packet, the kernel never decapsulates the tunneled packet as ipsec
4570 * tunnel mode, even when the system-wide policy is "none". instead,
4571 * the kernel leaves the packet to the generic tunnel module. if there
4572 * is no matching rule in the generic tunnel, the packet is rejected
4573 * and the statistics are counted up.
4574 */
4575 if (!sp) {
4576 return 0;
4577 }
4578 key_freesp(sp, KEY_SADB_UNLOCKED);
4579
4580 return 1;
4581}
4582#endif
4583
4584/*
4585 * Make a mbuf chain for encryption.
4586 * If the original mbuf chain contains a mbuf with a cluster,
4587 * allocate a new cluster and copy the data to the new cluster.
4588 * XXX: this hack is inefficient, but is necessary to handle cases
4589 * of TCP retransmission...
4590 */
4591struct mbuf *
4592ipsec_copypkt(struct mbuf *m)
4593{
4594 struct mbuf *n, **mpp, *mnew;
4595
4596 for (n = m, mpp = &m; n; n = n->m_next) {
4597 if (n->m_flags & M_EXT) {
4598 /*
4599 * Make a copy only if there is more than one reference
4600 * to the cluster.
4601 * XXX: is this approach effective?
4602 */
4603 if (
4604 m_get_ext_free(n) != NULL ||
4605 m_mclhasreference(n)
4606 ) {
4607 int remain, copied;
4608 struct mbuf *mm;
4609
4610 if (n->m_flags & M_PKTHDR) {
4611 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4612 if (mnew == NULL) {
4613 goto fail;
4614 }
4615 M_COPY_PKTHDR(mnew, n);
4616 } else {
4617 MGET(mnew, M_DONTWAIT, MT_DATA);
4618 if (mnew == NULL) {
4619 goto fail;
4620 }
4621 }
4622 mnew->m_len = 0;
4623 mm = mnew;
4624
4625 /*
4626 * Copy data. If we don't have enough space to
4627 * store the whole data, allocate a cluster
4628 * or additional mbufs.
4629 * XXX: we don't use m_copyback(), since the
4630 * function does not use clusters and thus is
4631 * inefficient.
4632 */
4633 remain = n->m_len;
4634 copied = 0;
4635 while (1) {
4636 int len;
4637 struct mbuf *mn;
4638
4639 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) {
4640 len = remain;
4641 } else { /* allocate a cluster */
4642 MCLGET(mm, M_DONTWAIT);
4643 if (!(mm->m_flags & M_EXT)) {
4644 m_free(mm);
4645 goto fail;
4646 }
4647 len = remain < MCLBYTES ?
4648 remain : MCLBYTES;
4649 }
4650
4651 bcopy(n->m_data + copied, mm->m_data,
4652 len);
4653
4654 copied += len;
4655 remain -= len;
4656 mm->m_len = len;
4657
4658 if (remain <= 0) { /* completed? */
4659 break;
4660 }
4661
4662 /* need another mbuf */
4663 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4664 if (mn == NULL) {
4665 goto fail;
4666 }
4667 mn->m_pkthdr.rcvif = NULL;
4668 mm->m_next = mn;
4669 mm = mn;
4670 }
4671
4672 /* adjust chain */
4673 mm->m_next = m_free(n);
4674 n = mm;
4675 *mpp = mnew;
4676 mpp = &n->m_next;
4677
4678 continue;
4679 }
4680 }
4681 *mpp = n;
4682 mpp = &n->m_next;
4683 }
4684
4685 return m;
4686fail:
4687 m_freem(m);
4688 return NULL;
4689}
4690
4691/*
4692 * Tags are allocated as mbufs for now; since our minimum size is MLEN, we
4693 * should make use of up to that much space.
4694 */
4695#define IPSEC_TAG_HEADER \
4696
4697struct ipsec_tag {
4698 struct socket *socket;
4699 u_int32_t history_count;
4700 struct ipsec_history history[];
4701#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
4702/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
4703 * are 32-bit:
4704 * Aligning to 64-bit since we cast to m_tag, which is 64-bit aligned.
4705 */
4706} __attribute__ ((aligned(8)));
4707#else
4708};
4709#endif
4710
4711#define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
4712#define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
4713#define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
4714 sizeof(struct ipsec_history))
4715
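/*
 * Return the mbuf's IPsec tag, allocating and prepending one if it is
 * not already present; NULL if a tag cannot be allocated.
 */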
4716static struct ipsec_tag *
4717ipsec_addaux(
4718 struct mbuf *m)
4719{
4720 struct m_tag *tag;
4721
4722 /* Check if the tag already exists */
4723 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4724
4725 if (tag == NULL) {
4726 struct ipsec_tag *itag;
4727
4728 /* Allocate a tag */
4729 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4730 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4731
4732 if (tag) {
4733 itag = (struct ipsec_tag*)(tag + 1);
4734 itag->socket = 0;
4735 itag->history_count = 0;
4736
4737 m_tag_prepend(m, tag);
4738 }
4739 }
4740
4741 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4742}
4743
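/* Return the mbuf's existing IPsec tag, or NULL if none is attached. */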
4744static struct ipsec_tag *
4745ipsec_findaux(
4746 struct mbuf *m)
4747{
4748 struct m_tag *tag;
4749
4750 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4751
4752 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4753}
4754
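/* Delete the IPsec tag from the mbuf, if present. */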
4755void
4756ipsec_delaux(
4757 struct mbuf *m)
4758{
4759 struct m_tag *tag;
4760
4761 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4762
4763 if (tag) {
4764 m_tag_delete(m, tag);
4765 }
4766}
4767
4768/* if the aux buffer is unnecessary, nuke it. */
4769static void
4770ipsec_optaux(
4771 struct mbuf *m,
4772 struct ipsec_tag *itag)
4773{
4774 if (itag && itag->socket == NULL && itag->history_count == 0) {
4775 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4776 }
4777}
4778
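/*
 * Record the originating socket in the mbuf's IPsec tag (so == NULL
 * clears it); returns ENOBUFS if a tag cannot be allocated.
 */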
4779int
4780ipsec_setsocket(struct mbuf *m, struct socket *so)
4781{
4782 struct ipsec_tag *tag;
4783
4784 /* if so == NULL, don't insist on getting the aux mbuf */
4785 if (so) {
4786 tag = ipsec_addaux(m);
4787 if (!tag) {
4788 return ENOBUFS;
4789 }
4790 } else {
4791 tag = ipsec_findaux(m);
4792 }
4793 if (tag) {
4794 tag->socket = so;
4795 ipsec_optaux(m, tag);
4796 }
4797 return 0;
4798}
4799
4800struct socket *
4801ipsec_getsocket(struct mbuf *m)
4802{
4803 struct ipsec_tag *itag;
4804
4805 itag = ipsec_findaux(m);
4806 if (itag) {
4807 return itag->socket;
4808 } else {
4809 return NULL;
4810 }
4811}
4812
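/*
 * Append a {protocol, SPI} entry to the mbuf's IPsec processing
 * history; returns ENOSPC once the tag is full.
 */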
4813int
4814ipsec_addhist(
4815 struct mbuf *m,
4816 int proto,
4817 u_int32_t spi)
4818{
4819 struct ipsec_tag *itag;
4820 struct ipsec_history *p;
4821 itag = ipsec_addaux(m);
4822 if (!itag) {
4823 return ENOBUFS;
4824 }
4825 if (itag->history_count == IPSEC_HISTORY_MAX) {
4826 return ENOSPC; /* XXX */
4827 }
4828 p = &itag->history[itag->history_count];
4829 itag->history_count++;
4830
4831 bzero(p, sizeof(*p));
4832 p->ih_proto = proto;
4833 p->ih_spi = spi;
4834
4835 return 0;
4836}
4837
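/*
 * Return the recorded IPsec history array, with its size in bytes
 * stored in *lenp, or NULL if the mbuf carries no history.
 */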
4838struct ipsec_history *
4839ipsec_gethist(
4840 struct mbuf *m,
4841 int *lenp)
4842{
4843 struct ipsec_tag *itag;
4844
4845 itag = ipsec_findaux(m);
4846 if (!itag) {
4847 return NULL;
4848 }
4849 if (itag->history_count == 0) {
4850 return NULL;
4851 }
4852 if (lenp) {
4853 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4854 }
4855 return itag->history;
4856}
4857
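/* Drop the recorded IPsec history, freeing the tag if it is now unused. */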
4858void
4859ipsec_clearhist(
4860 struct mbuf *m)
4861{
4862 struct ipsec_tag *itag;
4863
4864 itag = ipsec_findaux(m);
4865 if (itag) {
4866 itag->history_count = 0;
4867 }
4868 ipsec_optaux(m, itag);
4869}
4870
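/*
 * Send a NAT-T keepalive for this SA if one is due: a UDP datagram with
 * a one-byte (0xFF) payload, built with a complete IP/IPv6 header since
 * there is no inpcb to hand to the UDP output path. Returns TRUE when a
 * keepalive was successfully sent.
 */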
4871__private_extern__ boolean_t
4872ipsec_send_natt_keepalive(
4873 struct secasvar *sav)
4874{
4875 struct mbuf *m = NULL;
4876 int error = 0;
4877 int keepalive_interval = natt_keepalive_interval;
4878
4879 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4880 lck_mtx_lock(sadb_mutex);
4881
4882 if (((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) || sav->remote_ike_port == 0) {
4883 lck_mtx_unlock(sadb_mutex);
4884 return FALSE;
4885 }
4886
4887 if (sav->natt_interval != 0) {
4888 keepalive_interval = (int)sav->natt_interval;
4889 }
4890
4891 // natt timestamp may have changed... reverify
4892 if ((natt_now - sav->natt_last_activity) < keepalive_interval) {
4893 lck_mtx_unlock(sadb_mutex);
4894 return FALSE;
4895 }
4896
4897 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) {
4898 lck_mtx_unlock(sadb_mutex);
4899 return FALSE; // don't send these from the kernel
4900 }
4901
4902 lck_mtx_unlock(sadb_mutex);
4903
4904 m = m_gethdr(M_NOWAIT, MT_DATA);
4905 if (m == NULL) {
4906 return FALSE;
4907 }
4908
4909 lck_mtx_lock(sadb_mutex);
4910 if (sav->sah->saidx.dst.ss_family == AF_INET) {
4911 struct ip_out_args ipoa = {};
4912 struct route ro = {};
4913
4914 ipoa.ipoa_boundif = IFSCOPE_NONE;
4915 ipoa.ipoa_flags = IPOAF_SELECT_SRCIF;
4916 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4917 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4918
4919 struct ip *ip = (__typeof__(ip))m_mtod(m);
4920
4921 /*
4922 * Type 2: a UDP packet complete with IP header.
4923 * We must do this because UDP output requires
4924 * an inpcb, which we don't have. The UDP packet
4925 * carries a one-byte payload. The byte is set
4926 * to 0xFF.
4927 */
4928 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4929 m->m_len = sizeof(struct udpiphdr) + 1;
4930 bzero(m_mtod(m), m->m_len);
4931 m->m_pkthdr.len = m->m_len;
4932
4933 ip->ip_len = m->m_len;
4934 ip->ip_ttl = ip_defttl;
4935 ip->ip_p = IPPROTO_UDP;
4936 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4937 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4938 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4939 } else {
4940 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4941 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4942 }
4943 if (sav->natt_encapsulated_src_port != 0) {
4944 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
4945 } else {
4946 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4947 }
4949 uh->uh_dport = htons(sav->remote_ike_port);
4950 uh->uh_ulen = htons(1 + sizeof(*uh));
4951 uh->uh_sum = 0;
4952 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4953
4954 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4955 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) {
4956 ROUTE_RELEASE(&sav->sah->sa_route);
4957 }
4958
4959 route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4960 lck_mtx_unlock(sadb_mutex);
4961
4962 necp_mark_packet_as_keepalive(m, TRUE);
4963 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4964
4965 lck_mtx_lock(sadb_mutex);
4966 route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route));
4967 } else if (sav->sah->saidx.dst.ss_family == AF_INET6) {
4968 struct ip6_out_args ip6oa = {};
4969 struct route_in6 ro6 = {};
4970
4971 ip6oa.ip6oa_flowadv.code = 0;
4972 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
4973 if (sav->sah->outgoing_if) {
4974 ip6oa.ip6oa_boundif = sav->sah->outgoing_if;
4975 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
4976 }
4977
4978 struct ip6_hdr *ip6 = (__typeof__(ip6))m_mtod(m);
4979
4980 /*
4981 * Type 2: a UDP packet complete with IPv6 header.
4982 * We must do this because UDP output requires
4983 * an inpcb, which we don't have. The UDP packet
4984 * carries a one-byte payload. The byte is set
4985 * to 0xFF.
4986 */
4987 struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip6));
4988 m->m_len = sizeof(struct udphdr) + sizeof(struct ip6_hdr) + 1;
4989 bzero(m_mtod(m), m->m_len);
4990 m->m_pkthdr.len = m->m_len;
4991
4992 ip6->ip6_flow = 0;
4993 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
4994 ip6->ip6_vfc |= IPV6_VERSION;
4995 ip6->ip6_nxt = IPPROTO_UDP;
4996 ip6->ip6_hlim = ip6_defhlim;
4997 ip6->ip6_plen = htons(sizeof(struct udphdr) + 1);
4998 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4999 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
5000 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
5001 } else {
5002 ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr;
5003 ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr;
5004 }
5005
5006 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
5007 ip6->ip6_src.s6_addr16[1] = 0;
5008 }
5009 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
5010 ip6->ip6_dst.s6_addr16[1] = 0;
5011 }
5012
5013 if (sav->natt_encapsulated_src_port != 0) {
5014 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5015 } else {
5016 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5017 }
5018 uh->uh_dport = htons(sav->remote_ike_port);
5019 uh->uh_ulen = htons(1 + sizeof(*uh));
5020 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip6) + sizeof(*uh)) = 0xFF;
5021 uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(uh->uh_ulen) + IPPROTO_UDP));
5022 m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
5023 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
5024
5025 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
5026 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET6) {
5027 ROUTE_RELEASE(&sav->sah->sa_route);
5028 }
5029
5030 route_copyout((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5031 lck_mtx_unlock(sadb_mutex);
5032
5033 necp_mark_packet_as_keepalive(m, TRUE);
5034 error = ip6_output(m, NULL, &ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa);
5035
5036 lck_mtx_lock(sadb_mutex);
5037 route_copyin((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6));
5038 } else {
5039 ipseclog((LOG_ERR, "nat keepalive: invalid address family %u\n", sav->sah->saidx.dst.ss_family));
5040 lck_mtx_unlock(sadb_mutex);
5041 m_freem(m);
5042 return FALSE;
5043 }
5044
5045 if (error == 0) {
5046 sav->natt_last_activity = natt_now;
5047 lck_mtx_unlock(sadb_mutex);
5048 return TRUE;
5049 }
5050
5051 lck_mtx_unlock(sadb_mutex);
5052 return FALSE;
5053}
5054
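/*
 * Build a NAT-T keepalive frame that the interface can transmit on the
 * host's behalf (keepalive offload). Returns FALSE if the SA is not
 * eligible for offload on this interface or the frame buffer is too
 * small.
 */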
5055__private_extern__ bool
5056ipsec_fill_offload_frame(ifnet_t ifp,
5057 struct secasvar *sav,
5058 struct ifnet_keepalive_offload_frame *frame,
5059 size_t frame_data_offset)
5060{
5061 u_int8_t *data = NULL;
5062 struct ip *ip = NULL;
5063 struct udphdr *uh = NULL;
5064
5065 if (sav == NULL || sav->sah == NULL || frame == NULL ||
5066 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
5067 sav->sah->saidx.dst.ss_family != AF_INET ||
5068 !(sav->flags & SADB_X_EXT_NATT) ||
5069 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
5070 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
5071 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
5072 ((esp_udp_encap_port & 0xFFFF) == 0 && sav->natt_encapsulated_src_port == 0) ||
5073 sav->remote_ike_port == 0 ||
5074 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
5075 /* SA is not eligible for keepalive offload on this interface */
5076 return FALSE;
5077 }
5078
5079 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
5080 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
5081 /* Not enough room in this data frame */
5082 return FALSE;
5083 }
5084
5085 data = frame->data;
5086 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
5087 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
5088
5089 frame->length = frame_data_offset + sizeof(struct udpiphdr) + 1;
5090 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
5091 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
5092
5093 bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
5094
5095 ip->ip_v = IPVERSION;
5096 ip->ip_hl = sizeof(struct ip) >> 2;
5097 ip->ip_off &= htons(~IP_OFFMASK);
5098 ip->ip_off &= htons(~IP_MF);
5099 switch (ip4_ipsec_dfbit) {
5100 case 0: /* clear DF bit */
5101 ip->ip_off &= htons(~IP_DF);
5102 break;
5103 case 1: /* set DF bit */
5104 ip->ip_off |= htons(IP_DF);
5105 break;
5106 default: /* copy DF bit */
5107 break;
5108 }
5109 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
5110 if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) {
5111 ip->ip_id = 0;
5112 } else {
5113 ip->ip_id = ip_randomid();
5114 }
5115 ip->ip_ttl = ip_defttl;
5116 ip->ip_p = IPPROTO_UDP;
5117 ip->ip_sum = 0;
5118 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
5119 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5120 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5121 } else {
5122 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
5123 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
5124 }
5125 ip->ip_sum = in_cksum_hdr_opt(ip);
5126 /* Fill out the UDP header */
5127 if (sav->natt_encapsulated_src_port != 0) {
5128 uh->uh_sport = (u_short)sav->natt_encapsulated_src_port;
5129 } else {
5130 uh->uh_sport = htons((u_short)esp_udp_encap_port);
5131 }
5132 uh->uh_dport = htons(sav->remote_ike_port);
5133 uh->uh_ulen = htons(1 + sizeof(*uh));
5134 uh->uh_sum = 0;
5135 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
5136
5137 if (sav->natt_offload_interval != 0) {
5138 frame->interval = sav->natt_offload_interval;
5139 } else if (sav->natt_interval != 0) {
5140 frame->interval = sav->natt_interval;
5141 } else {
5142 frame->interval = natt_keepalive_interval;
5143 }
5144 return TRUE;
5145}
5146
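/*
 * sysctl handler: read-only export of the saved IPsec wake packet,
 * restricted to root or holders of the
 * PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET privilege.
 */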
5147static int
5148sysctl_ipsec_wake_packet SYSCTL_HANDLER_ARGS
5149{
5150 #pragma unused(oidp, arg1, arg2)
5151 if (req->newptr != USER_ADDR_NULL) {
5152 ipseclog((LOG_ERR, "ipsec: invalid parameters"));
5153 return EINVAL;
5154 }
5155
5156 struct proc *p = current_proc();
5157 if (p != NULL) {
5158 uid_t uid = kauth_cred_getuid(proc_ucred(p));
5159 if (uid != 0 && priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_IPSEC_WAKE_PACKET, 0) != 0) {
5160 ipseclog((LOG_ERR, "process does not hold necessary entitlement to get ipsec wake packet"));
5161 return EPERM;
5162 }
5163
5164 int result = sysctl_io_opaque(req, &ipsec_wake_pkt, sizeof(ipsec_wake_pkt), NULL);
5165 return result;
5166 }
5167
5168 return EINVAL;
5169}
5170
5171SYSCTL_PROC(_net_link_generic_system, OID_AUTO, ipsec_wake_pkt, CTLTYPE_STRUCT | CTLFLAG_RD |
5172 CTLFLAG_LOCKED, 0, 0, &sysctl_ipsec_wake_packet, "S,ipsec wake packet", "");
5173
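/*
 * Capture (up to IPSEC_MAX_WAKE_PKT_LEN bytes of) the first IPsec packet
 * processed after wake, together with its SPI and sequence number, for
 * the sysctl above.
 */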
5174void
5175ipsec_save_wake_packet(struct mbuf *wake_mbuf, u_int32_t spi, u_int32_t seq)
5176{
5177 if (wake_mbuf == NULL) {
5178 ipseclog((LOG_ERR, "ipsec: bad wake packet"));
5179 return;
5180 }
5181
5182 lck_mtx_lock(sadb_mutex);
5183 if (__probable(!ipsec_save_wake_pkt)) {
5184 goto done;
5185 }
5186
5187 u_int16_t max_len = (wake_mbuf->m_pkthdr.len > IPSEC_MAX_WAKE_PKT_LEN) ? IPSEC_MAX_WAKE_PKT_LEN : wake_mbuf->m_pkthdr.len;
5188 m_copydata(wake_mbuf, 0, max_len, (void *)ipsec_wake_pkt.wake_pkt);
5189 ipsec_wake_pkt.wake_pkt_len = max_len;
5190
5191 ipsec_wake_pkt.wake_pkt_spi = spi;
5192 ipsec_wake_pkt.wake_pkt_seq = seq;
5193
5194 ipsec_save_wake_pkt = false;
5195done:
5196 lck_mtx_unlock(sadb_mutex);
5197 return;
5198}
5199
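/*
 * Power-management callback: clear the saved wake packet and record the
 * sleep/wake UUID when the system is about to sleep, and arm capture of
 * the next inbound IPsec packet once the system has powered back on.
 */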
5200static IOReturn
5201ipsec_sleep_wake_handler(void *target, void *refCon, UInt32 messageType,
5202 void *provider, void *messageArgument, vm_size_t argSize)
5203{
5204#pragma unused(target, refCon, provider, messageArgument, argSize)
5205 switch (messageType) {
5206 case kIOMessageSystemWillSleep:
5207 memset(&ipsec_wake_pkt, 0, sizeof(ipsec_wake_pkt));
5208 IOPMCopySleepWakeUUIDKey(ipsec_wake_pkt.wake_uuid,
5209 sizeof(ipsec_wake_pkt.wake_uuid));
5210 ipseclog((LOG_INFO,
5211 "ipsec: system will sleep"));
5212 break;
5213 case kIOMessageSystemHasPoweredOn:
5214 ipsec_save_wake_pkt = true;
5215 ipseclog((LOG_INFO,
5216 "ipsec: system has powered on"));
5217 break;
5218 default:
5219 break;
5220 }
5221
5222 return IOPMAckImplied;
5223}
5224
5225void
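/* Register the sleep/wake handler once; caller must hold sadb_mutex. */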
5226ipsec_monitor_sleep_wake(void)
5227{
5228 LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
5229
5230 if (sleep_wake_handle == NULL) {
5231 sleep_wake_handle = registerSleepWakeInterest(ipsec_sleep_wake_handler,
5232 NULL, NULL);
5233 if (sleep_wake_handle != NULL) {
5234 ipseclog((LOG_INFO,
5235 "ipsec: monitoring sleep wake"));
5236 }
5237 }
5238}