1/*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30/* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61/*
62 * IPsec controller part.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/mbuf.h>
69#include <sys/mcache.h>
70#include <sys/domain.h>
71#include <sys/protosw.h>
72#include <sys/socket.h>
73#include <sys/socketvar.h>
74#include <sys/errno.h>
75#include <sys/time.h>
76#include <sys/kernel.h>
77#include <sys/syslog.h>
78#include <sys/sysctl.h>
79#include <kern/locks.h>
80#include <sys/kauth.h>
81#include <libkern/OSAtomic.h>
82
83#include <net/if.h>
84#include <net/route.h>
85#include <net/if_ipsec.h>
86
87#include <netinet/in.h>
88#include <netinet/in_systm.h>
89#include <netinet/ip.h>
90#include <netinet/ip_var.h>
91#include <netinet/in_var.h>
92#include <netinet/udp.h>
93#include <netinet/udp_var.h>
94#include <netinet/ip_ecn.h>
95#if INET6
96#include <netinet6/ip6_ecn.h>
97#endif
98#include <netinet/tcp.h>
99#include <netinet/udp.h>
100
101#include <netinet/ip6.h>
102#if INET6
103#include <netinet6/ip6_var.h>
104#endif
105#include <netinet/in_pcb.h>
106#if INET6
107#include <netinet/icmp6.h>
108#endif
109
110#include <netinet6/ipsec.h>
111#if INET6
112#include <netinet6/ipsec6.h>
113#endif
114#include <netinet6/ah.h>
115#if INET6
116#include <netinet6/ah6.h>
117#endif
118#if IPSEC_ESP
119#include <netinet6/esp.h>
120#if INET6
121#include <netinet6/esp6.h>
122#endif
123#endif
124#include <netinet6/ipcomp.h>
125#if INET6
126#include <netinet6/ipcomp6.h>
127#endif
128#include <netkey/key.h>
129#include <netkey/keydb.h>
130#include <netkey/key_debug.h>
131
132#include <net/net_osdep.h>
133
134#if IPSEC_DEBUG
135int ipsec_debug = 1;
136#else
137int ipsec_debug = 0;
138#endif
139
140#include <sys/kdebug.h>
141#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
142#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
143#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
144#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
145#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
146
147extern lck_mtx_t *sadb_mutex;
148
149struct ipsecstat ipsecstat;
150int ip4_ah_cleartos = 1;
151int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
152int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
153int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
154int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
155int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
156int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
157struct secpolicy ip4_def_policy;
158int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
159int ip4_esp_randpad = -1;
160int esp_udp_encap_port = 0;
161static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
162extern int natt_keepalive_interval;
163extern u_int64_t natt_now;
164
165struct ipsec_tag;
166
167SYSCTL_DECL(_net_inet_ipsec);
168#if INET6
169SYSCTL_DECL(_net_inet6_ipsec6);
170#endif
171/* net.inet.ipsec */
172SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
173 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
174SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
175 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
176SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
177 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
178SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
179 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
180SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
181 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
182SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
183 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
184SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
185 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
186SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
187 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
188SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
189 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
190SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
191 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
192SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
193 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
194SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
195 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
196
197/* for performance, we bypass ipsec until a security policy is set */
198int ipsec_bypass = 1;
199SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass,0, "");
200
201/*
202 * NAT Traversal requires a UDP port for encapsulation;
203 * esp_udp_encap_port controls which port is used. Racoon
204 * must set this port to the port racoon is using locally
205 * for NAT traversal.
206 */
207SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
208 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
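/*
 * The declaration above surfaces this value as the sysctl
 * "net.inet.ipsec.esp_port".  A racoon configured for NAT traversal would
 * typically point it at the conventional IKE NAT-T port (UDP 4500), for
 * example:
 *
 *	sysctl -w net.inet.ipsec.esp_port=4500
 *
 * The specific port is only an illustration of the convention; the one
 * requirement, per the comment above, is that it match the port racoon is
 * using locally.
 */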
209
210#if INET6
211struct ipsecstat ipsec6stat;
212int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
213int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
214int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
215int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
216struct secpolicy ip6_def_policy;
217int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
218int ip6_esp_randpad = -1;
219
220/* net.inet6.ipsec6 */
221SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
222 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
223SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
224 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
225SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
226 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
227SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
228 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
229SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
231SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
232 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
233SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
234 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
235SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
236 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
237SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
238 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
239#endif /* INET6 */
240
241static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *,
242 int, int, int);
243static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int,
244 struct mbuf *, int);
245static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
246#if INET6
247static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
248#endif
249static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
250static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
251static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
252#if INET6
253static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
254static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
255#endif
256static struct inpcbpolicy *ipsec_newpcbpolicy(void);
257static void ipsec_delpcbpolicy(struct inpcbpolicy *);
258static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
259static int ipsec_set_policy(struct secpolicy **pcb_sp,
260 int optname, caddr_t request, size_t len, int priv);
261static void vshiftl(unsigned char *, int, int);
262static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
263#if INET6
264static int ipsec64_encapsulate(struct mbuf *, struct secasvar *);
265static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
266static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
267#endif
268static struct ipsec_tag *ipsec_addaux(struct mbuf *);
269static struct ipsec_tag *ipsec_findaux(struct mbuf *);
270static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
271int ipsec_send_natt_keepalive(struct secasvar *sav);
272bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
273
274static int
275sysctl_def_policy SYSCTL_HANDLER_ARGS
276{
277 int old_policy = ip4_def_policy.policy;
278 int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
279
280#pragma unused(arg1, arg2)
281
282 if (ip4_def_policy.policy != IPSEC_POLICY_NONE &&
283 ip4_def_policy.policy != IPSEC_POLICY_DISCARD) {
284 ip4_def_policy.policy = old_policy;
285 return EINVAL;
286 }
287
288 /* Turn off the bypass if the default security policy changes */
289 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE)
290 ipsec_bypass = 0;
291
292 return error;
293}
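/*
 * Summary of the handler above: net.inet.ipsec.def_policy only accepts
 * IPSEC_POLICY_NONE or IPSEC_POLICY_DISCARD.  Any other value is rolled back
 * to the previous policy and EINVAL is returned, and moving the default away
 * from IPSEC_POLICY_NONE also clears ipsec_bypass so the fast-path bypass
 * stops short-circuiting policy lookups.
 */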
294
295/*
296 * For an OUTBOUND packet that has a socket: search the SPD for the packet
297 * and return a pointer to the SP.
298 * OUT: NULL: no appropriate SP found; one of the following values is set in *error:
299 * 0 : bypass
300 * EACCES : discard packet.
301 * ENOENT : ipsec_acquire() in progress, maybe.
302 * others : error occurred.
303 * others: a pointer to SP
304 *
305 * NOTE: IPv6-mapped address handling is implemented here.
306 */
307struct secpolicy *
308ipsec4_getpolicybysock(struct mbuf *m,
309 u_int dir,
310 struct socket *so,
311 int *error)
312{
313 struct inpcbpolicy *pcbsp = NULL;
314 struct secpolicy *currsp = NULL; /* policy on socket */
315 struct secpolicy *kernsp = NULL; /* policy on kernel */
316
317 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
318 /* sanity check */
319 if (m == NULL || so == NULL || error == NULL)
320 panic("ipsec4_getpolicybysock: NULL pointer was passed.\n");
321
322 if (so->so_pcb == NULL) {
323 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
324 return ipsec4_getpolicybyaddr(m, dir, 0, error);
325 }
326
327 switch (SOCK_DOM(so)) {
328 case PF_INET:
329 pcbsp = sotoinpcb(so)->inp_sp;
330 break;
331#if INET6
332 case PF_INET6:
333 pcbsp = sotoin6pcb(so)->in6p_sp;
334 break;
335#endif
336 }
337
338 if (!pcbsp){
339 /* Socket has not specified an IPSEC policy */
340 return ipsec4_getpolicybyaddr(m, dir, 0, error);
341 }
342
343 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0,0,0,0,0);
344
345 switch (SOCK_DOM(so)) {
346 case PF_INET:
347 /* set spidx in pcb */
348 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
349 break;
350#if INET6
351 case PF_INET6:
352 /* set spidx in pcb */
353 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
354 break;
355#endif
356 default:
357 panic("ipsec4_getpolicybysock: unsupported address family\n");
358 }
359 if (*error) {
360 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1,*error,0,0,0);
361 return NULL;
362 }
363
364 /* sanity check */
365 if (pcbsp == NULL)
366 panic("ipsec4_getpolicybysock: pcbsp is NULL.\n");
367
368 switch (dir) {
369 case IPSEC_DIR_INBOUND:
370 currsp = pcbsp->sp_in;
371 break;
372 case IPSEC_DIR_OUTBOUND:
373 currsp = pcbsp->sp_out;
374 break;
375 default:
376 panic("ipsec4_getpolicybysock: illegal direction.\n");
377 }
378
379 /* sanity check */
380 if (currsp == NULL)
381 panic("ipsec4_getpolicybysock: currsp is NULL.\n");
382
383 /* when privileged socket */
384 if (pcbsp->priv) {
385 switch (currsp->policy) {
386 case IPSEC_POLICY_BYPASS:
387 lck_mtx_lock(sadb_mutex);
388 currsp->refcnt++;
389 lck_mtx_unlock(sadb_mutex);
390 *error = 0;
391 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2,*error,0,0,0);
392 return currsp;
393
394 case IPSEC_POLICY_ENTRUST:
395 /* look for a policy in SPD */
396 kernsp = key_allocsp(&currsp->spidx, dir);
397
398 /* SP found */
399 if (kernsp != NULL) {
400 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
401 printf("DP ipsec4_getpolicybysock called "
402 "to allocate SP:0x%llx\n",
403 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
404 *error = 0;
405 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3,*error,0,0,0);
406 return kernsp;
407 }
408
409 /* no SP found */
410 lck_mtx_lock(sadb_mutex);
411 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
412 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
413 ipseclog((LOG_INFO,
414 "fixed system default policy: %d->%d\n",
415 ip4_def_policy.policy, IPSEC_POLICY_NONE));
416 ip4_def_policy.policy = IPSEC_POLICY_NONE;
417 }
418 ip4_def_policy.refcnt++;
419 lck_mtx_unlock(sadb_mutex);
420 *error = 0;
421 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4,*error,0,0,0);
422 return &ip4_def_policy;
423
424 case IPSEC_POLICY_IPSEC:
425 lck_mtx_lock(sadb_mutex);
426 currsp->refcnt++;
427 lck_mtx_unlock(sadb_mutex);
428 *error = 0;
429 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5,*error,0,0,0);
430 return currsp;
431
432 default:
433 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
434 "Invalid policy for PCB %d\n", currsp->policy));
435 *error = EINVAL;
436 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6,*error,0,0,0);
437 return NULL;
438 }
439 /* NOTREACHED */
440 }
441
442 /* when non-privileged socket */
443 /* look for a policy in SPD */
444 kernsp = key_allocsp(&currsp->spidx, dir);
445
446 /* SP found */
447 if (kernsp != NULL) {
448 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
449 printf("DP ipsec4_getpolicybysock called "
450 "to allocate SP:0x%llx\n",
451 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
452 *error = 0;
453 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7,*error,0,0,0);
454 return kernsp;
455 }
456
457 /* no SP found */
458 switch (currsp->policy) {
459 case IPSEC_POLICY_BYPASS:
460 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
461 "Illegal policy for non-priviliged defined %d\n",
462 currsp->policy));
463 *error = EINVAL;
464 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8,*error,0,0,0);
465 return NULL;
466
467 case IPSEC_POLICY_ENTRUST:
468 lck_mtx_lock(sadb_mutex);
469 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
470 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
471 ipseclog((LOG_INFO,
472 "fixed system default policy: %d->%d\n",
473 ip4_def_policy.policy, IPSEC_POLICY_NONE));
474 ip4_def_policy.policy = IPSEC_POLICY_NONE;
475 }
476 ip4_def_policy.refcnt++;
477 lck_mtx_unlock(sadb_mutex);
478 *error = 0;
479 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9,*error,0,0,0);
480 return &ip4_def_policy;
481
482 case IPSEC_POLICY_IPSEC:
483 lck_mtx_lock(sadb_mutex);
484 currsp->refcnt++;
485 lck_mtx_unlock(sadb_mutex);
486 *error = 0;
487 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10,*error,0,0,0);
488 return currsp;
489
490 default:
491 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
492 "Invalid policy for PCB %d\n", currsp->policy));
493 *error = EINVAL;
494 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11,*error,0,0,0);
495 return NULL;
496 }
497 /* NOTREACHED */
498}
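/*
 * Illustrative caller sketch for the function above (patterned on the
 * callers in this file, e.g. ipsec4_in_reject_so(); not code taken from
 * this file):
 *
 *	int error;
 *	struct secpolicy *sp;
 *
 *	sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
 *	if (sp == NULL)
 *		return error;	// bypass (0), EACCES, ENOENT, ...
 *	// ... consult sp->policy / sp->req ...
 *	key_freesp(sp, KEY_SADB_UNLOCKED);
 *
 * Every successful return path above takes a reference (currsp->refcnt,
 * ip4_def_policy.refcnt, or the reference obtained via key_allocsp()), so
 * the caller is expected to drop it with key_freesp().
 */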
499
500/*
501 * For a FORWARDING packet or an OUTBOUND packet without a socket: search the SPD
502 * for the packet and return a pointer to the SP.
503 * OUT: positive: a pointer to the entry for security policy leaf matched.
504 * NULL: no appropriate SP found; one of the following values is set in *error:
505 * 0 : bypass
506 * EACCES : discard packet.
507 * ENOENT : ipsec_acquire() in progress, maybe.
508 * others : error occurred.
509 */
510struct secpolicy *
511ipsec4_getpolicybyaddr(struct mbuf *m,
512 u_int dir,
513 int flag,
514 int *error)
515{
516 struct secpolicy *sp = NULL;
517
518 if (ipsec_bypass != 0)
519 return 0;
520
521 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
522
523 /* sanity check */
524 if (m == NULL || error == NULL)
525 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n");
526 {
527 struct secpolicyindex spidx;
528
529 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
530 bzero(&spidx, sizeof(spidx));
531
532 /* make an index to look for a policy */
533 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
534 (flag & IP_FORWARDING) ? 0 : 1);
535
536 if (*error != 0) {
537 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,*error,0,0,0);
538 return NULL;
539 }
540
541 sp = key_allocsp(&spidx, dir);
542 }
543
544 /* SP found */
545 if (sp != NULL) {
546 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
547 printf("DP ipsec4_getpolicybyaddr called "
548 "to allocate SP:0x%llx\n",
549 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
550 *error = 0;
551 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0);
552 return sp;
553 }
554
555 /* no SP found */
556 lck_mtx_lock(sadb_mutex);
557 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
558 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
559 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
560 ip4_def_policy.policy,
561 IPSEC_POLICY_NONE));
562 ip4_def_policy.policy = IPSEC_POLICY_NONE;
563 }
564 ip4_def_policy.refcnt++;
565 lck_mtx_unlock(sadb_mutex);
566 *error = 0;
567 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3,*error,0,0,0);
568 return &ip4_def_policy;
569}
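/*
 * Note on the fallback above: when key_allocsp() finds nothing, the global
 * ip4_def_policy is handed back with its refcnt bumped, so callers treat it
 * exactly like a dynamically allocated SP and still release it through
 * key_freesp().
 */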
570
571/* Match with bound interface rather than src addr.
572 * Unlike getpolicybyaddr, do not set the default policy.
573 * Return 0 if processing should continue, or -1 if the packet
574 * should be dropped.
575 */
576int
577ipsec4_getpolicybyinterface(struct mbuf *m,
578 u_int dir,
579 int *flags,
580 struct ip_out_args *ipoa,
581 struct secpolicy **sp)
582{
583 struct secpolicyindex spidx;
584 int error = 0;
585
586 if (ipsec_bypass != 0)
587 return 0;
588
589 /* Sanity check */
590 if (m == NULL || ipoa == NULL || sp == NULL)
591 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n");
592
593 if (ipoa->ipoa_boundif == IFSCOPE_NONE)
594 return 0;
595
596 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
597 bzero(&spidx, sizeof(spidx));
598
599 /* make an index to look for a policy */
600 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
601 ipoa->ipoa_boundif, 4);
602
603 if (error != 0) {
604 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
605 return 0;
606 }
607
608 *sp = key_allocsp(&spidx, dir);
609
610 /* Return SP, whether NULL or not */
611 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
612 if ((*sp)->ipsec_if == NULL) {
613 /* Invalid to capture on an interface without redirect */
614 key_freesp(*sp, KEY_SADB_UNLOCKED);
615 *sp = NULL;
616 return -1;
617 } else if ((*sp)->disabled) {
618 /* Disabled policies go in the clear */
619 key_freesp(*sp, KEY_SADB_UNLOCKED);
620 *sp = NULL;
621 *flags |= IP_NOIPSEC; /* Avoid later IPSec check */
622 } else {
623 /* If policy is enabled, redirect to ipsec interface */
624 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
625 }
626 }
627
628 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0);
629
630 return 0;
631}
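/*
 * The interface-scoped lookup above has three outcomes when an IPSEC policy
 * matches: a policy bound to an ipsec interface rewrites ipoa_boundif so the
 * packet is steered to that interface; a disabled policy is dropped from
 * consideration and IP_NOIPSEC is set so later IPsec checks are skipped; and
 * a policy without an ipsec_if is treated as invalid and the packet is
 * rejected (-1).
 */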
632
633
634#if INET6
635/*
636 * For an OUTBOUND packet that has a socket: search the SPD for the packet
637 * and return a pointer to the SP.
638 * OUT: NULL: no appropriate SP found; one of the following values is set in *error:
639 * 0 : bypass
640 * EACCES : discard packet.
641 * ENOENT : ipsec_acquire() in progress, maybe.
642 * others : error occurred.
643 * others: a pointer to SP
644 */
645struct secpolicy *
646ipsec6_getpolicybysock(struct mbuf *m,
647 u_int dir,
648 struct socket *so,
649 int *error)
650{
651 struct inpcbpolicy *pcbsp = NULL;
652 struct secpolicy *currsp = NULL; /* policy on socket */
653 struct secpolicy *kernsp = NULL; /* policy on kernel */
654
655 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
656
657 /* sanity check */
658 if (m == NULL || so == NULL || error == NULL)
659 panic("ipsec6_getpolicybysock: NULL pointer was passed.\n");
660
661#if DIAGNOSTIC
662 if (SOCK_DOM(so) != PF_INET6)
663 panic("ipsec6_getpolicybysock: socket domain != inet6\n");
664#endif
665
666 pcbsp = sotoin6pcb(so)->in6p_sp;
667
668 if (!pcbsp){
669 return ipsec6_getpolicybyaddr(m, dir, 0, error);
670 }
671
672 /* set spidx in pcb */
673 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
674
675 /* sanity check */
676 if (pcbsp == NULL)
677 panic("ipsec6_getpolicybysock: pcbsp is NULL.\n");
678
679 switch (dir) {
680 case IPSEC_DIR_INBOUND:
681 currsp = pcbsp->sp_in;
682 break;
683 case IPSEC_DIR_OUTBOUND:
684 currsp = pcbsp->sp_out;
685 break;
686 default:
687 panic("ipsec6_getpolicybysock: illegal direction.\n");
688 }
689
690 /* sanity check */
691 if (currsp == NULL)
692 panic("ipsec6_getpolicybysock: currsp is NULL.\n");
693
694 /* when privileged socket */
695 if (pcbsp->priv) {
696 switch (currsp->policy) {
697 case IPSEC_POLICY_BYPASS:
698 lck_mtx_lock(sadb_mutex);
699 currsp->refcnt++;
700 lck_mtx_unlock(sadb_mutex);
701 *error = 0;
702 return currsp;
703
704 case IPSEC_POLICY_ENTRUST:
705 /* look for a policy in SPD */
706 kernsp = key_allocsp(&currsp->spidx, dir);
707
708 /* SP found */
709 if (kernsp != NULL) {
710 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
711 printf("DP ipsec6_getpolicybysock called "
712 "to allocate SP:0x%llx\n",
713 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
714 *error = 0;
715 return kernsp;
716 }
717
718 /* no SP found */
719 lck_mtx_lock(sadb_mutex);
720 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
721 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
722 ipseclog((LOG_INFO,
723 "fixed system default policy: %d->%d\n",
724 ip6_def_policy.policy, IPSEC_POLICY_NONE));
725 ip6_def_policy.policy = IPSEC_POLICY_NONE;
726 }
727 ip6_def_policy.refcnt++;
728 lck_mtx_unlock(sadb_mutex);
729 *error = 0;
730 return &ip6_def_policy;
731
732 case IPSEC_POLICY_IPSEC:
733 lck_mtx_lock(sadb_mutex);
734 currsp->refcnt++;
735 lck_mtx_unlock(sadb_mutex);
736 *error = 0;
737 return currsp;
738
739 default:
740 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
741 "Invalid policy for PCB %d\n", currsp->policy));
742 *error = EINVAL;
743 return NULL;
744 }
745 /* NOTREACHED */
746 }
747
748 /* when non-privileged socket */
749 /* look for a policy in SPD */
750 kernsp = key_allocsp(&currsp->spidx, dir);
751
752 /* SP found */
753 if (kernsp != NULL) {
754 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
755 printf("DP ipsec6_getpolicybysock called "
756 "to allocate SP:0x%llx\n",
757 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
758 *error = 0;
759 return kernsp;
760 }
761
762 /* no SP found */
763 switch (currsp->policy) {
764 case IPSEC_POLICY_BYPASS:
765 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
766 "Illegal policy for non-priviliged defined %d\n",
767 currsp->policy));
768 *error = EINVAL;
769 return NULL;
770
771 case IPSEC_POLICY_ENTRUST:
772 lck_mtx_lock(sadb_mutex);
773 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
774 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
775 ipseclog((LOG_INFO,
776 "fixed system default policy: %d->%d\n",
777 ip6_def_policy.policy, IPSEC_POLICY_NONE));
778 ip6_def_policy.policy = IPSEC_POLICY_NONE;
779 }
780 ip6_def_policy.refcnt++;
781 lck_mtx_unlock(sadb_mutex);
782 *error = 0;
783 return &ip6_def_policy;
784
785 case IPSEC_POLICY_IPSEC:
786 lck_mtx_lock(sadb_mutex);
787 currsp->refcnt++;
788 lck_mtx_unlock(sadb_mutex);
789 *error = 0;
790 return currsp;
791
792 default:
793 ipseclog((LOG_ERR,
794 "ipsec6_policybysock: Invalid policy for PCB %d\n",
795 currsp->policy));
796 *error = EINVAL;
797 return NULL;
798 }
799 /* NOTREACHED */
800}
801
802/*
803 * For a FORWARDING packet or an OUTBOUND packet without a socket: search the SPD
804 * for the packet and return a pointer to the SP.
805 * `flag' indicates whether the packet is being forwarded.
806 * flag = 1: forward
807 * OUT: positive: a pointer to the entry for security policy leaf matched.
808 * NULL: no appropriate SP found; one of the following values is set in *error:
809 * 0 : bypass
810 * EACCES : discard packet.
811 * ENOENT : ipsec_acquire() in progress, maybe.
812 * others : error occurred.
813 */
814#ifndef IP_FORWARDING
815#define IP_FORWARDING 1
816#endif
817
818struct secpolicy *
819ipsec6_getpolicybyaddr(struct mbuf *m,
820 u_int dir,
821 int flag,
822 int *error)
823{
824 struct secpolicy *sp = NULL;
825
826 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
827
828 /* sanity check */
829 if (m == NULL || error == NULL)
830 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n");
831
832 {
833 struct secpolicyindex spidx;
834
835 bzero(&spidx, sizeof(spidx));
836
837 /* make an index to look for a policy */
838 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
839 (flag & IP_FORWARDING) ? 0 : 1);
840
841 if (*error != 0)
842 return NULL;
843
844 sp = key_allocsp(&spidx, dir);
845 }
846
847 /* SP found */
848 if (sp != NULL) {
849 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
850 printf("DP ipsec6_getpolicybyaddr called "
851 "to allocate SP:0x%llx\n",
852 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
853 *error = 0;
854 return sp;
855 }
856
857 /* no SP found */
858 lck_mtx_lock(sadb_mutex);
859 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
860 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
861 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
862 ip6_def_policy.policy, IPSEC_POLICY_NONE));
863 ip6_def_policy.policy = IPSEC_POLICY_NONE;
864 }
865 ip6_def_policy.refcnt++;
866 lck_mtx_unlock(sadb_mutex);
867 *error = 0;
868 return &ip6_def_policy;
869}
870
871/* Match with bound interface rather than src addr.
872 * Unlike getpolicybyaddr, do not set the default policy.
873 * Return 0 if processing should continue, or -1 if the packet
874 * should be dropped.
875 */
876int
877ipsec6_getpolicybyinterface(struct mbuf *m,
878 u_int dir,
879 int flag,
880 struct ip6_out_args *ip6oap,
881 int *noipsec,
882 struct secpolicy **sp)
883{
884 struct secpolicyindex spidx;
885 int error = 0;
886
887 if (ipsec_bypass != 0)
888 return 0;
889
890 /* Sanity check */
891 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL)
892 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n");
893
894 *noipsec = 0;
895
896 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE)
897 return 0;
898
899 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
900 bzero(&spidx, sizeof(spidx));
901
902 /* make an index to look for a policy */
903 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
904 ip6oap->ip6oa_boundif, 6);
905
906 if (error != 0) {
907 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
908 return 0;
909 }
910
911 *sp = key_allocsp(&spidx, dir);
912
913 /* Return SP, whether NULL or not */
914 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
915 if ((*sp)->ipsec_if == NULL) {
916 /* Invalid to capture on an interface without redirect */
917 key_freesp(*sp, KEY_SADB_UNLOCKED);
918 *sp = NULL;
919 return -1;
920 } else if ((*sp)->disabled) {
921 /* Disabled policies go in the clear */
922 key_freesp(*sp, KEY_SADB_UNLOCKED);
923 *sp = NULL;
924 *noipsec = 1; /* Avoid later IPSec check */
925 } else {
926 /* If policy is enabled, redirect to ipsec interface */
927 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
928 }
929 }
930
931 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0);
932
933 return 0;
934}
935#endif /* INET6 */
936
937/*
938 * set IP address into spidx from mbuf.
939 * This function is used when forwarding packets and when replying to ICMP echo.
940 *
941 * IN: get the following from the mbuf:
942 * protocol family, src, dst, next protocol
943 * OUT:
944 * 0: success.
945 * other: failure, and set errno.
946 */
947static int
948ipsec_setspidx_mbuf(
949 struct secpolicyindex *spidx,
950 u_int dir,
951 __unused u_int family,
952 struct mbuf *m,
953 int needport)
954{
955 int error;
956
957 /* sanity check */
958 if (spidx == NULL || m == NULL)
959 panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n");
960
961 bzero(spidx, sizeof(*spidx));
962
963 error = ipsec_setspidx(m, spidx, needport, 0);
964 if (error)
965 goto bad;
966 spidx->dir = dir;
967
968 return 0;
969
970 bad:
971 /* XXX initialize */
972 bzero(spidx, sizeof(*spidx));
973 return EINVAL;
974}
975
976static int
977ipsec_setspidx_interface(
978 struct secpolicyindex *spidx,
979 u_int dir,
980 struct mbuf *m,
981 int needport,
982 int ifindex,
983 int ip_version)
984{
985 int error;
986
987 /* sanity check */
988 if (spidx == NULL || m == NULL)
989 panic("ipsec_setspidx_interface: NULL pointer was passed.\n");
990
991 bzero(spidx, sizeof(*spidx));
992
993 error = ipsec_setspidx(m, spidx, needport, ip_version);
994 if (error)
995 goto bad;
996 spidx->dir = dir;
997
998 if (ifindex != 0) {
999 ifnet_head_lock_shared();
1000 spidx->internal_if = ifindex2ifnet[ifindex];
1001 ifnet_head_done();
1002 } else {
1003 spidx->internal_if = NULL;
1004 }
1005
1006 return 0;
1007
1008bad:
1009 return EINVAL;
1010}
1011
1012static int
1013ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1014{
1015 struct secpolicyindex *spidx;
1016 int error;
1017
1018 if (ipsec_bypass != 0)
1019 return 0;
1020
1021 /* sanity check */
1022 if (pcb == NULL)
1023 panic("ipsec4_setspidx_inpcb: no PCB found.\n");
1024 if (pcb->inp_sp == NULL)
1025 panic("ipsec4_setspidx_inpcb: no inp_sp found.\n");
1026 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL)
1027 panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n");
1028
1029 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1030 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1031
1032 spidx = &pcb->inp_sp->sp_in->spidx;
1033 error = ipsec_setspidx(m, spidx, 1, 0);
1034 if (error)
1035 goto bad;
1036 spidx->dir = IPSEC_DIR_INBOUND;
1037
1038 spidx = &pcb->inp_sp->sp_out->spidx;
1039 error = ipsec_setspidx(m, spidx, 1, 0);
1040 if (error)
1041 goto bad;
1042 spidx->dir = IPSEC_DIR_OUTBOUND;
1043
1044 return 0;
1045
1046bad:
1047 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1048 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1049 return error;
1050}
1051
1052#if INET6
1053static int
1054ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1055{
1056 struct secpolicyindex *spidx;
1057 int error;
1058
1059 /* sanity check */
1060 if (pcb == NULL)
1061 panic("ipsec6_setspidx_in6pcb: no PCB found.\n");
1062 if (pcb->in6p_sp == NULL)
1063 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n");
1064 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL)
1065 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n");
1066
1067 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1068 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1069
1070 spidx = &pcb->in6p_sp->sp_in->spidx;
1071 error = ipsec_setspidx(m, spidx, 1, 0);
1072 if (error)
1073 goto bad;
1074 spidx->dir = IPSEC_DIR_INBOUND;
1075
1076 spidx = &pcb->in6p_sp->sp_out->spidx;
1077 error = ipsec_setspidx(m, spidx, 1, 0);
1078 if (error)
1079 goto bad;
1080 spidx->dir = IPSEC_DIR_OUTBOUND;
1081
1082 return 0;
1083
1084bad:
1085 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1086 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1087 return error;
1088}
1089#endif
1090
1091/*
1092 * configure security policy index (src/dst/proto/sport/dport)
1093 * by looking at the content of mbuf.
1094 * the caller is responsible for error recovery (like clearing up spidx).
1095 */
1096static int
1097ipsec_setspidx(struct mbuf *m,
1098 struct secpolicyindex *spidx,
1099 int needport,
1100 int force_ip_version)
1101{
1102 struct ip *ip = NULL;
1103 struct ip ipbuf;
1104 u_int v;
1105 struct mbuf *n;
1106 int len;
1107 int error;
1108
1109 if (m == NULL)
1110 panic("ipsec_setspidx: m == 0 passed.\n");
1111
1112 /*
1113 * validate m->m_pkthdr.len. we see incorrect length if we
1114 * mistakenly call this function with inconsistent mbuf chain
1115 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1116 */
1117 len = 0;
1118 for (n = m; n; n = n->m_next)
1119 len += n->m_len;
1120 if (m->m_pkthdr.len != len) {
1121 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1122 printf("ipsec_setspidx: "
1123 "total of m_len(%d) != pkthdr.len(%d), "
1124 "ignored.\n",
1125 len, m->m_pkthdr.len));
1126 return EINVAL;
1127 }
1128
1129 if (m->m_pkthdr.len < sizeof(struct ip)) {
1130 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1131 printf("ipsec_setspidx: "
1132 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1133 m->m_pkthdr.len));
1134 return EINVAL;
1135 }
1136
1137 if (m->m_len >= sizeof(*ip))
1138 ip = mtod(m, struct ip *);
1139 else {
1140 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1141 ip = &ipbuf;
1142 }
1143
1144 if (force_ip_version) {
1145 v = force_ip_version;
1146 } else {
1147#ifdef _IP_VHL
1148 v = _IP_VHL_V(ip->ip_vhl);
1149#else
1150 v = ip->ip_v;
1151#endif
1152 }
1153 switch (v) {
1154 case 4:
1155 error = ipsec4_setspidx_ipaddr(m, spidx);
1156 if (error)
1157 return error;
1158 ipsec4_get_ulp(m, spidx, needport);
1159 return 0;
1160#if INET6
1161 case 6:
1162 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1163 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1164 printf("ipsec_setspidx: "
1165 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1166 "ignored.\n", m->m_pkthdr.len));
1167 return EINVAL;
1168 }
1169 error = ipsec6_setspidx_ipaddr(m, spidx);
1170 if (error)
1171 return error;
1172 ipsec6_get_ulp(m, spidx, needport);
1173 return 0;
1174#endif
1175 default:
1176 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1177 printf("ipsec_setspidx: "
1178 "unknown IP version %u, ignored.\n", v));
1179 return EINVAL;
1180 }
1181}
1182
1183static void
1184ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1185{
1186 struct ip ip;
1187 struct ip6_ext ip6e;
1188 u_int8_t nxt;
1189 int off;
1190 struct tcphdr th;
1191 struct udphdr uh;
1192
1193 /* sanity check */
1194 if (m == NULL)
1195 panic("ipsec4_get_ulp: NULL pointer was passed.\n");
1196 if (m->m_pkthdr.len < sizeof(ip))
1197 panic("ipsec4_get_ulp: too short\n");
1198
1199 /* set default */
1200 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1201 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1202 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1203
1204 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1205 /* ip_input() flips it into host endian XXX need more checking */
1206 if (ip.ip_off & (IP_MF | IP_OFFMASK))
1207 return;
1208
1209 nxt = ip.ip_p;
1210#ifdef _IP_VHL
1211 off = _IP_VHL_HL(ip.ip_vhl) << 2;
1212#else
1213 off = ip.ip_hl << 2;
1214#endif
1215 while (off < m->m_pkthdr.len) {
1216 switch (nxt) {
1217 case IPPROTO_TCP:
1218 spidx->ul_proto = nxt;
1219 if (!needport)
1220 return;
1221 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1222 return;
1223 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1224 ((struct sockaddr_in *)&spidx->src)->sin_port =
1225 th.th_sport;
1226 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1227 th.th_dport;
1228 return;
1229 case IPPROTO_UDP:
1230 spidx->ul_proto = nxt;
1231 if (!needport)
1232 return;
1233 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1234 return;
1235 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1236 ((struct sockaddr_in *)&spidx->src)->sin_port =
1237 uh.uh_sport;
1238 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1239 uh.uh_dport;
1240 return;
1241 case IPPROTO_AH:
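			/*
			 * AH: the extension length field counts 32-bit words
			 * minus 2 (per the AH specification, RFC 4302), hence
			 * the (len + 2) << 2 below to skip to the next header.
			 */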
1242 if (off + sizeof(ip6e) > m->m_pkthdr.len)
1243 return;
1244 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1245 off += (ip6e.ip6e_len + 2) << 2;
1246 nxt = ip6e.ip6e_nxt;
1247 break;
1248 case IPPROTO_ICMP:
1249 default:
1250 /* XXX intermediate headers??? */
1251 spidx->ul_proto = nxt;
1252 return;
1253 }
1254 }
1255}
1256
1257/* assumes that m is sane */
1258static int
1259ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1260{
1261 struct ip *ip = NULL;
1262 struct ip ipbuf;
1263 struct sockaddr_in *sin;
1264
1265 if (m->m_len >= sizeof(*ip))
1266 ip = mtod(m, struct ip *);
1267 else {
1268 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1269 ip = &ipbuf;
1270 }
1271
1272 sin = (struct sockaddr_in *)&spidx->src;
1273 bzero(sin, sizeof(*sin));
1274 sin->sin_family = AF_INET;
1275 sin->sin_len = sizeof(struct sockaddr_in);
1276 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1277 spidx->prefs = sizeof(struct in_addr) << 3;
1278
1279 sin = (struct sockaddr_in *)&spidx->dst;
1280 bzero(sin, sizeof(*sin));
1281 sin->sin_family = AF_INET;
1282 sin->sin_len = sizeof(struct sockaddr_in);
1283 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1284 spidx->prefd = sizeof(struct in_addr) << 3;
1285
1286 return 0;
1287}
1288
1289#if INET6
1290static void
1291ipsec6_get_ulp(struct mbuf *m,
1292 struct secpolicyindex *spidx,
1293 int needport)
1294{
1295 int off, nxt;
1296 struct tcphdr th;
1297 struct udphdr uh;
1298
1299 /* sanity check */
1300 if (m == NULL)
1301 panic("ipsec6_get_ulp: NULL pointer was passed.\n");
1302
1303 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1304 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1305
1306 /* set default */
1307 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1308 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1309 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1310
1311 nxt = -1;
1312 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1313 if (off < 0 || m->m_pkthdr.len < off)
1314 return;
1315
1316 switch (nxt) {
1317 case IPPROTO_TCP:
1318 spidx->ul_proto = nxt;
1319 if (!needport)
1320 break;
1321 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1322 break;
1323 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1324 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1325 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1326 break;
1327 case IPPROTO_UDP:
1328 spidx->ul_proto = nxt;
1329 if (!needport)
1330 break;
1331 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1332 break;
1333 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1334 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1335 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1336 break;
1337 case IPPROTO_ICMPV6:
1338 default:
1339 /* XXX intermediate headers??? */
1340 spidx->ul_proto = nxt;
1341 break;
1342 }
1343}
1344
1345/* assumes that m is sane */
1346static int
1347ipsec6_setspidx_ipaddr(struct mbuf *m,
1348 struct secpolicyindex *spidx)
1349{
1350 struct ip6_hdr *ip6 = NULL;
1351 struct ip6_hdr ip6buf;
1352 struct sockaddr_in6 *sin6;
1353
1354 if (m->m_len >= sizeof(*ip6))
1355 ip6 = mtod(m, struct ip6_hdr *);
1356 else {
1357 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1358 ip6 = &ip6buf;
1359 }
1360
1361 sin6 = (struct sockaddr_in6 *)&spidx->src;
1362 bzero(sin6, sizeof(*sin6));
1363 sin6->sin6_family = AF_INET6;
1364 sin6->sin6_len = sizeof(struct sockaddr_in6);
1365 bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1366 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
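		/*
		 * KAME-style embedded scope: inside the kernel a link-local
		 * address carries the interface index in its second 16-bit
		 * word, so recover it into sin6_scope_id and clear it from
		 * the address proper (the same is done for the destination
		 * below).
		 */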
1367 sin6->sin6_addr.s6_addr16[1] = 0;
1368 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1369 }
1370 spidx->prefs = sizeof(struct in6_addr) << 3;
1371
1372 sin6 = (struct sockaddr_in6 *)&spidx->dst;
1373 bzero(sin6, sizeof(*sin6));
1374 sin6->sin6_family = AF_INET6;
1375 sin6->sin6_len = sizeof(struct sockaddr_in6);
1376 bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1377 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1378 sin6->sin6_addr.s6_addr16[1] = 0;
1379 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1380 }
1381 spidx->prefd = sizeof(struct in6_addr) << 3;
1382
1383 return 0;
1384}
1385#endif
1386
1387static struct inpcbpolicy *
1388ipsec_newpcbpolicy(void)
1389{
1390 struct inpcbpolicy *p;
1391
1392 p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK);
1393 return p;
1394}
1395
1396static void
1397ipsec_delpcbpolicy(struct inpcbpolicy *p)
1398{
1399 FREE(p, M_SECA);
1400}
1401
1402/* initialize policy in PCB */
1403int
1404ipsec_init_policy(struct socket *so,
1405 struct inpcbpolicy **pcb_sp)
1406{
1407 struct inpcbpolicy *new;
1408
1409 /* sanity check. */
1410 if (so == NULL || pcb_sp == NULL)
1411 panic("ipsec_init_policy: NULL pointer was passed.\n");
1412
1413 new = ipsec_newpcbpolicy();
1414 if (new == NULL) {
1415 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1416 return ENOBUFS;
1417 }
1418 bzero(new, sizeof(*new));
1419
1420#ifdef __APPLE__
1421 if (kauth_cred_issuser(so->so_cred))
1422#else
1423 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1424#endif
1425 new->priv = 1;
1426 else
1427 new->priv = 0;
1428
1429 if ((new->sp_in = key_newsp()) == NULL) {
1430 ipsec_delpcbpolicy(new);
1431 return ENOBUFS;
1432 }
1433 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1434 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1435
1436 if ((new->sp_out = key_newsp()) == NULL) {
1437 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1438 ipsec_delpcbpolicy(new);
1439 return ENOBUFS;
1440 }
1441 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1442 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1443
1444 *pcb_sp = new;
1445
1446 return 0;
1447}
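/*
 * Both directions start out as IPSEC_POLICY_ENTRUST, i.e. a fresh PCB defers
 * entirely to the kernel SPD until ipsec{4,6}_set_policy() installs something
 * more specific; that is the ENTRUST branch taken in the *_getpolicybysock()
 * routines above.
 */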
1448
1449/* copy old ipsec policy into new */
1450int
1451ipsec_copy_policy(struct inpcbpolicy *old,
1452 struct inpcbpolicy *new)
1453{
1454 struct secpolicy *sp;
1455
1456 if (ipsec_bypass != 0)
1457 return 0;
1458
1459 sp = ipsec_deepcopy_policy(old->sp_in);
1460 if (sp) {
1461 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1462 new->sp_in = sp;
1463 } else
1464 return ENOBUFS;
1465
1466 sp = ipsec_deepcopy_policy(old->sp_out);
1467 if (sp) {
1468 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1469 new->sp_out = sp;
1470 } else
1471 return ENOBUFS;
1472
1473 new->priv = old->priv;
1474
1475 return 0;
1476}
1477
1478/* deep-copy a policy in PCB */
1479static struct secpolicy *
1480ipsec_deepcopy_policy(struct secpolicy *src)
1481{
1482 struct ipsecrequest *newchain = NULL;
1483 struct ipsecrequest *p;
1484 struct ipsecrequest **q;
1485 struct ipsecrequest *r;
1486 struct secpolicy *dst;
1487
1488 if (src == NULL)
1489 return NULL;
1490 dst = key_newsp();
1491 if (dst == NULL)
1492 return NULL;
1493
1494 /*
1495 * deep-copy IPsec request chain. This is required since struct
1496 * ipsecrequest is not reference counted.
1497 */
1498 q = &newchain;
1499 for (p = src->req; p; p = p->next) {
1500 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1501 M_SECA, M_WAITOK | M_ZERO);
1502 if (*q == NULL)
1503 goto fail;
1504 (*q)->next = NULL;
1505
1506 (*q)->saidx.proto = p->saidx.proto;
1507 (*q)->saidx.mode = p->saidx.mode;
1508 (*q)->level = p->level;
1509 (*q)->saidx.reqid = p->saidx.reqid;
1510
1511 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1512 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1513
1514 (*q)->sp = dst;
1515
1516 q = &((*q)->next);
1517 }
1518
1519 dst->req = newchain;
1520 dst->state = src->state;
1521 dst->policy = src->policy;
1522 /* do not touch the refcnt fields */
1523
1524 return dst;
1525
1526fail:
1527 for (p = newchain; p; p = r) {
1528 r = p->next;
1529 FREE(p, M_SECA);
1530 p = NULL;
1531 }
1532 key_freesp(dst, KEY_SADB_UNLOCKED);
1533 return NULL;
1534}
1535
1536/* set policy and ipsec request if present. */
1537static int
1538ipsec_set_policy(struct secpolicy **pcb_sp,
1539 __unused int optname,
1540 caddr_t request,
1541 size_t len,
1542 int priv)
1543{
1544 struct sadb_x_policy *xpl;
1545 struct secpolicy *newsp = NULL;
1546 int error;
1547
1548 /* sanity check. */
1549 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL)
1550 return EINVAL;
1551 if (len < sizeof(*xpl))
1552 return EINVAL;
1553 xpl = (struct sadb_x_policy *)(void *)request;
1554
1555 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1556 printf("ipsec_set_policy: passed policy\n");
1557 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1558
1559 /* check policy type */
1560 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1561 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1562 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE)
1563 return EINVAL;
1564
1565 /* check privileged socket */
1566 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS)
1567 return EACCES;
1568
1569 /* allocate a new SP entry */
1570 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL)
1571 return error;
1572
1573 newsp->state = IPSEC_SPSTATE_ALIVE;
1574
1575 /* clear old SP and set new SP */
1576 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1577 *pcb_sp = newsp;
1578 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1579 printf("ipsec_set_policy: new policy\n");
1580 kdebug_secpolicy(newsp));
1581
1582 return 0;
1583}
1584
1585int
1586ipsec4_set_policy(struct inpcb *inp,
1587 int optname,
1588 caddr_t request,
1589 size_t len,
1590 int priv)
1591{
1592 struct sadb_x_policy *xpl;
1593 struct secpolicy **pcb_sp;
1594 int error = 0;
1595 struct sadb_x_policy xpl_aligned_buf;
1596 u_int8_t *xpl_unaligned;
1597
1598 /* sanity check. */
1599 if (inp == NULL || request == NULL)
1600 return EINVAL;
1601 if (len < sizeof(*xpl))
1602 return EINVAL;
1603 xpl = (struct sadb_x_policy *)(void *)request;
1604
1605 /* This is a new mbuf allocated by soopt_getm() */
1606 if (IPSEC_IS_P2ALIGNED(xpl)) {
1607 xpl_unaligned = NULL;
1608 } else {
1609 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1610 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1611 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1612 }
1613
1614 if (inp->inp_sp == NULL) {
1615 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1616 if (error)
1617 return error;
1618 }
1619
1620 /* select direction */
1621 switch (xpl->sadb_x_policy_dir) {
1622 case IPSEC_DIR_INBOUND:
1623 pcb_sp = &inp->inp_sp->sp_in;
1624 break;
1625 case IPSEC_DIR_OUTBOUND:
1626 pcb_sp = &inp->inp_sp->sp_out;
1627 break;
1628 default:
1629 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1630 xpl->sadb_x_policy_dir));
1631 return EINVAL;
1632 }
1633
1634 /* turn bypass off */
1635 if (ipsec_bypass != 0)
1636 ipsec_bypass = 0;
1637
1638 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1639}
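/*
 * Note the side effect above: installing any per-socket policy clears the
 * global ipsec_bypass flag, so the "bypass ipsec until a security policy is
 * set" fast path (see the sysctl near the top of this file) is disabled
 * system-wide from that point on.
 */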
1640
1641/* delete policy in PCB */
1642int
1643ipsec4_delete_pcbpolicy(struct inpcb *inp)
1644{
1645
1646 /* sanity check. */
1647 if (inp == NULL)
1648 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n");
1649
1650 if (inp->inp_sp == NULL)
1651 return 0;
1652
1653 if (inp->inp_sp->sp_in != NULL) {
1654 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1655 inp->inp_sp->sp_in = NULL;
1656 }
1657
1658 if (inp->inp_sp->sp_out != NULL) {
1659 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1660 inp->inp_sp->sp_out = NULL;
1661 }
1662
1663 ipsec_delpcbpolicy(inp->inp_sp);
1664 inp->inp_sp = NULL;
1665
1666 return 0;
1667}
1668
1669#if INET6
1670int
1671ipsec6_set_policy(struct in6pcb *in6p,
1672 int optname,
1673 caddr_t request,
1674 size_t len,
1675 int priv)
1676{
1677 struct sadb_x_policy *xpl;
1678 struct secpolicy **pcb_sp;
1679 int error = 0;
1680 struct sadb_x_policy xpl_aligned_buf;
1681 u_int8_t *xpl_unaligned;
1682
1683 /* sanity check. */
1684 if (in6p == NULL || request == NULL)
1685 return EINVAL;
1686 if (len < sizeof(*xpl))
1687 return EINVAL;
1688 xpl = (struct sadb_x_policy *)(void *)request;
1689
1690 /* This is a new mbuf allocated by soopt_getm() */
1691 if (IPSEC_IS_P2ALIGNED(xpl)) {
1692 xpl_unaligned = NULL;
1693 } else {
1694 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1695 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1696 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1697 }
1698
1699 if (in6p->in6p_sp == NULL) {
1700 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1701 if (error)
1702 return error;
1703 }
1704
1705 /* select direction */
1706 switch (xpl->sadb_x_policy_dir) {
1707 case IPSEC_DIR_INBOUND:
1708 pcb_sp = &in6p->in6p_sp->sp_in;
1709 break;
1710 case IPSEC_DIR_OUTBOUND:
1711 pcb_sp = &in6p->in6p_sp->sp_out;
1712 break;
1713 default:
1714 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1715 xpl->sadb_x_policy_dir));
1716 return EINVAL;
1717 }
1718
1719 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1720}
1721
1722int
1723ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1724{
1725
1726 /* sanity check. */
1727 if (in6p == NULL)
1728 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n");
1729
1730 if (in6p->in6p_sp == NULL)
1731 return 0;
1732
1733 if (in6p->in6p_sp->sp_in != NULL) {
1734 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1735 in6p->in6p_sp->sp_in = NULL;
1736 }
1737
1738 if (in6p->in6p_sp->sp_out != NULL) {
1739 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1740 in6p->in6p_sp->sp_out = NULL;
1741 }
1742
1743 ipsec_delpcbpolicy(in6p->in6p_sp);
1744 in6p->in6p_sp = NULL;
1745
1746 return 0;
1747}
1748#endif
1749
1750/*
1751 * return current level.
1752 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned.
1753 */
1754u_int
1755ipsec_get_reqlevel(struct ipsecrequest *isr)
1756{
1757 u_int level = 0;
1758 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1759
1760 /* sanity check */
1761 if (isr == NULL || isr->sp == NULL)
1762 panic("ipsec_get_reqlevel: NULL pointer is passed.\n");
1763 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1764 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family)
1765 panic("ipsec_get_reqlevel: family mismatched.\n");
1766
1767/* XXX note that we have ipseclog() expanded here - code sync issue */
1768#define IPSEC_CHECK_DEFAULT(lev) \
1769 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1770 && (lev) != IPSEC_LEVEL_UNIQUE) \
1771 ? (ipsec_debug \
1772 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1773 (lev), IPSEC_LEVEL_REQUIRE) \
1774 : (void)0), \
1775 (lev) = IPSEC_LEVEL_REQUIRE, \
1776 (lev) \
1777 : (lev))
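/*
 * IPSEC_CHECK_DEFAULT() sanitizes a per-protocol default-level sysctl in
 * place: any value other than IPSEC_LEVEL_USE, IPSEC_LEVEL_REQUIRE or
 * IPSEC_LEVEL_UNIQUE is forced back to IPSEC_LEVEL_REQUIRE (logging the fix
 * when ipsec_debug is set), and the macro evaluates to the resulting level.
 */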
1778
1779 /* set default level */
1780 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1781#if INET
1782 case AF_INET:
1783 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1784 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1785 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1786 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1787 break;
1788#endif
1789#if INET6
1790 case AF_INET6:
1791 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1792 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1793 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1794 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1795 break;
1796#endif /* INET6 */
1797 default:
1798 panic("key_get_reqlevel: Unknown family. %d\n",
1799 ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1800 }
1801
1802#undef IPSEC_CHECK_DEFAULT
1803
1804 /* set level */
1805 switch (isr->level) {
1806 case IPSEC_LEVEL_DEFAULT:
1807 switch (isr->saidx.proto) {
1808 case IPPROTO_ESP:
1809 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1810 level = esp_net_deflev;
1811 else
1812 level = esp_trans_deflev;
1813 break;
1814 case IPPROTO_AH:
1815 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1816 level = ah_net_deflev;
1817 else
1818 level = ah_trans_deflev;
1819 break;
1820 case IPPROTO_IPCOMP:
1821 /*
1822 * we don't really care, as the IPComp document says that
1823 * we shouldn't compress small packets
1824 */
1825 level = IPSEC_LEVEL_USE;
1826 break;
1827 default:
1828 panic("ipsec_get_reqlevel: "
1829 "Illegal protocol defined %u\n",
1830 isr->saidx.proto);
1831 }
1832 break;
1833
1834 case IPSEC_LEVEL_USE:
1835 case IPSEC_LEVEL_REQUIRE:
1836 level = isr->level;
1837 break;
1838 case IPSEC_LEVEL_UNIQUE:
1839 level = IPSEC_LEVEL_REQUIRE;
1840 break;
1841
1842 default:
1843 panic("ipsec_get_reqlevel: Illegal IPsec level %u\n",
1844 isr->level);
1845 }
1846
1847 return level;
1848}
1849
1850/*
1851 * Check AH/ESP integrity.
1852 * OUT:
1853 * 0: valid
1854 * 1: invalid
1855 */
1856static int
1857ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1858{
1859 struct ipsecrequest *isr;
1860 u_int level;
1861 int need_auth, need_conf, need_icv;
1862
1863 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1864 printf("ipsec_in_reject: using SP\n");
1865 kdebug_secpolicy(sp));
1866
1867 /* check policy */
1868 switch (sp->policy) {
1869 case IPSEC_POLICY_DISCARD:
1870 case IPSEC_POLICY_GENERATE:
1871 return 1;
1872 case IPSEC_POLICY_BYPASS:
1873 case IPSEC_POLICY_NONE:
1874 return 0;
1875
1876 case IPSEC_POLICY_IPSEC:
1877 break;
1878
1879 case IPSEC_POLICY_ENTRUST:
1880 default:
1881 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
1882 }
1883
1884 need_auth = 0;
1885 need_conf = 0;
1886 need_icv = 0;
1887
1888 /* XXX should compare policy against ipsec header history */
1889
1890 for (isr = sp->req; isr != NULL; isr = isr->next) {
1891
1892 /* get current level */
1893 level = ipsec_get_reqlevel(isr);
1894
1895 switch (isr->saidx.proto) {
1896 case IPPROTO_ESP:
1897 if (level == IPSEC_LEVEL_REQUIRE) {
1898 need_conf++;
1899
1900#if 0
1901 /* this won't work with multiple input threads - isr->sav would change
1902 * with every packet and is not necessarily related to the current packet
1903 * being processed. If ESP processing is required - the esp code should
1904 * make sure that the integrity check is present and correct. I don't see
1905 * why it would be necessary to check for the presence of the integrity
1906 * check value here. I think this is just wrong.
1907 * isr->sav has been removed.
1908 * %%%%%% this needs to be re-worked at some point but I think the code below can
1909 * be ignored for now.
1910 */
1911 if (isr->sav != NULL
1912 && isr->sav->flags == SADB_X_EXT_NONE
1913 && isr->sav->alg_auth != SADB_AALG_NONE)
1914 need_icv++;
1915#endif
1916 }
1917 break;
1918 case IPPROTO_AH:
1919 if (level == IPSEC_LEVEL_REQUIRE) {
1920 need_auth++;
1921 need_icv++;
1922 }
1923 break;
1924 case IPPROTO_IPCOMP:
1925 /*
1926 * we don't really care, as the IPComp document says that
1927 * we shouldn't compress small packets, IPComp policy
1928 * should always be treated as being in "use" level.
1929 */
1930 break;
1931 }
1932 }
1933
1934 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1935 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
1936 need_auth, need_conf, need_icv, m->m_flags));
1937
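	/*
	 * The mbuf flags consulted below are presumably set by the AH/ESP
	 * input paths: M_DECRYPTED when ESP actually decrypted the packet,
	 * and M_AUTHIPHDR/M_AUTHIPDGM when an authenticator was verified.
	 * The packet is rejected if the policy demanded confidentiality or
	 * authentication that the flags do not account for.
	 */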
1938 if ((need_conf && !(m->m_flags & M_DECRYPTED))
1939 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
1940 || (need_auth && !(m->m_flags & M_AUTHIPHDR)))
1941 return 1;
1942
1943 return 0;
1944}
1945
1946/*
1947 * Check AH/ESP integrity.
1948 * This function is called from tcp_input(), udp_input(),
1949 * and {ah,esp}4_input for tunnel mode
1950 */
1951int
1952ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
1953{
1954 struct secpolicy *sp = NULL;
1955 int error;
1956 int result;
1957
1958 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
1959 /* sanity check */
1960 if (m == NULL)
1961 return 0; /* XXX should be panic ? */
1962
1963 /* get SP for this packet.
1964 * When we are called from ip_forward(), we call
1965 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
1966 */
1967 if (so == NULL)
1968 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
1969 else
1970 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
1971
1972 if (sp == NULL)
1973		return 0;	/* XXX should be panic ?
1974				 * -> No, there may be an error. */
1975
1976 result = ipsec_in_reject(sp, m);
1977 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1978 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
1979 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
1980 key_freesp(sp, KEY_SADB_UNLOCKED);
1981
1982 return result;
1983}
1984
1985int
1986ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
1987{
1988 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
1989 if (inp == NULL)
1990 return ipsec4_in_reject_so(m, NULL);
1991 if (inp->inp_socket)
1992 return ipsec4_in_reject_so(m, inp->inp_socket);
1993 else
1994 panic("ipsec4_in_reject: invalid inpcb/socket");
1995
1996 /* NOTREACHED */
1997 return 0;
1998}
1999
2000#if INET6
2001/*
2002 * Check AH/ESP integrity.
2003 * This function is called from tcp6_input(), udp6_input(),
2004 * and {ah,esp}6_input for tunnel mode
2005 */
2006int
2007ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2008{
2009 struct secpolicy *sp = NULL;
2010 int error;
2011 int result;
2012
2013 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2014 /* sanity check */
2015 if (m == NULL)
2016 return 0; /* XXX should be panic ? */
2017
2018 /* get SP for this packet.
2019 * When we are called from ip_forward(), we call
2020 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2021 */
2022 if (so == NULL)
2023 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2024 else
2025 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2026
2027 if (sp == NULL)
2028 return 0; /* XXX should be panic ? */
2029
2030 result = ipsec_in_reject(sp, m);
2031 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2032 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2033 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2034 key_freesp(sp, KEY_SADB_UNLOCKED);
2035
2036 return result;
2037}
2038
2039int
2040ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2041{
2042
2043 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2044 if (in6p == NULL)
2045 return ipsec6_in_reject_so(m, NULL);
2046 if (in6p->in6p_socket)
2047 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2048 else
2049 panic("ipsec6_in_reject: invalid in6p/socket");
2050
2051 /* NOTREACHED */
2052 return 0;
2053}
2054#endif
2055
2056/*
2057 * Compute the byte size to be occupied by the IPsec header.
2058 * If it is tunneled, it includes the size of the outer IP header.
2059 * NOTE: the SP passed in is not freed here; the caller must free it.
2060 */
2061size_t
2062ipsec_hdrsiz(struct secpolicy *sp)
2063{
2064 struct ipsecrequest *isr;
2065 size_t siz, clen;
2066
2067 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2068 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2069 printf("ipsec_hdrsiz: using SP\n");
2070 kdebug_secpolicy(sp));
2071
2072 /* check policy */
2073 switch (sp->policy) {
2074 case IPSEC_POLICY_DISCARD:
2075 case IPSEC_POLICY_GENERATE:
2076 case IPSEC_POLICY_BYPASS:
2077 case IPSEC_POLICY_NONE:
2078 return 0;
2079
2080 case IPSEC_POLICY_IPSEC:
2081 break;
2082
2083 case IPSEC_POLICY_ENTRUST:
2084 default:
2085 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
2086 }
2087
2088 siz = 0;
2089
2090 for (isr = sp->req; isr != NULL; isr = isr->next) {
2091
2092 clen = 0;
2093
2094 switch (isr->saidx.proto) {
2095 case IPPROTO_ESP:
2096#if IPSEC_ESP
2097 clen = esp_hdrsiz(isr);
2098#else
2099 clen = 0; /*XXX*/
2100#endif
2101 break;
2102 case IPPROTO_AH:
2103 clen = ah_hdrsiz(isr);
2104 break;
2105 case IPPROTO_IPCOMP:
2106 clen = sizeof(struct ipcomp);
2107 break;
2108 }
2109
2110 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2111 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2112 case AF_INET:
2113 clen += sizeof(struct ip);
2114 break;
2115#if INET6
2116 case AF_INET6:
2117 clen += sizeof(struct ip6_hdr);
2118 break;
2119#endif
2120 default:
2121 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2122 "unknown AF %d in IPsec tunnel SA\n",
2123 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2124 break;
2125 }
2126 }
2127 siz += clen;
2128 }
2129
2130 return siz;
2131}
2132
2133/* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2134size_t
2135ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp)
2136{
2137 struct secpolicy *sp = NULL;
2138 int error;
2139 size_t size;
2140
2141 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2142 /* sanity check */
2143 if (m == NULL)
2144 return 0; /* XXX should be panic ? */
2145 if (inp != NULL && inp->inp_socket == NULL)
2146		panic("ipsec4_hdrsiz: socket is NULL but the PCB is not.");
2147
2148 /* get SP for this packet.
2149 * When we are called from ip_forward(), we call
2150 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2151 */
2152 if (inp == NULL)
2153 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2154 else
2155 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2156
2157 if (sp == NULL)
2158 return 0; /* XXX should be panic ? */
2159
2160 size = ipsec_hdrsiz(sp);
2161 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2162 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2163 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2164 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2165 printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size));
2166 key_freesp(sp, KEY_SADB_UNLOCKED);
2167
2168 return size;
2169}
2170
2171#if INET6
2172/* This function is called from ipsec6_hdrsize_tcp(),
2173 * and maybe from ip6_forward().
2174 */
2175size_t
2176ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p)
2177{
2178 struct secpolicy *sp = NULL;
2179 int error;
2180 size_t size;
2181
2182 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2183 /* sanity check */
2184 if (m == NULL)
2185		return 0;	/* XXX should be panic ? */
2186 if (in6p != NULL && in6p->in6p_socket == NULL)
2187		panic("ipsec6_hdrsiz: socket is NULL but the PCB is not.");
2188
2189 /* get SP for this packet */
2190 /* XXX Is it right to call with IP_FORWARDING. */
2191 if (in6p == NULL)
2192 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2193 else
2194 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2195
2196 if (sp == NULL)
2197 return 0;
2198 size = ipsec_hdrsiz(sp);
2199 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2200 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2201 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2202 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2203 printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size));
2204 key_freesp(sp, KEY_SADB_UNLOCKED);
2205
2206 return size;
2207}
2208#endif /*INET6*/
2209
2210#if INET
2211/*
2212 * encapsulate for ipsec tunnel.
2213 * ip->ip_src must be fixed later on.
2214 */
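/*
 * Sketch of the mbuf rearrangement performed below:
 *
 *   before:  [ inner IPv4 header (+options) | payload ... ]
 *   after:   [ new outer IPv4 header ] -> [ original inner header ] -> payload
 *
 * The original header is copied into the following mbuf and the first
 * mbuf is shrunk to hold only the new outer header.
 */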
2215int
2216ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
2217{
2218 struct ip *oip;
2219 struct ip *ip;
2220 size_t hlen;
2221 size_t plen;
2222
2223 /* can't tunnel between different AFs */
2224 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2225 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2226 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2227 m_freem(m);
2228 return EINVAL;
2229 }
2230#if 0
2231 /* XXX if the dst is myself, perform nothing. */
2232 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2233 m_freem(m);
2234 return EINVAL;
2235 }
2236#endif
2237
2238 if (m->m_len < sizeof(*ip))
2239 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2240
2241 ip = mtod(m, struct ip *);
2242#ifdef _IP_VHL
2243 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2244#else
2245 hlen = ip->ip_hl << 2;
2246#endif
2247
2248 if (m->m_len != hlen)
2249 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2250
2251 /* generate header checksum */
2252 ip->ip_sum = 0;
2254	ip->ip_sum = in_cksum(m, hlen);
2258
2259 plen = m->m_pkthdr.len;
2260
2261 /*
2262	 * grow the mbuf to accommodate the new IPv4 header.
2263 * NOTE: IPv4 options will never be copied.
2264 */
2265 if (M_LEADINGSPACE(m->m_next) < hlen) {
2266 struct mbuf *n;
2267 MGET(n, M_DONTWAIT, MT_DATA);
2268 if (!n) {
2269 m_freem(m);
2270 return ENOBUFS;
2271 }
2272 n->m_len = hlen;
2273 n->m_next = m->m_next;
2274 m->m_next = n;
2275 m->m_pkthdr.len += hlen;
2276 oip = mtod(n, struct ip *);
2277 } else {
2278 m->m_next->m_len += hlen;
2279 m->m_next->m_data -= hlen;
2280 m->m_pkthdr.len += hlen;
2281 oip = mtod(m->m_next, struct ip *);
2282 }
2283 ip = mtod(m, struct ip *);
2284 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2285 m->m_len = sizeof(struct ip);
2286 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2287
2288 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2289 /* ECN consideration. */
2290 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2291#ifdef _IP_VHL
2292 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2293#else
2294 ip->ip_hl = sizeof(struct ip) >> 2;
2295#endif
2296 ip->ip_off &= htons(~IP_OFFMASK);
2297 ip->ip_off &= htons(~IP_MF);
2298 switch (ip4_ipsec_dfbit) {
2299 case 0: /* clear DF bit */
2300 ip->ip_off &= htons(~IP_DF);
2301 break;
2302 case 1: /* set DF bit */
2303 ip->ip_off |= htons(IP_DF);
2304 break;
2305 default: /* copy DF bit */
2306 break;
2307 }
2308 ip->ip_p = IPPROTO_IPIP;
2309 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2310 ip->ip_len = htons(plen + sizeof(struct ip));
2311 else {
2312 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2313 "leave ip_len as is (invalid packet)\n"));
2314 }
2315 ip->ip_id = ip_randomid();
2316 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2317 &ip->ip_src, sizeof(ip->ip_src));
2318 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2319 &ip->ip_dst, sizeof(ip->ip_dst));
2320 ip->ip_ttl = IPDEFTTL;
2321
2322 /* XXX Should ip_src be updated later ? */
2323
2324 return 0;
2325}
2326
2327#endif /*INET*/
2328
2329#if INET6
2330int
2331ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
2332{
2333 struct ip6_hdr *oip6;
2334 struct ip6_hdr *ip6;
2335 size_t plen;
2336
2337 /* can't tunnel between different AFs */
2338 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2339 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2340 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2341 m_freem(m);
2342 return EINVAL;
2343 }
2344#if 0
2345 /* XXX if the dst is myself, perform nothing. */
2346 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2347 m_freem(m);
2348 return EINVAL;
2349 }
2350#endif
2351
2352 plen = m->m_pkthdr.len;
2353
2354 /*
2355	 * grow the mbuf to accommodate the new IPv6 header.
2356 */
2357 if (m->m_len != sizeof(struct ip6_hdr))
2358 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2359 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2360 struct mbuf *n;
2361 MGET(n, M_DONTWAIT, MT_DATA);
2362 if (!n) {
2363 m_freem(m);
2364 return ENOBUFS;
2365 }
2366 n->m_len = sizeof(struct ip6_hdr);
2367 n->m_next = m->m_next;
2368 m->m_next = n;
2369 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2370 oip6 = mtod(n, struct ip6_hdr *);
2371 } else {
2372 m->m_next->m_len += sizeof(struct ip6_hdr);
2373 m->m_next->m_data -= sizeof(struct ip6_hdr);
2374 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2375 oip6 = mtod(m->m_next, struct ip6_hdr *);
2376 }
2377 ip6 = mtod(m, struct ip6_hdr *);
2378 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2379
2380	/* Clear the embedded scope IDs of link-local addresses in the inner header */
2381 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src))
2382 oip6->ip6_src.s6_addr16[1] = 0;
2383 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst))
2384 oip6->ip6_dst.s6_addr16[1] = 0;
2385
2386 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2387 /* ECN consideration. */
2388 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2389 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2390 ip6->ip6_plen = htons(plen);
2391 else {
2392 /* ip6->ip6_plen will be updated in ip6_output() */
2393 }
2394 ip6->ip6_nxt = IPPROTO_IPV6;
2395 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2396 &ip6->ip6_src, sizeof(ip6->ip6_src));
2397 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2398 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2399 ip6->ip6_hlim = IPV6_DEFHLIM;
2400
2401 /* XXX Should ip6_src be updated later ? */
2402
2403 return 0;
2404}
2405
2406static int
2407ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav)
2408{
2409 struct ip6_hdr *ip6, *ip6i;
2410 struct ip *ip;
2411 size_t plen;
2412 u_int8_t hlim;
2413
2414 /* tunneling over IPv4 */
2415 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2416 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2417 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2418 m_freem(m);
2419 return EINVAL;
2420 }
2421#if 0
2422 /* XXX if the dst is myself, perform nothing. */
2423 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2424 m_freem(m);
2425 return EINVAL;
2426 }
2427#endif
2428
2429 plen = m->m_pkthdr.len;
2430 ip6 = mtod(m, struct ip6_hdr *);
2431 hlim = ip6->ip6_hlim;
2432 /*
2433	 * grow the mbuf to accommodate the new IPv4 header.
2434 */
2435 if (m->m_len != sizeof(struct ip6_hdr))
2436		panic("ipsec64_encapsulate: assumption failed (first mbuf length)");
2437 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2438 struct mbuf *n;
2439 MGET(n, M_DONTWAIT, MT_DATA);
2440 if (!n) {
2441 m_freem(m);
2442 return ENOBUFS;
2443 }
2444 n->m_len = sizeof(struct ip6_hdr);
2445 n->m_next = m->m_next;
2446 m->m_next = n;
2447 m->m_pkthdr.len += sizeof(struct ip);
2448 ip6i = mtod(n, struct ip6_hdr *);
2449 } else {
2450 m->m_next->m_len += sizeof(struct ip6_hdr);
2451 m->m_next->m_data -= sizeof(struct ip6_hdr);
2452 m->m_pkthdr.len += sizeof(struct ip);
2453 ip6i = mtod(m->m_next, struct ip6_hdr *);
2454 }
2455
2456 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2457 ip = mtod(m, struct ip *);
2458 m->m_len = sizeof(struct ip);
2459 /*
2460 * Fill in some of the IPv4 fields - we don't need all of them
2461 * because the rest will be filled in by ip_output
2462 */
2463 ip->ip_v = IPVERSION;
2464 ip->ip_hl = sizeof(struct ip) >> 2;
2465 ip->ip_id = 0;
2466 ip->ip_sum = 0;
2467 ip->ip_tos = 0;
2468 ip->ip_off = 0;
2469 ip->ip_ttl = hlim;
2470 ip->ip_p = IPPROTO_IPV6;
2471
2472 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2473 /* ECN consideration. */
2474 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2475
2476 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2477 ip->ip_len = htons(plen + sizeof(struct ip));
2478 else {
2479 ip->ip_len = htons(plen);
2480 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2481 "leave ip_len as is (invalid packet)\n"));
2482 }
2483 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2484 &ip->ip_src, sizeof(ip->ip_src));
2485 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2486 &ip->ip_dst, sizeof(ip->ip_dst));
2487
2488 return 0;
2489}
2490
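/*
 * Refresh the SAH's cached route for the IPv6 tunnel destination,
 * apply the outbound ESP/AH transform to state->m, and hand the
 * result to ip6_output().  On success the packet is consumed and
 * state->m is set to NULL.
 */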
2491int
2492ipsec6_update_routecache_and_output(
2493 struct ipsec_output_state *state,
2494 struct secasvar *sav)
2495{
2496 struct sockaddr_in6* dst6;
2497 struct route *ro6;
2498 struct ip6_hdr *ip6;
2499 errno_t error = 0;
2500
2501 int plen;
2502 struct ip6_out_args ip6oa;
2503 struct route_in6 ro6_new;
2504 struct flowadv *adv = NULL;
2505
2506 if (!state->m) {
2507 return EINVAL;
2508 }
2509 ip6 = mtod(state->m, struct ip6_hdr *);
2510
2511 // grab sadb_mutex, before updating sah's route cache
2512 lck_mtx_lock(sadb_mutex);
2513 ro6 = &sav->sah->sa_route;
2514 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2515 if (ro6->ro_rt) {
2516 RT_LOCK(ro6->ro_rt);
2517 }
2518 if (ROUTE_UNUSABLE(ro6) ||
2519 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2520 if (ro6->ro_rt != NULL)
2521 RT_UNLOCK(ro6->ro_rt);
2522 ROUTE_RELEASE(ro6);
2523 }
2524 if (ro6->ro_rt == 0) {
2525 bzero(dst6, sizeof(*dst6));
2526 dst6->sin6_family = AF_INET6;
2527 dst6->sin6_len = sizeof(*dst6);
2528 dst6->sin6_addr = ip6->ip6_dst;
2529 rtalloc_scoped(ro6, sav->sah->outgoing_if);
2530 if (ro6->ro_rt) {
2531 RT_LOCK(ro6->ro_rt);
2532 }
2533 }
2534 if (ro6->ro_rt == 0) {
2535 ip6stat.ip6s_noroute++;
2536 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2537 error = EHOSTUNREACH;
2538 // release sadb_mutex, after updating sah's route cache
2539 lck_mtx_unlock(sadb_mutex);
2540 return error;
2541 }
2542
2543 /*
2544 * adjust state->dst if tunnel endpoint is offlink
2545 *
2546 * XXX: caching rt_gateway value in the state is
2547 * not really good, since it may point elsewhere
2548 * when the gateway gets modified to a larger
2549 * sockaddr via rt_setgate(). This is currently
2550 * addressed by SA_SIZE roundup in that routine.
2551 */
2552 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
2553 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2554 RT_UNLOCK(ro6->ro_rt);
2555 ROUTE_RELEASE(&state->ro);
2556 route_copyout(&state->ro, ro6, sizeof(state->ro));
2557 state->dst = (struct sockaddr *)dst6;
2558 state->tunneled = 6;
2559 // release sadb_mutex, after updating sah's route cache
2560 lck_mtx_unlock(sadb_mutex);
2561
2562 state->m = ipsec6_splithdr(state->m);
2563 if (!state->m) {
2564 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2565 error = ENOMEM;
2566 return error;
2567 }
2568
2569 ip6 = mtod(state->m, struct ip6_hdr *);
2570 switch (sav->sah->saidx.proto) {
2571 case IPPROTO_ESP:
2572#if IPSEC_ESP
2573 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2574#else
2575 m_freem(state->m);
2576 error = EINVAL;
2577#endif
2578 break;
2579 case IPPROTO_AH:
2580 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2581 break;
2582 case IPPROTO_IPCOMP:
2583 /* XXX code should be here */
2584 /*FALLTHROUGH*/
2585 default:
2586 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2587 m_freem(state->m);
2588 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2589 error = EINVAL;
2590 break;
2591 }
2592 if (error) {
2593 // If error, packet already freed by above output routines
2594 state->m = NULL;
2595 return error;
2596 }
2597
2598 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2599 if (plen > IPV6_MAXPACKET) {
2600 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2601 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2602 error = EINVAL;/*XXX*/
2603 return error;
2604 }
2605 ip6 = mtod(state->m, struct ip6_hdr *);
2606 ip6->ip6_plen = htons(plen);
2607
2608 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2609 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2610
2611 /* Increment statistics */
2612 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, mbuf_pkthdr_len(state->m), 0);
2613
2614 /* Send to ip6_output */
2615 bzero(&ro6_new, sizeof(ro6_new));
2616 bzero(&ip6oa, sizeof(ip6oa));
2617 ip6oa.ip6oa_flowadv.code = 0;
2618 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2619 if (state->outgoing_if) {
2620 ip6oa.ip6oa_boundif = state->outgoing_if;
2621 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2622 }
2623
2624 adv = &ip6oa.ip6oa_flowadv;
2625 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2626 state->m = NULL;
2627
2628 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2629 error = ENOBUFS;
2630 ifnet_disable_output(sav->sah->ipsec_if);
2631 return error;
2632 }
2633
2634 return 0;
2635}
2636
2637int
2638ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
2639{
2640 struct mbuf *m;
2641 struct ip6_hdr *ip6;
2642 struct ip *oip;
2643 struct ip *ip;
2644 size_t hlen;
2645 size_t plen;
2646
2647 m = state->m;
2648 if (!m) {
2649 return EINVAL;
2650 }
2651
2652 /* can't tunnel between different AFs */
2653 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2654 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2655 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2656 m_freem(m);
2657 return EINVAL;
2658 }
2659#if 0
2660 /* XXX if the dst is myself, perform nothing. */
2661 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2662 m_freem(m);
2663 return EINVAL;
2664 }
2665#endif
2666
2667 if (m->m_len < sizeof(*ip)) {
2668 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2669 return EINVAL;
2670 }
2671
2672 ip = mtod(m, struct ip *);
2673#ifdef _IP_VHL
2674 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2675#else
2676 hlen = ip->ip_hl << 2;
2677#endif
2678
2679 if (m->m_len != hlen) {
2680 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2681 return EINVAL;
2682 }
2683
2684 /* generate header checksum */
2685 ip->ip_sum = 0;
2687	ip->ip_sum = in_cksum(m, hlen);
2691
2692 plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len
2693
2694 /*
2695 * First move the IPv4 header to the second mbuf in the chain
2696 */
2697 if (M_LEADINGSPACE(m->m_next) < hlen) {
2698 struct mbuf *n;
2699 MGET(n, M_DONTWAIT, MT_DATA);
2700 if (!n) {
2701 m_freem(m);
2702 return ENOBUFS;
2703 }
2704 n->m_len = hlen;
2705 n->m_next = m->m_next;
2706 m->m_next = n;
2707 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2708 oip = mtod(n, struct ip *);
2709 } else {
2710 m->m_next->m_len += hlen;
2711 m->m_next->m_data -= hlen;
2712 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2713 oip = mtod(m->m_next, struct ip *);
2714 }
2715 ip = mtod(m, struct ip *);
2716 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2717
2718 /*
2719	 * Grow the first mbuf to accommodate the new IPv6 header.
2720 */
2721 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2722 struct mbuf *n;
2723 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2724 if (!n) {
2725 m_freem(m);
2726 return ENOBUFS;
2727 }
2728 M_COPY_PKTHDR(n, m);
2729 MH_ALIGN(n, sizeof(struct ip6_hdr));
2730 n->m_len = sizeof(struct ip6_hdr);
2731 n->m_next = m->m_next;
2732 m->m_next = NULL;
2733 m_freem(m);
2734 state->m = n;
2735 m = state->m;
2736 } else {
2737 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2738 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2739 }
2740 ip6 = mtod(m, struct ip6_hdr *);
2741 ip6->ip6_flow = 0;
2742 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2743 ip6->ip6_vfc |= IPV6_VERSION;
2744
2745 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2746 /* ECN consideration. */
2747 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
2748 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2749 ip6->ip6_plen = htons(plen);
2750 else {
2751 /* ip6->ip6_plen will be updated in ip6_output() */
2752 }
2753
2754 ip6->ip6_nxt = IPPROTO_IPV4;
2755 ip6->ip6_hlim = IPV6_DEFHLIM;
2756
2757 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2758 &ip6->ip6_src, sizeof(ip6->ip6_src));
2759 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2760 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2761
2762 return 0;
2763}
2764
2765#endif /*INET6*/
2766
2767/*
2768 * Check the variable replay window.
2769 * ipsec_chkreplay() performs replay check before ICV verification.
2770 * ipsec_updatereplay() updates replay bitmap. This must be called after
2771 * ICV verification (it also performs replay check, which is usually done
2772 * beforehand).
2773 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2774 *
2775 * based on RFC 2401.
2776 */
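/*
 * Window layout used by the two routines below: replay->bitmap holds
 * wsize bytes, one bit per sequence number.  replay->lastseq maps to
 * the lowest bit of bitmap[wsize - 1], and a sequence number that is
 * (lastseq - diff) maps to bit (diff % 8) of bitmap[frlast - diff / 8],
 * so older sequence numbers extend toward bitmap[0].
 */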
2777int
2778ipsec_chkreplay(u_int32_t seq, struct secasvar *sav)
2779{
2780 const struct secreplay *replay;
2781 u_int32_t diff;
2782 int fr;
2783 u_int32_t wsizeb; /* constant: bits of window size */
2784 int frlast; /* constant: last frame */
2785
2786
2787 /* sanity check */
2788 if (sav == NULL)
2789 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2790
2791 lck_mtx_lock(sadb_mutex);
2792 replay = sav->replay;
2793
2794 if (replay->wsize == 0) {
2795 lck_mtx_unlock(sadb_mutex);
2796 return 1; /* no need to check replay. */
2797 }
2798
2799 /* constant */
2800 frlast = replay->wsize - 1;
2801 wsizeb = replay->wsize << 3;
2802
2803 /* sequence number of 0 is invalid */
2804 if (seq == 0) {
2805 lck_mtx_unlock(sadb_mutex);
2806 return 0;
2807 }
2808
2809 /* first time is always okay */
2810 if (replay->count == 0) {
2811 lck_mtx_unlock(sadb_mutex);
2812 return 1;
2813 }
2814
2815 if (seq > replay->lastseq) {
2816 /* larger sequences are okay */
2817 lck_mtx_unlock(sadb_mutex);
2818 return 1;
2819 } else {
2820 /* seq is equal or less than lastseq. */
2821 diff = replay->lastseq - seq;
2822
2823 /* over range to check, i.e. too old or wrapped */
2824 if (diff >= wsizeb) {
2825 lck_mtx_unlock(sadb_mutex);
2826 return 0;
2827 }
2828
2829 fr = frlast - diff / 8;
2830
2831 /* this packet already seen ? */
2832 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2833 lck_mtx_unlock(sadb_mutex);
2834 return 0;
2835 }
2836
2837 /* out of order but good */
2838 lck_mtx_unlock(sadb_mutex);
2839 return 1;
2840 }
2841}
2842
2843/*
2844 * Check the replay counter and update it if the packet is acceptable.
2845 * OUT: 0: OK
2846 * 1: NG
2847 */
2848int
2849ipsec_updatereplay(u_int32_t seq, struct secasvar *sav)
2850{
2851 struct secreplay *replay;
2852 u_int32_t diff;
2853 int fr;
2854 u_int32_t wsizeb; /* constant: bits of window size */
2855 int frlast; /* constant: last frame */
2856
2857 /* sanity check */
2858 if (sav == NULL)
2859		panic("ipsec_updatereplay: NULL pointer was passed.\n");
2860
2861 lck_mtx_lock(sadb_mutex);
2862 replay = sav->replay;
2863
2864 if (replay->wsize == 0)
2865 goto ok; /* no need to check replay. */
2866
2867 /* constant */
2868 frlast = replay->wsize - 1;
2869 wsizeb = replay->wsize << 3;
2870
2871 /* sequence number of 0 is invalid */
2872	if (seq == 0) {
2873		lck_mtx_unlock(sadb_mutex);
		return 1;
	}
2874
2875 /* first time */
2876 if (replay->count == 0) {
2877 replay->lastseq = seq;
2878 bzero(replay->bitmap, replay->wsize);
2879 (replay->bitmap)[frlast] = 1;
2880 goto ok;
2881 }
2882
2883 if (seq > replay->lastseq) {
2884 /* seq is larger than lastseq. */
2885 diff = seq - replay->lastseq;
2886
2887 /* new larger sequence number */
2888 if (diff < wsizeb) {
2889 /* In window */
2890 /* set bit for this packet */
2891 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
2892 (replay->bitmap)[frlast] |= 1;
2893 } else {
2894			/* this packet has a way larger sequence number; reset the window */
2895 bzero(replay->bitmap, replay->wsize);
2896 (replay->bitmap)[frlast] = 1;
2897 }
2898 replay->lastseq = seq;
2899
2900 /* larger is good */
2901 } else {
2902 /* seq is equal or less than lastseq. */
2903 diff = replay->lastseq - seq;
2904
2905 /* over range to check, i.e. too old or wrapped */
2906 if (diff >= wsizeb) {
2907 lck_mtx_unlock(sadb_mutex);
2908 return 1;
2909 }
2910
2911 fr = frlast - diff / 8;
2912
2913 /* this packet already seen ? */
2914 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2915 lck_mtx_unlock(sadb_mutex);
2916 return 1;
2917 }
2918
2919 /* mark as seen */
2920 (replay->bitmap)[fr] |= (1 << (diff % 8));
2921
2922 /* out of order but good */
2923 }
2924
2925ok:
2926 if (replay->count == ~0) {
2927
2928 /* set overflow flag */
2929 replay->overflow++;
2930
2931 /* don't increment, no more packets accepted */
2932 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
2933 lck_mtx_unlock(sadb_mutex);
2934 return 1;
2935 }
2936
2937		ipseclog((LOG_WARNING, "replay counter has cycled %d times. %s\n",
2938 replay->overflow, ipsec_logsastr(sav)));
2939 }
2940
2941 replay->count++;
2942
2943 lck_mtx_unlock(sadb_mutex);
2944 return 0;
2945}
2946
2947/*
2948 * shift variable length buffer to left.
2949 * IN: bitmap: pointer to the buffer
2950 *	nbit:	the number of bits to shift.
2951 * wsize: buffer size (bytes).
2952 */
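/*
 * Example: with wsize = 2 and nbit = 3, a bitmap of { 0x01, 0x80 }
 * becomes { 0x0c, 0x00 }; bits shifted out of bitmap[i] carry into
 * bitmap[i - 1], so bitmap[0] holds the most significant end of the
 * window.
 */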
2953static void
2954vshiftl(unsigned char *bitmap, int nbit, int wsize)
2955{
2956 int s, j, i;
2957 unsigned char over;
2958
2959 for (j = 0; j < nbit; j += 8) {
2960 s = (nbit - j < 8) ? (nbit - j): 8;
2961 bitmap[0] <<= s;
2962 for (i = 1; i < wsize; i++) {
2963 over = (bitmap[i] >> (8 - s));
2964 bitmap[i] <<= s;
2965 bitmap[i-1] |= over;
2966 }
2967 }
2968
2969 return;
2970}
2971
2972const char *
2973ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
2974{
2975 static char buf[256] __attribute__((aligned(4)));
2976 char *p;
2977 u_int8_t *s, *d;
2978
2979 s = (u_int8_t *)(&ip->ip_src);
2980 d = (u_int8_t *)(&ip->ip_dst);
2981
2982 p = buf;
2983 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
2984 while (p && *p)
2985 p++;
2986 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
2987 s[0], s[1], s[2], s[3]);
2988 while (p && *p)
2989 p++;
2990 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
2991 d[0], d[1], d[2], d[3]);
2992 while (p && *p)
2993 p++;
2994 snprintf(p, sizeof(buf) - (p - buf), ")");
2995
2996 return buf;
2997}
2998
2999#if INET6
3000const char *
3001ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3002{
3003 static char buf[256] __attribute__((aligned(4)));
3004 char *p;
3005
3006 p = buf;
3007 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3008 while (p && *p)
3009 p++;
3010 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3011 ip6_sprintf(&ip6->ip6_src));
3012 while (p && *p)
3013 p++;
3014 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3015 ip6_sprintf(&ip6->ip6_dst));
3016 while (p && *p)
3017 p++;
3018 snprintf(p, sizeof(buf) - (p - buf), ")");
3019
3020 return buf;
3021}
3022#endif /*INET6*/
3023
3024const char *
3025ipsec_logsastr(struct secasvar *sav)
3026{
3027 static char buf[256] __attribute__((aligned(4)));
3028 char *p;
3029 struct secasindex *saidx = &sav->sah->saidx;
3030
3031 /* validity check */
3032 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3033 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family)
3034 panic("ipsec_logsastr: family mismatched.\n");
3035
3036 p = buf;
3037 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3038 while (p && *p)
3039 p++;
3040 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3041 u_int8_t *s, *d;
3042 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3043 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3044 snprintf(p, sizeof(buf) - (p - buf),
3045 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3046 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3047 }
3048#if INET6
3049 else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3050 snprintf(p, sizeof(buf) - (p - buf),
3051 "src=%s",
3052 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3053 while (p && *p)
3054 p++;
3055 snprintf(p, sizeof(buf) - (p - buf),
3056 " dst=%s",
3057 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3058 }
3059#endif
3060 while (p && *p)
3061 p++;
3062 snprintf(p, sizeof(buf) - (p - buf), ")");
3063
3064 return buf;
3065}
3066
3067void
3068ipsec_dumpmbuf(struct mbuf *m)
3069{
3070 int totlen;
3071 int i;
3072 u_char *p;
3073
3074 totlen = 0;
3075 printf("---\n");
3076 while (m) {
3077 p = mtod(m, u_char *);
3078 for (i = 0; i < m->m_len; i++) {
3079 printf("%02x ", p[i]);
3080 totlen++;
3081 if (totlen % 16 == 0)
3082 printf("\n");
3083 }
3084 m = m->m_next;
3085 }
3086 if (totlen % 16 != 0)
3087 printf("\n");
3088 printf("---\n");
3089}
3090
3091#if INET
3092/*
3093 * IPsec output logic for IPv4.
3094 */
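/*
 * ipsec4_output_internal() applies a single SA to state->m.  In tunnel
 * mode it first prepends the outer header (IPv4-in-IPv4 here, or hands
 * IPv4-in-IPv6 off to ipsec6_update_routecache_and_output()) and
 * refreshes the SAH route cache, then runs the ESP/AH/IPCOMP output
 * transform.
 */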
3095static int
3096ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3097{
3098 struct ip *ip = NULL;
3099 int error = 0;
3100 struct sockaddr_in *dst4;
3101 struct route *ro4;
3102
3103 /* validity check */
3104 if (sav == NULL || sav->sah == NULL) {
3105 error = EINVAL;
3106 goto bad;
3107 }
3108
3109 /*
3110	 * If there is no valid SA, we give up processing any
3111	 * further.  In such a case, the SA's status is changed
3112	 * from DYING to DEAD after allocation.  If a packet is
3113	 * sent to the receiver using a dead SA, the receiver
3114	 * cannot decode it because the SA is already dead.
3115 */
3116 if (sav->state != SADB_SASTATE_MATURE
3117 && sav->state != SADB_SASTATE_DYING) {
3118 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3119 error = EINVAL;
3120 goto bad;
3121 }
3122
3123 state->outgoing_if = sav->sah->outgoing_if;
3124
3125 /*
3126	 * There may be the case that the SA status is changed while
3127	 * we are referring to it.  (The original code called splsoftnet() here.)
3128 */
3129
3130 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3131 /*
3132 * build IPsec tunnel.
3133 */
3134 state->m = ipsec4_splithdr(state->m);
3135 if (!state->m) {
3136 error = ENOMEM;
3137 goto bad;
3138 }
3139
3140 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3141 error = ipsec46_encapsulate(state, sav);
3142 if (error) {
3143 // packet already freed by encapsulation error handling
3144 state->m = NULL;
3145 return error;
3146 }
3147
3148 error = ipsec6_update_routecache_and_output(state, sav);
3149 return error;
3150
3151 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3152 error = ipsec4_encapsulate(state->m, sav);
3153 if (error) {
3154 state->m = NULL;
3155 goto bad;
3156 }
3157 ip = mtod(state->m, struct ip *);
3158
3159 // grab sadb_mutex, before updating sah's route cache
3160 lck_mtx_lock(sadb_mutex);
3161			ro4 = &sav->sah->sa_route;
3162 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3163 if (ro4->ro_rt != NULL) {
3164 RT_LOCK(ro4->ro_rt);
3165 }
3166 if (ROUTE_UNUSABLE(ro4) ||
3167 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3168 if (ro4->ro_rt != NULL)
3169 RT_UNLOCK(ro4->ro_rt);
3170 ROUTE_RELEASE(ro4);
3171 }
3172 if (ro4->ro_rt == 0) {
3173 dst4->sin_family = AF_INET;
3174 dst4->sin_len = sizeof(*dst4);
3175 dst4->sin_addr = ip->ip_dst;
3176 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3177 if (ro4->ro_rt == 0) {
3178 OSAddAtomic(1, &ipstat.ips_noroute);
3179 error = EHOSTUNREACH;
3180 // release sadb_mutex, after updating sah's route cache
3181 lck_mtx_unlock(sadb_mutex);
3182 goto bad;
3183 }
3184 RT_LOCK(ro4->ro_rt);
3185 }
3186
3187 /*
3188 * adjust state->dst if tunnel endpoint is offlink
3189 *
3190 * XXX: caching rt_gateway value in the state is
3191 * not really good, since it may point elsewhere
3192 * when the gateway gets modified to a larger
3193 * sockaddr via rt_setgate(). This is currently
3194 * addressed by SA_SIZE roundup in that routine.
3195 */
3196 if (ro4->ro_rt->rt_flags & RTF_GATEWAY)
3197 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3198 RT_UNLOCK(ro4->ro_rt);
3199 ROUTE_RELEASE(&state->ro);
3200 route_copyout(&state->ro, ro4, sizeof(state->ro));
3201 state->dst = (struct sockaddr *)dst4;
3202 state->tunneled = 4;
3203 // release sadb_mutex, after updating sah's route cache
3204 lck_mtx_unlock(sadb_mutex);
3205 } else {
3206 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3207 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3208 error = EAFNOSUPPORT;
3209 goto bad;
3210 }
3211 }
3212
3213 state->m = ipsec4_splithdr(state->m);
3214 if (!state->m) {
3215 error = ENOMEM;
3216 goto bad;
3217 }
3218 switch (sav->sah->saidx.proto) {
3219 case IPPROTO_ESP:
3220#if IPSEC_ESP
3221 if ((error = esp4_output(state->m, sav)) != 0) {
3222 state->m = NULL;
3223 goto bad;
3224 }
3225 break;
3226#else
3227 m_freem(state->m);
3228 state->m = NULL;
3229 error = EINVAL;
3230 goto bad;
3231#endif
3232 case IPPROTO_AH:
3233 if ((error = ah4_output(state->m, sav)) != 0) {
3234 state->m = NULL;
3235 goto bad;
3236 }
3237 break;
3238 case IPPROTO_IPCOMP:
3239 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3240 state->m = NULL;
3241 goto bad;
3242 }
3243 break;
3244 default:
3245 ipseclog((LOG_ERR,
3246 "ipsec4_output: unknown ipsec protocol %d\n",
3247 sav->sah->saidx.proto));
3248 m_freem(state->m);
3249 state->m = NULL;
3250 error = EINVAL;
3251 goto bad;
3252 }
3253
3254 if (state->m == 0) {
3255 error = ENOMEM;
3256 goto bad;
3257 }
3258
3259 return 0;
3260
3261bad:
3262 return error;
3263}
3264
3265int
3266ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3267{
3268 int error = 0;
3269 struct secasvar *sav = NULL;
3270
3271 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3272
3273 if (!state)
3274 panic("state == NULL in ipsec4_output");
3275 if (!state->m)
3276 panic("state->m == NULL in ipsec4_output");
3277 if (!state->dst)
3278 panic("state->dst == NULL in ipsec4_output");
3279
3280 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET);
3281 if (sav == NULL) {
3282 goto bad;
3283 }
3284
3285 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3286 goto bad;
3287 }
3288
3289 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3290 if (sav)
3291 key_freesav(sav, KEY_SADB_UNLOCKED);
3292 return 0;
3293
3294bad:
3295 if (sav)
3296 key_freesav(sav, KEY_SADB_UNLOCKED);
3297 m_freem(state->m);
3298 state->m = NULL;
3299 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3300 return error;
3301}
3302
3303int
3304ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3305{
3306 struct ip *ip = NULL;
3307 struct ipsecrequest *isr = NULL;
3308 struct secasindex saidx;
3309 struct secasvar *sav = NULL;
3310 int error = 0;
3311 struct sockaddr_in *sin;
3312
3313 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3314
3315 if (!state)
3316 panic("state == NULL in ipsec4_output");
3317 if (!state->m)
3318 panic("state->m == NULL in ipsec4_output");
3319 if (!state->dst)
3320 panic("state->dst == NULL in ipsec4_output");
3321
3322 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0,0,0,0,0);
3323
3324 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3325 printf("ipsec4_output: applied SP\n");
3326 kdebug_secpolicy(sp));
3327
3328 for (isr = sp->req; isr != NULL; isr = isr->next) {
3329		/* make an SA index to search for the proper SA */
3330 ip = mtod(state->m, struct ip *);
3331 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3332 saidx.mode = isr->saidx.mode;
3333 saidx.reqid = isr->saidx.reqid;
3334 sin = (struct sockaddr_in *)&saidx.src;
3335 if (sin->sin_len == 0) {
3336 sin->sin_len = sizeof(*sin);
3337 sin->sin_family = AF_INET;
3338 sin->sin_port = IPSEC_PORT_ANY;
3339 bcopy(&ip->ip_src, &sin->sin_addr,
3340 sizeof(sin->sin_addr));
3341 }
3342 sin = (struct sockaddr_in *)&saidx.dst;
3343 if (sin->sin_len == 0) {
3344 sin->sin_len = sizeof(*sin);
3345 sin->sin_family = AF_INET;
3346 sin->sin_port = IPSEC_PORT_ANY;
3347 /*
3348			 * Get the port from the packet if the upper layer is UDP,
3349			 * NAT traversal is enabled, and transport mode is in use.
3350 */
3351
3352 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3353 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3354
3355 if (ip->ip_p == IPPROTO_UDP) {
3356 struct udphdr *udp;
3357 size_t hlen;
3358#ifdef _IP_VHL
3359 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3360#else
3361 hlen = ip->ip_hl << 2;
3362#endif
3363 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3364 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3365 if (!state->m) {
3366 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3367 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3368 goto bad;
3369 }
3370 ip = mtod(state->m, struct ip *);
3371 }
3372 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3373 sin->sin_port = udp->uh_dport;
3374 }
3375 }
3376
3377 bcopy(&ip->ip_dst, &sin->sin_addr,
3378 sizeof(sin->sin_addr));
3379 }
3380
3381 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3382 /*
3383			 * IPsec processing is required, but no SA was found.
3384			 * I assume that key_acquire() has been called
3385			 * to get/establish the SA.  Here I discard
3386			 * this packet because it is the responsibility of the
3387			 * upper layer to retransmit it.
3388 */
3389 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3390 goto bad;
3391 }
3392
3393 /* validity check */
3394 if (sav == NULL) {
3395 switch (ipsec_get_reqlevel(isr)) {
3396 case IPSEC_LEVEL_USE:
3397 continue;
3398 case IPSEC_LEVEL_REQUIRE:
3399				/* must not be reached here. */
3400 panic("ipsec4_output: no SA found, but required.");
3401 }
3402 }
3403
3404 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3405 goto bad;
3406 }
3407 }
3408
3409 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3410 if (sav)
3411 key_freesav(sav, KEY_SADB_UNLOCKED);
3412 return 0;
3413
3414bad:
3415 if (sav)
3416 key_freesav(sav, KEY_SADB_UNLOCKED);
3417 m_freem(state->m);
3418 state->m = NULL;
3419 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3420 return error;
3421}
3422
3423#endif
3424
3425#if INET6
3426/*
3427 * IPsec output logic for IPv6, transport mode.
3428 */
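/*
 * ipsec6_output_trans_internal() applies one transport-mode SA
 * (ESP, AH or IPCOMP) to state->m and then rewrites ip6_plen to
 * account for the inserted header.
 */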
3429static int
3430ipsec6_output_trans_internal(
3431 struct ipsec_output_state *state,
3432 struct secasvar *sav,
3433 u_char *nexthdrp,
3434 struct mbuf *mprev)
3435{
3436 struct ip6_hdr *ip6;
3437 int error = 0;
3438 int plen;
3439
3440 /* validity check */
3441 if (sav == NULL || sav->sah == NULL) {
3442 error = EINVAL;
3443 goto bad;
3444 }
3445
3446 /*
3447	 * If there is no valid SA, we give up processing.
3448	 * See the same place in ipsec4_output().
3449 */
3450 if (sav->state != SADB_SASTATE_MATURE
3451 && sav->state != SADB_SASTATE_DYING) {
3452 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3453 error = EINVAL;
3454 goto bad;
3455 }
3456
3457 state->outgoing_if = sav->sah->outgoing_if;
3458
3459 switch (sav->sah->saidx.proto) {
3460 case IPPROTO_ESP:
3461#if IPSEC_ESP
3462 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3463#else
3464 m_freem(state->m);
3465 error = EINVAL;
3466#endif
3467 break;
3468 case IPPROTO_AH:
3469 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3470 break;
3471 case IPPROTO_IPCOMP:
3472 error = ipcomp6_output(state->m, nexthdrp, mprev->m_next, sav);
3473 break;
3474 default:
3475 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3476 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3477 m_freem(state->m);
3478 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3479 error = EINVAL;
3480 break;
3481 }
3482 if (error) {
3483 state->m = NULL;
3484 goto bad;
3485 }
3486 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3487 if (plen > IPV6_MAXPACKET) {
3488 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3489 "IPsec with IPv6 jumbogram is not supported\n"));
3490 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3491 error = EINVAL; /*XXX*/
3492 goto bad;
3493 }
3494 ip6 = mtod(state->m, struct ip6_hdr *);
3495 ip6->ip6_plen = htons(plen);
3496
3497 return 0;
3498bad:
3499 return error;
3500}
3501
3502int
3503ipsec6_output_trans(
3504 struct ipsec_output_state *state,
3505 u_char *nexthdrp,
3506 struct mbuf *mprev,
3507 struct secpolicy *sp,
3508 __unused int flags,
3509 int *tun)
3510{
3511 struct ip6_hdr *ip6;
3512 struct ipsecrequest *isr = NULL;
3513 struct secasindex saidx;
3514 int error = 0;
3515 struct sockaddr_in6 *sin6;
3516 struct secasvar *sav = NULL;
3517
3518 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3519
3520 if (!state)
3521 panic("state == NULL in ipsec6_output_trans");
3522 if (!state->m)
3523 panic("state->m == NULL in ipsec6_output_trans");
3524 if (!nexthdrp)
3525 panic("nexthdrp == NULL in ipsec6_output_trans");
3526 if (!mprev)
3527 panic("mprev == NULL in ipsec6_output_trans");
3528 if (!sp)
3529 panic("sp == NULL in ipsec6_output_trans");
3530 if (!tun)
3531 panic("tun == NULL in ipsec6_output_trans");
3532
3533 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3534	    printf("ipsec6_output_trans: applied SP\n");
3535 kdebug_secpolicy(sp));
3536
3537 *tun = 0;
3538 for (isr = sp->req; isr; isr = isr->next) {
3539 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3540 /* the rest will be handled by ipsec6_output_tunnel() */
3541 break;
3542 }
3543
3544		/* make an SA index to search for the proper SA */
3545 ip6 = mtod(state->m, struct ip6_hdr *);
3546 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3547 saidx.mode = isr->saidx.mode;
3548 saidx.reqid = isr->saidx.reqid;
3549 sin6 = (struct sockaddr_in6 *)&saidx.src;
3550 if (sin6->sin6_len == 0) {
3551 sin6->sin6_len = sizeof(*sin6);
3552 sin6->sin6_family = AF_INET6;
3553 sin6->sin6_port = IPSEC_PORT_ANY;
3554 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3555 sizeof(ip6->ip6_src));
3556 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3557 /* fix scope id for comparing SPD */
3558 sin6->sin6_addr.s6_addr16[1] = 0;
3559 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3560 }
3561 }
3562 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3563 if (sin6->sin6_len == 0) {
3564 sin6->sin6_len = sizeof(*sin6);
3565 sin6->sin6_family = AF_INET6;
3566 sin6->sin6_port = IPSEC_PORT_ANY;
3567 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3568 sizeof(ip6->ip6_dst));
3569 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3570 /* fix scope id for comparing SPD */
3571 sin6->sin6_addr.s6_addr16[1] = 0;
3572 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3573 }
3574 }
3575
3576 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3577 /*
3578			 * IPsec processing is required, but no SA was found.
3579			 * I assume that key_acquire() has been called
3580			 * to get/establish the SA.  Here I discard
3581			 * this packet because it is the responsibility of the
3582			 * upper layer to retransmit it.
3583 */
3584 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3585 error = ENOENT;
3586
3587 /*
3588			 * Notify ourselves that the packet has been discarded.
3589			 * I believe this is better than just silently
3590			 * dropping it. (jinmei@kame.net)
3591 * XXX: should we restrict the error to TCP packets?
3592 * XXX: should we directly notify sockets via
3593 * pfctlinputs?
3594 */
3595 icmp6_error(state->m, ICMP6_DST_UNREACH,
3596 ICMP6_DST_UNREACH_ADMIN, 0);
3597 state->m = NULL; /* icmp6_error freed the mbuf */
3598 goto bad;
3599 }
3600
3601 /* validity check */
3602 if (sav == NULL) {
3603 switch (ipsec_get_reqlevel(isr)) {
3604 case IPSEC_LEVEL_USE:
3605 continue;
3606 case IPSEC_LEVEL_REQUIRE:
3607				/* must not be reached here. */
3608 panic("ipsec6_output_trans: no SA found, but required.");
3609 }
3610 }
3611
3612 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
3613 goto bad;
3614 }
3615 }
3616
3617 /* if we have more to go, we need a tunnel mode processing */
3618 if (isr != NULL)
3619 *tun = 1;
3620
3621 if (sav)
3622 key_freesav(sav, KEY_SADB_UNLOCKED);
3623 return 0;
3624
3625bad:
3626 if (sav)
3627 key_freesav(sav, KEY_SADB_UNLOCKED);
3628 m_freem(state->m);
3629 state->m = NULL;
3630 return error;
3631}
3632
3633/*
3634 * IPsec output logic for IPv6, tunnel mode.
3635 */
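/*
 * ipsec6_output_tunnel_internal() builds the tunnel for one SA.  When
 * the outer header is IPv4 (IPv6-in-IPv4), the packet is encapsulated
 * with ipsec64_encapsulate() and sent directly via ip_output(); in that
 * case *must_be_last (when provided) is set because no further IPv6
 * processing may follow.  Otherwise the IPv6 outer header is built,
 * the SAH route cache is refreshed, and the ESP/AH transform is applied.
 */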
3636static int
3637ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3638{
3639 struct ip6_hdr *ip6;
3640 int error = 0;
3641 int plen;
3642 struct sockaddr_in6* dst6;
3643 struct route *ro6;
3644
3645 /* validity check */
3646 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3647 error = EINVAL;
3648 goto bad;
3649 }
3650
3651 /*
3652	 * If there is no valid SA, we give up processing.
3653	 * See the same place in ipsec4_output().
3654 */
3655 if (sav->state != SADB_SASTATE_MATURE
3656 && sav->state != SADB_SASTATE_DYING) {
3657 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3658 error = EINVAL;
3659 goto bad;
3660 }
3661
3662 state->outgoing_if = sav->sah->outgoing_if;
3663
3664 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3665 /*
3666 * build IPsec tunnel.
3667 */
3668 state->m = ipsec6_splithdr(state->m);
3669 if (!state->m) {
3670 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3671 error = ENOMEM;
3672 goto bad;
3673 }
3674
3675 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3676 error = ipsec6_encapsulate(state->m, sav);
3677 if (error) {
3678 state->m = 0;
3679 goto bad;
3680 }
3681 ip6 = mtod(state->m, struct ip6_hdr *);
3682 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3683
3684 struct ip *ip;
3685 struct sockaddr_in* dst4;
3686 struct route *ro4 = NULL;
3687 struct route ro4_copy;
3688 struct ip_out_args ipoa = { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0,
3689 SO_TC_UNSPEC, _NET_SERVICE_TYPE_UNSPEC };
3690
3691 if (must_be_last)
3692 *must_be_last = 1;
3693
3694 state->tunneled = 4; /* must not process any further in ip6_output */
3695 error = ipsec64_encapsulate(state->m, sav);
3696 if (error) {
3697 state->m = 0;
3698 goto bad;
3699 }
3700 /* Now we have an IPv4 packet */
3701 ip = mtod(state->m, struct ip *);
3702
3703 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3704 lck_mtx_lock(sadb_mutex);
3705 ro4 = &sav->sah->sa_route;
3706 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3707 if (ro4->ro_rt) {
3708 RT_LOCK(ro4->ro_rt);
3709 }
3710 if (ROUTE_UNUSABLE(ro4) ||
3711 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3712 if (ro4->ro_rt != NULL)
3713 RT_UNLOCK(ro4->ro_rt);
3714 ROUTE_RELEASE(ro4);
3715 }
3716 if (ro4->ro_rt == NULL) {
3717 dst4->sin_family = AF_INET;
3718 dst4->sin_len = sizeof(*dst4);
3719 dst4->sin_addr = ip->ip_dst;
3720 } else {
3721 RT_UNLOCK(ro4->ro_rt);
3722 }
3723 route_copyout(&ro4_copy, ro4, sizeof(ro4_copy));
3724 // release sadb_mutex, after updating sah's route cache and getting a local copy
3725 lck_mtx_unlock(sadb_mutex);
3726 state->m = ipsec4_splithdr(state->m);
3727 if (!state->m) {
3728 error = ENOMEM;
3729 ROUTE_RELEASE(&ro4_copy);
3730 goto bad;
3731 }
3732 switch (sav->sah->saidx.proto) {
3733 case IPPROTO_ESP:
3734#if IPSEC_ESP
3735 if ((error = esp4_output(state->m, sav)) != 0) {
3736 state->m = NULL;
3737 ROUTE_RELEASE(&ro4_copy);
3738 goto bad;
3739 }
3740 break;
3741
3742#else
3743 m_freem(state->m);
3744 state->m = NULL;
3745 error = EINVAL;
3746 ROUTE_RELEASE(&ro4_copy);
3747 goto bad;
3748#endif
3749 case IPPROTO_AH:
3750 if ((error = ah4_output(state->m, sav)) != 0) {
3751 state->m = NULL;
3752 ROUTE_RELEASE(&ro4_copy);
3753 goto bad;
3754 }
3755 break;
3756 case IPPROTO_IPCOMP:
3757 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3758 state->m = NULL;
3759 ROUTE_RELEASE(&ro4_copy);
3760 goto bad;
3761 }
3762 break;
3763 default:
3764 ipseclog((LOG_ERR,
3765 "ipsec4_output: unknown ipsec protocol %d\n",
3766 sav->sah->saidx.proto));
3767 m_freem(state->m);
3768 state->m = NULL;
3769 error = EINVAL;
3770 ROUTE_RELEASE(&ro4_copy);
3771 goto bad;
3772 }
3773
3774 if (state->m == 0) {
3775 error = ENOMEM;
3776 ROUTE_RELEASE(&ro4_copy);
3777 goto bad;
3778 }
3779 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3780 ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);
3781
3782 ip = mtod(state->m, struct ip *);
3783 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3784 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3785 state->m = NULL;
3786 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3787 lck_mtx_lock(sadb_mutex);
3788 route_copyin(&ro4_copy, ro4, sizeof(ro4_copy));
3789 lck_mtx_unlock(sadb_mutex);
3790 if (error != 0)
3791 goto bad;
3792 goto done;
3793 } else {
3794 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3795 "unsupported inner family, spi=%u\n",
3796 (u_int32_t)ntohl(sav->spi)));
3797 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3798 error = EAFNOSUPPORT;
3799 goto bad;
3800 }
3801
3802 // grab sadb_mutex, before updating sah's route cache
3803 lck_mtx_lock(sadb_mutex);
3804 ro6 = &sav->sah->sa_route;
3805 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
3806 if (ro6->ro_rt) {
3807 RT_LOCK(ro6->ro_rt);
3808 }
3809 if (ROUTE_UNUSABLE(ro6) ||
3810 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
3811 if (ro6->ro_rt != NULL)
3812 RT_UNLOCK(ro6->ro_rt);
3813 ROUTE_RELEASE(ro6);
3814 }
3815 if (ro6->ro_rt == 0) {
3816 bzero(dst6, sizeof(*dst6));
3817 dst6->sin6_family = AF_INET6;
3818 dst6->sin6_len = sizeof(*dst6);
3819 dst6->sin6_addr = ip6->ip6_dst;
3820 rtalloc_scoped(ro6, sav->sah->outgoing_if);
3821 if (ro6->ro_rt) {
3822 RT_LOCK(ro6->ro_rt);
3823 }
3824 }
3825 if (ro6->ro_rt == 0) {
3826 ip6stat.ip6s_noroute++;
3827 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
3828 error = EHOSTUNREACH;
3829 // release sadb_mutex, after updating sah's route cache
3830 lck_mtx_unlock(sadb_mutex);
3831 goto bad;
3832 }
3833
3834 /*
3835 * adjust state->dst if tunnel endpoint is offlink
3836 *
3837 * XXX: caching rt_gateway value in the state is
3838 * not really good, since it may point elsewhere
3839 * when the gateway gets modified to a larger
3840 * sockaddr via rt_setgate(). This is currently
3841 * addressed by SA_SIZE roundup in that routine.
3842 */
3843 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
3844 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
3845 RT_UNLOCK(ro6->ro_rt);
3846 ROUTE_RELEASE(&state->ro);
3847 route_copyout(&state->ro, ro6, sizeof(state->ro));
3848 state->dst = (struct sockaddr *)dst6;
3849 state->tunneled = 6;
3850 // release sadb_mutex, after updating sah's route cache
3851 lck_mtx_unlock(sadb_mutex);
3852 }
3853
3854 state->m = ipsec6_splithdr(state->m);
3855 if (!state->m) {
3856 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3857 error = ENOMEM;
3858 goto bad;
3859 }
3860 ip6 = mtod(state->m, struct ip6_hdr *);
3861 switch (sav->sah->saidx.proto) {
3862 case IPPROTO_ESP:
3863#if IPSEC_ESP
3864 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3865#else
3866 m_freem(state->m);
3867 error = EINVAL;
3868#endif
3869 break;
3870 case IPPROTO_AH:
3871 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3872 break;
3873 case IPPROTO_IPCOMP:
3874 /* XXX code should be here */
3875 /*FALLTHROUGH*/
3876 default:
3877 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3878 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3879 m_freem(state->m);
3880 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3881 error = EINVAL;
3882 break;
3883 }
3884 if (error) {
3885 state->m = NULL;
3886 goto bad;
3887 }
3888 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3889 if (plen > IPV6_MAXPACKET) {
3890 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3891 "IPsec with IPv6 jumbogram is not supported\n"));
3892 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3893 error = EINVAL; /*XXX*/
3894 goto bad;
3895 }
3896 ip6 = mtod(state->m, struct ip6_hdr *);
3897 ip6->ip6_plen = htons(plen);
3898done:
3899 return 0;
3900
3901bad:
3902 return error;
3903}
3904
3905int
3906ipsec6_output_tunnel(
3907 struct ipsec_output_state *state,
3908 struct secpolicy *sp,
3909 __unused int flags)
3910{
3911 struct ip6_hdr *ip6;
3912 struct ipsecrequest *isr = NULL;
3913 struct secasindex saidx;
3914 struct secasvar *sav = NULL;
3915 int error = 0;
3916
3917 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3918
3919 if (!state)
3920 panic("state == NULL in ipsec6_output_tunnel");
3921 if (!state->m)
3922 panic("state->m == NULL in ipsec6_output_tunnel");
3923 if (!sp)
3924 panic("sp == NULL in ipsec6_output_tunnel");
3925
3926 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3927	    printf("ipsec6_output_tunnel: applied SP\n");
3928 kdebug_secpolicy(sp));
3929
3930 /*
3931 * transport mode ipsec (before the 1st tunnel mode) is already
3932 * processed by ipsec6_output_trans().
3933 */
3934 for (isr = sp->req; isr; isr = isr->next) {
3935 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
3936 break;
3937 }
3938
3939 for (/* already initialized */; isr; isr = isr->next) {
3940 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3941			/* In tunnel mode, the SA peers must be specified. */
3942 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3943 } else {
3944 /* make SA index to look for a proper SA */
3945 struct sockaddr_in6 *sin6;
3946
3947 bzero(&saidx, sizeof(saidx));
3948 saidx.proto = isr->saidx.proto;
3949 saidx.mode = isr->saidx.mode;
3950 saidx.reqid = isr->saidx.reqid;
3951
3952 ip6 = mtod(state->m, struct ip6_hdr *);
3953 sin6 = (struct sockaddr_in6 *)&saidx.src;
3954 if (sin6->sin6_len == 0) {
3955 sin6->sin6_len = sizeof(*sin6);
3956 sin6->sin6_family = AF_INET6;
3957 sin6->sin6_port = IPSEC_PORT_ANY;
3958 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3959 sizeof(ip6->ip6_src));
3960 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3961 /* fix scope id for comparing SPD */
3962 sin6->sin6_addr.s6_addr16[1] = 0;
3963 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3964 }
3965 }
3966 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3967 if (sin6->sin6_len == 0) {
3968 sin6->sin6_len = sizeof(*sin6);
3969 sin6->sin6_family = AF_INET6;
3970 sin6->sin6_port = IPSEC_PORT_ANY;
3971 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3972 sizeof(ip6->ip6_dst));
3973 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3974 /* fix scope id for comparing SPD */
3975 sin6->sin6_addr.s6_addr16[1] = 0;
3976 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3977 }
3978 }
3979 }
3980
3981 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3982 /*
3983 * IPsec processing is required, but no SA found.
3984			 * We assume that key_acquire() has been called
3985			 * to get/establish the SA.  Here we discard
3986			 * the packet because it is the upper layer's
3987			 * responsibility to retransmit it.
3988 */
3989 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3990 error = ENOENT;
3991 goto bad;
3992 }
3993
3994 /* validity check */
3995 if (sav == NULL) {
3996 switch (ipsec_get_reqlevel(isr)) {
3997 case IPSEC_LEVEL_USE:
3998 continue;
3999 case IPSEC_LEVEL_REQUIRE:
4000				/* must not be reached here. */
4001 panic("ipsec6_output_tunnel: no SA found, but required.");
4002 }
4003 }
4004
4005 /*
4006		 * If there is no valid SA, we give up on processing.
4007		 * See the same check in ipsec4_output().
4008 */
4009 if (sav->state != SADB_SASTATE_MATURE
4010 && sav->state != SADB_SASTATE_DYING) {
4011 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4012 error = EINVAL;
4013 goto bad;
4014 }
4015
4016 int must_be_last = 0;
4017
4018 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4019 goto bad;
4020 }
4021
4022 if (must_be_last && isr->next) {
4023 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4024 "IPv4 must be outer layer, spi=%u\n",
4025 (u_int32_t)ntohl(sav->spi)));
4026 error = EINVAL;
4027 goto bad;
4028 }
4029 }
4030
4031 if (sav)
4032 key_freesav(sav, KEY_SADB_UNLOCKED);
4033 return 0;
4034
4035bad:
4036 if (sav)
4037 key_freesav(sav, KEY_SADB_UNLOCKED);
4038 if (state->m)
4039 m_freem(state->m);
4040 state->m = NULL;
4041 return error;
4042}
4043
4044int
4045ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4046{
4047 int error = 0;
4048 struct secasvar *sav = NULL;
4049
4050 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4051
4052 if (!state)
4053 panic("state == NULL in ipsec6_output");
4054 if (!state->m)
4055 panic("state->m == NULL in ipsec6_output");
4056 if (!nexthdrp)
4057 panic("nexthdrp == NULL in ipsec6_output");
4058 if (!mprev)
4059 panic("mprev == NULL in ipsec6_output");
4060
4061 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6);
4062 if (sav == NULL) {
4063 goto bad;
4064 }
4065
4066 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4067 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4068 goto bad;
4069 }
4070 }
4071 else {
4072 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4073 goto bad;
4074 }
4075 }
4076
4077 if (sav)
4078 key_freesav(sav, KEY_SADB_UNLOCKED);
4079 return 0;
4080
4081bad:
4082 if (sav)
4083 key_freesav(sav, KEY_SADB_UNLOCKED);
4084 m_freem(state->m);
4085 state->m = NULL;
4086 return error;
4087}
4088#endif /*INET6*/
4089
4090#if INET
4091/*
4092 * Chop the IP header and options off from the payload.
4093 */
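/*
 * A rough sketch of the result (lengths are illustrative, not taken
 * from this code): the leading mbuf ends up holding only the header.
 *
 *	before:	[ pkthdr | IP hdr + options + payload ... ] -> ...
 *	after:	[ pkthdr | IP hdr + options (hlen) ] -> [ payload ... ] -> ...
 */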
4094struct mbuf *
4095ipsec4_splithdr(struct mbuf *m)
4096{
4097 struct mbuf *mh;
4098 struct ip *ip;
4099 int hlen;
4100
4101 if (m->m_len < sizeof(struct ip))
4102 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4103 ip = mtod(m, struct ip *);
4104#ifdef _IP_VHL
4105 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4106#else
4107 hlen = ip->ip_hl << 2;
4108#endif
4109 if (m->m_len > hlen) {
4110 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4111 if (!mh) {
4112 m_freem(m);
4113 return NULL;
4114 }
4115 M_COPY_PKTHDR(mh, m);
4116 MH_ALIGN(mh, hlen);
4117 m->m_flags &= ~M_PKTHDR;
4118 m_mchtype(m, MT_DATA);
4119 m->m_len -= hlen;
4120 m->m_data += hlen;
4121 mh->m_next = m;
4122 m = mh;
4123 m->m_len = hlen;
4124 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4125 } else if (m->m_len < hlen) {
4126 m = m_pullup(m, hlen);
4127 if (!m)
4128 return NULL;
4129 }
4130 return m;
4131}
4132#endif
4133
4134#if INET6
4135struct mbuf *
4136ipsec6_splithdr(struct mbuf *m)
4137{
4138 struct mbuf *mh;
4139 struct ip6_hdr *ip6;
4140 int hlen;
4141
4142 if (m->m_len < sizeof(struct ip6_hdr))
4143 panic("ipsec6_splithdr: first mbuf too short");
4144 ip6 = mtod(m, struct ip6_hdr *);
4145 hlen = sizeof(struct ip6_hdr);
4146 if (m->m_len > hlen) {
4147 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4148 if (!mh) {
4149 m_freem(m);
4150 return NULL;
4151 }
4152 M_COPY_PKTHDR(mh, m);
4153 MH_ALIGN(mh, hlen);
4154 m->m_flags &= ~M_PKTHDR;
4155 m_mchtype(m, MT_DATA);
4156 m->m_len -= hlen;
4157 m->m_data += hlen;
4158 mh->m_next = m;
4159 m = mh;
4160 m->m_len = hlen;
4161 bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
4162 } else if (m->m_len < hlen) {
4163 m = m_pullup(m, hlen);
4164 if (!m)
4165 return NULL;
4166 }
4167 return m;
4168}
4169#endif
4170
4171/* validate inbound IPsec tunnel packet. */
4172int
4173ipsec4_tunnel_validate(
4174 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4175 int off,
4176 u_int nxt0,
4177 struct secasvar *sav,
4178 sa_family_t *ifamily)
4179{
4180 u_int8_t nxt = nxt0 & 0xff;
4181 struct sockaddr_in *sin;
4182 struct sockaddr_in osrc, odst, i4src, i4dst;
4183 struct sockaddr_in6 i6src, i6dst;
4184 int hlen;
4185 struct secpolicy *sp;
4186 struct ip *oip;
4187
4188 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4189
4190#if DIAGNOSTIC
4191 if (m->m_len < sizeof(struct ip))
4192 panic("too short mbuf on ipsec4_tunnel_validate");
4193#endif
4194 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4195 return 0;
4196 if (m->m_pkthdr.len < off + sizeof(struct ip))
4197 return 0;
4198 /* do not decapsulate if the SA is for transport mode only */
4199 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4200 return 0;
4201
4202 oip = mtod(m, struct ip *);
4203#ifdef _IP_VHL
4204 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4205#else
4206 hlen = oip->ip_hl << 2;
4207#endif
4208 if (hlen != sizeof(struct ip))
4209 return 0;
4210
4211 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4212 if (sin->sin_family != AF_INET)
4213 return 0;
4214 if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0)
4215 return 0;
4216
4217 if (sav->sah->ipsec_if != NULL) {
4218		// ipsec interface SAs don't have policies.
4219 if (nxt == IPPROTO_IPV4) {
4220 *ifamily = AF_INET;
4221 } else if (nxt == IPPROTO_IPV6) {
4222 *ifamily = AF_INET6;
4223 } else {
4224 return 0;
4225 }
4226 return 1;
4227 }
4228
4229 /* XXX slow */
4230 bzero(&osrc, sizeof(osrc));
4231 bzero(&odst, sizeof(odst));
4232 osrc.sin_family = odst.sin_family = AF_INET;
4233 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4234 osrc.sin_addr = oip->ip_src;
4235 odst.sin_addr = oip->ip_dst;
4236 /*
4237 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
4238	 * - if the inner destination is a multicast address, there can be
4239	 *   multiple permissible inner source addresses, so an implementation
4240	 *   may want to skip verification of the inner source address against
4241	 *   the SPD selector.
4242	 * - if the inner protocol is ICMP, the packet may be an error report
4243	 *   from routers on the other side of the VPN cloud (R in the
4244	 *   following diagram).  in this case, we cannot verify the inner
4245	 *   source address against the SPD selector.
4246	 *	me -- gw === gw -- R -- you
4247	 *
4248	 * we consider the first bullet to be the user's responsibility on SPD
4249	 * entry configuration (if you need to encrypt multicast traffic, set
4250	 * the source range of the SPD selector to 0.0.0.0/0, or list explicit
4251	 * address ranges for the possible senders).
4252	 * the second bullet is not taken care of (yet).
4253	 *
4254	 * therefore, we do not do anything special about the inner source.
4255 */
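	/*
	 * An SPD entry for the multicast case above might look like the
	 * following setkey(8) sketch (addresses and tunnel peers are
	 * placeholder examples, not values used by this code):
	 *
	 *	spdadd 0.0.0.0/0 233.252.0.0/24 any -P out ipsec
	 *		esp/tunnel/192.0.2.1-198.51.100.1/require ;
	 *
	 * i.e. leave the source selector wide open so that any permissible
	 * inner source matches the policy.
	 */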
4256 if (nxt == IPPROTO_IPV4) {
4257 bzero(&i4src, sizeof(struct sockaddr_in));
4258 bzero(&i4dst, sizeof(struct sockaddr_in));
4259 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4260 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4261 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4262 (caddr_t)&i4src.sin_addr);
4263 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4264 (caddr_t)&i4dst.sin_addr);
4265 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4266 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4267 } else if (nxt == IPPROTO_IPV6) {
4268 bzero(&i6src, sizeof(struct sockaddr_in6));
4269 bzero(&i6dst, sizeof(struct sockaddr_in6));
4270 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4271 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4272 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4273 (caddr_t)&i6src.sin6_addr);
4274 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4275 (caddr_t)&i6dst.sin6_addr);
4276 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4277 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4278 } else
4279 return 0; /* unsupported family */
4280
4281 if (!sp)
4282 return 0;
4283
4284 key_freesp(sp, KEY_SADB_UNLOCKED);
4285
4286 return 1;
4287}
4288
4289#if INET6
4290/* validate inbound IPsec tunnel packet. */
4291int
4292ipsec6_tunnel_validate(
4293 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4294 int off,
4295 u_int nxt0,
4296 struct secasvar *sav,
4297 sa_family_t *ifamily)
4298{
4299 u_int8_t nxt = nxt0 & 0xff;
4300 struct sockaddr_in6 *sin6;
4301 struct sockaddr_in i4src, i4dst;
4302 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4303 struct secpolicy *sp;
4304 struct ip6_hdr *oip6;
4305
4306 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4307
4308#if DIAGNOSTIC
4309 if (m->m_len < sizeof(struct ip6_hdr))
4310 panic("too short mbuf on ipsec6_tunnel_validate");
4311#endif
4312 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4313 return 0;
4314
4315 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr))
4316 return 0;
4317 /* do not decapsulate if the SA is for transport mode only */
4318 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4319 return 0;
4320
4321 oip6 = mtod(m, struct ip6_hdr *);
4322	/* AF_INET should be supported, but at this moment we don't support it. */
4323 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4324 if (sin6->sin6_family != AF_INET6)
4325 return 0;
4326 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr))
4327 return 0;
4328
4329 if (sav->sah->ipsec_if != NULL) {
4330		// ipsec interface SAs don't have policies.
4331 if (nxt == IPPROTO_IPV4) {
4332 *ifamily = AF_INET;
4333 } else if (nxt == IPPROTO_IPV6) {
4334 *ifamily = AF_INET6;
4335 } else {
4336 return 0;
4337 }
4338 return 1;
4339 }
4340
4341 /* XXX slow */
4342 bzero(&osrc, sizeof(osrc));
4343 bzero(&odst, sizeof(odst));
4344 osrc.sin6_family = odst.sin6_family = AF_INET6;
4345 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4346 osrc.sin6_addr = oip6->ip6_src;
4347 odst.sin6_addr = oip6->ip6_dst;
4348
4349 /*
4350	 * regarding inner source address validation, see the long comment
4351	 * in ipsec4_tunnel_validate().
4352 */
4353
4354 if (nxt == IPPROTO_IPV4) {
4355 bzero(&i4src, sizeof(struct sockaddr_in));
4356 bzero(&i4dst, sizeof(struct sockaddr_in));
4357 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4358 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4359 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4360 (caddr_t)&i4src.sin_addr);
4361 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4362 (caddr_t)&i4dst.sin_addr);
4363 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4364 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4365 } else if (nxt == IPPROTO_IPV6) {
4366 bzero(&i6src, sizeof(struct sockaddr_in6));
4367 bzero(&i6dst, sizeof(struct sockaddr_in6));
4368 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4369 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4370 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4371 (caddr_t)&i6src.sin6_addr);
4372 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4373 (caddr_t)&i6dst.sin6_addr);
4374 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4375 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4376 } else
4377 return 0; /* unsupported family */
4378 /*
4379	 * when there is no suitable inbound policy for an ipsec tunnel-mode
4380	 * packet, the kernel never decapsulates the tunneled packet as ipsec
4381	 * tunnel mode, even when the system-wide policy is "none".  it then
4382	 * leaves the packet to the generic tunnel module.  if there is no
4383	 * matching generic tunnel rule, the packet is rejected and the
4384	 * statistics are counted up.
4385 */
4386 if (!sp)
4387 return 0;
4388 key_freesp(sp, KEY_SADB_UNLOCKED);
4389
4390 return 1;
4391}
4392#endif
4393
4394/*
4395 * Make an mbuf chain for encryption.
4396 * If the original mbuf chain contains an mbuf with a shared cluster,
4397 * allocate a new cluster and copy the data into it.
4398 * XXX: this hack is inefficient, but it is necessary to handle cases
4399 * such as TCP retransmission...
4400 */
4401struct mbuf *
4402ipsec_copypkt(struct mbuf *m)
4403{
4404 struct mbuf *n, **mpp, *mnew;
4405
4406 for (n = m, mpp = &m; n; n = n->m_next) {
4407 if (n->m_flags & M_EXT) {
4408 /*
4409			 * Make a copy only if there is more than one reference
4410			 * to the cluster.
4411 * XXX: is this approach effective?
4412 */
4413 if (
4414 m_get_ext_free(n) != NULL ||
4415 m_mclhasreference(n)
4416 )
4417 {
4418 int remain, copied;
4419 struct mbuf *mm;
4420
4421 if (n->m_flags & M_PKTHDR) {
4422 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4423 if (mnew == NULL)
4424 goto fail;
4425 M_COPY_PKTHDR(mnew, n);
4426 }
4427 else {
4428 MGET(mnew, M_DONTWAIT, MT_DATA);
4429 if (mnew == NULL)
4430 goto fail;
4431 }
4432 mnew->m_len = 0;
4433 mm = mnew;
4434
4435 /*
4436 * Copy data. If we don't have enough space to
4437				 * store all of the data, allocate a cluster
4438 * or additional mbufs.
4439 * XXX: we don't use m_copyback(), since the
4440 * function does not use clusters and thus is
4441 * inefficient.
4442 */
4443 remain = n->m_len;
4444 copied = 0;
4445 while (1) {
4446 int len;
4447 struct mbuf *mn;
4448
4449 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN))
4450 len = remain;
4451 else { /* allocate a cluster */
4452 MCLGET(mm, M_DONTWAIT);
4453 if (!(mm->m_flags & M_EXT)) {
4454 m_free(mm);
4455 goto fail;
4456 }
4457 len = remain < MCLBYTES ?
4458 remain : MCLBYTES;
4459 }
4460
4461 bcopy(n->m_data + copied, mm->m_data,
4462 len);
4463
4464 copied += len;
4465 remain -= len;
4466 mm->m_len = len;
4467
4468 if (remain <= 0) /* completed? */
4469 break;
4470
4471 /* need another mbuf */
4472 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4473 if (mn == NULL)
4474 goto fail;
4475 mn->m_pkthdr.rcvif = NULL;
4476 mm->m_next = mn;
4477 mm = mn;
4478 }
4479
4480 /* adjust chain */
4481 mm->m_next = m_free(n);
4482 n = mm;
4483 *mpp = mnew;
4484 mpp = &n->m_next;
4485
4486 continue;
4487 }
4488 }
4489 *mpp = n;
4490 mpp = &n->m_next;
4491 }
4492
4493 return(m);
4494 fail:
4495 m_freem(m);
4496 return(NULL);
4497}
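/*
 * Typical use in an output path (an illustrative sketch only; the error
 * handling shown is assumed, not copied from an actual caller):
 *
 *	m = ipsec_copypkt(m);
 *	if (m == NULL)
 *		return ENOBUFS;		// the original chain was freed
 *
 * after which the chain may be modified without touching shared
 * cluster data.
 */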
4498
4499/*
4500 * Tags are allocated as mbufs for now; since our minimum size is MLEN,
4501 * we should make use of up to that much space.
4502 */
4503#define IPSEC_TAG_HEADER \
4504
4505struct ipsec_tag {
4506 struct socket *socket;
4507 u_int32_t history_count;
4508 struct ipsec_history history[];
4509};
4510
4511#define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
4512#define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
4513#define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
4514 sizeof(struct ipsec_history))
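/*
 * Resulting tag layout (a rough sketch; actual offsets depend on the
 * m_tag and mbuf definitions of the build, so no concrete sizes are
 * assumed here):
 *
 *	[ struct m_tag ][ socket | history_count | history[] ... (slack) ]
 *	                \_________________ IPSEC_TAG_SIZE _______________/
 *
 * where history[] holds at most IPSEC_HISTORY_MAX entries.
 */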
4515
4516static struct ipsec_tag *
4517ipsec_addaux(
4518 struct mbuf *m)
4519{
4520 struct m_tag *tag;
4521
4522 /* Check if the tag already exists */
4523 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4524
4525 if (tag == NULL) {
4526 struct ipsec_tag *itag;
4527
4528 /* Allocate a tag */
4529 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4530 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4531
4532 if (tag) {
4533 itag = (struct ipsec_tag*)(tag + 1);
4534 itag->socket = 0;
4535 itag->history_count = 0;
4536
4537 m_tag_prepend(m, tag);
4538 }
4539 }
4540
4541 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4542}
4543
4544static struct ipsec_tag *
4545ipsec_findaux(
4546 struct mbuf *m)
4547{
4548 struct m_tag *tag;
4549
4550 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4551
4552 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4553}
4554
4555void
4556ipsec_delaux(
4557 struct mbuf *m)
4558{
4559 struct m_tag *tag;
4560
4561 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4562
4563 if (tag) {
4564 m_tag_delete(m, tag);
4565 }
4566}
4567
4568/* if the aux buffer is unnecessary, nuke it. */
4569static void
4570ipsec_optaux(
4571 struct mbuf *m,
4572 struct ipsec_tag *itag)
4573{
4574 if (itag && itag->socket == NULL && itag->history_count == 0) {
4575 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4576 }
4577}
4578
4579int
4580ipsec_setsocket(struct mbuf *m, struct socket *so)
4581{
4582 struct ipsec_tag *tag;
4583
4584 /* if so == NULL, don't insist on getting the aux mbuf */
4585 if (so) {
4586 tag = ipsec_addaux(m);
4587 if (!tag)
4588 return ENOBUFS;
4589 } else
4590 tag = ipsec_findaux(m);
4591 if (tag) {
4592 tag->socket = so;
4593 ipsec_optaux(m, tag);
4594 }
4595 return 0;
4596}
4597
4598struct socket *
4599ipsec_getsocket(struct mbuf *m)
4600{
4601 struct ipsec_tag *itag;
4602
4603 itag = ipsec_findaux(m);
4604 if (itag)
4605 return itag->socket;
4606 else
4607 return NULL;
4608}
4609
4610int
4611ipsec_addhist(
4612 struct mbuf *m,
4613 int proto,
4614 u_int32_t spi)
4615{
4616 struct ipsec_tag *itag;
4617 struct ipsec_history *p;
4618 itag = ipsec_addaux(m);
4619 if (!itag)
4620 return ENOBUFS;
4621 if (itag->history_count == IPSEC_HISTORY_MAX)
4622 return ENOSPC; /* XXX */
4623
4624 p = &itag->history[itag->history_count];
4625 itag->history_count++;
4626
4627 bzero(p, sizeof(*p));
4628 p->ih_proto = proto;
4629 p->ih_spi = spi;
4630
4631 return 0;
4632}
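/*
 * Illustrative call from an input path (a sketch; the surrounding error
 * handling is assumed rather than copied from a real caller):
 *
 *	if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0)
 *		goto fail;	// ENOBUFS or ENOSPC: history not recorded
 */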
4633
4634struct ipsec_history *
4635ipsec_gethist(
4636 struct mbuf *m,
4637 int *lenp)
4638{
4639 struct ipsec_tag *itag;
4640
4641 itag = ipsec_findaux(m);
4642 if (!itag)
4643 return NULL;
4644 if (itag->history_count == 0)
4645 return NULL;
4646 if (lenp)
4647 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4648 return itag->history;
4649}
4650
4651void
4652ipsec_clearhist(
4653 struct mbuf *m)
4654{
4655 struct ipsec_tag *itag;
4656
4657 itag = ipsec_findaux(m);
4658 if (itag) {
4659 itag->history_count = 0;
4660 }
4661 ipsec_optaux(m, itag);
4662}
4663
4664__private_extern__ int
4665ipsec_send_natt_keepalive(
4666 struct secasvar *sav)
4667{
4668 struct mbuf *m;
4669 struct ip *ip;
4670 int error;
4671 struct ip_out_args ipoa =
4672 { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0,
4673 SO_TC_UNSPEC, _NET_SERVICE_TYPE_UNSPEC };
4674 struct route ro;
4675 int keepalive_interval = natt_keepalive_interval;
4676
4677 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4678
4679 if ((esp_udp_encap_port & 0xFFFF) == 0 || sav->remote_ike_port == 0) return FALSE;
4680
4681 if (sav->natt_interval != 0) {
4682 keepalive_interval = (int)sav->natt_interval;
4683 }
4684
4685 // natt timestamp may have changed... reverify
4686 if ((natt_now - sav->natt_last_activity) < keepalive_interval) return FALSE;
4687
4688 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) return FALSE; // don't send these from the kernel
4689
4690 m = m_gethdr(M_NOWAIT, MT_DATA);
4691 if (m == NULL) return FALSE;
4692
4693 ip = (__typeof__(ip))m_mtod(m);
4694
4695	// this sends one type of NAT-T keepalive (Type 1, the ESP keepalive, isn't sent by the kernel)
4696 if ((sav->flags & SADB_X_EXT_ESP_KEEPALIVE) == 0) {
4697 struct udphdr *uh;
4698
4699 /*
4700		 * Type 2: a UDP packet complete with IP header.
4701		 * We must do this because UDP output requires
4702		 * an inpcb, which we don't have.  The UDP packet
4703		 * carries a one-byte payload, and that byte is
4704		 * set to 0xFF.
4705 */
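		/*
		 * Resulting frame (the single 0xFF byte is the NAT-keepalive
		 * marker from RFC 3948; header lengths are the option-less
		 * sizes implied by sizeof(struct udpiphdr)):
		 *
		 *	[ IP hdr (20) ][ UDP hdr (8) ][ 0xFF (1) ]
		 *	  sport = esp_udp_encap_port, dport = sav->remote_ike_port
		 */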
4706 uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4707 m->m_len = sizeof(struct udpiphdr) + 1;
4708 bzero(m_mtod(m), m->m_len);
4709 m->m_pkthdr.len = m->m_len;
4710
4711 ip->ip_len = m->m_len;
4712 ip->ip_ttl = ip_defttl;
4713 ip->ip_p = IPPROTO_UDP;
4714 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4715 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4716 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4717 } else {
4718 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4719 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4720 }
4721 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4722 uh->uh_dport = htons(sav->remote_ike_port);
4723 uh->uh_ulen = htons(1 + sizeof(*uh));
4724 uh->uh_sum = 0;
4725 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4726 }
4727
4728 // grab sadb_mutex, to get a local copy of sah's route cache
4729 lck_mtx_lock(sadb_mutex);
4730 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4731 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET)
4732 ROUTE_RELEASE(&sav->sah->sa_route);
4733
4734 route_copyout(&ro, &sav->sah->sa_route, sizeof(ro));
4735 lck_mtx_unlock(sadb_mutex);
4736
4737 necp_mark_packet_as_keepalive(m, TRUE);
4738
4739 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4740
4741 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
4742 lck_mtx_lock(sadb_mutex);
4743 route_copyin(&ro, &sav->sah->sa_route, sizeof(ro));
4744 lck_mtx_unlock(sadb_mutex);
4745 if (error == 0) {
4746 sav->natt_last_activity = natt_now;
4747 return TRUE;
4748 }
4749 return FALSE;
4750}
4751
4752__private_extern__ bool
4753ipsec_fill_offload_frame(ifnet_t ifp,
4754 struct secasvar *sav,
4755 struct ifnet_keepalive_offload_frame *frame,
4756 size_t frame_data_offset)
4757{
4758 u_int8_t *data = NULL;
4759 struct ip *ip = NULL;
4760 struct udphdr *uh = NULL;
4761
4762 if (sav == NULL || sav->sah == NULL || frame == NULL ||
4763 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
4764 sav->sah->saidx.dst.ss_family != AF_INET ||
4765 !(sav->flags & SADB_X_EXT_NATT) ||
4766 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
4767 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
4768 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
4769 (esp_udp_encap_port & 0xFFFF) == 0 ||
4770 sav->remote_ike_port == 0 ||
4771 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
4772 /* SA is not eligible for keepalive offload on this interface */
4773 return (FALSE);
4774 }
4775
4776 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
4777 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4778 /* Not enough room in this data frame */
4779 return (FALSE);
4780 }
4781
4782 data = frame->data;
4783 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
4784 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
4785
4786 frame->length = frame_data_offset + sizeof(struct udpiphdr) + 1;
4787 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
4788 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
4789
4790 bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
4791
4792 ip->ip_v = IPVERSION;
4793 ip->ip_hl = sizeof(struct ip) >> 2;
4794 ip->ip_off &= htons(~IP_OFFMASK);
4795 ip->ip_off &= htons(~IP_MF);
4796 switch (ip4_ipsec_dfbit) {
4797 case 0: /* clear DF bit */
4798 ip->ip_off &= htons(~IP_DF);
4799 break;
4800 case 1: /* set DF bit */
4801 ip->ip_off |= htons(IP_DF);
4802 break;
4803 default: /* copy DF bit */
4804 break;
4805 }
4806 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
4807 ip->ip_id = ip_randomid();
4808 ip->ip_ttl = ip_defttl;
4809 ip->ip_p = IPPROTO_UDP;
4810 ip->ip_sum = 0;
4811 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4812 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4813 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4814 } else {
4815 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4816 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4817 }
4818 ip->ip_sum = in_cksum_hdr_opt(ip);
4819 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4820 uh->uh_dport = htons(sav->remote_ike_port);
4821 uh->uh_ulen = htons(1 + sizeof(*uh));
4822 uh->uh_sum = 0;
4823 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4824
4825 if (sav->natt_offload_interval != 0) {
4826 frame->interval = sav->natt_offload_interval;
4827 } else if (sav->natt_interval != 0) {
4828 frame->interval = sav->natt_interval;
4829 } else {
4830 frame->interval = natt_keepalive_interval;
4831 }
4832 return (TRUE);
4833}