[apple/xnu.git] / bsd / netinet6 / ipsec.c (xnu-3789.41.3)
1/*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/* $FreeBSD: src/sys/netinet6/ipsec.c,v 1.3.2.7 2001/07/19 06:37:23 kris Exp $ */
30/* $KAME: ipsec.c,v 1.103 2001/05/24 07:14:18 sakane Exp $ */
31
32/*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61/*
62 * IPsec controller part.
63 */
64
65#include <sys/param.h>
66#include <sys/systm.h>
67#include <sys/malloc.h>
68#include <sys/mbuf.h>
69#include <sys/mcache.h>
70#include <sys/domain.h>
71#include <sys/protosw.h>
72#include <sys/socket.h>
73#include <sys/socketvar.h>
74#include <sys/errno.h>
75#include <sys/time.h>
76#include <sys/kernel.h>
77#include <sys/syslog.h>
78#include <sys/sysctl.h>
79#include <kern/locks.h>
80#include <sys/kauth.h>
81#include <libkern/OSAtomic.h>
82
83#include <net/if.h>
84#include <net/route.h>
85#include <net/if_ipsec.h>
86
87#include <netinet/in.h>
88#include <netinet/in_systm.h>
89#include <netinet/ip.h>
90#include <netinet/ip_var.h>
91#include <netinet/in_var.h>
92#include <netinet/udp.h>
93#include <netinet/udp_var.h>
94#include <netinet/ip_ecn.h>
95#if INET6
96#include <netinet6/ip6_ecn.h>
97#endif
98#include <netinet/tcp.h>
99#include <netinet/udp.h>
100
101#include <netinet/ip6.h>
102#if INET6
103#include <netinet6/ip6_var.h>
104#endif
105#include <netinet/in_pcb.h>
106#if INET6
107#include <netinet/icmp6.h>
108#endif
109
110#include <netinet6/ipsec.h>
111#if INET6
112#include <netinet6/ipsec6.h>
113#endif
114#include <netinet6/ah.h>
115#if INET6
116#include <netinet6/ah6.h>
117#endif
118#if IPSEC_ESP
119#include <netinet6/esp.h>
120#if INET6
121#include <netinet6/esp6.h>
122#endif
123#endif
124#include <netinet6/ipcomp.h>
125#if INET6
126#include <netinet6/ipcomp6.h>
127#endif
128#include <netkey/key.h>
129#include <netkey/keydb.h>
130#include <netkey/key_debug.h>
131
132#include <net/net_osdep.h>
133
134#if IPSEC_DEBUG
135int ipsec_debug = 1;
136#else
137int ipsec_debug = 0;
138#endif
139
140#include <sys/kdebug.h>
141#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
142#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
143#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8))
144#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8))
145#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8))
146
147extern lck_mtx_t *sadb_mutex;
148
149struct ipsecstat ipsecstat;
150int ip4_ah_cleartos = 1;
151int ip4_ah_offsetmask = 0; /* maybe IP_DF? */
152int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */
153int ip4_esp_trans_deflev = IPSEC_LEVEL_USE;
154int ip4_esp_net_deflev = IPSEC_LEVEL_USE;
155int ip4_ah_trans_deflev = IPSEC_LEVEL_USE;
156int ip4_ah_net_deflev = IPSEC_LEVEL_USE;
157struct secpolicy ip4_def_policy;
158int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
159int ip4_esp_randpad = -1;
160int esp_udp_encap_port = 0;
161static int sysctl_def_policy SYSCTL_HANDLER_ARGS;
162extern int natt_keepalive_interval;
163extern u_int64_t natt_now;
164
165struct ipsec_tag;
166
167SYSCTL_DECL(_net_inet_ipsec);
168#if INET6
169SYSCTL_DECL(_net_inet6_ipsec6);
170#endif
171/* net.inet.ipsec */
172SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS,
173 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, "");
174SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED,
175 &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", "");
176SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
177 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, "");
178SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
179 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, "");
180SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
181 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, "");
182SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
183 CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, "");
184SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS,
185 ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, "");
186SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK,
187 ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, "");
188SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT,
189 dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, "");
190SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN,
191 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, "");
192SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG,
193 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
194SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD,
195 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, "");
196
197/* for performance, we bypass ipsec until a security policy is set */
198int ipsec_bypass = 1;
199SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass,0, "");
200
201/*
202 * NAT Traversal requires a UDP port for encapsulation;
203 * esp_udp_encap_port controls which port is used. Racoon
204 * must set this to the port it is using locally for
205 * NAT traversal.
206 */
207SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port,
208 CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, "");
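/*
 * Editorial note, not part of the original file: a user-space IKE daemon such
 * as racoon would typically publish its local NAT-T port (4500 by convention,
 * per RFC 3948) roughly like this:
 *
 *	int port = 4500;
 *	sysctlbyname("net.inet.ipsec.esp_port", NULL, NULL, &port, sizeof(port));
 */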
209
210#if INET6
211struct ipsecstat ipsec6stat;
212int ip6_esp_trans_deflev = IPSEC_LEVEL_USE;
213int ip6_esp_net_deflev = IPSEC_LEVEL_USE;
214int ip6_ah_trans_deflev = IPSEC_LEVEL_USE;
215int ip6_ah_net_deflev = IPSEC_LEVEL_USE;
216struct secpolicy ip6_def_policy;
217int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */
218int ip6_esp_randpad = -1;
219
220/* net.inet6.ipsec6 */
221SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS,
222 stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, "");
223SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY,
224 def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, "");
225SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev,
226 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, "");
227SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev,
228 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, "");
229SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev,
230 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, "");
231SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev,
232 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, "");
233SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN,
234 ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, "");
235SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG,
236 debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, "");
237SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD,
238 esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, "");
239#endif /* INET6 */
240
241static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *,
242 int, int, int);
243static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int,
244 struct mbuf *, int);
245static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb);
246#if INET6
247static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb);
248#endif
249static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int);
250static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
251static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
252#if INET6
253static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int);
254static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *);
255#endif
256static struct inpcbpolicy *ipsec_newpcbpolicy(void);
257static void ipsec_delpcbpolicy(struct inpcbpolicy *);
258static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src);
259static int ipsec_set_policy(struct secpolicy **pcb_sp,
260 int optname, caddr_t request, size_t len, int priv);
261static void vshiftl(unsigned char *, int, int);
262static int ipsec_in_reject(struct secpolicy *, struct mbuf *);
263#if INET6
264static int ipsec64_encapsulate(struct mbuf *, struct secasvar *);
265static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav);
266static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav);
267#endif
268static struct ipsec_tag *ipsec_addaux(struct mbuf *);
269static struct ipsec_tag *ipsec_findaux(struct mbuf *);
270static void ipsec_optaux(struct mbuf *, struct ipsec_tag *);
271int ipsec_send_natt_keepalive(struct secasvar *sav);
272bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset);
273
274static int
275sysctl_def_policy SYSCTL_HANDLER_ARGS
276{
277 int old_policy = ip4_def_policy.policy;
278 int error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
279
280#pragma unused(arg1, arg2)
281
282 if (ip4_def_policy.policy != IPSEC_POLICY_NONE &&
283 ip4_def_policy.policy != IPSEC_POLICY_DISCARD) {
284 ip4_def_policy.policy = old_policy;
285 return EINVAL;
286 }
287
288 /* Turn off the bypass if the default security policy changes */
289 if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE)
290 ipsec_bypass = 0;
291
292 return error;
293}
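/*
 * Editorial note: the handler above only accepts IPSEC_POLICY_NONE or
 * IPSEC_POLICY_DISCARD for net.inet.ipsec.def_policy; any other value is
 * rolled back to the previous setting and EINVAL is returned. Moving the
 * default away from IPSEC_POLICY_NONE also clears ipsec_bypass, so packets
 * start going through the SPD checks.
 */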
294
295/*
296 * For an OUTBOUND packet that has a socket. Search the SPD for the packet,
297 * and return a pointer to the SP.
298 * OUT: NULL: no appropriate SP found, one of the following values is set to error.
299 * 0 : bypass
300 * EACCES : discard packet.
301 * ENOENT : ipsec_acquire() in progress, maybe.
302 * others : error occurred.
303 * others: a pointer to SP
304 *
305 * NOTE: the IPv6 mapped address concern is handled here.
306 */
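/*
 * Illustrative caller sketch (editorial, not taken from this file): callers
 * typically consume the contract above along these lines, releasing the
 * reference with key_freesp() when an SP is returned:
 *
 *	int error = 0;
 *	struct secpolicy *sp =
 *	    ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error);
 *	if (sp == NULL) {
 *		if (error != 0)
 *			goto bad;	(EACCES/ENOENT/other: drop or defer)
 *		(error == 0: bypass IPsec for this packet)
 *	} else {
 *		(apply sp->policy / sp->req, then drop the reference)
 *		key_freesp(sp, KEY_SADB_UNLOCKED);
 *	}
 */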
307struct secpolicy *
308ipsec4_getpolicybysock(struct mbuf *m,
309 u_int dir,
310 struct socket *so,
311 int *error)
312{
313 struct inpcbpolicy *pcbsp = NULL;
314 struct secpolicy *currsp = NULL; /* policy on socket */
315 struct secpolicy *kernsp = NULL; /* policy on kernel */
316
317 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
318 /* sanity check */
319 if (m == NULL || so == NULL || error == NULL)
320 panic("ipsec4_getpolicybysock: NULL pointer was passed.\n");
321
322 if (so->so_pcb == NULL) {
323 printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n");
324 return ipsec4_getpolicybyaddr(m, dir, 0, error);
325 }
326
327 switch (SOCK_DOM(so)) {
328 case PF_INET:
329 pcbsp = sotoinpcb(so)->inp_sp;
330 break;
331#if INET6
332 case PF_INET6:
333 pcbsp = sotoin6pcb(so)->in6p_sp;
334 break;
335#endif
336 }
337
338 if (!pcbsp){
339 /* Socket has not specified an IPSEC policy */
340 return ipsec4_getpolicybyaddr(m, dir, 0, error);
341 }
342
343 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0,0,0,0,0);
344
345 switch (SOCK_DOM(so)) {
346 case PF_INET:
347 /* set spidx in pcb */
348 *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so));
349 break;
350#if INET6
351 case PF_INET6:
352 /* set spidx in pcb */
353 *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
354 break;
355#endif
356 default:
357 panic("ipsec4_getpolicybysock: unsupported address family\n");
358 }
359 if (*error) {
360 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1,*error,0,0,0);
361 return NULL;
362 }
363
364 /* sanity check */
365 if (pcbsp == NULL)
366 panic("ipsec4_getpolicybysock: pcbsp is NULL.\n");
367
368 switch (dir) {
369 case IPSEC_DIR_INBOUND:
370 currsp = pcbsp->sp_in;
371 break;
372 case IPSEC_DIR_OUTBOUND:
373 currsp = pcbsp->sp_out;
374 break;
375 default:
376 panic("ipsec4_getpolicybysock: illegal direction.\n");
377 }
378
379 /* sanity check */
380 if (currsp == NULL)
381 panic("ipsec4_getpolicybysock: currsp is NULL.\n");
382
383 /* when privileged socket */
384 if (pcbsp->priv) {
385 switch (currsp->policy) {
386 case IPSEC_POLICY_BYPASS:
387 lck_mtx_lock(sadb_mutex);
388 currsp->refcnt++;
389 lck_mtx_unlock(sadb_mutex);
390 *error = 0;
391 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2,*error,0,0,0);
392 return currsp;
393
394 case IPSEC_POLICY_ENTRUST:
395 /* look for a policy in SPD */
396 kernsp = key_allocsp(&currsp->spidx, dir);
397
398 /* SP found */
399 if (kernsp != NULL) {
400 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
401 printf("DP ipsec4_getpolicybysock called "
402 "to allocate SP:0x%llx\n",
403 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
404 *error = 0;
405 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3,*error,0,0,0);
406 return kernsp;
407 }
408
409 /* no SP found */
410 lck_mtx_lock(sadb_mutex);
411 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
412 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
413 ipseclog((LOG_INFO,
414 "fixed system default policy: %d->%d\n",
415 ip4_def_policy.policy, IPSEC_POLICY_NONE));
416 ip4_def_policy.policy = IPSEC_POLICY_NONE;
417 }
418 ip4_def_policy.refcnt++;
419 lck_mtx_unlock(sadb_mutex);
420 *error = 0;
421 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4,*error,0,0,0);
422 return &ip4_def_policy;
423
424 case IPSEC_POLICY_IPSEC:
425 lck_mtx_lock(sadb_mutex);
426 currsp->refcnt++;
427 lck_mtx_unlock(sadb_mutex);
428 *error = 0;
429 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5,*error,0,0,0);
430 return currsp;
431
432 default:
433 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
434 "Invalid policy for PCB %d\n", currsp->policy));
435 *error = EINVAL;
436 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6,*error,0,0,0);
437 return NULL;
438 }
439 /* NOTREACHED */
440 }
441
442 /* when non-privileged socket */
443 /* look for a policy in SPD */
444 kernsp = key_allocsp(&currsp->spidx, dir);
445
446 /* SP found */
447 if (kernsp != NULL) {
448 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
449 printf("DP ipsec4_getpolicybysock called "
450 "to allocate SP:0x%llx\n",
451 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
452 *error = 0;
453 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7,*error,0,0,0);
454 return kernsp;
455 }
456
457 /* no SP found */
458 switch (currsp->policy) {
459 case IPSEC_POLICY_BYPASS:
460 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
461 "Illegal policy for non-privileged defined %d\n",
462 currsp->policy));
463 *error = EINVAL;
464 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8,*error,0,0,0);
465 return NULL;
466
467 case IPSEC_POLICY_ENTRUST:
468 lck_mtx_lock(sadb_mutex);
469 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
470 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
471 ipseclog((LOG_INFO,
472 "fixed system default policy: %d->%d\n",
473 ip4_def_policy.policy, IPSEC_POLICY_NONE));
474 ip4_def_policy.policy = IPSEC_POLICY_NONE;
475 }
476 ip4_def_policy.refcnt++;
477 lck_mtx_unlock(sadb_mutex);
478 *error = 0;
479 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9,*error,0,0,0);
480 return &ip4_def_policy;
481
482 case IPSEC_POLICY_IPSEC:
483 lck_mtx_lock(sadb_mutex);
484 currsp->refcnt++;
485 lck_mtx_unlock(sadb_mutex);
486 *error = 0;
487 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10,*error,0,0,0);
488 return currsp;
489
490 default:
491 ipseclog((LOG_ERR, "ipsec4_getpolicybysock: "
492 "Invalid policy for PCB %d\n", currsp->policy));
493 *error = EINVAL;
494 KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11,*error,0,0,0);
495 return NULL;
496 }
497 /* NOTREACHED */
498}
499
500/*
501 * For a FORWARDING packet or an OUTBOUND packet without a socket. Search the
502 * SPD for the packet, and return a pointer to the SP.
503 * OUT: positive: a pointer to the entry for the matched security policy leaf.
504 * NULL: no appropriate SP found, one of the following values is set to error.
505 * 0 : bypass
506 * EACCES : discard packet.
507 * ENOENT : ipsec_acquire() in progress, maybe.
508 * others : error occurred.
509 */
510struct secpolicy *
511ipsec4_getpolicybyaddr(struct mbuf *m,
512 u_int dir,
513 int flag,
514 int *error)
515{
516 struct secpolicy *sp = NULL;
517
518 if (ipsec_bypass != 0)
519 return 0;
520
521 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
522
523 /* sanity check */
524 if (m == NULL || error == NULL)
525 panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n");
526 {
527 struct secpolicyindex spidx;
528
529 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
530 bzero(&spidx, sizeof(spidx));
531
532 /* make an index to look for a policy */
533 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m,
534 (flag & IP_FORWARDING) ? 0 : 1);
535
536 if (*error != 0) {
537 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,*error,0,0,0);
538 return NULL;
539 }
540
541 sp = key_allocsp(&spidx, dir);
542 }
543
544 /* SP found */
545 if (sp != NULL) {
546 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
547 printf("DP ipsec4_getpolicybyaddr called "
548 "to allocate SP:0x%llx\n",
549 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
550 *error = 0;
551 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0);
552 return sp;
553 }
554
555 /* no SP found */
556 lck_mtx_lock(sadb_mutex);
557 if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD
558 && ip4_def_policy.policy != IPSEC_POLICY_NONE) {
559 ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n",
560 ip4_def_policy.policy,
561 IPSEC_POLICY_NONE));
562 ip4_def_policy.policy = IPSEC_POLICY_NONE;
563 }
564 ip4_def_policy.refcnt++;
565 lck_mtx_unlock(sadb_mutex);
566 *error = 0;
567 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3,*error,0,0,0);
568 return &ip4_def_policy;
569}
570
571/* Match with bound interface rather than src addr.
572 * Unlike getpolicybyaddr, do not set the default policy.
573 * Return 0 if the caller should continue processing, or -1 if the packet
574 * should be dropped.
575 */
576int
577ipsec4_getpolicybyinterface(struct mbuf *m,
578 u_int dir,
579 int *flags,
580 struct ip_out_args *ipoa,
581 struct secpolicy **sp)
582{
583 struct secpolicyindex spidx;
584 int error = 0;
585
586 if (ipsec_bypass != 0)
587 return 0;
588
589 /* Sanity check */
590 if (m == NULL || ipoa == NULL || sp == NULL)
591 panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n");
592
593 if (ipoa->ipoa_boundif == IFSCOPE_NONE)
594 return 0;
595
596 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
597 bzero(&spidx, sizeof(spidx));
598
599 /* make an index to look for a policy */
600 error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 0 : 1,
601 ipoa->ipoa_boundif, 4);
602
603 if (error != 0) {
604 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
605 return 0;
606 }
607
608 *sp = key_allocsp(&spidx, dir);
609
610 /* Return SP, whether NULL or not */
611 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
612 if ((*sp)->ipsec_if == NULL) {
613 /* Invalid to capture on an interface without redirect */
614 key_freesp(*sp, KEY_SADB_UNLOCKED);
615 *sp = NULL;
616 return -1;
617 } else if ((*sp)->disabled) {
618 /* Disabled policies go in the clear */
619 key_freesp(*sp, KEY_SADB_UNLOCKED);
620 *sp = NULL;
621 *flags |= IP_NOIPSEC; /* Avoid later IPSec check */
622 } else {
623 /* If policy is enabled, redirect to ipsec interface */
624 ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index;
625 }
626 }
627
628 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0);
629
630 return 0;
631}
632
633
634#if INET6
635/*
636 * For an OUTBOUND packet that has a socket. Search the SPD for the packet,
637 * and return a pointer to the SP.
638 * OUT: NULL: no appropriate SP found, one of the following values is set to error.
639 * 0 : bypass
640 * EACCES : discard packet.
641 * ENOENT : ipsec_acquire() in progress, maybe.
642 * others : error occurred.
643 * others: a pointer to SP
644 */
645struct secpolicy *
646ipsec6_getpolicybysock(struct mbuf *m,
647 u_int dir,
648 struct socket *so,
649 int *error)
650{
651 struct inpcbpolicy *pcbsp = NULL;
652 struct secpolicy *currsp = NULL; /* policy on socket */
653 struct secpolicy *kernsp = NULL; /* policy on kernel */
654
655 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
656
657 /* sanity check */
658 if (m == NULL || so == NULL || error == NULL)
659 panic("ipsec6_getpolicybysock: NULL pointer was passed.\n");
660
661#if DIAGNOSTIC
662 if (SOCK_DOM(so) != PF_INET6)
663 panic("ipsec6_getpolicybysock: socket domain != inet6\n");
664#endif
665
666 pcbsp = sotoin6pcb(so)->in6p_sp;
667
668 if (!pcbsp){
669 return ipsec6_getpolicybyaddr(m, dir, 0, error);
670 }
671
672 /* set spidx in pcb */
673 ipsec6_setspidx_in6pcb(m, sotoin6pcb(so));
674
675 /* sanity check */
676 if (pcbsp == NULL)
677 panic("ipsec6_getpolicybysock: pcbsp is NULL.\n");
678
679 switch (dir) {
680 case IPSEC_DIR_INBOUND:
681 currsp = pcbsp->sp_in;
682 break;
683 case IPSEC_DIR_OUTBOUND:
684 currsp = pcbsp->sp_out;
685 break;
686 default:
687 panic("ipsec6_getpolicybysock: illegal direction.\n");
688 }
689
690 /* sanity check */
691 if (currsp == NULL)
692 panic("ipsec6_getpolicybysock: currsp is NULL.\n");
693
694 /* when privileged socket */
695 if (pcbsp->priv) {
696 switch (currsp->policy) {
697 case IPSEC_POLICY_BYPASS:
698 lck_mtx_lock(sadb_mutex);
699 currsp->refcnt++;
700 lck_mtx_unlock(sadb_mutex);
701 *error = 0;
702 return currsp;
703
704 case IPSEC_POLICY_ENTRUST:
705 /* look for a policy in SPD */
706 kernsp = key_allocsp(&currsp->spidx, dir);
707
708 /* SP found */
709 if (kernsp != NULL) {
710 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
711 printf("DP ipsec6_getpolicybysock called "
712 "to allocate SP:0x%llx\n",
713 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
714 *error = 0;
715 return kernsp;
716 }
717
718 /* no SP found */
719 lck_mtx_lock(sadb_mutex);
720 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
721 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
722 ipseclog((LOG_INFO,
723 "fixed system default policy: %d->%d\n",
724 ip6_def_policy.policy, IPSEC_POLICY_NONE));
725 ip6_def_policy.policy = IPSEC_POLICY_NONE;
726 }
727 ip6_def_policy.refcnt++;
728 lck_mtx_unlock(sadb_mutex);
729 *error = 0;
730 return &ip6_def_policy;
731
732 case IPSEC_POLICY_IPSEC:
733 lck_mtx_lock(sadb_mutex);
734 currsp->refcnt++;
735 lck_mtx_unlock(sadb_mutex);
736 *error = 0;
737 return currsp;
738
739 default:
740 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
741 "Invalid policy for PCB %d\n", currsp->policy));
742 *error = EINVAL;
743 return NULL;
744 }
745 /* NOTREACHED */
746 }
747
748 /* when non-privileged socket */
749 /* look for a policy in SPD */
750 kernsp = key_allocsp(&currsp->spidx, dir);
751
752 /* SP found */
753 if (kernsp != NULL) {
754 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
755 printf("DP ipsec6_getpolicybysock called "
756 "to allocate SP:0x%llx\n",
757 (uint64_t)VM_KERNEL_ADDRPERM(kernsp)));
758 *error = 0;
759 return kernsp;
760 }
761
762 /* no SP found */
763 switch (currsp->policy) {
764 case IPSEC_POLICY_BYPASS:
765 ipseclog((LOG_ERR, "ipsec6_getpolicybysock: "
766 "Illegal policy for non-privileged defined %d\n",
767 currsp->policy));
768 *error = EINVAL;
769 return NULL;
770
771 case IPSEC_POLICY_ENTRUST:
772 lck_mtx_lock(sadb_mutex);
773 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
774 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
775 ipseclog((LOG_INFO,
776 "fixed system default policy: %d->%d\n",
777 ip6_def_policy.policy, IPSEC_POLICY_NONE));
778 ip6_def_policy.policy = IPSEC_POLICY_NONE;
779 }
780 ip6_def_policy.refcnt++;
781 lck_mtx_unlock(sadb_mutex);
782 *error = 0;
783 return &ip6_def_policy;
784
785 case IPSEC_POLICY_IPSEC:
786 lck_mtx_lock(sadb_mutex);
787 currsp->refcnt++;
788 lck_mtx_unlock(sadb_mutex);
789 *error = 0;
790 return currsp;
791
792 default:
793 ipseclog((LOG_ERR,
794 "ipsec6_getpolicybysock: Invalid policy for PCB %d\n",
795 currsp->policy));
796 *error = EINVAL;
797 return NULL;
798 }
799 /* NOTREACHED */
800}
801
802/*
803 * For a FORWARDING packet or an OUTBOUND packet without a socket. Search the
804 * SPD for the packet, and return a pointer to the SP.
805 * `flag' indicates whether the packet is being forwarded.
806 * flag = 1: forward
807 * OUT: positive: a pointer to the entry for the matched security policy leaf.
808 * NULL: no appropriate SP found, one of the following values is set to error.
809 * 0 : bypass
810 * EACCES : discard packet.
811 * ENOENT : ipsec_acquire() in progress, maybe.
812 * others : error occurred.
813 */
814#ifndef IP_FORWARDING
815#define IP_FORWARDING 1
816#endif
817
818struct secpolicy *
819ipsec6_getpolicybyaddr(struct mbuf *m,
820 u_int dir,
821 int flag,
822 int *error)
823{
824 struct secpolicy *sp = NULL;
825
826 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
827
828 /* sanity check */
829 if (m == NULL || error == NULL)
830 panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n");
831
832 {
833 struct secpolicyindex spidx;
834
835 bzero(&spidx, sizeof(spidx));
836
837 /* make an index to look for a policy */
838 *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m,
839 (flag & IP_FORWARDING) ? 0 : 1);
840
841 if (*error != 0)
842 return NULL;
843
844 sp = key_allocsp(&spidx, dir);
845 }
846
847 /* SP found */
848 if (sp != NULL) {
849 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
850 printf("DP ipsec6_getpolicybyaddr called "
851 "to allocate SP:0x%llx\n",
852 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
853 *error = 0;
854 return sp;
855 }
856
857 /* no SP found */
858 lck_mtx_lock(sadb_mutex);
859 if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD
860 && ip6_def_policy.policy != IPSEC_POLICY_NONE) {
861 ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n",
862 ip6_def_policy.policy, IPSEC_POLICY_NONE));
863 ip6_def_policy.policy = IPSEC_POLICY_NONE;
864 }
865 ip6_def_policy.refcnt++;
866 lck_mtx_unlock(sadb_mutex);
867 *error = 0;
868 return &ip6_def_policy;
869}
870
871/* Match with bound interface rather than src addr.
872 * Unlike getpolicybyaddr, do not set the default policy.
873 * Return 0 if the caller should continue processing, or -1 if the packet
874 * should be dropped.
875 */
876int
877ipsec6_getpolicybyinterface(struct mbuf *m,
878 u_int dir,
879 int flag,
880 struct ip6_out_args *ip6oap,
881 int *noipsec,
882 struct secpolicy **sp)
883{
884 struct secpolicyindex spidx;
885 int error = 0;
886
887 if (ipsec_bypass != 0)
888 return 0;
889
890 /* Sanity check */
891 if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL)
892 panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n");
893
894 *noipsec = 0;
895
896 if (ip6oap->ip6oa_boundif == IFSCOPE_NONE)
897 return 0;
898
899 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0);
900 bzero(&spidx, sizeof(spidx));
901
902 /* make an index to look for a policy */
903 error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 0 : 1,
904 ip6oap->ip6oa_boundif, 6);
905
906 if (error != 0) {
907 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0);
908 return 0;
909 }
910
911 *sp = key_allocsp(&spidx, dir);
912
913 /* Return SP, whether NULL or not */
914 if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) {
915 if ((*sp)->ipsec_if == NULL) {
916 /* Invalid to capture on an interface without redirect */
917 key_freesp(*sp, KEY_SADB_UNLOCKED);
918 *sp = NULL;
919 return -1;
920 } else if ((*sp)->disabled) {
921 /* Disabled policies go in the clear */
922 key_freesp(*sp, KEY_SADB_UNLOCKED);
923 *sp = NULL;
924 *noipsec = 1; /* Avoid later IPSec check */
925 } else {
926 /* If policy is enabled, redirect to ipsec interface */
927 ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index;
928 }
929 }
930
931 KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0);
932
933 return 0;
934}
935#endif /* INET6 */
936
937/*
938 * Set the IP addresses into spidx from the mbuf.
939 * This function is used when forwarding a packet or replying to an ICMP echo.
940 *
941 * IN: the following are taken from the mbuf:
942 * protocol family, src, dst, next protocol
943 * OUT:
944 * 0: success.
945 * other: failure, and errno is set.
946 */
947static int
948ipsec_setspidx_mbuf(
949 struct secpolicyindex *spidx,
950 u_int dir,
951 __unused u_int family,
952 struct mbuf *m,
953 int needport)
954{
955 int error;
956
957 /* sanity check */
958 if (spidx == NULL || m == NULL)
959 panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n");
960
961 bzero(spidx, sizeof(*spidx));
962
963 error = ipsec_setspidx(m, spidx, needport, 0);
964 if (error)
965 goto bad;
966 spidx->dir = dir;
967
968 return 0;
969
970 bad:
971 /* XXX initialize */
972 bzero(spidx, sizeof(*spidx));
973 return EINVAL;
974}
975
976static int
977ipsec_setspidx_interface(
978 struct secpolicyindex *spidx,
979 u_int dir,
980 struct mbuf *m,
981 int needport,
982 int ifindex,
983 int ip_version)
984{
985 int error;
986
987 /* sanity check */
988 if (spidx == NULL || m == NULL)
989 panic("ipsec_setspidx_interface: NULL pointer was passed.\n");
990
991 bzero(spidx, sizeof(*spidx));
992
993 error = ipsec_setspidx(m, spidx, needport, ip_version);
994 if (error)
995 goto bad;
996 spidx->dir = dir;
997
998 if (ifindex != 0) {
999 ifnet_head_lock_shared();
1000 spidx->internal_if = ifindex2ifnet[ifindex];
1001 ifnet_head_done();
1002 } else {
1003 spidx->internal_if = NULL;
1004 }
1005
1006 return 0;
1007
1008bad:
1009 return EINVAL;
1010}
1011
1012static int
1013ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb)
1014{
1015 struct secpolicyindex *spidx;
1016 int error;
1017
1018 if (ipsec_bypass != 0)
1019 return 0;
1020
1021 /* sanity check */
1022 if (pcb == NULL)
1023 panic("ipsec4_setspidx_inpcb: no PCB found.\n");
1024 if (pcb->inp_sp == NULL)
1025 panic("ipsec4_setspidx_inpcb: no inp_sp found.\n");
1026 if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL)
1027 panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n");
1028
1029 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1030 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1031
1032 spidx = &pcb->inp_sp->sp_in->spidx;
1033 error = ipsec_setspidx(m, spidx, 1, 0);
1034 if (error)
1035 goto bad;
1036 spidx->dir = IPSEC_DIR_INBOUND;
1037
1038 spidx = &pcb->inp_sp->sp_out->spidx;
1039 error = ipsec_setspidx(m, spidx, 1, 0);
1040 if (error)
1041 goto bad;
1042 spidx->dir = IPSEC_DIR_OUTBOUND;
1043
1044 return 0;
1045
1046bad:
1047 bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx));
1048 bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx));
1049 return error;
1050}
1051
1052#if INET6
1053static int
1054ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb)
1055{
1056 struct secpolicyindex *spidx;
1057 int error;
1058
1059 /* sanity check */
1060 if (pcb == NULL)
1061 panic("ipsec6_setspidx_in6pcb: no PCB found.\n");
1062 if (pcb->in6p_sp == NULL)
1063 panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n");
1064 if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL)
1065 panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n");
1066
1067 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1068 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1069
1070 spidx = &pcb->in6p_sp->sp_in->spidx;
1071 error = ipsec_setspidx(m, spidx, 1, 0);
1072 if (error)
1073 goto bad;
1074 spidx->dir = IPSEC_DIR_INBOUND;
1075
1076 spidx = &pcb->in6p_sp->sp_out->spidx;
1077 error = ipsec_setspidx(m, spidx, 1, 0);
1078 if (error)
1079 goto bad;
1080 spidx->dir = IPSEC_DIR_OUTBOUND;
1081
1082 return 0;
1083
1084bad:
1085 bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx));
1086 bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx));
1087 return error;
1088}
1089#endif
1090
1091/*
1092 * Configure the security policy index (src/dst/proto/sport/dport)
1093 * by examining the contents of the mbuf.
1094 * The caller is responsible for error recovery (e.g., clearing the spidx).
1095 */
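/*
 * Editorial example: for an IPv4 TCP segment 10.0.0.1:49152 -> 192.0.2.1:443,
 * ipsec4_setspidx_ipaddr() and ipsec4_get_ulp() below would leave spidx with
 * src/dst filled in as sockaddr_in for those addresses, prefs = prefd = 32,
 * ul_proto = IPPROTO_TCP, and the TCP ports set only when needport != 0.
 */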
1096static int
1097ipsec_setspidx(struct mbuf *m,
1098 struct secpolicyindex *spidx,
1099 int needport,
1100 int force_ip_version)
1101{
1102 struct ip *ip = NULL;
1103 struct ip ipbuf;
1104 u_int v;
1105 struct mbuf *n;
1106 int len;
1107 int error;
1108
1109 if (m == NULL)
1110 panic("ipsec_setspidx: m == 0 passed.\n");
1111
1112 /*
1113 * validate m->m_pkthdr.len. we see incorrect length if we
1114 * mistakenly call this function with inconsistent mbuf chain
1115 * (like 4.4BSD tcp/udp processing). XXX should we panic here?
1116 */
1117 len = 0;
1118 for (n = m; n; n = n->m_next)
1119 len += n->m_len;
1120 if (m->m_pkthdr.len != len) {
1121 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1122 printf("ipsec_setspidx: "
1123 "total of m_len(%d) != pkthdr.len(%d), "
1124 "ignored.\n",
1125 len, m->m_pkthdr.len));
1126 return EINVAL;
1127 }
1128
1129 if (m->m_pkthdr.len < sizeof(struct ip)) {
1130 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1131 printf("ipsec_setspidx: "
1132 "pkthdr.len(%d) < sizeof(struct ip), ignored.\n",
1133 m->m_pkthdr.len));
1134 return EINVAL;
1135 }
1136
1137 if (m->m_len >= sizeof(*ip))
1138 ip = mtod(m, struct ip *);
1139 else {
1140 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1141 ip = &ipbuf;
1142 }
1143
1144 if (force_ip_version) {
1145 v = force_ip_version;
1146 } else {
1147#ifdef _IP_VHL
1148 v = _IP_VHL_V(ip->ip_vhl);
1149#else
1150 v = ip->ip_v;
1151#endif
1152 }
1153 switch (v) {
1154 case 4:
1155 error = ipsec4_setspidx_ipaddr(m, spidx);
1156 if (error)
1157 return error;
1158 ipsec4_get_ulp(m, spidx, needport);
1159 return 0;
1160#if INET6
1161 case 6:
1162 if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) {
1163 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1164 printf("ipsec_setspidx: "
1165 "pkthdr.len(%d) < sizeof(struct ip6_hdr), "
1166 "ignored.\n", m->m_pkthdr.len));
1167 return EINVAL;
1168 }
1169 error = ipsec6_setspidx_ipaddr(m, spidx);
1170 if (error)
1171 return error;
1172 ipsec6_get_ulp(m, spidx, needport);
1173 return 0;
1174#endif
1175 default:
1176 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1177 printf("ipsec_setspidx: "
1178 "unknown IP version %u, ignored.\n", v));
1179 return EINVAL;
1180 }
1181}
1182
1183static void
1184ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport)
1185{
1186 struct ip ip;
1187 struct ip6_ext ip6e;
1188 u_int8_t nxt;
1189 int off;
1190 struct tcphdr th;
1191 struct udphdr uh;
1192
1193 /* sanity check */
1194 if (m == NULL)
1195 panic("ipsec4_get_ulp: NULL pointer was passed.\n");
1196 if (m->m_pkthdr.len < sizeof(ip))
1197 panic("ipsec4_get_ulp: too short\n");
1198
1199 /* set default */
1200 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1201 ((struct sockaddr_in *)&spidx->src)->sin_port = IPSEC_PORT_ANY;
1202 ((struct sockaddr_in *)&spidx->dst)->sin_port = IPSEC_PORT_ANY;
1203
1204 m_copydata(m, 0, sizeof(ip), (caddr_t)&ip);
1205 /* ip_input() flips it into host endian XXX need more checking */
1206 if (ip.ip_off & (IP_MF | IP_OFFMASK))
1207 return;
1208
1209 nxt = ip.ip_p;
1210#ifdef _IP_VHL
1211 off = _IP_VHL_HL(ip.ip_vhl) << 2;
1212#else
1213 off = ip.ip_hl << 2;
1214#endif
1215 while (off < m->m_pkthdr.len) {
1216 switch (nxt) {
1217 case IPPROTO_TCP:
1218 spidx->ul_proto = nxt;
1219 if (!needport)
1220 return;
1221 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1222 return;
1223 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1224 ((struct sockaddr_in *)&spidx->src)->sin_port =
1225 th.th_sport;
1226 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1227 th.th_dport;
1228 return;
1229 case IPPROTO_UDP:
1230 spidx->ul_proto = nxt;
1231 if (!needport)
1232 return;
1233 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1234 return;
1235 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1236 ((struct sockaddr_in *)&spidx->src)->sin_port =
1237 uh.uh_sport;
1238 ((struct sockaddr_in *)&spidx->dst)->sin_port =
1239 uh.uh_dport;
1240 return;
1241 case IPPROTO_AH:
1242 if (off + sizeof(ip6e) > m->m_pkthdr.len)
1243 return;
1244 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
1245 off += (ip6e.ip6e_len + 2) << 2;
1246 nxt = ip6e.ip6e_nxt;
1247 break;
1248 case IPPROTO_ICMP:
1249 default:
1250 /* XXX intermediate headers??? */
1251 spidx->ul_proto = nxt;
1252 return;
1253 }
1254 }
1255}
1256
1257/* assumes that m is sane */
1258static int
1259ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx)
1260{
1261 struct ip *ip = NULL;
1262 struct ip ipbuf;
1263 struct sockaddr_in *sin;
1264
1265 if (m->m_len >= sizeof(*ip))
1266 ip = mtod(m, struct ip *);
1267 else {
1268 m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf);
1269 ip = &ipbuf;
1270 }
1271
1272 sin = (struct sockaddr_in *)&spidx->src;
1273 bzero(sin, sizeof(*sin));
1274 sin->sin_family = AF_INET;
1275 sin->sin_len = sizeof(struct sockaddr_in);
1276 bcopy(&ip->ip_src, &sin->sin_addr, sizeof(ip->ip_src));
1277 spidx->prefs = sizeof(struct in_addr) << 3;
1278
1279 sin = (struct sockaddr_in *)&spidx->dst;
1280 bzero(sin, sizeof(*sin));
1281 sin->sin_family = AF_INET;
1282 sin->sin_len = sizeof(struct sockaddr_in);
1283 bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst));
1284 spidx->prefd = sizeof(struct in_addr) << 3;
1285
1286 return 0;
1287}
1288
1289#if INET6
1290static void
1291ipsec6_get_ulp(struct mbuf *m,
1292 struct secpolicyindex *spidx,
1293 int needport)
1294{
1295 int off, nxt;
1296 struct tcphdr th;
1297 struct udphdr uh;
1298
1299 /* sanity check */
1300 if (m == NULL)
1301 panic("ipsec6_get_ulp: NULL pointer was passed.\n");
1302
1303 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1304 printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m));
1305
1306 /* set default */
1307 spidx->ul_proto = IPSEC_ULPROTO_ANY;
1308 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = IPSEC_PORT_ANY;
1309 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = IPSEC_PORT_ANY;
1310
1311 nxt = -1;
1312 off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
1313 if (off < 0 || m->m_pkthdr.len < off)
1314 return;
1315
1316 switch (nxt) {
1317 case IPPROTO_TCP:
1318 spidx->ul_proto = nxt;
1319 if (!needport)
1320 break;
1321 if (off + sizeof(struct tcphdr) > m->m_pkthdr.len)
1322 break;
1323 m_copydata(m, off, sizeof(th), (caddr_t)&th);
1324 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport;
1325 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport;
1326 break;
1327 case IPPROTO_UDP:
1328 spidx->ul_proto = nxt;
1329 if (!needport)
1330 break;
1331 if (off + sizeof(struct udphdr) > m->m_pkthdr.len)
1332 break;
1333 m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
1334 ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport;
1335 ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport;
1336 break;
1337 case IPPROTO_ICMPV6:
1338 default:
1339 /* XXX intermediate headers??? */
1340 spidx->ul_proto = nxt;
1341 break;
1342 }
1343}
1344
1345/* assumes that m is sane */
1346static int
1347ipsec6_setspidx_ipaddr(struct mbuf *m,
1348 struct secpolicyindex *spidx)
1349{
1350 struct ip6_hdr *ip6 = NULL;
1351 struct ip6_hdr ip6buf;
1352 struct sockaddr_in6 *sin6;
1353
1354 if (m->m_len >= sizeof(*ip6))
1355 ip6 = mtod(m, struct ip6_hdr *);
1356 else {
1357 m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf);
1358 ip6 = &ip6buf;
1359 }
1360
1361 sin6 = (struct sockaddr_in6 *)&spidx->src;
1362 bzero(sin6, sizeof(*sin6));
1363 sin6->sin6_family = AF_INET6;
1364 sin6->sin6_len = sizeof(struct sockaddr_in6);
1365 bcopy(&ip6->ip6_src, &sin6->sin6_addr, sizeof(ip6->ip6_src));
1366 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
1367 sin6->sin6_addr.s6_addr16[1] = 0;
1368 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
1369 }
1370 spidx->prefs = sizeof(struct in6_addr) << 3;
1371
1372 sin6 = (struct sockaddr_in6 *)&spidx->dst;
1373 bzero(sin6, sizeof(*sin6));
1374 sin6->sin6_family = AF_INET6;
1375 sin6->sin6_len = sizeof(struct sockaddr_in6);
1376 bcopy(&ip6->ip6_dst, &sin6->sin6_addr, sizeof(ip6->ip6_dst));
1377 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
1378 sin6->sin6_addr.s6_addr16[1] = 0;
1379 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
1380 }
1381 spidx->prefd = sizeof(struct in6_addr) << 3;
1382
1383 return 0;
1384}
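/*
 * Editorial note: the s6_addr16[1] handling above follows the KAME convention
 * of embedding the scope zone ID in the second 16-bit word of a link-local
 * address; the ID is moved into sin6_scope_id and cleared from the address so
 * that SPD matching sees a canonical link-local address.
 */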
1385#endif
1386
1387static struct inpcbpolicy *
1388ipsec_newpcbpolicy(void)
1389{
1390 struct inpcbpolicy *p;
1391
1392 p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK);
1393 return p;
1394}
1395
1396static void
1397ipsec_delpcbpolicy(struct inpcbpolicy *p)
1398{
1399 FREE(p, M_SECA);
1400}
1401
1402/* initialize policy in PCB */
1403int
1404ipsec_init_policy(struct socket *so,
1405 struct inpcbpolicy **pcb_sp)
1406{
1407 struct inpcbpolicy *new;
1408
1409 /* sanity check. */
1410 if (so == NULL || pcb_sp == NULL)
1411 panic("ipsec_init_policy: NULL pointer was passed.\n");
1412
1413 new = ipsec_newpcbpolicy();
1414 if (new == NULL) {
1415 ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n"));
1416 return ENOBUFS;
1417 }
1418 bzero(new, sizeof(*new));
1419
1420#ifdef __APPLE__
1421 if (kauth_cred_issuser(so->so_cred))
1422#else
1423 if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL))
1424#endif
1425 new->priv = 1;
1426 else
1427 new->priv = 0;
1428
1429 if ((new->sp_in = key_newsp()) == NULL) {
1430 ipsec_delpcbpolicy(new);
1431 return ENOBUFS;
1432 }
1433 new->sp_in->state = IPSEC_SPSTATE_ALIVE;
1434 new->sp_in->policy = IPSEC_POLICY_ENTRUST;
1435
1436 if ((new->sp_out = key_newsp()) == NULL) {
1437 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1438 ipsec_delpcbpolicy(new);
1439 return ENOBUFS;
1440 }
1441 new->sp_out->state = IPSEC_SPSTATE_ALIVE;
1442 new->sp_out->policy = IPSEC_POLICY_ENTRUST;
1443
1444 *pcb_sp = new;
1445
1446 return 0;
1447}
1448
1449/* copy old ipsec policy into new */
1450int
1451ipsec_copy_policy(struct inpcbpolicy *old,
1452 struct inpcbpolicy *new)
1453{
1454 struct secpolicy *sp;
1455
1456 if (ipsec_bypass != 0)
1457 return 0;
1458
1459 sp = ipsec_deepcopy_policy(old->sp_in);
1460 if (sp) {
1461 key_freesp(new->sp_in, KEY_SADB_UNLOCKED);
1462 new->sp_in = sp;
1463 } else
1464 return ENOBUFS;
1465
1466 sp = ipsec_deepcopy_policy(old->sp_out);
1467 if (sp) {
1468 key_freesp(new->sp_out, KEY_SADB_UNLOCKED);
1469 new->sp_out = sp;
1470 } else
1471 return ENOBUFS;
1472
1473 new->priv = old->priv;
1474
1475 return 0;
1476}
1477
1478/* deep-copy a policy in PCB */
1479static struct secpolicy *
1480ipsec_deepcopy_policy(struct secpolicy *src)
1481{
1482 struct ipsecrequest *newchain = NULL;
1483 struct ipsecrequest *p;
1484 struct ipsecrequest **q;
1485 struct ipsecrequest *r;
1486 struct secpolicy *dst;
1487
1488 if (src == NULL)
1489 return NULL;
1490 dst = key_newsp();
1491 if (dst == NULL)
1492 return NULL;
1493
1494 /*
1495 * deep-copy IPsec request chain. This is required since struct
1496 * ipsecrequest is not reference counted.
1497 */
1498 q = &newchain;
1499 for (p = src->req; p; p = p->next) {
1500 *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest),
1501 M_SECA, M_WAITOK | M_ZERO);
1502 if (*q == NULL)
1503 goto fail;
1504 (*q)->next = NULL;
1505
1506 (*q)->saidx.proto = p->saidx.proto;
1507 (*q)->saidx.mode = p->saidx.mode;
1508 (*q)->level = p->level;
1509 (*q)->saidx.reqid = p->saidx.reqid;
1510
1511 bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src));
1512 bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst));
1513
1514 (*q)->sp = dst;
1515
1516 q = &((*q)->next);
1517 }
1518
1519 dst->req = newchain;
1520 dst->state = src->state;
1521 dst->policy = src->policy;
1522 /* do not touch the refcnt fields */
1523
1524 return dst;
1525
1526fail:
1527 for (p = newchain; p; p = r) {
1528 r = p->next;
1529 FREE(p, M_SECA);
1530 p = NULL;
1531 }
1532 key_freesp(dst, KEY_SADB_UNLOCKED);
1533 return NULL;
1534}
1535
1536/* set policy and ipsec request if present. */
1537static int
1538ipsec_set_policy(struct secpolicy **pcb_sp,
1539 __unused int optname,
1540 caddr_t request,
1541 size_t len,
1542 int priv)
1543{
1544 struct sadb_x_policy *xpl;
1545 struct secpolicy *newsp = NULL;
1546 int error;
1547
1548 /* sanity check. */
1549 if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL)
1550 return EINVAL;
1551 if (len < sizeof(*xpl))
1552 return EINVAL;
1553 xpl = (struct sadb_x_policy *)(void *)request;
1554
1555 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1556 printf("ipsec_set_policy: passed policy\n");
1557 kdebug_sadb_x_policy((struct sadb_ext *)xpl));
1558
1559 /* check policy type */
1560 /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */
1561 if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD
1562 || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE)
1563 return EINVAL;
1564
1565 /* check privileged socket */
1566 if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS)
1567 return EACCES;
1568
1569 /* allocate a new SP entry */
1570 if ((newsp = key_msg2sp(xpl, len, &error)) == NULL)
1571 return error;
1572
1573 newsp->state = IPSEC_SPSTATE_ALIVE;
1574
1575 /* clear old SP and set new SP */
1576 key_freesp(*pcb_sp, KEY_SADB_UNLOCKED);
1577 *pcb_sp = newsp;
1578 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1579 printf("ipsec_set_policy: new policy\n");
1580 kdebug_secpolicy(newsp));
1581
1582 return 0;
1583}
1584
1585int
1586ipsec4_set_policy(struct inpcb *inp,
1587 int optname,
1588 caddr_t request,
1589 size_t len,
1590 int priv)
1591{
1592 struct sadb_x_policy *xpl;
1593 struct secpolicy **pcb_sp;
1594 int error = 0;
1595 struct sadb_x_policy xpl_aligned_buf;
1596 u_int8_t *xpl_unaligned;
1597
1598 /* sanity check. */
1599 if (inp == NULL || request == NULL)
1600 return EINVAL;
1601 if (len < sizeof(*xpl))
1602 return EINVAL;
1603 xpl = (struct sadb_x_policy *)(void *)request;
1604
1605 /* This is a new mbuf allocated by soopt_getm() */
1606 if (IPSEC_IS_P2ALIGNED(xpl)) {
1607 xpl_unaligned = NULL;
1608 } else {
1609 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1610 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1611 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1612 }
1613
1614 if (inp->inp_sp == NULL) {
1615 error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp);
1616 if (error)
1617 return error;
1618 }
1619
1620 /* select direction */
1621 switch (xpl->sadb_x_policy_dir) {
1622 case IPSEC_DIR_INBOUND:
1623 pcb_sp = &inp->inp_sp->sp_in;
1624 break;
1625 case IPSEC_DIR_OUTBOUND:
1626 pcb_sp = &inp->inp_sp->sp_out;
1627 break;
1628 default:
1629 ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n",
1630 xpl->sadb_x_policy_dir));
1631 return EINVAL;
1632 }
1633
1634 /* turn bypass off */
1635 if (ipsec_bypass != 0)
1636 ipsec_bypass = 0;
1637
1638 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1639}
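/*
 * Illustrative user-space sketch (editorial; assumes the KAME libipsec API,
 * which is not defined in this file): a per-socket policy normally reaches
 * ipsec4_set_policy() via setsockopt(IP_IPSEC_POLICY) with a buffer built by
 * ipsec_set_policy(3):
 *
 *	char *req = "out ipsec esp/transport//require";
 *	char *buf = ipsec_set_policy(req, strlen(req));
 *	if (buf != NULL) {
 *		(void)setsockopt(s, IPPROTO_IP, IP_IPSEC_POLICY,
 *		    buf, ipsec_get_policylen(buf));
 *		free(buf);
 *	}
 */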
1640
1641/* delete policy in PCB */
1642int
1643ipsec4_delete_pcbpolicy(struct inpcb *inp)
1644{
1645
1646 /* sanity check. */
1647 if (inp == NULL)
1648 panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n");
1649
1650 if (inp->inp_sp == NULL)
1651 return 0;
1652
1653 if (inp->inp_sp->sp_in != NULL) {
1654 key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED);
1655 inp->inp_sp->sp_in = NULL;
1656 }
1657
1658 if (inp->inp_sp->sp_out != NULL) {
1659 key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED);
1660 inp->inp_sp->sp_out = NULL;
1661 }
1662
1663 ipsec_delpcbpolicy(inp->inp_sp);
1664 inp->inp_sp = NULL;
1665
1666 return 0;
1667}
1668
1669#if INET6
1670int
1671ipsec6_set_policy(struct in6pcb *in6p,
1672 int optname,
1673 caddr_t request,
1674 size_t len,
1675 int priv)
1676{
1677 struct sadb_x_policy *xpl;
1678 struct secpolicy **pcb_sp;
1679 int error = 0;
1680 struct sadb_x_policy xpl_aligned_buf;
1681 u_int8_t *xpl_unaligned;
1682
1683 /* sanity check. */
1684 if (in6p == NULL || request == NULL)
1685 return EINVAL;
1686 if (len < sizeof(*xpl))
1687 return EINVAL;
1688 xpl = (struct sadb_x_policy *)(void *)request;
1689
1690 /* This is a new mbuf allocated by soopt_getm() */
1691 if (IPSEC_IS_P2ALIGNED(xpl)) {
1692 xpl_unaligned = NULL;
1693 } else {
1694 xpl_unaligned = (__typeof__(xpl_unaligned))xpl;
1695 memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf));
1696 xpl = (__typeof__(xpl))&xpl_aligned_buf;
1697 }
1698
1699 if (in6p->in6p_sp == NULL) {
1700 error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp);
1701 if (error)
1702 return error;
1703 }
1704
1705 /* select direction */
1706 switch (xpl->sadb_x_policy_dir) {
1707 case IPSEC_DIR_INBOUND:
1708 pcb_sp = &in6p->in6p_sp->sp_in;
1709 break;
1710 case IPSEC_DIR_OUTBOUND:
1711 pcb_sp = &in6p->in6p_sp->sp_out;
1712 break;
1713 default:
1714 ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n",
1715 xpl->sadb_x_policy_dir));
1716 return EINVAL;
1717 }
1718
1719 return ipsec_set_policy(pcb_sp, optname, request, len, priv);
1720}
1721
1722int
1723ipsec6_delete_pcbpolicy(struct in6pcb *in6p)
1724{
1725
1726 /* sanity check. */
1727 if (in6p == NULL)
1728 panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n");
1729
1730 if (in6p->in6p_sp == NULL)
1731 return 0;
1732
1733 if (in6p->in6p_sp->sp_in != NULL) {
1734 key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED);
1735 in6p->in6p_sp->sp_in = NULL;
1736 }
1737
1738 if (in6p->in6p_sp->sp_out != NULL) {
1739 key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED);
1740 in6p->in6p_sp->sp_out = NULL;
1741 }
1742
1743 ipsec_delpcbpolicy(in6p->in6p_sp);
1744 in6p->in6p_sp = NULL;
1745
1746 return 0;
1747}
1748#endif
1749
1750/*
1751 * Return the current level.
1752 * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE is always returned.
1753 */
1754u_int
1755ipsec_get_reqlevel(struct ipsecrequest *isr)
1756{
1757 u_int level = 0;
1758 u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0;
1759
1760 /* sanity check */
1761 if (isr == NULL || isr->sp == NULL)
1762 panic("ipsec_get_reqlevel: NULL pointer is passed.\n");
1763 if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family
1764 != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family)
1765 panic("ipsec_get_reqlevel: family mismatched.\n");
1766
1767/* XXX note that we have ipseclog() expanded here - code sync issue */
1768#define IPSEC_CHECK_DEFAULT(lev) \
1769 (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \
1770 && (lev) != IPSEC_LEVEL_UNIQUE) \
1771 ? (ipsec_debug \
1772 ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\
1773 (lev), IPSEC_LEVEL_REQUIRE) \
1774 : (void)0), \
1775 (lev) = IPSEC_LEVEL_REQUIRE, \
1776 (lev) \
1777 : (lev))
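/*
 * Editorial note: IPSEC_CHECK_DEFAULT() evaluates to the level itself when it
 * is USE, REQUIRE, or UNIQUE; any other value is clamped to
 * IPSEC_LEVEL_REQUIRE (logging the fix-up when ipsec_debug is set) via the
 * comma expressions in the middle branch of the conditional operator.
 */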
1778
1779 /* set default level */
1780 switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) {
1781#if INET
1782 case AF_INET:
1783 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev);
1784 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev);
1785 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev);
1786 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev);
1787 break;
1788#endif
1789#if INET6
1790 case AF_INET6:
1791 esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev);
1792 esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev);
1793 ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev);
1794 ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev);
1795 break;
1796#endif /* INET6 */
1797 default:
1798 panic("ipsec_get_reqlevel: Unknown family. %d\n",
1799 ((struct sockaddr *)&isr->sp->spidx.src)->sa_family);
1800 }
1801
1802#undef IPSEC_CHECK_DEFAULT
1803
1804 /* set level */
1805 switch (isr->level) {
1806 case IPSEC_LEVEL_DEFAULT:
1807 switch (isr->saidx.proto) {
1808 case IPPROTO_ESP:
1809 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1810 level = esp_net_deflev;
1811 else
1812 level = esp_trans_deflev;
1813 break;
1814 case IPPROTO_AH:
1815 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
1816 level = ah_net_deflev;
1817 else
1818 level = ah_trans_deflev;
1819 break;
1820 case IPPROTO_IPCOMP:
1821 /*
1822 * we don't really care, as the IPComp document says that
1823 * we shouldn't compress small packets
1824 */
1825 level = IPSEC_LEVEL_USE;
1826 break;
1827 default:
1828 panic("ipsec_get_reqlevel: "
1829 "Illegal protocol defined %u\n",
1830 isr->saidx.proto);
1831 }
1832 break;
1833
1834 case IPSEC_LEVEL_USE:
1835 case IPSEC_LEVEL_REQUIRE:
1836 level = isr->level;
1837 break;
1838 case IPSEC_LEVEL_UNIQUE:
1839 level = IPSEC_LEVEL_REQUIRE;
1840 break;
1841
1842 default:
1843 panic("ipsec_get_reqlevel: Illegal IPsec level %u\n",
1844 isr->level);
1845 }
1846
1847 return level;
1848}
1849
1850/*
1851 * Check AH/ESP integrity.
1852 * OUT:
1853 * 0: valid
1854 * 1: invalid
1855 */
1856static int
1857ipsec_in_reject(struct secpolicy *sp, struct mbuf *m)
1858{
1859 struct ipsecrequest *isr;
1860 u_int level;
1861 int need_auth, need_conf, need_icv;
1862
1863 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
1864 printf("ipsec_in_reject: using SP\n");
1865 kdebug_secpolicy(sp));
1866
1867 /* check policy */
1868 switch (sp->policy) {
1869 case IPSEC_POLICY_DISCARD:
1870 case IPSEC_POLICY_GENERATE:
1871 return 1;
1872 case IPSEC_POLICY_BYPASS:
1873 case IPSEC_POLICY_NONE:
1874 return 0;
1875
1876 case IPSEC_POLICY_IPSEC:
1877 break;
1878
1879 case IPSEC_POLICY_ENTRUST:
1880 default:
1881 panic("ipsec_in_reject: Invalid policy found. %d\n", sp->policy);
1882 }
1883
1884 need_auth = 0;
1885 need_conf = 0;
1886 need_icv = 0;
1887
1888 /* XXX should compare policy against ipsec header history */
1889
1890 for (isr = sp->req; isr != NULL; isr = isr->next) {
1891
1892 /* get current level */
1893 level = ipsec_get_reqlevel(isr);
1894
1895 switch (isr->saidx.proto) {
1896 case IPPROTO_ESP:
1897 if (level == IPSEC_LEVEL_REQUIRE) {
1898 need_conf++;
1899
1900#if 0
1901 /* this won't work with multiple input threads - isr->sav would change
1902 * with every packet and is not necessarily related to the current packet
1903 * being processed. If ESP processing is required - the esp code should
1904 * make sure that the integrity check is present and correct. I don't see
1905 * why it would be necessary to check for the presence of the integrity
1906 * check value here. I think this is just wrong.
1907 * isr->sav has been removed.
1908 * %%%%%% this needs to be re-worked at some point but I think the code below can
1909 * be ignored for now.
1910 */
1911 if (isr->sav != NULL
1912 && isr->sav->flags == SADB_X_EXT_NONE
1913 && isr->sav->alg_auth != SADB_AALG_NONE)
1914 need_icv++;
1915#endif
1916 }
1917 break;
1918 case IPPROTO_AH:
1919 if (level == IPSEC_LEVEL_REQUIRE) {
1920 need_auth++;
1921 need_icv++;
1922 }
1923 break;
1924 case IPPROTO_IPCOMP:
1925 /*
1926 * we don't really care, as the IPComp document says that
1927 * we shouldn't compress small packets; an IPComp policy
1928 * should always be treated as being in "use" level.
1929 */
1930 break;
1931 }
1932 }
1933
1934 KEYDEBUG(KEYDEBUG_IPSEC_DUMP,
1935 printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n",
1936 need_auth, need_conf, need_icv, m->m_flags));
1937
1938 if ((need_conf && !(m->m_flags & M_DECRYPTED))
1939 || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM))
1940 || (need_auth && !(m->m_flags & M_AUTHIPHDR)))
1941 return 1;
1942
1943 return 0;
1944}
1945
1946/*
1947 * Check AH/ESP integrity.
1948 * This function is called from tcp_input(), udp_input(),
1949 * and {ah,esp}4_input for tunnel mode
1950 */
1951int
1952ipsec4_in_reject_so(struct mbuf *m, struct socket *so)
1953{
1954 struct secpolicy *sp = NULL;
1955 int error;
1956 int result;
1957
1958 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
1959 /* sanity check */
1960 if (m == NULL)
1961 return 0; /* XXX should be panic ? */
1962
1963 /* get SP for this packet.
1964 * When we are called from ip_forward(), we call
1965 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
1966 */
1967 if (so == NULL)
1968 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
1969 else
1970 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
1971
1972 if (sp == NULL)
1973 return 0; /* XXX should be panic ?
1974 * -> No, there may be error. */
1975
1976 result = ipsec_in_reject(sp, m);
1977 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1978 printf("DP ipsec4_in_reject_so call free SP:0x%llx\n",
1979 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
1980 key_freesp(sp, KEY_SADB_UNLOCKED);
1981
1982 return result;
1983}
1984
1985int
1986ipsec4_in_reject(struct mbuf *m, struct inpcb *inp)
1987{
1988 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
1989 if (inp == NULL)
1990 return ipsec4_in_reject_so(m, NULL);
1991 if (inp->inp_socket)
1992 return ipsec4_in_reject_so(m, inp->inp_socket);
1993 else
1994 panic("ipsec4_in_reject: invalid inpcb/socket");
1995
1996 /* NOTREACHED */
1997 return 0;
1998}
1999
2000#if INET6
2001/*
2002 * Check AH/ESP integrity.
2003 * This function is called from tcp6_input(), udp6_input(),
2004 * and {ah,esp}6_input for tunnel mode
2005 */
2006int
2007ipsec6_in_reject_so(struct mbuf *m, struct socket *so)
2008{
2009 struct secpolicy *sp = NULL;
2010 int error;
2011 int result;
2012
2013 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2014 /* sanity check */
2015 if (m == NULL)
2016 return 0; /* XXX should be panic ? */
2017
2018 /* get SP for this packet.
2019 * When we are called from ip_forward(), we call
2020 * ipsec6_getpolicybyaddr() with IP_FORWARDING flag.
2021 */
2022 if (so == NULL)
2023 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error);
2024 else
2025 sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error);
2026
2027 if (sp == NULL)
2028 return 0; /* XXX should be panic ? */
2029
2030 result = ipsec_in_reject(sp, m);
2031 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2032 printf("DP ipsec6_in_reject_so call free SP:0x%llx\n",
2033 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2034 key_freesp(sp, KEY_SADB_UNLOCKED);
2035
2036 return result;
2037}
2038
2039int
2040ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p)
2041{
2042
2043 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2044 if (in6p == NULL)
2045 return ipsec6_in_reject_so(m, NULL);
2046 if (in6p->in6p_socket)
2047 return ipsec6_in_reject_so(m, in6p->in6p_socket);
2048 else
2049 panic("ipsec6_in_reject: invalid in6p/socket");
2050
2051 /* NOTREACHED */
2052 return 0;
2053}
2054#endif
2055
2056/*
2057 * compute the byte size to be occupied by IPsec header.
2058 * in case it is tunneled, it includes the size of outer IP header.
2059 * NOTE: the SP passed in is not freed here; the caller must free it.
2060 */
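/*
 * For example, a policy carrying a single ESP request in tunnel mode
 * over IPv4 contributes esp_hdrsiz(isr) plus sizeof(struct ip)
 * (20 bytes) to the total, while an AH request in transport mode
 * contributes only ah_hdrsiz(isr).
 */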
2061size_t
2062ipsec_hdrsiz(struct secpolicy *sp)
2063{
2064 struct ipsecrequest *isr;
2065 size_t siz, clen;
2066
2067 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2068 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2069 printf("ipsec_hdrsiz: using SP\n");
2070 kdebug_secpolicy(sp));
2071
2072 /* check policy */
2073 switch (sp->policy) {
2074 case IPSEC_POLICY_DISCARD:
2075 case IPSEC_POLICY_GENERATE:
2076 case IPSEC_POLICY_BYPASS:
2077 case IPSEC_POLICY_NONE:
2078 return 0;
2079
2080 case IPSEC_POLICY_IPSEC:
2081 break;
2082
2083 case IPSEC_POLICY_ENTRUST:
2084 default:
2085 panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy);
2086 }
2087
2088 siz = 0;
2089
2090 for (isr = sp->req; isr != NULL; isr = isr->next) {
2091
2092 clen = 0;
2093
2094 switch (isr->saidx.proto) {
2095 case IPPROTO_ESP:
2096#if IPSEC_ESP
2097 clen = esp_hdrsiz(isr);
2098#else
2099 clen = 0; /*XXX*/
2100#endif
2101 break;
2102 case IPPROTO_AH:
2103 clen = ah_hdrsiz(isr);
2104 break;
2105 case IPPROTO_IPCOMP:
2106 clen = sizeof(struct ipcomp);
2107 break;
2108 }
2109
2110 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
2111 switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) {
2112 case AF_INET:
2113 clen += sizeof(struct ip);
2114 break;
2115#if INET6
2116 case AF_INET6:
2117 clen += sizeof(struct ip6_hdr);
2118 break;
2119#endif
2120 default:
2121 ipseclog((LOG_ERR, "ipsec_hdrsiz: "
2122 "unknown AF %d in IPsec tunnel SA\n",
2123 ((struct sockaddr *)&isr->saidx.dst)->sa_family));
2124 break;
2125 }
2126 }
2127 siz += clen;
2128 }
2129
2130 return siz;
2131}
2132
2133/* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */
2134size_t
2135ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp)
2136{
2137 struct secpolicy *sp = NULL;
2138 int error;
2139 size_t size;
2140
2141 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2142 /* sanity check */
2143 if (m == NULL)
2144 return 0; /* XXX should be panic ? */
2145 if (inp != NULL && inp->inp_socket == NULL)
2146 panic("ipsec4_hdrsiz: socket is NULL although a PCB is present.");
2147
2148 /* get SP for this packet.
2149 * When we are called from ip_forward(), we call
2150 * ipsec4_getpolicybyaddr() with IP_FORWARDING flag.
2151 */
2152 if (inp == NULL)
2153 sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2154 else
2155 sp = ipsec4_getpolicybyaddr(m, dir, 0, &error);
2156
2157 if (sp == NULL)
2158 return 0; /* XXX should be panic ? */
2159
2160 size = ipsec_hdrsiz(sp);
2161 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2162 printf("DP ipsec4_hdrsiz call free SP:0x%llx\n",
2163 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2164 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2165 printf("ipsec4_hdrsiz: size:%u.\n", (u_int32_t)size));
2166 key_freesp(sp, KEY_SADB_UNLOCKED);
2167
2168 return size;
2169}
2170
2171#if INET6
2172/* This function is called from ipsec6_hdrsize_tcp(),
2173 * and maybe from ip6_forward().
2174 */
2175size_t
2176ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p)
2177{
2178 struct secpolicy *sp = NULL;
2179 int error;
2180 size_t size;
2181
2182 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
2183 /* sanity check */
2184 if (m == NULL)
2185 return 0; /* XXX should be panic ? */
2186 if (in6p != NULL && in6p->in6p_socket == NULL)
2187 panic("ipsec6_hdrsiz: socket is NULL although a PCB is present.");
2188
2189 /* get SP for this packet */
2190 /* XXX Is it right to call with IP_FORWARDING. */
2191 if (in6p == NULL)
2192 sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error);
2193 else
2194 sp = ipsec6_getpolicybyaddr(m, dir, 0, &error);
2195
2196 if (sp == NULL)
2197 return 0;
2198 size = ipsec_hdrsiz(sp);
2199 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
2200 printf("DP ipsec6_hdrsiz call free SP:0x%llx\n",
2201 (uint64_t)VM_KERNEL_ADDRPERM(sp)));
2202 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
2203 printf("ipsec6_hdrsiz: size:%u.\n", (u_int32_t)size));
2204 key_freesp(sp, KEY_SADB_UNLOCKED);
2205
2206 return size;
2207}
2208#endif /*INET6*/
2209
2210#if INET
2211/*
2212 * encapsulate for ipsec tunnel.
2213 * ip->ip_src must be fixed later on.
2214 */
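/*
 * Illustrative mbuf layout before and after encapsulation:
 *
 *   before: m[ inner IPv4 hdr + options (hlen) ] -> [ payload ... ]
 *   after:  m[ outer IPv4 hdr (20 bytes) ] -> [ inner hdr (hlen) ] -> [ payload ... ]
 *
 * The inner header is copied into the following mbuf and a fresh outer
 * header is constructed in the first mbuf per RFC 2401 5.1.2.1.
 */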
2215int
2216ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav)
2217{
2218 struct ip *oip;
2219 struct ip *ip;
2220 size_t hlen;
2221 size_t plen;
2222
2223 /* can't tunnel between different AFs */
2224 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2225 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2226 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2227 m_freem(m);
2228 return EINVAL;
2229 }
2230#if 0
2231 /* XXX if the dst is myself, perform nothing. */
2232 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2233 m_freem(m);
2234 return EINVAL;
2235 }
2236#endif
2237
2238 if (m->m_len < sizeof(*ip))
2239 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2240
2241 ip = mtod(m, struct ip *);
2242#ifdef _IP_VHL
2243 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2244#else
2245 hlen = ip->ip_hl << 2;
2246#endif
2247
2248 if (m->m_len != hlen)
2249 panic("ipsec4_encapsulate: assumption failed (first mbuf length)");
2250
2251 /* generate header checksum */
2252 ip->ip_sum = 0;
2253#ifdef _IP_VHL
2254 ip->ip_sum = in_cksum(m, hlen);
2255#else
2256 ip->ip_sum = in_cksum(m, hlen);
2257#endif
2258
2259 plen = m->m_pkthdr.len;
2260
2261 /*
2262 * grow the mbuf to accommodate the new IPv4 header.
2263 * NOTE: IPv4 options will never be copied.
2264 */
2265 if (M_LEADINGSPACE(m->m_next) < hlen) {
2266 struct mbuf *n;
2267 MGET(n, M_DONTWAIT, MT_DATA);
2268 if (!n) {
2269 m_freem(m);
2270 return ENOBUFS;
2271 }
2272 n->m_len = hlen;
2273 n->m_next = m->m_next;
2274 m->m_next = n;
2275 m->m_pkthdr.len += hlen;
2276 oip = mtod(n, struct ip *);
2277 } else {
2278 m->m_next->m_len += hlen;
2279 m->m_next->m_data -= hlen;
2280 m->m_pkthdr.len += hlen;
2281 oip = mtod(m->m_next, struct ip *);
2282 }
2283 ip = mtod(m, struct ip *);
2284 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2285 m->m_len = sizeof(struct ip);
2286 m->m_pkthdr.len -= (hlen - sizeof(struct ip));
2287
2288 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2289 /* ECN consideration. */
2290 ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos);
2291#ifdef _IP_VHL
2292 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2293#else
2294 ip->ip_hl = sizeof(struct ip) >> 2;
2295#endif
2296 ip->ip_off &= htons(~IP_OFFMASK);
2297 ip->ip_off &= htons(~IP_MF);
2298 switch (ip4_ipsec_dfbit) {
2299 case 0: /* clear DF bit */
2300 ip->ip_off &= htons(~IP_DF);
2301 break;
2302 case 1: /* set DF bit */
2303 ip->ip_off |= htons(IP_DF);
2304 break;
2305 default: /* copy DF bit */
2306 break;
2307 }
2308 ip->ip_p = IPPROTO_IPIP;
2309 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2310 ip->ip_len = htons(plen + sizeof(struct ip));
2311 else {
2312 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2313 "leave ip_len as is (invalid packet)\n"));
2314 }
2315 ip->ip_id = ip_randomid();
2316 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2317 &ip->ip_src, sizeof(ip->ip_src));
2318 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2319 &ip->ip_dst, sizeof(ip->ip_dst));
2320 ip->ip_ttl = IPDEFTTL;
2321
2322 /* XXX Should ip_src be updated later ? */
2323
2324 return 0;
2325}
2326
2327#endif /*INET*/
2328
2329#if INET6
2330int
2331ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav)
2332{
2333 struct ip6_hdr *oip6;
2334 struct ip6_hdr *ip6;
2335 size_t plen;
2336
2337 /* can't tunnel between different AFs */
2338 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2339 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2340 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2341 m_freem(m);
2342 return EINVAL;
2343 }
2344#if 0
2345 /* XXX if the dst is myself, perform nothing. */
2346 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2347 m_freem(m);
2348 return EINVAL;
2349 }
2350#endif
2351
2352 plen = m->m_pkthdr.len;
2353
2354 /*
2355 * grow the mbuf to accommodate the new IPv6 header.
2356 */
2357 if (m->m_len != sizeof(struct ip6_hdr))
2358 panic("ipsec6_encapsulate: assumption failed (first mbuf length)");
2359 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2360 struct mbuf *n;
2361 MGET(n, M_DONTWAIT, MT_DATA);
2362 if (!n) {
2363 m_freem(m);
2364 return ENOBUFS;
2365 }
2366 n->m_len = sizeof(struct ip6_hdr);
2367 n->m_next = m->m_next;
2368 m->m_next = n;
2369 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2370 oip6 = mtod(n, struct ip6_hdr *);
2371 } else {
2372 m->m_next->m_len += sizeof(struct ip6_hdr);
2373 m->m_next->m_data -= sizeof(struct ip6_hdr);
2374 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2375 oip6 = mtod(m->m_next, struct ip6_hdr *);
2376 }
2377 ip6 = mtod(m, struct ip6_hdr *);
2378 ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr));
2379
2380 /* Fake link-local scope-class addresses */
2381 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src))
2382 oip6->ip6_src.s6_addr16[1] = 0;
2383 if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst))
2384 oip6->ip6_dst.s6_addr16[1] = 0;
2385
2386 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2387 /* ECN consideration. */
2388 ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow);
2389 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2390 ip6->ip6_plen = htons(plen);
2391 else {
2392 /* ip6->ip6_plen will be updated in ip6_output() */
2393 }
2394 ip6->ip6_nxt = IPPROTO_IPV6;
2395 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2396 &ip6->ip6_src, sizeof(ip6->ip6_src));
2397 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2398 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2399 ip6->ip6_hlim = IPV6_DEFHLIM;
2400
2401 /* XXX Should ip6_src be updated later ? */
2402
2403 return 0;
2404}
2405
2406static int
2407ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav)
2408{
2409 struct ip6_hdr *ip6, *ip6i;
2410 struct ip *ip;
2411 size_t plen;
2412 u_int8_t hlim;
2413
2414 /* tunneling over IPv4 */
2415 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2416 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2417 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) {
2418 m_freem(m);
2419 return EINVAL;
2420 }
2421#if 0
2422 /* XXX if the dst is myself, perform nothing. */
2423 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2424 m_freem(m);
2425 return EINVAL;
2426 }
2427#endif
2428
2429 plen = m->m_pkthdr.len;
2430 ip6 = mtod(m, struct ip6_hdr *);
2431 hlim = ip6->ip6_hlim;
2432 /*
2433 * grow the mbuf to accommodate the new IPv4 header.
2434 */
2435 if (m->m_len != sizeof(struct ip6_hdr))
2436 panic("ipsec64_encapsulate: assumption failed (first mbuf length)");
2437 if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) {
2438 struct mbuf *n;
2439 MGET(n, M_DONTWAIT, MT_DATA);
2440 if (!n) {
2441 m_freem(m);
2442 return ENOBUFS;
2443 }
2444 n->m_len = sizeof(struct ip6_hdr);
2445 n->m_next = m->m_next;
2446 m->m_next = n;
2447 m->m_pkthdr.len += sizeof(struct ip);
2448 ip6i = mtod(n, struct ip6_hdr *);
2449 } else {
2450 m->m_next->m_len += sizeof(struct ip6_hdr);
2451 m->m_next->m_data -= sizeof(struct ip6_hdr);
2452 m->m_pkthdr.len += sizeof(struct ip);
2453 ip6i = mtod(m->m_next, struct ip6_hdr *);
2454 }
2455
2456 bcopy(ip6, ip6i, sizeof(struct ip6_hdr));
2457 ip = mtod(m, struct ip *);
2458 m->m_len = sizeof(struct ip);
2459 /*
2460 * Fill in some of the IPv4 fields - we don't need all of them
2461 * because the rest will be filled in by ip_output
2462 */
2463 ip->ip_v = IPVERSION;
2464 ip->ip_hl = sizeof(struct ip) >> 2;
2465 ip->ip_id = 0;
2466 ip->ip_sum = 0;
2467 ip->ip_tos = 0;
2468 ip->ip_off = 0;
2469 ip->ip_ttl = hlim;
2470 ip->ip_p = IPPROTO_IPV6;
2471
2472 /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
2473 /* ECN consideration. */
2474 ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
2475
2476 if (plen + sizeof(struct ip) < IP_MAXPACKET)
2477 ip->ip_len = htons(plen + sizeof(struct ip));
2478 else {
2479 ip->ip_len = htons(plen);
2480 ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: "
2481 "leave ip_len as is (invalid packet)\n"));
2482 }
2483 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr,
2484 &ip->ip_src, sizeof(ip->ip_src));
2485 bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr,
2486 &ip->ip_dst, sizeof(ip->ip_dst));
2487
2488 return 0;
2489}
2490
2491int
2492ipsec6_update_routecache_and_output(
2493 struct ipsec_output_state *state,
2494 struct secasvar *sav)
2495{
2496 struct sockaddr_in6* dst6;
2497 struct route *ro6;
2498 struct ip6_hdr *ip6;
2499 errno_t error = 0;
2500
2501 int plen;
2502 struct ip6_out_args ip6oa;
2503 struct route_in6 ro6_new;
2504 struct flowadv *adv = NULL;
2505
2506 if (!state->m) {
2507 return EINVAL;
2508 }
2509 ip6 = mtod(state->m, struct ip6_hdr *);
2510
2511 // grab sadb_mutex, before updating sah's route cache
2512 lck_mtx_lock(sadb_mutex);
2513 ro6 = &sav->sah->sa_route;
2514 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
2515 if (ro6->ro_rt) {
2516 RT_LOCK(ro6->ro_rt);
2517 }
2518 if (ROUTE_UNUSABLE(ro6) ||
2519 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
2520 if (ro6->ro_rt != NULL)
2521 RT_UNLOCK(ro6->ro_rt);
2522 ROUTE_RELEASE(ro6);
2523 }
2524 if (ro6->ro_rt == 0) {
2525 bzero(dst6, sizeof(*dst6));
2526 dst6->sin6_family = AF_INET6;
2527 dst6->sin6_len = sizeof(*dst6);
2528 dst6->sin6_addr = ip6->ip6_dst;
2529 rtalloc_scoped(ro6, sav->sah->outgoing_if);
2530 if (ro6->ro_rt) {
2531 RT_LOCK(ro6->ro_rt);
2532 }
2533 }
2534 if (ro6->ro_rt == 0) {
2535 ip6stat.ip6s_noroute++;
2536 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
2537 error = EHOSTUNREACH;
2538 // release sadb_mutex, after updating sah's route cache
2539 lck_mtx_unlock(sadb_mutex);
2540 return error;
2541 }
2542
2543 /*
2544 * adjust state->dst if tunnel endpoint is offlink
2545 *
2546 * XXX: caching rt_gateway value in the state is
2547 * not really good, since it may point elsewhere
2548 * when the gateway gets modified to a larger
2549 * sockaddr via rt_setgate(). This is currently
2550 * addressed by SA_SIZE roundup in that routine.
2551 */
2552 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
2553 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
2554 RT_UNLOCK(ro6->ro_rt);
2555 ROUTE_RELEASE(&state->ro);
2556 route_copyout(&state->ro, ro6, sizeof(state->ro));
2557 state->dst = (struct sockaddr *)dst6;
2558 state->tunneled = 6;
2559 // release sadb_mutex, after updating sah's route cache
2560 lck_mtx_unlock(sadb_mutex);
2561
2562 state->m = ipsec6_splithdr(state->m);
2563 if (!state->m) {
2564 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
2565 error = ENOMEM;
2566 return error;
2567 }
2568
2569 ip6 = mtod(state->m, struct ip6_hdr *);
2570 switch (sav->sah->saidx.proto) {
2571 case IPPROTO_ESP:
2572#if IPSEC_ESP
2573 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2574#else
2575 m_freem(state->m);
2576 error = EINVAL;
2577#endif
2578 break;
2579 case IPPROTO_AH:
2580 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
2581 break;
2582 case IPPROTO_IPCOMP:
2583 /* XXX code should be here */
2584 /*FALLTHROUGH*/
2585 default:
2586 ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto));
2587 m_freem(state->m);
2588 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2589 error = EINVAL;
2590 break;
2591 }
2592 if (error) {
2593 // If error, packet already freed by above output routines
2594 state->m = NULL;
2595 return error;
2596 }
2597
2598 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
2599 if (plen > IPV6_MAXPACKET) {
2600 ipseclog((LOG_ERR, "%s: IPsec with IPv6 jumbogram is not supported\n", __FUNCTION__));
2601 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
2602 error = EINVAL;/*XXX*/
2603 return error;
2604 }
2605 ip6 = mtod(state->m, struct ip6_hdr *);
2606 ip6->ip6_plen = htons(plen);
2607
2608 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6);
2609 ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa);
2610
2611 /* Increment statistics */
2612 ifnet_stat_increment_out(sav->sah->ipsec_if, 1, mbuf_pkthdr_len(state->m), 0);
2613
2614 /* Send to ip6_output */
2615 bzero(&ro6_new, sizeof(ro6_new));
2616 bzero(&ip6oa, sizeof(ip6oa));
2617 ip6oa.ip6oa_flowadv.code = 0;
2618 ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR;
2619 if (state->outgoing_if) {
2620 ip6oa.ip6oa_boundif = state->outgoing_if;
2621 ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF;
2622 }
2623
2624 adv = &ip6oa.ip6oa_flowadv;
2625 (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa);
2626
2627 if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) {
2628 error = ENOBUFS;
2629 ifnet_disable_output(sav->sah->ipsec_if);
2630 return error;
2631 }
2632
2633 return 0;
2634}
2635
2636int
2637ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
2638{
2639 struct mbuf *m;
2640 struct ip6_hdr *ip6;
2641 struct ip *oip;
2642 struct ip *ip;
2643 size_t hlen;
2644 size_t plen;
2645
2646 m = state->m;
2647 if (!m) {
2648 return EINVAL;
2649 }
2650
2651 /* can't tunnel between different AFs */
2652 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
2653 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family
2654 || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) {
2655 m_freem(m);
2656 return EINVAL;
2657 }
2658#if 0
2659 /* XXX if the dst is myself, perform nothing. */
2660 if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) {
2661 m_freem(m);
2662 return EINVAL;
2663 }
2664#endif
2665
2666 if (m->m_len < sizeof(*ip)) {
2667 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2668 return EINVAL;
2669 }
2670
2671 ip = mtod(m, struct ip *);
2672#ifdef _IP_VHL
2673 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
2674#else
2675 hlen = ip->ip_hl << 2;
2676#endif
2677
2678 if (m->m_len != hlen) {
2679 panic("ipsec46_encapsulate: assumption failed (first mbuf length)");
2680 return EINVAL;
2681 }
2682
2683 /* generate header checksum */
2684 ip->ip_sum = 0;
2685#ifdef _IP_VHL
2686 ip->ip_sum = in_cksum(m, hlen);
2687#else
2688 ip->ip_sum = in_cksum(m, hlen);
2689#endif
2690
2691 plen = m->m_pkthdr.len; // save original IPv4 packet len, this will be ipv6 payload len
2692
2693 /*
2694 * First move the IPv4 header to the second mbuf in the chain
2695 */
2696 if (M_LEADINGSPACE(m->m_next) < hlen) {
2697 struct mbuf *n;
2698 MGET(n, M_DONTWAIT, MT_DATA);
2699 if (!n) {
2700 m_freem(m);
2701 return ENOBUFS;
2702 }
2703 n->m_len = hlen;
2704 n->m_next = m->m_next;
2705 m->m_next = n;
2706 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2707 oip = mtod(n, struct ip *);
2708 } else {
2709 m->m_next->m_len += hlen;
2710 m->m_next->m_data -= hlen;
2711 m->m_pkthdr.len += sizeof(struct ip6_hdr);
2712 oip = mtod(m->m_next, struct ip *);
2713 }
2714 ip = mtod(m, struct ip *);
2715 ovbcopy((caddr_t)ip, (caddr_t)oip, hlen);
2716
2717 /*
2718 * Grow the first mbuf to accommodate the new IPv6 header.
2719 */
2720 if (M_LEADINGSPACE(m) < sizeof(struct ip6_hdr) - hlen) {
2721 struct mbuf *n;
2722 MGETHDR(n, M_DONTWAIT, MT_HEADER);
2723 if (!n) {
2724 m_freem(m);
2725 return ENOBUFS;
2726 }
2727 M_COPY_PKTHDR(n, m);
2728 MH_ALIGN(n, sizeof(struct ip6_hdr));
2729 n->m_len = sizeof(struct ip6_hdr);
2730 n->m_next = m->m_next;
2731 m->m_next = NULL;
2732 m_freem(m);
2733 state->m = n;
2734 m = state->m;
2735 } else {
2736 m->m_len += (sizeof(struct ip6_hdr) - hlen);
2737 m->m_data -= (sizeof(struct ip6_hdr) - hlen);
2738 }
2739 ip6 = mtod(m, struct ip6_hdr *);
2740 ip6->ip6_flow = 0;
2741 ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
2742 ip6->ip6_vfc |= IPV6_VERSION;
2743
2744 /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
2745 /* ECN consideration. */
2746 ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
2747 if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr))
2748 ip6->ip6_plen = htons(plen);
2749 else {
2750 /* ip6->ip6_plen will be updated in ip6_output() */
2751 }
2752
2753 ip6->ip6_nxt = IPPROTO_IPV4;
2754 ip6->ip6_hlim = IPV6_DEFHLIM;
2755
2756 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr,
2757 &ip6->ip6_src, sizeof(ip6->ip6_src));
2758 bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr,
2759 &ip6->ip6_dst, sizeof(ip6->ip6_dst));
2760
2761 return 0;
2762}
2763
2764#endif /*INET6*/
2765
2766/*
2767 * Check the variable replay window.
2768 * ipsec_chkreplay() performs replay check before ICV verification.
2769 * ipsec_updatereplay() updates replay bitmap. This must be called after
2770 * ICV verification (it also performs replay check, which is usually done
2771 * beforehand).
2772 * 0 (zero) is returned if packet disallowed, 1 if packet permitted.
2773 *
2774 * based on RFC 2401.
2775 */
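/*
 * Illustrative example (hypothetical values): with wsize = 4 bytes the
 * window covers wsizeb = 32 sequence numbers.  If lastseq = 1000, an
 * arriving seq = 995 gives diff = 5, so the bit examined is bit
 * (diff % 8) = 5 of bitmap[frlast - diff/8] = bitmap[3]; seq = 950
 * gives diff = 50 >= 32 and is rejected as too old.
 */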
2776int
2777ipsec_chkreplay(u_int32_t seq, struct secasvar *sav)
2778{
2779 const struct secreplay *replay;
2780 u_int32_t diff;
2781 int fr;
2782 u_int32_t wsizeb; /* constant: bits of window size */
2783 int frlast; /* constant: last frame */
2784
2785
2786 /* sanity check */
2787 if (sav == NULL)
2788 panic("ipsec_chkreplay: NULL pointer was passed.\n");
2789
2790 lck_mtx_lock(sadb_mutex);
2791 replay = sav->replay;
2792
2793 if (replay->wsize == 0) {
2794 lck_mtx_unlock(sadb_mutex);
2795 return 1; /* no need to check replay. */
2796 }
2797
2798 /* constant */
2799 frlast = replay->wsize - 1;
2800 wsizeb = replay->wsize << 3;
2801
2802 /* sequence number of 0 is invalid */
2803 if (seq == 0) {
2804 lck_mtx_unlock(sadb_mutex);
2805 return 0;
2806 }
2807
2808 /* first time is always okay */
2809 if (replay->count == 0) {
2810 lck_mtx_unlock(sadb_mutex);
2811 return 1;
2812 }
2813
2814 if (seq > replay->lastseq) {
2815 /* larger sequences are okay */
2816 lck_mtx_unlock(sadb_mutex);
2817 return 1;
2818 } else {
2819 /* seq is equal or less than lastseq. */
2820 diff = replay->lastseq - seq;
2821
2822 /* over range to check, i.e. too old or wrapped */
2823 if (diff >= wsizeb) {
2824 lck_mtx_unlock(sadb_mutex);
2825 return 0;
2826 }
2827
2828 fr = frlast - diff / 8;
2829
2830 /* this packet already seen ? */
2831 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2832 lck_mtx_unlock(sadb_mutex);
2833 return 0;
2834 }
2835
2836 /* out of order but good */
2837 lck_mtx_unlock(sadb_mutex);
2838 return 1;
2839 }
2840}
2841
2842/*
2843 * check whether the replay counter should be updated.
2844 * OUT: 0: OK
2845 * 1: NG
2846 */
2847int
2848ipsec_updatereplay(u_int32_t seq, struct secasvar *sav)
2849{
2850 struct secreplay *replay;
2851 u_int32_t diff;
2852 int fr;
2853 u_int32_t wsizeb; /* constant: bits of window size */
2854 int frlast; /* constant: last frame */
2855
2856 /* sanity check */
2857 if (sav == NULL)
2858 panic("ipsec_updatereplay: NULL pointer was passed.\n");
2859
2860 lck_mtx_lock(sadb_mutex);
2861 replay = sav->replay;
2862
2863 if (replay->wsize == 0)
2864 goto ok; /* no need to check replay. */
2865
2866 /* constant */
2867 frlast = replay->wsize - 1;
2868 wsizeb = replay->wsize << 3;
2869
2870 /* sequence number of 0 is invalid */
2871 if (seq == 0)
2872 { lck_mtx_unlock(sadb_mutex); return 1; }
2873
2874 /* first time */
2875 if (replay->count == 0) {
2876 replay->lastseq = seq;
2877 bzero(replay->bitmap, replay->wsize);
2878 (replay->bitmap)[frlast] = 1;
2879 goto ok;
2880 }
2881
2882 if (seq > replay->lastseq) {
2883 /* seq is larger than lastseq. */
2884 diff = seq - replay->lastseq;
2885
2886 /* new larger sequence number */
2887 if (diff < wsizeb) {
2888 /* In window */
2889 /* set bit for this packet */
2890 vshiftl((unsigned char *) replay->bitmap, diff, replay->wsize);
2891 (replay->bitmap)[frlast] |= 1;
2892 } else {
2893 /* this packet has a "way larger" sequence number */
2894 bzero(replay->bitmap, replay->wsize);
2895 (replay->bitmap)[frlast] = 1;
2896 }
2897 replay->lastseq = seq;
2898
2899 /* larger is good */
2900 } else {
2901 /* seq is equal or less than lastseq. */
2902 diff = replay->lastseq - seq;
2903
2904 /* over range to check, i.e. too old or wrapped */
2905 if (diff >= wsizeb) {
2906 lck_mtx_unlock(sadb_mutex);
2907 return 1;
2908 }
2909
2910 fr = frlast - diff / 8;
2911
2912 /* this packet already seen ? */
2913 if ((replay->bitmap)[fr] & (1 << (diff % 8))) {
2914 lck_mtx_unlock(sadb_mutex);
2915 return 1;
2916 }
2917
2918 /* mark as seen */
2919 (replay->bitmap)[fr] |= (1 << (diff % 8));
2920
2921 /* out of order but good */
2922 }
2923
2924ok:
2925 if (replay->count == ~0) {
2926
2927 /* count the overflow (sequence number space has wrapped) */
2928 replay->overflow++;
2929
2930 /* don't increment, no more packets accepted */
2931 if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
2932 lck_mtx_unlock(sadb_mutex);
2933 return 1;
2934 }
2935
2936 ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n",
2937 replay->overflow, ipsec_logsastr(sav)));
2938 }
2939
2940 replay->count++;
2941
2942 lck_mtx_unlock(sadb_mutex);
2943 return 0;
2944}
2945
2946/*
2947 * shift variable length buffer to left.
2948 * IN: bitmap: pointer to the buffer
2949 * nbit: the number of bits to shift.
2950 * wsize: buffer size (bytes).
2951 */
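/*
 * Example: with wsize = 2 and bitmap = { 0x01, 0x80 }, vshiftl(bitmap, 1, 2)
 * shifts the whole buffer left by one bit; the high bit of bitmap[1]
 * carries into bitmap[0], yielding { 0x03, 0x00 }.
 */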
2952static void
2953vshiftl(unsigned char *bitmap, int nbit, int wsize)
2954{
2955 int s, j, i;
2956 unsigned char over;
2957
2958 for (j = 0; j < nbit; j += 8) {
2959 s = (nbit - j < 8) ? (nbit - j): 8;
2960 bitmap[0] <<= s;
2961 for (i = 1; i < wsize; i++) {
2962 over = (bitmap[i] >> (8 - s));
2963 bitmap[i] <<= s;
2964 bitmap[i-1] |= over;
2965 }
2966 }
2967
2968 return;
2969}
2970
2971const char *
2972ipsec4_logpacketstr(struct ip *ip, u_int32_t spi)
2973{
2974 static char buf[256] __attribute__((aligned(4)));
2975 char *p;
2976 u_int8_t *s, *d;
2977
2978 s = (u_int8_t *)(&ip->ip_src);
2979 d = (u_int8_t *)(&ip->ip_dst);
2980
2981 p = buf;
2982 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
2983 while (p && *p)
2984 p++;
2985 snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u",
2986 s[0], s[1], s[2], s[3]);
2987 while (p && *p)
2988 p++;
2989 snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u",
2990 d[0], d[1], d[2], d[3]);
2991 while (p && *p)
2992 p++;
2993 snprintf(p, sizeof(buf) - (p - buf), ")");
2994
2995 return buf;
2996}
2997
2998#if INET6
2999const char *
3000ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi)
3001{
3002 static char buf[256] __attribute__((aligned(4)));
3003 char *p;
3004
3005 p = buf;
3006 snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi));
3007 while (p && *p)
3008 p++;
3009 snprintf(p, sizeof(buf) - (p - buf), "src=%s",
3010 ip6_sprintf(&ip6->ip6_src));
3011 while (p && *p)
3012 p++;
3013 snprintf(p, sizeof(buf) - (p - buf), " dst=%s",
3014 ip6_sprintf(&ip6->ip6_dst));
3015 while (p && *p)
3016 p++;
3017 snprintf(p, sizeof(buf) - (p - buf), ")");
3018
3019 return buf;
3020}
3021#endif /*INET6*/
3022
3023const char *
3024ipsec_logsastr(struct secasvar *sav)
3025{
3026 static char buf[256] __attribute__((aligned(4)));
3027 char *p;
3028 struct secasindex *saidx = &sav->sah->saidx;
3029
3030 /* validity check */
3031 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family
3032 != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family)
3033 panic("ipsec_logsastr: family mismatched.\n");
3034
3035 p = buf;
3036 snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi));
3037 while (p && *p)
3038 p++;
3039 if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) {
3040 u_int8_t *s, *d;
3041 s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr;
3042 d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr;
3043 snprintf(p, sizeof(buf) - (p - buf),
3044 "src=%d.%d.%d.%d dst=%d.%d.%d.%d",
3045 s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]);
3046 }
3047#if INET6
3048 else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) {
3049 snprintf(p, sizeof(buf) - (p - buf),
3050 "src=%s",
3051 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr));
3052 while (p && *p)
3053 p++;
3054 snprintf(p, sizeof(buf) - (p - buf),
3055 " dst=%s",
3056 ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr));
3057 }
3058#endif
3059 while (p && *p)
3060 p++;
3061 snprintf(p, sizeof(buf) - (p - buf), ")");
3062
3063 return buf;
3064}
3065
3066void
3067ipsec_dumpmbuf(struct mbuf *m)
3068{
3069 int totlen;
3070 int i;
3071 u_char *p;
3072
3073 totlen = 0;
3074 printf("---\n");
3075 while (m) {
3076 p = mtod(m, u_char *);
3077 for (i = 0; i < m->m_len; i++) {
3078 printf("%02x ", p[i]);
3079 totlen++;
3080 if (totlen % 16 == 0)
3081 printf("\n");
3082 }
3083 m = m->m_next;
3084 }
3085 if (totlen % 16 != 0)
3086 printf("\n");
3087 printf("---\n");
3088}
3089
3090#if INET
3091/*
3092 * IPsec output logic for IPv4.
3093 */
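/*
 * Outline: for a tunnel-mode SA the packet is first encapsulated
 * (ipsec4_encapsulate(), or ipsec46_encapsulate() when the outer
 * header is IPv6) and the SA's cached route is refreshed; the packet
 * is then split at the IP header and handed to the per-protocol
 * transform (esp4_output(), ah4_output(), or ipcomp4_output()).
 */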
3094static int
3095ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav)
3096{
3097 struct ip *ip = NULL;
3098 int error = 0;
3099 struct sockaddr_in *dst4;
3100 struct route *ro4;
3101
3102 /* validity check */
3103 if (sav == NULL || sav->sah == NULL) {
3104 error = EINVAL;
3105 goto bad;
3106 }
3107
3108 /*
3109 * If there is no valid SA, we give up on any further
3110 * processing. In such a case, the SA's status is changed
3111 * from DYING to DEAD after allocation. If a packet were
3112 * sent to the receiver with a dead SA, the receiver could
3113 * not decode it because the SA is dead.
3114 */
3115 if (sav->state != SADB_SASTATE_MATURE
3116 && sav->state != SADB_SASTATE_DYING) {
3117 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3118 error = EINVAL;
3119 goto bad;
3120 }
3121
3122 state->outgoing_if = sav->sah->outgoing_if;
3123
3124 /*
3125 * The SA status may change while we are referring to it
3126 * (historically this was guarded by splsoftnet()).
3127 */
3128
3129 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3130 /*
3131 * build IPsec tunnel.
3132 */
3133 state->m = ipsec4_splithdr(state->m);
3134 if (!state->m) {
3135 error = ENOMEM;
3136 goto bad;
3137 }
3138
3139 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3140 error = ipsec46_encapsulate(state, sav);
3141 if (error) {
3142 // packet already freed by encapsulation error handling
3143 state->m = NULL;
3144 return error;
3145 }
3146
3147 error = ipsec6_update_routecache_and_output(state, sav);
3148 return error;
3149
3150 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3151 error = ipsec4_encapsulate(state->m, sav);
3152 if (error) {
3153 state->m = NULL;
3154 goto bad;
3155 }
3156 ip = mtod(state->m, struct ip *);
3157
3158 // grab sadb_mutex, before updating sah's route cache
3159 lck_mtx_lock(sadb_mutex);
3160 ro4= &sav->sah->sa_route;
3161 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3162 if (ro4->ro_rt != NULL) {
3163 RT_LOCK(ro4->ro_rt);
3164 }
3165 if (ROUTE_UNUSABLE(ro4) ||
3166 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3167 if (ro4->ro_rt != NULL)
3168 RT_UNLOCK(ro4->ro_rt);
3169 ROUTE_RELEASE(ro4);
3170 }
3171 if (ro4->ro_rt == 0) {
3172 dst4->sin_family = AF_INET;
3173 dst4->sin_len = sizeof(*dst4);
3174 dst4->sin_addr = ip->ip_dst;
3175 rtalloc_scoped(ro4, sav->sah->outgoing_if);
3176 if (ro4->ro_rt == 0) {
3177 OSAddAtomic(1, &ipstat.ips_noroute);
3178 error = EHOSTUNREACH;
3179 // release sadb_mutex, after updating sah's route cache
3180 lck_mtx_unlock(sadb_mutex);
3181 goto bad;
3182 }
3183 RT_LOCK(ro4->ro_rt);
3184 }
3185
3186 /*
3187 * adjust state->dst if tunnel endpoint is offlink
3188 *
3189 * XXX: caching rt_gateway value in the state is
3190 * not really good, since it may point elsewhere
3191 * when the gateway gets modified to a larger
3192 * sockaddr via rt_setgate(). This is currently
3193 * addressed by SA_SIZE roundup in that routine.
3194 */
3195 if (ro4->ro_rt->rt_flags & RTF_GATEWAY)
3196 dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway;
3197 RT_UNLOCK(ro4->ro_rt);
3198 ROUTE_RELEASE(&state->ro);
3199 route_copyout(&state->ro, ro4, sizeof(state->ro));
3200 state->dst = (struct sockaddr *)dst4;
3201 state->tunneled = 4;
3202 // release sadb_mutex, after updating sah's route cache
3203 lck_mtx_unlock(sadb_mutex);
3204 } else {
3205 ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n",
3206 __FUNCTION__, (u_int32_t)ntohl(sav->spi)));
3207 error = EAFNOSUPPORT;
3208 goto bad;
3209 }
3210 }
3211
3212 state->m = ipsec4_splithdr(state->m);
3213 if (!state->m) {
3214 error = ENOMEM;
3215 goto bad;
3216 }
3217 switch (sav->sah->saidx.proto) {
3218 case IPPROTO_ESP:
3219#if IPSEC_ESP
3220 if ((error = esp4_output(state->m, sav)) != 0) {
3221 state->m = NULL;
3222 goto bad;
3223 }
3224 break;
3225#else
3226 m_freem(state->m);
3227 state->m = NULL;
3228 error = EINVAL;
3229 goto bad;
3230#endif
3231 case IPPROTO_AH:
3232 if ((error = ah4_output(state->m, sav)) != 0) {
3233 state->m = NULL;
3234 goto bad;
3235 }
3236 break;
3237 case IPPROTO_IPCOMP:
3238 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3239 state->m = NULL;
3240 goto bad;
3241 }
3242 break;
3243 default:
3244 ipseclog((LOG_ERR,
3245 "ipsec4_output: unknown ipsec protocol %d\n",
3246 sav->sah->saidx.proto));
3247 m_freem(state->m);
3248 state->m = NULL;
3249 error = EINVAL;
3250 goto bad;
3251 }
3252
3253 if (state->m == 0) {
3254 error = ENOMEM;
3255 goto bad;
3256 }
3257
3258 return 0;
3259
3260bad:
3261 return error;
3262}
3263
3264int
3265ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface)
3266{
3267 int error = 0;
3268 struct secasvar *sav = NULL;
3269
3270 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3271
3272 if (!state)
3273 panic("state == NULL in ipsec4_output");
3274 if (!state->m)
3275 panic("state->m == NULL in ipsec4_output");
3276 if (!state->dst)
3277 panic("state->dst == NULL in ipsec4_output");
3278
3279 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET);
3280 if (sav == NULL) {
3281 goto bad;
3282 }
3283
3284 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3285 goto bad;
3286 }
3287
3288 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3289 if (sav)
3290 key_freesav(sav, KEY_SADB_UNLOCKED);
3291 return 0;
3292
3293bad:
3294 if (sav)
3295 key_freesav(sav, KEY_SADB_UNLOCKED);
3296 m_freem(state->m);
3297 state->m = NULL;
3298 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3299 return error;
3300}
3301
3302int
3303ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused int flags)
3304{
3305 struct ip *ip = NULL;
3306 struct ipsecrequest *isr = NULL;
3307 struct secasindex saidx;
3308 struct secasvar *sav = NULL;
3309 int error = 0;
3310 struct sockaddr_in *sin;
3311
3312 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3313
3314 if (!state)
3315 panic("state == NULL in ipsec4_output");
3316 if (!state->m)
3317 panic("state->m == NULL in ipsec4_output");
3318 if (!state->dst)
3319 panic("state->dst == NULL in ipsec4_output");
3320
3321 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0,0,0,0,0);
3322
3323 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3324 printf("ipsec4_output: applied SP\n");
3325 kdebug_secpolicy(sp));
3326
3327 for (isr = sp->req; isr != NULL; isr = isr->next) {
3328 /* make SA index for search proper SA */
3329 ip = mtod(state->m, struct ip *);
3330 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3331 saidx.mode = isr->saidx.mode;
3332 saidx.reqid = isr->saidx.reqid;
3333 sin = (struct sockaddr_in *)&saidx.src;
3334 if (sin->sin_len == 0) {
3335 sin->sin_len = sizeof(*sin);
3336 sin->sin_family = AF_INET;
3337 sin->sin_port = IPSEC_PORT_ANY;
3338 bcopy(&ip->ip_src, &sin->sin_addr,
3339 sizeof(sin->sin_addr));
3340 }
3341 sin = (struct sockaddr_in *)&saidx.dst;
3342 if (sin->sin_len == 0) {
3343 sin->sin_len = sizeof(*sin);
3344 sin->sin_family = AF_INET;
3345 sin->sin_port = IPSEC_PORT_ANY;
3346 /*
3347 * Get the port from the packet if the upper layer is UDP,
3348 * NAT traversal is enabled, and the SA is in transport mode.
3349 */
3350
3351 if ((esp_udp_encap_port & 0xFFFF) != 0 &&
3352 isr->saidx.mode == IPSEC_MODE_TRANSPORT) {
3353
3354 if (ip->ip_p == IPPROTO_UDP) {
3355 struct udphdr *udp;
3356 size_t hlen;
3357#ifdef _IP_VHL
3358 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
3359#else
3360 hlen = ip->ip_hl << 2;
3361#endif
3362 if (state->m->m_len < hlen + sizeof(struct udphdr)) {
3363 state->m = m_pullup(state->m, hlen + sizeof(struct udphdr));
3364 if (!state->m) {
3365 ipseclog((LOG_DEBUG, "IPv4 output: can't pullup UDP header\n"));
3366 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
3367 goto bad;
3368 }
3369 ip = mtod(state->m, struct ip *);
3370 }
3371 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + hlen);
3372 sin->sin_port = udp->uh_dport;
3373 }
3374 }
3375
3376 bcopy(&ip->ip_dst, &sin->sin_addr,
3377 sizeof(sin->sin_addr));
3378 }
3379
3380 if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) {
3381 /*
3382 * IPsec processing is required, but no SA found.
3383 * I assume that key_acquire() had been called
3384 * to get/establish the SA. Here I discard
3385 * this packet because it is the responsibility of the
3386 * upper layer to retransmit the packet.
3387 */
3388 IPSEC_STAT_INCREMENT(ipsecstat.out_nosa);
3389 goto bad;
3390 }
3391
3392 /* validity check */
3393 if (sav == NULL) {
3394 switch (ipsec_get_reqlevel(isr)) {
3395 case IPSEC_LEVEL_USE:
3396 continue;
3397 case IPSEC_LEVEL_REQUIRE:
3398 /* must not be reached here. */
3399 panic("ipsec4_output: no SA found, but required.");
3400 }
3401 }
3402
3403 if ((error = ipsec4_output_internal(state, sav)) != 0) {
3404 goto bad;
3405 }
3406 }
3407
3408 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0);
3409 if (sav)
3410 key_freesav(sav, KEY_SADB_UNLOCKED);
3411 return 0;
3412
3413bad:
3414 if (sav)
3415 key_freesav(sav, KEY_SADB_UNLOCKED);
3416 m_freem(state->m);
3417 state->m = NULL;
3418 KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0);
3419 return error;
3420}
3421
3422#endif
3423
3424#if INET6
3425/*
3426 * IPsec output logic for IPv6, transport mode.
3427 */
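/*
 * In transport mode the transform is applied between the last
 * extension header (mprev) and the upper-layer payload; the transform
 * routine (esp6_output(), ah6_output(), or ipcomp6_output()) rewrites
 * *nexthdrp, and ip6_plen is recomputed afterwards.
 */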
3428static int
3429ipsec6_output_trans_internal(
3430 struct ipsec_output_state *state,
3431 struct secasvar *sav,
3432 u_char *nexthdrp,
3433 struct mbuf *mprev)
3434{
3435 struct ip6_hdr *ip6;
3436 int error = 0;
3437 int plen;
3438
3439 /* validity check */
3440 if (sav == NULL || sav->sah == NULL) {
3441 error = EINVAL;
3442 goto bad;
3443 }
3444
3445 /*
3446 * If there is no valid SA, we give up on processing;
3447 * see the same place in ipsec4_output().
3448 */
3449 if (sav->state != SADB_SASTATE_MATURE
3450 && sav->state != SADB_SASTATE_DYING) {
3451 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3452 error = EINVAL;
3453 goto bad;
3454 }
3455
3456 state->outgoing_if = sav->sah->outgoing_if;
3457
3458 switch (sav->sah->saidx.proto) {
3459 case IPPROTO_ESP:
3460#if IPSEC_ESP
3461 error = esp6_output(state->m, nexthdrp, mprev->m_next, sav);
3462#else
3463 m_freem(state->m);
3464 error = EINVAL;
3465#endif
3466 break;
3467 case IPPROTO_AH:
3468 error = ah6_output(state->m, nexthdrp, mprev->m_next, sav);
3469 break;
3470 case IPPROTO_IPCOMP:
3471 error = ipcomp6_output(state->m, nexthdrp, mprev->m_next, sav);
3472 break;
3473 default:
3474 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3475 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3476 m_freem(state->m);
3477 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3478 error = EINVAL;
3479 break;
3480 }
3481 if (error) {
3482 state->m = NULL;
3483 goto bad;
3484 }
3485 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3486 if (plen > IPV6_MAXPACKET) {
3487 ipseclog((LOG_ERR, "ipsec6_output_trans: "
3488 "IPsec with IPv6 jumbogram is not supported\n"));
3489 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3490 error = EINVAL; /*XXX*/
3491 goto bad;
3492 }
3493 ip6 = mtod(state->m, struct ip6_hdr *);
3494 ip6->ip6_plen = htons(plen);
3495
3496 return 0;
3497bad:
3498 return error;
3499}
3500
3501int
3502ipsec6_output_trans(
3503 struct ipsec_output_state *state,
3504 u_char *nexthdrp,
3505 struct mbuf *mprev,
3506 struct secpolicy *sp,
3507 __unused int flags,
3508 int *tun)
3509{
3510 struct ip6_hdr *ip6;
3511 struct ipsecrequest *isr = NULL;
3512 struct secasindex saidx;
3513 int error = 0;
3514 struct sockaddr_in6 *sin6;
3515 struct secasvar *sav = NULL;
3516
3517 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3518
3519 if (!state)
3520 panic("state == NULL in ipsec6_output_trans");
3521 if (!state->m)
3522 panic("state->m == NULL in ipsec6_output_trans");
3523 if (!nexthdrp)
3524 panic("nexthdrp == NULL in ipsec6_output_trans");
3525 if (!mprev)
3526 panic("mprev == NULL in ipsec6_output_trans");
3527 if (!sp)
3528 panic("sp == NULL in ipsec6_output_trans");
3529 if (!tun)
3530 panic("tun == NULL in ipsec6_output_trans");
3531
3532 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3533 printf("ipsec6_output_trans: applied SP\n");
3534 kdebug_secpolicy(sp));
3535
3536 *tun = 0;
3537 for (isr = sp->req; isr; isr = isr->next) {
3538 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3539 /* the rest will be handled by ipsec6_output_tunnel() */
3540 break;
3541 }
3542
3543 /* make SA index for search proper SA */
3544 ip6 = mtod(state->m, struct ip6_hdr *);
3545 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3546 saidx.mode = isr->saidx.mode;
3547 saidx.reqid = isr->saidx.reqid;
3548 sin6 = (struct sockaddr_in6 *)&saidx.src;
3549 if (sin6->sin6_len == 0) {
3550 sin6->sin6_len = sizeof(*sin6);
3551 sin6->sin6_family = AF_INET6;
3552 sin6->sin6_port = IPSEC_PORT_ANY;
3553 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3554 sizeof(ip6->ip6_src));
3555 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3556 /* fix scope id for comparing SPD */
3557 sin6->sin6_addr.s6_addr16[1] = 0;
3558 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3559 }
3560 }
3561 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3562 if (sin6->sin6_len == 0) {
3563 sin6->sin6_len = sizeof(*sin6);
3564 sin6->sin6_family = AF_INET6;
3565 sin6->sin6_port = IPSEC_PORT_ANY;
3566 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3567 sizeof(ip6->ip6_dst));
3568 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3569 /* fix scope id for comparing SPD */
3570 sin6->sin6_addr.s6_addr16[1] = 0;
3571 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3572 }
3573 }
3574
3575 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3576 /*
3577 * IPsec processing is required, but no SA found.
3578 * I assume that key_acquire() had been called
3579 * to get/establish the SA. Here I discard
3580 * this packet because it is the responsibility of the
3581 * upper layer to retransmit the packet.
3582 */
3583 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3584 error = ENOENT;
3585
3586 /*
3587 * Notify the fact that the packet is discarded
3588 * to ourselves. I believe this is better than
3589 * just silently discarding. (jinmei@kame.net)
3590 * XXX: should we restrict the error to TCP packets?
3591 * XXX: should we directly notify sockets via
3592 * pfctlinputs?
3593 */
3594 icmp6_error(state->m, ICMP6_DST_UNREACH,
3595 ICMP6_DST_UNREACH_ADMIN, 0);
3596 state->m = NULL; /* icmp6_error freed the mbuf */
3597 goto bad;
3598 }
3599
3600 /* validity check */
3601 if (sav == NULL) {
3602 switch (ipsec_get_reqlevel(isr)) {
3603 case IPSEC_LEVEL_USE:
3604 continue;
3605 case IPSEC_LEVEL_REQUIRE:
3606 /* must not be reached here. */
3607 panic("ipsec6_output_trans: no SA found, but required.");
3608 }
3609 }
3610
3611 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
3612 goto bad;
3613 }
3614 }
3615
3616 /* if we have more to go, we need a tunnel mode processing */
3617 if (isr != NULL)
3618 *tun = 1;
3619
3620 if (sav)
3621 key_freesav(sav, KEY_SADB_UNLOCKED);
3622 return 0;
3623
3624bad:
3625 if (sav)
3626 key_freesav(sav, KEY_SADB_UNLOCKED);
3627 m_freem(state->m);
3628 state->m = NULL;
3629 return error;
3630}
3631
3632/*
3633 * IPsec output logic for IPv6, tunnel mode.
3634 */
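/*
 * Tunnel mode either wraps the packet in a new IPv6 header
 * (ipsec6_encapsulate()) or, when the SA's outer addresses are IPv4,
 * in a new IPv4 header (ipsec64_encapsulate()); in the latter case the
 * packet is handed straight to ip_output() and the request must be the
 * last one in the chain (must_be_last).
 */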
3635static int
3636ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last)
3637{
3638 struct ip6_hdr *ip6;
3639 int error = 0;
3640 int plen;
3641 struct sockaddr_in6* dst6;
3642 struct route *ro6;
3643
3644 /* validity check */
3645 if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) {
3646 error = EINVAL;
3647 goto bad;
3648 }
3649
3650 /*
3651 * If there is no valid SA, we give up on processing;
3652 * see the same place in ipsec4_output().
3653 */
3654 if (sav->state != SADB_SASTATE_MATURE
3655 && sav->state != SADB_SASTATE_DYING) {
3656 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3657 error = EINVAL;
3658 goto bad;
3659 }
3660
3661 state->outgoing_if = sav->sah->outgoing_if;
3662
3663 if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
3664 /*
3665 * build IPsec tunnel.
3666 */
3667 state->m = ipsec6_splithdr(state->m);
3668 if (!state->m) {
3669 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3670 error = ENOMEM;
3671 goto bad;
3672 }
3673
3674 if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) {
3675 error = ipsec6_encapsulate(state->m, sav);
3676 if (error) {
3677 state->m = 0;
3678 goto bad;
3679 }
3680 ip6 = mtod(state->m, struct ip6_hdr *);
3681 } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) {
3682
3683 struct ip *ip;
3684 struct sockaddr_in* dst4;
3685 struct route *ro4 = NULL;
3686 struct route ro4_copy;
3687 struct ip_out_args ipoa = { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0,
3688 SO_TC_UNSPEC, _NET_SERVICE_TYPE_UNSPEC };
3689
3690 if (must_be_last)
3691 *must_be_last = 1;
3692
3693 state->tunneled = 4; /* must not process any further in ip6_output */
3694 error = ipsec64_encapsulate(state->m, sav);
3695 if (error) {
3696 state->m = 0;
3697 goto bad;
3698 }
3699 /* Now we have an IPv4 packet */
3700 ip = mtod(state->m, struct ip *);
3701
3702 // grab sadb_mutex, to update sah's route cache and get a local copy of it
3703 lck_mtx_lock(sadb_mutex);
3704 ro4 = &sav->sah->sa_route;
3705 dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst;
3706 if (ro4->ro_rt) {
3707 RT_LOCK(ro4->ro_rt);
3708 }
3709 if (ROUTE_UNUSABLE(ro4) ||
3710 dst4->sin_addr.s_addr != ip->ip_dst.s_addr) {
3711 if (ro4->ro_rt != NULL)
3712 RT_UNLOCK(ro4->ro_rt);
3713 ROUTE_RELEASE(ro4);
3714 }
3715 if (ro4->ro_rt == NULL) {
3716 dst4->sin_family = AF_INET;
3717 dst4->sin_len = sizeof(*dst4);
3718 dst4->sin_addr = ip->ip_dst;
3719 } else {
3720 RT_UNLOCK(ro4->ro_rt);
3721 }
3722 route_copyout(&ro4_copy, ro4, sizeof(ro4_copy));
3723 // release sadb_mutex, after updating sah's route cache and getting a local copy
3724 lck_mtx_unlock(sadb_mutex);
3725 state->m = ipsec4_splithdr(state->m);
3726 if (!state->m) {
3727 error = ENOMEM;
3728 ROUTE_RELEASE(&ro4_copy);
3729 goto bad;
3730 }
3731 switch (sav->sah->saidx.proto) {
3732 case IPPROTO_ESP:
3733#if IPSEC_ESP
3734 if ((error = esp4_output(state->m, sav)) != 0) {
3735 state->m = NULL;
3736 ROUTE_RELEASE(&ro4_copy);
3737 goto bad;
3738 }
3739 break;
3740
3741#else
3742 m_freem(state->m);
3743 state->m = NULL;
3744 error = EINVAL;
3745 ROUTE_RELEASE(&ro4_copy);
3746 goto bad;
3747#endif
3748 case IPPROTO_AH:
3749 if ((error = ah4_output(state->m, sav)) != 0) {
3750 state->m = NULL;
3751 ROUTE_RELEASE(&ro4_copy);
3752 goto bad;
3753 }
3754 break;
3755 case IPPROTO_IPCOMP:
3756 if ((error = ipcomp4_output(state->m, sav)) != 0) {
3757 state->m = NULL;
3758 ROUTE_RELEASE(&ro4_copy);
3759 goto bad;
3760 }
3761 break;
3762 default:
3763 ipseclog((LOG_ERR,
3764 "ipsec6_output_tunnel: unknown ipsec protocol %d\n",
3765 sav->sah->saidx.proto));
3766 m_freem(state->m);
3767 state->m = NULL;
3768 error = EINVAL;
3769 ROUTE_RELEASE(&ro4_copy);
3770 goto bad;
3771 }
3772
3773 if (state->m == 0) {
3774 error = ENOMEM;
3775 ROUTE_RELEASE(&ro4_copy);
3776 goto bad;
3777 }
3778 ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET);
3779 ipsec_set_ipoa_for_interface(sav->sah->ipsec_if, &ipoa);
3780
3781 ip = mtod(state->m, struct ip *);
3782 ip->ip_len = ntohs(ip->ip_len); /* flip len field before calling ip_output */
3783 error = ip_output(state->m, NULL, &ro4_copy, IP_OUTARGS, NULL, &ipoa);
3784 state->m = NULL;
3785 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
3786 lck_mtx_lock(sadb_mutex);
3787 route_copyin(&ro4_copy, ro4, sizeof(ro4_copy));
3788 lck_mtx_unlock(sadb_mutex);
3789 if (error != 0)
3790 goto bad;
3791 goto done;
3792 } else {
3793 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3794 "unsupported inner family, spi=%u\n",
3795 (u_int32_t)ntohl(sav->spi)));
3796 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3797 error = EAFNOSUPPORT;
3798 goto bad;
3799 }
3800
3801 // grab sadb_mutex, before updating sah's route cache
3802 lck_mtx_lock(sadb_mutex);
3803 ro6 = &sav->sah->sa_route;
3804 dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst;
3805 if (ro6->ro_rt) {
3806 RT_LOCK(ro6->ro_rt);
3807 }
3808 if (ROUTE_UNUSABLE(ro6) ||
3809 !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) {
3810 if (ro6->ro_rt != NULL)
3811 RT_UNLOCK(ro6->ro_rt);
3812 ROUTE_RELEASE(ro6);
3813 }
3814 if (ro6->ro_rt == 0) {
3815 bzero(dst6, sizeof(*dst6));
3816 dst6->sin6_family = AF_INET6;
3817 dst6->sin6_len = sizeof(*dst6);
3818 dst6->sin6_addr = ip6->ip6_dst;
3819 rtalloc_scoped(ro6, sav->sah->outgoing_if);
3820 if (ro6->ro_rt) {
3821 RT_LOCK(ro6->ro_rt);
3822 }
3823 }
3824 if (ro6->ro_rt == 0) {
3825 ip6stat.ip6s_noroute++;
3826 IPSEC_STAT_INCREMENT(ipsec6stat.out_noroute);
3827 error = EHOSTUNREACH;
3828 // release sadb_mutex, after updating sah's route cache
3829 lck_mtx_unlock(sadb_mutex);
3830 goto bad;
3831 }
3832
3833 /*
3834 * adjust state->dst if tunnel endpoint is offlink
3835 *
3836 * XXX: caching rt_gateway value in the state is
3837 * not really good, since it may point elsewhere
3838 * when the gateway gets modified to a larger
3839 * sockaddr via rt_setgate(). This is currently
3840 * addressed by SA_SIZE roundup in that routine.
3841 */
3842 if (ro6->ro_rt->rt_flags & RTF_GATEWAY)
3843 dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway;
3844 RT_UNLOCK(ro6->ro_rt);
3845 ROUTE_RELEASE(&state->ro);
3846 route_copyout(&state->ro, ro6, sizeof(state->ro));
3847 state->dst = (struct sockaddr *)dst6;
3848 state->tunneled = 6;
3849 // release sadb_mutex, after updating sah's route cache
3850 lck_mtx_unlock(sadb_mutex);
3851 }
3852
3853 state->m = ipsec6_splithdr(state->m);
3854 if (!state->m) {
3855 IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem);
3856 error = ENOMEM;
3857 goto bad;
3858 }
3859 ip6 = mtod(state->m, struct ip6_hdr *);
3860 switch (sav->sah->saidx.proto) {
3861 case IPPROTO_ESP:
3862#if IPSEC_ESP
3863 error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3864#else
3865 m_freem(state->m);
3866 error = EINVAL;
3867#endif
3868 break;
3869 case IPPROTO_AH:
3870 error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav);
3871 break;
3872 case IPPROTO_IPCOMP:
3873 /* XXX code should be here */
3874 /*FALLTHROUGH*/
3875 default:
3876 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3877 "unknown ipsec protocol %d\n", sav->sah->saidx.proto));
3878 m_freem(state->m);
3879 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3880 error = EINVAL;
3881 break;
3882 }
3883 if (error) {
3884 state->m = NULL;
3885 goto bad;
3886 }
3887 plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr);
3888 if (plen > IPV6_MAXPACKET) {
3889 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
3890 "IPsec with IPv6 jumbogram is not supported\n"));
3891 IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
3892 error = EINVAL; /*XXX*/
3893 goto bad;
3894 }
3895 ip6 = mtod(state->m, struct ip6_hdr *);
3896 ip6->ip6_plen = htons(plen);
3897done:
3898 return 0;
3899
3900bad:
3901 return error;
3902}
3903
3904int
3905ipsec6_output_tunnel(
3906 struct ipsec_output_state *state,
3907 struct secpolicy *sp,
3908 __unused int flags)
3909{
3910 struct ip6_hdr *ip6;
3911 struct ipsecrequest *isr = NULL;
3912 struct secasindex saidx;
3913 struct secasvar *sav = NULL;
3914 int error = 0;
3915
3916 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
3917
3918 if (!state)
3919 panic("state == NULL in ipsec6_output_tunnel");
3920 if (!state->m)
3921 panic("state->m == NULL in ipsec6_output_tunnel");
3922 if (!sp)
3923 panic("sp == NULL in ipsec6_output_tunnel");
3924
3925 KEYDEBUG(KEYDEBUG_IPSEC_DATA,
3926 printf("ipsec6_output_tunnel: applied SP\n");
3927 kdebug_secpolicy(sp));
3928
3929 /*
3930 * transport mode ipsec (before the 1st tunnel mode) is already
3931 * processed by ipsec6_output_trans().
3932 */
3933 for (isr = sp->req; isr; isr = isr->next) {
3934 if (isr->saidx.mode == IPSEC_MODE_TUNNEL)
3935 break;
3936 }
3937
3938 for (/* already initialized */; isr; isr = isr->next) {
3939 if (isr->saidx.mode == IPSEC_MODE_TUNNEL) {
3940 /* When tunnel mode, SA peers must be specified. */
3941 bcopy(&isr->saidx, &saidx, sizeof(saidx));
3942 } else {
3943 /* make SA index to look for a proper SA */
3944 struct sockaddr_in6 *sin6;
3945
3946 bzero(&saidx, sizeof(saidx));
3947 saidx.proto = isr->saidx.proto;
3948 saidx.mode = isr->saidx.mode;
3949 saidx.reqid = isr->saidx.reqid;
3950
3951 ip6 = mtod(state->m, struct ip6_hdr *);
3952 sin6 = (struct sockaddr_in6 *)&saidx.src;
3953 if (sin6->sin6_len == 0) {
3954 sin6->sin6_len = sizeof(*sin6);
3955 sin6->sin6_family = AF_INET6;
3956 sin6->sin6_port = IPSEC_PORT_ANY;
3957 bcopy(&ip6->ip6_src, &sin6->sin6_addr,
3958 sizeof(ip6->ip6_src));
3959 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
3960 /* fix scope id for comparing SPD */
3961 sin6->sin6_addr.s6_addr16[1] = 0;
3962 sin6->sin6_scope_id = ntohs(ip6->ip6_src.s6_addr16[1]);
3963 }
3964 }
3965 sin6 = (struct sockaddr_in6 *)&saidx.dst;
3966 if (sin6->sin6_len == 0) {
3967 sin6->sin6_len = sizeof(*sin6);
3968 sin6->sin6_family = AF_INET6;
3969 sin6->sin6_port = IPSEC_PORT_ANY;
3970 bcopy(&ip6->ip6_dst, &sin6->sin6_addr,
3971 sizeof(ip6->ip6_dst));
3972 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
3973 /* fix scope id for comparing SPD */
3974 sin6->sin6_addr.s6_addr16[1] = 0;
3975 sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]);
3976 }
3977 }
3978 }
3979
3980 if (key_checkrequest(isr, &saidx, &sav) == ENOENT) {
3981 /*
3982 * IPsec processing is required, but no SA was found.
3983 * We assume that key_acquire() has been called
3984 * to get/establish the SA. Here we discard
3985 * the packet because it is the responsibility of
3986 * the upper layer to retransmit it.
3987 */
3988 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
3989 error = ENOENT;
3990 goto bad;
3991 }
3992
3993 /* validity check */
3994 if (sav == NULL) {
3995 switch (ipsec_get_reqlevel(isr)) {
3996 case IPSEC_LEVEL_USE:
3997 continue;
3998 case IPSEC_LEVEL_REQUIRE:
3999 /* must not be reached here. */
4000 panic("ipsec6_output_tunnel: no SA found, but required.");
4001 }
4002 }
4003
4004 /*
4005 * If there is no valid SA, we give up processing;
4006 * see the same check in ipsec4_output().
4007 */
4008 if (sav->state != SADB_SASTATE_MATURE
4009 && sav->state != SADB_SASTATE_DYING) {
4010 IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa);
4011 error = EINVAL;
4012 goto bad;
4013 }
4014
4015 int must_be_last = 0;
4016
4017 if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) {
4018 goto bad;
4019 }
4020
4021 if (must_be_last && isr->next) {
4022 ipseclog((LOG_ERR, "ipsec6_output_tunnel: "
4023 "IPv4 must be outer layer, spi=%u\n",
4024 (u_int32_t)ntohl(sav->spi)));
4025 error = EINVAL;
4026 goto bad;
4027 }
4028 }
4029
4030 if (sav)
4031 key_freesav(sav, KEY_SADB_UNLOCKED);
4032 return 0;
4033
4034bad:
4035 if (sav)
4036 key_freesav(sav, KEY_SADB_UNLOCKED);
4037 if (state->m)
4038 m_freem(state->m);
4039 state->m = NULL;
4040 return error;
4041}
4042
4043int
4044ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_char *nexthdrp, struct mbuf *mprev)
4045{
4046 int error = 0;
4047 struct secasvar *sav = NULL;
4048
4049 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4050
4051 if (!state)
4052 panic("state == NULL in ipsec6_interface_output");
4053 if (!state->m)
4054 panic("state->m == NULL in ipsec6_interface_output");
4055 if (!nexthdrp)
4056 panic("nexthdrp == NULL in ipsec6_interface_output");
4057 if (!mprev)
4058 panic("mprev == NULL in ipsec6_interface_output");
4059
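 /*
 * This path is used by ipsec interfaces: the outbound SA is selected
 * directly from the interface rather than via an SPD lookup, and the
 * SA's mode decides between tunnel and transport processing below.
 */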
4060 sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6);
4061 if (sav == NULL) {
4062 goto bad;
4063 }
4064
4065 if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
4066 if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) {
4067 goto bad;
4068 }
4069 }
4070 else {
4071 if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) {
4072 goto bad;
4073 }
4074 }
4075
4076 if (sav)
4077 key_freesav(sav, KEY_SADB_UNLOCKED);
4078 return 0;
4079
4080bad:
4081 if (sav)
4082 key_freesav(sav, KEY_SADB_UNLOCKED);
4083 m_freem(state->m);
4084 state->m = NULL;
4085 return error;
4086}
4087#endif /*INET6*/
4088
4089#if INET
4090/*
4091 * Chop the IP header and options off from the payload.
4092 */
4093struct mbuf *
4094ipsec4_splithdr(struct mbuf *m)
4095{
4096 struct mbuf *mh;
4097 struct ip *ip;
4098 int hlen;
4099
4100 if (m->m_len < sizeof(struct ip))
4101 panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags);
4102 ip = mtod(m, struct ip *);
4103#ifdef _IP_VHL
4104 hlen = _IP_VHL_HL(ip->ip_vhl) << 2;
4105#else
4106 hlen = ip->ip_hl << 2;
4107#endif
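 /*
 * If the first mbuf holds more than the IP header, move the header
 * (and the packet header metadata) into a newly allocated mbuf and
 * leave the payload behind; if it holds less, pull the header up.
 */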
4108 if (m->m_len > hlen) {
4109 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4110 if (!mh) {
4111 m_freem(m);
4112 return NULL;
4113 }
4114 M_COPY_PKTHDR(mh, m);
4115 MH_ALIGN(mh, hlen);
4116 m->m_flags &= ~M_PKTHDR;
4117 m_mchtype(m, MT_DATA);
4118 m->m_len -= hlen;
4119 m->m_data += hlen;
4120 mh->m_next = m;
4121 m = mh;
4122 m->m_len = hlen;
4123 bcopy((caddr_t)ip, mtod(m, caddr_t), hlen);
4124 } else if (m->m_len < hlen) {
4125 m = m_pullup(m, hlen);
4126 if (!m)
4127 return NULL;
4128 }
4129 return m;
4130}
4131#endif
4132
4133#if INET6
4134struct mbuf *
4135ipsec6_splithdr(struct mbuf *m)
4136{
4137 struct mbuf *mh;
4138 struct ip6_hdr *ip6;
4139 int hlen;
4140
4141 if (m->m_len < sizeof(struct ip6_hdr))
4142 panic("ipsec6_splithdr: first mbuf too short");
4143 ip6 = mtod(m, struct ip6_hdr *);
4144 hlen = sizeof(struct ip6_hdr);
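 /* same splitting strategy as ipsec4_splithdr(), but the IPv6 header length is fixed */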
4145 if (m->m_len > hlen) {
4146 MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4147 if (!mh) {
4148 m_freem(m);
4149 return NULL;
4150 }
4151 M_COPY_PKTHDR(mh, m);
4152 MH_ALIGN(mh, hlen);
4153 m->m_flags &= ~M_PKTHDR;
4154 m_mchtype(m, MT_DATA);
4155 m->m_len -= hlen;
4156 m->m_data += hlen;
4157 mh->m_next = m;
4158 m = mh;
4159 m->m_len = hlen;
4160 bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen);
4161 } else if (m->m_len < hlen) {
4162 m = m_pullup(m, hlen);
4163 if (!m)
4164 return NULL;
4165 }
4166 return m;
4167}
4168#endif
4169
4170/* validate inbound IPsec tunnel packet. */
4171int
4172ipsec4_tunnel_validate(
4173 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4174 int off,
4175 u_int nxt0,
4176 struct secasvar *sav,
4177 sa_family_t *ifamily)
4178{
4179 u_int8_t nxt = nxt0 & 0xff;
4180 struct sockaddr_in *sin;
4181 struct sockaddr_in osrc, odst, i4src, i4dst;
4182 struct sockaddr_in6 i6src, i6dst;
4183 int hlen;
4184 struct secpolicy *sp;
4185 struct ip *oip;
4186
4187 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4188
4189#if DIAGNOSTIC
4190 if (m->m_len < sizeof(struct ip))
4191 panic("too short mbuf on ipsec4_tunnel_validate");
4192#endif
4193 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4194 return 0;
4195 if (m->m_pkthdr.len < off + sizeof(struct ip))
4196 return 0;
4197 /* do not decapsulate if the SA is for transport mode only */
4198 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4199 return 0;
4200
4201 oip = mtod(m, struct ip *);
4202#ifdef _IP_VHL
4203 hlen = _IP_VHL_HL(oip->ip_vhl) << 2;
4204#else
4205 hlen = oip->ip_hl << 2;
4206#endif
4207 if (hlen != sizeof(struct ip))
4208 return 0;
4209
4210 sin = (struct sockaddr_in *)&sav->sah->saidx.dst;
4211 if (sin->sin_family != AF_INET)
4212 return 0;
4213 if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0)
4214 return 0;
4215
4216 if (sav->sah->ipsec_if != NULL) {
4217 // ipsec interface SAs don't have policies.
4218 if (nxt == IPPROTO_IPV4) {
4219 *ifamily = AF_INET;
4220 } else if (nxt == IPPROTO_IPV6) {
4221 *ifamily = AF_INET6;
4222 } else {
4223 return 0;
4224 }
4225 return 1;
4226 }
4227
4228 /* XXX slow */
4229 bzero(&osrc, sizeof(osrc));
4230 bzero(&odst, sizeof(odst));
4231 osrc.sin_family = odst.sin_family = AF_INET;
4232 osrc.sin_len = odst.sin_len = sizeof(struct sockaddr_in);
4233 osrc.sin_addr = oip->ip_src;
4234 odst.sin_addr = oip->ip_dst;
4235 /*
4236 * RFC2401 5.2.1 (b): (assume that we are using tunnel mode)
4237 * - if the inner destination is a multicast address, there can be
4238 * multiple permissible inner source addresses. An implementation
4239 * may want to skip verification of the inner source address against
4240 * the SPD selector.
4241 * - if the inner protocol is ICMP, the packet may be an error report
4242 * from routers on the other side of the VPN cloud (R in the
4243 * following diagram). In this case, we cannot verify the inner source
4244 * address against the SPD selector.
4245 * me -- gw === gw -- R -- you
4246 *
4247 * We consider the first bullet to be the user's responsibility in SPD
4248 * entry configuration (if you need to encrypt multicast traffic, set
4249 * the source range of the SPD selector to 0.0.0.0/0, or have explicit
4250 * address ranges for possible senders).
4251 * The second bullet is not taken care of (yet).
4252 *
4253 * Therefore, we do not do anything special about the inner source address.
4254 */
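 /* build inner selectors from the encapsulated header and look for a matching tunnel-mode policy */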
4255 if (nxt == IPPROTO_IPV4) {
4256 bzero(&i4src, sizeof(struct sockaddr_in));
4257 bzero(&i4dst, sizeof(struct sockaddr_in));
4258 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4259 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4260 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4261 (caddr_t)&i4src.sin_addr);
4262 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4263 (caddr_t)&i4dst.sin_addr);
4264 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4265 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4266 } else if (nxt == IPPROTO_IPV6) {
4267 bzero(&i6src, sizeof(struct sockaddr_in6));
4268 bzero(&i6dst, sizeof(struct sockaddr_in6));
4269 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4270 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4271 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4272 (caddr_t)&i6src.sin6_addr);
4273 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4274 (caddr_t)&i6dst.sin6_addr);
4275 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4276 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4277 } else
4278 return 0; /* unsupported family */
4279
4280 if (!sp)
4281 return 0;
4282
4283 key_freesp(sp, KEY_SADB_UNLOCKED);
4284
4285 return 1;
4286}
4287
4288#if INET6
4289/* validate inbound IPsec tunnel packet. */
4290int
4291ipsec6_tunnel_validate(
4292 struct mbuf *m, /* no pullup permitted, m->m_len >= ip */
4293 int off,
4294 u_int nxt0,
4295 struct secasvar *sav,
4296 sa_family_t *ifamily)
4297{
4298 u_int8_t nxt = nxt0 & 0xff;
4299 struct sockaddr_in6 *sin6;
4300 struct sockaddr_in i4src, i4dst;
4301 struct sockaddr_in6 osrc, odst, i6src, i6dst;
4302 struct secpolicy *sp;
4303 struct ip6_hdr *oip6;
4304
4305 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4306
4307#if DIAGNOSTIC
4308 if (m->m_len < sizeof(struct ip6_hdr))
4309 panic("too short mbuf on ipsec6_tunnel_validate");
4310#endif
4311 if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6)
4312 return 0;
4313
4314 if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr))
4315 return 0;
4316 /* do not decapsulate if the SA is for transport mode only */
4317 if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT)
4318 return 0;
4319
4320 oip6 = mtod(m, struct ip6_hdr *);
4321 /* AF_INET should be supported, but at the moment it is not. */
4322 sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst;
4323 if (sin6->sin6_family != AF_INET6)
4324 return 0;
4325 if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr))
4326 return 0;
4327
4328 if (sav->sah->ipsec_if != NULL) {
4329 // ipsec interface SAs don't have policies.
4330 if (nxt == IPPROTO_IPV4) {
4331 *ifamily = AF_INET;
4332 } else if (nxt == IPPROTO_IPV6) {
4333 *ifamily = AF_INET6;
4334 } else {
4335 return 0;
4336 }
4337 return 1;
4338 }
4339
4340 /* XXX slow */
4341 bzero(&osrc, sizeof(osrc));
4342 bzero(&odst, sizeof(odst));
4343 osrc.sin6_family = odst.sin6_family = AF_INET6;
4344 osrc.sin6_len = odst.sin6_len = sizeof(struct sockaddr_in6);
4345 osrc.sin6_addr = oip6->ip6_src;
4346 odst.sin6_addr = oip6->ip6_dst;
4347
4348 /*
4349 * Regarding inner source address validation, see the long comment
4350 * in ipsec4_tunnel_validate().
4351 */
4352
4353 if (nxt == IPPROTO_IPV4) {
4354 bzero(&i4src, sizeof(struct sockaddr_in));
4355 bzero(&i4dst, sizeof(struct sockaddr_in));
4356 i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET;
4357 i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in);
4358 m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr),
4359 (caddr_t)&i4src.sin_addr);
4360 m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr),
4361 (caddr_t)&i4dst.sin_addr);
4362 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4363 (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst);
4364 } else if (nxt == IPPROTO_IPV6) {
4365 bzero(&i6src, sizeof(struct sockaddr_in6));
4366 bzero(&i6dst, sizeof(struct sockaddr_in6));
4367 i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6;
4368 i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6);
4369 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr),
4370 (caddr_t)&i6src.sin6_addr);
4371 m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr),
4372 (caddr_t)&i6dst.sin6_addr);
4373 sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst,
4374 (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst);
4375 } else
4376 return 0; /* unsupported family */
4377 /*
4378 * When there is no suitable inbound policy for an IPsec tunnel-mode
4379 * packet, the kernel never decapsulates it as IPsec tunnel mode, even
4380 * when the system-wide policy is "none"; instead the kernel leaves the
4381 * packet for the generic tunnel module to process. If there is no
4382 * generic tunnel rule either, the packet is rejected and the
4383 * statistics are counted up.
4384 */
4385 if (!sp)
4386 return 0;
4387 key_freesp(sp, KEY_SADB_UNLOCKED);
4388
4389 return 1;
4390}
4391#endif
4392
4393/*
4394 * Make an mbuf chain safe to modify for encryption.
4395 * If the original mbuf chain contains an mbuf whose cluster is shared,
4396 * allocate a new cluster and copy the data into it.
4397 * XXX: this hack is inefficient, but is necessary to handle cases
4398 * of TCP retransmission...
4399 */
4400struct mbuf *
4401ipsec_copypkt(struct mbuf *m)
4402{
4403 struct mbuf *n, **mpp, *mnew;
4404
4405 for (n = m, mpp = &m; n; n = n->m_next) {
4406 if (n->m_flags & M_EXT) {
4407 /*
4408 * Make a copy only if the cluster has an external free routine
4409 * or more than one reference.
4410 * XXX: is this approach effective?
4411 */
4412 if (
4413 n->m_ext.ext_free ||
4414 m_mclhasreference(n)
4415 )
4416 {
4417 int remain, copied;
4418 struct mbuf *mm;
4419
4420 if (n->m_flags & M_PKTHDR) {
4421 MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */
4422 if (mnew == NULL)
4423 goto fail;
4424 M_COPY_PKTHDR(mnew, n);
4425 }
4426 else {
4427 MGET(mnew, M_DONTWAIT, MT_DATA);
4428 if (mnew == NULL)
4429 goto fail;
4430 }
4431 mnew->m_len = 0;
4432 mm = mnew;
4433
4434 /*
4435 * Copy the data. If we don't have enough space to
4436 * store all of it, allocate a cluster
4437 * or additional mbufs.
4438 * XXX: we don't use m_copyback(), since the
4439 * function does not use clusters and thus is
4440 * inefficient.
4441 */
4442 remain = n->m_len;
4443 copied = 0;
4444 while (1) {
4445 int len;
4446 struct mbuf *mn;
4447
4448 if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN))
4449 len = remain;
4450 else { /* allocate a cluster */
4451 MCLGET(mm, M_DONTWAIT);
4452 if (!(mm->m_flags & M_EXT)) {
4453 m_free(mm);
4454 goto fail;
4455 }
4456 len = remain < MCLBYTES ?
4457 remain : MCLBYTES;
4458 }
4459
4460 bcopy(n->m_data + copied, mm->m_data,
4461 len);
4462
4463 copied += len;
4464 remain -= len;
4465 mm->m_len = len;
4466
4467 if (remain <= 0) /* completed? */
4468 break;
4469
4470 /* need another mbuf */
4471 MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */
4472 if (mn == NULL)
4473 goto fail;
4474 mn->m_pkthdr.rcvif = NULL;
4475 mm->m_next = mn;
4476 mm = mn;
4477 }
4478
4479 /* adjust chain: m_free() releases the original mbuf and returns its successor */
4480 mm->m_next = m_free(n);
4481 n = mm;
4482 *mpp = mnew;
4483 mpp = &n->m_next;
4484
4485 continue;
4486 }
4487 }
4488 *mpp = n;
4489 mpp = &n->m_next;
4490 }
4491
4492 return(m);
4493 fail:
4494 m_freem(m);
4495 return(NULL);
4496}
4497
4498/*
4499 * Tags are allocated as mbufs for now; since our minimum size is MLEN, we
4500 * should make use of up to that much space.
4501 */
4502#define IPSEC_TAG_HEADER
4503
4504struct ipsec_tag {
4505 struct socket *socket;
4506 u_int32_t history_count;
4507 struct ipsec_history history[];
4508};
4509
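/*
 * Sizing: the tag payload must fit in the MLEN-sized mbuf that backs an
 * m_tag, so IPSEC_HISTORY_MAX is the number of history records that fit
 * after the fixed fields above.
 */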
4510#define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag))
4511#define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0]))
4512#define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \
4513 sizeof(struct ipsec_history))
4514
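/* Find the IPsec metadata tag on an mbuf, creating it if it does not exist. */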
4515static struct ipsec_tag *
4516ipsec_addaux(
4517 struct mbuf *m)
4518{
4519 struct m_tag *tag;
4520
4521 /* Check if the tag already exists */
4522 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4523
4524 if (tag == NULL) {
4525 struct ipsec_tag *itag;
4526
4527 /* Allocate a tag */
4528 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC,
4529 IPSEC_TAG_SIZE, M_DONTWAIT, m);
4530
4531 if (tag) {
4532 itag = (struct ipsec_tag*)(tag + 1);
4533 itag->socket = 0;
4534 itag->history_count = 0;
4535
4536 m_tag_prepend(m, tag);
4537 }
4538 }
4539
4540 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4541}
4542
4543static struct ipsec_tag *
4544ipsec_findaux(
4545 struct mbuf *m)
4546{
4547 struct m_tag *tag;
4548
4549 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4550
4551 return tag ? (struct ipsec_tag*)(tag + 1) : NULL;
4552}
4553
4554void
4555ipsec_delaux(
4556 struct mbuf *m)
4557{
4558 struct m_tag *tag;
4559
4560 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL);
4561
4562 if (tag) {
4563 m_tag_delete(m, tag);
4564 }
4565}
4566
4567/* if the aux buffer is unnecessary, nuke it. */
4568static void
4569ipsec_optaux(
4570 struct mbuf *m,
4571 struct ipsec_tag *itag)
4572{
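 /* the ipsec_tag payload immediately follows its m_tag header, so back up one m_tag to delete it */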
4573 if (itag && itag->socket == NULL && itag->history_count == 0) {
4574 m_tag_delete(m, ((struct m_tag*)itag) - 1);
4575 }
4576}
4577
4578int
4579ipsec_setsocket(struct mbuf *m, struct socket *so)
4580{
4581 struct ipsec_tag *tag;
4582
4583 /* if so == NULL, don't insist on getting the aux mbuf */
4584 if (so) {
4585 tag = ipsec_addaux(m);
4586 if (!tag)
4587 return ENOBUFS;
4588 } else
4589 tag = ipsec_findaux(m);
4590 if (tag) {
4591 tag->socket = so;
4592 ipsec_optaux(m, tag);
4593 }
4594 return 0;
4595}
4596
4597struct socket *
4598ipsec_getsocket(struct mbuf *m)
4599{
4600 struct ipsec_tag *itag;
4601
4602 itag = ipsec_findaux(m);
4603 if (itag)
4604 return itag->socket;
4605 else
4606 return NULL;
4607}
4608
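/* Record the protocol and SPI of an IPsec transform applied to this packet. */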
4609int
4610ipsec_addhist(
4611 struct mbuf *m,
4612 int proto,
4613 u_int32_t spi)
4614{
4615 struct ipsec_tag *itag;
4616 struct ipsec_history *p;
4617 itag = ipsec_addaux(m);
4618 if (!itag)
4619 return ENOBUFS;
4620 if (itag->history_count == IPSEC_HISTORY_MAX)
4621 return ENOSPC; /* XXX */
4622
4623 p = &itag->history[itag->history_count];
4624 itag->history_count++;
4625
4626 bzero(p, sizeof(*p));
4627 p->ih_proto = proto;
4628 p->ih_spi = spi;
4629
4630 return 0;
4631}
4632
4633struct ipsec_history *
4634ipsec_gethist(
4635 struct mbuf *m,
4636 int *lenp)
4637{
4638 struct ipsec_tag *itag;
4639
4640 itag = ipsec_findaux(m);
4641 if (!itag)
4642 return NULL;
4643 if (itag->history_count == 0)
4644 return NULL;
4645 if (lenp)
4646 *lenp = (int)(itag->history_count * sizeof(struct ipsec_history));
4647 return itag->history;
4648}
4649
4650void
4651ipsec_clearhist(
4652 struct mbuf *m)
4653{
4654 struct ipsec_tag *itag;
4655
4656 itag = ipsec_findaux(m);
4657 if (itag) {
4658 itag->history_count = 0;
4659 }
4660 ipsec_optaux(m, itag);
4661}
4662
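/*
 * Send a NAT-T keepalive for this SA: a UDP-encapsulated one-byte 0xFF
 * payload addressed to the peer's IKE port, sent only when the SA has been
 * idle for longer than the keepalive interval:
 *
 *     [ struct ip | struct udphdr | 0xFF ]
 */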
4663__private_extern__ int
4664ipsec_send_natt_keepalive(
4665 struct secasvar *sav)
4666{
4667 struct mbuf *m;
4668 struct ip *ip;
4669 int error;
4670 struct ip_out_args ipoa =
4671 { IFSCOPE_NONE, { 0 }, IPOAF_SELECT_SRCIF, 0,
4672 SO_TC_UNSPEC, _NET_SERVICE_TYPE_UNSPEC };
4673 struct route ro;
4674 int keepalive_interval = natt_keepalive_interval;
4675
4676 lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED);
4677
4678 if ((esp_udp_encap_port & 0xFFFF) == 0 || sav->remote_ike_port == 0) return FALSE;
4679
4680 if (sav->natt_interval != 0) {
4681 keepalive_interval = (int)sav->natt_interval;
4682 }
4683
4684 // natt timestamp may have changed... reverify
4685 if ((natt_now - sav->natt_last_activity) < keepalive_interval) return FALSE;
4686
4687 if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) return FALSE; // don't send these from the kernel
4688
4689 m = m_gethdr(M_NOWAIT, MT_DATA);
4690 if (m == NULL) return FALSE;
4691
4692 ip = (__typeof__(ip))m_mtod(m);
4693
4694 // this sends one type of NAT-T keepalive (Type 1, ESP keepalives, are not sent by the kernel)
4695 if ((sav->flags & SADB_X_EXT_ESP_KEEPALIVE) == 0) {
4696 struct udphdr *uh;
4697
4698 /*
4699 * Type 2: a UDP packet complete with IP header.
4700 * We must do this because UDP output requires
4701 * an inpcb, which we don't have. The UDP packet
4702 * carries a one-byte payload, which is set
4703 * to 0xFF.
4704 */
4705 uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip));
4706 m->m_len = sizeof(struct udpiphdr) + 1;
4707 bzero(m_mtod(m), m->m_len);
4708 m->m_pkthdr.len = m->m_len;
4709
4710 ip->ip_len = m->m_len;
4711 ip->ip_ttl = ip_defttl;
4712 ip->ip_p = IPPROTO_UDP;
4713 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4714 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4715 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4716 } else {
4717 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4718 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4719 }
4720 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4721 uh->uh_dport = htons(sav->remote_ike_port);
4722 uh->uh_ulen = htons(1 + sizeof(*uh));
4723 uh->uh_sum = 0;
4724 *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4725 }
4726
4727 // grab sadb_mutex, to get a local copy of sah's route cache
4728 lck_mtx_lock(sadb_mutex);
4729 if (ROUTE_UNUSABLE(&sav->sah->sa_route) ||
4730 rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET)
4731 ROUTE_RELEASE(&sav->sah->sa_route);
4732
4733 route_copyout(&ro, &sav->sah->sa_route, sizeof(ro));
4734 lck_mtx_unlock(sadb_mutex);
4735
4736 necp_mark_packet_as_keepalive(m, TRUE);
4737
4738 error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa);
4739
4740 // grab sadb_mutex, to synchronize the sah's route cache with the local copy
4741 lck_mtx_lock(sadb_mutex);
4742 route_copyin(&ro, &sav->sah->sa_route, sizeof(ro));
4743 lck_mtx_unlock(sadb_mutex);
4744 if (error == 0) {
4745 sav->natt_last_activity = natt_now;
4746 return TRUE;
4747 }
4748 return FALSE;
4749}
4750
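/*
 * Build the NAT-T keepalive frame for interface keepalive offload: the same
 * UDP-encapsulated 0xFF payload as ipsec_send_natt_keepalive(), together
 * with the interval at which the interface should transmit it.
 */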
4751__private_extern__ bool
4752ipsec_fill_offload_frame(ifnet_t ifp,
4753 struct secasvar *sav,
4754 struct ifnet_keepalive_offload_frame *frame,
4755 size_t frame_data_offset)
4756{
4757 u_int8_t *data = NULL;
4758 struct ip *ip = NULL;
4759 struct udphdr *uh = NULL;
4760
4761 if (sav == NULL || sav->sah == NULL || frame == NULL ||
4762 (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) ||
4763 sav->sah->saidx.dst.ss_family != AF_INET ||
4764 !(sav->flags & SADB_X_EXT_NATT) ||
4765 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) ||
4766 !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) ||
4767 sav->flags & SADB_X_EXT_ESP_KEEPALIVE ||
4768 (esp_udp_encap_port & 0xFFFF) == 0 ||
4769 sav->remote_ike_port == 0 ||
4770 (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) {
4771 /* SA is not eligible for keepalive offload on this interface */
4772 return (FALSE);
4773 }
4774
4775 if (frame_data_offset + sizeof(struct udpiphdr) + 1 >
4776 IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
4777 /* Not enough room in this data frame */
4778 return (FALSE);
4779 }
4780
4781 data = frame->data;
4782 ip = (__typeof__(ip))(void *)(data + frame_data_offset);
4783 uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip));
4784
4785 frame->length = frame_data_offset + sizeof(struct udpiphdr) + 1;
4786 frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC;
4787 frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4;
4788
4789 bzero(data, IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE);
4790
4791 ip->ip_v = IPVERSION;
4792 ip->ip_hl = sizeof(struct ip) >> 2;
4793 ip->ip_off &= htons(~IP_OFFMASK);
4794 ip->ip_off &= htons(~IP_MF);
4795 switch (ip4_ipsec_dfbit) {
4796 case 0: /* clear DF bit */
4797 ip->ip_off &= htons(~IP_DF);
4798 break;
4799 case 1: /* set DF bit */
4800 ip->ip_off |= htons(IP_DF);
4801 break;
4802 default: /* copy DF bit */
4803 break;
4804 }
4805 ip->ip_len = htons(sizeof(struct udpiphdr) + 1);
4806 ip->ip_id = ip_randomid();
4807 ip->ip_ttl = ip_defttl;
4808 ip->ip_p = IPPROTO_UDP;
4809 ip->ip_sum = 0;
4810 if (sav->sah->dir != IPSEC_DIR_INBOUND) {
4811 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4812 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4813 } else {
4814 ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.dst)->sin_addr;
4815 ip->ip_dst = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr;
4816 }
4817 ip->ip_sum = in_cksum_hdr_opt(ip);
4818 uh->uh_sport = htons((u_short)esp_udp_encap_port);
4819 uh->uh_dport = htons(sav->remote_ike_port);
4820 uh->uh_ulen = htons(1 + sizeof(*uh));
4821 uh->uh_sum = 0;
4822 *(u_int8_t*)(data + frame_data_offset + sizeof(*ip) + sizeof(*uh)) = 0xFF;
4823
4824 if (sav->natt_offload_interval != 0) {
4825 frame->interval = sav->natt_offload_interval;
4826 } else if (sav->natt_interval != 0) {
4827 frame->interval = sav->natt_interval;
4828 } else {
4829 frame->interval = natt_keepalive_interval;
4830 }
4831 return (TRUE);
4832}