/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
/* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2013 Henning Brauer
 * NAT64 - Copyright (c) 2010 Viagenie Inc. (http://www.viagenie.ca)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/mcache.h>
#include <sys/protosw.h>

#include <libkern/crypto/md5.h>
#include <libkern/libkern.h>

#include <mach/thread_act.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <net/if_ether.h>
#include <net/ethernet.h>
#include <net/flowhash.h>
#include <net/nat464_utils.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <net/if_pfsync.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

/*
 * For RandomULong(), to get a 32 bits random value
 * Note that random() returns a 31 bits value, see rdar://11159750
 */
#include <dev/random/randomdev.h>
#define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0))

/*
 * On Mac OS X, the rtableid value is treated as the interface scope
 * value that is equivalent to the interface index used for scoped
 * routing. A valid scope value is anything but IFSCOPE_NONE (0),
 * as per definition of ifindex which is a positive, non-zero number.
 * The other BSDs treat a negative rtableid value as invalid, hence
 * the test against INT_MAX to handle userland apps which initialize
 * the field with a negative number.
 */
#define PF_RTABLEID_IS_VALID(r) \
	((r) > IFSCOPE_NONE && (r) <= INT_MAX)
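
/*
 * Illustrative sketch, not from the original source: how the macro above
 * classifies rtableid values, assuming IFSCOPE_NONE == 0. Excluded from
 * the build; a standalone userland demonstration only.
 */
#if 0
#include <limits.h>
#include <stdio.h>

#define IFSCOPE_NONE 0
#define PF_RTABLEID_IS_VALID(r) \
	((r) > IFSCOPE_NONE && (r) <= INT_MAX)

int
main(void)
{
	/* 0 (IFSCOPE_NONE) and negatives are invalid; 1..INT_MAX are valid */
	printf("%d\n", PF_RTABLEID_IS_VALID(0));        /* 0: invalid */
	printf("%d\n", PF_RTABLEID_IS_VALID(-5));       /* 0: invalid */
	printf("%d\n", PF_RTABLEID_IS_VALID(1));        /* 1: valid   */
	printf("%d\n", PF_RTABLEID_IS_VALID(INT_MAX));  /* 1: valid   */
	return 0;
}
#endif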
decl_lck_mtx_data(, pf_lock_data);
decl_lck_rw_data(, pf_perim_lock_data);
lck_mtx_t *pf_lock = &pf_lock_data;
lck_rw_t *pf_perim_lock = &pf_perim_lock_data;

/* state tables */
struct pf_state_tree_lan_ext     pf_statetbl_lan_ext;
struct pf_state_tree_ext_gwy     pf_statetbl_ext_gwy;

struct pf_palist pf_pabuf;
struct pf_status pf_status;

u_int32_t ticket_pabuf;

static MD5_CTX pf_tcp_secret_ctx;
static u_char pf_tcp_secret[16];
static int pf_tcp_secret_init;
static int pf_tcp_iss_off;
static struct pf_anchor_stackframe {
	struct pf_ruleset       *rs;
	struct pf_rule          *r;
	struct pf_anchor_node   *parent;
	struct pf_anchor        *child;
} pf_anchor_stack[64];
struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
struct pool pf_state_pl, pf_state_key_pl;

typedef void (*hook_fn_t)(void *);

struct hook_desc {
	TAILQ_ENTRY(hook_desc) hd_list;
	hook_fn_t hd_fn;
	void *hd_arg;
};

#define HOOK_REMOVE     0x01
#define HOOK_FREE       0x02
#define HOOK_ABORT      0x04

static void *hook_establish(struct hook_desc_head *, int,
    hook_fn_t, void *);
static void hook_runloop(struct hook_desc_head *, int flags);
struct pool pf_app_state_pl;
static void pf_print_addr(struct pf_addr *addr, sa_family_t af);
static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int,
    u_int8_t);

static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

static void pf_init_threshold(struct pf_threshold *, u_int32_t,
    u_int32_t);
static void pf_add_threshold(struct pf_threshold *);
static int pf_check_threshold(struct pf_threshold *);

static void pf_change_ap(int, pbuf_t *, struct pf_addr *,
    u_int16_t *, u_int16_t *, u_int16_t *,
    struct pf_addr *, u_int16_t, u_int8_t, sa_family_t,
    sa_family_t, int);
static int pf_modulate_sack(pbuf_t *, int, struct pf_pdesc *,
    struct tcphdr *, struct pf_state_peer *);

static void pf_change_a6(struct pf_addr *, u_int16_t *,
    struct pf_addr *, u_int8_t);
void pf_change_addr(struct pf_addr *a, u_int16_t *c,
    struct pf_addr *an, u_int8_t u,
    sa_family_t af, sa_family_t afn);

static void pf_change_icmp(struct pf_addr *, u_int16_t *,
    struct pf_addr *, struct pf_addr *, u_int16_t,
    u_int16_t *, u_int16_t *, u_int16_t *,
    u_int16_t *, u_int8_t, sa_family_t);
static void pf_send_tcp(const struct pf_rule *, sa_family_t,
    const struct pf_addr *, const struct pf_addr *,
    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
    u_int16_t, struct ether_header *, struct ifnet *);
static void pf_send_icmp(pbuf_t *, u_int8_t, u_int8_t,
    sa_family_t, struct pf_rule *);
static struct pf_rule *pf_match_translation(struct pf_pdesc *, pbuf_t *,
    int, int, struct pfi_kif *, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, int);
static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *,
    pbuf_t *, int, int, struct pfi_kif *,
    struct pf_src_node **, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, union pf_state_xport *);
static void pf_attach_state(struct pf_state_key *,
    struct pf_state *, int);
static void pf_detach_state(struct pf_state *, int);
static u_int32_t pf_tcp_iss(struct pf_pdesc *);
static int pf_test_rule(struct pf_rule **, struct pf_state **,
    int, struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, struct pf_rule **,
    struct pf_ruleset **, struct ifqueue *);
#if DUMMYNET
static int pf_test_dummynet(struct pf_rule **, int,
    struct pfi_kif *, pbuf_t **,
    struct pf_pdesc *, struct ip_fw_args *);
#endif /* DUMMYNET */
static int pf_test_fragment(struct pf_rule **, int,
    struct pfi_kif *, pbuf_t *, void *,
    struct pf_pdesc *, struct pf_rule **,
    struct pf_ruleset **);
static int pf_test_state_tcp(struct pf_state **, int,
    struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, u_short *);
static int pf_test_state_udp(struct pf_state **, int,
    struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, u_short *);
static int pf_test_state_icmp(struct pf_state **, int,
    struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, u_short *);
static int pf_test_state_other(struct pf_state **, int,
    struct pfi_kif *, struct pf_pdesc *);
static int pf_match_tag(struct pf_rule *,
    struct pf_mtag *, int *);
static void pf_hash(struct pf_addr *, struct pf_addr *,
    struct pf_poolhashkey *, sa_family_t);
static int pf_map_addr(u_int8_t, struct pf_rule *,
    struct pf_addr *, struct pf_addr *,
    struct pf_addr *, struct pf_src_node **);
static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *,
    struct pf_rule *, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, struct pf_src_node **);
static void pf_route(pbuf_t **, struct pf_rule *, int,
    struct ifnet *, struct pf_state *,
    struct pf_pdesc *);
static void pf_route6(pbuf_t **, struct pf_rule *, int,
    struct ifnet *, struct pf_state *,
    struct pf_pdesc *);
static u_int8_t pf_get_wscale(pbuf_t *, int, u_int16_t,
    sa_family_t);
static u_int16_t pf_get_mss(pbuf_t *, int, u_int16_t,
    sa_family_t);
static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
    u_int16_t);
static void pf_set_rt_ifp(struct pf_state *,
    struct pf_addr *, sa_family_t af);
static int pf_check_proto_cksum(pbuf_t *, int, int,
    u_int8_t, sa_family_t);
static int pf_addr_wrap_neq(struct pf_addr_wrap *,
    struct pf_addr_wrap *);
static struct pf_state *pf_find_state(struct pfi_kif *,
    struct pf_state_key_cmp *, u_int);
static int pf_src_connlimit(struct pf_state **);
static void pf_stateins_err(const char *, struct pf_state *,
    struct pfi_kif *);
static int pf_check_congestion(struct ifqueue *);

static const char *pf_pptp_ctrl_type_name(u_int16_t code);
static void pf_pptp_handler(struct pf_state *, int, int,
    struct pf_pdesc *, struct pfi_kif *);
static void pf_pptp_unlink(struct pf_state *);
static void pf_grev1_unlink(struct pf_state *);
static int pf_test_state_grev1(struct pf_state **, int,
    struct pfi_kif *, int, struct pf_pdesc *);
static int pf_ike_compare(struct pf_app_state *,
    struct pf_app_state *);
static int pf_test_state_esp(struct pf_state **, int,
    struct pfi_kif *, int, struct pf_pdesc *);

extern struct pool pfr_ktable_pl;
extern struct pool pfr_kentry_pl;
extern int path_mtu_discovery;
struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ .pp = &pf_state_pl, .limit = PFSTATE_HIWAT },
	{ .pp = &pf_app_state_pl, .limit = PFAPPSTATE_HIWAT },
	{ .pp = &pf_src_tree_pl, .limit = PFSNODE_HIWAT },
	{ .pp = &pf_frent_pl, .limit = PFFRAG_FRENT_HIWAT },
	{ .pp = &pfr_ktable_pl, .limit = PFR_KTABLE_HIWAT },
	{ .pp = &pfr_kentry_pl, .limit = PFR_KENTRY_HIWAT },
};
/*
 * Make the first `len` bytes of the packet writable, refreshing the
 * cached header pointers in the packet descriptor in case the
 * underlying buffer moved. Returns NULL on failure.
 */
void *
pf_lazy_makewritable(struct pf_pdesc *pd, pbuf_t *pbuf, int len)
{
	void *p;

	VERIFY(pbuf == pd->mp);

	p = pbuf->pb_data;
	if (len > pd->lmw) {
		if ((p = pbuf_ensure_writable(pbuf, len)) == NULL) {
			len = -1;
		}
		pd->lmw = len;
		if (len >= 0) {
			pd->pf_mtag = pf_find_mtag_pbuf(pbuf);

			switch (pd->af) {
			case AF_INET: {
				struct ip *h = p;
				pd->src = (struct pf_addr *)(uintptr_t)&h->ip_src;
				pd->dst = (struct pf_addr *)(uintptr_t)&h->ip_dst;
				pd->ip_sum = &h->ip_sum;
				break;
			}
			case AF_INET6: {
				struct ip6_hdr *h = p;
				pd->src = (struct pf_addr *)(uintptr_t)&h->ip6_src;
				pd->dst = (struct pf_addr *)(uintptr_t)&h->ip6_dst;
				break;
			}
			}
		}
	}

	return len < 0 ? NULL : p;
}
static __inline int
pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
    int direction, int *action)
{
	if (*state == NULL || (*state)->timeout == PFTM_PURGE) {
		*action = PF_DROP;
		return TRUE;
	}

	if (direction == PF_OUT &&
	    (((*state)->rule.ptr->rt == PF_ROUTETO &&
	    (*state)->rule.ptr->direction == PF_OUT) ||
	    ((*state)->rule.ptr->rt == PF_REPLYTO &&
	    (*state)->rule.ptr->direction == PF_IN)) &&
	    (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
		*action = PF_PASS;
		return TRUE;
	}

	return FALSE;
}
#define STATE_LOOKUP() \
	do { \
		*state = pf_find_state(kif, &key, direction); \
		if (*state != NULL && pd != NULL && \
		    !(pd->pktflags & PKTF_FLOW_ID)) { \
			pd->flowsrc = (*state)->state_key->flowsrc; \
			pd->flowhash = (*state)->state_key->flowhash; \
			if (pd->flowhash != 0) { \
				pd->pktflags |= PKTF_FLOW_ID; \
				pd->pktflags &= ~PKTF_FLOW_ADV; \
			} \
		} \
		if (pf_state_lookup_aux(state, kif, direction, &action)) \
			return (action); \
	} while (0)

#define STATE_ADDR_TRANSLATE(sk) \
	((sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
	((sk)->af_lan == AF_INET6 && \
	((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
	(sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
	(sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])))

#define STATE_TRANSLATE(sk) \
	((sk)->af_lan != (sk)->af_gwy || \
	STATE_ADDR_TRANSLATE(sk) || \
	(sk)->lan.xport.port != (sk)->gwy.xport.port)

#define STATE_GRE_TRANSLATE(sk) \
	(STATE_ADDR_TRANSLATE(sk) || \
	(sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)
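
/*
 * Illustrative sketch, not from the original source: STATE_TRANSLATE()
 * is true exactly when packets matching a state need rewriting -- a
 * differing address family (NAT64), a differing address word, or a
 * differing port. Hypothetical userland mock of the IPv4 case, excluded
 * from the build:
 */
#if 0
#include <stdio.h>

struct half { unsigned addr32[4]; unsigned short port; };

int
main(void)
{
	struct half lan = { { 0x0a000001, 0, 0, 0 }, 49152 }; /* 10.0.0.1 */
	struct half gwy = { { 0xc0a80001, 0, 0, 0 }, 61000 }; /* 192.168.0.1 */

	/* the same test STATE_TRANSLATE() performs for an IPv4 state */
	int translate = lan.addr32[0] != gwy.addr32[0] || lan.port != gwy.port;
	printf("needs rewrite: %d\n", translate);       /* 1 */
	return 0;
}
#endif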
#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define STATE_INC_COUNTERS(s) \
	do { \
		s->rule.ptr->states++; \
		VERIFY(s->rule.ptr->states != 0); \
		if (s->anchor.ptr != NULL) { \
			s->anchor.ptr->states++; \
			VERIFY(s->anchor.ptr->states != 0); \
		} \
		if (s->nat_rule.ptr != NULL) { \
			s->nat_rule.ptr->states++; \
			VERIFY(s->nat_rule.ptr->states != 0); \
		} \
	} while (0)

#define STATE_DEC_COUNTERS(s) \
	do { \
		if (s->nat_rule.ptr != NULL) { \
			VERIFY(s->nat_rule.ptr->states > 0); \
			s->nat_rule.ptr->states--; \
		} \
		if (s->anchor.ptr != NULL) { \
			VERIFY(s->anchor.ptr->states > 0); \
			s->anchor.ptr->states--; \
		} \
		VERIFY(s->rule.ptr->states > 0); \
		s->rule.ptr->states--; \
	} while (0)
static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_lan_ext(struct pf_state_key *,
    struct pf_state_key *);
static __inline int pf_state_compare_ext_gwy(struct pf_state_key *,
    struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
    struct pf_state *);

struct pf_src_tree tree_src_tracking;

struct pf_state_tree_id tree_id;
struct pf_state_queue state_list;

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree_lan_ext, pf_state_key,
    entry_lan_ext, pf_state_compare_lan_ext);
RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key,
    entry_ext_gwy, pf_state_compare_ext_gwy);
RB_GENERATE(pf_state_tree_id, pf_state,
    entry_id, pf_state_compare_id);

#define PF_DT_SKIP_LANEXT       0x01
#define PF_DT_SKIP_EXTGWY       0x02

static const u_int16_t PF_PPTP_PORT = 1723;
static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
struct pf_pptp_hdr {
	u_int16_t length;
	u_int16_t type;
	u_int32_t magic;
};

struct pf_pptp_ctrl_hdr {
	u_int16_t type;
	u_int16_t reserved_0;
};

struct pf_pptp_ctrl_generic {
	u_int16_t data[0];
};

#define PF_PPTP_CTRL_TYPE_START_REQ     1
struct pf_pptp_ctrl_start_req {
	u_int16_t protocol_version;
	u_int16_t reserved_1;
	u_int32_t framing_capabilities;
	u_int32_t bearer_capabilities;
	u_int16_t maximum_channels;
	u_int16_t firmware_revision;
	u_int8_t  host_name[64];
	u_int8_t  vendor_string[64];
};

#define PF_PPTP_CTRL_TYPE_START_RPY     2
struct pf_pptp_ctrl_start_rpy {
	u_int16_t protocol_version;
	u_int8_t  result_code;
	u_int8_t  error_code;
	u_int32_t framing_capabilities;
	u_int32_t bearer_capabilities;
	u_int16_t maximum_channels;
	u_int16_t firmware_revision;
	u_int8_t  host_name[64];
	u_int8_t  vendor_string[64];
};

#define PF_PPTP_CTRL_TYPE_STOP_REQ      3
struct pf_pptp_ctrl_stop_req {
	u_int8_t  reason;
	u_int8_t  reserved_1;
	u_int16_t reserved_2;
};

#define PF_PPTP_CTRL_TYPE_STOP_RPY      4
struct pf_pptp_ctrl_stop_rpy {
	u_int8_t  result_code;
	u_int8_t  error_code;
	u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_ECHO_REQ      5
struct pf_pptp_ctrl_echo_req {
	u_int32_t identifier;
};

#define PF_PPTP_CTRL_TYPE_ECHO_RPY      6
struct pf_pptp_ctrl_echo_rpy {
	u_int32_t identifier;
	u_int8_t  result_code;
	u_int8_t  error_code;
	u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ  7
struct pf_pptp_ctrl_call_out_req {
	u_int16_t call_id;
	u_int16_t call_sernum;
	u_int32_t min_bps;
	u_int32_t bearer_type;
	u_int32_t framing_type;
	u_int16_t rxwindow_size;
	u_int16_t proc_delay;
	u_int8_t  phone_num[64];
	u_int8_t  sub_addr[64];
};

#define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY  8
struct pf_pptp_ctrl_call_out_rpy {
	u_int16_t call_id;
	u_int16_t peer_call_id;
	u_int8_t  result_code;
	u_int8_t  error_code;
	u_int16_t cause_code;
	u_int32_t connect_speed;
	u_int16_t rxwindow_size;
	u_int16_t proc_delay;
	u_int32_t phy_channel_id;
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_1ST   9
struct pf_pptp_ctrl_call_in_1st {
	u_int16_t call_id;
	u_int16_t call_sernum;
	u_int32_t bearer_type;
	u_int32_t phy_channel_id;
	u_int16_t dialed_number_len;
	u_int16_t dialing_number_len;
	u_int8_t  dialed_num[64];
	u_int8_t  dialing_num[64];
	u_int8_t  sub_addr[64];
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_2ND   10
struct pf_pptp_ctrl_call_in_2nd {
	u_int16_t call_id;
	u_int16_t peer_call_id;
	u_int8_t  result_code;
	u_int8_t  error_code;
	u_int16_t rxwindow_size;
	u_int16_t packet_delay;
	u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_3RD   11
struct pf_pptp_ctrl_call_in_3rd {
	u_int16_t call_id;
	u_int16_t reserved_1;
	u_int32_t connect_speed;
	u_int16_t rxwindow_size;
	u_int16_t packet_delay;
	u_int32_t framing_type;
};

#define PF_PPTP_CTRL_TYPE_CALL_CLR      12
struct pf_pptp_ctrl_call_clr {
	u_int16_t call_id;
	u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_DISC     13
struct pf_pptp_ctrl_call_disc {
	u_int16_t call_id;
	u_int8_t  result_code;
	u_int8_t  error_code;
	u_int16_t cause_code;
	u_int16_t reserved_1;
	u_int8_t  statistics[128];
};

#define PF_PPTP_CTRL_TYPE_ERROR         14
struct pf_pptp_ctrl_error {
	u_int16_t peer_call_id;
	u_int16_t reserved_1;
	u_int32_t crc_errors;
	u_int32_t fr_errors;
	u_int32_t hw_errors;
	u_int32_t buf_errors;
	u_int32_t tim_errors;
	u_int32_t align_errors;
};

#define PF_PPTP_CTRL_TYPE_SET_LINKINFO  15
struct pf_pptp_ctrl_set_linkinfo {
	u_int16_t peer_call_id;
	u_int16_t reserved_1;
	u_int32_t tx_accm;
	u_int32_t rx_accm;
};
static const char *
pf_pptp_ctrl_type_name(u_int16_t code)
{
	code = ntohs(code);

	if (code < PF_PPTP_CTRL_TYPE_START_REQ ||
	    code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) {
		static char reserved[] = "reserved-00";

		sprintf(&reserved[9], "%02x", code);
		return reserved;
	} else {
		static const char *name[] = {
			"start_req", "start_rpy", "stop_req", "stop_rpy",
			"echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
			"call_in_1st", "call_in_2nd", "call_in_3rd",
			"call_clr", "call_disc", "error", "set_linkinfo"
		};

		return name[code - 1];
	}
}
static const size_t PF_PPTP_CTRL_MSG_MINSIZE =
    sizeof(struct pf_pptp_hdr) + sizeof(struct pf_pptp_ctrl_hdr);

union pf_pptp_ctrl_msg_union {
	struct pf_pptp_ctrl_start_req           start_req;
	struct pf_pptp_ctrl_start_rpy           start_rpy;
	struct pf_pptp_ctrl_stop_req            stop_req;
	struct pf_pptp_ctrl_stop_rpy            stop_rpy;
	struct pf_pptp_ctrl_echo_req            echo_req;
	struct pf_pptp_ctrl_echo_rpy            echo_rpy;
	struct pf_pptp_ctrl_call_out_req        call_out_req;
	struct pf_pptp_ctrl_call_out_rpy        call_out_rpy;
	struct pf_pptp_ctrl_call_in_1st         call_in_1st;
	struct pf_pptp_ctrl_call_in_2nd         call_in_2nd;
	struct pf_pptp_ctrl_call_in_3rd         call_in_3rd;
	struct pf_pptp_ctrl_call_clr            call_clr;
	struct pf_pptp_ctrl_call_disc           call_disc;
	struct pf_pptp_ctrl_error               error;
	struct pf_pptp_ctrl_set_linkinfo        set_linkinfo;
	u_int8_t                                data[0];
};

struct pf_pptp_ctrl_msg {
	struct pf_pptp_hdr              hdr;
	struct pf_pptp_ctrl_hdr         ctrl;
	union pf_pptp_ctrl_msg_union    msg;
};
#define PF_GRE_FLAG_CHECKSUM_PRESENT    0x8000
#define PF_GRE_FLAG_VERSION_MASK        0x0007
#define PF_GRE_PPP_ETHERTYPE            0x880B

struct pf_grev1_hdr {
	u_int16_t flags;
	u_int16_t protocol_type;
	u_int16_t payload_length;
	u_int16_t call_id;
	/*
	 *  u_int32_t seqno;
	 *  u_int32_t ackno;
	 */
};

static const u_int16_t PF_IKE_PORT = 500;

struct pf_ike_hdr {
	u_int64_t initiator_cookie, responder_cookie;
	u_int8_t next_payload, version, exchange_type, flags;
	u_int32_t message_id, length;
};

#define PF_IKE_PACKET_MINSIZE   (sizeof (struct pf_ike_hdr))

#define PF_IKEv1_EXCHTYPE_BASE                  1
#define PF_IKEv1_EXCHTYPE_ID_PROTECT            2
#define PF_IKEv1_EXCHTYPE_AUTH_ONLY             3
#define PF_IKEv1_EXCHTYPE_AGGRESSIVE            4
#define PF_IKEv1_EXCHTYPE_INFORMATIONAL         5
#define PF_IKEv2_EXCHTYPE_SA_INIT               34
#define PF_IKEv2_EXCHTYPE_AUTH                  35
#define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA       36
#define PF_IKEv2_EXCHTYPE_INFORMATIONAL         37

#define PF_IKEv1_FLAG_E         0x01
#define PF_IKEv1_FLAG_C         0x02
#define PF_IKEv1_FLAG_A         0x04
#define PF_IKEv2_FLAG_I         0x08
#define PF_IKEv2_FLAG_V         0x10
#define PF_IKEv2_FLAG_R         0x20
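
/*
 * Illustrative sketch, not from the original source: picking the GRE
 * version out of the first 16-bit word of the header using the masks
 * above. PPTP data traffic is GREv1 carrying PPP (ethertype 0x880B).
 * Standalone userland demonstration, excluded from the build:
 */
#if 0
#include <stdio.h>

#define PF_GRE_FLAG_CHECKSUM_PRESENT    0x8000
#define PF_GRE_FLAG_VERSION_MASK        0x0007

int
main(void)
{
	unsigned short flags = 0x3001;  /* K and S bits set, version 1 */

	printf("version: %u\n", flags & PF_GRE_FLAG_VERSION_MASK);      /* 1 */
	printf("has cksum: %d\n",
	    (flags & PF_GRE_FLAG_CHECKSUM_PRESENT) != 0);               /* 0 */
	return 0;
}
#endif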
/* memcmp()-style comparison: returns -1, 0 or 1 */
static __inline int
pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		if (a->addr32[0] > b->addr32[0]) {
			return 1;
		}
		if (a->addr32[0] < b->addr32[0]) {
			return -1;
		}
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		if (a->addr32[3] > b->addr32[3]) {
			return 1;
		}
		if (a->addr32[3] < b->addr32[3]) {
			return -1;
		}
		if (a->addr32[2] > b->addr32[2]) {
			return 1;
		}
		if (a->addr32[2] < b->addr32[2]) {
			return -1;
		}
		if (a->addr32[1] > b->addr32[1]) {
			return 1;
		}
		if (a->addr32[1] < b->addr32[1]) {
			return -1;
		}
		if (a->addr32[0] > b->addr32[0]) {
			return 1;
		}
		if (a->addr32[0] < b->addr32[0]) {
			return -1;
		}
		break;
#endif /* INET6 */
	}
	return 0;
}
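
/*
 * Illustrative note, not from the original source: for AF_INET6 the
 * comparison starts at addr32[3] (the low-order word) rather than
 * addr32[0]. The resulting order is not lexicographic by address, but
 * any consistent total order is sufficient for the RB trees below.
 * Standalone userland sketch of that word order, excluded from the
 * build:
 */
#if 0
#include <stdio.h>

struct addr { unsigned addr32[4]; };

static int
cmp_v6(const struct addr *a, const struct addr *b)
{
	/* same word order pf_addr_compare() uses: 3, 2, 1, 0 */
	static const int order[4] = { 3, 2, 1, 0 };
	int i;

	for (i = 0; i < 4; i++) {
		if (a->addr32[order[i]] > b->addr32[order[i]])
			return 1;
		if (a->addr32[order[i]] < b->addr32[order[i]])
			return -1;
	}
	return 0;
}

int
main(void)
{
	struct addr x = { { 1, 0, 0, 0 } }, y = { { 0, 0, 0, 2 } };

	/* y sorts above x because its word 3 is larger */
	printf("%d\n", cmp_v6(&x, &y));         /* -1 */
	return 0;
}
#endif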
static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
	int diff;

	if (a->rule.ptr > b->rule.ptr) {
		return 1;
	}
	if (a->rule.ptr < b->rule.ptr) {
		return -1;
	}
	if ((diff = a->af - b->af) != 0) {
		return diff;
	}
	if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0) {
		return diff;
	}
	return 0;
}
static __inline int
pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b)
{
	int diff;
	int extfilter;

	if ((diff = a->proto - b->proto) != 0) {
		return diff;
	}

	if ((diff = a->af_lan - b->af_lan) != 0) {
		return diff;
	}

	extfilter = PF_EXTFILTER_APD;

	switch (a->proto) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) {
			return diff;
		}
		break;

	case IPPROTO_TCP:
		if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) {
			return diff;
		}
		if ((diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) {
			return diff;
		}
		break;

	case IPPROTO_UDP:
		if ((diff = a->proto_variant - b->proto_variant)) {
			return diff;
		}
		extfilter = a->proto_variant;
		if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) {
			return diff;
		}
		if ((extfilter < PF_EXTFILTER_AD) &&
		    (diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) {
			return diff;
		}
		break;

	case IPPROTO_GRE:
		if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
		    a->proto_variant == b->proto_variant) {
			if (!!(diff = a->ext_lan.xport.call_id -
			    b->ext_lan.xport.call_id)) {
				return diff;
			}
		}
		break;

	case IPPROTO_ESP:
		if (!!(diff = a->ext_lan.xport.spi - b->ext_lan.xport.spi)) {
			return diff;
		}
		break;

	default:
		break;
	}

	switch (a->af_lan) {
#if INET
	case AF_INET:
		if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr,
		    a->af_lan)) != 0) {
			return diff;
		}

		if (extfilter < PF_EXTFILTER_EI) {
			if ((diff = pf_addr_compare(&a->ext_lan.addr,
			    &b->ext_lan.addr,
			    a->af_lan)) != 0) {
				return diff;
			}
		}
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr,
		    a->af_lan)) != 0) {
			return diff;
		}

		if (extfilter < PF_EXTFILTER_EI ||
		    !PF_AZERO(&b->ext_lan.addr, AF_INET6)) {
			if ((diff = pf_addr_compare(&a->ext_lan.addr,
			    &b->ext_lan.addr, a->af_lan)) != 0) {
				return diff;
			}
		}
		break;
#endif /* INET6 */
	}

	if (a->app_state && b->app_state) {
		if (a->app_state->compare_lan_ext &&
		    b->app_state->compare_lan_ext) {
			diff = (const char *)b->app_state->compare_lan_ext -
			    (const char *)a->app_state->compare_lan_ext;
			if (diff != 0) {
				return diff;
			}
			diff = a->app_state->compare_lan_ext(a->app_state,
			    b->app_state);
			if (diff != 0) {
				return diff;
			}
		}
	}

	return 0;
}
static __inline int
pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b)
{
	int diff;
	int extfilter;

	if ((diff = a->proto - b->proto) != 0) {
		return diff;
	}

	if ((diff = a->af_gwy - b->af_gwy) != 0) {
		return diff;
	}

	extfilter = PF_EXTFILTER_APD;

	switch (a->proto) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) {
			return diff;
		}
		break;

	case IPPROTO_TCP:
		if ((diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) {
			return diff;
		}
		if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) {
			return diff;
		}
		break;

	case IPPROTO_UDP:
		if ((diff = a->proto_variant - b->proto_variant)) {
			return diff;
		}
		extfilter = a->proto_variant;
		if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) {
			return diff;
		}
		if ((extfilter < PF_EXTFILTER_AD) &&
		    (diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) {
			return diff;
		}
		break;

	case IPPROTO_GRE:
		if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
		    a->proto_variant == b->proto_variant) {
			if (!!(diff = a->gwy.xport.call_id -
			    b->gwy.xport.call_id)) {
				return diff;
			}
		}
		break;

	case IPPROTO_ESP:
		if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi)) {
			return diff;
		}
		break;

	default:
		break;
	}

	switch (a->af_gwy) {
#if INET
	case AF_INET:
		if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr,
		    a->af_gwy)) != 0) {
			return diff;
		}

		if (extfilter < PF_EXTFILTER_EI) {
			if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr,
			    a->af_gwy)) != 0) {
				return diff;
			}
		}
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr,
		    a->af_gwy)) != 0) {
			return diff;
		}

		if (extfilter < PF_EXTFILTER_EI ||
		    !PF_AZERO(&b->ext_gwy.addr, AF_INET6)) {
			if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr,
			    a->af_gwy)) != 0) {
				return diff;
			}
		}
		break;
#endif /* INET6 */
	}

	if (a->app_state && b->app_state) {
		if (a->app_state->compare_ext_gwy &&
		    b->app_state->compare_ext_gwy) {
			diff = (const char *)b->app_state->compare_ext_gwy -
			    (const char *)a->app_state->compare_ext_gwy;
			if (diff != 0) {
				return diff;
			}
			diff = a->app_state->compare_ext_gwy(a->app_state,
			    b->app_state);
			if (diff != 0) {
				return diff;
			}
		}
	}

	return 0;
}
static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
	if (a->id > b->id) {
		return 1;
	}
	if (a->id < b->id) {
		return -1;
	}
	if (a->creatorid > b->creatorid) {
		return 1;
	}
	if (a->creatorid < b->creatorid) {
		return -1;
	}
	return 0;
}
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	return RB_FIND(pf_state_tree_id, &tree_id,
	    (struct pf_state *)(void *)key);
}
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_state_key *sk = NULL;
	struct pf_state *s;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	switch (dir) {
	case PF_OUT:
		sk = RB_FIND(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
		    (struct pf_state_key *)key);
		break;
	case PF_IN:
		sk = RB_FIND(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy,
		    (struct pf_state_key *)key);
		/*
		 * NAT64 is done only on input; for packets coming in from
		 * the LAN side, we need to look up the lan_ext tree.
		 */
		if (sk == NULL && pf_nat64_configured) {
			sk = RB_FIND(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext,
			    (struct pf_state_key *)key);
			if (sk && sk->af_lan == sk->af_gwy) {
				sk = NULL;
			}
		}
		break;
	default:
		panic("pf_find_state");
	}

	/* list is sorted, if-bound states before floating ones */
	if (sk != NULL) {
		TAILQ_FOREACH(s, &sk->states, next)
		if (s->kif == pfi_all || s->kif == kif) {
			return s;
		}
	}

	return NULL;
}
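
/*
 * Illustrative sketch, not from the original source: outbound lookups
 * use the lan_ext tree because an outbound packet still carries its
 * pre-NAT (LAN-side) addresses, while inbound packets are matched in
 * ext_gwy, the post-NAT view; the NAT64 fallback above covers input
 * that is really LAN-side traffic. Hypothetical userland mock of the
 * dispatch, excluded from the build:
 */
#if 0
#include <stdio.h>

enum dir { DIR_IN, DIR_OUT };

static const char *
lookup_tree(enum dir d)
{
	/* mirrors the switch (dir) dispatch in pf_find_state() */
	return d == DIR_OUT ? "pf_statetbl_lan_ext" : "pf_statetbl_ext_gwy";
}

int
main(void)
{
	printf("out -> %s\n", lookup_tree(DIR_OUT));
	printf("in  -> %s\n", lookup_tree(DIR_IN));
	return 0;
}
#endif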
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key *sk = NULL;
	struct pf_state *s, *ret = NULL;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	switch (dir) {
	case PF_OUT:
		sk = RB_FIND(pf_state_tree_lan_ext,
		    &pf_statetbl_lan_ext, (struct pf_state_key *)key);
		break;
	case PF_IN:
		sk = RB_FIND(pf_state_tree_ext_gwy,
		    &pf_statetbl_ext_gwy, (struct pf_state_key *)key);
		/*
		 * NAT64 is done only on input; for packets coming in from
		 * the LAN side, we need to look up the lan_ext tree.
		 */
		if ((sk == NULL) && pf_nat64_configured) {
			sk = RB_FIND(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext,
			    (struct pf_state_key *)key);
			if (sk && sk->af_lan == sk->af_gwy) {
				sk = NULL;
			}
		}
		break;
	default:
		panic("pf_find_state_all");
	}

	if (sk != NULL) {
		ret = TAILQ_FIRST(&sk->states);
		if (more == NULL) {
			return ret;
		}

		TAILQ_FOREACH(s, &sk->states, next)
		(*more)++;
	}

	return ret;
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = pf_time_second();
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = pf_time_second(), diff = t - threshold->last;

	if (diff >= threshold->seconds) {
		threshold->count = 0;
	} else {
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	}
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return threshold->count > threshold->limit;
}
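
/*
 * Illustrative sketch, not from the original source: the threshold
 * counter is fixed-point -- each event adds PF_THRESHOLD_MULT and the
 * count decays linearly over the configured window, so a rule like
 * "max-src-conn-rate 10/5" trips once the decayed count exceeds
 * 10 * PF_THRESHOLD_MULT. Standalone userland demonstration assuming
 * PF_THRESHOLD_MULT == 1000, excluded from the build:
 */
#if 0
#include <stdio.h>

#define PF_THRESHOLD_MULT 1000

struct threshold { unsigned limit, seconds, count, last; };

static void
add_threshold(struct threshold *th, unsigned now)
{
	unsigned diff = now - th->last;

	if (diff >= th->seconds)
		th->count = 0;
	else
		th->count -= th->count * diff / th->seconds; /* linear decay */
	th->count += PF_THRESHOLD_MULT;
	th->last = now;
}

int
main(void)
{
	/* limit 10 connections per 5 seconds */
	struct threshold th = { 10 * PF_THRESHOLD_MULT, 5, 0, 0 };
	unsigned now = 100;
	int i;

	for (i = 0; i < 11; i++)
		add_threshold(&th, now);        /* 11 events, same second */
	printf("tripped: %d\n", th.count > th.limit);   /* 1 */
	return 0;
}
#endif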
static int
pf_src_connlimit(struct pf_state **state)
{
	int bad = 0;

	(*state)->src_node->conn++;
	VERIFY((*state)->src_node->conn != 0);
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad) {
		return 0;
	}

	if ((*state)->rule.ptr->overload_tbl) {
		struct pfr_addr p;
		u_int32_t killed = 0;

		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf_src_connlimit: blocking address ");
			pf_print_host(&(*state)->src_node->addr, 0,
			    (*state)->state_key->af_lan);
		}

		bzero(&p, sizeof(p));
		p.pfra_af = (*state)->state_key->af_lan;
		switch ((*state)->state_key->af_lan) {
#if INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = (*state)->src_node->addr.v4addr;
			break;
#endif /* INET */
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = (*state)->src_node->addr.v6addr;
			break;
		}

		pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
		    &p, pf_calendar_time_second());

		/* kill existing states if that's required. */
		if ((*state)->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
			RB_FOREACH(st, pf_state_tree_id, &tree_id) {
				sk = st->state_key;
				/*
				 * Kill states from this source. (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is not
				 * set)
				 */
				if (sk->af_lan ==
				    (*state)->state_key->af_lan &&
				    (((*state)->state_key->direction ==
				    PF_OUT &&
				    PF_AEQ(&(*state)->src_node->addr,
				    &sk->lan.addr, sk->af_lan)) ||
				    ((*state)->state_key->direction == PF_IN &&
				    PF_AEQ(&(*state)->src_node->addr,
				    &sk->ext_lan.addr, sk->af_lan))) &&
				    ((*state)->rule.ptr->flush &
				    PF_FLUSH_GLOBAL ||
				    (*state)->rule.ptr == st->rule.ptr)) {
					st->timeout = PFTM_PURGE;
					st->src.state = st->dst.state =
					    TCPS_CLOSED;
					killed++;
				}
			}
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf(", %u states killed", killed);
			}
		}
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("\n");
		}
	}

	/* kill this state */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	return 1;
}
static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
	struct pf_src_node k;

	if (*sn == NULL) {
		k.af = af;
		PF_ACPY(&k.addr, src, af);
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR) {
			k.rule.ptr = rule;
		} else {
			k.rule.ptr = NULL;
		}
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
	}
	if (*sn == NULL) {
		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes) {
			(*sn) = pool_get(&pf_src_tree_pl, PR_WAITOK);
		} else {
			pf_status.lcounters[LCNT_SRCNODES]++;
		}
		if ((*sn) == NULL) {
			return -1;
		}
		bzero(*sn, sizeof(struct pf_src_node));

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR) {
			(*sn)->rule.ptr = rule;
		} else {
			(*sn)->rule.ptr = NULL;
		}
		PF_ACPY(&(*sn)->addr, src, af);
		if (RB_INSERT(pf_src_tree,
		    &tree_src_tracking, *sn) != NULL) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf: src_tree insert failed: ");
				pf_print_host(&(*sn)->addr, 0, af);
				printf("\n");
			}
			pool_put(&pf_src_tree_pl, *sn);
			return -1;
		}
		(*sn)->creation = pf_time_second();
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL) {
			(*sn)->rule.ptr->src_nodes++;
		}
		pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		pf_status.src_nodes++;
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			pf_status.lcounters[LCNT_SRCSTATES]++;
			return -1;
		}
	}
	return 0;
}
static void
pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif)
{
	struct pf_state_key *sk = s->state_key;

	if (pf_status.debug >= PF_DEBUG_MISC) {
		printf("pf: state insert failed: %s %s ", tree, kif->pfik_name);
		switch (sk->proto) {
		case IPPROTO_TCP:
			printf("TCP");
			break;
		case IPPROTO_UDP:
			printf("UDP");
			break;
		case IPPROTO_ICMP:
			printf("ICMP4");
			break;
		case IPPROTO_ICMPV6:
			printf("ICMP6");
			break;
		default:
			printf("PROTO=%u", sk->proto);
			break;
		}
		printf(" lan: ");
		pf_print_sk_host(&sk->lan, sk->af_lan, sk->proto,
		    sk->proto_variant);
		printf(" gwy: ");
		pf_print_sk_host(&sk->gwy, sk->af_gwy, sk->proto,
		    sk->proto_variant);
		printf(" ext_lan: ");
		pf_print_sk_host(&sk->ext_lan, sk->af_lan, sk->proto,
		    sk->proto_variant);
		printf(" ext_gwy: ");
		pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto,
		    sk->proto_variant);
		if (s->sync_flags & PFSTATE_FROMSYNC) {
			printf(" (from sync)");
		}
		printf("\n");
	}
}
int
pf_insert_state(struct pfi_kif *kif, struct pf_state *s)
{
	struct pf_state_key *cur;
	struct pf_state *sp;

	VERIFY(s->state_key != NULL);
	s->kif = kif;

	if ((cur = RB_INSERT(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
	    s->state_key)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(sp, &cur->states, next)
		if (sp->kif == kif) {   /* collision! */
			pf_stateins_err("tree_lan_ext", s, kif);
			pf_detach_state(s,
			    PF_DT_SKIP_LANEXT | PF_DT_SKIP_EXTGWY);
			return -1;
		}
		pf_detach_state(s, PF_DT_SKIP_LANEXT | PF_DT_SKIP_EXTGWY);
		pf_attach_state(cur, s, kif == pfi_all ? 1 : 0);
	}

	/* if cur != NULL, we already found a state key and attached to it */
	if (cur == NULL && (cur = RB_INSERT(pf_state_tree_ext_gwy,
	    &pf_statetbl_ext_gwy, s->state_key)) != NULL) {
		/* must not happen. we must have found the sk above! */
		pf_stateins_err("tree_ext_gwy", s, kif);
		pf_detach_state(s, PF_DT_SKIP_EXTGWY);
		return -1;
	}

	if (s->id == 0 && s->creatorid == 0) {
		s->id = htobe64(pf_status.stateid++);
		s->creatorid = pf_status.hostid;
	}
	if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state insert failed: "
			    "id: %016llx creatorid: %08x",
			    be64toh(s->id), ntohl(s->creatorid));
			if (s->sync_flags & PFSTATE_FROMSYNC) {
				printf(" (from sync)");
			}
			printf("\n");
		}
		pf_detach_state(s, 0);
		return -1;
	}
	TAILQ_INSERT_TAIL(&state_list, s, entry_list);
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	pf_status.states++;
	VERIFY(pf_status.states != 0);
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
#if NPFSYNC
	pfsync_insert_state(s);
#endif /* NPFSYNC */
	return 0;
}
static int
pf_purge_thread_cont(int err)
{
#pragma unused(err)
	static u_int32_t nloops = 0;
	int t = 1;      /* 1 second */

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the periodic timeout callout to update
	 * the counter returnable via net_uptime().
	 */
	net_update_uptime();

	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);

	/* purge everything if not running */
	if (!pf_status.running) {
		pf_purge_expired_states(pf_status.states);
		pf_purge_expired_fragments();
		pf_purge_expired_src_nodes();

		/* terminate thread (we don't currently do this) */
		if (pf_purge_thread == NULL) {
			lck_mtx_unlock(pf_lock);
			lck_rw_done(pf_perim_lock);

			thread_deallocate(current_thread());
			thread_terminate(current_thread());
			/* NOTREACHED */
			return 0;
		} else {
			/* if there's nothing left, sleep w/o timeout */
			if (pf_status.states == 0 &&
			    pf_normalize_isempty() &&
			    RB_EMPTY(&tree_src_tracking)) {
				nloops = 0;
				t = 0;
			}
			goto done;
		}
	}

	/* process a fraction of the state table every second */
	pf_purge_expired_states(1 + (pf_status.states
	    / pf_default_rule.timeout[PFTM_INTERVAL]));

	/* purge other expired types every PFTM_INTERVAL seconds */
	if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
		pf_purge_expired_fragments();
		pf_purge_expired_src_nodes();
		nloops = 0;
	}
done:
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	(void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge_cont",
	    t * hz, pf_purge_thread_cont);
	/* NOTREACHED */
	VERIFY(0);

	return 0;
}
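
/*
 * Illustrative arithmetic, not from the original source: the purge pass
 * above walks 1 + states / PFTM_INTERVAL entries per one-second wakeup,
 * so the whole state list is visited roughly once per PFTM_INTERVAL
 * seconds. Standalone userland demonstration assuming 50000 states and
 * a 10-second interval, excluded from the build:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned states = 50000, interval = 10;     /* assumed values */
	unsigned per_pass = 1 + states / interval;

	printf("entries checked per second: %u\n", per_pass);   /* 5001 */
	printf("full sweep takes about %u s\n",
	    (states + per_pass - 1) / per_pass);                /* 10 */
	return 0;
}
#endif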
void
pf_purge_thread_fn(void *v, wait_result_t w)
{
#pragma unused(v, w)
	(void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge", 0,
	    pf_purge_thread_cont);
	/*
	 * tsleep0() shouldn't have returned as PCATCH was not set;
	 * therefore assert in this case.
	 */
	VERIFY(0);
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t t;
	u_int32_t start;
	u_int32_t end;
	u_int32_t states;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE) {
		return pf_time_second();
	}

	VERIFY(state->timeout != PFTM_UNLINKED);
	VERIFY(state->timeout < PFTM_MAX);
	t = state->rule.ptr->timeout[state->timeout];
	if (!t) {
		t = pf_default_rule.timeout[state->timeout];
	}
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end) {
			return state->expire + t * (end - states) /
			    (end - start);
		} else {
			return pf_time_second();
		}
	}
	return state->expire + t;
}
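
/*
 * Illustrative arithmetic, not from the original source: between the
 * adaptive start and end state counts, the timeout is scaled by
 * (end - states) / (end - start); at or beyond `end`, states expire
 * immediately. Standalone userland demonstration with assumed numbers
 * (t = 86400, start = 6000, end = 12000, 9000 current states, so the
 * effective timeout halves to 43200), excluded from the build:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned t = 86400, start = 6000, end = 12000, states = 9000;

	if (end && states > start && states < end)
		printf("scaled timeout: %u\n",
		    t * (end - states) / (end - start));        /* 43200 */
	return 0;
}
#endif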
void
pf_purge_expired_src_nodes(void)
{
	struct pf_src_node *cur, *next;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states <= 0 && cur->expire <= pf_time_second()) {
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0) {
					pf_rm_rule(NULL, cur->rule.ptr);
				}
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			pf_status.src_nodes--;
			pool_put(&pf_src_tree_pl, cur);
		}
	}
}
void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t t;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (s->src_node != NULL) {
		if (s->src.tcp_est) {
			VERIFY(s->src_node->conn > 0);
			--s->src_node->conn;
		}
		VERIFY(s->src_node->states > 0);
		if (--s->src_node->states <= 0) {
			t = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!t) {
				t = pf_default_rule.timeout[PFTM_SRC_NODE];
			}
			s->src_node->expire = pf_time_second() + t;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		VERIFY(s->nat_src_node->states > 0);
		if (--s->nat_src_node->states <= 0) {
			t = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!t) {
				t = pf_default_rule.timeout[PFTM_SRC_NODE];
			}
			s->nat_src_node->expire = pf_time_second() + t;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}
void
pf_unlink_state(struct pf_state *cur)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (cur->src.state == PF_TCPS_PROXY_DST) {
		pf_send_tcp(cur->rule.ptr, cur->state_key->af_lan,
		    &cur->state_key->ext_lan.addr, &cur->state_key->lan.addr,
		    cur->state_key->ext_lan.xport.port,
		    cur->state_key->lan.xport.port,
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST | TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}

	hook_runloop(&cur->unlink_hooks, HOOK_REMOVE | HOOK_FREE);
	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
#if NPFSYNC
	if (cur->creatorid == pf_status.hostid) {
		pfsync_delete_state(cur);
	}
#endif /* NPFSYNC */
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur, 0);
}
/* callers should be at splpf and hold the
 * write_lock on pf_consistency_lock */
void
pf_free_state(struct pf_state *cur)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
#if NPFSYNC
	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	    pfsyncif->sc_bulk_terminator == cur)) {
		return;
	}
#endif /* NPFSYNC */
	VERIFY(cur->timeout == PFTM_UNLINKED);
	VERIFY(cur->rule.ptr->states > 0);
	if (--cur->rule.ptr->states <= 0 &&
	    cur->rule.ptr->src_nodes <= 0) {
		pf_rm_rule(NULL, cur->rule.ptr);
	}
	if (cur->nat_rule.ptr != NULL) {
		VERIFY(cur->nat_rule.ptr->states > 0);
		if (--cur->nat_rule.ptr->states <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0) {
			pf_rm_rule(NULL, cur->nat_rule.ptr);
		}
	}
	if (cur->anchor.ptr != NULL) {
		VERIFY(cur->anchor.ptr->states > 0);
		if (--cur->anchor.ptr->states <= 0) {
			pf_rm_rule(NULL, cur->anchor.ptr);
		}
	}
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
	TAILQ_REMOVE(&state_list, cur, entry_list);
	if (cur->tag) {
		pf_tag_unref(cur->tag);
	}
	pool_put(&pf_state_pl, cur);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	VERIFY(pf_status.states > 0);
	pf_status.states--;
}
void
pf_purge_expired_states(u_int32_t maxcheck)
{
	static struct pf_state *cur = NULL;
	struct pf_state *next;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	while (maxcheck--) {
		/* wrap to start of list when we hit the end */
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list);
			if (cur == NULL) {
				break;  /* list empty */
			}
		}

		/* get next state, as cur may get deleted */
		next = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= pf_time_second()) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			pf_free_state(cur);
		}
		cur = next;
	}
}
int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE) {
		return 0;
	}
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL) {
		return 1;
	}
	return 0;
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL) {
		return;
	}
	pfr_detach_table(aw->p.tbl);
	aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt = aw->p.tbl;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE || kt == NULL) {
		return;
	}
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
		kt = kt->pfrkt_root;
	}
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}
) ?
1836 pf_print_addr(struct pf_addr
*addr
, sa_family_t af
)
1841 u_int32_t a
= ntohl(addr
->addr32
[0]);
1842 printf("%u.%u.%u.%u", (a
>> 24) & 255, (a
>> 16) & 255,
1843 (a
>> 8) & 255, a
& 255);
1850 u_int8_t i
, curstart
= 255, curend
= 0,
1851 maxstart
= 0, maxend
= 0;
1852 for (i
= 0; i
< 8; i
++) {
1853 if (!addr
->addr16
[i
]) {
1854 if (curstart
== 255) {
1861 if ((curend
- curstart
) >
1862 (maxend
- maxstart
)) {
1863 maxstart
= curstart
;
1870 for (i
= 0; i
< 8; i
++) {
1871 if (i
>= maxstart
&& i
<= maxend
) {
1873 if (i
== maxstart
) {
1882 b
= ntohs(addr
->addr16
[i
]);
static void
pf_print_sk_host(struct pf_state_host *sh, sa_family_t af, int proto,
    u_int8_t proto_variant)
{
	pf_print_addr(&sh->addr, af);

	switch (proto) {
	case IPPROTO_ESP:
		if (sh->xport.spi) {
			printf("[%08x]", ntohl(sh->xport.spi));
		}
		break;

	case IPPROTO_GRE:
		if (proto_variant == PF_GRE_PPTP_VARIANT) {
			printf("[%u]", ntohs(sh->xport.call_id));
		}
		break;

	case IPPROTO_TCP:
	case IPPROTO_UDP:
		printf("[%u]", ntohs(sh->xport.port));
		break;

	default:
		break;
	}
}

static void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	pf_print_addr(addr, af);
	if (p) {
		printf("[%u]", ntohs(p));
	}
}
void
pf_print_state(struct pf_state *s)
{
	struct pf_state_key *sk = s->state_key;
	switch (sk->proto) {
	case IPPROTO_ESP:
		printf("ESP ");
		break;
	case IPPROTO_GRE:
		printf("GRE%u ", sk->proto_variant);
		break;
	case IPPROTO_TCP:
		printf("TCP ");
		break;
	case IPPROTO_UDP:
		printf("UDP ");
		break;
	case IPPROTO_ICMP:
		printf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPV6 ");
		break;
	default:
		printf("%u ", sk->proto);
		break;
	}
	pf_print_sk_host(&sk->lan, sk->af_lan, sk->proto, sk->proto_variant);
	printf(" ");
	pf_print_sk_host(&sk->gwy, sk->af_gwy, sk->proto, sk->proto_variant);
	printf(" ");
	pf_print_sk_host(&sk->ext_lan, sk->af_lan, sk->proto,
	    sk->proto_variant);
	printf(" ");
	pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto,
	    sk->proto_variant);
	printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
	    s->src.seqhi, s->src.max_win, s->src.seqdiff);
	if (s->src.wscale && s->dst.wscale) {
		printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
	}
	printf("]");
	printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
	    s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
	if (s->src.wscale && s->dst.wscale) {
		printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
	}
	printf("]");
	printf(" %u:%u", s->src.state, s->dst.state);
}
void
pf_print_flags(u_int8_t f)
{
	if (f) {
		printf(" ");
	}
	if (f & TH_FIN) {
		printf("F");
	}
	if (f & TH_SYN) {
		printf("S");
	}
	if (f & TH_RST) {
		printf("R");
	}
	if (f & TH_PUSH) {
		printf("P");
	}
	if (f & TH_ACK) {
		printf("A");
	}
	if (f & TH_URG) {
		printf("U");
	}
	if (f & TH_ECE) {
		printf("E");
	}
	if (f & TH_CWR) {
		printf("W");
	}
}
#define PF_SET_SKIP_STEPS(i) \
	do { \
		while (head[i] != cur) { \
			head[i]->skip[i].ptr = cur; \
			head[i] = TAILQ_NEXT(head[i], entries); \
		} \
	} while (0)
void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i) {
		head[i] = cur;
	}
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) {
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		}
		if (cur->direction != prev->direction) {
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		}
		if (cur->af != prev->af) {
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		}
		if (cur->proto != prev->proto) {
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		}
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) {
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		}
		{
			union pf_rule_xport *cx = &cur->src.xport;
			union pf_rule_xport *px = &prev->src.xport;

			switch (cur->proto) {
			case IPPROTO_GRE:
			case IPPROTO_ESP:
				PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
				break;
			default:
				if (prev->proto == IPPROTO_GRE ||
				    prev->proto == IPPROTO_ESP ||
				    cx->range.op != px->range.op ||
				    cx->range.port[0] != px->range.port[0] ||
				    cx->range.port[1] != px->range.port[1]) {
					PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
				}
				break;
			}
		}
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) {
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		}
		{
			union pf_rule_xport *cx = &cur->dst.xport;
			union pf_rule_xport *px = &prev->dst.xport;

			switch (cur->proto) {
			case IPPROTO_GRE:
				if (cur->proto != prev->proto ||
				    cx->call_id != px->call_id) {
					PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
				}
				break;
			case IPPROTO_ESP:
				if (cur->proto != prev->proto ||
				    cx->spi != px->spi) {
					PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
				}
				break;
			default:
				if (prev->proto == IPPROTO_GRE ||
				    prev->proto == IPPROTO_ESP ||
				    cx->range.op != px->range.op ||
				    cx->range.port[0] != px->range.port[0] ||
				    cx->range.port[1] != px->range.port[1]) {
					PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
				}
				break;
			}
		}

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i) {
		PF_SET_SKIP_STEPS(i);
	}
}
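
/*
 * Illustrative sketch, not from the original source: skip steps let the
 * rule evaluator jump over whole runs of rules that share a criterion.
 * If many consecutive rules all match on "proto tcp", a UDP packet
 * follows the PF_SKIP_PROTO pointer from the first of them straight to
 * the next differing rule instead of testing each one. Standalone
 * userland mock, excluded from the build:
 */
#if 0
#include <stdio.h>

#define NRULES 5

struct rule { int proto; int skip_proto; /* index of next differing rule */ };

int
main(void)
{
	/* rules 0..3 share proto 6 (TCP); their skip pointer is rule 4 */
	struct rule rules[NRULES] = {
		{ 6, 4 }, { 6, 4 }, { 6, 4 }, { 6, 4 }, { 17, 5 }
	};
	int pkt_proto = 17, i = 0, tested = 0;

	while (i < NRULES) {
		tested++;
		if (rules[i].proto != pkt_proto) {
			i = rules[i].skip_proto;    /* leap the whole run */
			continue;
		}
		printf("matched rule %d after %d tests\n", i, tested); /* 4, 2 */
		break;
	}
	return 0;
}
#endif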
u_int32_t
pf_calc_state_key_flowhash(struct pf_state_key *sk)
{
	struct pf_flowhash_key fh __attribute__((aligned(8)));
	uint32_t flowhash = 0;

	bzero(&fh, sizeof(fh));
	if (PF_ALEQ(&sk->lan.addr, &sk->ext_lan.addr, sk->af_lan)) {
		bcopy(&sk->lan.addr, &fh.ap1.addr, sizeof(fh.ap1.addr));
		bcopy(&sk->ext_lan.addr, &fh.ap2.addr, sizeof(fh.ap2.addr));
	} else {
		bcopy(&sk->ext_lan.addr, &fh.ap1.addr, sizeof(fh.ap1.addr));
		bcopy(&sk->lan.addr, &fh.ap2.addr, sizeof(fh.ap2.addr));
	}
	if (sk->lan.xport.spi <= sk->ext_lan.xport.spi) {
		fh.ap1.xport.spi = sk->lan.xport.spi;
		fh.ap2.xport.spi = sk->ext_lan.xport.spi;
	} else {
		fh.ap1.xport.spi = sk->ext_lan.xport.spi;
		fh.ap2.xport.spi = sk->lan.xport.spi;
	}
	fh.af = sk->af_lan;
	fh.proto = sk->proto;

try_again:
	flowhash = net_flowhash(&fh, sizeof(fh), pf_hash_seed);
	if (flowhash == 0) {
		/* try to get a non-zero flowhash */
		pf_hash_seed = RandomULong();
		goto try_again;
	}

	return flowhash;
}
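
/*
 * Illustrative sketch, not from the original source: because the two
 * address/SPI pairs are stored in a canonical order (smaller first)
 * before hashing, both directions of a connection map to the same
 * flowhash. Standalone userland sketch of that invariant with a toy
 * hash standing in for net_flowhash(), excluded from the build:
 */
#if 0
#include <stdio.h>

static unsigned
toy_hash(unsigned a, unsigned b)        /* stand-in for net_flowhash() */
{
	return (a * 2654435761u) ^ (b * 40503u);
}

static unsigned
flow_key(unsigned src, unsigned dst)
{
	/* canonicalize exactly as the function above orders ap1/ap2 */
	return (src <= dst) ? toy_hash(src, dst) : toy_hash(dst, src);
}

int
main(void)
{
	printf("%d\n", flow_key(0x0a000001, 0xc0a80001) ==
	    flow_key(0xc0a80001, 0x0a000001));      /* 1: symmetric */
	return 0;
}
#endif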
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type) {
		return 1;
	}
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0)) {
			return 1;
		}
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0)) {
			return 1;
		}
		return 0;
	case PF_ADDR_DYNIFTL:
		return aw1->p.dyn == NULL || aw2->p.dyn == NULL ||
		    aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt;
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return 0;
	case PF_ADDR_TABLE:
		return aw1->p.tbl != aw2->p.tbl;
	case PF_ADDR_RTLABEL:
		return aw1->v.rtlabel != aw2->v.rtlabel;
	default:
		printf("invalid address type: %d\n", aw1->type);
		return 1;
	}
}
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	return nat464_cksum_fixup(cksum, old, new, udp);
}
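
/*
 * Illustrative sketch, not from the original source: the fixup is an
 * RFC 1624-style incremental checksum update applied one 16-bit word at
 * a time, so rewriting an address or port never requires re-summing the
 * whole packet. The `udp` flag preserves UDP's special encoding of 0
 * ("no checksum"). Standalone userland demonstration modeled on the
 * classic OpenBSD pf_cksum_fixup(), excluded from the build:
 */
#if 0
#include <stdio.h>

static unsigned short
fixup(unsigned short cksum, unsigned short old, unsigned short new, int udp)
{
	unsigned int l;

	if (udp && !cksum)
		return 0x0000;          /* "no checksum" stays that way */
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return 0xFFFF;          /* UDP encodes a computed 0 as 0xFFFF */
	return (unsigned short)l;
}

int
main(void)
{
	/* rewriting port 80 -> 8080 in a header whose checksum was 0x1234 */
	unsigned short c = fixup(0x1234, 80, 8080, 0);

	/* undoing the change restores the original checksum */
	printf("%#x -> %#x -> %#x\n", 0x1234, c, fixup(c, 8080, 80, 0));
	return 0;
}
#endif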
/*
 * change ip address & port
 * dir	: packet direction
 * a	: address to be changed
 * p	: port to be changed
 * ic	: ip header checksum
 * pc	: protocol checksum
 * an	: new ip address
 * pn	: new port
 * u	: should be 1 if UDP packet else 0
 * af	: address family of the packet
 * afn	: address family of the new address
 * ua	: should be 1 if ip address needs to be updated in the packet else
 *	  only the checksum is recalculated & updated.
 */
static void
pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p,
    u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn,
    u_int8_t u, sa_family_t af, sa_family_t afn, int ua)
{
	struct pf_addr ao;
	u_int16_t po = *p;

	PF_ACPY(&ao, a, af);
	if (ua) {
		PF_ACPY(a, an, afn);
	}

	*p = pn;

	switch (af) {
#if INET
	case AF_INET:
		switch (afn) {
		case AF_INET:
			*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ao.addr16[0], an->addr16[0], 0),
			    ao.addr16[1], an->addr16[1], 0);

			/*
			 * If the packet is originated from an ALG on the NAT gateway
			 * (source address is loopback or local), in which case the
			 * TCP/UDP checksum field contains the pseudo header checksum
			 * that's not yet complemented.
			 * In that case we do not need to fixup the checksum for port
			 * translation as the pseudo header checksum doesn't include ports.
			 *
			 * A packet generated locally will have UDP/TCP CSUM flag
			 * set (gets set in protocol output).
			 *
			 * It should be noted that the fixup doesn't do anything if the
			 * checksum is 0.
			 */
			if (dir == PF_OUT && pbuf != NULL &&
			    (*pbuf->pb_csum_flags & (CSUM_TCP | CSUM_UDP))) {
				/* Pseudo-header checksum does not include ports */
				*pc = ~pf_cksum_fixup(pf_cksum_fixup(~*pc,
				    ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u);
			} else {
				*pc =
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    *pc, ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u),
				    po, pn, u);
			}
			break;
#if INET6
		case AF_INET6:
			*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    0, an->addr16[2], u),
			    0, an->addr16[3], u),
			    0, an->addr16[4], u),
			    0, an->addr16[5], u),
			    0, an->addr16[6], u),
			    0, an->addr16[7], u),
			    po, pn, u);
			break;
#endif /* INET6 */
		}
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		switch (afn) {
		case AF_INET6:
			/*
			 * If the packet is originated from an ALG on the NAT gateway
			 * (source address is loopback or local), in which case the
			 * TCP/UDP checksum field contains the pseudo header checksum
			 * that's not yet complemented.
			 * A packet generated locally
			 * will have UDP/TCP CSUM flag set (gets set in protocol
			 * output).
			 */
			if (dir == PF_OUT && pbuf != NULL &&
			    (*pbuf->pb_csum_flags & (CSUM_TCPIPV6 |
			    CSUM_UDPIPV6))) {
				/* Pseudo-header checksum does not include ports */
				*pc =
				    ~pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    ~*pc,
				    ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u),
				    ao.addr16[2], an->addr16[2], u),
				    ao.addr16[3], an->addr16[3], u),
				    ao.addr16[4], an->addr16[4], u),
				    ao.addr16[5], an->addr16[5], u),
				    ao.addr16[6], an->addr16[6], u),
				    ao.addr16[7], an->addr16[7], u);
			} else {
				*pc =
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    *pc,
				    ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u),
				    ao.addr16[2], an->addr16[2], u),
				    ao.addr16[3], an->addr16[3], u),
				    ao.addr16[4], an->addr16[4], u),
				    ao.addr16[5], an->addr16[5], u),
				    ao.addr16[6], an->addr16[6], u),
				    ao.addr16[7], an->addr16[7], u),
				    po, pn, u);
			}
			break;
		case AF_INET:
			*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    ao.addr16[2], 0, u),
			    ao.addr16[3], 0, u),
			    ao.addr16[4], 0, u),
			    ao.addr16[5], 0, u),
			    ao.addr16[6], 0, u),
			    ao.addr16[7], 0, u),
			    po, pn, u);
			break;
		}
		break;
#endif /* INET6 */
	}
}
/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
void
pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u,
    sa_family_t af, sa_family_t afn)
{
	struct pf_addr ao;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, afn);

	switch (af) {
	case AF_INET:
		switch (afn) {
		case AF_INET:
			pf_change_a(a, c, an->v4addr.s_addr, u);
			break;
		case AF_INET6:
			*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*c,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    0, an->addr16[2], u),
			    0, an->addr16[3], u),
			    0, an->addr16[4], u),
			    0, an->addr16[5], u),
			    0, an->addr16[6], u),
			    0, an->addr16[7], u);
			break;
		}
		break;
	case AF_INET6:
		switch (afn) {
		case AF_INET:
			*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*c,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    ao.addr16[2], 0, u),
			    ao.addr16[3], 0, u),
			    ao.addr16[4], 0, u),
			    ao.addr16[5], 0, u),
			    ao.addr16[6], 0, u),
			    ao.addr16[7], 0, u);
			break;
		case AF_INET6:
			pf_change_a6(a, c, an, u);
			break;
		}
		break;
	}
}
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr oia, ooa;

	PF_ACPY(&oia, ia, af);
	PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t oip = *ip;
		u_int32_t opc = 0;

		if (pc != NULL) {
			opc = *pc;
		}
		*ip = np;
		if (pc != NULL) {
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		}
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL) {
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
		}
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#if INET
	case AF_INET: {
		u_int32_t oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Change outer ip address, fix outer ip or icmpv6 checksum. */
	PF_ACPY(oa, na, af);
	switch (af) {
#if INET
	case AF_INET:
		*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
		    ooa.addr16[0], oa->addr16[0], 0),
		    ooa.addr16[1], oa->addr16[1], 0);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ooa.addr16[0], oa->addr16[0], u),
		    ooa.addr16[1], oa->addr16[1], u),
		    ooa.addr16[2], oa->addr16[2], u),
		    ooa.addr16[3], oa->addr16[3], u),
		    ooa.addr16[4], oa->addr16[4], u),
		    ooa.addr16[5], oa->addr16[5], u),
		    ooa.addr16[6], oa->addr16[6], u),
		    ooa.addr16[7], oa->addr16[7], u);
		break;
#endif /* INET6 */
	}
}
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(pbuf_t *pbuf, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
	u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(pbuf, off + sizeof(*th), opts, hlen, NULL, NULL,
	    pd->af)) {
		return 0;
	}

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen) {
				olen = hlen;
			}
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof(sack));
					pf_change_a(&sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof(sack));
				}
				copyback = off + sizeof(*th) + thoptlen;
			}
		/* FALLTHROUGH */
		default:
			if (olen < 2) {
				olen = 2;
			}
			hlen -= olen;
			opt += olen;
			break;
		}
	}

	if (copyback) {
		if (pf_lazy_makewritable(pd, pbuf, copyback) == NULL) {
			return -1;
		}
		pbuf_copy_back(pbuf, off + sizeof(*th), thoptlen, opts);
	}
	return copyback;
}
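/*
 * Note on pf_modulate_sack() above: when a state modulates TCP sequence
 * numbers, the SACK block edges carried in the option space still refer to
 * the peer's original sequence space, so each 32-bit edge must be shifted
 * by the state's seqdiff (reusing pf_change_a() keeps th_sum consistent).
 */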
/*
 * The following functions (pf_send_tcp and pf_send_icmp) are somewhat
 * special in that they originate "spurious" packets rather than
 * filter/NAT existing packets. As such, they're not a great fit for
 * the 'pbuf' shim, which assumes the underlying packet buffers are
 * allocated elsewhere.
 *
 * Since these functions are rarely used, we'll carry on allocating mbufs
 * and passing them to the IP stack for eventual routing.
 */
void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
{
#pragma unused(eh, ifp)
	struct mbuf *m;
	int len, tlen;
	struct ip *h = NULL;
	struct ip6_hdr *h6 = NULL;
	struct tcphdr *th = NULL;
	char *opt;
	struct pf_mtag *pf_mtag;

	/* maximum segment size tcp option */
	tlen = sizeof(struct tcphdr);
	if (mss) {
		tlen += 4;
	}

	switch (af) {
	case AF_INET:
		len = sizeof(struct ip) + tlen;
		break;
	case AF_INET6:
		len = sizeof(struct ip6_hdr) + tlen;
		break;
	default:
		panic("pf_send_tcp: not AF_INET or AF_INET6!");
		return;
	}

	/* create outgoing mbuf */
	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL) {
		return;
	}

	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		return;
	}

	pf_mtag->pftag_flags |= PF_TAG_GENERATED;
	if (tag) {
		pf_mtag->pftag_tag = rtag;
	}

	if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid)) {
		pf_mtag->pftag_rtableid = r->rtableid;
	}

	/* add hints for ecn */
	pf_mtag->pftag_hdr = mtod(m, struct ip *);
	/* record address family */
	pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
	switch (af) {
	case AF_INET:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
		break;
	case AF_INET6:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
		break;
	}

	/* indicate this is TCP */
	m->m_pkthdr.pkt_proto = IPPROTO_TCP;

	/* Make sure headers are 32-bit aligned */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);

	switch (af) {
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4addr.s_addr;
		h->ip_dst.s_addr = daddr->v4addr.s_addr;

		th = (struct tcphdr *)(void *)((caddr_t)h + sizeof(struct ip));
		break;
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6addr, sizeof(struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6addr, sizeof(struct in6_addr));

		th = (struct tcphdr *)(void *)
		    ((caddr_t)h6 + sizeof(struct ip6_hdr));
		break;
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
#if BYTE_ORDER != BIG_ENDIAN
		HTONS(mss);
#endif
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
	case AF_INET: {
		struct route ro;

		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_hl = sizeof(*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		/*
		 * ip_output() expects ip_len and ip_off to be in host order.
		 */
		h->ip_len = len;
		h->ip_off = (path_mtu_discovery ? IP_DF : 0);
		h->ip_ttl = ttl ? ttl : ip_defttl;

		bzero(&ro, sizeof(ro));
		ip_output(m, NULL, &ro, 0, NULL, NULL);
		ROUTE_RELEASE(&ro);
		break;
	}
	case AF_INET6: {
		struct route_in6 ro6;

		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		bzero(&ro6, sizeof(ro6));
		ip6_output(m, NULL, &ro6, 0, NULL, NULL, NULL);
		ROUTE_RELEASE(&ro6);
		break;
	}
	}
}
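/*
 * pf_send_icmp() below reflects an ICMP or ICMPv6 error back toward the
 * sender of the offending packet. The pbuf is cloned into an mbuf first
 * because icmp_error()/icmp6_error() take ownership of an mbuf chain.
 */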
void
pf_send_icmp(pbuf_t *pbuf, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf *m0;
	struct pf_mtag *pf_mtag;

	m0 = pbuf_clone_to_mbuf(pbuf);
	if (m0 == NULL) {
		return;
	}

	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		return;
	}

	pf_mtag->pftag_flags |= PF_TAG_GENERATED;

	if (PF_RTABLEID_IS_VALID(r->rtableid)) {
		pf_mtag->pftag_rtableid = r->rtableid;
	}

	/* add hints for ecn */
	pf_mtag->pftag_hdr = mtod(m0, struct ip *);
	/* record address family */
	pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
	switch (af) {
	case AF_INET:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
		m0->m_pkthdr.pkt_proto = IPPROTO_ICMP;
		break;
	case AF_INET6:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
		m0->m_pkthdr.pkt_proto = IPPROTO_ICMPV6;
		break;
	}

	switch (af) {
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
	}
}
/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int match = 0;

	switch (af) {
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) {
			match++;
		}
		break;
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		    (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		    (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		    (b->addr32[3] & m->addr32[3]))) {
			match++;
		}
		break;
	}
	if (match) {
		return n ? 0 : 1;
	} else {
		return n ? 1 : 0;
	}
}
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	int i;

	switch (af) {
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0])) {
			return 0;
		}
		break;
	case AF_INET6:
		/* check a >= b */
		for (i = 0; i < 4; ++i) {
			if (a->addr32[i] > b->addr32[i]) {
				break;
			} else if (a->addr32[i] < b->addr32[i]) {
				return 0;
			}
		}
		/* check a <= e */
		for (i = 0; i < 4; ++i) {
			if (a->addr32[i] < e->addr32[i]) {
				break;
			} else if (a->addr32[i] > e->addr32[i]) {
				return 0;
			}
		}
		break;
	}
	return 1;
}
int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return (p > a1) && (p < a2);
	case PF_OP_XRG:
		return (p < a1) || (p > a2);
	case PF_OP_RRG:
		return (p >= a1) && (p <= a2);
	case PF_OP_EQ:
		return p == a1;
	case PF_OP_NE:
		return p != a1;
	case PF_OP_LT:
		return p < a1;
	case PF_OP_LE:
		return p <= a1;
	case PF_OP_GT:
		return p > a1;
	case PF_OP_GE:
		return p >= a1;
	}
	return 0; /* never reached */
}
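/*
 * Example: in pf.conf terms, "port 1000 >< 2000" compiles to PF_OP_IRG and
 * matches only 1000 < p < 2000, "port 1000 <> 2000" (PF_OP_XRG) matches
 * everything outside that range, and "port 1000:2000" (PF_OP_RRG) includes
 * both endpoints.
 */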
int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(a1);
	NTOHS(a2);
	NTOHS(p);
#endif
	return pf_match(op, a1, a2, p);
}
int
pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
    union pf_state_xport *sx)
{
	int d = !0;

	if (sx) {
		switch (proto) {
		case IPPROTO_GRE:
			if (proto_variant == PF_GRE_PPTP_VARIANT) {
				d = (rx->call_id == sx->call_id);
			}
			break;

		case IPPROTO_ESP:
			d = (rx->spi == sx->spi);
			break;

		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			if (rx->range.op) {
				d = pf_match_port(rx->range.op,
				    rx->range.port[0], rx->range.port[1],
				    sx->port);
			}
			break;

		default:
			break;
		}
	}

	return d;
}
int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) {
		return 0;
	}
	return pf_match(op, a1, a2, u);
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) {
		return 0;
	}
	return pf_match(op, a1, a2, g);
}
static int
pf_match_tag(struct pf_rule *r, struct pf_mtag *pf_mtag, int *tag)
{
	if (*tag == -1) {
		*tag = pf_mtag->pftag_tag;
	}

	return (!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag);
}
int
pf_tag_packet(pbuf_t *pbuf, struct pf_mtag *pf_mtag, int tag,
    unsigned int rtableid, struct pf_pdesc *pd)
{
	if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid) &&
	    (pd == NULL || !(pd->pktflags & PKTF_FLOW_ID))) {
		return 0;
	}

	if (pf_mtag == NULL && (pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) {
		return 1;
	}

	if (tag > 0) {
		pf_mtag->pftag_tag = tag;
	}
	if (PF_RTABLEID_IS_VALID(rtableid)) {
		pf_mtag->pftag_rtableid = rtableid;
	}
	if (pd != NULL && (pd->pktflags & PKTF_FLOW_ID)) {
		*pbuf->pb_flowsrc = pd->flowsrc;
		*pbuf->pb_flowid = pd->flowhash;
		*pbuf->pb_flags |= pd->pktflags;
		*pbuf->pb_proto = pd->proto;
	}

	return 0;
}
static void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe *f;

	(*r)->anchor->match = 0;
	if (match) {
		*match = 0;
	}
	if (*depth >= (int)sizeof(pf_anchor_stack) /
	    (int)sizeof(pf_anchor_stack[0])) {
		printf("pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (*depth == 0 && a != NULL) {
		*a = *r;
	}
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
		    NULL) {
			*r = NULL;
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}
static int
pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe *f;
	int quick = 0;

	do {
		if (*depth <= 0) {
			break;
		}
		f = pf_anchor_stack + *depth - 1;
		if (f->parent != NULL && f->child != NULL) {
			if (f->child->match ||
			    (match != NULL && *match)) {
				f->r->anchor->match = 1;
				if (match) {
					*match = 0;
				}
			}
			f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
			if (f->child != NULL) {
				*rs = &f->child->ruleset;
				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
				if (*r == NULL) {
					continue;
				} else {
					break;
				}
			}
		}
		(*depth)--;
		if (*depth == 0 && a != NULL) {
			*a = NULL;
		}
		*rs = f->rs;
		if (f->r->anchor->match || (match != NULL && *match)) {
			quick = f->r->quick;
		}
		*r = TAILQ_NEXT(f->r, entries);
	} while (*r == NULL);

	return quick;
}
void
pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		break;
	case AF_INET6:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
		    ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
		    ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
		    ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
		break;
	}
}
void
pf_addr_inc(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
		break;
	case AF_INET6:
		if (addr->addr32[3] == 0xffffffff) {
			addr->addr32[3] = 0;
			if (addr->addr32[2] == 0xffffffff) {
				addr->addr32[2] = 0;
				if (addr->addr32[1] == 0xffffffff) {
					addr->addr32[1] = 0;
					addr->addr32[0] =
					    htonl(ntohl(addr->addr32[0]) + 1);
				} else {
					addr->addr32[1] =
					    htonl(ntohl(addr->addr32[1]) + 1);
				}
			} else {
				addr->addr32[2] =
				    htonl(ntohl(addr->addr32[2]) + 1);
			}
		} else {
			addr->addr32[3] =
			    htonl(ntohl(addr->addr32[3]) + 1);
		}
		break;
	}
}
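/*
 * Note: pf_addr_inc() treats the address as a big-endian integer, hence
 * the ntohl()/htonl() round trip per 32-bit word; for IPv6 a word that
 * wraps from 0xffffffff to 0 carries one into the next more significant
 * word (from addr32[3] up toward addr32[0]).
 */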
#define mix(a, b, c) \
	do {					\
		a -= b; a -= c; a ^= (c >> 13);	\
		b -= c; b -= a; b ^= (a << 8);	\
		c -= a; c -= b; c ^= (b >> 13);	\
		a -= b; a -= c; a ^= (c >> 12);	\
		b -= c; b -= a; b ^= (a << 16);	\
		c -= a; c -= b; c ^= (b >> 5);	\
		a -= b; a -= c; a ^= (c >> 3);	\
		b -= c; b -= a; b ^= (a << 10);	\
		c -= a; c -= b; c ^= (b >> 15);	\
	} while (0)
/*
 * hash function based on bridge_hash in if_bridge.c
 */
static void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
	case AF_INET6:
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		c += key->key32[1];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		c += key->key32[2];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		c += key->key32[3];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
	}
}
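/*
 * mix() is a Bob Jenkins style avalanche, the same construction used by
 * bridge_hash(). Because pf_hash() is seeded from the pool's random key, a
 * given source address always produces the same hash, which is what makes
 * the source-hash pool type below deterministic without per-source state.
 */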
int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	unsigned char hash[16];
	struct pf_pool *rpool = &r->rpool;
	struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
	struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
	struct pf_pooladdr *acur = rpool->cur;
	struct pf_src_node k;

	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		k.af = af;
		PF_ACPY(&k.addr, saddr, af);
		if (r->rule_flag & PFRULE_RULESRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR) {
			k.rule.ptr = r;
		} else {
			k.rule.ptr = NULL;
		}
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
		if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, rpool->af)) {
			PF_ACPY(naddr, &(*sn)->raddr, rpool->af);
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf_map_addr: src tracking maps ");
				pf_print_host(&k.addr, 0, af);
				printf(" to ");
				pf_print_host(naddr, 0, rpool->af);
				printf("\n");
			}
			return 0;
		}
	}

	if (rpool->cur->addr.type == PF_ADDR_NOROUTE) {
		return 1;
	}
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		if (rpool->cur->addr.p.dyn == NULL) {
			return 1;
		}
		switch (rpool->af) {
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN) {
				return 1;
			}
			raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN) {
				return 1;
			}
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) {
			return 1; /* unsupported */
		}
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, rpool->af);
		break;
	case PF_POOL_BITMASK:
		ASSERT(af == rpool->af);
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, rpool->af)) {
			switch (af) {
			case AF_INET:
				rpool->counter.addr32[0] = htonl(random());
				break;
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff) {
					rpool->counter.addr32[3] =
					    htonl(random());
				} else {
					break;
				}
				if (rmask->addr32[2] != 0xffffffff) {
					rpool->counter.addr32[2] =
					    htonl(random());
				} else {
					break;
				}
				if (rmask->addr32[1] != 0xffffffff) {
					rpool->counter.addr32[1] =
					    htonl(random());
				} else {
					break;
				}
				if (rmask->addr32[0] != 0xffffffff) {
					rpool->counter.addr32[0] =
					    htonl(random());
				}
				break;
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter,
			    rpool->af);
			PF_ACPY(init_addr, naddr, rpool->af);
		} else {
			PF_AINC(&rpool->counter, rpool->af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter,
			    rpool->af);
		}
		break;
	case PF_POOL_SRCHASH:
		ASSERT(af == rpool->af);
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		pf_hash(saddr, (struct pf_addr *)(void *)&hash,
		    &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask,
		    (struct pf_addr *)(void *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				goto get_addr;
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (rpool->cur->addr.p.dyn != NULL &&
			    !pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				goto get_addr;
			}
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter,
		    rpool->af)) {
			goto get_addr;
		}

	try_next:
		if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL) {
			rpool->cur = TAILQ_FIRST(&rpool->list);
		}
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				/* table contains no address of type
				 * 'rpool->af' */
				if (rpool->cur != acur) {
					goto try_next;
				}
				return 1;
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (rpool->cur->addr.p.dyn == NULL) {
				return 1;
			}
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				/* table contains no address of type
				 * 'rpool->af' */
				if (rpool->cur != acur) {
					goto try_next;
				}
				return 1;
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, rpool->af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, rpool->af);
		if (init_addr != NULL && PF_AZERO(init_addr, rpool->af)) {
			PF_ACPY(init_addr, naddr, rpool->af);
		}
		PF_AINC(&rpool->counter, rpool->af);
		break;
	}

	if (*sn != NULL) {
		PF_ACPY(&(*sn)->raddr, naddr, rpool->af);
	}

	if (pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		printf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, rpool->af);
		printf("\n");
	}

	return 0;
}
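/*
 * pf_get_sport() below picks the translated source address (via
 * pf_map_addr) and a free proxy source port for a new NAT binding: UDP and
 * TCP flows may reuse an existing binding for the same lan address/port
 * depending on the rule's endpoint-mapping options, otherwise ports in
 * [low, high] are probed starting at a random cut until
 * pf_find_state_all() reports no collision.
 */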
int
pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r,
    struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, struct pf_addr *naddr,
    union pf_state_xport *nxport, struct pf_src_node **sn
    )
{
	struct pf_state_key_cmp key;
	struct pf_addr init_addr;
	unsigned int cut;
	sa_family_t af = pd->af;
	u_int8_t proto = pd->proto;
	unsigned int low = r->rpool.proxy_port[0];
	unsigned int high = r->rpool.proxy_port[1];

	bzero(&init_addr, sizeof(init_addr));
	if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) {
		return 1;
	}

	if (proto == IPPROTO_ICMP) {
		low = 1;
		high = 65535;
	}

	if (!nxport) {
		return 0; /* No output necessary. */
	}
	/*--- Special mapping rules for UDP ---*/
	if (proto == IPPROTO_UDP) {
		/*--- Never float IKE source port ---*/
		if (ntohs(sxport->port) == PF_IKE_PORT) {
			nxport->port = sxport->port;
			return 0;
		}

		/*--- Apply exterior mapping options ---*/
		if (r->extmap > PF_EXTMAP_APD) {
			struct pf_state *s;

			TAILQ_FOREACH(s, &state_list, entry_list) {
				struct pf_state_key *sk = s->state_key;
				if (!sk) {
					continue;
				}
				if (s->nat_rule.ptr != r) {
					continue;
				}
				if (sk->proto != IPPROTO_UDP ||
				    sk->af_lan != af) {
					continue;
				}
				if (sk->lan.xport.port != sxport->port) {
					continue;
				}
				if (PF_ANEQ(&sk->lan.addr, saddr, af)) {
					continue;
				}
				if (r->extmap < PF_EXTMAP_EI &&
				    PF_ANEQ(&sk->ext_lan.addr, daddr, af)) {
					continue;
				}

				nxport->port = sk->gwy.xport.port;
				return 0;
			}
		}
	} else if (proto == IPPROTO_TCP) {
		struct pf_state *s;
		/*
		 * APPLE MODIFICATION: <rdar://problem/6546358>
		 * Fix allows....NAT to use a single binding for TCP session
		 * with same source IP and source port
		 */
		TAILQ_FOREACH(s, &state_list, entry_list) {
			struct pf_state_key *sk = s->state_key;
			if (!sk) {
				continue;
			}
			if (s->nat_rule.ptr != r) {
				continue;
			}
			if (sk->proto != IPPROTO_TCP || sk->af_lan != af) {
				continue;
			}
			if (sk->lan.xport.port != sxport->port) {
				continue;
			}
			if (!(PF_AEQ(&sk->lan.addr, saddr, af))) {
				continue;
			}
			nxport->port = sk->gwy.xport.port;
			return 0;
		}
	}
	do {
		key.af_gwy = af;
		key.proto = proto;
		PF_ACPY(&key.ext_gwy.addr, daddr, key.af_gwy);
		PF_ACPY(&key.gwy.addr, naddr, key.af_gwy);
		switch (proto) {
		case IPPROTO_UDP:
			key.proto_variant = r->extfilter;
			break;
		default:
			key.proto_variant = 0;
			break;
		}
		if (dxport) {
			key.ext_gwy.xport = *dxport;
		} else {
			memset(&key.ext_gwy.xport, 0,
			    sizeof(key.ext_gwy.xport));
		}
		/*
		 * port search; start random, step;
		 * similar 2 portloop in in_pcbbind
		 */
		if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
		    proto == IPPROTO_ICMP)) {
			if (dxport) {
				key.gwy.xport = *dxport;
			} else {
				memset(&key.gwy.xport, 0,
				    sizeof(key.gwy.xport));
			}
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				return 0;
			}
		} else if (low == 0 && high == 0) {
			key.gwy.xport = *nxport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				return 0;
			}
		} else if (low == high) {
			key.gwy.xport.port = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				nxport->port = htons(low);
				return 0;
			}
		} else {
			unsigned int tmp;

			if (low > high) {
				tmp = low;
				low = high;
				high = tmp;
			}
			/* low < high */
			cut = htonl(random()) % (1 + high - low) + low;
			/* low <= cut <= high */
			for (tmp = cut; tmp <= high; ++(tmp)) {
				key.gwy.xport.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					nxport->port = htons(tmp);
					return 0;
				}
			}
			for (tmp = cut - 1; tmp >= low; --(tmp)) {
				key.gwy.xport.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					nxport->port = htons(tmp);
					return 0;
				}
			}
		}

		switch (r->rpool.opts & PF_POOL_TYPEMASK) {
		case PF_POOL_RANDOM:
		case PF_POOL_ROUNDROBIN:
			if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) {
				return 1;
			}
			break;
		case PF_POOL_NONE:
		case PF_POOL_SRCHASH:
		case PF_POOL_BITMASK:
		default:
			return 1;
		}
	} while (!PF_AEQ(&init_addr, naddr, af));

	return 1; /* none available */
}
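/*
 * pf_match_translation() below walks one translation ruleset (NAT, BINAT
 * or RDR) and returns the first matching rule, descending into anchors as
 * needed. The r->skip[] step pointers let the walk jump over whole runs of
 * rules that cannot match on a given field, the same optimization used by
 * the main filter-rule loop.
 */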
static struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, pbuf_t *pbuf, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr,
    union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, int rs_num)
{
	struct pf_rule *r, *rm = NULL;
	struct pf_ruleset *ruleset = NULL;
	int tag = -1;
	unsigned int rtableid = IFSCOPE_NONE;
	int asd = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
	while (r && rm == NULL) {
		struct pf_rule_addr *src = NULL, *dst = NULL;
		struct pf_addr_wrap *xdst = NULL;
		struct pf_addr_wrap *xsrc = NULL;
		union pf_rule_xport rdrxport;

		if (r->action == PF_BINAT && direction == PF_IN) {
			src = &r->dst;
			if (r->rpool.cur != NULL) {
				xdst = &r->rpool.cur->addr;
			}
		} else if (r->action == PF_RDR && direction == PF_OUT) {
			dst = &r->src;
			src = &r->dst;
			if (r->rpool.cur != NULL) {
				rdrxport.range.op = PF_OP_EQ;
				rdrxport.range.port[0] =
				    htons(r->rpool.proxy_port[0]);
				xsrc = &r->rpool.cur->addr;
			}
		} else {
			src = &r->src;
			dst = &r->dst;
		}

		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot) {
			r = r->skip[PF_SKIP_IFP].ptr;
		} else if (r->direction && r->direction != direction) {
			r = r->skip[PF_SKIP_DIR].ptr;
		} else if (r->af && r->af != pd->af) {
			r = r->skip[PF_SKIP_AF].ptr;
		} else if (r->proto && r->proto != pd->proto) {
			r = r->skip[PF_SKIP_PROTO].ptr;
		} else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL)) {
			r = TAILQ_NEXT(r, entries);
		} else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif)) {
			r = TAILQ_NEXT(r, entries);
		} else if (xsrc && (!rdrxport.range.port[0] ||
		    !pf_match_xport(r->proto, r->proto_variant, &rdrxport,
		    sxport))) {
			r = TAILQ_NEXT(r, entries);
		} else if (!xsrc && !pf_match_xport(r->proto,
		    r->proto_variant, &src->xport, sxport)) {
			r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
			    PF_SKIP_DST_PORT].ptr;
		} else if (dst != NULL &&
		    PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) {
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		} else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
		    0, NULL)) {
			r = TAILQ_NEXT(r, entries);
		} else if (dst && !pf_match_xport(r->proto, r->proto_variant,
		    &dst->xport, dxport)) {
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		} else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag,
		    &tag)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
		    IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, pbuf,
		    off, pd->hdr.tcp), r->os_fingerprint))) {
			r = TAILQ_NEXT(r, entries);
		} else {
			if (r->tag) {
				tag = r->tag;
			}
			if (PF_RTABLEID_IS_VALID(r->rtableid)) {
				rtableid = r->rtableid;
			}
			if (r->anchor == NULL) {
				rm = r;
			} else {
				pf_step_into_anchor(&asd, &ruleset, rs_num,
				    &r, NULL, NULL);
			}
		}
		if (r == NULL) {
			pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
			    NULL, NULL);
		}
	}

	if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, NULL)) {
		return NULL;
	}
	if (rm != NULL && (rm->action == PF_NONAT ||
	    rm->action == PF_NORDR || rm->action == PF_NOBINAT ||
	    rm->action == PF_NONAT64)) {
		return NULL;
	}
	return rm;
}
/*
 * Get address translation information for NAT/BINAT/RDR
 * pd		: pf packet descriptor
 * pbuf		: pbuf holding the packet
 * off		: offset to protocol header
 * direction	: direction of packet
 * kif		: pf interface info obtained from the packet's recv interface
 * sn		: source node pointer (output)
 * saddr	: packet source address
 * sxport	: packet source port
 * daddr	: packet destination address
 * dxport	: packet destination port
 * nsxport	: translated source port (output)
 *
 * Translated source & destination address are updated in pd->nsaddr &
 * pd->ndaddr
 */
static struct pf_rule *
pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off,
    int direction, struct pfi_kif *kif, struct pf_src_node **sn,
    struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, union pf_state_xport *nsxport
    )
{
	struct pf_rule *r = NULL;

	if (direction == PF_OUT) {
		r = pf_match_translation(pd, pbuf, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_BINAT);
		if (r == NULL) {
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_RDR);
		}
		if (r == NULL) {
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_NAT);
		}
	} else {
		r = pf_match_translation(pd, pbuf, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_RDR);
		if (r == NULL) {
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_BINAT);
		}
	}

	if (r != NULL) {
		struct pf_addr *nsaddr = &pd->naddr;
		struct pf_addr *ndaddr = &pd->ndaddr;

		*nsaddr = *saddr;
		*ndaddr = *daddr;

		switch (r->action) {
		case PF_NONAT:
		case PF_NOBINAT:
		case PF_NORDR:
		case PF_NONAT64:
			return NULL;
		case PF_NAT:
		case PF_NAT64:
			/*
			 * we do NAT64 on incoming path and we call ip_input
			 * which asserts receive interface to be not NULL.
			 * The below check is to prevent NAT64 action on any
			 * packet generated by local entity using synthesized
			 * IPv6 address.
			 */
			if ((r->action == PF_NAT64) && (direction == PF_OUT)) {
				return NULL;
			}

			if (pf_get_sport(pd, kif, r, saddr, sxport, daddr,
			    dxport, nsaddr, nsxport, sn
			    )) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: NAT proxy port allocation "
				    "(%u-%u) failed\n",
				    r->rpool.proxy_port[0],
				    r->rpool.proxy_port[1]));
				return NULL;
			}
			/*
			 * For NAT64 the destination IPv4 address is derived
			 * from the last 32 bits of synthesized IPv6 address
			 */
			if (r->action == PF_NAT64) {
				ndaddr->v4addr.s_addr = daddr->addr32[3];
				pd->naf = AF_INET;
			}
			break;
		case PF_BINAT:
			switch (direction) {
			case PF_OUT:
				if (r->rpool.cur->addr.type ==
				    PF_ADDR_DYNIFTL) {
					if (r->rpool.cur->addr.p.dyn == NULL) {
						return NULL;
					}
					switch (pd->af) {
					case AF_INET:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt4 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr4,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask4,
						    saddr, AF_INET);
						break;
					case AF_INET6:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt6 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr6,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask6,
						    saddr, AF_INET6);
						break;
					}
				} else {
					PF_POOLMASK(nsaddr,
					    &r->rpool.cur->addr.v.a.addr,
					    &r->rpool.cur->addr.v.a.mask,
					    saddr, pd->af);
				}
				break;
			case PF_IN:
				if (r->src.addr.type == PF_ADDR_DYNIFTL) {
					if (r->src.addr.p.dyn == NULL) {
						return NULL;
					}
					switch (pd->af) {
					case AF_INET:
						if (r->src.addr.p.dyn->
						    pfid_acnt4 < 1) {
							return NULL;
						}
						PF_POOLMASK(ndaddr,
						    &r->src.addr.p.dyn->
						    pfid_addr4,
						    &r->src.addr.p.dyn->
						    pfid_mask4,
						    daddr, AF_INET);
						break;
					case AF_INET6:
						if (r->src.addr.p.dyn->
						    pfid_acnt6 < 1) {
							return NULL;
						}
						PF_POOLMASK(ndaddr,
						    &r->src.addr.p.dyn->
						    pfid_addr6,
						    &r->src.addr.p.dyn->
						    pfid_mask6,
						    daddr, AF_INET6);
						break;
					}
				} else {
					PF_POOLMASK(ndaddr,
					    &r->src.addr.v.a.addr,
					    &r->src.addr.v.a.mask, daddr,
					    pd->af);
				}
				break;
			}
			break;
		case PF_RDR:
			switch (direction) {
			case PF_OUT:
				if (r->dst.addr.type == PF_ADDR_DYNIFTL) {
					if (r->dst.addr.p.dyn == NULL) {
						return NULL;
					}
					switch (pd->af) {
					case AF_INET:
						if (r->dst.addr.p.dyn->
						    pfid_acnt4 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->dst.addr.p.dyn->
						    pfid_addr4,
						    &r->dst.addr.p.dyn->
						    pfid_mask4,
						    daddr, AF_INET);
						break;
					case AF_INET6:
						if (r->dst.addr.p.dyn->
						    pfid_acnt6 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->dst.addr.p.dyn->
						    pfid_addr6,
						    &r->dst.addr.p.dyn->
						    pfid_mask6,
						    daddr, AF_INET6);
						break;
					}
				} else {
					PF_POOLMASK(nsaddr,
					    &r->dst.addr.v.a.addr,
					    &r->dst.addr.v.a.mask,
					    daddr, pd->af);
				}
				if (nsxport && r->dst.xport.range.port[0]) {
					nsxport->port =
					    r->dst.xport.range.port[0];
				}
				break;
			case PF_IN:
				if (pf_map_addr(pd->af, r, saddr,
				    ndaddr, NULL, sn)) {
					return NULL;
				}
				if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
				    PF_POOL_BITMASK) {
					PF_POOLMASK(ndaddr, ndaddr,
					    &r->rpool.cur->addr.v.a.mask, daddr,
					    pd->af);
				}

				if (nsxport && dxport) {
					if (r->rpool.proxy_port[1]) {
						u_int32_t tmp_nport;

						tmp_nport =
						    ((ntohs(dxport->port) -
						    ntohs(r->dst.xport.range.
						    port[0])) %
						    (r->rpool.proxy_port[1] -
						    r->rpool.proxy_port[0] +
						    1)) + r->rpool.proxy_port[0];

						/* wrap around if necessary */
						if (tmp_nport > 65535) {
							tmp_nport -= 65535;
						}
						nsxport->port =
						    htons((u_int16_t)tmp_nport);
					} else if (r->rpool.proxy_port[0]) {
						nsxport->port = htons(r->rpool.
						    proxy_port[0]);
					}
				}
				break;
			}
			break;
		default:
			return NULL;
		}
	}

	return r;
}
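/*
 * pf_socket_lookup() below finds the local socket owning the flow (if any)
 * so rules can filter on uid/gid. The pcb lookup expects (foreign, local)
 * endpoint order, so source and destination are swapped for outbound
 * packets; IPv4 flows are also retried against v4-mapped IPv6 pcbs.
 */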
static int
pf_socket_lookup(int direction, struct pf_pdesc *pd)
{
	struct pf_addr *saddr, *daddr;
	u_int16_t sport, dport;
	struct inpcbinfo *pi;
	int inp = 0;

	if (pd == NULL) {
		return -1;
	}
	pd->lookup.uid = UID_MAX;
	pd->lookup.gid = GID_MAX;
	pd->lookup.pid = NO_PID;

	switch (pd->proto) {
	case IPPROTO_TCP:
		if (pd->hdr.tcp == NULL) {
			return -1;
		}
		sport = pd->hdr.tcp->th_sport;
		dport = pd->hdr.tcp->th_dport;
		pi = &tcbinfo;
		break;
	case IPPROTO_UDP:
		if (pd->hdr.udp == NULL) {
			return -1;
		}
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		pi = &udbinfo;
		break;
	default:
		return -1;
	}
	if (direction == PF_IN) {
		saddr = pd->src;
		daddr = pd->dst;
	} else {
		u_int16_t p;

		p = sport;
		sport = dport;
		dport = p;
		saddr = pd->dst;
		daddr = pd->src;
	}
	switch (pd->af) {
	case AF_INET:
		inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport,
		    daddr->v4addr, dport,
		    0, &pd->lookup.uid, &pd->lookup.gid, NULL);
#if INET6
		if (inp == 0) {
			struct in6_addr s6, d6;

			memset(&s6, 0, sizeof(s6));
			s6.s6_addr16[5] = htons(0xffff);
			memcpy(&s6.s6_addr32[3], &saddr->v4addr,
			    sizeof(saddr->v4addr));

			memset(&d6, 0, sizeof(d6));
			d6.s6_addr16[5] = htons(0xffff);
			memcpy(&d6.s6_addr32[3], &daddr->v4addr,
			    sizeof(daddr->v4addr));

			inp = in6_pcblookup_hash_exists(pi, &s6, sport,
			    &d6, dport, 0, &pd->lookup.uid, &pd->lookup.gid,
			    NULL);
			if (inp == 0) {
				inp = in_pcblookup_hash_exists(pi,
				    saddr->v4addr, sport,
				    daddr->v4addr, dport, INPLOOKUP_WILDCARD,
				    &pd->lookup.uid, &pd->lookup.gid, NULL);
				if (inp == 0) {
					inp = in6_pcblookup_hash_exists(pi,
					    &s6, sport,
					    &d6, dport, INPLOOKUP_WILDCARD,
					    &pd->lookup.uid, &pd->lookup.gid,
					    NULL);
					if (inp == 0) {
						return -1;
					}
				}
			}
		}
#else
		if (inp == 0) {
			inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport,
			    daddr->v4addr, dport, INPLOOKUP_WILDCARD,
			    &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0) {
				return -1;
			}
		}
#endif /* !INET6 */
		break;
	case AF_INET6:
		inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr, sport,
		    &daddr->v6addr,
		    dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
		if (inp == 0) {
			inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr,
			    sport,
			    &daddr->v6addr, dport, INPLOOKUP_WILDCARD,
			    &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0) {
				return -1;
			}
		}
		break;
	default:
		return -1;
	}

	return 1;
}
static u_int8_t
pf_get_wscale(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af)
{
	int hlen;
	u_int8_t hdr[60];
	u_int8_t *opt, optlen;
	u_int8_t wscale = 0;

	hlen = th_off << 2;	/* hlen <= sizeof (hdr) */
	if (hlen <= (int)sizeof(struct tcphdr)) {
		return 0;
	}
	if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) {
		return 0;
	}
	opt = hdr + sizeof(struct tcphdr);
	hlen -= sizeof(struct tcphdr);
	while (hlen >= 3) {
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_WINDOW:
			wscale = opt[2];
			if (wscale > TCP_MAX_WINSHIFT) {
				wscale = TCP_MAX_WINSHIFT;
			}
			wscale |= PF_WSCALE_FLAG;
		/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2) {
				optlen = 2;
			}
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return wscale;
}
static u_int16_t
pf_get_mss(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af)
{
	int hlen;
	u_int8_t hdr[60];
	u_int8_t *opt, optlen;
	u_int16_t mss = tcp_mssdflt;

	hlen = th_off << 2;	/* hlen <= sizeof (hdr) */
	if (hlen <= (int)sizeof(struct tcphdr)) {
		return 0;
	}
	if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) {
		return 0;
	}
	opt = hdr + sizeof(struct tcphdr);
	hlen -= sizeof(struct tcphdr);
	while (hlen >= TCPOLEN_MAXSEG) {
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_MAXSEG:
			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
#if BYTE_ORDER != BIG_ENDIAN
			NTOHS(mss);
#endif
		/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2) {
				optlen = 2;
			}
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return mss;
}
static u_int16_t
pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
{
	struct route ro;
	struct sockaddr_in *dst;
	struct sockaddr_in6 *dst6;
	struct route_in6 ro6;
	struct rtentry *rt = NULL;
	int hlen;
	u_int16_t mss = tcp_mssdflt;

	switch (af) {
	case AF_INET:
		hlen = sizeof(struct ip);
		bzero(&ro, sizeof(ro));
		dst = (struct sockaddr_in *)(void *)&ro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4addr;
		rtalloc(&ro);
		rt = ro.ro_rt;
		break;
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		bzero(&ro6, sizeof(ro6));
		dst6 = (struct sockaddr_in6 *)(void *)&ro6.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6addr;
		rtalloc((struct route *)&ro);
		rt = ro6.ro_rt;
		break;
	default:
		panic("pf_calc_mss: not AF_INET or AF_INET6!");
		return 0;
	}

	if (rt && rt->rt_ifp) {
		/* This is relevant only for PF SYN Proxy */
		int interface_mtu = rt->rt_ifp->if_mtu;

		if (af == AF_INET &&
		    INTF_ADJUST_MTU_FOR_CLAT46(rt->rt_ifp)) {
			interface_mtu = IN6_LINKMTU(rt->rt_ifp);
			/* Further adjust the size for CLAT46 expansion */
			interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
		}
		mss = interface_mtu - hlen - sizeof(struct tcphdr);
		mss = max(tcp_mssdflt, mss);
		rtfree(rt);
	}
	mss = min(mss, offer);
	mss = max(mss, 64);	/* sanity - at least max opt space */
	return mss;
}
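/*
 * Note: the MSS computed above (used by the SYN proxy) is the route MTU
 * minus IP and TCP header overhead, further reduced on CLAT46 interfaces
 * to leave room for the v4-to-v6 header expansion, then clamped to the
 * peer's offer and floored at 64.
 */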
static void
pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr, sa_family_t af)
{
	struct pf_rule *r = s->rule.ptr;

	s->rt_kif = NULL;

	if (!r->rt || r->rt == PF_FASTROUTE) {
		return;
	}
	if ((af == AF_INET) || (af == AF_INET6)) {
		pf_map_addr(af, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
	}
}
static void
pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
{
	s->state_key = sk;
	sk->refcnt++;

	/* list is sorted, if-bound states before floating */
	if (tail) {
		TAILQ_INSERT_TAIL(&sk->states, s, next);
	} else {
		TAILQ_INSERT_HEAD(&sk->states, s, next);
	}
}
static void
pf_detach_state(struct pf_state *s, int flags)
{
	struct pf_state_key *sk = s->state_key;

	if (sk == NULL) {
		return;
	}

	s->state_key = NULL;
	TAILQ_REMOVE(&sk->states, s, next);
	if (--sk->refcnt == 0) {
		if (!(flags & PF_DT_SKIP_EXTGWY)) {
			RB_REMOVE(pf_state_tree_ext_gwy,
			    &pf_statetbl_ext_gwy, sk);
		}
		if (!(flags & PF_DT_SKIP_LANEXT)) {
			RB_REMOVE(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext, sk);
		}
		if (sk->app_state) {
			pool_put(&pf_app_state_pl, sk->app_state);
		}
		pool_put(&pf_state_key_pl, sk);
	}
}
struct pf_state_key *
pf_alloc_state_key(struct pf_state *s, struct pf_state_key *psk)
{
	struct pf_state_key *sk;

	if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL) {
		return NULL;
	}
	bzero(sk, sizeof(*sk));
	TAILQ_INIT(&sk->states);
	pf_attach_state(sk, s, 0);

	/* initialize state key from psk, if provided */
	if (psk != NULL) {
		bcopy(&psk->lan, &sk->lan, sizeof(sk->lan));
		bcopy(&psk->gwy, &sk->gwy, sizeof(sk->gwy));
		bcopy(&psk->ext_lan, &sk->ext_lan, sizeof(sk->ext_lan));
		bcopy(&psk->ext_gwy, &sk->ext_gwy, sizeof(sk->ext_gwy));
		sk->af_lan = psk->af_lan;
		sk->af_gwy = psk->af_gwy;
		sk->proto = psk->proto;
		sk->direction = psk->direction;
		sk->proto_variant = psk->proto_variant;
		VERIFY(psk->app_state == NULL);
		sk->flowsrc = psk->flowsrc;
		sk->flowhash = psk->flowhash;
		/* don't touch tree entries, states and refcnt on sk */
	}

	return sk;
}
static u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	MD5_CTX ctx;
	u_int32_t digest[4];

	if (pf_tcp_secret_init == 0) {
		read_frandom(pf_tcp_secret, sizeof(pf_tcp_secret));
		MD5Init(&pf_tcp_secret_ctx);
		MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
		    sizeof(pf_tcp_secret));
		pf_tcp_secret_init = 1;
	}
	ctx = pf_tcp_secret_ctx;

	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
	if (pd->af == AF_INET6) {
		MD5Update(&ctx, (char *)&pd->src->v6addr,
		    sizeof(struct in6_addr));
		MD5Update(&ctx, (char *)&pd->dst->v6addr,
		    sizeof(struct in6_addr));
	} else {
		MD5Update(&ctx, (char *)&pd->src->v4addr,
		    sizeof(struct in_addr));
		MD5Update(&ctx, (char *)&pd->dst->v4addr,
		    sizeof(struct in_addr));
	}
	MD5Final((u_char *)digest, &ctx);
	pf_tcp_iss_off += 4096;
	return digest[0] + random() + pf_tcp_iss_off;
}
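/*
 * Note: pf_tcp_iss() above follows the RFC 1948 approach: hashing a
 * boot-time secret with the connection 4-tuple gives every connection its
 * own unpredictable ISN space, while the 4096 bump per call plus random()
 * keeps successive ISNs moving forward.
 */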
/*
 * This routine is called to perform address family translation on the
 * inner IP header (that may come as payload) of an ICMP(v4addr/6) error
 * response.
 */
static int
pf_change_icmp_af(pbuf_t *pbuf, int off,
    struct pf_pdesc *pd, struct pf_pdesc *pd2, struct pf_addr *src,
    struct pf_addr *dst, sa_family_t af, sa_family_t naf)
{
	struct ip *ip4 = NULL;
	struct ip6_hdr *ip6 = NULL;
	void *hdr;
	int hlen, olen;

	if (af == naf || (af != AF_INET && af != AF_INET6) ||
	    (naf != AF_INET && naf != AF_INET6)) {
		return -1;
	}

	/* old header */
	olen = pd2->off - off;
	/* new header */
	hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);

	/* Modify the pbuf to accommodate the new header */
	hdr = pbuf_resize_segment(pbuf, off, olen, hlen);
	if (hdr == NULL) {
		return -1;
	}

	/* translate inner ip/ip6 header */
	switch (naf) {
	case AF_INET:
		ip4 = hdr;
		bzero(ip4, sizeof(*ip4));
		ip4->ip_v = IPVERSION;
		ip4->ip_hl = sizeof(*ip4) >> 2;
		ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - olen);
		ip4->ip_id = rfc6864 ? 0 : htons(ip_randomid());
		ip4->ip_off = htons(IP_DF);
		ip4->ip_ttl = pd2->ttl;
		if (pd2->proto == IPPROTO_ICMPV6) {
			ip4->ip_p = IPPROTO_ICMP;
		} else {
			ip4->ip_p = pd2->proto;
		}
		ip4->ip_src = src->v4addr;
		ip4->ip_dst = dst->v4addr;
		ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2);
		break;
	case AF_INET6:
		ip6 = hdr;
		bzero(ip6, sizeof(*ip6));
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(pd2->tot_len - olen);
		if (pd2->proto == IPPROTO_ICMP) {
			ip6->ip6_nxt = IPPROTO_ICMPV6;
		} else {
			ip6->ip6_nxt = pd2->proto;
		}
		if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM) {
			ip6->ip6_hlim = IPV6_DEFHLIM;
		} else {
			ip6->ip6_hlim = pd2->ttl;
		}
		ip6->ip6_src = src->v6addr;
		ip6->ip6_dst = dst->v6addr;
		break;
	}

	/* adjust payload offset and total packet length */
	pd2->off += hlen - olen;
	pd->tot_len += hlen - olen;

	return 0;
}
#define PTR_IP(field)	((int32_t)offsetof(struct ip, field))
#define PTR_IP6(field)	((int32_t)offsetof(struct ip6_hdr, field))
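/*
 * PTR_IP()/PTR_IP6() yield byte offsets of fields within the IPv4/IPv6
 * headers; pf_translate_icmp_af() below uses them to remap an ICMP
 * "parameter problem" pointer from one header layout to the other.
 */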
static int
pf_translate_icmp_af(int af, void *arg)
{
	struct icmp *icmp4;
	struct icmp6_hdr *icmp6;
	u_int32_t mtu;
	int32_t ptr = -1;
	u_int8_t type;
	u_int8_t code;

	switch (af) {
	case AF_INET:
		icmp6 = arg;
		type = icmp6->icmp6_type;
		code = icmp6->icmp6_code;
		mtu = ntohl(icmp6->icmp6_mtu);

		switch (type) {
		case ICMP6_ECHO_REQUEST:
			type = ICMP_ECHO;
			break;
		case ICMP6_ECHO_REPLY:
			type = ICMP_ECHOREPLY;
			break;
		case ICMP6_DST_UNREACH:
			type = ICMP_UNREACH;
			switch (code) {
			case ICMP6_DST_UNREACH_NOROUTE:
			case ICMP6_DST_UNREACH_BEYONDSCOPE:
			case ICMP6_DST_UNREACH_ADDR:
				code = ICMP_UNREACH_HOST;
				break;
			case ICMP6_DST_UNREACH_ADMIN:
				code = ICMP_UNREACH_HOST_PROHIB;
				break;
			case ICMP6_DST_UNREACH_NOPORT:
				code = ICMP_UNREACH_PORT;
				break;
			default:
				return -1;
			}
			break;
		case ICMP6_PACKET_TOO_BIG:
			type = ICMP_UNREACH;
			code = ICMP_UNREACH_NEEDFRAG;
			break;
		case ICMP6_TIME_EXCEEDED:
			type = ICMP_TIMXCEED;
			break;
		case ICMP6_PARAM_PROB:
			switch (code) {
			case ICMP6_PARAMPROB_HEADER:
				type = ICMP_PARAMPROB;
				code = ICMP_PARAMPROB_ERRATPTR;
				ptr = ntohl(icmp6->icmp6_pptr);

				if (ptr == PTR_IP6(ip6_vfc)) {
					;	/* preserve */
				} else if (ptr == PTR_IP6(ip6_vfc) + 1) {
					ptr = PTR_IP(ip_tos);
				} else if (ptr == PTR_IP6(ip6_plen) ||
				    ptr == PTR_IP6(ip6_plen) + 1) {
					ptr = PTR_IP(ip_len);
				} else if (ptr == PTR_IP6(ip6_nxt)) {
					ptr = PTR_IP(ip_p);
				} else if (ptr == PTR_IP6(ip6_hlim)) {
					ptr = PTR_IP(ip_ttl);
				} else if (ptr >= PTR_IP6(ip6_src) &&
				    ptr < PTR_IP6(ip6_dst)) {
					ptr = PTR_IP(ip_src);
				} else if (ptr >= PTR_IP6(ip6_dst) &&
				    ptr < (int32_t)sizeof(struct ip6_hdr)) {
					ptr = PTR_IP(ip_dst);
				} else {
					return -1;
				}
				break;
			case ICMP6_PARAMPROB_NEXTHEADER:
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_PROTOCOL;
				break;
			default:
				return -1;
			}
			break;
		default:
			return -1;
		}
		icmp6->icmp6_type = type;
		icmp6->icmp6_code = code;
		/* aligns well with a icmpv4 nextmtu */
		icmp6->icmp6_mtu = htonl(mtu);
		/* icmpv4 pptr is a one most significant byte */
		if (ptr >= 0) {
			icmp6->icmp6_pptr = htonl(ptr << 24);
		}
		break;

	case AF_INET6:
		icmp4 = arg;
		type = icmp4->icmp_type;
		code = icmp4->icmp_code;
		mtu = ntohs(icmp4->icmp_nextmtu);

		switch (type) {
		case ICMP_ECHO:
			type = ICMP6_ECHO_REQUEST;
			break;
		case ICMP_ECHOREPLY:
			type = ICMP6_ECHO_REPLY;
			break;
		case ICMP_UNREACH:
			type = ICMP6_DST_UNREACH;
			switch (code) {
			case ICMP_UNREACH_NET:
			case ICMP_UNREACH_HOST:
			case ICMP_UNREACH_NET_UNKNOWN:
			case ICMP_UNREACH_HOST_UNKNOWN:
			case ICMP_UNREACH_ISOLATED:
			case ICMP_UNREACH_TOSNET:
			case ICMP_UNREACH_TOSHOST:
				code = ICMP6_DST_UNREACH_NOROUTE;
				break;
			case ICMP_UNREACH_PORT:
				code = ICMP6_DST_UNREACH_NOPORT;
				break;
			case ICMP_UNREACH_NET_PROHIB:
			case ICMP_UNREACH_HOST_PROHIB:
			case ICMP_UNREACH_FILTER_PROHIB:
			case ICMP_UNREACH_PRECEDENCE_CUTOFF:
				code = ICMP6_DST_UNREACH_ADMIN;
				break;
			case ICMP_UNREACH_PROTOCOL:
				type = ICMP6_PARAM_PROB;
				code = ICMP6_PARAMPROB_NEXTHEADER;
				ptr = offsetof(struct ip6_hdr, ip6_nxt);
				break;
			case ICMP_UNREACH_NEEDFRAG:
				type = ICMP6_PACKET_TOO_BIG;
				code = 0;
				break;
			default:
				return -1;
			}
			break;
		case ICMP_TIMXCEED:
			type = ICMP6_TIME_EXCEEDED;
			break;
		case ICMP_PARAMPROB:
			type = ICMP6_PARAM_PROB;
			switch (code) {
			case ICMP_PARAMPROB_ERRATPTR:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			case ICMP_PARAMPROB_LENGTH:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			default:
				return -1;
			}

			ptr = icmp4->icmp_pptr;
			if (ptr == 0 || ptr == PTR_IP(ip_tos)) {
				;	/* preserve */
			} else if (ptr == PTR_IP(ip_len) ||
			    ptr == PTR_IP(ip_len) + 1) {
				ptr = PTR_IP6(ip6_plen);
			} else if (ptr == PTR_IP(ip_ttl)) {
				ptr = PTR_IP6(ip6_hlim);
			} else if (ptr == PTR_IP(ip_p)) {
				ptr = PTR_IP6(ip6_nxt);
			} else if (ptr >= PTR_IP(ip_src) &&
			    ptr < PTR_IP(ip_dst)) {
				ptr = PTR_IP6(ip6_src);
			} else if (ptr >= PTR_IP(ip_dst) &&
			    ptr < (int32_t)sizeof(struct ip)) {
				ptr = PTR_IP6(ip6_dst);
			} else {
				return -1;
			}
			break;
		default:
			return -1;
		}
		icmp4->icmp_type = type;
		icmp4->icmp_code = code;
		icmp4->icmp_nextmtu = htons(mtu);
		if (ptr >= 0) {
			icmp4->icmp_void = htonl(ptr);
		}
		break;
	}

	return 0;
}
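/*
 * The two pf_nat64_*() helpers that follow finish a NAT64 translation by
 * rebuilding the outer header in the other address family in place and
 * re-injecting the packet into the corresponding input path; the larger
 * pf_test_rule() after them is the main rule-evaluation entry point.
 */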
/* Note: frees pbuf if PF_NAT64 is returned */
static int
pf_nat64_ipv6(pbuf_t *pbuf, int off, struct pf_pdesc *pd)
{
	struct ip *ip4;
	struct mbuf *m;

	/*
	 * ip_input asserts for rcvif to be not NULL
	 * That may not be true for two corner cases
	 * 1. If for some reason a local app sends DNS
	 * AAAA query to local host
	 * 2. If IPv6 stack in kernel internally generates a
	 * message destined for a synthesized IPv6 end-point.
	 */
	if (pbuf->pb_ifp == NULL) {
		return PF_DROP;
	}

	ip4 = (struct ip *)pbuf_resize_segment(pbuf, 0, off, sizeof(*ip4));
	if (ip4 == NULL) {
		return PF_DROP;
	}

	ip4->ip_v = 4;
	ip4->ip_hl = 5;
	ip4->ip_tos = pd->tos & htonl(0x0ff00000);
	ip4->ip_len = htons(sizeof(*ip4) + (pd->tot_len - off));
	ip4->ip_id = 0;
	ip4->ip_off = htons(IP_DF);
	ip4->ip_ttl = pd->ttl;
	ip4->ip_p = pd->proto;
	ip4->ip_sum = 0;
	ip4->ip_src = pd->naddr.v4addr;
	ip4->ip_dst = pd->ndaddr.v4addr;
	ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2);

	/* recalculate icmp checksums */
	if (pd->proto == IPPROTO_ICMP) {
		struct icmp *icmp;
		int hlen = sizeof(*ip4);

		icmp = (struct icmp *)pbuf_contig_segment(pbuf, hlen,
		    ICMP_MINLEN);
		if (icmp == NULL) {
			return PF_DROP;
		}

		icmp->icmp_cksum = 0;
		icmp->icmp_cksum = pbuf_inet_cksum(pbuf, 0, hlen,
		    ntohs(ip4->ip_len) - hlen);
	}

	if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) {
		ip_input(m);
	}

	return PF_NAT64;
}

static int
pf_nat64_ipv4(pbuf_t *pbuf, int off, struct pf_pdesc *pd)
{
	struct ip6_hdr *ip6;
	struct mbuf *m;

	if (pbuf->pb_ifp == NULL) {
		return PF_DROP;
	}

	ip6 = (struct ip6_hdr *)pbuf_resize_segment(pbuf, 0, off,
	    sizeof(*ip6));
	if (ip6 == NULL) {
		return PF_DROP;
	}

	ip6->ip6_vfc = htonl((6 << 28) | (pd->tos << 20));
	ip6->ip6_plen = htons(pd->tot_len - off);
	ip6->ip6_nxt = pd->proto;
	ip6->ip6_hlim = pd->ttl;
	ip6->ip6_src = pd->naddr.v6addr;
	ip6->ip6_dst = pd->ndaddr.v6addr;

	/* recalculate icmp6 checksums */
	if (pd->proto == IPPROTO_ICMPV6) {
		struct icmp6_hdr *icmp6;
		int hlen = sizeof(*ip6);

		icmp6 = (struct icmp6_hdr *)pbuf_contig_segment(pbuf, hlen,
		    sizeof(*icmp6));
		if (icmp6 == NULL) {
			return PF_DROP;
		}

		icmp6->icmp6_cksum = 0;
		icmp6->icmp6_cksum = pbuf_inet6_cksum(pbuf,
		    IPPROTO_ICMPV6, hlen,
		    ntohs(ip6->ip6_plen));
	} else if (pd->proto == IPPROTO_UDP) {
		struct udphdr *uh;
		int hlen = sizeof(*ip6);

		uh = (struct udphdr *)pbuf_contig_segment(pbuf, hlen,
		    sizeof(*uh));
		if (uh == NULL) {
			return PF_DROP;
		}

		if (uh->uh_sum == 0) {
			uh->uh_sum = pbuf_inet6_cksum(pbuf, IPPROTO_UDP,
			    hlen, ntohs(ip6->ip6_plen));
		}
	}

	if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) {
		ip6_input(m);
	}

	return PF_NAT64;
}

static int
pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
    struct pfi_kif *kif, pbuf_t *pbuf, int off, void *h,
    struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
    struct ifqueue *ifq)
{
	struct pf_rule *nr = NULL;
	struct pf_addr *saddr = pd->src, *daddr = pd->dst;
	sa_family_t af = pd->af;
	struct pf_rule *r, *a = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_src_node *nsn = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	struct udphdr *uh = pd->hdr.udp;
	u_short reason;
	int rewrite = 0, hdrlen = 0;
	int tag = -1;
	unsigned int rtableid = IFSCOPE_NONE;
	int asd = 0;
	int match = 0;
	int state_icmp = 0;
	u_int16_t mss = tcp_mssdflt;
	u_int8_t icmptype = 0, icmpcode = 0;

	struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
	union pf_state_xport bxport, bdxport, nxport, sxport, dxport;
	struct pf_state_key psk;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (direction == PF_IN && pf_check_congestion(ifq)) {
		REASON_SET(&reason, PFRES_CONGEST);
		return PF_DROP;
	}

	hdrlen = 0;
	sxport.spi = 0;
	dxport.spi = 0;
	nxport.spi = 0;

	switch (pd->proto) {
	case IPPROTO_TCP:
		sxport.port = th->th_sport;
		dxport.port = th->th_dport;
		hdrlen = sizeof(*th);
		break;
	case IPPROTO_UDP:
		sxport.port = uh->uh_sport;
		dxport.port = uh->uh_dport;
		hdrlen = sizeof(*uh);
		break;
	case IPPROTO_ICMP:
		if (pd->af != AF_INET) {
			break;
		}
		sxport.port = dxport.port = pd->hdr.icmp->icmp_id;
		hdrlen = ICMP_MINLEN;
		icmptype = pd->hdr.icmp->icmp_type;
		icmpcode = pd->hdr.icmp->icmp_code;

		if (ICMP_ERRORTYPE(icmptype)) {
			state_icmp++;
		}
		break;
	case IPPROTO_ICMPV6:
		if (pd->af != AF_INET6) {
			break;
		}
		sxport.port = dxport.port = pd->hdr.icmp6->icmp6_id;
		hdrlen = sizeof(*pd->hdr.icmp6);
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpcode = pd->hdr.icmp6->icmp6_code;

		if (ICMP6_ERRORTYPE(icmptype)) {
			state_icmp++;
		}
		break;
	case IPPROTO_GRE:
		if (pd->proto_variant == PF_GRE_PPTP_VARIANT) {
			sxport.call_id = dxport.call_id =
			    pd->hdr.grev1->call_id;
			hdrlen = sizeof(*pd->hdr.grev1);
		}
		break;
	case IPPROTO_ESP:
		sxport.spi = 0;
		dxport.spi = pd->hdr.esp->spi;
		hdrlen = sizeof(*pd->hdr.esp);
		break;
	}

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);

	bxport = sxport;
	bdxport = dxport;

	if (direction == PF_OUT) {
		nxport = sxport;
	} else {
		nxport = dxport;
	}

	/* check packet for BINAT/NAT/RDR */
	if ((nr = pf_get_translation_aux(pd, pbuf, off, direction, kif, &nsn,
	    saddr, &sxport, daddr, &dxport, &nxport
	    )) != NULL) {
		int ua;
		u_int16_t dport;

		if (pd->af != pd->naf) {
			ua = 0;
		} else {
			ua = 1;
		}

		PF_ACPY(&pd->baddr, saddr, af);
		PF_ACPY(&pd->bdaddr, daddr, af);

		switch (pd->proto) {
		case IPPROTO_TCP:
			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_ap(direction, pd->mp, saddr,
				    &th->th_sport, pd->ip_sum, &th->th_sum,
				    &pd->naddr, nxport.port, 0, af,
				    pd->naf, ua);
				sxport.port = th->th_sport;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af) ||
			    (nr && (nr->action == PF_RDR) &&
			    (th->th_dport != nxport.port))) {
				if (nr && nr->action == PF_RDR) {
					dport = nxport.port;
				} else {
					dport = th->th_dport;
				}
				pf_change_ap(direction, pd->mp, daddr,
				    &th->th_dport, pd->ip_sum,
				    &th->th_sum, &pd->ndaddr,
				    dport, 0, af, pd->naf, ua);
				dxport.port = th->th_dport;
			}
			rewrite++;
			break;

		case IPPROTO_UDP:
			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_ap(direction, pd->mp, saddr,
				    &uh->uh_sport, pd->ip_sum,
				    &uh->uh_sum, &pd->naddr,
				    nxport.port, 1, af, pd->naf, ua);
				sxport.port = uh->uh_sport;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af) ||
			    (nr && (nr->action == PF_RDR) &&
			    (uh->uh_dport != nxport.port))) {
				if (nr && nr->action == PF_RDR) {
					dport = nxport.port;
				} else {
					dport = uh->uh_dport;
				}
				pf_change_ap(direction, pd->mp, daddr,
				    &uh->uh_dport, pd->ip_sum,
				    &uh->uh_sum, &pd->ndaddr,
				    dport, 0, af, pd->naf, ua);
				dxport.port = uh->uh_dport;
			}
			rewrite++;
			break;

		case IPPROTO_ICMP:
			if (pd->af != AF_INET) {
				break;
			}
			/*
			 * pd->af != pd->naf not handled yet here and would be
			 * needed for NAT46 needed to support XLAT.
			 * Will cross the bridge when it comes.
			 */
			if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_a(&saddr->v4addr.s_addr, pd->ip_sum,
				    pd->naddr.v4addr.s_addr, 0);
				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
				    pd->hdr.icmp->icmp_cksum, sxport.port,
				    nxport.port, 0);
				pd->hdr.icmp->icmp_id = nxport.port;
			}

			if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
				pf_change_a(&daddr->v4addr.s_addr, pd->ip_sum,
				    pd->ndaddr.v4addr.s_addr, 0);
			}
			++rewrite;
			break;

		case IPPROTO_ICMPV6:
			if (pd->af != AF_INET6) {
				break;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_addr(saddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &pd->naddr, 0, pd->af, pd->naf);
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
				pf_change_addr(daddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &pd->ndaddr, 0, pd->af, pd->naf);
			}

			if (pd->af != pd->naf) {
				if (pf_translate_icmp_af(AF_INET,
				    pd->hdr.icmp6)) {
					return PF_DROP;
				}
				pd->proto = IPPROTO_ICMP;
			}
			rewrite++;
			break;

		case IPPROTO_GRE:
			if ((direction == PF_IN) &&
			    (pd->proto_variant == PF_GRE_PPTP_VARIANT)) {
				grev1->call_id = nxport.call_id;
			}

			switch (pd->af) {
			case AF_INET:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					pf_change_a(&saddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->naddr.v4addr.s_addr, 0);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					pf_change_a(&daddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->ndaddr.v4addr.s_addr, 0);
				}
				break;
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					PF_ACPY(saddr, &pd->naddr, AF_INET6);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					PF_ACPY(daddr, &pd->ndaddr, AF_INET6);
				}
				break;
			}
			++rewrite;
			break;

		case IPPROTO_ESP:
			if (direction == PF_OUT) {
				bxport.spi = 0;
			}

			switch (pd->af) {
			case AF_INET:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					pf_change_a(&saddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->naddr.v4addr.s_addr, 0);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					pf_change_a(&daddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->ndaddr.v4addr.s_addr, 0);
				}
				break;
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					PF_ACPY(saddr, &pd->naddr, AF_INET6);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					PF_ACPY(daddr, &pd->ndaddr, AF_INET6);
				}
				break;
			}
			break;

		default:
			switch (pd->af) {
			case AF_INET:
				if ((pd->naf != AF_INET) ||
				    (PF_ANEQ(saddr, &pd->naddr, pd->af))) {
					pf_change_addr(saddr, pd->ip_sum,
					    &pd->naddr, 0, af, pd->naf);
				}

				if ((pd->naf != AF_INET) ||
				    (PF_ANEQ(daddr, &pd->ndaddr, pd->af))) {
					pf_change_addr(daddr, pd->ip_sum,
					    &pd->ndaddr, 0, af, pd->naf);
				}
				break;
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					PF_ACPY(saddr, &pd->naddr, af);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					PF_ACPY(daddr, &pd->ndaddr, af);
				}
				break;
			}
			break;
		}

		if (nr->natpass) {
			r = NULL;
		}
		pd->nat_rule = nr;
		pd->af = pd->naf;
	}

	if (nr && nr->tag > 0) {
		tag = nr->tag;
	}

	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot) {
			r = r->skip[PF_SKIP_IFP].ptr;
		} else if (r->direction && r->direction != direction) {
			r = r->skip[PF_SKIP_DIR].ptr;
		} else if (r->af && r->af != pd->af) {
			r = r->skip[PF_SKIP_AF].ptr;
		} else if (r->proto && r->proto != pd->proto) {
			r = r->skip[PF_SKIP_PROTO].ptr;
		} else if (PF_MISMATCHAW(&r->src.addr, saddr, pd->af,
		    r->src.neg, kif)) {
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		}
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->proto == pd->proto &&
		    (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
		    r->src.xport.range.op &&
		    !pf_match_port(r->src.xport.range.op,
		    r->src.xport.range.port[0], r->src.xport.range.port[1],
		    th->th_sport)) {
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		} else if (PF_MISMATCHAW(&r->dst.addr, daddr, pd->af,
		    r->dst.neg, NULL)) {
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		}
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->proto == pd->proto &&
		    (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
		    r->dst.xport.range.op &&
		    !pf_match_port(r->dst.xport.range.op,
		    r->dst.xport.range.port[0], r->dst.xport.range.port[1],
		    th->th_dport)) {
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		}
		/* icmp only. type always 0 in other cases */
		else if (r->type && r->type != icmptype + 1) {
			r = TAILQ_NEXT(r, entries);
		}
		/* icmp only. code always 0 in other cases */
		else if (r->code && r->code != icmpcode + 1) {
			r = TAILQ_NEXT(r, entries);
		} else if ((r->rule_flag & PFRULE_TOS) && r->tos &&
		    !(r->tos & pd->tos)) {
			r = TAILQ_NEXT(r, entries);
		} else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
		    !(r->tos & (pd->tos & DSCP_MASK))) {
			r = TAILQ_NEXT(r, entries);
		} else if ((r->rule_flag & PFRULE_SC) && r->tos &&
		    ((r->tos & SCIDX_MASK) != pd->sc)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->rule_flag & PFRULE_FRAGMENT) {
			r = TAILQ_NEXT(r, entries);
		} else if (pd->proto == IPPROTO_TCP &&
		    (r->flagset & th->th_flags) != r->flags) {
			r = TAILQ_NEXT(r, entries);
		}
		/* tcp/udp only. uid.op always 0 in other cases */
		else if (r->uid.op && (pd->lookup.done ||
		    ((void)(pd->lookup.done =
		    pf_socket_lookup(direction, pd)), 1)) &&
		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
		    pd->lookup.uid)) {
			r = TAILQ_NEXT(r, entries);
		}
		/* tcp/udp only. gid.op always 0 in other cases */
		else if (r->gid.op && (pd->lookup.done ||
		    ((void)(pd->lookup.done =
		    pf_socket_lookup(direction, pd)), 1)) &&
		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
		    pd->lookup.gid)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->prob && r->prob <=
		    (RandomULong() % (UINT_MAX - 1) + 1)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag,
		    &tag)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->os_fingerprint != PF_OSFP_ANY &&
		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
		    pf_osfp_fingerprint(pd, pbuf, off, th),
		    r->os_fingerprint))) {
			r = TAILQ_NEXT(r, entries);
		} else {
			if (r->tag) {
				tag = r->tag;
			}
			if (PF_RTABLEID_IS_VALID(r->rtableid)) {
				rtableid = r->rtableid;
			}
			if (r->anchor == NULL) {
				match = 1;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				if ((*rm)->quick) {
					break;
				}
				r = TAILQ_NEXT(r, entries);
			} else {
				pf_step_into_anchor(&asd, &ruleset,
				    PF_RULESET_FILTER, &r, &a, &match);
			}
		}
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    PF_RULESET_FILTER, &r, &a, &match)) {
			break;
		}
	}
	r = *rm;
	a = *am;
	ruleset = *rsm;

	REASON_SET(&reason, PFRES_MATCH);

	if (r->log || (nr != NULL && nr->log)) {
		if (rewrite > 0) {
			if (rewrite < off + hdrlen) {
				rewrite = off + hdrlen;
			}

			if (pf_lazy_makewritable(pd, pbuf, rewrite) == NULL) {
				REASON_SET(&reason, PFRES_MEMORY);
				return PF_DROP;
			}

			pbuf_copy_back(pbuf, off, hdrlen, pd->hdr.any);
		}
		PFLOG_PACKET(kif, h, pbuf, pd->af, direction, reason,
		    r->log ? r : nr, a, ruleset, pd);
	}

	if ((r->action == PF_DROP) &&
	    ((r->rule_flag & PFRULE_RETURNRST) ||
	    (r->rule_flag & PFRULE_RETURNICMP) ||
	    (r->rule_flag & PFRULE_RETURN))) {
		/* undo NAT changes, if they have taken place */
		/* XXX For NAT64 we are not reverting the changes */
		if (nr != NULL && nr->action != PF_NAT64) {
			if (direction == PF_OUT) {
				pd->af = af;
				switch (pd->proto) {
				case IPPROTO_TCP:
					pf_change_ap(direction, pd->mp, saddr,
					    &th->th_sport, pd->ip_sum,
					    &th->th_sum, &pd->baddr,
					    bxport.port, 0, af, pd->af, 1);
					sxport.port = th->th_sport;
					rewrite++;
					break;
				case IPPROTO_UDP:
					pf_change_ap(direction, pd->mp, saddr,
					    &pd->hdr.udp->uh_sport, pd->ip_sum,
					    &pd->hdr.udp->uh_sum, &pd->baddr,
					    bxport.port, 1, af, pd->af, 1);
					sxport.port = pd->hdr.udp->uh_sport;
					rewrite++;
					break;
				case IPPROTO_ICMP:
				case IPPROTO_ICMPV6:
					/* nothing! */
					break;
				case IPPROTO_GRE:
					PF_ACPY(&pd->baddr, saddr, af);
					++rewrite;
					switch (af) {
					case AF_INET:
						pf_change_a(
						    &saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr,
						    AF_INET6);
						break;
					}
					break;
				case IPPROTO_ESP:
					PF_ACPY(&pd->baddr, saddr, af);
					switch (af) {
					case AF_INET:
						pf_change_a(
						    &saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr,
						    AF_INET6);
						break;
					}
					break;
				default:
					switch (af) {
					case AF_INET:
						pf_change_a(
						    &saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr, af);
						break;
					}
				}
			} else {
				switch (pd->proto) {
				case IPPROTO_TCP:
					pf_change_ap(direction, pd->mp, daddr,
					    &th->th_dport, pd->ip_sum,
					    &th->th_sum, &pd->bdaddr,
					    bdxport.port, 0, af, pd->af, 1);
					dxport.port = th->th_dport;
					rewrite++;
					break;
				case IPPROTO_UDP:
					pf_change_ap(direction, pd->mp, daddr,
					    &pd->hdr.udp->uh_dport, pd->ip_sum,
					    &pd->hdr.udp->uh_sum, &pd->bdaddr,
					    bdxport.port, 1, af, pd->af, 1);
					dxport.port = pd->hdr.udp->uh_dport;
					rewrite++;
					break;
				case IPPROTO_ICMP:
				case IPPROTO_ICMPV6:
					/* nothing! */
					break;
				case IPPROTO_GRE:
					if (pd->proto_variant ==
					    PF_GRE_PPTP_VARIANT) {
						grev1->call_id =
						    bdxport.call_id;
					}
					++rewrite;
					switch (af) {
					case AF_INET:
						pf_change_a(
						    &daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr,
						    0);
						break;
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr,
						    AF_INET6);
						break;
					}
					break;
				case IPPROTO_ESP:
					switch (af) {
					case AF_INET:
						pf_change_a(
						    &daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr,
						    0);
						break;
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr,
						    AF_INET6);
						break;
					}
					break;
				default:
					switch (af) {
					case AF_INET:
						pf_change_a(
						    &daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr,
						    0);
						break;
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr,
						    af);
						break;
					}
				}
			}
		}
		if (pd->proto == IPPROTO_TCP &&
		    ((r->rule_flag & PFRULE_RETURNRST) ||
		    (r->rule_flag & PFRULE_RETURN)) &&
		    !(th->th_flags & TH_RST)) {
			u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
			int len = 0;
			struct ip *h4;
			struct ip6_hdr *h6;

			switch (pd->af) {
			case AF_INET:
				h4 = pbuf->pb_data;
				len = ntohs(h4->ip_len) - off;
				break;
			case AF_INET6:
				h6 = pbuf->pb_data;
				len = ntohs(h6->ip6_plen) -
				    (off - sizeof(*h6));
				break;
			}

			if (pf_check_proto_cksum(pbuf, off, len, IPPROTO_TCP,
			    pd->af)) {
				REASON_SET(&reason, PFRES_PROTCKSUM);
			} else {
				if (th->th_flags & TH_SYN) {
					ack++;
				}
				if (th->th_flags & TH_FIN) {
					ack++;
				}
				pf_send_tcp(r, pd->af, pd->dst,
				    pd->src, th->th_dport, th->th_sport,
				    ntohl(th->th_ack), ack, TH_RST | TH_ACK,
				    0, 0,
				    r->return_ttl, 1, 0, pd->eh,
				    kif->pfik_ifp);
			}
		} else if (pd->proto != IPPROTO_ICMP && pd->af == AF_INET &&
		    pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
		    r->return_icmp) {
			pf_send_icmp(pbuf, r->return_icmp >> 8,
			    r->return_icmp & 255, pd->af, r);
		} else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
		    pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
		    r->return_icmp6) {
			pf_send_icmp(pbuf, r->return_icmp6 >> 8,
			    r->return_icmp6 & 255, pd->af, r);
		}
	}

	if (r->action == PF_DROP) {
		return PF_DROP;
	}

	/* prepare state key, for flowhash and/or the state (if created) */
	bzero(&psk, sizeof(psk));
	psk.proto = pd->proto;
	psk.direction = direction;
	if (pd->proto == IPPROTO_UDP) {
		if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
		    ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
			psk.proto_variant = PF_EXTFILTER_APD;
		} else {
			psk.proto_variant = nr ? nr->extfilter : r->extfilter;
			if (psk.proto_variant < PF_EXTFILTER_APD) {
				psk.proto_variant = PF_EXTFILTER_APD;
			}
		}
	} else if (pd->proto == IPPROTO_GRE) {
		psk.proto_variant = pd->proto_variant;
	}
	if (direction == PF_OUT) {
		psk.af_gwy = af;
		PF_ACPY(&psk.gwy.addr, saddr, af);
		PF_ACPY(&psk.ext_gwy.addr, daddr, af);
		switch (pd->proto) {
		case IPPROTO_ESP:
			psk.gwy.xport.spi = 0;
			psk.ext_gwy.xport.spi = pd->hdr.esp->spi;
			break;
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			/*
			 * NAT64 requires protocol translation between ICMPv4
			 * and ICMPv6. TCP and UDP do not require protocol
			 * translation. To avoid adding complexity just to
			 * handle ICMP(v4addr/v6addr), we always lookup for
5560 * proto = IPPROTO_ICMP on both LAN and WAN side
5562 psk
.proto
= IPPROTO_ICMP
;
5563 psk
.gwy
.xport
.port
= nxport
.port
;
5564 psk
.ext_gwy
.xport
.spi
= 0;
5567 psk
.gwy
.xport
= sxport
;
5568 psk
.ext_gwy
.xport
= dxport
;
5573 PF_ACPY(&psk
.lan
.addr
, &pd
->baddr
, af
);
5574 psk
.lan
.xport
= bxport
;
5575 PF_ACPY(&psk
.ext_lan
.addr
, &pd
->bdaddr
, af
);
5576 psk
.ext_lan
.xport
= bdxport
;
5578 PF_ACPY(&psk
.lan
.addr
, &psk
.gwy
.addr
, af
);
5579 psk
.lan
.xport
= psk
.gwy
.xport
;
5580 PF_ACPY(&psk
.ext_lan
.addr
, &psk
.ext_gwy
.addr
, af
);
5581 psk
.ext_lan
.xport
= psk
.ext_gwy
.xport
;
5585 if (nr
&& nr
->action
== PF_NAT64
) {
5586 PF_ACPY(&psk
.lan
.addr
, &pd
->baddr
, af
);
5587 PF_ACPY(&psk
.ext_lan
.addr
, &pd
->bdaddr
, af
);
5589 PF_ACPY(&psk
.lan
.addr
, daddr
, af
);
5590 PF_ACPY(&psk
.ext_lan
.addr
, saddr
, af
);
5592 switch (pd
->proto
) {
5595 case IPPROTO_ICMPV6
:
5598 * NAT64 requires protocol translation between ICMPv4
5599 * and ICMPv6. TCP and UDP do not require protocol
5600 * translation. To avoid adding complexity just to
5601 * handle ICMP(v4addr/v6addr), we always lookup for
5602 * proto = IPPROTO_ICMP on both LAN and WAN side
5604 psk
.proto
= IPPROTO_ICMP
;
5605 if (nr
&& nr
->action
== PF_NAT64
) {
5606 psk
.lan
.xport
= bxport
;
5607 psk
.ext_lan
.xport
= bxport
;
5609 psk
.lan
.xport
= nxport
;
5610 psk
.ext_lan
.xport
.spi
= 0;
5614 psk
.ext_lan
.xport
.spi
= 0;
5615 psk
.lan
.xport
.spi
= pd
->hdr
.esp
->spi
;
5619 if (nr
->action
== PF_NAT64
) {
5620 psk
.lan
.xport
= bxport
;
5621 psk
.ext_lan
.xport
= bdxport
;
5623 psk
.lan
.xport
= dxport
;
5624 psk
.ext_lan
.xport
= sxport
;
5627 psk
.lan
.xport
= dxport
;
5628 psk
.ext_lan
.xport
= sxport
;
5632 psk
.af_gwy
= pd
->naf
;
5634 if (nr
->action
== PF_NAT64
) {
5635 PF_ACPY(&psk
.gwy
.addr
, &pd
->naddr
, pd
->naf
);
5636 PF_ACPY(&psk
.ext_gwy
.addr
, &pd
->ndaddr
,
5638 if ((pd
->proto
== IPPROTO_ICMPV6
) ||
5639 (pd
->proto
== IPPROTO_ICMP
)) {
5640 psk
.gwy
.xport
= nxport
;
5641 psk
.ext_gwy
.xport
= nxport
;
5643 psk
.gwy
.xport
= sxport
;
5644 psk
.ext_gwy
.xport
= dxport
;
5647 PF_ACPY(&psk
.gwy
.addr
, &pd
->bdaddr
, af
);
5648 psk
.gwy
.xport
= bdxport
;
5649 PF_ACPY(&psk
.ext_gwy
.addr
, saddr
, af
);
5650 psk
.ext_gwy
.xport
= sxport
;
5653 PF_ACPY(&psk
.gwy
.addr
, &psk
.lan
.addr
, af
);
5654 psk
.gwy
.xport
= psk
.lan
.xport
;
5655 PF_ACPY(&psk
.ext_gwy
.addr
, &psk
.ext_lan
.addr
, af
);
5656 psk
.ext_gwy
.xport
= psk
.ext_lan
.xport
;
5659 if (pd
->pktflags
& PKTF_FLOW_ID
) {
5660 /* flow hash was already computed outside of PF */
5661 psk
.flowsrc
= pd
->flowsrc
;
5662 psk
.flowhash
= pd
->flowhash
;
5664 /* compute flow hash and store it in state key */
5665 psk
.flowsrc
= FLOWSRC_PF
;
5666 psk
.flowhash
= pf_calc_state_key_flowhash(&psk
);
5667 pd
->flowsrc
= psk
.flowsrc
;
5668 pd
->flowhash
= psk
.flowhash
;
5669 pd
->pktflags
|= PKTF_FLOW_ID
;
5670 pd
->pktflags
&= ~PKTF_FLOW_ADV
;
5673 if (pf_tag_packet(pbuf
, pd
->pf_mtag
, tag
, rtableid
, pd
)) {
5674 REASON_SET(&reason
, PFRES_MEMORY
);
5678 if (!state_icmp
&& (r
->keep_state
|| nr
!= NULL
||
5679 (pd
->flags
& PFDESC_TCP_NORM
))) {
5680 /* create new state */
5681 struct pf_state
*s
= NULL
;
5682 struct pf_state_key
*sk
= NULL
;
5683 struct pf_src_node
*sn
= NULL
;
5684 struct pf_ike_hdr ike
;
5686 if (pd
->proto
== IPPROTO_UDP
) {
5687 size_t plen
= pbuf
->pb_packet_len
- off
- sizeof(*uh
);
5689 if (ntohs(uh
->uh_sport
) == PF_IKE_PORT
&&
5690 ntohs(uh
->uh_dport
) == PF_IKE_PORT
&&
5691 plen
>= PF_IKE_PACKET_MINSIZE
) {
5692 if (plen
> PF_IKE_PACKET_MINSIZE
) {
5693 plen
= PF_IKE_PACKET_MINSIZE
;
5695 pbuf_copy_data(pbuf
, off
+ sizeof(*uh
), plen
,
5700 if (nr
!= NULL
&& pd
->proto
== IPPROTO_ESP
&&
5701 direction
== PF_OUT
) {
5702 struct pf_state_key_cmp sk0
;
5703 struct pf_state
*s0
;
5707 * This squelches state creation if the external
5708 * address matches an existing incomplete state with a
5709 * different internal address. Only one 'blocking'
5710 * partial state is allowed for each external address.
5712 memset(&sk0
, 0, sizeof(sk0
));
5713 sk0
.af_gwy
= pd
->af
;
5714 sk0
.proto
= IPPROTO_ESP
;
5715 PF_ACPY(&sk0
.gwy
.addr
, saddr
, sk0
.af_gwy
);
5716 PF_ACPY(&sk0
.ext_gwy
.addr
, daddr
, sk0
.af_gwy
);
5717 s0
= pf_find_state(kif
, &sk0
, PF_IN
);
5719 if (s0
&& PF_ANEQ(&s0
->state_key
->lan
.addr
,
5726 /* check maximums */
5727 if (r
->max_states
&& (r
->states
>= r
->max_states
)) {
5728 pf_status
.lcounters
[LCNT_STATES
]++;
5729 REASON_SET(&reason
, PFRES_MAXSTATES
);
5732 /* src node for filter rule */
5733 if ((r
->rule_flag
& PFRULE_SRCTRACK
||
5734 r
->rpool
.opts
& PF_POOL_STICKYADDR
) &&
5735 pf_insert_src_node(&sn
, r
, saddr
, af
) != 0) {
5736 REASON_SET(&reason
, PFRES_SRCLIMIT
);
5739 /* src node for translation rule */
5740 if (nr
!= NULL
&& (nr
->rpool
.opts
& PF_POOL_STICKYADDR
) &&
5741 ((direction
== PF_OUT
&&
5742 nr
->action
!= PF_RDR
&&
5743 pf_insert_src_node(&nsn
, nr
, &pd
->baddr
, af
) != 0) ||
5744 (pf_insert_src_node(&nsn
, nr
, saddr
, af
) != 0))) {
5745 REASON_SET(&reason
, PFRES_SRCLIMIT
);
5748 s
= pool_get(&pf_state_pl
, PR_WAITOK
);
5750 REASON_SET(&reason
, PFRES_MEMORY
);
5752 if (sn
!= NULL
&& sn
->states
== 0 && sn
->expire
== 0) {
5753 RB_REMOVE(pf_src_tree
, &tree_src_tracking
, sn
);
5754 pf_status
.scounters
[SCNT_SRC_NODE_REMOVALS
]++;
5755 pf_status
.src_nodes
--;
5756 pool_put(&pf_src_tree_pl
, sn
);
5758 if (nsn
!= sn
&& nsn
!= NULL
&& nsn
->states
== 0 &&
5760 RB_REMOVE(pf_src_tree
, &tree_src_tracking
, nsn
);
5761 pf_status
.scounters
[SCNT_SRC_NODE_REMOVALS
]++;
5762 pf_status
.src_nodes
--;
5763 pool_put(&pf_src_tree_pl
, nsn
);
5766 if (sk
->app_state
) {
5767 pool_put(&pf_app_state_pl
,
5770 pool_put(&pf_state_key_pl
, sk
);
5774 bzero(s
, sizeof(*s
));
5775 TAILQ_INIT(&s
->unlink_hooks
);
5777 s
->nat_rule
.ptr
= nr
;
5779 STATE_INC_COUNTERS(s
);
5780 s
->allow_opts
= r
->allow_opts
;
5781 s
->log
= r
->log
& PF_LOG_ALL
;
5783 s
->log
|= nr
->log
& PF_LOG_ALL
;
5785 switch (pd
->proto
) {
5787 s
->src
.seqlo
= ntohl(th
->th_seq
);
5788 s
->src
.seqhi
= s
->src
.seqlo
+ pd
->p_len
+ 1;
5789 if ((th
->th_flags
& (TH_SYN
| TH_ACK
)) ==
5790 TH_SYN
&& r
->keep_state
== PF_STATE_MODULATE
) {
5791 /* Generate sequence number modulator */
5792 if ((s
->src
.seqdiff
= pf_tcp_iss(pd
) -
5793 s
->src
.seqlo
) == 0) {
5796 pf_change_a(&th
->th_seq
, &th
->th_sum
,
5797 htonl(s
->src
.seqlo
+ s
->src
.seqdiff
), 0);
5798 rewrite
= off
+ sizeof(*th
);
5802 if (th
->th_flags
& TH_SYN
) {
5804 s
->src
.wscale
= pf_get_wscale(pbuf
, off
,
5807 s
->src
.max_win
= MAX(ntohs(th
->th_win
), 1);
5808 if (s
->src
.wscale
& PF_WSCALE_MASK
) {
5809 /* Remove scale factor from initial window */
5810 int win
= s
->src
.max_win
;
5811 win
+= 1 << (s
->src
.wscale
& PF_WSCALE_MASK
);
5812 s
->src
.max_win
= (win
- 1) >>
5813 (s
->src
.wscale
& PF_WSCALE_MASK
);
5815 if (th
->th_flags
& TH_FIN
) {
5820 s
->src
.state
= TCPS_SYN_SENT
;
5821 s
->dst
.state
= TCPS_CLOSED
;
5822 s
->timeout
= PFTM_TCP_FIRST_PACKET
;
5825 s
->src
.state
= PFUDPS_SINGLE
;
5826 s
->dst
.state
= PFUDPS_NO_TRAFFIC
;
5827 s
->timeout
= PFTM_UDP_FIRST_PACKET
;
5831 case IPPROTO_ICMPV6
:
5833 s
->timeout
= PFTM_ICMP_FIRST_PACKET
;
5836 s
->src
.state
= PFGRE1S_INITIATING
;
5837 s
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
5838 s
->timeout
= PFTM_GREv1_INITIATING
;
5841 s
->src
.state
= PFESPS_INITIATING
;
5842 s
->dst
.state
= PFESPS_NO_TRAFFIC
;
5843 s
->timeout
= PFTM_ESP_FIRST_PACKET
;
5846 s
->src
.state
= PFOTHERS_SINGLE
;
5847 s
->dst
.state
= PFOTHERS_NO_TRAFFIC
;
5848 s
->timeout
= PFTM_OTHER_FIRST_PACKET
;
5851 s
->creation
= pf_time_second();
5852 s
->expire
= pf_time_second();
5856 s
->src_node
->states
++;
5857 VERIFY(s
->src_node
->states
!= 0);
5860 PF_ACPY(&nsn
->raddr
, &pd
->naddr
, af
);
5861 s
->nat_src_node
= nsn
;
5862 s
->nat_src_node
->states
++;
5863 VERIFY(s
->nat_src_node
->states
!= 0);
5865 if (pd
->proto
== IPPROTO_TCP
) {
5866 if ((pd
->flags
& PFDESC_TCP_NORM
) &&
5867 pf_normalize_tcp_init(pbuf
, off
, pd
, th
, &s
->src
,
5869 REASON_SET(&reason
, PFRES_MEMORY
);
5870 pf_src_tree_remove_state(s
);
5871 STATE_DEC_COUNTERS(s
);
5872 pool_put(&pf_state_pl
, s
);
5875 if ((pd
->flags
& PFDESC_TCP_NORM
) && s
->src
.scrub
&&
5876 pf_normalize_tcp_stateful(pbuf
, off
, pd
, &reason
,
5877 th
, s
, &s
->src
, &s
->dst
, &rewrite
)) {
5878 /* This really shouldn't happen!!! */
5879 DPFPRINTF(PF_DEBUG_URGENT
,
5880 ("pf_normalize_tcp_stateful failed on "
5882 pf_normalize_tcp_cleanup(s
);
5883 pf_src_tree_remove_state(s
);
5884 STATE_DEC_COUNTERS(s
);
5885 pool_put(&pf_state_pl
, s
);
5890 /* allocate state key and import values from psk */
5891 if ((sk
= pf_alloc_state_key(s
, &psk
)) == NULL
) {
5892 REASON_SET(&reason
, PFRES_MEMORY
);
5894 * XXXSCW: This will leak the freshly-allocated
5895 * state structure 's'. Although it should
5896 * eventually be aged-out and removed.
5901 pf_set_rt_ifp(s
, saddr
, af
); /* needs s->state_key set */
5903 pbuf
= pd
->mp
; // XXXSCW: Why?
5905 if (sk
->app_state
== 0) {
5906 switch (pd
->proto
) {
5908 u_int16_t dport
= (direction
== PF_OUT
) ?
5909 sk
->ext_gwy
.xport
.port
: sk
->gwy
.xport
.port
;
5912 ntohs(dport
) == PF_PPTP_PORT
) {
5913 struct pf_app_state
*as
;
5915 as
= pool_get(&pf_app_state_pl
,
5923 bzero(as
, sizeof(*as
));
5924 as
->handler
= pf_pptp_handler
;
5925 as
->compare_lan_ext
= 0;
5926 as
->compare_ext_gwy
= 0;
5927 as
->u
.pptp
.grev1_state
= 0;
5929 (void) hook_establish(&s
->unlink_hooks
,
5930 0, (hook_fn_t
) pf_pptp_unlink
, s
);
5937 ntohs(uh
->uh_sport
) == PF_IKE_PORT
&&
5938 ntohs(uh
->uh_dport
) == PF_IKE_PORT
) {
5939 struct pf_app_state
*as
;
5941 as
= pool_get(&pf_app_state_pl
,
5949 bzero(as
, sizeof(*as
));
5950 as
->compare_lan_ext
= pf_ike_compare
;
5951 as
->compare_ext_gwy
= pf_ike_compare
;
5952 as
->u
.ike
.cookie
= ike
.initiator_cookie
;
5963 if (pf_insert_state(BOUND_IFACE(r
, kif
), s
)) {
5964 if (pd
->proto
== IPPROTO_TCP
) {
5965 pf_normalize_tcp_cleanup(s
);
5967 REASON_SET(&reason
, PFRES_STATEINS
);
5968 pf_src_tree_remove_state(s
);
5969 STATE_DEC_COUNTERS(s
);
5970 pool_put(&pf_state_pl
, s
);
5979 if (pd
->proto
== IPPROTO_TCP
&&
5980 (th
->th_flags
& (TH_SYN
| TH_ACK
)) == TH_SYN
&&
5981 r
->keep_state
== PF_STATE_SYNPROXY
) {
5982 int ua
= (sk
->af_lan
== sk
->af_gwy
) ? 1 : 0;
5983 s
->src
.state
= PF_TCPS_PROXY_SRC
;
5985 if (direction
== PF_OUT
) {
5986 pf_change_ap(direction
, pd
->mp
, saddr
,
5987 &th
->th_sport
, pd
->ip_sum
,
5988 &th
->th_sum
, &pd
->baddr
,
5989 bxport
.port
, 0, af
, pd
->af
, ua
);
5990 sxport
.port
= th
->th_sport
;
5992 pf_change_ap(direction
, pd
->mp
, daddr
,
5993 &th
->th_dport
, pd
->ip_sum
,
5994 &th
->th_sum
, &pd
->baddr
,
5995 bxport
.port
, 0, af
, pd
->af
, ua
);
5996 sxport
.port
= th
->th_dport
;
5999 s
->src
.seqhi
= htonl(random());
6000 /* Find mss option */
6001 mss
= pf_get_mss(pbuf
, off
, th
->th_off
, af
);
6002 mss
= pf_calc_mss(saddr
, af
, mss
);
6003 mss
= pf_calc_mss(daddr
, af
, mss
);
6005 pf_send_tcp(r
, af
, daddr
, saddr
, th
->th_dport
,
6006 th
->th_sport
, s
->src
.seqhi
, ntohl(th
->th_seq
) + 1,
6007 TH_SYN
| TH_ACK
, 0, s
->src
.mss
, 0, 1, 0, NULL
, NULL
);
6008 REASON_SET(&reason
, PFRES_SYNPROXY
);
6009 return PF_SYNPROXY_DROP
;
6012 if (sk
->app_state
&& sk
->app_state
->handler
) {
6015 switch (pd
->proto
) {
6017 offx
+= th
->th_off
<< 2;
6020 offx
+= pd
->hdr
.udp
->uh_ulen
<< 2;
6023 /* ALG handlers only apply to TCP and UDP rules */
6028 sk
->app_state
->handler(s
, direction
, offx
,
6031 REASON_SET(&reason
, PFRES_MEMORY
);
6034 pbuf
= pd
->mp
; // XXXSCW: Why?
6039 /* copy back packet headers if we performed NAT operations */
6041 if (rewrite
< off
+ hdrlen
) {
6042 rewrite
= off
+ hdrlen
;
6045 if (pf_lazy_makewritable(pd
, pd
->mp
, rewrite
) == NULL
) {
6046 REASON_SET(&reason
, PFRES_MEMORY
);
6050 pbuf_copy_back(pbuf
, off
, hdrlen
, pd
->hdr
.any
);
6051 if (af
== AF_INET6
&& pd
->naf
== AF_INET
) {
6052 return pf_nat64_ipv6(pbuf
, off
, pd
);
6053 } else if (af
== AF_INET
&& pd
->naf
== AF_INET6
) {
6054 return pf_nat64_ipv4(pbuf
, off
, pd
);
6061 boolean_t is_nlc_enabled_glb
= FALSE
;
6063 static inline boolean_t
6064 pf_is_dummynet_enabled(void)
6067 if (__probable(!PF_IS_ENABLED
)) {
6071 if (__probable(!DUMMYNET_LOADED
)) {
6075 if (__probable(TAILQ_EMPTY(pf_main_ruleset
.
6076 rules
[PF_RULESET_DUMMYNET
].active
.ptr
))) {
6083 #endif /* DUMMYNET */
6088 * When pf_test_dummynet() returns PF_PASS, the rule matching parameter "rm"
6089 * remains unchanged, meaning the packet did not match a dummynet rule.
6090 * when the packet does match a dummynet rule, pf_test_dummynet() returns
6091 * PF_PASS and zero out the mbuf rule as the packet is effectively siphoned
6095 pf_test_dummynet(struct pf_rule
**rm
, int direction
, struct pfi_kif
*kif
,
6096 pbuf_t
**pbuf0
, struct pf_pdesc
*pd
, struct ip_fw_args
*fwa
)
6098 pbuf_t
*pbuf
= *pbuf0
;
6099 struct pf_rule
*am
= NULL
;
6100 struct pf_ruleset
*rsm
= NULL
;
6101 struct pf_addr
*saddr
= pd
->src
, *daddr
= pd
->dst
;
6102 sa_family_t af
= pd
->af
;
6103 struct pf_rule
*r
, *a
= NULL
;
6104 struct pf_ruleset
*ruleset
= NULL
;
6105 struct tcphdr
*th
= pd
->hdr
.tcp
;
6109 unsigned int rtableid
= IFSCOPE_NONE
;
6112 u_int8_t icmptype
= 0, icmpcode
= 0;
6113 struct ip_fw_args dnflow
;
6114 struct pf_rule
*prev_matching_rule
= fwa
? fwa
->fwa_pf_rule
: NULL
;
6115 int found_prev_rule
= (prev_matching_rule
) ? 0 : 1;
6117 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
6119 if (!pf_is_dummynet_enabled()) {
6123 bzero(&dnflow
, sizeof(dnflow
));
6127 /* Fragments don't gave protocol headers */
6128 if (!(pd
->flags
& PFDESC_IP_FRAG
)) {
6129 switch (pd
->proto
) {
6131 dnflow
.fwa_id
.flags
= pd
->hdr
.tcp
->th_flags
;
6132 dnflow
.fwa_id
.dst_port
= ntohs(pd
->hdr
.tcp
->th_dport
);
6133 dnflow
.fwa_id
.src_port
= ntohs(pd
->hdr
.tcp
->th_sport
);
6134 hdrlen
= sizeof(*th
);
6137 dnflow
.fwa_id
.dst_port
= ntohs(pd
->hdr
.udp
->uh_dport
);
6138 dnflow
.fwa_id
.src_port
= ntohs(pd
->hdr
.udp
->uh_sport
);
6139 hdrlen
= sizeof(*pd
->hdr
.udp
);
6143 if (af
!= AF_INET
) {
6146 hdrlen
= ICMP_MINLEN
;
6147 icmptype
= pd
->hdr
.icmp
->icmp_type
;
6148 icmpcode
= pd
->hdr
.icmp
->icmp_code
;
6152 case IPPROTO_ICMPV6
:
6153 if (af
!= AF_INET6
) {
6156 hdrlen
= sizeof(*pd
->hdr
.icmp6
);
6157 icmptype
= pd
->hdr
.icmp6
->icmp6_type
;
6158 icmpcode
= pd
->hdr
.icmp6
->icmp6_code
;
6162 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
) {
6163 hdrlen
= sizeof(*pd
->hdr
.grev1
);
6167 hdrlen
= sizeof(*pd
->hdr
.esp
);
6172 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_DUMMYNET
].active
.ptr
);
6176 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
) {
6177 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
6178 } else if (r
->direction
&& r
->direction
!= direction
) {
6179 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
6180 } else if (r
->af
&& r
->af
!= af
) {
6181 r
= r
->skip
[PF_SKIP_AF
].ptr
;
6182 } else if (r
->proto
&& r
->proto
!= pd
->proto
) {
6183 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
6184 } else if (PF_MISMATCHAW(&r
->src
.addr
, saddr
, af
,
6186 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
6188 /* tcp/udp only. port_op always 0 in other cases */
6189 else if (r
->proto
== pd
->proto
&&
6190 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
6191 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6192 ((r
->src
.xport
.range
.op
&&
6193 !pf_match_port(r
->src
.xport
.range
.op
,
6194 r
->src
.xport
.range
.port
[0], r
->src
.xport
.range
.port
[1],
6196 r
= r
->skip
[PF_SKIP_SRC_PORT
].ptr
;
6197 } else if (PF_MISMATCHAW(&r
->dst
.addr
, daddr
, af
,
6198 r
->dst
.neg
, NULL
)) {
6199 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
6201 /* tcp/udp only. port_op always 0 in other cases */
6202 else if (r
->proto
== pd
->proto
&&
6203 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
6204 r
->dst
.xport
.range
.op
&&
6205 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6206 !pf_match_port(r
->dst
.xport
.range
.op
,
6207 r
->dst
.xport
.range
.port
[0], r
->dst
.xport
.range
.port
[1],
6209 r
= r
->skip
[PF_SKIP_DST_PORT
].ptr
;
6211 /* icmp only. type always 0 in other cases */
6213 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6214 r
->type
!= icmptype
+ 1)) {
6215 r
= TAILQ_NEXT(r
, entries
);
6217 /* icmp only. type always 0 in other cases */
6219 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6220 r
->code
!= icmpcode
+ 1)) {
6221 r
= TAILQ_NEXT(r
, entries
);
6222 } else if (r
->tos
&& !(r
->tos
== pd
->tos
)) {
6223 r
= TAILQ_NEXT(r
, entries
);
6224 } else if (r
->rule_flag
& PFRULE_FRAGMENT
) {
6225 r
= TAILQ_NEXT(r
, entries
);
6226 } else if (pd
->proto
== IPPROTO_TCP
&&
6227 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6228 (r
->flagset
& th
->th_flags
) != r
->flags
)) {
6229 r
= TAILQ_NEXT(r
, entries
);
6230 } else if (r
->prob
&& r
->prob
<= (RandomULong() % (UINT_MAX
- 1) + 1)) {
6231 r
= TAILQ_NEXT(r
, entries
);
6232 } else if (r
->match_tag
&& !pf_match_tag(r
, pd
->pf_mtag
, &tag
)) {
6233 r
= TAILQ_NEXT(r
, entries
);
6236 * Need to go past the previous dummynet matching rule
6238 if (r
->anchor
== NULL
) {
6239 if (found_prev_rule
) {
6243 if (PF_RTABLEID_IS_VALID(r
->rtableid
)) {
6244 rtableid
= r
->rtableid
;
6253 } else if (r
== prev_matching_rule
) {
6254 found_prev_rule
= 1;
6256 r
= TAILQ_NEXT(r
, entries
);
6258 pf_step_into_anchor(&asd
, &ruleset
,
6259 PF_RULESET_DUMMYNET
, &r
, &a
, &match
);
6262 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
6263 PF_RULESET_DUMMYNET
, &r
, &a
, &match
)) {
6275 REASON_SET(&reason
, PFRES_DUMMYNET
);
6278 PFLOG_PACKET(kif
, h
, pbuf
, af
, direction
, reason
, r
,
6282 if (r
->action
== PF_NODUMMYNET
) {
6283 int dirndx
= (direction
== PF_OUT
);
6285 r
->packets
[dirndx
]++;
6286 r
->bytes
[dirndx
] += pd
->tot_len
;
6290 if (pf_tag_packet(pbuf
, pd
->pf_mtag
, tag
, rtableid
, pd
)) {
6291 REASON_SET(&reason
, PFRES_MEMORY
);
6296 if (r
->dnpipe
&& ip_dn_io_ptr
!= NULL
) {
6298 int dirndx
= (direction
== PF_OUT
);
6300 r
->packets
[dirndx
]++;
6301 r
->bytes
[dirndx
] += pd
->tot_len
;
6303 dnflow
.fwa_cookie
= r
->dnpipe
;
6304 dnflow
.fwa_pf_rule
= r
;
6305 dnflow
.fwa_id
.proto
= pd
->proto
;
6306 dnflow
.fwa_flags
= r
->dntype
;
6309 dnflow
.fwa_id
.addr_type
= 4;
6310 dnflow
.fwa_id
.src_ip
= ntohl(saddr
->v4addr
.s_addr
);
6311 dnflow
.fwa_id
.dst_ip
= ntohl(daddr
->v4addr
.s_addr
);
6314 dnflow
.fwa_id
.addr_type
= 6;
6315 dnflow
.fwa_id
.src_ip6
= saddr
->v6addr
;
6316 dnflow
.fwa_id
.dst_ip6
= saddr
->v6addr
;
6321 dnflow
.fwa_oif
= fwa
->fwa_oif
;
6322 dnflow
.fwa_oflags
= fwa
->fwa_oflags
;
6324 * Note that fwa_ro, fwa_dst and fwa_ipoa are
6325 * actually in a union so the following does work
6326 * for both IPv4 and IPv6
6328 dnflow
.fwa_ro
= fwa
->fwa_ro
;
6329 dnflow
.fwa_dst
= fwa
->fwa_dst
;
6330 dnflow
.fwa_ipoa
= fwa
->fwa_ipoa
;
6331 dnflow
.fwa_ro6_pmtu
= fwa
->fwa_ro6_pmtu
;
6332 dnflow
.fwa_origifp
= fwa
->fwa_origifp
;
6333 dnflow
.fwa_mtu
= fwa
->fwa_mtu
;
6334 dnflow
.fwa_unfragpartlen
= fwa
->fwa_unfragpartlen
;
6335 dnflow
.fwa_exthdrs
= fwa
->fwa_exthdrs
;
6338 if (af
== AF_INET
) {
6339 struct ip
*iphdr
= pbuf
->pb_data
;
6340 NTOHS(iphdr
->ip_len
);
6341 NTOHS(iphdr
->ip_off
);
6344 * Don't need to unlock pf_lock as NET_THREAD_HELD_PF
6345 * allows for recursive behavior
6347 m
= pbuf_to_mbuf(pbuf
, TRUE
);
6350 dnflow
.fwa_cookie
, (af
== AF_INET
) ?
6351 ((direction
== PF_IN
) ? DN_TO_IP_IN
: DN_TO_IP_OUT
) :
6352 ((direction
== PF_IN
) ? DN_TO_IP6_IN
: DN_TO_IP6_OUT
),
6353 &dnflow
, DN_CLIENT_PF
);
6357 * The packet is siphoned out by dummynet so return a NULL
6358 * pbuf so the caller can still return success.
6367 #endif /* DUMMYNET */
6370 pf_test_fragment(struct pf_rule
**rm
, int direction
, struct pfi_kif
*kif
,
6371 pbuf_t
*pbuf
, void *h
, struct pf_pdesc
*pd
, struct pf_rule
**am
,
6372 struct pf_ruleset
**rsm
)
6375 struct pf_rule
*r
, *a
= NULL
;
6376 struct pf_ruleset
*ruleset
= NULL
;
6377 sa_family_t af
= pd
->af
;
6383 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_FILTER
].active
.ptr
);
6386 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
) {
6387 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
6388 } else if (r
->direction
&& r
->direction
!= direction
) {
6389 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
6390 } else if (r
->af
&& r
->af
!= af
) {
6391 r
= r
->skip
[PF_SKIP_AF
].ptr
;
6392 } else if (r
->proto
&& r
->proto
!= pd
->proto
) {
6393 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
6394 } else if (PF_MISMATCHAW(&r
->src
.addr
, pd
->src
, af
,
6396 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
6397 } else if (PF_MISMATCHAW(&r
->dst
.addr
, pd
->dst
, af
,
6398 r
->dst
.neg
, NULL
)) {
6399 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
6400 } else if ((r
->rule_flag
& PFRULE_TOS
) && r
->tos
&&
6401 !(r
->tos
& pd
->tos
)) {
6402 r
= TAILQ_NEXT(r
, entries
);
6403 } else if ((r
->rule_flag
& PFRULE_DSCP
) && r
->tos
&&
6404 !(r
->tos
& (pd
->tos
& DSCP_MASK
))) {
6405 r
= TAILQ_NEXT(r
, entries
);
6406 } else if ((r
->rule_flag
& PFRULE_SC
) && r
->tos
&&
6407 ((r
->tos
& SCIDX_MASK
) != pd
->sc
)) {
6408 r
= TAILQ_NEXT(r
, entries
);
6409 } else if (r
->os_fingerprint
!= PF_OSFP_ANY
) {
6410 r
= TAILQ_NEXT(r
, entries
);
6411 } else if (pd
->proto
== IPPROTO_UDP
&&
6412 (r
->src
.xport
.range
.op
|| r
->dst
.xport
.range
.op
)) {
6413 r
= TAILQ_NEXT(r
, entries
);
6414 } else if (pd
->proto
== IPPROTO_TCP
&&
6415 (r
->src
.xport
.range
.op
|| r
->dst
.xport
.range
.op
||
6417 r
= TAILQ_NEXT(r
, entries
);
6418 } else if ((pd
->proto
== IPPROTO_ICMP
||
6419 pd
->proto
== IPPROTO_ICMPV6
) &&
6420 (r
->type
|| r
->code
)) {
6421 r
= TAILQ_NEXT(r
, entries
);
6422 } else if (r
->prob
&& r
->prob
<= (RandomULong() % (UINT_MAX
- 1) + 1)) {
6423 r
= TAILQ_NEXT(r
, entries
);
6424 } else if (r
->match_tag
&& !pf_match_tag(r
, pd
->pf_mtag
, &tag
)) {
6425 r
= TAILQ_NEXT(r
, entries
);
6427 if (r
->anchor
== NULL
) {
6435 r
= TAILQ_NEXT(r
, entries
);
6437 pf_step_into_anchor(&asd
, &ruleset
,
6438 PF_RULESET_FILTER
, &r
, &a
, &match
);
6441 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
6442 PF_RULESET_FILTER
, &r
, &a
, &match
)) {
6450 REASON_SET(&reason
, PFRES_MATCH
);
6453 PFLOG_PACKET(kif
, h
, pbuf
, af
, direction
, reason
, r
, a
, ruleset
,
6457 if (r
->action
!= PF_PASS
) {
6461 if (pf_tag_packet(pbuf
, pd
->pf_mtag
, tag
, -1, NULL
)) {
6462 REASON_SET(&reason
, PFRES_MEMORY
);
6470 pf_pptp_handler(struct pf_state
*s
, int direction
, int off
,
6471 struct pf_pdesc
*pd
, struct pfi_kif
*kif
)
6473 #pragma unused(direction)
6475 struct pf_pptp_state
*pptps
;
6476 struct pf_pptp_ctrl_msg cm
;
6478 struct pf_state
*gs
;
6480 u_int16_t
*pac_call_id
;
6481 u_int16_t
*pns_call_id
;
6482 u_int16_t
*spoof_call_id
;
6483 u_int8_t
*pac_state
;
6484 u_int8_t
*pns_state
;
6485 enum { PF_PPTP_PASS
, PF_PPTP_INSERT_GRE
, PF_PPTP_REMOVE_GRE
} op
;
6487 struct pf_state_key
*sk
;
6488 struct pf_state_key
*gsk
;
6489 struct pf_app_state
*gas
;
6492 pptps
= &sk
->app_state
->u
.pptp
;
6493 gs
= pptps
->grev1_state
;
6496 gs
->expire
= pf_time_second();
6500 plen
= min(sizeof(cm
), pbuf
->pb_packet_len
- off
);
6501 if (plen
< PF_PPTP_CTRL_MSG_MINSIZE
) {
6504 tlen
= plen
- PF_PPTP_CTRL_MSG_MINSIZE
;
6505 pbuf_copy_data(pbuf
, off
, plen
, &cm
);
6507 if (ntohl(cm
.hdr
.magic
) != PF_PPTP_MAGIC_NUMBER
) {
6510 if (ntohs(cm
.hdr
.type
) != 1) {
6514 #define TYPE_LEN_CHECK(_type, _name) \
6515 case PF_PPTP_CTRL_TYPE_##_type: \
6516 if (tlen < sizeof(struct pf_pptp_ctrl_##_name)) \
6520 switch (cm
.ctrl
.type
) {
6521 TYPE_LEN_CHECK(START_REQ
, start_req
);
6522 TYPE_LEN_CHECK(START_RPY
, start_rpy
);
6523 TYPE_LEN_CHECK(STOP_REQ
, stop_req
);
6524 TYPE_LEN_CHECK(STOP_RPY
, stop_rpy
);
6525 TYPE_LEN_CHECK(ECHO_REQ
, echo_req
);
6526 TYPE_LEN_CHECK(ECHO_RPY
, echo_rpy
);
6527 TYPE_LEN_CHECK(CALL_OUT_REQ
, call_out_req
);
6528 TYPE_LEN_CHECK(CALL_OUT_RPY
, call_out_rpy
);
6529 TYPE_LEN_CHECK(CALL_IN_1ST
, call_in_1st
);
6530 TYPE_LEN_CHECK(CALL_IN_2ND
, call_in_2nd
);
6531 TYPE_LEN_CHECK(CALL_IN_3RD
, call_in_3rd
);
6532 TYPE_LEN_CHECK(CALL_CLR
, call_clr
);
6533 TYPE_LEN_CHECK(CALL_DISC
, call_disc
);
6534 TYPE_LEN_CHECK(ERROR
, error
);
6535 TYPE_LEN_CHECK(SET_LINKINFO
, set_linkinfo
);
6539 #undef TYPE_LEN_CHECK
6542 gs
= pool_get(&pf_state_pl
, PR_WAITOK
);
6547 memcpy(gs
, s
, sizeof(*gs
));
6549 memset(&gs
->entry_id
, 0, sizeof(gs
->entry_id
));
6550 memset(&gs
->entry_list
, 0, sizeof(gs
->entry_list
));
6552 TAILQ_INIT(&gs
->unlink_hooks
);
6555 gs
->pfsync_time
= 0;
6556 gs
->packets
[0] = gs
->packets
[1] = 0;
6557 gs
->bytes
[0] = gs
->bytes
[1] = 0;
6558 gs
->timeout
= PFTM_UNLINKED
;
6559 gs
->id
= gs
->creatorid
= 0;
6560 gs
->src
.state
= gs
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
6561 gs
->src
.scrub
= gs
->dst
.scrub
= 0;
6563 gas
= pool_get(&pf_app_state_pl
, PR_NOWAIT
);
6565 pool_put(&pf_state_pl
, gs
);
6569 gsk
= pf_alloc_state_key(gs
, NULL
);
6571 pool_put(&pf_app_state_pl
, gas
);
6572 pool_put(&pf_state_pl
, gs
);
6576 memcpy(&gsk
->lan
, &sk
->lan
, sizeof(gsk
->lan
));
6577 memcpy(&gsk
->gwy
, &sk
->gwy
, sizeof(gsk
->gwy
));
6578 memcpy(&gsk
->ext_lan
, &sk
->ext_lan
, sizeof(gsk
->ext_lan
));
6579 memcpy(&gsk
->ext_gwy
, &sk
->ext_gwy
, sizeof(gsk
->ext_gwy
));
6580 gsk
->af_lan
= sk
->af_lan
;
6581 gsk
->af_gwy
= sk
->af_gwy
;
6582 gsk
->proto
= IPPROTO_GRE
;
6583 gsk
->proto_variant
= PF_GRE_PPTP_VARIANT
;
6584 gsk
->app_state
= gas
;
6585 gsk
->lan
.xport
.call_id
= 0;
6586 gsk
->gwy
.xport
.call_id
= 0;
6587 gsk
->ext_lan
.xport
.call_id
= 0;
6588 gsk
->ext_gwy
.xport
.call_id
= 0;
6589 gsk
->flowsrc
= FLOWSRC_PF
;
6590 gsk
->flowhash
= pf_calc_state_key_flowhash(gsk
);
6591 memset(gas
, 0, sizeof(*gas
));
6592 gas
->u
.grev1
.pptp_state
= s
;
6593 STATE_INC_COUNTERS(gs
);
6594 pptps
->grev1_state
= gs
;
6595 (void) hook_establish(&gs
->unlink_hooks
, 0,
6596 (hook_fn_t
) pf_grev1_unlink
, gs
);
6598 gsk
= gs
->state_key
;
6601 switch (sk
->direction
) {
6603 pns_call_id
= &gsk
->ext_lan
.xport
.call_id
;
6604 pns_state
= &gs
->dst
.state
;
6605 pac_call_id
= &gsk
->lan
.xport
.call_id
;
6606 pac_state
= &gs
->src
.state
;
6610 pns_call_id
= &gsk
->lan
.xport
.call_id
;
6611 pns_state
= &gs
->src
.state
;
6612 pac_call_id
= &gsk
->ext_lan
.xport
.call_id
;
6613 pac_state
= &gs
->dst
.state
;
6617 DPFPRINTF(PF_DEBUG_URGENT
,
6618 ("pf_pptp_handler: bad directional!\n"));
6625 ct
= ntohs(cm
.ctrl
.type
);
6628 case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ
:
6629 *pns_call_id
= cm
.msg
.call_out_req
.call_id
;
6630 *pns_state
= PFGRE1S_INITIATING
;
6631 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6632 spoof_call_id
= &cm
.msg
.call_out_req
.call_id
;
6636 case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY
:
6637 *pac_call_id
= cm
.msg
.call_out_rpy
.call_id
;
6638 if (s
->nat_rule
.ptr
) {
6640 (pac_call_id
== &gsk
->lan
.xport
.call_id
) ?
6641 &cm
.msg
.call_out_rpy
.call_id
:
6642 &cm
.msg
.call_out_rpy
.peer_call_id
;
6644 if (gs
->timeout
== PFTM_UNLINKED
) {
6645 *pac_state
= PFGRE1S_INITIATING
;
6646 op
= PF_PPTP_INSERT_GRE
;
6650 case PF_PPTP_CTRL_TYPE_CALL_IN_1ST
:
6651 *pns_call_id
= cm
.msg
.call_in_1st
.call_id
;
6652 *pns_state
= PFGRE1S_INITIATING
;
6653 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6654 spoof_call_id
= &cm
.msg
.call_in_1st
.call_id
;
6658 case PF_PPTP_CTRL_TYPE_CALL_IN_2ND
:
6659 *pac_call_id
= cm
.msg
.call_in_2nd
.call_id
;
6660 *pac_state
= PFGRE1S_INITIATING
;
6661 if (s
->nat_rule
.ptr
) {
6663 (pac_call_id
== &gsk
->lan
.xport
.call_id
) ?
6664 &cm
.msg
.call_in_2nd
.call_id
:
6665 &cm
.msg
.call_in_2nd
.peer_call_id
;
6669 case PF_PPTP_CTRL_TYPE_CALL_IN_3RD
:
6670 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6671 spoof_call_id
= &cm
.msg
.call_in_3rd
.call_id
;
6673 if (cm
.msg
.call_in_3rd
.call_id
!= *pns_call_id
) {
6676 if (gs
->timeout
== PFTM_UNLINKED
) {
6677 op
= PF_PPTP_INSERT_GRE
;
6681 case PF_PPTP_CTRL_TYPE_CALL_CLR
:
6682 if (cm
.msg
.call_clr
.call_id
!= *pns_call_id
) {
6683 op
= PF_PPTP_REMOVE_GRE
;
6687 case PF_PPTP_CTRL_TYPE_CALL_DISC
:
6688 if (cm
.msg
.call_clr
.call_id
!= *pac_call_id
) {
6689 op
= PF_PPTP_REMOVE_GRE
;
6693 case PF_PPTP_CTRL_TYPE_ERROR
:
6694 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6695 spoof_call_id
= &cm
.msg
.error
.peer_call_id
;
6699 case PF_PPTP_CTRL_TYPE_SET_LINKINFO
:
6700 if (s
->nat_rule
.ptr
&& pac_call_id
== &gsk
->lan
.xport
.call_id
) {
6701 spoof_call_id
= &cm
.msg
.set_linkinfo
.peer_call_id
;
6710 if (!gsk
->gwy
.xport
.call_id
&& gsk
->lan
.xport
.call_id
) {
6711 gsk
->gwy
.xport
.call_id
= gsk
->lan
.xport
.call_id
;
6712 if (spoof_call_id
) {
6713 u_int16_t call_id
= 0;
6715 struct pf_state_key_cmp key
;
6717 key
.af_gwy
= gsk
->af_gwy
;
6718 key
.proto
= IPPROTO_GRE
;
6719 key
.proto_variant
= PF_GRE_PPTP_VARIANT
;
6720 PF_ACPY(&key
.gwy
.addr
, &gsk
->gwy
.addr
, key
.af_gwy
);
6721 PF_ACPY(&key
.ext_gwy
.addr
, &gsk
->ext_gwy
.addr
, key
.af_gwy
);
6722 key
.gwy
.xport
.call_id
= gsk
->gwy
.xport
.call_id
;
6723 key
.ext_gwy
.xport
.call_id
= gsk
->ext_gwy
.xport
.call_id
;
6725 call_id
= htonl(random());
6728 while (pf_find_state_all(&key
, PF_IN
, 0)) {
6729 call_id
= ntohs(call_id
);
6731 if (--call_id
== 0) {
6734 call_id
= htons(call_id
);
6736 key
.gwy
.xport
.call_id
= call_id
;
6739 DPFPRINTF(PF_DEBUG_URGENT
,
6740 ("pf_pptp_handler: failed to spoof "
6742 key
.gwy
.xport
.call_id
= 0;
6747 gsk
->gwy
.xport
.call_id
= call_id
;
6753 if (spoof_call_id
&& gsk
->lan
.xport
.call_id
!= gsk
->gwy
.xport
.call_id
) {
6754 if (*spoof_call_id
== gsk
->gwy
.xport
.call_id
) {
6755 *spoof_call_id
= gsk
->lan
.xport
.call_id
;
6756 th
->th_sum
= pf_cksum_fixup(th
->th_sum
,
6757 gsk
->gwy
.xport
.call_id
, gsk
->lan
.xport
.call_id
, 0);
6759 *spoof_call_id
= gsk
->gwy
.xport
.call_id
;
6760 th
->th_sum
= pf_cksum_fixup(th
->th_sum
,
6761 gsk
->lan
.xport
.call_id
, gsk
->gwy
.xport
.call_id
, 0);
6764 if (pf_lazy_makewritable(pd
, pbuf
, off
+ plen
) == NULL
) {
6765 pptps
->grev1_state
= NULL
;
6766 STATE_DEC_COUNTERS(gs
);
6767 pool_put(&pf_state_pl
, gs
);
6770 pbuf_copy_back(pbuf
, off
, plen
, &cm
);
6774 case PF_PPTP_REMOVE_GRE
:
6775 gs
->timeout
= PFTM_PURGE
;
6776 gs
->src
.state
= gs
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
6777 gsk
->lan
.xport
.call_id
= 0;
6778 gsk
->gwy
.xport
.call_id
= 0;
6779 gsk
->ext_lan
.xport
.call_id
= 0;
6780 gsk
->ext_gwy
.xport
.call_id
= 0;
6781 gs
->id
= gs
->creatorid
= 0;
6784 case PF_PPTP_INSERT_GRE
:
6785 gs
->creation
= pf_time_second();
6786 gs
->expire
= pf_time_second();
6787 gs
->timeout
= PFTM_TCP_ESTABLISHED
;
6788 if (gs
->src_node
!= NULL
) {
6789 ++gs
->src_node
->states
;
6790 VERIFY(gs
->src_node
->states
!= 0);
6792 if (gs
->nat_src_node
!= NULL
) {
6793 ++gs
->nat_src_node
->states
;
6794 VERIFY(gs
->nat_src_node
->states
!= 0);
6796 pf_set_rt_ifp(gs
, &sk
->lan
.addr
, sk
->af_lan
);
6797 if (pf_insert_state(BOUND_IFACE(s
->rule
.ptr
, kif
), gs
)) {
6800 * FIX ME: insertion can fail when multiple PNS
6801 * behind the same NAT open calls to the same PAC
6802 * simultaneously because spoofed call ID numbers
6803 * are chosen before states are inserted. This is
6804 * hard to fix and happens infrequently enough that
6805 * users will normally try again and this ALG will
6806 * succeed. Failures are expected to be rare enough
6807 * that fixing this is a low priority.
6809 pptps
->grev1_state
= NULL
;
6810 pd
->lmw
= -1; /* Force PF_DROP on PFRES_MEMORY */
6811 pf_src_tree_remove_state(gs
);
6812 STATE_DEC_COUNTERS(gs
);
6813 pool_put(&pf_state_pl
, gs
);
6814 DPFPRINTF(PF_DEBUG_URGENT
, ("pf_pptp_handler: error "
6815 "inserting GREv1 state.\n"));
6825 pf_pptp_unlink(struct pf_state
*s
)
6827 struct pf_app_state
*as
= s
->state_key
->app_state
;
6828 struct pf_state
*grev1s
= as
->u
.pptp
.grev1_state
;
6831 struct pf_app_state
*gas
= grev1s
->state_key
->app_state
;
6833 if (grev1s
->timeout
< PFTM_MAX
) {
6834 grev1s
->timeout
= PFTM_PURGE
;
6836 gas
->u
.grev1
.pptp_state
= NULL
;
6837 as
->u
.pptp
.grev1_state
= NULL
;
6842 pf_grev1_unlink(struct pf_state
*s
)
6844 struct pf_app_state
*as
= s
->state_key
->app_state
;
6845 struct pf_state
*pptps
= as
->u
.grev1
.pptp_state
;
6848 struct pf_app_state
*pas
= pptps
->state_key
->app_state
;
6850 pas
->u
.pptp
.grev1_state
= NULL
;
6851 as
->u
.grev1
.pptp_state
= NULL
;
6856 pf_ike_compare(struct pf_app_state
*a
, struct pf_app_state
*b
)
6858 int64_t d
= a
->u
.ike
.cookie
- b
->u
.ike
.cookie
;
6859 return (d
> 0) ? 1 : ((d
< 0) ? -1 : 0);
6863 pf_do_nat64(struct pf_state_key
*sk
, struct pf_pdesc
*pd
, pbuf_t
*pbuf
,
6866 if (pd
->af
== AF_INET
) {
6867 if (pd
->af
!= sk
->af_lan
) {
6868 pd
->ndaddr
= sk
->lan
.addr
;
6869 pd
->naddr
= sk
->ext_lan
.addr
;
6871 pd
->naddr
= sk
->gwy
.addr
;
6872 pd
->ndaddr
= sk
->ext_gwy
.addr
;
6874 return pf_nat64_ipv4(pbuf
, off
, pd
);
6875 } else if (pd
->af
== AF_INET6
) {
6876 if (pd
->af
!= sk
->af_lan
) {
6877 pd
->ndaddr
= sk
->lan
.addr
;
6878 pd
->naddr
= sk
->ext_lan
.addr
;
6880 pd
->naddr
= sk
->gwy
.addr
;
6881 pd
->ndaddr
= sk
->ext_gwy
.addr
;
6883 return pf_nat64_ipv6(pbuf
, off
, pd
);
6889 pf_test_state_tcp(struct pf_state
**state
, int direction
, struct pfi_kif
*kif
,
6890 pbuf_t
*pbuf
, int off
, void *h
, struct pf_pdesc
*pd
,
6894 struct pf_state_key_cmp key
;
6895 struct tcphdr
*th
= pd
->hdr
.tcp
;
6896 u_int16_t win
= ntohs(th
->th_win
);
6897 u_int32_t ack
, end
, seq
, orig_seq
;
6901 struct pf_state_peer
*src
, *dst
;
6902 struct pf_state_key
*sk
;
6905 key
.proto
= IPPROTO_TCP
;
6906 key
.af_lan
= key
.af_gwy
= pd
->af
;
6909 * For NAT64 the first time rule search and state creation
6910 * is done on the incoming side only.
6911 * Once the state gets created, NAT64's LAN side (ipv6) will
6912 * not be able to find the state in ext-gwy tree as that normally
6913 * is intended to be looked up for incoming traffic from the
6915 * Therefore to handle NAT64 case we init keys here for both
6916 * lan-ext as well as ext-gwy trees.
6917 * In the state lookup we attempt a lookup on both trees if
6918 * first one does not return any result and return a match if
6919 * the match state's was created by NAT64 rule.
6921 PF_ACPY(&key
.ext_gwy
.addr
, pd
->src
, key
.af_gwy
);
6922 PF_ACPY(&key
.gwy
.addr
, pd
->dst
, key
.af_gwy
);
6923 key
.ext_gwy
.xport
.port
= th
->th_sport
;
6924 key
.gwy
.xport
.port
= th
->th_dport
;
6926 PF_ACPY(&key
.lan
.addr
, pd
->src
, key
.af_lan
);
6927 PF_ACPY(&key
.ext_lan
.addr
, pd
->dst
, key
.af_lan
);
6928 key
.lan
.xport
.port
= th
->th_sport
;
6929 key
.ext_lan
.xport
.port
= th
->th_dport
;
6933 sk
= (*state
)->state_key
;
6935 * In case of NAT64 the translation is first applied on the LAN
6936 * side. Therefore for stack's address family comparison
6937 * we use sk->af_lan.
6939 if ((direction
== sk
->direction
) && (pd
->af
== sk
->af_lan
)) {
6940 src
= &(*state
)->src
;
6941 dst
= &(*state
)->dst
;
6943 src
= &(*state
)->dst
;
6944 dst
= &(*state
)->src
;
6947 if (src
->state
== PF_TCPS_PROXY_SRC
) {
6948 if (direction
!= sk
->direction
) {
6949 REASON_SET(reason
, PFRES_SYNPROXY
);
6950 return PF_SYNPROXY_DROP
;
6952 if (th
->th_flags
& TH_SYN
) {
6953 if (ntohl(th
->th_seq
) != src
->seqlo
) {
6954 REASON_SET(reason
, PFRES_SYNPROXY
);
6957 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, pd
->dst
,
6958 pd
->src
, th
->th_dport
, th
->th_sport
,
6959 src
->seqhi
, ntohl(th
->th_seq
) + 1,
6960 TH_SYN
| TH_ACK
, 0, src
->mss
, 0, 1,
6962 REASON_SET(reason
, PFRES_SYNPROXY
);
6963 return PF_SYNPROXY_DROP
;
6964 } else if (!(th
->th_flags
& TH_ACK
) ||
6965 (ntohl(th
->th_ack
) != src
->seqhi
+ 1) ||
6966 (ntohl(th
->th_seq
) != src
->seqlo
+ 1)) {
6967 REASON_SET(reason
, PFRES_SYNPROXY
);
6969 } else if ((*state
)->src_node
!= NULL
&&
6970 pf_src_connlimit(state
)) {
6971 REASON_SET(reason
, PFRES_SRCLIMIT
);
6974 src
->state
= PF_TCPS_PROXY_DST
;
6977 if (src
->state
== PF_TCPS_PROXY_DST
) {
6978 struct pf_state_host
*psrc
, *pdst
;
6980 if (direction
== PF_OUT
) {
6982 pdst
= &sk
->ext_gwy
;
6984 psrc
= &sk
->ext_lan
;
6987 if (direction
== sk
->direction
) {
6988 if (((th
->th_flags
& (TH_SYN
| TH_ACK
)) != TH_ACK
) ||
6989 (ntohl(th
->th_ack
) != src
->seqhi
+ 1) ||
6990 (ntohl(th
->th_seq
) != src
->seqlo
+ 1)) {
6991 REASON_SET(reason
, PFRES_SYNPROXY
);
6994 src
->max_win
= MAX(ntohs(th
->th_win
), 1);
6995 if (dst
->seqhi
== 1) {
6996 dst
->seqhi
= htonl(random());
6998 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, &psrc
->addr
,
6999 &pdst
->addr
, psrc
->xport
.port
, pdst
->xport
.port
,
7000 dst
->seqhi
, 0, TH_SYN
, 0,
7001 src
->mss
, 0, 0, (*state
)->tag
, NULL
, NULL
);
7002 REASON_SET(reason
, PFRES_SYNPROXY
);
7003 return PF_SYNPROXY_DROP
;
7004 } else if (((th
->th_flags
& (TH_SYN
| TH_ACK
)) !=
7005 (TH_SYN
| TH_ACK
)) ||
7006 (ntohl(th
->th_ack
) != dst
->seqhi
+ 1)) {
7007 REASON_SET(reason
, PFRES_SYNPROXY
);
7010 dst
->max_win
= MAX(ntohs(th
->th_win
), 1);
7011 dst
->seqlo
= ntohl(th
->th_seq
);
7012 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, pd
->dst
,
7013 pd
->src
, th
->th_dport
, th
->th_sport
,
7014 ntohl(th
->th_ack
), ntohl(th
->th_seq
) + 1,
7015 TH_ACK
, src
->max_win
, 0, 0, 0,
7016 (*state
)->tag
, NULL
, NULL
);
7017 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, &psrc
->addr
,
7018 &pdst
->addr
, psrc
->xport
.port
, pdst
->xport
.port
,
7019 src
->seqhi
+ 1, src
->seqlo
+ 1,
7020 TH_ACK
, dst
->max_win
, 0, 0, 1,
7022 src
->seqdiff
= dst
->seqhi
-
7024 dst
->seqdiff
= src
->seqhi
-
7026 src
->seqhi
= src
->seqlo
+
7028 dst
->seqhi
= dst
->seqlo
+
7030 src
->wscale
= dst
->wscale
= 0;
7031 src
->state
= dst
->state
=
7033 REASON_SET(reason
, PFRES_SYNPROXY
);
7034 return PF_SYNPROXY_DROP
;
7038 if (((th
->th_flags
& (TH_SYN
| TH_ACK
)) == TH_SYN
) &&
7039 dst
->state
>= TCPS_FIN_WAIT_2
&&
7040 src
->state
>= TCPS_FIN_WAIT_2
) {
7041 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
7042 printf("pf: state reuse ");
7043 pf_print_state(*state
);
7044 pf_print_flags(th
->th_flags
);
7047 /* XXX make sure it's the same direction ?? */
7048 src
->state
= dst
->state
= TCPS_CLOSED
;
7049 pf_unlink_state(*state
);
7054 if ((th
->th_flags
& TH_SYN
) == 0) {
7055 sws
= (src
->wscale
& PF_WSCALE_FLAG
) ?
7056 (src
->wscale
& PF_WSCALE_MASK
) : TCP_MAX_WINSHIFT
;
7057 dws
= (dst
->wscale
& PF_WSCALE_FLAG
) ?
7058 (dst
->wscale
& PF_WSCALE_MASK
) : TCP_MAX_WINSHIFT
;
7064 * Sequence tracking algorithm from Guido van Rooij's paper:
7065 * http://www.madison-gurkha.com/publications/tcp_filtering/
7069 orig_seq
= seq
= ntohl(th
->th_seq
);
7070 if (src
->seqlo
== 0) {
7071 /* First packet from this end. Set its state */
7073 if ((pd
->flags
& PFDESC_TCP_NORM
|| dst
->scrub
) &&
7074 src
->scrub
== NULL
) {
7075 if (pf_normalize_tcp_init(pbuf
, off
, pd
, th
, src
, dst
)) {
7076 REASON_SET(reason
, PFRES_MEMORY
);
7081 /* Deferred generation of sequence number modulator */
7082 if (dst
->seqdiff
&& !src
->seqdiff
) {
7083 /* use random iss for the TCP server */
7084 while ((src
->seqdiff
= random() - seq
) == 0) {
7087 ack
= ntohl(th
->th_ack
) - dst
->seqdiff
;
7088 pf_change_a(&th
->th_seq
, &th
->th_sum
, htonl(seq
+
7090 pf_change_a(&th
->th_ack
, &th
->th_sum
, htonl(ack
), 0);
7091 copyback
= off
+ sizeof(*th
);
7093 ack
= ntohl(th
->th_ack
);
7096 end
= seq
+ pd
->p_len
;
7097 if (th
->th_flags
& TH_SYN
) {
7099 if (dst
->wscale
& PF_WSCALE_FLAG
) {
7100 src
->wscale
= pf_get_wscale(pbuf
, off
,
7101 th
->th_off
, pd
->af
);
7102 if (src
->wscale
& PF_WSCALE_FLAG
) {
7104 * Remove scale factor from initial
7107 sws
= src
->wscale
& PF_WSCALE_MASK
;
7108 win
= ((u_int32_t
)win
+ (1 << sws
) - 1)
7110 dws
= dst
->wscale
& PF_WSCALE_MASK
;
7113 * Window scale negotiation has failed,
7114 * therefore we must restore the window
7115 * scale in the state record that we
7116 * optimistically removed in
7117 * pf_test_rule(). Care is required to
7118 * prevent arithmetic overflow from
7119 * zeroing the window when it's
7120 * truncated down to 16-bits.
7122 u_int32_t max_win
= dst
->max_win
;
7124 dst
->wscale
& PF_WSCALE_MASK
;
7125 dst
->max_win
= MIN(0xffff, max_win
);
7126 /* in case of a retrans SYN|ACK */
7131 if (th
->th_flags
& TH_FIN
) {
7136 if (src
->state
< TCPS_SYN_SENT
) {
7137 src
->state
= TCPS_SYN_SENT
;
7141 * May need to slide the window (seqhi may have been set by
7142 * the crappy stack check or if we picked up the connection
7143 * after establishment)
7145 if (src
->seqhi
== 1 ||
7146 SEQ_GEQ(end
+ MAX(1, (u_int32_t
)dst
->max_win
<< dws
),
7148 src
->seqhi
= end
+ MAX(1, (u_int32_t
)dst
->max_win
<< dws
);
7150 if (win
> src
->max_win
) {
7154 ack
= ntohl(th
->th_ack
) - dst
->seqdiff
;
7156 /* Modulate sequence numbers */
7157 pf_change_a(&th
->th_seq
, &th
->th_sum
, htonl(seq
+
7159 pf_change_a(&th
->th_ack
, &th
->th_sum
, htonl(ack
), 0);
7160 copyback
= off
+ sizeof(*th
);
7162 end
= seq
+ pd
->p_len
;
7163 if (th
->th_flags
& TH_SYN
) {
7166 if (th
->th_flags
& TH_FIN
) {
7171 if ((th
->th_flags
& TH_ACK
) == 0) {
7172 /* Let it pass through the ack skew check */
7174 } else if ((ack
== 0 &&
7175 (th
->th_flags
& (TH_ACK
| TH_RST
)) == (TH_ACK
| TH_RST
)) ||
7176 /* broken tcp stacks do not set ack */
7177 (dst
->state
< TCPS_SYN_SENT
)) {
7179 * Many stacks (ours included) will set the ACK number in an
7180 * FIN|ACK if the SYN times out -- no sequence to ACK.
7186 /* Ease sequencing restrictions on no data packets */
7191 ackskew
= dst
->seqlo
- ack
;
7195 * Need to demodulate the sequence numbers in any TCP SACK options
7196 * (Selective ACK). We could optionally validate the SACK values
7197 * against the current ACK window, either forwards or backwards, but
7198 * I'm not confident that SACK has been implemented properly
7199 * everywhere. It wouldn't surprise me if several stacks accidently
7200 * SACK too far backwards of previously ACKed data. There really aren't
7201 * any security implications of bad SACKing unless the target stack
7202 * doesn't validate the option length correctly. Someone trying to
7203 * spoof into a TCP connection won't bother blindly sending SACK
7206 if (dst
->seqdiff
&& (th
->th_off
<< 2) > (int)sizeof(struct tcphdr
)) {
7207 copyback
= pf_modulate_sack(pbuf
, off
, pd
, th
, dst
);
7208 if (copyback
== -1) {
7209 REASON_SET(reason
, PFRES_MEMORY
);
7213 pbuf
= pd
->mp
; // XXXSCW: Why?
7217 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
7218 if (SEQ_GEQ(src
->seqhi
, end
) &&
7219 /* Last octet inside other's window space */
7220 SEQ_GEQ(seq
, src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
)) &&
7221 /* Retrans: not more than one window back */
7222 (ackskew
>= -MAXACKWINDOW
) &&
7223 /* Acking not more than one reassembled fragment backwards */
7224 (ackskew
<= (MAXACKWINDOW
<< sws
)) &&
7225 /* Acking not more than one window forward */
7226 ((th
->th_flags
& TH_RST
) == 0 || orig_seq
== src
->seqlo
||
7227 (orig_seq
== src
->seqlo
+ 1) || (orig_seq
+ 1 == src
->seqlo
) ||
7228 (pd
->flags
& PFDESC_IP_REAS
) == 0)) {
7229 /* Require an exact/+1 sequence match on resets when possible */
7231 if (dst
->scrub
|| src
->scrub
) {
7232 if (pf_normalize_tcp_stateful(pbuf
, off
, pd
, reason
, th
,
7233 *state
, src
, dst
, ©back
)) {
7237 pbuf
= pd
->mp
; // XXXSCW: Why?
7240 /* update max window */
7241 if (src
->max_win
< win
) {
7244 /* synchronize sequencing */
7245 if (SEQ_GT(end
, src
->seqlo
)) {
7248 /* slide the window of what the other end can send */
7249 if (SEQ_GEQ(ack
+ ((u_int32_t
)win
<< sws
), dst
->seqhi
)) {
7250 dst
->seqhi
= ack
+ MAX(((u_int32_t
)win
<< sws
), 1);
7254 if (th
->th_flags
& TH_SYN
) {
7255 if (src
->state
< TCPS_SYN_SENT
) {
7256 src
->state
= TCPS_SYN_SENT
;
7259 if (th
->th_flags
& TH_FIN
) {
7260 if (src
->state
< TCPS_CLOSING
) {
7261 src
->state
= TCPS_CLOSING
;
7264 if (th
->th_flags
& TH_ACK
) {
7265 if (dst
->state
== TCPS_SYN_SENT
) {
7266 dst
->state
= TCPS_ESTABLISHED
;
7267 if (src
->state
== TCPS_ESTABLISHED
&&
7268 (*state
)->src_node
!= NULL
&&
7269 pf_src_connlimit(state
)) {
7270 REASON_SET(reason
, PFRES_SRCLIMIT
);
7273 } else if (dst
->state
== TCPS_CLOSING
) {
7274 dst
->state
= TCPS_FIN_WAIT_2
;
7277 if (th
->th_flags
& TH_RST
) {
7278 src
->state
= dst
->state
= TCPS_TIME_WAIT
;
7281 /* update expire time */
7282 (*state
)->expire
= pf_time_second();
7283 if (src
->state
>= TCPS_FIN_WAIT_2
&&
7284 dst
->state
>= TCPS_FIN_WAIT_2
) {
7285 (*state
)->timeout
= PFTM_TCP_CLOSED
;
7286 } else if (src
->state
>= TCPS_CLOSING
&&
7287 dst
->state
>= TCPS_CLOSING
) {
7288 (*state
)->timeout
= PFTM_TCP_FIN_WAIT
;
7289 } else if (src
->state
< TCPS_ESTABLISHED
||
7290 dst
->state
< TCPS_ESTABLISHED
) {
7291 (*state
)->timeout
= PFTM_TCP_OPENING
;
7292 } else if (src
->state
>= TCPS_CLOSING
||
7293 dst
->state
>= TCPS_CLOSING
) {
7294 (*state
)->timeout
= PFTM_TCP_CLOSING
;
7296 (*state
)->timeout
= PFTM_TCP_ESTABLISHED
;
7299 /* Fall through to PASS packet */
7300 } else if ((dst
->state
< TCPS_SYN_SENT
||
7301 dst
->state
>= TCPS_FIN_WAIT_2
|| src
->state
>= TCPS_FIN_WAIT_2
) &&
7302 SEQ_GEQ(src
->seqhi
+ MAXACKWINDOW
, end
) &&
7303 /* Within a window forward of the originating packet */
7304 SEQ_GEQ(seq
, src
->seqlo
- MAXACKWINDOW
)) {
7305 /* Within a window backward of the originating packet */
7308 * This currently handles three situations:
7309 * 1) Stupid stacks will shotgun SYNs before their peer
7311 * 2) When PF catches an already established stream (the
7312 * firewall rebooted, the state table was flushed, routes
7314 * 3) Packets get funky immediately after the connection
7315 * closes (this should catch Solaris spurious ACK|FINs
7316 * that web servers like to spew after a close)
7318 * This must be a little more careful than the above code
7319 * since packet floods will also be caught here. We don't
7320 * update the TTL here to mitigate the damage of a packet
7321 * flood and so the same code can handle awkward establishment
7322 * and a loosened connection close.
7323 * In the establishment case, a correct peer response will
7324 * validate the connection, go through the normal state code
7325 * and keep updating the state TTL.
7328 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
7329 printf("pf: loose state match: ");
7330 pf_print_state(*state
);
7331 pf_print_flags(th
->th_flags
);
7332 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
7333 "pkts=%llu:%llu dir=%s,%s\n", seq
, orig_seq
, ack
,
7334 pd
->p_len
, ackskew
, (*state
)->packets
[0],
7335 (*state
)->packets
[1],
7336 direction
== PF_IN
? "in" : "out",
7337 direction
== sk
->direction
?
7341 if (dst
->scrub
|| src
->scrub
) {
7342 if (pf_normalize_tcp_stateful(pbuf
, off
, pd
, reason
, th
,
7343 *state
, src
, dst
, ©back
)) {
7346 pbuf
= pd
->mp
; // XXXSCW: Why?
7349 /* update max window */
7350 if (src
->max_win
< win
) {
7353 /* synchronize sequencing */
7354 if (SEQ_GT(end
, src
->seqlo
)) {
7357 /* slide the window of what the other end can send */
7358 if (SEQ_GEQ(ack
+ ((u_int32_t
)win
<< sws
), dst
->seqhi
)) {
7359 dst
->seqhi
= ack
+ MAX(((u_int32_t
)win
<< sws
), 1);
7363 * Cannot set dst->seqhi here since this could be a shotgunned
7364 * SYN and not an already established connection.
7367 if (th
->th_flags
& TH_FIN
) {
7368 if (src
->state
< TCPS_CLOSING
) {
7369 src
->state
= TCPS_CLOSING
;
7372 if (th
->th_flags
& TH_RST
) {
7373 src
->state
= dst
->state
= TCPS_TIME_WAIT
;
7376 /* Fall through to PASS packet */
7378 if (dst
->state
== TCPS_SYN_SENT
&&
7379 src
->state
== TCPS_SYN_SENT
) {
7380 /* Send RST for state mismatches during handshake */
7381 if (!(th
->th_flags
& TH_RST
)) {
7382 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
,
7383 pd
->dst
, pd
->src
, th
->th_dport
,
7384 th
->th_sport
, ntohl(th
->th_ack
), 0,
7386 (*state
)->rule
.ptr
->return_ttl
, 1, 0,
7387 pd
->eh
, kif
->pfik_ifp
);
7392 } else if (pf_status
.debug
>= PF_DEBUG_MISC
) {
7393 printf("pf: BAD state: ");
7394 pf_print_state(*state
);
7395 pf_print_flags(th
->th_flags
);
7396 printf("\n seq=%u (%u) ack=%u len=%u ackskew=%d "
7397 "sws=%u dws=%u pkts=%llu:%llu dir=%s,%s\n",
7398 seq
, orig_seq
, ack
, pd
->p_len
, ackskew
,
7399 (unsigned int)sws
, (unsigned int)dws
,
7400 (*state
)->packets
[0], (*state
)->packets
[1],
7401 direction
== PF_IN
? "in" : "out",
7402 direction
== sk
->direction
?
7404 printf("pf: State failure on: %c %c %c %c | %c %c\n",
7405 SEQ_GEQ(src
->seqhi
, end
) ? ' ' : '1',
7407 src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
)) ?
7409 (ackskew
>= -MAXACKWINDOW
) ? ' ' : '3',
7410 (ackskew
<= (MAXACKWINDOW
<< sws
)) ? ' ' : '4',
7411 SEQ_GEQ(src
->seqhi
+ MAXACKWINDOW
, end
) ?' ' :'5',
7412 SEQ_GEQ(seq
, src
->seqlo
- MAXACKWINDOW
) ?' ' :'6');
7414 REASON_SET(reason
, PFRES_BADSTATE
);
7418 /* Any packets which have gotten here are to be passed */
7420 if (sk
->app_state
&&
7421 sk
->app_state
->handler
) {
7422 sk
->app_state
->handler(*state
, direction
,
7423 off
+ (th
->th_off
<< 2), pd
, kif
);
7425 REASON_SET(reason
, PFRES_MEMORY
);
7428 pbuf
= pd
->mp
; // XXXSCW: Why?
7431 /* translate source/destination address, if necessary */
7432 if (STATE_TRANSLATE(sk
)) {
7433 pd
->naf
= (pd
->af
== sk
->af_lan
) ? sk
->af_gwy
: sk
->af_lan
;
7435 if (direction
== PF_OUT
) {
7436 pf_change_ap(direction
, pd
->mp
, pd
->src
, &th
->th_sport
,
7437 pd
->ip_sum
, &th
->th_sum
, &sk
->gwy
.addr
,
7438 sk
->gwy
.xport
.port
, 0, pd
->af
, pd
->naf
, 1);
7440 if (pd
->af
!= pd
->naf
) {
7441 if (pd
->af
== sk
->af_gwy
) {
7442 pf_change_ap(direction
, pd
->mp
, pd
->dst
,
7443 &th
->th_dport
, pd
->ip_sum
,
7444 &th
->th_sum
, &sk
->lan
.addr
,
7445 sk
->lan
.xport
.port
, 0,
7446 pd
->af
, pd
->naf
, 0);
7448 pf_change_ap(direction
, pd
->mp
, pd
->src
,
7449 &th
->th_sport
, pd
->ip_sum
,
7450 &th
->th_sum
, &sk
->ext_lan
.addr
,
7451 th
->th_sport
, 0, pd
->af
,
7454 pf_change_ap(direction
, pd
->mp
, pd
->dst
,
7455 &th
->th_dport
, pd
->ip_sum
,
7456 &th
->th_sum
, &sk
->ext_gwy
.addr
,
7457 th
->th_dport
, 0, pd
->af
,
7460 pf_change_ap(direction
, pd
->mp
, pd
->src
,
7461 &th
->th_sport
, pd
->ip_sum
,
7462 &th
->th_sum
, &sk
->gwy
.addr
,
7463 sk
->gwy
.xport
.port
, 0, pd
->af
,
7467 pf_change_ap(direction
, pd
->mp
, pd
->dst
,
7468 &th
->th_dport
, pd
->ip_sum
,
7469 &th
->th_sum
, &sk
->lan
.addr
,
7470 sk
->lan
.xport
.port
, 0, pd
->af
,
7475 copyback
= off
+ sizeof(*th
);
7479 if (pf_lazy_makewritable(pd
, pbuf
, copyback
) == NULL
) {
7480 REASON_SET(reason
, PFRES_MEMORY
);
7484 /* Copyback sequence modulation or stateful scrub changes */
7485 pbuf_copy_back(pbuf
, off
, sizeof(*th
), th
);
7487 if (sk
->af_lan
!= sk
->af_gwy
) {
7488 return pf_do_nat64(sk
, pd
, pbuf
, off
);
static int
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
    pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason)
{
#pragma unused(h)
    struct pf_state_peer *src, *dst;
    struct pf_state_key_cmp key;
    struct pf_state_key *sk;
    struct udphdr *uh = pd->hdr.udp;
    struct pf_app_state as;
    int action, extfilter;

    key.app_state = 0;
    key.proto_variant = PF_EXTFILTER_APD;

    key.proto = IPPROTO_UDP;
    key.af_lan = key.af_gwy = pd->af;

    /*
     * For NAT64 the first time rule search and state creation
     * is done on the incoming side only.
     * Once the state gets created, NAT64's LAN side (ipv6) will
     * not be able to find the state in ext-gwy tree as that normally
     * is intended to be looked up for incoming traffic from the
     * external side.
     * Therefore to handle NAT64 case we init keys here for both
     * lan-ext as well as ext-gwy trees.
     * In the state lookup we attempt a lookup on both trees if
     * first one does not return any result and return a match if
     * the match state's was created by NAT64 rule.
     */
    PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
    PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
    key.ext_gwy.xport.port = uh->uh_sport;
    key.gwy.xport.port = uh->uh_dport;

    PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
    PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
    key.lan.xport.port = uh->uh_sport;
    key.ext_lan.xport.port = uh->uh_dport;

    if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
        ntohs(uh->uh_dport) == PF_IKE_PORT) {
        struct pf_ike_hdr ike;
        size_t plen = pbuf->pb_packet_len - off - sizeof(*uh);

        if (plen < PF_IKE_PACKET_MINSIZE) {
            DPFPRINTF(PF_DEBUG_MISC,
                ("pf: IKE message too small.\n"));
            return PF_DROP;
        }

        if (plen > sizeof(ike)) {
            plen = sizeof(ike);
        }
        pbuf_copy_data(pbuf, off + sizeof(*uh), plen, &ike);

        if (ike.initiator_cookie) {
            key.app_state = &as;
            as.compare_lan_ext = pf_ike_compare;
            as.compare_ext_gwy = pf_ike_compare;
            as.u.ike.cookie = ike.initiator_cookie;
        } else {
            /*
             * <http://tools.ietf.org/html/\
             * draft-ietf-ipsec-nat-t-ike-01>
             * Support non-standard NAT-T implementations that
             * push the ESP packet over the top of the IKE packet.
             * Do not drop packet.
             */
            DPFPRINTF(PF_DEBUG_MISC,
                ("pf: IKE initiator cookie = 0.\n"));
        }
    }

    *state = pf_find_state(kif, &key, direction);

    if (!key.app_state && *state == 0) {
        key.proto_variant = PF_EXTFILTER_AD;
        *state = pf_find_state(kif, &key, direction);
    }

    if (!key.app_state && *state == 0) {
        key.proto_variant = PF_EXTFILTER_EI;
        *state = pf_find_state(kif, &key, direction);
    }

    /* similar to STATE_LOOKUP() */
    if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
        pd->flowsrc = (*state)->state_key->flowsrc;
        pd->flowhash = (*state)->state_key->flowhash;
        if (pd->flowhash != 0) {
            pd->pktflags |= PKTF_FLOW_ID;
            pd->pktflags &= ~PKTF_FLOW_ADV;
        }
    }

    if (pf_state_lookup_aux(state, kif, direction, &action)) {
        return action;
    }

    sk = (*state)->state_key;

    /*
     * In case of NAT64 the translation is first applied on the LAN
     * side. Therefore for stack's address family comparison
     * we use sk->af_lan.
     */
    if ((direction == sk->direction) && (pd->af == sk->af_lan)) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    /* update states */
    if (src->state < PFUDPS_SINGLE) {
        src->state = PFUDPS_SINGLE;
    }
    if (dst->state == PFUDPS_SINGLE) {
        dst->state = PFUDPS_MULTIPLE;
    }

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) {
        (*state)->timeout = PFTM_UDP_MULTIPLE;
    } else {
        (*state)->timeout = PFTM_UDP_SINGLE;
    }

    extfilter = sk->proto_variant;
    if (extfilter > PF_EXTFILTER_APD) {
        if (direction == PF_OUT) {
            sk->ext_lan.xport.port = key.ext_lan.xport.port;
            if (extfilter > PF_EXTFILTER_AD) {
                PF_ACPY(&sk->ext_lan.addr, &key.ext_lan.addr,
                    key.af_lan);
            }
        } else {
            sk->ext_gwy.xport.port = key.ext_gwy.xport.port;
            if (extfilter > PF_EXTFILTER_AD) {
                PF_ACPY(&sk->ext_gwy.addr, &key.ext_gwy.addr,
                    key.af_gwy);
            }
        }
    }

    if (sk->app_state && sk->app_state->handler) {
        sk->app_state->handler(*state, direction, off + uh->uh_ulen,
            pd, kif);
        if (pd->lmw < 0) {
            REASON_SET(reason, PFRES_MEMORY);
            return PF_DROP;
        }
        pbuf = pd->mp; // XXXSCW: Why?
    }

    /* translate source/destination address, if necessary */
    if (STATE_TRANSLATE(sk)) {
        if (pf_lazy_makewritable(pd, pbuf, off + sizeof(*uh)) == NULL) {
            REASON_SET(reason, PFRES_MEMORY);
            return PF_DROP;
        }

        pd->naf = (pd->af == sk->af_lan) ? sk->af_gwy : sk->af_lan;

        if (direction == PF_OUT) {
            pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
                pd->ip_sum, &uh->uh_sum, &sk->gwy.addr,
                sk->gwy.xport.port, 1, pd->af, pd->naf, 1);
        } else {
            if (pd->af != pd->naf) {
                if (pd->af == sk->af_gwy) {
                    pf_change_ap(direction, pd->mp, pd->dst,
                        &uh->uh_dport, pd->ip_sum,
                        &uh->uh_sum, &sk->lan.addr,
                        sk->lan.xport.port, 1,
                        pd->af, pd->naf, 0);

                    pf_change_ap(direction, pd->mp, pd->src,
                        &uh->uh_sport, pd->ip_sum,
                        &uh->uh_sum, &sk->ext_lan.addr,
                        uh->uh_sport, 1, pd->af,
                        pd->naf, 0);
                } else {
                    pf_change_ap(direction, pd->mp, pd->dst,
                        &uh->uh_dport, pd->ip_sum,
                        &uh->uh_sum, &sk->ext_gwy.addr,
                        uh->uh_dport, 1, pd->af,
                        pd->naf, 0);

                    pf_change_ap(direction, pd->mp, pd->src,
                        &uh->uh_sport, pd->ip_sum,
                        &uh->uh_sum, &sk->gwy.addr,
                        sk->gwy.xport.port, 1, pd->af,
                        pd->naf, 0);
                }
            } else {
                pf_change_ap(direction, pd->mp, pd->dst,
                    &uh->uh_dport, pd->ip_sum,
                    &uh->uh_sum, &sk->lan.addr,
                    sk->lan.xport.port, 1,
                    pd->af, pd->naf, 1);
            }
        }

        pbuf_copy_back(pbuf, off, sizeof(*uh), uh);
        if (sk->af_lan != sk->af_gwy) {
            return pf_do_nat64(sk, pd, pbuf, off);
        }
    }
    return PF_PASS;
}
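/*
 * pf_test_state_icmp: state matching for ICMP/ICMPv6.
 * Query/reply messages are keyed on the ICMP id. Error messages are
 * matched on the embedded (offending) packet instead: the inner IP
 * header and the first bytes of its TCP/UDP/ICMP payload are pulled
 * into pd2/h2, the state lookup is done on that inner header, and on
 * a translating state both the outer and inner headers are rewritten.
 */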
static int
pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
    pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason)
{
#pragma unused(h)
    struct pf_addr *saddr = pd->src, *daddr = pd->dst;
    struct in_addr srcv4_inaddr = saddr->v4addr;
    u_int16_t icmpid = 0, *icmpsum = NULL;
    u_int8_t icmptype = 0;
    int state_icmp = 0;
    struct pf_state_key_cmp key;
    struct pf_state_key *sk;

    struct pf_app_state as;

    key.app_state = 0;

    switch (pd->proto) {
    case IPPROTO_ICMP:
        icmptype = pd->hdr.icmp->icmp_type;
        icmpid = pd->hdr.icmp->icmp_id;
        icmpsum = &pd->hdr.icmp->icmp_cksum;

        if (ICMP_ERRORTYPE(icmptype)) {
            state_icmp++;
        }
        break;
    case IPPROTO_ICMPV6:
        icmptype = pd->hdr.icmp6->icmp6_type;
        icmpid = pd->hdr.icmp6->icmp6_id;
        icmpsum = &pd->hdr.icmp6->icmp6_cksum;

        if (ICMP6_ERRORTYPE(icmptype)) {
            state_icmp++;
        }
        break;
    }

    if (!state_icmp) {
        /*
         * ICMP query/reply message not related to a TCP/UDP packet.
         * Search for an ICMP state.
         */
        /*
         * NAT64 requires protocol translation between ICMPv4
         * and ICMPv6. TCP and UDP do not require protocol
         * translation. To avoid adding complexity just to
         * handle ICMP(v4addr/v6addr), we always lookup for
         * proto = IPPROTO_ICMP on both LAN and WAN side
         */
        key.proto = IPPROTO_ICMP;
        key.af_lan = key.af_gwy = pd->af;

        PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
        key.ext_gwy.xport.port = 0;
        key.gwy.xport.port = icmpid;

        PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
        PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
        key.lan.xport.port = icmpid;
        key.ext_lan.xport.port = 0;

        STATE_LOOKUP();

        sk = (*state)->state_key;
        (*state)->expire = pf_time_second();
        (*state)->timeout = PFTM_ICMP_ERROR_REPLY;

        /* translate source/destination address, if necessary */
        if (STATE_TRANSLATE(sk)) {
            pd->naf = (pd->af == sk->af_lan) ?
                sk->af_gwy : sk->af_lan;
            if (direction == PF_OUT) {
                switch (pd->af) {
                case AF_INET:
                    pf_change_a(&saddr->v4addr.s_addr,
                        pd->ip_sum,
                        sk->gwy.addr.v4addr.s_addr, 0);
                    pd->hdr.icmp->icmp_cksum =
                        pf_cksum_fixup(
                        pd->hdr.icmp->icmp_cksum, icmpid,
                        sk->gwy.xport.port, 0);
                    pd->hdr.icmp->icmp_id =
                        sk->gwy.xport.port;
                    if (pf_lazy_makewritable(pd, pbuf,
                        off + ICMP_MINLEN) == NULL) {
                        return PF_DROP;
                    }
                    pbuf_copy_back(pbuf, off, ICMP_MINLEN,
                        pd->hdr.icmp);
                    break;
                case AF_INET6:
                    pf_change_a6(saddr,
                        &pd->hdr.icmp6->icmp6_cksum,
                        &sk->gwy.addr, 0);
                    if (pf_lazy_makewritable(pd, pbuf,
                        off + sizeof(struct icmp6_hdr)) ==
                        NULL) {
                        return PF_DROP;
                    }
                    pbuf_copy_back(pbuf, off,
                        sizeof(struct icmp6_hdr),
                        pd->hdr.icmp6);
                    break;
                }
            } else {
                switch (pd->af) {
                case AF_INET:
                    if (pd->naf != AF_INET) {
                        if (pf_translate_icmp_af(
                            AF_INET6, pd->hdr.icmp)) {
                            return PF_DROP;
                        }
                        pd->proto = IPPROTO_ICMPV6;
                    } else {
                        pf_change_a(&daddr->v4addr.s_addr,
                            pd->ip_sum,
                            sk->lan.addr.v4addr.s_addr, 0);
                        pd->hdr.icmp->icmp_cksum =
                            pf_cksum_fixup(
                            pd->hdr.icmp->icmp_cksum,
                            icmpid, sk->lan.xport.port, 0);
                        pd->hdr.icmp->icmp_id =
                            sk->lan.xport.port;
                    }
                    if (pf_lazy_makewritable(pd, pbuf,
                        off + ICMP_MINLEN) == NULL) {
                        return PF_DROP;
                    }
                    pbuf_copy_back(pbuf, off, ICMP_MINLEN,
                        pd->hdr.icmp);
                    if (sk->af_lan != sk->af_gwy) {
                        return pf_do_nat64(sk, pd,
                            pbuf, off);
                    }
                    break;
                case AF_INET6:
                    if (pd->naf != AF_INET6) {
                        if (pf_translate_icmp_af(
                            AF_INET, pd->hdr.icmp6)) {
                            return PF_DROP;
                        }
                        pd->proto = IPPROTO_ICMP;
                    } else {
                        pf_change_a6(daddr,
                            &pd->hdr.icmp6->icmp6_cksum,
                            &sk->lan.addr, 0);
                    }
                    if (pf_lazy_makewritable(pd, pbuf,
                        off + sizeof(struct icmp6_hdr)) ==
                        NULL) {
                        return PF_DROP;
                    }
                    pbuf_copy_back(pbuf, off,
                        sizeof(struct icmp6_hdr),
                        pd->hdr.icmp6);
                    if (sk->af_lan != sk->af_gwy) {
                        return pf_do_nat64(sk, pd,
                            pbuf, off);
                    }
                    break;
                }
            }
        }

        return PF_PASS;
    } else {
        /*
         * ICMP error message in response to a TCP/UDP packet.
         * Extract the inner TCP/UDP header and search for that state.
         */
        struct pf_pdesc pd2; /* For inner (original) header */
        struct ip h2;
        struct ip6_hdr h2_6;
        int terminal = 0;
        int ipoff2 = 0;
        int off2 = 0;

        memset(&pd2, 0, sizeof(pd2));

        pd2.af = pd->af;
        switch (pd->af) {
        case AF_INET:
            /* offset of h2 in mbuf chain */
            ipoff2 = off + ICMP_MINLEN;

            if (!pf_pull_hdr(pbuf, ipoff2, &h2, sizeof(h2),
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "
                    "(ip)\n"));
                return PF_DROP;
            }
            /*
             * ICMP error messages don't refer to non-first
             * fragments
             */
            if (h2.ip_off & htons(IP_OFFMASK)) {
                REASON_SET(reason, PFRES_FRAG);
                return PF_DROP;
            }

            /* offset of protocol header that follows h2 */
            off2 = ipoff2 + (h2.ip_hl << 2);

            pd2.off = ipoff2 + (h2.ip_hl << 2);

            pd2.proto = h2.ip_p;
            pd2.src = (struct pf_addr *)&h2.ip_src;
            pd2.dst = (struct pf_addr *)&h2.ip_dst;
            pd2.ip_sum = &h2.ip_sum;
            break;
        case AF_INET6:
            ipoff2 = off + sizeof(struct icmp6_hdr);

            if (!pf_pull_hdr(pbuf, ipoff2, &h2_6, sizeof(h2_6),
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "
                    "(ip6)\n"));
                return PF_DROP;
            }
            pd2.proto = h2_6.ip6_nxt;
            pd2.src = (struct pf_addr *)(uintptr_t)&h2_6.ip6_src;
            pd2.dst = (struct pf_addr *)(uintptr_t)&h2_6.ip6_dst;
            pd2.ip_sum = NULL;
            off2 = ipoff2 + sizeof(h2_6);
            do {
                switch (pd2.proto) {
                case IPPROTO_FRAGMENT:
                    /*
                     * ICMPv6 error messages for
                     * non-first fragments
                     */
                    REASON_SET(reason, PFRES_FRAG);
                    return PF_DROP;
                case IPPROTO_AH:
                case IPPROTO_HOPOPTS:
                case IPPROTO_ROUTING:
                case IPPROTO_DSTOPTS: {
                    /* get next header and header length */
                    struct ip6_ext opt6;

                    if (!pf_pull_hdr(pbuf, off2, &opt6,
                        sizeof(opt6), NULL, reason,
                        pd2.af)) {
                        DPFPRINTF(PF_DEBUG_MISC,
                            ("pf: ICMPv6 short opt\n"));
                        return PF_DROP;
                    }
                    if (pd2.proto == IPPROTO_AH) {
                        off2 += (opt6.ip6e_len + 2) * 4;
                    } else {
                        off2 += (opt6.ip6e_len + 1) * 8;
                    }
                    pd2.proto = opt6.ip6e_nxt;
                    /* goto the next header */
                    break;
                }
                default:
                    terminal++;
                    break;
                }
            } while (!terminal);
            break;
        }

        switch (pd2.proto) {
        case IPPROTO_TCP: {
            struct tcphdr th;
            u_int32_t seq;
            struct pf_state_peer *src, *dst;
            u_int8_t dws;
            int copyback = 0;

            /*
             * Only the first 8 bytes of the TCP header can be
             * expected. Don't access any TCP header fields after
             * th_seq, an ackskew test is not possible.
             */
            if (!pf_pull_hdr(pbuf, off2, &th, 8, NULL, reason,
                pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "
                    "(tcp)\n"));
                return PF_DROP;
            }

            key.proto = IPPROTO_TCP;
            key.af_gwy = pd2.af;
            PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
            PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
            key.ext_gwy.xport.port = th.th_dport;
            key.gwy.xport.port = th.th_sport;

            key.af_lan = pd2.af;
            PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
            PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
            key.lan.xport.port = th.th_dport;
            key.ext_lan.xport.port = th.th_sport;

            STATE_LOOKUP();

            sk = (*state)->state_key;
            if ((direction == sk->direction) &&
                ((sk->af_lan == sk->af_gwy) ||
                (pd2.af == sk->af_lan))) {
                src = &(*state)->dst;
                dst = &(*state)->src;
            } else {
                src = &(*state)->src;
                dst = &(*state)->dst;
            }

            if (src->wscale && (dst->wscale & PF_WSCALE_FLAG)) {
                dws = dst->wscale & PF_WSCALE_MASK;
            } else {
                dws = TCP_MAX_WINSHIFT;
            }

            /* Demodulate sequence number */
            seq = ntohl(th.th_seq) - src->seqdiff;
            if (src->seqdiff) {
                pf_change_a(&th.th_seq, icmpsum,
                    htonl(seq), 0);
                copyback = 1;
            }

            if (!SEQ_GEQ(src->seqhi, seq) ||
                !SEQ_GEQ(seq,
                src->seqlo - ((u_int32_t)dst->max_win << dws))) {
                if (pf_status.debug >= PF_DEBUG_MISC) {
                    printf("pf: BAD ICMP %d:%d ",
                        icmptype, pd->hdr.icmp->icmp_code);
                    pf_print_host(pd->src, 0, pd->af);
                    printf(" -> ");
                    pf_print_host(pd->dst, 0, pd->af);
                    printf(" state: ");
                    pf_print_state(*state);
                    printf(" seq=%u\n", seq);
                }
                REASON_SET(reason, PFRES_BADSTATE);
                return PF_DROP;
            }

            pd->naf = pd2.naf = (pd2.af == sk->af_lan) ?
                sk->af_gwy : sk->af_lan;

            if (STATE_TRANSLATE(sk)) {
                /* NAT64 case */
                if (sk->af_lan != sk->af_gwy) {
                    struct pf_state_host *saddr2, *daddr2;

                    if (pd2.naf == sk->af_lan) {
                        saddr2 = &sk->lan;
                        daddr2 = &sk->ext_lan;
                    } else {
                        saddr2 = &sk->ext_gwy;
                        daddr2 = &sk->gwy;
                    }

                    /* translate ICMP message types and codes */
                    if (pf_translate_icmp_af(pd->naf,
                        pd->hdr.icmp)) {
                        return PF_DROP;
                    }

                    if (pf_lazy_makewritable(pd, pbuf,
                        off2 + 8) == NULL) {
                        return PF_DROP;
                    }

                    pbuf_copy_back(pbuf, pd->off,
                        sizeof(struct icmp6_hdr),
                        pd->hdr.icmp6);

                    /*
                     * translate inner ip header within the
                     * ICMP message
                     */
                    if (pf_change_icmp_af(pbuf, ipoff2, pd,
                        &pd2, &saddr2->addr, &daddr2->addr,
                        pd->af, pd->naf)) {
                        return PF_DROP;
                    }

                    if (pd->naf == AF_INET) {
                        pd->proto = IPPROTO_ICMP;
                    } else {
                        pd->proto = IPPROTO_ICMPV6;
                    }

                    /*
                     * translate inner tcp header within
                     * the ICMP message
                     */
                    pf_change_ap(direction, NULL, pd2.src,
                        &th.th_sport, pd2.ip_sum,
                        &th.th_sum, &daddr2->addr,
                        saddr2->xport.port, 0, pd2.af,
                        pd2.naf, 0);

                    pf_change_ap(direction, NULL, pd2.dst,
                        &th.th_dport, pd2.ip_sum,
                        &th.th_sum, &saddr2->addr,
                        daddr2->xport.port, 0, pd2.af,
                        pd2.naf, 0);

                    pbuf_copy_back(pbuf, pd2.off, 8, &th);

                    /* translate outer ip header */
                    PF_ACPY(&pd->naddr, &daddr2->addr,
                        pd->naf);
                    PF_ACPY(&pd->ndaddr, &saddr2->addr,
                        pd->naf);
                    if (pd->af == AF_INET) {
                        memcpy(&pd->naddr.addr32[3],
                            &srcv4_inaddr,
                            sizeof(pd->naddr.addr32[3]));
                        return pf_nat64_ipv4(pbuf, off,
                            pd);
                    } else {
                        return pf_nat64_ipv6(pbuf, off,
                            pd);
                    }
                }
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &th.th_sport,
                        daddr, &sk->lan.addr,
                        sk->lan.xport.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                } else {
                    pf_change_icmp(pd2.dst, &th.th_dport,
                        saddr, &sk->gwy.addr,
                        sk->gwy.xport.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                }
                copyback = 1;
            }

            if (copyback) {
                if (pf_lazy_makewritable(pd, pbuf, off2 + 8) ==
                    NULL) {
                    return PF_DROP;
                }
                switch (pd2.af) {
                case AF_INET:
                    pbuf_copy_back(pbuf, off, ICMP_MINLEN,
                        pd->hdr.icmp);
                    pbuf_copy_back(pbuf, ipoff2, sizeof(h2),
                        &h2);
                    break;
                case AF_INET6:
                    pbuf_copy_back(pbuf, off,
                        sizeof(struct icmp6_hdr),
                        pd->hdr.icmp6);
                    pbuf_copy_back(pbuf, ipoff2,
                        sizeof(h2_6), &h2_6);
                    break;
                }
                pbuf_copy_back(pbuf, off2, 8, &th);
            }

            return PF_PASS;
        }
        case IPPROTO_UDP: {
            struct udphdr uh;
            int dx, action;

            if (!pf_pull_hdr(pbuf, off2, &uh, sizeof(uh),
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "
                    "(udp)\n"));
                return PF_DROP;
            }

            key.af_gwy = pd2.af;
            PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
            PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
            key.ext_gwy.xport.port = uh.uh_dport;
            key.gwy.xport.port = uh.uh_sport;

            key.af_lan = pd2.af;
            PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
            PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
            key.lan.xport.port = uh.uh_dport;
            key.ext_lan.xport.port = uh.uh_sport;

            key.proto = IPPROTO_UDP;
            key.proto_variant = PF_EXTFILTER_APD;
            dx = direction;

            if (ntohs(uh.uh_sport) == PF_IKE_PORT &&
                ntohs(uh.uh_dport) == PF_IKE_PORT) {
                struct pf_ike_hdr ike;
                size_t plen = pbuf->pb_packet_len - off2 -
                    sizeof(uh);

                if (direction == PF_IN &&
                    plen < 8 /* PF_IKE_PACKET_MINSIZE */) {
                    DPFPRINTF(PF_DEBUG_MISC, ("pf: "
                        "ICMP error, embedded IKE message "
                        "too small.\n"));
                    return PF_DROP;
                }

                if (plen > sizeof(ike)) {
                    plen = sizeof(ike);
                }
                pbuf_copy_data(pbuf, off + sizeof(uh), plen,
                    &ike);

                key.app_state = &as;
                as.compare_lan_ext = pf_ike_compare;
                as.compare_ext_gwy = pf_ike_compare;
                as.u.ike.cookie = ike.initiator_cookie;
            }

            *state = pf_find_state(kif, &key, dx);

            if (key.app_state && *state == 0) {
                key.app_state = 0;
                *state = pf_find_state(kif, &key, dx);
            }

            if (*state == 0) {
                key.proto_variant = PF_EXTFILTER_AD;
                *state = pf_find_state(kif, &key, dx);
            }

            if (*state == 0) {
                key.proto_variant = PF_EXTFILTER_EI;
                *state = pf_find_state(kif, &key, dx);
            }

            /* similar to STATE_LOOKUP() */
            if (*state != NULL && pd != NULL &&
                !(pd->pktflags & PKTF_FLOW_ID)) {
                pd->flowsrc = (*state)->state_key->flowsrc;
                pd->flowhash = (*state)->state_key->flowhash;
                if (pd->flowhash != 0) {
                    pd->pktflags |= PKTF_FLOW_ID;
                    pd->pktflags &= ~PKTF_FLOW_ADV;
                }
            }

            if (pf_state_lookup_aux(state, kif, direction, &action)) {
                return action;
            }

            sk = (*state)->state_key;
            pd->naf = pd2.naf = (pd2.af == sk->af_lan) ?
                sk->af_gwy : sk->af_lan;

            if (STATE_TRANSLATE(sk)) {
                /* NAT64 case */
                if (sk->af_lan != sk->af_gwy) {
                    struct pf_state_host *saddr2, *daddr2;

                    if (pd2.naf == sk->af_lan) {
                        saddr2 = &sk->lan;
                        daddr2 = &sk->ext_lan;
                    } else {
                        saddr2 = &sk->ext_gwy;
                        daddr2 = &sk->gwy;
                    }

                    /* translate ICMP message */
                    if (pf_translate_icmp_af(pd->naf,
                        pd->hdr.icmp)) {
                        return PF_DROP;
                    }
                    if (pf_lazy_makewritable(pd, pbuf,
                        off2 + 8) == NULL) {
                        return PF_DROP;
                    }

                    pbuf_copy_back(pbuf, pd->off,
                        sizeof(struct icmp6_hdr),
                        pd->hdr.icmp6);

                    /*
                     * translate inner ip header within the
                     * ICMP message
                     */
                    if (pf_change_icmp_af(pbuf, ipoff2, pd,
                        &pd2, &saddr2->addr, &daddr2->addr,
                        pd->af, pd->naf)) {
                        return PF_DROP;
                    }

                    if (pd->naf == AF_INET) {
                        pd->proto = IPPROTO_ICMP;
                    } else {
                        pd->proto = IPPROTO_ICMPV6;
                    }

                    /*
                     * translate inner udp header within
                     * the ICMP message
                     */
                    pf_change_ap(direction, NULL, pd2.src,
                        &uh.uh_sport, pd2.ip_sum,
                        &uh.uh_sum, &daddr2->addr,
                        saddr2->xport.port, 0, pd2.af,
                        pd2.naf, 0);

                    pf_change_ap(direction, NULL, pd2.dst,
                        &uh.uh_dport, pd2.ip_sum,
                        &uh.uh_sum, &saddr2->addr,
                        daddr2->xport.port, 0, pd2.af,
                        pd2.naf, 0);

                    pbuf_copy_back(pbuf, pd2.off,
                        sizeof(uh), &uh);

                    /* translate outer ip header */
                    PF_ACPY(&pd->naddr, &daddr2->addr,
                        pd->naf);
                    PF_ACPY(&pd->ndaddr, &saddr2->addr,
                        pd->naf);
                    if (pd->af == AF_INET) {
                        memcpy(&pd->naddr.addr32[3],
                            &srcv4_inaddr,
                            sizeof(pd->naddr.addr32[3]));
                        return pf_nat64_ipv4(pbuf, off,
                            pd);
                    } else {
                        return pf_nat64_ipv6(pbuf, off,
                            pd);
                    }
                }
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &uh.uh_sport,
                        daddr, &sk->lan.addr,
                        sk->lan.xport.port, &uh.uh_sum,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 1, pd2.af);
                } else {
                    pf_change_icmp(pd2.dst, &uh.uh_dport,
                        saddr, &sk->gwy.addr,
                        sk->gwy.xport.port, &uh.uh_sum,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 1, pd2.af);
                }
                if (pf_lazy_makewritable(pd, pbuf,
                    off2 + sizeof(uh)) == NULL) {
                    return PF_DROP;
                }
                switch (pd2.af) {
                case AF_INET:
                    pbuf_copy_back(pbuf, off, ICMP_MINLEN,
                        pd->hdr.icmp);
                    pbuf_copy_back(pbuf, ipoff2,
                        sizeof(h2), &h2);
                    break;
                case AF_INET6:
                    pbuf_copy_back(pbuf, off,
                        sizeof(struct icmp6_hdr),
                        pd->hdr.icmp6);
                    pbuf_copy_back(pbuf, ipoff2,
                        sizeof(h2_6), &h2_6);
                    break;
                }
                pbuf_copy_back(pbuf, off2, sizeof(uh), &uh);
            }

            return PF_PASS;
        }
        case IPPROTO_ICMP: {
            struct icmp iih;

            if (!pf_pull_hdr(pbuf, off2, &iih, ICMP_MINLEN,
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short i"
                    "(icmp)\n"));
                return PF_DROP;
            }

            key.proto = IPPROTO_ICMP;
            if (direction == PF_IN) {
                key.af_gwy = pd2.af;
                PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
                key.ext_gwy.xport.port = 0;
                key.gwy.xport.port = iih.icmp_id;
            } else {
                key.af_lan = pd2.af;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
                PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
                key.lan.xport.port = iih.icmp_id;
                key.ext_lan.xport.port = 0;
            }

            STATE_LOOKUP();

            sk = (*state)->state_key;
            if (STATE_TRANSLATE(sk)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &iih.icmp_id,
                        daddr, &sk->lan.addr,
                        sk->lan.xport.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET);
                } else {
                    pf_change_icmp(pd2.dst, &iih.icmp_id,
                        saddr, &sk->gwy.addr,
                        sk->gwy.xport.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET);
                }
                if (pf_lazy_makewritable(pd, pbuf,
                    off2 + ICMP_MINLEN) == NULL) {
                    return PF_DROP;
                }
                pbuf_copy_back(pbuf, off, ICMP_MINLEN,
                    pd->hdr.icmp);
                pbuf_copy_back(pbuf, ipoff2, sizeof(h2), &h2);
                pbuf_copy_back(pbuf, off2, ICMP_MINLEN, &iih);
            }

            return PF_PASS;
        }
        case IPPROTO_ICMPV6: {
            struct icmp6_hdr iih;

            if (!pf_pull_hdr(pbuf, off2, &iih,
                sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "
                    "(icmp6)\n"));
                return PF_DROP;
            }

            key.proto = IPPROTO_ICMPV6;
            if (direction == PF_IN) {
                key.af_gwy = pd2.af;
                PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
                key.ext_gwy.xport.port = 0;
                key.gwy.xport.port = iih.icmp6_id;
            } else {
                key.af_lan = pd2.af;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
                PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
                key.lan.xport.port = iih.icmp6_id;
                key.ext_lan.xport.port = 0;
            }

            STATE_LOOKUP();

            sk = (*state)->state_key;
            if (STATE_TRANSLATE(sk)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &iih.icmp6_id,
                        daddr, &sk->lan.addr,
                        sk->lan.xport.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET6);
                } else {
                    pf_change_icmp(pd2.dst, &iih.icmp6_id,
                        saddr, &sk->gwy.addr,
                        sk->gwy.xport.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET6);
                }
                if (pf_lazy_makewritable(pd, pbuf, off2 +
                    sizeof(struct icmp6_hdr)) == NULL) {
                    return PF_DROP;
                }
                pbuf_copy_back(pbuf, off,
                    sizeof(struct icmp6_hdr), pd->hdr.icmp6);
                pbuf_copy_back(pbuf, ipoff2, sizeof(h2_6),
                    &h2_6);
                pbuf_copy_back(pbuf, off2,
                    sizeof(struct icmp6_hdr), &iih);
            }

            return PF_PASS;
        }
        default: {
            key.proto = pd2.proto;
            if (direction == PF_IN) {
                key.af_gwy = pd2.af;
                PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
                key.ext_gwy.xport.port = 0;
                key.gwy.xport.port = 0;
            } else {
                key.af_lan = pd2.af;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
                PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
                key.lan.xport.port = 0;
                key.ext_lan.xport.port = 0;
            }

            STATE_LOOKUP();

            sk = (*state)->state_key;
            if (STATE_TRANSLATE(sk)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, NULL, daddr,
                        &sk->lan.addr, 0, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                } else {
                    pf_change_icmp(pd2.dst, NULL, saddr,
                        &sk->gwy.addr, 0, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                }
                switch (pd2.af) {
                case AF_INET:
                    if (pf_lazy_makewritable(pd, pbuf,
                        ipoff2 + sizeof(h2)) == NULL) {
                        return PF_DROP;
                    }
                    /*
                     * <XXXSCW>
                     * Xnu was missing the following...
                     */
                    pbuf_copy_back(pbuf, off, ICMP_MINLEN,
                        pd->hdr.icmp);
                    pbuf_copy_back(pbuf, ipoff2,
                        sizeof(h2), &h2);
                    break;
                case AF_INET6:
                    if (pf_lazy_makewritable(pd, pbuf,
                        ipoff2 + sizeof(h2_6)) == NULL) {
                        return PF_DROP;
                    }
                    pbuf_copy_back(pbuf, off,
                        sizeof(struct icmp6_hdr),
                        pd->hdr.icmp6);
                    pbuf_copy_back(pbuf, ipoff2,
                        sizeof(h2_6), &h2_6);
                    break;
                }
            }

            return PF_PASS;
        }
        }
    }
}
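/*
 * pf_test_state_grev1: GREv1 (PPTP) state matching. States are keyed
 * on the GRE call-id rather than on ports; on the inbound side the
 * call-id is rewritten back to the LAN endpoint's value.
 */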
static int
pf_test_state_grev1(struct pf_state **state, int direction,
    struct pfi_kif *kif, int off, struct pf_pdesc *pd)
{
    struct pf_state_peer *src;
    struct pf_state_peer *dst;
    struct pf_state_key_cmp key;
    struct pf_grev1_hdr *grev1 = pd->hdr.grev1;

    key.app_state = 0;
    key.proto = IPPROTO_GRE;
    key.proto_variant = PF_GRE_PPTP_VARIANT;
    if (direction == PF_IN) {
        key.af_gwy = pd->af;
        PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
        key.gwy.xport.call_id = grev1->call_id;
    } else {
        key.af_lan = pd->af;
        PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
        PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
        key.ext_lan.xport.call_id = grev1->call_id;
    }

    STATE_LOOKUP();

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    /* update states */
    if (src->state < PFGRE1S_INITIATING) {
        src->state = PFGRE1S_INITIATING;
    }

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state >= PFGRE1S_INITIATING &&
        dst->state >= PFGRE1S_INITIATING) {
        if ((*state)->timeout != PFTM_TCP_ESTABLISHED) {
            (*state)->timeout = PFTM_GREv1_ESTABLISHED;
        }
        src->state = PFGRE1S_ESTABLISHED;
        dst->state = PFGRE1S_ESTABLISHED;
    } else {
        (*state)->timeout = PFTM_GREv1_INITIATING;
    }

    if ((*state)->state_key->app_state) {
        (*state)->state_key->app_state->u.grev1.pptp_state->expire =
            pf_time_second();
    }

    /* translate source/destination address, if necessary */
    if (STATE_GRE_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT) {
            switch (pd->af) {
            case AF_INET:
                pf_change_a(&pd->src->v4addr.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->gwy.addr.v4addr.s_addr, 0);
                break;
            case AF_INET6:
                PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
                    pd->af);
                break;
            }
        } else {
            grev1->call_id = (*state)->state_key->lan.xport.call_id;

            switch (pd->af) {
            case AF_INET:
                pf_change_a(&pd->dst->v4addr.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->lan.addr.v4addr.s_addr, 0);
                break;
            case AF_INET6:
                PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
                    pd->af);
                break;
            }
        }

        if (pf_lazy_makewritable(pd, pd->mp, off + sizeof(*grev1)) ==
            NULL) {
            return PF_DROP;
        }
        pbuf_copy_back(pd->mp, off, sizeof(*grev1), grev1);
    }

    return PF_PASS;
}
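/*
 * pf_test_state_esp: ESP states are keyed on the SPI. When no state
 * matches, a "blocking" state keyed to SPI 0 is searched for; if one
 * exists it is re-keyed in the corresponding RB tree to the SPI of
 * the current packet, and torn down instead if the re-insert collides
 * with an existing entry.
 */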
static int
pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
    int off, struct pf_pdesc *pd)
{
#pragma unused(off)
    struct pf_state_peer *src;
    struct pf_state_peer *dst;
    struct pf_state_key_cmp key;
    struct pf_esp_hdr *esp = pd->hdr.esp;
    int action;

    memset(&key, 0, sizeof(key));
    key.proto = IPPROTO_ESP;
    if (direction == PF_IN) {
        key.af_gwy = pd->af;
        PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
        key.gwy.xport.spi = esp->spi;
    } else {
        key.af_lan = pd->af;
        PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
        PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
        key.ext_lan.xport.spi = esp->spi;
    }

    *state = pf_find_state(kif, &key, direction);

    if (*state == 0) {
        struct pf_state *s;

        /*
         * No matching state. Look for a blocking state. If we find
         * one, then use that state and move it so that it's keyed to
         * the SPI in the current packet.
         */
        if (direction == PF_IN) {
            key.gwy.xport.spi = 0;

            s = pf_find_state(kif, &key, direction);
            if (s != NULL) {
                struct pf_state_key *sk = s->state_key;

                RB_REMOVE(pf_state_tree_ext_gwy,
                    &pf_statetbl_ext_gwy, sk);
                sk->lan.xport.spi = sk->gwy.xport.spi =
                    esp->spi;

                if (RB_INSERT(pf_state_tree_ext_gwy,
                    &pf_statetbl_ext_gwy, sk)) {
                    pf_detach_state(s, PF_DT_SKIP_EXTGWY);
                } else {
                    *state = s;
                }
            }
        } else {
            key.ext_lan.xport.spi = 0;

            s = pf_find_state(kif, &key, direction);
            if (s != NULL) {
                struct pf_state_key *sk = s->state_key;

                RB_REMOVE(pf_state_tree_lan_ext,
                    &pf_statetbl_lan_ext, sk);
                sk->ext_lan.xport.spi = esp->spi;

                if (RB_INSERT(pf_state_tree_lan_ext,
                    &pf_statetbl_lan_ext, sk)) {
                    pf_detach_state(s, PF_DT_SKIP_LANEXT);
                } else {
                    *state = s;
                }
            }
        }

        if (s != NULL) {
            if (*state == 0) {
#if NPFSYNC
                if (s->creatorid == pf_status.hostid) {
                    pfsync_delete_state(s);
                }
#endif /* NPFSYNC */
                s->timeout = PFTM_UNLINKED;
                hook_runloop(&s->unlink_hooks,
                    HOOK_REMOVE | HOOK_FREE);
                pf_src_tree_remove_state(s);
                pf_free_state(s);
                return PF_DROP;
            }
        }
    }

    /* similar to STATE_LOOKUP() */
    if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
        pd->flowsrc = (*state)->state_key->flowsrc;
        pd->flowhash = (*state)->state_key->flowhash;
        if (pd->flowhash != 0) {
            pd->pktflags |= PKTF_FLOW_ID;
            pd->pktflags &= ~PKTF_FLOW_ADV;
        }
    }

    if (pf_state_lookup_aux(state, kif, direction, &action)) {
        return action;
    }

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    /* update states */
    if (src->state < PFESPS_INITIATING) {
        src->state = PFESPS_INITIATING;
    }

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state >= PFESPS_INITIATING &&
        dst->state >= PFESPS_INITIATING) {
        (*state)->timeout = PFTM_ESP_ESTABLISHED;
        src->state = PFESPS_ESTABLISHED;
        dst->state = PFESPS_ESTABLISHED;
    } else {
        (*state)->timeout = PFTM_ESP_INITIATING;
    }

    /* translate source/destination address, if necessary */
    if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT) {
            switch (pd->af) {
            case AF_INET:
                pf_change_a(&pd->src->v4addr.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->gwy.addr.v4addr.s_addr, 0);
                break;
            case AF_INET6:
                PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
                    pd->af);
                break;
            }
        } else {
            switch (pd->af) {
            case AF_INET:
                pf_change_a(&pd->dst->v4addr.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->lan.addr.v4addr.s_addr, 0);
                break;
            case AF_INET6:
                PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
                    pd->af);
                break;
            }
        }
    }

    return PF_PASS;
}
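/*
 * pf_test_state_other: fallback state matcher for protocols that have
 * no dedicated tracker; states are keyed on addresses only.
 */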
static int
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct pf_pdesc *pd)
{
    struct pf_state_peer *src, *dst;
    struct pf_state_key_cmp key;

    key.app_state = 0;
    key.proto = pd->proto;
    if (direction == PF_IN) {
        key.af_gwy = pd->af;
        PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
        key.ext_gwy.xport.port = 0;
        key.gwy.xport.port = 0;
    } else {
        key.af_lan = pd->af;
        PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
        PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
        key.lan.xport.port = 0;
        key.ext_lan.xport.port = 0;
    }

    STATE_LOOKUP();

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    /* update states */
    if (src->state < PFOTHERS_SINGLE) {
        src->state = PFOTHERS_SINGLE;
    }
    if (dst->state == PFOTHERS_SINGLE) {
        dst->state = PFOTHERS_MULTIPLE;
    }

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) {
        (*state)->timeout = PFTM_OTHER_MULTIPLE;
    } else {
        (*state)->timeout = PFTM_OTHER_SINGLE;
    }

    /* translate source/destination address, if necessary */
    if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT) {
            switch (pd->af) {
            case AF_INET:
                pf_change_a(&pd->src->v4addr.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->gwy.addr.v4addr.s_addr,
                    0);
                break;
            case AF_INET6:
                PF_ACPY(pd->src,
                    &(*state)->state_key->gwy.addr, pd->af);
                break;
            }
        } else {
            switch (pd->af) {
            case AF_INET:
                pf_change_a(&pd->dst->v4addr.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->lan.addr.v4addr.s_addr,
                    0);
                break;
            case AF_INET6:
                PF_ACPY(pd->dst,
                    &(*state)->state_key->lan.addr, pd->af);
                break;
            }
        }
    }

    return PF_PASS;
}
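/*
 * pf_pull_hdr returns p on success and NULL on failure, reporting the
 * outcome through *actionp/*reasonp so that callers can distinguish
 * "let the fragment pass" from "drop short packet", e.g.:
 *
 *	struct tcphdr th;
 *	if (!pf_pull_hdr(pbuf, off, &th, sizeof (th), &action, &reason, af))
 *		goto done;
 */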
/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
void *
pf_pull_hdr(pbuf_t *pbuf, int off, void *p, int len,
    u_short *actionp, u_short *reasonp, sa_family_t af)
{
    switch (af) {
    case AF_INET: {
        struct ip *h = pbuf->pb_data;
        u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

        if (fragoff) {
            if (fragoff >= len) {
                ACTION_SET(actionp, PF_PASS);
            } else {
                ACTION_SET(actionp, PF_DROP);
                REASON_SET(reasonp, PFRES_FRAG);
            }
            return NULL;
        }
        if (pbuf->pb_packet_len < (unsigned)(off + len) ||
            ntohs(h->ip_len) < off + len) {
            ACTION_SET(actionp, PF_DROP);
            REASON_SET(reasonp, PFRES_SHORT);
            return NULL;
        }
        break;
    }
    case AF_INET6: {
        struct ip6_hdr *h = pbuf->pb_data;

        if (pbuf->pb_packet_len < (unsigned)(off + len) ||
            (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
            (unsigned)(off + len)) {
            ACTION_SET(actionp, PF_DROP);
            REASON_SET(reasonp, PFRES_SHORT);
            return NULL;
        }
        break;
    }
    }
    pbuf_copy_data(pbuf, off, len, p);
    return p;
}
int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
{
    struct sockaddr_in *dst;
    int ret = 1;
    struct sockaddr_in6 *dst6;
    struct route_in6 ro;

    bzero(&ro, sizeof(ro));
    switch (af) {
    case AF_INET:
        dst = satosin(&ro.ro_dst);
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof(*dst);
        dst->sin_addr = addr->v4addr;
        break;
    case AF_INET6:
        dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
        dst6->sin6_family = AF_INET6;
        dst6->sin6_len = sizeof(*dst6);
        dst6->sin6_addr = addr->v6addr;
        break;
    default:
        return 0;
    }

    /* XXX: IFT_ENC is not currently used by anything*/
    /* Skip checks for ipsec interfaces */
    if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) {
        goto out;
    }

    /* XXX: what is the point of this? */
    rtalloc((struct route *)&ro);

out:
    ROUTE_RELEASE(&ro);
    return ret;
}
int
pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
{
#pragma unused(aw)
    struct sockaddr_in *dst;
    struct sockaddr_in6 *dst6;
    struct route_in6 ro;
    int ret = 0;

    bzero(&ro, sizeof(ro));
    switch (af) {
    case AF_INET:
        dst = satosin(&ro.ro_dst);
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof(*dst);
        dst->sin_addr = addr->v4addr;
        break;
    case AF_INET6:
        dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
        dst6->sin6_family = AF_INET6;
        dst6->sin6_len = sizeof(*dst6);
        dst6->sin6_addr = addr->v6addr;
        break;
    default:
        return 0;
    }

    /* XXX: what is the point of this? */
    rtalloc((struct route *)&ro);

    ROUTE_RELEASE(&ro);

    return ret;
}
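/*
 * pf_route: route-to / reply-to / dup-to handling for IPv4. The pbuf
 * is converted to an mbuf (cloned for dup-to, consumed otherwise), the
 * next hop comes from the rule's address pool or the state's rt_addr,
 * and the packet is re-filtered through pf_test_mbuf() when it leaves
 * on a different interface, fragmenting as needed against the
 * (possibly CLAT46-adjusted) interface MTU.
 */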
static void
pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
    struct mbuf *m0, *m1;
    struct route iproute;
    struct route *ro = &iproute;
    struct sockaddr_in *dst;
    struct ip *ip;
    struct ifnet *ifp = NULL;
    struct pf_addr naddr;
    struct pf_src_node *sn = NULL;
    int error = 0;
    uint32_t sw_csum = 0;
    int interface_mtu = 0;

    bzero(&iproute, sizeof(iproute));

    if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL ||
        (dir != PF_IN && dir != PF_OUT) || oifp == NULL) {
        panic("pf_route: invalid parameters");
    }

    if (pd->pf_mtag->pftag_routed++ > 3) {
        pbuf_destroy(*pbufp);
        *pbufp = NULL;
        m0 = NULL;
        goto bad;
    }

    /*
     * Since this is something of an edge case and may involve the
     * host stack (for routing, at least for now), we convert the
     * incoming pbuf into an mbuf.
     */
    if (r->rt == PF_DUPTO) {
        m0 = pbuf_clone_to_mbuf(*pbufp);
    } else if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
        return;
    } else {
        /* We're going to consume this packet */
        m0 = pbuf_to_mbuf(*pbufp, TRUE);
        *pbufp = NULL;
    }

    if (m0 == NULL) {
        goto bad;
    }

    /* We now have the packet in an mbuf (m0) */

    if (m0->m_len < (int)sizeof(struct ip)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route: packet length < sizeof (struct ip)\n"));
        goto bad;
    }

    ip = mtod(m0, struct ip *);

    dst = satosin((void *)&ro->ro_dst);
    dst->sin_family = AF_INET;
    dst->sin_len = sizeof(*dst);
    dst->sin_addr = ip->ip_dst;

    if (r->rt == PF_FASTROUTE) {
        rtalloc(ro);
        if (ro->ro_rt == NULL) {
            ipstat.ips_noroute++;
            goto bad;
        }

        ifp = ro->ro_rt->rt_ifp;
        RT_LOCK(ro->ro_rt);
        ro->ro_rt->rt_use++;

        if (ro->ro_rt->rt_flags & RTF_GATEWAY) {
            dst = satosin((void *)ro->ro_rt->rt_gateway);
        }
        RT_UNLOCK(ro->ro_rt);
    } else {
        if (TAILQ_EMPTY(&r->rpool.list)) {
            DPFPRINTF(PF_DEBUG_URGENT,
                ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
            goto bad;
        }
        if (s == NULL) {
            pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
                &naddr, NULL, &sn);
            if (!PF_AZERO(&naddr, AF_INET)) {
                dst->sin_addr.s_addr = naddr.v4addr.s_addr;
            }
            ifp = r->rpool.cur->kif ?
                r->rpool.cur->kif->pfik_ifp : NULL;
        } else {
            if (!PF_AZERO(&s->rt_addr, AF_INET)) {
                dst->sin_addr.s_addr =
                    s->rt_addr.v4addr.s_addr;
            }
            ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
        }
    }
    if (ifp == NULL) {
        goto bad;
    }

    if (oifp != ifp) {
        if (pf_test_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
            goto bad;
        } else if (m0 == NULL) {
            goto done;
        }
        if (m0->m_len < (int)sizeof(struct ip)) {
            DPFPRINTF(PF_DEBUG_URGENT,
                ("pf_route: packet length < sizeof (struct ip)\n"));
            goto bad;
        }
        ip = mtod(m0, struct ip *);
    }

    /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
    ip_output_checksum(ifp, m0, ((ip->ip_hl) << 2), ntohs(ip->ip_len),
        &sw_csum);

    interface_mtu = ifp->if_mtu;

    if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
        interface_mtu = IN6_LINKMTU(ifp);
        /* Further adjust the size for CLAT46 expansion */
        interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
    }

    if (ntohs(ip->ip_len) <= interface_mtu || TSO_IPV4_OK(ifp, m0) ||
        (!(ip->ip_off & htons(IP_DF)) &&
        (ifp->if_hwassist & CSUM_FRAGMENT))) {
        ip->ip_sum = 0;
        if (sw_csum & CSUM_DELAY_IP) {
            ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
            sw_csum &= ~CSUM_DELAY_IP;
            m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
        }
        error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt, sintosa(dst));
        goto done;
    }

    /*
     * Too large for interface; fragment if possible.
     * Must be able to put at least 8 bytes per fragment.
     * Balk when DF bit is set or the interface didn't support TSO.
     */
    if ((ip->ip_off & htons(IP_DF)) ||
        (m0->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) {
        ipstat.ips_cantfrag++;
        if (r->rt != PF_DUPTO) {
            icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
                interface_mtu);
            goto done;
        }
        goto bad;
    }

    m1 = m0;

    /* PR-8933605: send ip_len,ip_off to ip_fragment in host byte order */
#if BYTE_ORDER != BIG_ENDIAN
    NTOHS(ip->ip_off);
    NTOHS(ip->ip_len);
#endif
    error = ip_fragment(m0, ifp, interface_mtu, sw_csum);

    if (error) {
        m0 = NULL;
        goto bad;
    }

    for (m0 = m1; m0; m0 = m1) {
        m1 = m0->m_nextpkt;
        m0->m_nextpkt = 0;
        if (error == 0) {
            error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt,
                sintosa(dst));
        } else {
            m_freem(m0);
        }
    }

    if (error == 0) {
        ipstat.ips_fragmented++;
    }

done:
    ROUTE_RELEASE(&iproute);
    return;

bad:
    if (m0) {
        m_freem(m0);
    }
    goto done;
}
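/*
 * pf_route6: IPv6 counterpart of pf_route(). No fragmentation is
 * attempted; oversized packets trigger an ICMP6_PACKET_TOO_BIG error
 * unless this is a dup-to copy, and refragmented packets are handed
 * straight to nd6_output().
 */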
static void
pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
    struct mbuf *m0;
    struct route_in6 ip6route;
    struct route_in6 *ro;
    struct sockaddr_in6 *dst;
    struct ip6_hdr *ip6;
    struct ifnet *ifp = NULL;
    struct pf_addr naddr;
    struct pf_src_node *sn = NULL;
    int error = 0;
    struct pf_mtag *pf_mtag;

    if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL ||
        (dir != PF_IN && dir != PF_OUT) || oifp == NULL) {
        panic("pf_route6: invalid parameters");
    }

    if (pd->pf_mtag->pftag_routed++ > 3) {
        pbuf_destroy(*pbufp);
        *pbufp = NULL;
        m0 = NULL;
        goto bad;
    }

    /*
     * Since this is something of an edge case and may involve the
     * host stack (for routing, at least for now), we convert the
     * incoming pbuf into an mbuf.
     */
    if (r->rt == PF_DUPTO) {
        m0 = pbuf_clone_to_mbuf(*pbufp);
    } else if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
        return;
    } else {
        /* We're about to consume this packet */
        m0 = pbuf_to_mbuf(*pbufp, TRUE);
        *pbufp = NULL;
    }

    if (m0 == NULL) {
        goto bad;
    }

    if (m0->m_len < (int)sizeof(struct ip6_hdr)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
        goto bad;
    }
    ip6 = mtod(m0, struct ip6_hdr *);

    ro = &ip6route;
    bzero((caddr_t)ro, sizeof(*ro));
    dst = (struct sockaddr_in6 *)&ro->ro_dst;
    dst->sin6_family = AF_INET6;
    dst->sin6_len = sizeof(*dst);
    dst->sin6_addr = ip6->ip6_dst;

    /* Cheat. XXX why only in the v6addr case??? */
    if (r->rt == PF_FASTROUTE) {
        pf_mtag = pf_get_mtag(m0);
        ASSERT(pf_mtag != NULL);
        pf_mtag->pftag_flags |= PF_TAG_GENERATED;
        ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
        return;
    }

    if (TAILQ_EMPTY(&r->rpool.list)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
        goto bad;
    }
    if (s == NULL) {
        pf_map_addr(AF_INET6, r, (struct pf_addr *)(uintptr_t)&ip6->ip6_src,
            &naddr, NULL, &sn);
        if (!PF_AZERO(&naddr, AF_INET6)) {
            PF_ACPY((struct pf_addr *)&dst->sin6_addr,
                &naddr, AF_INET6);
        }
        ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
    } else {
        if (!PF_AZERO(&s->rt_addr, AF_INET6)) {
            PF_ACPY((struct pf_addr *)&dst->sin6_addr,
                &s->rt_addr, AF_INET6);
        }
        ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
    }

    if (ifp == NULL) {
        goto bad;
    }

    if (oifp != ifp) {
        if (pf_test6_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
            goto bad;
        } else if (m0 == NULL) {
            goto done;
        }
        if (m0->m_len < (int)sizeof(struct ip6_hdr)) {
            DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
                "< sizeof (struct ip6_hdr)\n"));
            goto bad;
        }
    }

    pf_mtag = pf_get_mtag(m0);
    /*
     * send refragmented packets.
     */
    if ((pf_mtag->pftag_flags & PF_TAG_REFRAGMENTED) != 0) {
        pf_mtag->pftag_flags &= ~PF_TAG_REFRAGMENTED;
        /*
         * nd6_output() frees packet chain in both success and
         * failure cases.
         */
        error = nd6_output(ifp, ifp, m0, dst, NULL, NULL);
        m0 = NULL;
        if (error) {
            DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6:"
                "dropped refragmented packet\n"));
        }
        goto done;
    }

    ip6 = mtod(m0, struct ip6_hdr *);

    /*
     * If the packet is too large for the outgoing interface,
     * send back an icmp6 error.
     */
    if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr)) {
        dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
    }
    if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
        error = nd6_output(ifp, ifp, m0, dst, NULL, NULL);
    } else {
        in6_ifstat_inc(ifp, ifs6_in_toobig);
        if (r->rt != PF_DUPTO) {
            icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
        } else {
            goto bad;
        }
    }

done:
    return;

bad:
    if (m0) {
        m_freem(m0);
    }
    goto done;
}
/*
 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
 *   off is the offset where the protocol header starts
 *   len is the total length of protocol header plus payload
 * returns 0 when the checksum is valid, otherwise returns 1.
 */
static int
pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p,
    sa_family_t af)
{
    u_int16_t sum;

    switch (p) {
    case IPPROTO_TCP:
    case IPPROTO_UDP:
        /*
         * Optimize for the common case; if the hardware calculated
         * value doesn't include pseudo-header checksum, or if it
         * is partially-computed (only 16-bit summation), do it in
         * software below.
         */
        if ((*pbuf->pb_csum_flags &
            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR) &&
            (*pbuf->pb_csum_data ^ 0xffff) == 0) {
            return 0;
        }
        break;
    case IPPROTO_ICMP:
    case IPPROTO_ICMPV6:
        break;
    default:
        return 1;
    }
    if (off < (int)sizeof(struct ip) || len < (int)sizeof(struct udphdr)) {
        return 1;
    }
    if (pbuf->pb_packet_len < (unsigned)(off + len)) {
        return 1;
    }
    switch (af) {
    case AF_INET:
        if (p == IPPROTO_ICMP) {
#if 0
            if (m->m_len < off) {
                return 1;
            }
            m->m_data += off;
            m->m_len -= off;
            sum = in_cksum(m, len);
            m->m_data -= off;
            m->m_len += off;
#else
            if (pbuf->pb_contig_len < (unsigned)off) {
                return 1;
            }
            sum = pbuf_inet_cksum(pbuf, 0, off, len);
#endif
        } else {
            if (pbuf->pb_contig_len < (int)sizeof(struct ip)) {
                return 1;
            }
            sum = pbuf_inet_cksum(pbuf, p, off, len);
        }
        break;
    case AF_INET6:
        if (pbuf->pb_contig_len < (int)sizeof(struct ip6_hdr)) {
            return 1;
        }
        sum = pbuf_inet6_cksum(pbuf, p, off, len);
        break;
    default:
        return 1;
    }
    if (sum) {
        switch (p) {
        case IPPROTO_TCP:
            tcpstat.tcps_rcvbadsum++;
            break;
        case IPPROTO_UDP:
            udpstat.udps_badsum++;
            break;
        case IPPROTO_ICMP:
            icmpstat.icps_checksum++;
            break;
        case IPPROTO_ICMPV6:
            icmp6stat.icp6s_checksum++;
            break;
        }
        return 1;
    }
    return 0;
}
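/*
 * pf_lazy_makewritable() can substitute a writable copy of the packet
 * in pd.mp; PF_APPLE_UPDATE_PDESC_IPv4() re-syncs the local pbuf and
 * header pointers after any callee that may have triggered that copy.
 */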
#define PF_APPLE_UPDATE_PDESC_IPv4()                            \
    do {                                                        \
        if (pbuf && pd.mp && pbuf != pd.mp) {                   \
            pbuf = pd.mp;                                       \
            h = pbuf->pb_data;                                  \
            pd.pf_mtag = pf_get_mtag_pbuf(pbuf);                \
        }                                                       \
    } while (0)

int
pf_test_mbuf(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct ip_fw_args *fwa)
{
    pbuf_t pbuf_store, *pbuf;
    int rv;

    pbuf_init_mbuf(&pbuf_store, *m0, (*m0)->m_pkthdr.rcvif);
    pbuf = &pbuf_store;

    rv = pf_test(dir, ifp, &pbuf, eh, fwa);

    if (pbuf_is_valid(pbuf)) {
        *m0 = pbuf->pb_mbuf;
        pbuf->pb_mbuf = NULL;
        pbuf_destroy(pbuf);
    } else {
        *m0 = NULL;
    }

    return rv;
}
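/*
 * pf_test: main IPv4 entry point. The packet is normalized and
 * reassembled first, then dispatched to the per-protocol state
 * matcher (falling back to pf_test_rule() to create state on a miss);
 * the "done" label applies IP-option policing, logging, counters,
 * table stats and route-to handling.
 */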
static int
pf_test(int dir, struct ifnet *ifp, pbuf_t **pbufp,
    struct ether_header *eh, struct ip_fw_args *fwa)
{
#if !DUMMYNET
#pragma unused(fwa)
#endif
    struct pfi_kif *kif;
    u_short action = PF_PASS, reason = 0, log = 0;
    pbuf_t *pbuf = *pbufp;
    struct ip *h = NULL;
    struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
    struct pf_state *s = NULL;
    struct pf_state_key *sk = NULL;
    struct pf_ruleset *ruleset = NULL;
    struct pf_pdesc pd;
    int off, dirndx, pqid = 0;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!pf_status.running) {
        return PF_PASS;
    }

    memset(&pd, 0, sizeof(pd));

    if ((pd.pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test: pf_get_mtag_pbuf returned NULL\n"));
        return PF_DROP;
    }

    if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED) {
        return PF_PASS;
    }

    kif = (struct pfi_kif *)ifp->if_pf_kif;

    if (kif == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
        return PF_DROP;
    }
    if (kif->pfik_flags & PFI_IFLAG_SKIP) {
        return PF_PASS;
    }

    if (pbuf->pb_packet_len < (int)sizeof(*h)) {
        REASON_SET(&reason, PFRES_SHORT);
        return PF_DROP;
    }

    /* initialize enough of pd for the done label */
    h = pbuf->pb_data;
    pd.mp = pbuf;
    pd.lmw = 0;
    pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
    pd.src = (struct pf_addr *)&h->ip_src;
    pd.dst = (struct pf_addr *)&h->ip_dst;
    PF_ACPY(&pd.baddr, pd.src, AF_INET);
    PF_ACPY(&pd.bdaddr, pd.dst, AF_INET);
    pd.ip_sum = &h->ip_sum;
    pd.proto = h->ip_p;
    pd.proto_variant = 0;
    pd.af = AF_INET;
    pd.tos = h->ip_tos;
    pd.ttl = h->ip_ttl;
    pd.tot_len = ntohs(h->ip_len);
    pd.eh = eh;

#if DUMMYNET
    if (fwa != NULL && fwa->fwa_pf_rule != NULL) {
        goto nonormalize;
    }
#endif /* DUMMYNET */

    /* We do IP header normalization and packet reassembly here */
    action = pf_normalize_ip(pbuf, dir, kif, &reason, &pd);
    if (action != PF_PASS || pd.lmw < 0) {
        action = PF_DROP;
        goto done;
    }

#if DUMMYNET
nonormalize:
#endif /* DUMMYNET */
    /* pf_normalize can mess with pb_data */
    h = pbuf->pb_data;

    off = h->ip_hl << 2;
    if (off < (int)sizeof(*h)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_SHORT);
        log = 1;
        goto done;
    }

    pd.src = (struct pf_addr *)&h->ip_src;
    pd.dst = (struct pf_addr *)&h->ip_dst;
    PF_ACPY(&pd.baddr, pd.src, AF_INET);
    PF_ACPY(&pd.bdaddr, pd.dst, AF_INET);
    pd.ip_sum = &h->ip_sum;
    pd.proto = h->ip_p;
    pd.proto_variant = 0;
    pd.mp = pbuf;
    pd.lmw = 0;
    pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
    pd.af = AF_INET;
    pd.tos = h->ip_tos;
    pd.ttl = h->ip_ttl;
    pd.sc = MBUF_SCIDX(pbuf_get_service_class(pbuf));
    pd.tot_len = ntohs(h->ip_len);
    pd.eh = eh;

    if (*pbuf->pb_flags & PKTF_FLOW_ID) {
        pd.flowsrc = *pbuf->pb_flowsrc;
        pd.flowhash = *pbuf->pb_flowid;
        pd.pktflags = *pbuf->pb_flags & PKTF_FLOW_MASK;
    }

    /* handle fragments that didn't get reassembled by normalization */
    if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
        pd.flags |= PFDESC_IP_FRAG;
#if DUMMYNET
        /* Traffic goes through dummynet first */
        action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
        if (action == PF_DROP || pbuf == NULL) {
            *pbufp = NULL;
            return action;
        }
#endif /* DUMMYNET */
        action = pf_test_fragment(&r, dir, kif, pbuf, h,
            &pd, &a, &ruleset);
        goto done;
    }

    switch (h->ip_p) {
    case IPPROTO_TCP: {
        struct tcphdr th;

        pd.hdr.tcp = &th;
        if (!pf_pull_hdr(pbuf, off, &th, sizeof(th),
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
        pd.p_len = pd.tot_len - off - (th.th_off << 2);
        if ((th.th_flags & TH_ACK) && pd.p_len == 0) {
            pqid = 1;
        }
#if DUMMYNET
        /* Traffic goes through dummynet first */
        action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
        if (action == PF_DROP || pbuf == NULL) {
            *pbufp = NULL;
            return action;
        }
#endif /* DUMMYNET */
        action = pf_normalize_tcp(dir, kif, pbuf, 0, off, h, &pd);
        if (pd.lmw < 0) {
            goto done;
        }
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_DROP) {
            goto done;
        }
        action = pf_test_state_tcp(&s, dir, kif, pbuf, off, h, &pd,
            &reason);
        if (action == PF_NAT64) {
            goto done;
        }
        if (pd.lmw < 0) {
            goto done;
        }
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL) {
            action = pf_test_rule(&r, &s, dir, kif,
                pbuf, off, h, &pd, &a, &ruleset, NULL);
        }
        break;
    }

    case IPPROTO_UDP: {
        struct udphdr uh;

        pd.hdr.udp = &uh;
        if (!pf_pull_hdr(pbuf, off, &uh, sizeof(uh),
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
        if (uh.uh_dport == 0 ||
            ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off ||
            ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
            action = PF_DROP;
            REASON_SET(&reason, PFRES_SHORT);
            goto done;
        }
#if DUMMYNET
        /* Traffic goes through dummynet first */
        action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
        if (action == PF_DROP || pbuf == NULL) {
            *pbufp = NULL;
            return action;
        }
#endif /* DUMMYNET */
        action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd,
            &reason);
        if (action == PF_NAT64) {
            goto done;
        }
        if (pd.lmw < 0) {
            goto done;
        }
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL) {
            action = pf_test_rule(&r, &s, dir, kif,
                pbuf, off, h, &pd, &a, &ruleset, NULL);
        }
        break;
    }

    case IPPROTO_ICMP: {
        struct icmp ih;

        pd.hdr.icmp = &ih;
        if (!pf_pull_hdr(pbuf, off, &ih, ICMP_MINLEN,
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
#if DUMMYNET
        /* Traffic goes through dummynet first */
        action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
        if (action == PF_DROP || pbuf == NULL) {
            *pbufp = NULL;
            return action;
        }
#endif /* DUMMYNET */
        action = pf_test_state_icmp(&s, dir, kif, pbuf, off, h, &pd,
            &reason);
        if (action == PF_NAT64) {
            goto done;
        }
        if (pd.lmw < 0) {
            goto done;
        }
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL) {
            action = pf_test_rule(&r, &s, dir, kif,
                pbuf, off, h, &pd, &a, &ruleset, NULL);
        }
        break;
    }

    case IPPROTO_ESP: {
        struct pf_esp_hdr esp;

        pd.hdr.esp = &esp;
        if (!pf_pull_hdr(pbuf, off, &esp, sizeof(esp), &action, &reason,
            AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
#if DUMMYNET
        /* Traffic goes through dummynet first */
        action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
        if (action == PF_DROP || pbuf == NULL) {
            *pbufp = NULL;
            return action;
        }
#endif /* DUMMYNET */
        action = pf_test_state_esp(&s, dir, kif, off, &pd);
        if (pd.lmw < 0) {
            goto done;
        }
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL) {
            action = pf_test_rule(&r, &s, dir, kif,
                pbuf, off, h, &pd, &a, &ruleset, NULL);
        }
        break;
    }

    case IPPROTO_GRE: {
        struct pf_grev1_hdr grev1;

        pd.hdr.grev1 = &grev1;
        if (!pf_pull_hdr(pbuf, off, &grev1, sizeof(grev1), &action,
            &reason, AF_INET)) {
            log = (action != PF_PASS);
            goto done;
        }
#if DUMMYNET
        /* Traffic goes through dummynet first */
        action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
        if (action == PF_DROP || pbuf == NULL) {
            *pbufp = NULL;
            return action;
        }
#endif /* DUMMYNET */
        if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
            ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
            if (ntohs(grev1.payload_length) >
                pbuf->pb_packet_len - off) {
                action = PF_DROP;
                REASON_SET(&reason, PFRES_SHORT);
                goto done;
            }
            pd.proto_variant = PF_GRE_PPTP_VARIANT;
            action = pf_test_state_grev1(&s, dir, kif, off, &pd);
            if (pd.lmw < 0) {
                goto done;
            }
            PF_APPLE_UPDATE_PDESC_IPv4();
            if (action == PF_PASS) {
#if NPFSYNC
                pfsync_update_state(s);
#endif /* NPFSYNC */
                r = s->rule.ptr;
                a = s->anchor.ptr;
                log = s->log;
                break;
            } else if (s == NULL) {
                action = pf_test_rule(&r, &s, dir, kif, pbuf,
                    off, h, &pd, &a, &ruleset, NULL);
                if (action == PF_PASS) {
                    break;
                }
            }
        }

        /* not GREv1/PPTP, so treat as ordinary GRE... */
    }
    /* FALLTHRU */

    default:
#if DUMMYNET
        /* Traffic goes through dummynet first */
        action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
        if (action == PF_DROP || pbuf == NULL) {
            *pbufp = NULL;
            return action;
        }
#endif /* DUMMYNET */
        action = pf_test_state_other(&s, dir, kif, &pd);
        if (pd.lmw < 0) {
            goto done;
        }
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL) {
            action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h,
                &pd, &a, &ruleset, NULL);
        }
        break;
    }

done:
    if (action == PF_NAT64) {
        *pbufp = NULL;
        return action;
    }

    *pbufp = pd.mp;
    PF_APPLE_UPDATE_PDESC_IPv4();

    if (action != PF_DROP) {
        if (action == PF_PASS && h->ip_hl > 5 &&
            !((s && s->allow_opts) || r->allow_opts)) {
            action = PF_DROP;
            REASON_SET(&reason, PFRES_IPOPTIONS);
            log = 1;
            DPFPRINTF(PF_DEBUG_MISC,
                ("pf: dropping packet with ip options [hlen=%u]\n",
                (unsigned int) h->ip_hl));
        }

        if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
            (pd.pktflags & PKTF_FLOW_ID)) {
            (void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0,
                r->rtableid, &pd);
        }

        if (action == PF_PASS) {
#if PF_ECN
            /* add hints for ecn */
            pd.pf_mtag->pftag_hdr = h;
            /* record address family */
            pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET6;
            pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
#endif /* PF_ECN */
            /* record protocol */
            *pbuf->pb_proto = pd.proto;

            /*
             * connections redirected to loopback should not match sockets
             * bound specifically to loopback due to security implications,
             * see tcp_input() and in_pcblookup_listen().
             */
            if (dir == PF_IN && (pd.proto == IPPROTO_TCP ||
                pd.proto == IPPROTO_UDP) && s != NULL &&
                s->nat_rule.ptr != NULL &&
                (s->nat_rule.ptr->action == PF_RDR ||
                s->nat_rule.ptr->action == PF_BINAT) &&
                (ntohl(pd.dst->v4addr.s_addr) >> IN_CLASSA_NSHIFT)
                == IN_LOOPBACKNET) {
                pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST;
            }
        }
    }

    if (log) {
        struct pf_rule *lr;

        if (s != NULL && s->nat_rule.ptr != NULL &&
            s->nat_rule.ptr->log & PF_LOG_ALL) {
            lr = s->nat_rule.ptr;
        } else {
            lr = r;
        }
        PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, reason, lr, a, ruleset,
            &pd);
    }

    kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
    kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

    if (action == PF_PASS || r->action == PF_DROP) {
        dirndx = (dir == PF_OUT);
        r->packets[dirndx]++;
        r->bytes[dirndx] += pd.tot_len;
        if (a != NULL) {
            a->packets[dirndx]++;
            a->bytes[dirndx] += pd.tot_len;
        }
        if (s != NULL) {
            sk = s->state_key;
            if (s->nat_rule.ptr != NULL) {
                s->nat_rule.ptr->packets[dirndx]++;
                s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
            }
            if (s->src_node != NULL) {
                s->src_node->packets[dirndx]++;
                s->src_node->bytes[dirndx] += pd.tot_len;
            }
            if (s->nat_src_node != NULL) {
                s->nat_src_node->packets[dirndx]++;
                s->nat_src_node->bytes[dirndx] += pd.tot_len;
            }
            dirndx = (dir == sk->direction) ? 0 : 1;
            s->packets[dirndx]++;
            s->bytes[dirndx] += pd.tot_len;
        }
        tr = r;
        nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
        if (nr != NULL) {
            struct pf_addr *x;
            /*
             * XXX: we need to make sure that the addresses
             * passed to pfr_update_stats() are the same than
             * the addresses used during matching (pfr_match)
             */
            if (r == &pf_default_rule) {
                tr = nr;
                x = (sk == NULL || sk->direction == dir) ?
                    &pd.baddr : &pd.naddr;
            } else {
                x = (sk == NULL || sk->direction == dir) ?
                    &pd.naddr : &pd.baddr;
            }
            if (x == &pd.baddr || s == NULL) {
                /* we need to change the address */
                if (dir == PF_OUT) {
                    pd.src = x;
                } else {
                    pd.dst = x;
                }
            }
        }
        if (tr->src.addr.type == PF_ADDR_TABLE) {
            pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
                sk->direction == dir) ?
                pd.src : pd.dst, pd.af,
                pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
                tr->src.neg);
        }
        if (tr->dst.addr.type == PF_ADDR_TABLE) {
            pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
                sk->direction == dir) ? pd.dst : pd.src, pd.af,
                pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
                tr->dst.neg);
        }
    }

    VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf);

    if (*pbufp) {
        if (pd.lmw < 0) {
            REASON_SET(&reason, PFRES_MEMORY);
            action = PF_DROP;
        }

        if (action == PF_DROP) {
            pbuf_destroy(*pbufp);
            *pbufp = NULL;
            return PF_DROP;
        }

        *pbufp = pbuf;
    }

    if (action == PF_SYNPROXY_DROP) {
        pbuf_destroy(*pbufp);
        *pbufp = NULL;
        action = PF_PASS;
    } else if (r->rt) {
        /* pf_route can free the pbuf causing *pbufp to become NULL */
        pf_route(pbufp, r, dir, kif->pfik_ifp, s, &pd);
    }

    return action;
}
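/* IPv6 versions of the pdesc re-sync macro and filter entry points. */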
#define PF_APPLE_UPDATE_PDESC_IPv6()                            \
    do {                                                        \
        if (pbuf && pd.mp && pbuf != pd.mp) {                   \
            pbuf = pd.mp;                                       \
        }                                                       \
        h = pbuf->pb_data;                                      \
    } while (0)

int
pf_test6_mbuf(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct ip_fw_args *fwa)
{
    pbuf_t pbuf_store, *pbuf;
    int rv;

    pbuf_init_mbuf(&pbuf_store, *m0, (*m0)->m_pkthdr.rcvif);
    pbuf = &pbuf_store;

    rv = pf_test6(dir, ifp, &pbuf, eh, fwa);

    if (pbuf_is_valid(pbuf)) {
        *m0 = pbuf->pb_mbuf;
        pbuf->pb_mbuf = NULL;
        pbuf_destroy(pbuf);
    } else {
        *m0 = NULL;
    }

    return rv;
}
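/*
 * pf_test6: main IPv6 entry point. The extension header chain is
 * walked (counting routing headers in rh_cnt) to locate the terminal
 * protocol before the per-protocol dispatch.
 */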
10193 pf_test6(int dir
, struct ifnet
*ifp
, pbuf_t
**pbufp
,
10194 struct ether_header
*eh
, struct ip_fw_args
*fwa
)
10197 #pragma unused(fwa)
10199 struct pfi_kif
*kif
;
10200 u_short action
= PF_PASS
, reason
= 0, log
= 0;
10201 pbuf_t
*pbuf
= *pbufp
;
10203 struct pf_rule
*a
= NULL
, *r
= &pf_default_rule
, *tr
, *nr
;
10204 struct pf_state
*s
= NULL
;
10205 struct pf_state_key
*sk
= NULL
;
10206 struct pf_ruleset
*ruleset
= NULL
;
10207 struct pf_pdesc pd
;
10208 int off
, terminal
= 0, dirndx
, rh_cnt
= 0;
10210 boolean_t fwd
= FALSE
;
10212 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
10214 ASSERT(ifp
!= NULL
);
10215 if ((dir
== PF_OUT
) && (pbuf
->pb_ifp
) && (ifp
!= pbuf
->pb_ifp
)) {
10219 if (!pf_status
.running
) {
10223 memset(&pd
, 0, sizeof(pd
));
10225 if ((pd
.pf_mtag
= pf_get_mtag_pbuf(pbuf
)) == NULL
) {
10226 DPFPRINTF(PF_DEBUG_URGENT
,
10227 ("pf_test6: pf_get_mtag_pbuf returned NULL\n"));
10231 if (pd
.pf_mtag
->pftag_flags
& PF_TAG_GENERATED
) {
10235 kif
= (struct pfi_kif
*)ifp
->if_pf_kif
;
10238 DPFPRINTF(PF_DEBUG_URGENT
,
10239 ("pf_test6: kif == NULL, if_name %s\n", ifp
->if_name
));
10242 if (kif
->pfik_flags
& PFI_IFLAG_SKIP
) {
10246 if (pbuf
->pb_packet_len
< (int)sizeof(*h
)) {
10247 REASON_SET(&reason
, PFRES_SHORT
);
10253 off
= ((caddr_t
)h
- (caddr_t
)pbuf
->pb_data
) + sizeof(struct ip6_hdr
);
10256 pd
.pf_mtag
= pf_get_mtag_pbuf(pbuf
);
10257 pd
.src
= (struct pf_addr
*)(uintptr_t)&h
->ip6_src
;
10258 pd
.dst
= (struct pf_addr
*)(uintptr_t)&h
->ip6_dst
;
10259 PF_ACPY(&pd
.baddr
, pd
.src
, AF_INET6
);
10260 PF_ACPY(&pd
.bdaddr
, pd
.dst
, AF_INET6
);
10264 pd
.proto_variant
= 0;
10266 pd
.ttl
= h
->ip6_hlim
;
10267 pd
.sc
= MBUF_SCIDX(pbuf_get_service_class(pbuf
));
10268 pd
.tot_len
= ntohs(h
->ip6_plen
) + sizeof(struct ip6_hdr
);
10271 if (*pbuf
->pb_flags
& PKTF_FLOW_ID
) {
10272 pd
.flowsrc
= *pbuf
->pb_flowsrc
;
10273 pd
.flowhash
= *pbuf
->pb_flowid
;
10274 pd
.pktflags
= (*pbuf
->pb_flags
& PKTF_FLOW_MASK
);
10278 if (fwa
!= NULL
&& fwa
->fwa_pf_rule
!= NULL
) {
10281 #endif /* DUMMYNET */
10283 /* We do IP header normalization and packet reassembly here */
10284 action
= pf_normalize_ip6(pbuf
, dir
, kif
, &reason
, &pd
);
10285 if (action
!= PF_PASS
|| pd
.lmw
< 0) {
10292 #endif /* DUMMYNET */
10297 * we do not support jumbogram yet. if we keep going, zero ip6_plen
10298 * will do something bad, so drop the packet for now.
10300 if (htons(h
->ip6_plen
) == 0) {
10302 REASON_SET(&reason
, PFRES_NORM
); /*XXX*/
10306 pd
.src
= (struct pf_addr
*)(uintptr_t)&h
->ip6_src
;
10307 pd
.dst
= (struct pf_addr
*)(uintptr_t)&h
->ip6_dst
;
10308 PF_ACPY(&pd
.baddr
, pd
.src
, AF_INET6
);
10309 PF_ACPY(&pd
.bdaddr
, pd
.dst
, AF_INET6
);
10313 pd
.ttl
= h
->ip6_hlim
;
10314 pd
.tot_len
= ntohs(h
->ip6_plen
) + sizeof(struct ip6_hdr
);
10317 off
= ((caddr_t
)h
- (caddr_t
)pbuf
->pb_data
) + sizeof(struct ip6_hdr
);
10318 pd
.proto
= h
->ip6_nxt
;
10319 pd
.proto_variant
= 0;
10322 pd
.pf_mtag
= pf_get_mtag_pbuf(pbuf
);
10325 switch (pd
.proto
) {
10326 case IPPROTO_FRAGMENT
: {
10327 struct ip6_frag ip6f
;
10329 pd
.flags
|= PFDESC_IP_FRAG
;
10330 if (!pf_pull_hdr(pbuf
, off
, &ip6f
, sizeof ip6f
, NULL
,
10332 DPFPRINTF(PF_DEBUG_MISC
,
10333 ("pf: IPv6 short fragment header\n"));
10335 REASON_SET(&reason
, PFRES_SHORT
);
10339 pd
.proto
= ip6f
.ip6f_nxt
;
10341 /* Traffic goes through dummynet first */
10342 action
= pf_test_dummynet(&r
, dir
, kif
, &pbuf
, &pd
,
10344 if (action
== PF_DROP
|| pbuf
== NULL
) {
10348 #endif /* DUMMYNET */
10349 action
= pf_test_fragment(&r
, dir
, kif
, pbuf
, h
, &pd
,
10351 if (action
== PF_DROP
) {
10352 REASON_SET(&reason
, PFRES_FRAG
);
		case IPPROTO_ROUTING:
			++rh_cnt;
		/* FALL THROUGH */

		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext opt6;

			if (!pf_pull_hdr(pbuf, off, &opt6, sizeof(opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH) {
				off += (opt6.ip6e_len + 2) * 4;
			} else {
				off += (opt6.ip6e_len + 1) * 8;
			}
			pd.proto = opt6.ip6e_nxt;
			/* goto the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);
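	/*
	 * Header-length arithmetic above: a generic extension header
	 * encodes its length in 8-octet units, not counting the first
	 * 8 octets (RFC 8200), hence (ip6e_len + 1) * 8.  AH instead
	 * counts 4-octet units minus two (RFC 4302), hence
	 * (ip6e_len + 2) * 4; an AH with ip6e_len == 4 therefore spans
	 * (4 + 2) * 4 == 24 bytes.
	 */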
	switch (pd.proto) {
	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(pbuf, off, &th, sizeof(th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_normalize_tcp(dir, kif, pbuf, 0, off, h, &pd);
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_DROP) {
			goto done;
		}
		action = pf_test_state_tcp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}
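	/*
	 * Each transport case follows the same shape: pull the header,
	 * let dummynet see the packet, try to match an existing state
	 * entry, and only fall back to a full ruleset evaluation
	 * (pf_test_rule) when no state matched.  The state lookup is
	 * the fast path.
	 */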
	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(pbuf, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}
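	/*
	 * The sanity checks above reject datagrams whose uh_ulen is
	 * impossible: uh_ulen covers the UDP header plus payload, so it
	 * can never be smaller than sizeof(struct udphdr) (8 bytes) or
	 * larger than the bytes actually present past the offset; a
	 * zero destination port is likewise invalid.
	 */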
	case IPPROTO_ICMPV6: {
		struct icmp6_hdr ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(pbuf, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_icmp(&s, dir, kif,
		    pbuf, off, h, &pd, &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}
	case IPPROTO_ESP: {
		struct pf_esp_hdr esp;

		pd.hdr.esp = &esp;
		if (!pf_pull_hdr(pbuf, off, &esp, sizeof(esp), &action,
		    &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_esp(&s, dir, kif, off, &pd);
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}
	case IPPROTO_GRE: {
		struct pf_grev1_hdr grev1;

		pd.hdr.grev1 = &grev1;
		if (!pf_pull_hdr(pbuf, off, &grev1, sizeof(grev1), &action,
		    &reason, AF_INET6)) {
			log = (action != PF_PASS);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
		    ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
			if (ntohs(grev1.payload_length) >
			    pbuf->pb_packet_len - off) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				goto done;
			}
			action = pf_test_state_grev1(&s, dir, kif, off, &pd);
			if (pd.lmw < 0) {
				goto done;
			}
			PF_APPLE_UPDATE_PDESC_IPv6();
			if (action == PF_PASS) {
#if NPFSYNC
				pfsync_update_state(s);
#endif /* NPFSYNC */
				r = s->rule.ptr;
				a = s->anchor.ptr;
				log = s->log;
				break;
			} else if (s == NULL) {
				action = pf_test_rule(&r, &s, dir, kif, pbuf,
				    off, h, &pd, &a, &ruleset, NULL);
				if (action == PF_PASS) {
					break;
				}
			}
		}

		/* not GREv1/PPTP, so treat as ordinary GRE... */
	}
	/* FALL THROUGH */
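	/*
	 * GRE with version field 1 and a PPP protocol type is the PPTP
	 * data channel (enhanced GRE, RFC 2637); those flows get
	 * dedicated tracking via pf_test_state_grev1() above, while
	 * everything else falls through to the generic case below.
	 */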
	default:
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_other(&s, dir, kif, &pd);
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h,
			    &pd, &a, &ruleset, NULL);
		}
		break;
	}
done:
	if (action == PF_NAT64) {
		*pbufp = NULL;
		return action;
	}

	*pbufp = pd.mp;
	PF_APPLE_UPDATE_PDESC_IPv6();

	/* handle dangerous IPv6 extension headers. */
	if (action != PF_DROP) {
		if (action == PF_PASS && rh_cnt &&
		    !((s && s->allow_opts) || r->allow_opts)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_IPOPTIONS);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: dropping packet with dangerous v6addr headers\n"));
		}

		if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
		    (pd.pktflags & PKTF_FLOW_ID)) {
			(void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0,
			    r->rtableid, &pd);
		}
		if (action == PF_PASS) {
#if PF_ECN
			/* add hints for ecn */
			pd.pf_mtag->pftag_hdr = h;
			/* record address family */
			pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET;
			pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
#endif /* PF_ECN */
			/* record protocol */
			*pbuf->pb_proto = pd.proto;
			if (dir == PF_IN && (pd.proto == IPPROTO_TCP ||
			    pd.proto == IPPROTO_UDP) && s != NULL &&
			    s->nat_rule.ptr != NULL &&
			    (s->nat_rule.ptr->action == PF_RDR ||
			    s->nat_rule.ptr->action == PF_BINAT) &&
			    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6addr)) {
				pd.pf_mtag->pftag_flags |=
				    PF_TAG_TRANSLATE_LOCALHOST;
			}
		}
	}
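	/*
	 * PF_TAG_TRANSLATE_LOCALHOST flags inbound TCP/UDP flows that an
	 * rdr or binat rule redirected to a loopback address, so later
	 * input processing can tell that the loopback destination is the
	 * product of translation rather than a spoofed packet.
	 */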
	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL) {
			lr = s->nat_rule.ptr;
		} else {
			lr = r;
		}
		PFLOG_PACKET(kif, h, pbuf, AF_INET6, dir, reason, lr, a,
		    ruleset, &pd);
	}
	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
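	/*
	 * The pfik counters are indexed [af][direction][result]: the
	 * literal 1 selects the IPv6 slot (the IPv4 path uses 0), then
	 * outbound vs. inbound, then whether the packet was blocked
	 * (action != PF_PASS).
	 */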
	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			sk = s->state_key;
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == sk->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}

		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL) {
			struct pf_addr *x;
			/*
			 * XXX: we need to make sure that the addresses
			 * passed to pfr_update_stats() are the same as
			 * the addresses used during matching (pfr_match)
			 */
			if (r == &pf_default_rule) {
				tr = nr;
				x = (s == NULL || sk->direction == dir) ?
				    &pd.baddr : &pd.naddr;
			} else {
				x = (s == NULL || sk->direction == dir) ?
				    &pd.naddr : &pd.baddr;
			}
			if (x == &pd.baddr || s == NULL) {
				if (dir == PF_OUT) {
					pd.src = x;
				} else {
					pd.dst = x;
				}
			}
		}
		if (tr->src.addr.type == PF_ADDR_TABLE) {
			pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.src : pd.dst, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->src.neg);
		}
		if (tr->dst.addr.type == PF_ADDR_TABLE) {
			pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.dst : pd.src, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->dst.neg);
		}
	}
#if 0
	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt) {
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
	}
#endif

	VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf);
	if (*pbufp) {
		if (pd.lmw < 0) {
			REASON_SET(&reason, PFRES_MEMORY);
			action = PF_DROP;
		}

		if (action == PF_DROP) {
			pbuf_destroy(*pbufp);
			*pbufp = NULL;
			return PF_DROP;
		}

		*pbufp = pbuf;
	}

	if (action == PF_SYNPROXY_DROP) {
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		action = PF_PASS;
	} else if (r->rt) {
		/* pf_route6 can free the mbuf causing *pbufp to become NULL */
		pf_route6(pbufp, r, dir, kif->pfik_ifp, s, &pd);
	} else {
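		/*
		 * IPv6 routers must not fragment in transit (RFC 8200):
		 * when pf reassembled a forwarded packet so it could be
		 * filtered as a whole, it has to be re-fragmented before
		 * it leaves, which is what the refragment step below
		 * arranges via the saved fragment tag.
		 */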
		/* if reassembled packet passed, create new fragments */
		struct pf_fragment_tag *ftag = NULL;

		if ((action == PF_PASS) && (*pbufp != NULL) && (fwd) &&
		    ((ftag = pf_find_fragment_tag_pbuf(*pbufp)) != NULL)) {
			action = pf_refragment6(ifp, pbufp, ftag);
		}
	}

	return action;
}
static int
pf_check_congestion(struct ifqueue *ifq)
{
#pragma unused(ifq)
	return 0;
}
void
pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
    int flags, const char *wchan, void *palloc)
{
#pragma unused(align, ioff, flags, palloc)

	bzero(pp, sizeof(*pp));
	pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
	if (pp->pool_zone != NULL) {
		zone_change(pp->pool_zone, Z_EXPAND, TRUE);
		zone_change(pp->pool_zone, Z_CALLERACCT, FALSE);
		pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
		pp->pool_name = wchan;
	}
}
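/*
 * The pool_*() routines emulate the BSD pool(9) allocator API on top of
 * Mach zones so the ported OpenBSD pf code can keep its calling
 * conventions.  A minimal usage sketch, modeled on how pf sets up its
 * state pool (illustrative, not a verbatim quote of the callers):
 *
 *	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0,
 *	    "pfstatepl", NULL);
 *	s = pool_get(&pf_state_pl, PR_WAITOK);
 *	if (s != NULL) {
 *		...
 *		pool_put(&pf_state_pl, s);
 *	}
 */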
/* Zones cannot be currently destroyed */
void
pool_destroy(struct pool *pp)
{
#pragma unused(pp)
}

void
pool_sethiwat(struct pool *pp, int n)
{
	pp->pool_hiwat = n;	/* Currently unused */
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{
#pragma unused(warnmess, ratecap)
	pp->pool_limit = n;
}
void *
pool_get(struct pool *pp, int flags)
{
	void *buf;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (pp->pool_count > pp->pool_limit) {
		DPFPRINTF(PF_DEBUG_NOISY,
		    ("pf: pool %s hard limit reached (%d)\n",
		    pp->pool_name != NULL ? pp->pool_name : "unknown",
		    pp->pool_limit));
		pp->pool_fails++;
		return NULL;
	}

	buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
	if (buf != NULL) {
		pp->pool_count++;
		VERIFY(pp->pool_count != 0);
	}
	return buf;
}
void
pool_put(struct pool *pp, void *v)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	zfree(pp->pool_zone, v);
	VERIFY(pp->pool_count != 0);
	pp->pool_count--;
}
struct pf_mtag *
pf_find_mtag_pbuf(pbuf_t *pbuf)
{
	return pbuf->pb_pftag;
}

struct pf_mtag *
pf_find_mtag(struct mbuf *m)
{
	return m_pftag(m);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	return pf_find_mtag(m);
}

struct pf_mtag *
pf_get_mtag_pbuf(pbuf_t *pbuf)
{
	return pf_find_mtag_pbuf(pbuf);
}
struct pf_fragment_tag *
pf_copy_fragment_tag(struct mbuf *m, struct pf_fragment_tag *ftag, int how)
{
	struct m_tag *tag;
	struct pf_mtag *pftag = pf_find_mtag(m);

	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF_REASS,
	    sizeof(*ftag), how, m);
	if (tag == NULL) {
		return NULL;
	} else {
		m_tag_prepend(m, tag);
		tag = tag + 1;	/* the payload sits right behind the m_tag */
	}
	bcopy(ftag, tag, sizeof(*ftag));
	pftag->pftag_flags |= PF_TAG_REASSEMBLED;
	return (struct pf_fragment_tag *)tag;
}
struct pf_fragment_tag *
pf_find_fragment_tag(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;
	struct pf_mtag *pftag = pf_find_mtag(m);

	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF_REASS,
	    NULL);
	VERIFY((tag == NULL) || (pftag->pftag_flags & PF_TAG_REASSEMBLED));
	if (tag != NULL) {
		tag = tag + 1;	/* step over the m_tag header */
	}
	ftag = (struct pf_fragment_tag *)tag;
	return ftag;
}
struct pf_fragment_tag *
pf_find_fragment_tag_pbuf(pbuf_t *pbuf)
{
	struct pf_mtag *mtag = pf_find_mtag_pbuf(pbuf);

	return (mtag->pftag_flags & PF_TAG_REASSEMBLED) ?
	    pbuf->pb_pf_fragtag : NULL;
}
uint64_t
pf_time_second(void)
{
	struct timeval t;

	microuptime(&t);	/* monotonic uptime, immune to clock changes */
	return t.tv_sec;
}

uint64_t
pf_calendar_time_second(void)
{
	struct timeval t;

	getmicrotime(&t);	/* wall-clock (calendar) time */
	return t.tv_sec;
}
static void *
hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
{
	struct hook_desc *hd;

	hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
	if (hd == NULL) {
		return NULL;
	}

	hd->hd_fn = fn;
	hd->hd_arg = arg;
	if (tail) {
		TAILQ_INSERT_TAIL(head, hd, hd_list);
	} else {
		TAILQ_INSERT_HEAD(head, hd, hd_list);
	}

	return hd;
}
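/*
 * hook_establish()/hook_runloop() implement a small deferred-callback
 * list.  A hypothetical caller (sketch only; the flag combination is
 * illustrative):
 *
 *	hook_establish(&head, 1, fn, ctx);		// append to the list
 *	...
 *	hook_runloop(&head, HOOK_REMOVE | HOOK_FREE);	// run all, then free
 */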
static void
hook_runloop(struct hook_desc_head *head, int flags)
{
	struct hook_desc *hd;

	if (!(flags & HOOK_REMOVE)) {
		if (!(flags & HOOK_ABORT)) {
			TAILQ_FOREACH(hd, head, hd_list)
				hd->hd_fn(hd->hd_arg);
		}
	} else {
		while (!!(hd = TAILQ_FIRST(head))) {
			TAILQ_REMOVE(head, hd, hd_list);
			if (!(flags & HOOK_ABORT)) {
				hd->hd_fn(hd->hd_arg);
			}
			if (flags & HOOK_FREE) {
				_FREE(hd, M_DEVBUF);