/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
/* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2013 Henning Brauer
 * NAT64 - Copyright (c) 2010 Viagenie Inc. (http://www.viagenie.ca)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/mcache.h>
#include <sys/protosw.h>

#include <libkern/crypto/md5.h>
#include <libkern/libkern.h>

#include <mach/thread_act.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <net/if_ether.h>
#include <net/ethernet.h>
#include <net/flowhash.h>
#include <net/nat464_utils.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <net/if_pfsync.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */
/*
 * For RandomULong(), to get a 32-bit random value.
 * Note that random() returns a 31-bit value, see rdar://11159750.
 */
#include <dev/random/randomdev.h>
#define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0))
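/*
 * Usage sketch (illustrative; not a call taken from this excerpt): the
 * printf argument list is passed as a parenthesized tuple so the macro
 * can expand it directly into a printf call, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: state limit reached\n"));
 *
 * prints only when pf_status.debug is at least PF_DEBUG_MISC.
 */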
/*
 * On Mac OS X, the rtableid value is treated as the interface scope
 * value that is equivalent to the interface index used for scoped
 * routing.  A valid scope value is anything but IFSCOPE_NONE (0),
 * as per definition of ifindex which is a positive, non-zero number.
 * The other BSDs treat a negative rtableid value as invalid, hence
 * the test against INT_MAX to handle userland apps which initialize
 * the field with a negative number.
 */
#define PF_RTABLEID_IS_VALID(r) \
	((r) > IFSCOPE_NONE && (r) <= INT_MAX)
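/*
 * For example, later in this file the rtableid of a matching rule is
 * copied into the packet's pf_mtag only when it passes this check:
 *
 *	if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid))
 *		pf_mtag->pftag_rtableid = r->rtableid;
 */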
decl_lck_mtx_data(, pf_lock_data);
decl_lck_rw_data(, pf_perim_lock_data);
lck_mtx_t *pf_lock = &pf_lock_data;
lck_rw_t *pf_perim_lock = &pf_perim_lock_data;

struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;

struct pf_palist pf_pabuf;
struct pf_status pf_status;

u_int32_t ticket_pabuf;

static MD5_CTX pf_tcp_secret_ctx;
static u_char pf_tcp_secret[16];
static int pf_tcp_secret_init;
static int pf_tcp_iss_off;

static struct pf_anchor_stackframe {
    struct pf_ruleset *rs;
    struct pf_anchor_node *parent;
    struct pf_anchor *child;
} pf_anchor_stack[64];

struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
struct pool pf_state_pl, pf_state_key_pl;

typedef void (*hook_fn_t)(void *);
struct hook_desc {
    TAILQ_ENTRY(hook_desc) hd_list;
    hook_fn_t hd_fn;
    void *hd_arg;
};

#define HOOK_REMOVE     0x01
#define HOOK_FREE       0x02
#define HOOK_ABORT      0x04

static void *hook_establish(struct hook_desc_head *, int,
    hook_fn_t, void *);
static void hook_runloop(struct hook_desc_head *, int flags);
struct pool pf_app_state_pl;
static void pf_print_addr(struct pf_addr *addr, sa_family_t af);
static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int,
    u_int8_t);

static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);

static void pf_init_threshold(struct pf_threshold *, u_int32_t,
    u_int32_t);
static void pf_add_threshold(struct pf_threshold *);
static int pf_check_threshold(struct pf_threshold *);

static void pf_change_ap(int, pbuf_t *, struct pf_addr *,
    u_int16_t *, u_int16_t *, u_int16_t *,
    struct pf_addr *, u_int16_t, u_int8_t, sa_family_t,
    sa_family_t, int);
static int pf_modulate_sack(pbuf_t *, int, struct pf_pdesc *,
    struct tcphdr *, struct pf_state_peer *);
static void pf_change_a6(struct pf_addr *, u_int16_t *,
    struct pf_addr *, u_int8_t);
void pf_change_addr(struct pf_addr *a, u_int16_t *c,
    struct pf_addr *an, u_int8_t u,
    sa_family_t af, sa_family_t afn);
static void pf_change_icmp(struct pf_addr *, u_int16_t *,
    struct pf_addr *, struct pf_addr *, u_int16_t,
    u_int16_t *, u_int16_t *, u_int16_t *,
    u_int16_t *, u_int8_t, sa_family_t);
static void pf_send_tcp(const struct pf_rule *, sa_family_t,
    const struct pf_addr *, const struct pf_addr *,
    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
    u_int16_t, struct ether_header *, struct ifnet *);
static void pf_send_icmp(pbuf_t *, u_int8_t, u_int8_t,
    sa_family_t, struct pf_rule *);
static struct pf_rule *pf_match_translation(struct pf_pdesc *, pbuf_t *,
    int, int, struct pfi_kif *, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, int);
static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *,
    pbuf_t *, int, int, struct pfi_kif *,
    struct pf_src_node **, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, union pf_state_xport *
    );
static void pf_attach_state(struct pf_state_key *,
    struct pf_state *, int);
static void pf_detach_state(struct pf_state *, int);
static u_int32_t pf_tcp_iss(struct pf_pdesc *);
static int pf_test_rule(struct pf_rule **, struct pf_state **,
    int, struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, struct pf_rule **,
    struct pf_ruleset **, struct ifqueue *);
#if DUMMYNET
static int pf_test_dummynet(struct pf_rule **, int,
    struct pfi_kif *, pbuf_t **,
    struct pf_pdesc *, struct ip_fw_args *);
#endif /* DUMMYNET */
static int pf_test_fragment(struct pf_rule **, int,
    struct pfi_kif *, pbuf_t *, void *,
    struct pf_pdesc *, struct pf_rule **,
    struct pf_ruleset **);
static int pf_test_state_tcp(struct pf_state **, int,
    struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, u_short *);
static int pf_test_state_udp(struct pf_state **, int,
    struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, u_short *);
static int pf_test_state_icmp(struct pf_state **, int,
    struct pfi_kif *, pbuf_t *, int,
    void *, struct pf_pdesc *, u_short *);
static int pf_test_state_other(struct pf_state **, int,
    struct pfi_kif *, struct pf_pdesc *);
static int pf_match_tag(struct pf_rule *,
    struct pf_mtag *, int *);
static void pf_hash(struct pf_addr *, struct pf_addr *,
    struct pf_poolhashkey *, sa_family_t);
static int pf_map_addr(u_int8_t, struct pf_rule *,
    struct pf_addr *, struct pf_addr *,
    struct pf_addr *, struct pf_src_node **);
static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *,
    struct pf_rule *, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, struct pf_addr *,
    union pf_state_xport *, struct pf_src_node **
    );
static void pf_route(pbuf_t **, struct pf_rule *, int,
    struct ifnet *, struct pf_state *,
    struct pf_pdesc *);
static void pf_route6(pbuf_t **, struct pf_rule *, int,
    struct ifnet *, struct pf_state *,
    struct pf_pdesc *);
static u_int8_t pf_get_wscale(pbuf_t *, int, u_int16_t,
    sa_family_t);
static u_int16_t pf_get_mss(pbuf_t *, int, u_int16_t,
    sa_family_t);
static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
    u_int16_t);
static void pf_set_rt_ifp(struct pf_state *,
    struct pf_addr *, sa_family_t af);
static int pf_check_proto_cksum(pbuf_t *, int, int,
    u_int8_t, sa_family_t);
static int pf_addr_wrap_neq(struct pf_addr_wrap *,
    struct pf_addr_wrap *);
static struct pf_state *pf_find_state(struct pfi_kif *,
    struct pf_state_key_cmp *, u_int);
static int pf_src_connlimit(struct pf_state **);
static void pf_stateins_err(const char *, struct pf_state *,
    struct pfi_kif *);
static int pf_check_congestion(struct ifqueue *);

static const char *pf_pptp_ctrl_type_name(u_int16_t code);
static void pf_pptp_handler(struct pf_state *, int, int,
    struct pf_pdesc *, struct pfi_kif *);
static void pf_pptp_unlink(struct pf_state *);
static void pf_grev1_unlink(struct pf_state *);
static int pf_test_state_grev1(struct pf_state **, int,
    struct pfi_kif *, int, struct pf_pdesc *);
static int pf_ike_compare(struct pf_app_state *,
    struct pf_app_state *);
static int pf_test_state_esp(struct pf_state **, int,
    struct pfi_kif *, int, struct pf_pdesc *);

extern struct pool pfr_ktable_pl;
extern struct pool pfr_kentry_pl;
extern int path_mtu_discovery;
struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
    { .pp = &pf_state_pl, .limit = PFSTATE_HIWAT },
    { .pp = &pf_app_state_pl, .limit = PFAPPSTATE_HIWAT },
    { .pp = &pf_src_tree_pl, .limit = PFSNODE_HIWAT },
    { .pp = &pf_frent_pl, .limit = PFFRAG_FRENT_HIWAT },
    { .pp = &pfr_ktable_pl, .limit = PFR_KTABLE_HIWAT },
    { .pp = &pfr_kentry_pl, .limit = PFR_KENTRY_HIWAT },
};
void *
pf_lazy_makewritable(struct pf_pdesc *pd, pbuf_t *pbuf, int len)
{
    void *p;

    VERIFY(pbuf == pd->mp);

    if ((p = pbuf_ensure_writable(pbuf, len)) == NULL) {
        len = -1;
    } else {
        pd->pf_mtag = pf_find_mtag_pbuf(pbuf);

        switch (pd->af) {
        case AF_INET: {
            struct ip *h = p;
            pd->src = (struct pf_addr *)(uintptr_t)&h->ip_src;
            pd->dst = (struct pf_addr *)(uintptr_t)&h->ip_dst;
            pd->ip_sum = &h->ip_sum;
            break;
        }
        case AF_INET6: {
            struct ip6_hdr *h = p;
            pd->src = (struct pf_addr *)(uintptr_t)&h->ip6_src;
            pd->dst = (struct pf_addr *)(uintptr_t)&h->ip6_dst;
            break;
        }
        }
    }

    return len < 0 ? NULL : p;
}
static int
pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
    int direction, int *action)
{
    if (*state == NULL || (*state)->timeout == PFTM_PURGE) {
        *action = PF_DROP;
        return 1;
    }

    if (direction == PF_OUT &&
        (((*state)->rule.ptr->rt == PF_ROUTETO &&
        (*state)->rule.ptr->direction == PF_OUT) ||
        ((*state)->rule.ptr->rt == PF_REPLYTO &&
        (*state)->rule.ptr->direction == PF_IN)) &&
        (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
        *action = PF_PASS;
        return 1;
    }

    return 0;
}
#define STATE_LOOKUP() \
    do { \
        *state = pf_find_state(kif, &key, direction); \
        if (*state != NULL && pd != NULL && \
            !(pd->pktflags & PKTF_FLOW_ID)) { \
            pd->flowsrc = (*state)->state_key->flowsrc; \
            pd->flowhash = (*state)->state_key->flowhash; \
            if (pd->flowhash != 0) { \
                pd->pktflags |= PKTF_FLOW_ID; \
                pd->pktflags &= ~PKTF_FLOW_ADV; \
            } \
        } \
        if (pf_state_lookup_aux(state, kif, direction, &action)) \
            return (action); \
    } while (0)

#define STATE_ADDR_TRANSLATE(sk) \
    ((sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
    ((sk)->af_lan == AF_INET6 && \
    ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
    (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
    (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])))

#define STATE_TRANSLATE(sk) \
    ((sk)->af_lan != (sk)->af_gwy || \
    STATE_ADDR_TRANSLATE(sk) || \
    (sk)->lan.xport.port != (sk)->gwy.xport.port)

#define STATE_GRE_TRANSLATE(sk) \
    (STATE_ADDR_TRANSLATE(sk) || \
    (sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)

#define BOUND_IFACE(r, k) \
    ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define STATE_INC_COUNTERS(s) \
    do { \
        s->rule.ptr->states++; \
        VERIFY(s->rule.ptr->states != 0); \
        if (s->anchor.ptr != NULL) { \
            s->anchor.ptr->states++; \
            VERIFY(s->anchor.ptr->states != 0); \
        } \
        if (s->nat_rule.ptr != NULL) { \
            s->nat_rule.ptr->states++; \
            VERIFY(s->nat_rule.ptr->states != 0); \
        } \
    } while (0)

#define STATE_DEC_COUNTERS(s) \
    do { \
        if (s->nat_rule.ptr != NULL) { \
            VERIFY(s->nat_rule.ptr->states > 0); \
            s->nat_rule.ptr->states--; \
        } \
        if (s->anchor.ptr != NULL) { \
            VERIFY(s->anchor.ptr->states > 0); \
            s->anchor.ptr->states--; \
        } \
        VERIFY(s->rule.ptr->states > 0); \
        s->rule.ptr->states--; \
    } while (0)
static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_lan_ext(struct pf_state_key *,
    struct pf_state_key *);
static __inline int pf_state_compare_ext_gwy(struct pf_state_key *,
    struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
    struct pf_state *);

struct pf_src_tree tree_src_tracking;

struct pf_state_tree_id tree_id;
struct pf_state_queue state_list;

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree_lan_ext, pf_state_key,
    entry_lan_ext, pf_state_compare_lan_ext);
RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key,
    entry_ext_gwy, pf_state_compare_ext_gwy);
RB_GENERATE(pf_state_tree_id, pf_state,
    entry_id, pf_state_compare_id);

#define PF_DT_SKIP_LANEXT       0x01
#define PF_DT_SKIP_EXTGWY       0x02

static const u_int16_t PF_PPTP_PORT = 1723;
static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
struct pf_pptp_ctrl_hdr {
    u_int16_t type;
    u_int16_t reserved_0;
};

struct pf_pptp_ctrl_generic {
    u_int16_t data[0];
};

#define PF_PPTP_CTRL_TYPE_START_REQ     1
struct pf_pptp_ctrl_start_req {
    u_int16_t protocol_version;
    u_int16_t reserved_1;
    u_int32_t framing_capabilities;
    u_int32_t bearer_capabilities;
    u_int16_t maximum_channels;
    u_int16_t firmware_revision;
    u_int8_t  host_name[64];
    u_int8_t  vendor_string[64];
};

#define PF_PPTP_CTRL_TYPE_START_RPY     2
struct pf_pptp_ctrl_start_rpy {
    u_int16_t protocol_version;
    u_int8_t  result_code;
    u_int8_t  error_code;
    u_int32_t framing_capabilities;
    u_int32_t bearer_capabilities;
    u_int16_t maximum_channels;
    u_int16_t firmware_revision;
    u_int8_t  host_name[64];
    u_int8_t  vendor_string[64];
};

#define PF_PPTP_CTRL_TYPE_STOP_REQ      3
struct pf_pptp_ctrl_stop_req {
    u_int8_t  reason;
    u_int8_t  reserved_1;
    u_int16_t reserved_2;
};

#define PF_PPTP_CTRL_TYPE_STOP_RPY      4
struct pf_pptp_ctrl_stop_rpy {
    u_int8_t  result_code;
    u_int8_t  error_code;
    u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_ECHO_REQ      5
struct pf_pptp_ctrl_echo_req {
    u_int32_t identifier;
};

#define PF_PPTP_CTRL_TYPE_ECHO_RPY      6
struct pf_pptp_ctrl_echo_rpy {
    u_int32_t identifier;
    u_int8_t  result_code;
    u_int8_t  error_code;
    u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ  7
struct pf_pptp_ctrl_call_out_req {
    u_int16_t call_id;
    u_int16_t call_sernum;
    u_int32_t bearer_type;
    u_int32_t framing_type;
    u_int16_t rxwindow_size;
    u_int16_t proc_delay;
    u_int8_t  phone_num[64];
    u_int8_t  sub_addr[64];
};

#define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY  8
struct pf_pptp_ctrl_call_out_rpy {
    u_int16_t call_id;
    u_int16_t peer_call_id;
    u_int8_t  result_code;
    u_int8_t  error_code;
    u_int16_t cause_code;
    u_int32_t connect_speed;
    u_int16_t rxwindow_size;
    u_int16_t proc_delay;
    u_int32_t phy_channel_id;
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_1ST   9
struct pf_pptp_ctrl_call_in_1st {
    u_int16_t call_id;
    u_int16_t call_sernum;
    u_int32_t bearer_type;
    u_int32_t phy_channel_id;
    u_int16_t dialed_number_len;
    u_int16_t dialing_number_len;
    u_int8_t  dialed_num[64];
    u_int8_t  dialing_num[64];
    u_int8_t  sub_addr[64];
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_2ND   10
struct pf_pptp_ctrl_call_in_2nd {
    u_int16_t call_id;
    u_int16_t peer_call_id;
    u_int8_t  result_code;
    u_int8_t  error_code;
    u_int16_t rxwindow_size;
    u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_3RD   11
struct pf_pptp_ctrl_call_in_3rd {
    u_int16_t call_id;
    u_int16_t reserved_1;
    u_int32_t connect_speed;
    u_int16_t rxwindow_size;
    u_int32_t framing_type;
};

#define PF_PPTP_CTRL_TYPE_CALL_CLR      12
struct pf_pptp_ctrl_call_clr {
    u_int16_t call_id;
    u_int16_t reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_DISC     13
struct pf_pptp_ctrl_call_disc {
    u_int16_t call_id;
    u_int8_t  result_code;
    u_int8_t  error_code;
    u_int16_t cause_code;
    u_int16_t reserved_1;
    u_int8_t  statistics[128];
};

#define PF_PPTP_CTRL_TYPE_ERROR 14
struct pf_pptp_ctrl_error {
    u_int16_t peer_call_id;
    u_int16_t reserved_1;
    u_int32_t crc_errors;
    u_int32_t buf_errors;
    u_int32_t tim_errors;
    u_int32_t align_errors;
};

#define PF_PPTP_CTRL_TYPE_SET_LINKINFO  15
struct pf_pptp_ctrl_set_linkinfo {
    u_int16_t peer_call_id;
    u_int16_t reserved_1;
    u_int32_t send_accm;
    u_int32_t recv_accm;
};

static const size_t PF_PPTP_CTRL_MSG_MINSIZE =
    sizeof(struct pf_pptp_hdr) + sizeof(struct pf_pptp_ctrl_hdr);

union pf_pptp_ctrl_msg_union {
    struct pf_pptp_ctrl_start_req       start_req;
    struct pf_pptp_ctrl_start_rpy       start_rpy;
    struct pf_pptp_ctrl_stop_req        stop_req;
    struct pf_pptp_ctrl_stop_rpy        stop_rpy;
    struct pf_pptp_ctrl_echo_req        echo_req;
    struct pf_pptp_ctrl_echo_rpy        echo_rpy;
    struct pf_pptp_ctrl_call_out_req    call_out_req;
    struct pf_pptp_ctrl_call_out_rpy    call_out_rpy;
    struct pf_pptp_ctrl_call_in_1st     call_in_1st;
    struct pf_pptp_ctrl_call_in_2nd     call_in_2nd;
    struct pf_pptp_ctrl_call_in_3rd     call_in_3rd;
    struct pf_pptp_ctrl_call_clr        call_clr;
    struct pf_pptp_ctrl_call_disc       call_disc;
    struct pf_pptp_ctrl_error           error;
    struct pf_pptp_ctrl_set_linkinfo    set_linkinfo;
};

struct pf_pptp_ctrl_msg {
    struct pf_pptp_hdr              hdr;
    struct pf_pptp_ctrl_hdr         ctrl;
    union pf_pptp_ctrl_msg_union    msg;
};
#define PF_GRE_FLAG_CHECKSUM_PRESENT    0x8000
#define PF_GRE_FLAG_VERSION_MASK        0x0007
#define PF_GRE_PPP_ETHERTYPE            0x880B

struct pf_grev1_hdr {
    u_int16_t flags;
    u_int16_t protocol_type;
    u_int16_t payload_length;
    u_int16_t call_id;
};

static const u_int16_t PF_IKE_PORT = 500;

struct pf_ike_hdr {
    u_int64_t initiator_cookie, responder_cookie;
    u_int8_t next_payload, version, exchange_type, flags;
    u_int32_t message_id, length;
};

#define PF_IKE_PACKET_MINSIZE   (sizeof (struct pf_ike_hdr))

#define PF_IKEv1_EXCHTYPE_BASE            1
#define PF_IKEv1_EXCHTYPE_ID_PROTECT      2
#define PF_IKEv1_EXCHTYPE_AUTH_ONLY       3
#define PF_IKEv1_EXCHTYPE_AGGRESSIVE      4
#define PF_IKEv1_EXCHTYPE_INFORMATIONAL   5
#define PF_IKEv2_EXCHTYPE_SA_INIT         34
#define PF_IKEv2_EXCHTYPE_AUTH            35
#define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA 36
#define PF_IKEv2_EXCHTYPE_INFORMATIONAL   37

#define PF_IKEv1_FLAG_E 0x01
#define PF_IKEv1_FLAG_C 0x02
#define PF_IKEv1_FLAG_A 0x04
#define PF_IKEv2_FLAG_I 0x08
#define PF_IKEv2_FLAG_V 0x10
#define PF_IKEv2_FLAG_R 0x20
static __inline int
pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
{
    switch (af) {
    case AF_INET:
        if (a->addr32[0] > b->addr32[0]) {
            return 1;
        }
        if (a->addr32[0] < b->addr32[0]) {
            return -1;
        }
        break;
    case AF_INET6:
        if (a->addr32[3] > b->addr32[3]) {
            return 1;
        }
        if (a->addr32[3] < b->addr32[3]) {
            return -1;
        }
        if (a->addr32[2] > b->addr32[2]) {
            return 1;
        }
        if (a->addr32[2] < b->addr32[2]) {
            return -1;
        }
        if (a->addr32[1] > b->addr32[1]) {
            return 1;
        }
        if (a->addr32[1] < b->addr32[1]) {
            return -1;
        }
        if (a->addr32[0] > b->addr32[0]) {
            return 1;
        }
        if (a->addr32[0] < b->addr32[0]) {
            return -1;
        }
        break;
    }
    return 0;
}
static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
    int diff;

    if (a->rule.ptr > b->rule.ptr) {
        return 1;
    }
    if (a->rule.ptr < b->rule.ptr) {
        return -1;
    }
    if ((diff = a->af - b->af) != 0) {
        return diff;
    }
    if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0) {
        return diff;
    }
    return 0;
}
static __inline int
pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b)
{
    int diff, extfilter;

    if ((diff = a->proto - b->proto) != 0) {
        return diff;
    }
    if ((diff = a->af_lan - b->af_lan) != 0) {
        return diff;
    }

    extfilter = PF_EXTFILTER_APD;

    switch (a->proto) {
    case IPPROTO_ICMP:
    case IPPROTO_ICMPV6:
        if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) {
            return diff;
        }
        break;

    case IPPROTO_TCP:
        if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) {
            return diff;
        }
        if ((diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) {
            return diff;
        }
        break;

    case IPPROTO_UDP:
        if ((diff = a->proto_variant - b->proto_variant)) {
            return diff;
        }
        extfilter = a->proto_variant;
        if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) {
            return diff;
        }
        if ((extfilter < PF_EXTFILTER_AD) &&
            (diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) {
            return diff;
        }
        break;

    case IPPROTO_GRE:
        if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
            a->proto_variant == b->proto_variant) {
            if (!!(diff = a->ext_lan.xport.call_id -
                b->ext_lan.xport.call_id)) {
                return diff;
            }
        }
        break;

    case IPPROTO_ESP:
        if (!!(diff = a->ext_lan.xport.spi - b->ext_lan.xport.spi)) {
            return diff;
        }
        break;

    default:
        break;
    }

    switch (a->af_lan) {
    case AF_INET:
        if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr,
            a->af_lan)) != 0) {
            return diff;
        }
        if (extfilter < PF_EXTFILTER_EI) {
            if ((diff = pf_addr_compare(&a->ext_lan.addr,
                &b->ext_lan.addr, a->af_lan)) != 0) {
                return diff;
            }
        }
        break;
    case AF_INET6:
        if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr,
            a->af_lan)) != 0) {
            return diff;
        }
        if (extfilter < PF_EXTFILTER_EI ||
            !PF_AZERO(&b->ext_lan.addr, AF_INET6)) {
            if ((diff = pf_addr_compare(&a->ext_lan.addr,
                &b->ext_lan.addr, a->af_lan)) != 0) {
                return diff;
            }
        }
        break;
    }

    if (a->app_state && b->app_state) {
        if (a->app_state->compare_lan_ext &&
            b->app_state->compare_lan_ext) {
            diff = (const char *)b->app_state->compare_lan_ext -
                (const char *)a->app_state->compare_lan_ext;
            if (diff != 0) {
                return diff;
            }
            diff = a->app_state->compare_lan_ext(a->app_state,
                b->app_state);
            if (diff != 0) {
                return diff;
            }
        }
    }

    return 0;
}
static __inline int
pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b)
{
    int diff, extfilter;

    if ((diff = a->proto - b->proto) != 0) {
        return diff;
    }
    if ((diff = a->af_gwy - b->af_gwy) != 0) {
        return diff;
    }

    extfilter = PF_EXTFILTER_APD;

    switch (a->proto) {
    case IPPROTO_ICMP:
    case IPPROTO_ICMPV6:
        if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) {
            return diff;
        }
        break;

    case IPPROTO_TCP:
        if ((diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) {
            return diff;
        }
        if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) {
            return diff;
        }
        break;

    case IPPROTO_UDP:
        if ((diff = a->proto_variant - b->proto_variant)) {
            return diff;
        }
        extfilter = a->proto_variant;
        if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) {
            return diff;
        }
        if ((extfilter < PF_EXTFILTER_AD) &&
            (diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) {
            return diff;
        }
        break;

    case IPPROTO_GRE:
        if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
            a->proto_variant == b->proto_variant) {
            if (!!(diff = a->gwy.xport.call_id -
                b->gwy.xport.call_id)) {
                return diff;
            }
        }
        break;

    case IPPROTO_ESP:
        if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi)) {
            return diff;
        }
        break;

    default:
        break;
    }

    switch (a->af_gwy) {
    case AF_INET:
        if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr,
            a->af_gwy)) != 0) {
            return diff;
        }
        if (extfilter < PF_EXTFILTER_EI) {
            if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr,
                a->af_gwy)) != 0) {
                return diff;
            }
        }
        break;
    case AF_INET6:
        if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr,
            a->af_gwy)) != 0) {
            return diff;
        }
        if (extfilter < PF_EXTFILTER_EI ||
            !PF_AZERO(&b->ext_gwy.addr, AF_INET6)) {
            if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr,
                a->af_gwy)) != 0) {
                return diff;
            }
        }
        break;
    }

    if (a->app_state && b->app_state) {
        if (a->app_state->compare_ext_gwy &&
            b->app_state->compare_ext_gwy) {
            diff = (const char *)b->app_state->compare_ext_gwy -
                (const char *)a->app_state->compare_ext_gwy;
            if (diff != 0) {
                return diff;
            }
            diff = a->app_state->compare_ext_gwy(a->app_state,
                b->app_state);
            if (diff != 0) {
                return diff;
            }
        }
    }

    return 0;
}
static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
    if (a->id > b->id) {
        return 1;
    }
    if (a->id < b->id) {
        return -1;
    }
    if (a->creatorid > b->creatorid) {
        return 1;
    }
    if (a->creatorid < b->creatorid) {
        return -1;
    }
    return 0;
}
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
    switch (af) {
    case AF_INET:
        dst->addr32[0] = src->addr32[0];
        break;
    case AF_INET6:
        dst->addr32[0] = src->addr32[0];
        dst->addr32[1] = src->addr32[1];
        dst->addr32[2] = src->addr32[2];
        dst->addr32[3] = src->addr32[3];
        break;
    }
}
struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
    pf_status.fcounters[FCNT_STATE_SEARCH]++;

    return RB_FIND(pf_state_tree_id, &tree_id,
        (struct pf_state *)(void *)key);
}
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
    struct pf_state_key *sk = NULL;
    struct pf_state *s;

    pf_status.fcounters[FCNT_STATE_SEARCH]++;

    switch (dir) {
    case PF_OUT:
        sk = RB_FIND(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
            (struct pf_state_key *)key);
        break;
    case PF_IN:
        sk = RB_FIND(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy,
            (struct pf_state_key *)key);
        /*
         * NAT64 is done only on input; for packets coming in from
         * the LAN side, we need to look up the lan_ext tree.
         */
        if (sk == NULL && pf_nat64_configured) {
            sk = RB_FIND(pf_state_tree_lan_ext,
                &pf_statetbl_lan_ext,
                (struct pf_state_key *)key);
            if (sk && sk->af_lan == sk->af_gwy) {
                sk = NULL;
            }
        }
        break;
    default:
        panic("pf_find_state");
    }

    /* list is sorted, if-bound states before floating ones */
    if (sk != NULL) {
        TAILQ_FOREACH(s, &sk->states, next)
        if (s->kif == pfi_all || s->kif == kif) {
            return s;
        }
    }

    return NULL;
}
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
    struct pf_state_key *sk = NULL;
    struct pf_state *s, *ret = NULL;

    pf_status.fcounters[FCNT_STATE_SEARCH]++;

    switch (dir) {
    case PF_OUT:
        sk = RB_FIND(pf_state_tree_lan_ext,
            &pf_statetbl_lan_ext, (struct pf_state_key *)key);
        break;
    case PF_IN:
        sk = RB_FIND(pf_state_tree_ext_gwy,
            &pf_statetbl_ext_gwy, (struct pf_state_key *)key);
        /*
         * NAT64 is done only on input; for packets coming in from
         * the LAN side, we need to look up the lan_ext tree.
         */
        if ((sk == NULL) && pf_nat64_configured) {
            sk = RB_FIND(pf_state_tree_lan_ext,
                &pf_statetbl_lan_ext,
                (struct pf_state_key *)key);
            if (sk && sk->af_lan == sk->af_gwy) {
                sk = NULL;
            }
        }
        break;
    default:
        panic("pf_find_state_all");
    }

    if (sk != NULL) {
        ret = TAILQ_FIRST(&sk->states);
        if (more == NULL) {
            return ret;
        }

        TAILQ_FOREACH(s, &sk->states, next)
        (*more)++;
    }

    return ret;
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
    threshold->limit = limit * PF_THRESHOLD_MULT;
    threshold->seconds = seconds;
    threshold->count = 0;
    threshold->last = pf_time_second();
}
static void
pf_add_threshold(struct pf_threshold *threshold)
{
    u_int32_t t = pf_time_second(), diff = t - threshold->last;

    if (diff >= threshold->seconds) {
        threshold->count = 0;
    } else {
        threshold->count -= threshold->count * diff /
            threshold->seconds;
    }
    threshold->count += PF_THRESHOLD_MULT;
    threshold->last = t;
}
static int
pf_check_threshold(struct pf_threshold *threshold)
{
    return threshold->count > threshold->limit;
}
static int
pf_src_connlimit(struct pf_state **state)
{
    int bad = 0;

    (*state)->src_node->conn++;
    VERIFY((*state)->src_node->conn != 0);
    (*state)->src.tcp_est = 1;
    pf_add_threshold(&(*state)->src_node->conn_rate);

    if ((*state)->rule.ptr->max_src_conn &&
        (*state)->rule.ptr->max_src_conn <
        (*state)->src_node->conn) {
        pf_status.lcounters[LCNT_SRCCONN]++;
        bad++;
    }

    if ((*state)->rule.ptr->max_src_conn_rate.limit &&
        pf_check_threshold(&(*state)->src_node->conn_rate)) {
        pf_status.lcounters[LCNT_SRCCONNRATE]++;
        bad++;
    }

    if (!bad) {
        return 0;
    }

    if ((*state)->rule.ptr->overload_tbl) {
        struct pfr_addr p;
        u_int32_t killed = 0;

        pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
        if (pf_status.debug >= PF_DEBUG_MISC) {
            printf("pf_src_connlimit: blocking address ");
            pf_print_host(&(*state)->src_node->addr, 0,
                (*state)->state_key->af_lan);
        }

        bzero(&p, sizeof(p));
        p.pfra_af = (*state)->state_key->af_lan;
        switch ((*state)->state_key->af_lan) {
        case AF_INET:
            p.pfra_ip4addr = (*state)->src_node->addr.v4addr;
            break;
        case AF_INET6:
            p.pfra_ip6addr = (*state)->src_node->addr.v6addr;
            break;
        }

        pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
            &p, pf_calendar_time_second());

        /* kill existing states if that's required. */
        if ((*state)->rule.ptr->flush) {
            struct pf_state_key *sk;
            struct pf_state *st;

            pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
            RB_FOREACH(st, pf_state_tree_id, &tree_id) {
                sk = st->state_key;
                /*
                 * Kill states from this source. (Only those
                 * from the same rule if PF_FLUSH_GLOBAL is not
                 * set)
                 */
                if (sk->af_lan ==
                    (*state)->state_key->af_lan &&
                    (((*state)->state_key->direction ==
                    PF_OUT &&
                    PF_AEQ(&(*state)->src_node->addr,
                    &sk->lan.addr, sk->af_lan)) ||
                    ((*state)->state_key->direction == PF_IN &&
                    PF_AEQ(&(*state)->src_node->addr,
                    &sk->ext_lan.addr, sk->af_lan))) &&
                    ((*state)->rule.ptr->flush &
                    PF_FLUSH_GLOBAL ||
                    (*state)->rule.ptr == st->rule.ptr)) {
                    st->timeout = PFTM_PURGE;
                    st->src.state = st->dst.state =
                        TCPS_CLOSED;
                    killed++;
                }
            }
            if (pf_status.debug >= PF_DEBUG_MISC) {
                printf(", %u states killed", killed);
            }
        }
        if (pf_status.debug >= PF_DEBUG_MISC) {
            printf("\n");
        }
    }

    /* kill this state */
    (*state)->timeout = PFTM_PURGE;
    (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

    return 1;
}
static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
    struct pf_src_node k;

    if (*sn == NULL) {
        k.af = af;
        PF_ACPY(&k.addr, src, af);
        if (rule->rule_flag & PFRULE_RULESRCTRACK ||
            rule->rpool.opts & PF_POOL_STICKYADDR) {
            k.rule.ptr = rule;
        } else {
            k.rule.ptr = NULL;
        }
        pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
        *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
    }
    if (*sn == NULL) {
        if (!rule->max_src_nodes ||
            rule->src_nodes < rule->max_src_nodes) {
            (*sn) = pool_get(&pf_src_tree_pl, PR_WAITOK);
        } else {
            pf_status.lcounters[LCNT_SRCNODES]++;
        }
        if ((*sn) == NULL) {
            return -1;
        }
        bzero(*sn, sizeof(struct pf_src_node));

        pf_init_threshold(&(*sn)->conn_rate,
            rule->max_src_conn_rate.limit,
            rule->max_src_conn_rate.seconds);

        (*sn)->af = af;
        if (rule->rule_flag & PFRULE_RULESRCTRACK ||
            rule->rpool.opts & PF_POOL_STICKYADDR) {
            (*sn)->rule.ptr = rule;
        } else {
            (*sn)->rule.ptr = NULL;
        }
        PF_ACPY(&(*sn)->addr, src, af);
        if (RB_INSERT(pf_src_tree,
            &tree_src_tracking, *sn) != NULL) {
            if (pf_status.debug >= PF_DEBUG_MISC) {
                printf("pf: src_tree insert failed: ");
                pf_print_host(&(*sn)->addr, 0, af);
                printf("\n");
            }
            pool_put(&pf_src_tree_pl, *sn);
            return -1;
        }
        (*sn)->creation = pf_time_second();
        (*sn)->ruletype = rule->action;
        if ((*sn)->rule.ptr != NULL) {
            (*sn)->rule.ptr->src_nodes++;
        }
        pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
        pf_status.src_nodes++;
    } else {
        if (rule->max_src_states &&
            (*sn)->states >= rule->max_src_states) {
            pf_status.lcounters[LCNT_SRCSTATES]++;
            return -1;
        }
    }
    return 0;
}
static void
pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif)
{
    struct pf_state_key *sk = s->state_key;

    if (pf_status.debug >= PF_DEBUG_MISC) {
        printf("pf: state insert failed: %s %s ", tree, kif->pfik_name);
        switch (sk->proto) {
        case IPPROTO_ICMPV6:
            printf("ICMPV6");
            break;
        default:
            printf("PROTO=%u", sk->proto);
            break;
        }
        printf(" lan: ");
        pf_print_sk_host(&sk->lan, sk->af_lan, sk->proto,
            sk->proto_variant);
        printf(" gwy: ");
        pf_print_sk_host(&sk->gwy, sk->af_gwy, sk->proto,
            sk->proto_variant);
        printf(" ext_lan: ");
        pf_print_sk_host(&sk->ext_lan, sk->af_lan, sk->proto,
            sk->proto_variant);
        printf(" ext_gwy: ");
        pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto,
            sk->proto_variant);
        if (s->sync_flags & PFSTATE_FROMSYNC) {
            printf(" (from sync)");
        }
        printf("\n");
    }
}
int
pf_insert_state(struct pfi_kif *kif, struct pf_state *s)
{
    struct pf_state_key *cur;
    struct pf_state *sp;

    VERIFY(s->state_key != NULL);

    if ((cur = RB_INSERT(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
        s->state_key)) != NULL) {
        /* key exists. check for same kif, if none, add to key */
        TAILQ_FOREACH(sp, &cur->states, next)
        if (sp->kif == kif) {   /* collision! */
            pf_stateins_err("tree_lan_ext", s, kif);
            pf_detach_state(s,
                PF_DT_SKIP_LANEXT | PF_DT_SKIP_EXTGWY);
            return -1;
        }
        pf_detach_state(s, PF_DT_SKIP_LANEXT | PF_DT_SKIP_EXTGWY);
        pf_attach_state(cur, s, kif == pfi_all ? 1 : 0);
    }

    /* if cur != NULL, we already found a state key and attached to it */
    if (cur == NULL && (cur = RB_INSERT(pf_state_tree_ext_gwy,
        &pf_statetbl_ext_gwy, s->state_key)) != NULL) {
        /* must not happen. we must have found the sk above! */
        pf_stateins_err("tree_ext_gwy", s, kif);
        pf_detach_state(s, PF_DT_SKIP_EXTGWY);
        return -1;
    }

    if (s->id == 0 && s->creatorid == 0) {
        s->id = htobe64(pf_status.stateid++);
        s->creatorid = pf_status.hostid;
    }
    if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
        if (pf_status.debug >= PF_DEBUG_MISC) {
            printf("pf: state insert failed: "
                "id: %016llx creatorid: %08x",
                be64toh(s->id), ntohl(s->creatorid));
            if (s->sync_flags & PFSTATE_FROMSYNC) {
                printf(" (from sync)");
            }
            printf("\n");
        }
        pf_detach_state(s, 0);
        return -1;
    }
    TAILQ_INSERT_TAIL(&state_list, s, entry_list);
    pf_status.fcounters[FCNT_STATE_INSERT]++;
    pf_status.states++;
    VERIFY(pf_status.states != 0);
    pfi_kif_ref(kif, PFI_KIF_REF_STATE);

    pfsync_insert_state(s);
    return 0;
}
static int
pf_purge_thread_cont(int err)
{
#pragma unused(err)
    static u_int32_t nloops = 0;
    int t = 1;      /* 1 second */

    /*
     * Update coarse-grained networking timestamp (in sec.); the idea
     * is to piggy-back on the periodic timeout callout to update
     * the counter returnable via net_uptime().
     */
    net_update_uptime();

    lck_rw_lock_shared(pf_perim_lock);
    lck_mtx_lock(pf_lock);

    /* purge everything if not running */
    if (!pf_status.running) {
        pf_purge_expired_states(pf_status.states);
        pf_purge_expired_fragments();
        pf_purge_expired_src_nodes();

        /* terminate thread (we don't currently do this) */
        if (pf_purge_thread == NULL) {
            lck_mtx_unlock(pf_lock);
            lck_rw_done(pf_perim_lock);

            thread_deallocate(current_thread());
            thread_terminate(current_thread());
            /* NOTREACHED */
        }
        goto done;
    }

    /* if there's nothing left, sleep w/o timeout */
    if (pf_status.states == 0 &&
        pf_normalize_isempty() &&
        RB_EMPTY(&tree_src_tracking)) {
        nloops = 0;
        t = 0;
        goto done;
    }

    /* process a fraction of the state table every second */
    pf_purge_expired_states(1 + (pf_status.states
        / pf_default_rule.timeout[PFTM_INTERVAL]));

    /* purge other expired types every PFTM_INTERVAL seconds */
    if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
        pf_purge_expired_fragments();
        pf_purge_expired_src_nodes();
        nloops = 0;
    }
done:
    lck_mtx_unlock(pf_lock);
    lck_rw_done(pf_perim_lock);

    (void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge_cont",
        t * hz, pf_purge_thread_cont);
    /* NOTREACHED */

    return 0;
}
void
pf_purge_thread_fn(void *v, wait_result_t w)
{
#pragma unused(v, w)
    (void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge", 0,
        pf_purge_thread_cont);
    /*
     * tsleep0() shouldn't have returned as PCATCH was not set;
     * therefore assert in this case.
     */
    VERIFY(0);
}
*state
)
1554 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1556 /* handle all PFTM_* > PFTM_MAX here */
1557 if (state
->timeout
== PFTM_PURGE
) {
1558 return pf_time_second();
1561 VERIFY(state
->timeout
!= PFTM_UNLINKED
);
1562 VERIFY(state
->timeout
< PFTM_MAX
);
1563 t
= state
->rule
.ptr
->timeout
[state
->timeout
];
1565 t
= pf_default_rule
.timeout
[state
->timeout
];
1567 start
= state
->rule
.ptr
->timeout
[PFTM_ADAPTIVE_START
];
1569 end
= state
->rule
.ptr
->timeout
[PFTM_ADAPTIVE_END
];
1570 states
= state
->rule
.ptr
->states
;
1572 start
= pf_default_rule
.timeout
[PFTM_ADAPTIVE_START
];
1573 end
= pf_default_rule
.timeout
[PFTM_ADAPTIVE_END
];
1574 states
= pf_status
.states
;
1576 if (end
&& states
> start
&& start
< end
) {
1578 return state
->expire
+ t
* (end
- states
) /
1581 return pf_time_second();
1584 return state
->expire
+ t
;
void
pf_purge_expired_src_nodes(void)
{
    struct pf_src_node *cur, *next;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
        next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

        if (cur->states <= 0 && cur->expire <= pf_time_second()) {
            if (cur->rule.ptr != NULL) {
                cur->rule.ptr->src_nodes--;
                if (cur->rule.ptr->states <= 0 &&
                    cur->rule.ptr->max_src_nodes <= 0) {
                    pf_rm_rule(NULL, cur->rule.ptr);
                }
            }
            RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
            pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
            pf_status.src_nodes--;
            pool_put(&pf_src_tree_pl, cur);
        }
    }
}
void
pf_src_tree_remove_state(struct pf_state *s)
{
    u_int32_t t;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (s->src_node != NULL) {
        if (s->src.tcp_est) {
            VERIFY(s->src_node->conn > 0);
            --s->src_node->conn;
        }
        VERIFY(s->src_node->states > 0);
        if (--s->src_node->states <= 0) {
            t = s->rule.ptr->timeout[PFTM_SRC_NODE];
            if (!t) {
                t = pf_default_rule.timeout[PFTM_SRC_NODE];
            }
            s->src_node->expire = pf_time_second() + t;
        }
    }
    if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
        VERIFY(s->nat_src_node->states > 0);
        if (--s->nat_src_node->states <= 0) {
            t = s->rule.ptr->timeout[PFTM_SRC_NODE];
            if (!t) {
                t = pf_default_rule.timeout[PFTM_SRC_NODE];
            }
            s->nat_src_node->expire = pf_time_second() + t;
        }
    }
    s->src_node = s->nat_src_node = NULL;
}
void
pf_unlink_state(struct pf_state *cur)
{
    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (cur->src.state == PF_TCPS_PROXY_DST) {
        pf_send_tcp(cur->rule.ptr, cur->state_key->af_lan,
            &cur->state_key->ext_lan.addr, &cur->state_key->lan.addr,
            cur->state_key->ext_lan.xport.port,
            cur->state_key->lan.xport.port,
            cur->src.seqhi, cur->src.seqlo + 1,
            TH_RST | TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
    }

    hook_runloop(&cur->unlink_hooks, HOOK_REMOVE | HOOK_FREE);
    RB_REMOVE(pf_state_tree_id, &tree_id, cur);

    if (cur->creatorid == pf_status.hostid) {
        pfsync_delete_state(cur);
    }

    cur->timeout = PFTM_UNLINKED;
    pf_src_tree_remove_state(cur);
    pf_detach_state(cur, 0);
}
/*
 * callers should be at splpf and hold the
 * write_lock on pf_consistency_lock
 */
void
pf_free_state(struct pf_state *cur)
{
    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (pfsyncif != NULL &&
        (pfsyncif->sc_bulk_send_next == cur ||
        pfsyncif->sc_bulk_terminator == cur)) {
        return;
    }

    VERIFY(cur->timeout == PFTM_UNLINKED);
    VERIFY(cur->rule.ptr->states > 0);
    if (--cur->rule.ptr->states <= 0 &&
        cur->rule.ptr->src_nodes <= 0) {
        pf_rm_rule(NULL, cur->rule.ptr);
    }
    if (cur->nat_rule.ptr != NULL) {
        VERIFY(cur->nat_rule.ptr->states > 0);
        if (--cur->nat_rule.ptr->states <= 0 &&
            cur->nat_rule.ptr->src_nodes <= 0) {
            pf_rm_rule(NULL, cur->nat_rule.ptr);
        }
    }
    if (cur->anchor.ptr != NULL) {
        VERIFY(cur->anchor.ptr->states > 0);
        if (--cur->anchor.ptr->states <= 0) {
            pf_rm_rule(NULL, cur->anchor.ptr);
        }
    }
    pf_normalize_tcp_cleanup(cur);
    pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
    TAILQ_REMOVE(&state_list, cur, entry_list);

    pf_tag_unref(cur->tag);

    pool_put(&pf_state_pl, cur);
    pf_status.fcounters[FCNT_STATE_REMOVALS]++;
    VERIFY(pf_status.states > 0);
    pf_status.states--;
}
void
pf_purge_expired_states(u_int32_t maxcheck)
{
    static struct pf_state *cur = NULL;
    struct pf_state *next;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    while (maxcheck--) {
        /* wrap to start of list when we hit the end */
        if (cur == NULL) {
            cur = TAILQ_FIRST(&state_list);
            if (cur == NULL) {
                break;  /* list empty */
            }
        }

        /* get next state, as cur may get deleted */
        next = TAILQ_NEXT(cur, entry_list);

        if (cur->timeout == PFTM_UNLINKED) {
            pf_free_state(cur);
        } else if (pf_state_expires(cur) <= pf_time_second()) {
            /* unlink and free expired state */
            pf_unlink_state(cur);
            pf_free_state(cur);
        }
        cur = next;
    }
}
int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (aw->type != PF_ADDR_TABLE) {
        return 0;
    }
    if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL) {
        return 1;
    }
    return 0;
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL) {
        return;
    }
    pfr_detach_table(aw->p.tbl);
    aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
    struct pfr_ktable *kt = aw->p.tbl;

    LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (aw->type != PF_ADDR_TABLE || kt == NULL) {
        return;
    }
    if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) {
        kt = kt->pfrkt_root;
    }
    aw->p.tbl = NULL;
    aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
        kt->pfrkt_cnt : -1;
}
static void
pf_print_addr(struct pf_addr *addr, sa_family_t af)
{
    switch (af) {
    case AF_INET: {
        u_int32_t a = ntohl(addr->addr32[0]);
        printf("%u.%u.%u.%u", (a >> 24) & 255, (a >> 16) & 255,
            (a >> 8) & 255, a & 255);
        break;
    }
    case AF_INET6: {
        u_int16_t b;
        u_int8_t i, curstart = 255, curend = 0,
            maxstart = 0, maxend = 0;
        for (i = 0; i < 8; i++) {
            if (!addr->addr16[i]) {
                if (curstart == 255) {
                    curstart = i;
                } else {
                    curend = i;
                }
            } else {
                if ((curend - curstart) >
                    (maxend - maxstart)) {
                    maxstart = curstart;
                    maxend = curend;
                }
                curstart = 255;
            }
        }
        for (i = 0; i < 8; i++) {
            if (i >= maxstart && i <= maxend) {
                if (maxend != 7) {
                    if (i == maxstart) {
                        printf(":");
                    }
                } else {
                    if (i == maxend) {
                        printf(":");
                    }
                }
            } else {
                b = ntohs(addr->addr16[i]);
                printf("%x", b);
                if (i < 7) {
                    printf(":");
                }
            }
        }
        break;
    }
    }
}
static void
pf_print_sk_host(struct pf_state_host *sh, sa_family_t af, int proto,
    u_int8_t proto_variant)
{
    pf_print_addr(&sh->addr, af);

    switch (proto) {
    case IPPROTO_ESP:
        if (sh->xport.spi) {
            printf("[%08x]", ntohl(sh->xport.spi));
        }
        break;

    case IPPROTO_GRE:
        if (proto_variant == PF_GRE_PPTP_VARIANT) {
            printf("[%u]", ntohs(sh->xport.call_id));
        }
        break;

    case IPPROTO_TCP:
    case IPPROTO_UDP:
        printf("[%u]", ntohs(sh->xport.port));
        break;

    default:
        break;
    }
}

void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
    pf_print_addr(addr, af);
    if (p) {
        printf("[%u]", ntohs(p));
    }
}
void
pf_print_state(struct pf_state *s)
{
    struct pf_state_key *sk = s->state_key;
    switch (sk->proto) {
    case IPPROTO_GRE:
        printf("GRE%u ", sk->proto_variant);
        break;
    case IPPROTO_ICMPV6:
        printf("ICMPV6 ");
        break;
    default:
        printf("%u ", sk->proto);
        break;
    }
    pf_print_sk_host(&sk->lan, sk->af_lan, sk->proto, sk->proto_variant);
    printf(" ");
    pf_print_sk_host(&sk->gwy, sk->af_gwy, sk->proto, sk->proto_variant);
    printf(" ");
    pf_print_sk_host(&sk->ext_lan, sk->af_lan, sk->proto,
        sk->proto_variant);
    printf(" ");
    pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto,
        sk->proto_variant);
    printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
        s->src.seqhi, s->src.max_win, s->src.seqdiff);
    if (s->src.wscale && s->dst.wscale) {
        printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
    }
    printf("]");
    printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
        s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
    if (s->src.wscale && s->dst.wscale) {
        printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
    }
    printf("]");
    printf(" %u:%u", s->src.state, s->dst.state);
}
void
pf_print_flags(u_int8_t f)
#define PF_SET_SKIP_STEPS(i) \
    do { \
        while (head[i] != cur) { \
            head[i]->skip[i].ptr = cur; \
            head[i] = TAILQ_NEXT(head[i], entries); \
        } \
    } while (0)
void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
    struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
    int i;

    cur = TAILQ_FIRST(rules);
    prev = cur;
    for (i = 0; i < PF_SKIP_COUNT; ++i) {
        head[i] = cur;
    }
    while (cur != NULL) {
        if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) {
            PF_SET_SKIP_STEPS(PF_SKIP_IFP);
        }
        if (cur->direction != prev->direction) {
            PF_SET_SKIP_STEPS(PF_SKIP_DIR);
        }
        if (cur->af != prev->af) {
            PF_SET_SKIP_STEPS(PF_SKIP_AF);
        }
        if (cur->proto != prev->proto) {
            PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
        }
        if (cur->src.neg != prev->src.neg ||
            pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) {
            PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
        }
        {
            union pf_rule_xport *cx = &cur->src.xport;
            union pf_rule_xport *px = &prev->src.xport;

            switch (cur->proto) {
            case IPPROTO_GRE:
            case IPPROTO_ESP:
                PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
                break;
            default:
                if (prev->proto == IPPROTO_GRE ||
                    prev->proto == IPPROTO_ESP ||
                    cx->range.op != px->range.op ||
                    cx->range.port[0] != px->range.port[0] ||
                    cx->range.port[1] != px->range.port[1]) {
                    PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
                }
                break;
            }
        }
        if (cur->dst.neg != prev->dst.neg ||
            pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) {
            PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
        }
        {
            union pf_rule_xport *cx = &cur->dst.xport;
            union pf_rule_xport *px = &prev->dst.xport;

            switch (cur->proto) {
            case IPPROTO_GRE:
                if (cur->proto != prev->proto ||
                    cx->call_id != px->call_id) {
                    PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
                }
                break;
            case IPPROTO_ESP:
                if (cur->proto != prev->proto ||
                    cx->spi != px->spi) {
                    PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
                }
                break;
            default:
                if (prev->proto == IPPROTO_GRE ||
                    prev->proto == IPPROTO_ESP ||
                    cx->range.op != px->range.op ||
                    cx->range.port[0] != px->range.port[0] ||
                    cx->range.port[1] != px->range.port[1]) {
                    PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
                }
                break;
            }
        }
        prev = cur;
        cur = TAILQ_NEXT(cur, entries);
    }
    for (i = 0; i < PF_SKIP_COUNT; ++i) {
        PF_SET_SKIP_STEPS(i);
    }
}
u_int32_t
pf_calc_state_key_flowhash(struct pf_state_key *sk)
{
    struct pf_flowhash_key fh __attribute__((aligned(8)));
    uint32_t flowhash = 0;

    bzero(&fh, sizeof(fh));
    if (PF_ALEQ(&sk->lan.addr, &sk->ext_lan.addr, sk->af_lan)) {
        bcopy(&sk->lan.addr, &fh.ap1.addr, sizeof(fh.ap1.addr));
        bcopy(&sk->ext_lan.addr, &fh.ap2.addr, sizeof(fh.ap2.addr));
    } else {
        bcopy(&sk->ext_lan.addr, &fh.ap1.addr, sizeof(fh.ap1.addr));
        bcopy(&sk->lan.addr, &fh.ap2.addr, sizeof(fh.ap2.addr));
    }
    if (sk->lan.xport.spi <= sk->ext_lan.xport.spi) {
        fh.ap1.xport.spi = sk->lan.xport.spi;
        fh.ap2.xport.spi = sk->ext_lan.xport.spi;
    } else {
        fh.ap1.xport.spi = sk->ext_lan.xport.spi;
        fh.ap2.xport.spi = sk->lan.xport.spi;
    }
    fh.proto = sk->proto;

try_again:
    flowhash = net_flowhash(&fh, sizeof(fh), pf_hash_seed);
    if (flowhash == 0) {
        /* try to get a non-zero flowhash */
        pf_hash_seed = RandomULong();
        goto try_again;
    }

    return flowhash;
}
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
    if (aw1->type != aw2->type) {
        return 1;
    }
    switch (aw1->type) {
    case PF_ADDR_ADDRMASK:
        if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0)) {
            return 1;
        }
        if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0)) {
            return 1;
        }
        return 0;
    case PF_ADDR_DYNIFTL:
        return aw1->p.dyn == NULL || aw2->p.dyn == NULL ||
               aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt;
    case PF_ADDR_NOROUTE:
    case PF_ADDR_URPFFAILED:
        return 0;
    case PF_ADDR_TABLE:
        return aw1->p.tbl != aw2->p.tbl;
    case PF_ADDR_RTLABEL:
        return aw1->v.rtlabel != aw2->v.rtlabel;
    default:
        printf("invalid address type: %d\n", aw1->type);
        return 1;
    }
}
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
    return nat464_cksum_fixup(cksum, old, new, udp);
}
/*
 * change ip address & port
 * dir  : packet direction
 * a    : address to be changed
 * p    : port to be changed
 * ic   : ip header checksum
 * pc   : protocol checksum
 * an   : new ip address
 * pn   : new port
 * u    : should be 1 if UDP packet else 0
 * af   : address family of the packet
 * afn  : address family of the new address
 * ua   : should be 1 if ip address needs to be updated in the packet else
 *        only the checksum is recalculated & updated.
 */
static void
pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p,
    u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn,
    u_int8_t u, sa_family_t af, sa_family_t afn, int ua)
{
    struct pf_addr ao;
    u_int16_t po = *p;

    PF_ACPY(&ao, a, af);
    if (ua) {
        PF_ACPY(a, an, afn);
    }

    *p = pn;

    switch (af) {
    case AF_INET:
        switch (afn) {
        case AF_INET:
            *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
                ao.addr16[0], an->addr16[0], 0),
                ao.addr16[1], an->addr16[1], 0);
            /*
             * If the packet originated from an ALG on the NAT gateway
             * (source address is loopback or local), the TCP/UDP
             * checksum field holds the pseudo-header checksum, which
             * is not yet complemented.  In that case no fixup is
             * needed for port translation, since the pseudo-header
             * checksum doesn't cover ports.
             *
             * A packet generated locally will have the UDP/TCP CSUM
             * flag set (it gets set in protocol output).
             *
             * Note that the fixup doesn't do anything if the
             * checksum is 0.
             */
            if (dir == PF_OUT && pbuf != NULL &&
                (*pbuf->pb_csum_flags & (CSUM_TCP | CSUM_UDP))) {
                /* Pseudo-header checksum does not include ports */
                *pc = ~pf_cksum_fixup(pf_cksum_fixup(~*pc,
                    ao.addr16[0], an->addr16[0], u),
                    ao.addr16[1], an->addr16[1], u);
            } else {
                *pc =
                    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    *pc, ao.addr16[0], an->addr16[0], u),
                    ao.addr16[1], an->addr16[1], u),
                    po, pn, u);
            }
            break;
        case AF_INET6:
            *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
                ao.addr16[0], an->addr16[0], u),
                ao.addr16[1], an->addr16[1], u),
                0, an->addr16[2], u),
                0, an->addr16[3], u),
                0, an->addr16[4], u),
                0, an->addr16[5], u),
                0, an->addr16[6], u),
                0, an->addr16[7], u),
                po, pn, u);
            break;
        }
        break;
    case AF_INET6:
        switch (afn) {
        case AF_INET6:
            /*
             * If the packet originated from an ALG on the NAT gateway
             * (source address is loopback or local), the TCP/UDP
             * checksum field holds the pseudo-header checksum, which
             * is not yet complemented.  A packet generated locally
             * will have the UDP/TCP CSUM flag set (it gets set in
             * protocol output).
             */
            if (dir == PF_OUT && pbuf != NULL &&
                (*pbuf->pb_csum_flags & (CSUM_TCPIPV6 |
                CSUM_UDPIPV6))) {
                /* Pseudo-header checksum does not include ports */
                *pc =
                    ~pf_cksum_fixup(pf_cksum_fixup(
                    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    ~*pc,
                    ao.addr16[0], an->addr16[0], u),
                    ao.addr16[1], an->addr16[1], u),
                    ao.addr16[2], an->addr16[2], u),
                    ao.addr16[3], an->addr16[3], u),
                    ao.addr16[4], an->addr16[4], u),
                    ao.addr16[5], an->addr16[5], u),
                    ao.addr16[6], an->addr16[6], u),
                    ao.addr16[7], an->addr16[7], u);
            } else {
                *pc =
                    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                    *pc,
                    ao.addr16[0], an->addr16[0], u),
                    ao.addr16[1], an->addr16[1], u),
                    ao.addr16[2], an->addr16[2], u),
                    ao.addr16[3], an->addr16[3], u),
                    ao.addr16[4], an->addr16[4], u),
                    ao.addr16[5], an->addr16[5], u),
                    ao.addr16[6], an->addr16[6], u),
                    ao.addr16[7], an->addr16[7], u),
                    po, pn, u);
            }
            break;
        case AF_INET:
            *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
                ao.addr16[0], an->addr16[0], u),
                ao.addr16[1], an->addr16[1], u),
                ao.addr16[2], 0, u),
                ao.addr16[3], 0, u),
                ao.addr16[4], 0, u),
                ao.addr16[5], 0, u),
                ao.addr16[6], 0, u),
                ao.addr16[7], 0, u),
                po, pn, u);
            break;
        }
        break;
    }
}
/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
    u_int32_t ao;

    memcpy(&ao, a, sizeof(ao));
    memcpy(a, &an, sizeof(u_int32_t));
    *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
        ao % 65536, an % 65536, u);
}
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
    struct pf_addr ao;

    PF_ACPY(&ao, a, AF_INET6);
    PF_ACPY(a, an, AF_INET6);

    *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
        pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
        pf_cksum_fixup(pf_cksum_fixup(*c,
        ao.addr16[0], an->addr16[0], u),
        ao.addr16[1], an->addr16[1], u),
        ao.addr16[2], an->addr16[2], u),
        ao.addr16[3], an->addr16[3], u),
        ao.addr16[4], an->addr16[4], u),
        ao.addr16[5], an->addr16[5], u),
        ao.addr16[6], an->addr16[6], u),
        ao.addr16[7], an->addr16[7], u);
}
void
pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u,
    sa_family_t af, sa_family_t afn)
{
    struct pf_addr ao;

    PF_ACPY(&ao, a, af);
    PF_ACPY(a, an, afn);

    switch (af) {
    case AF_INET:
        switch (afn) {
        case AF_INET:
            pf_change_a(a, c, an->v4addr.s_addr, u);
            break;
        case AF_INET6:
            *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(*c,
                ao.addr16[0], an->addr16[0], u),
                ao.addr16[1], an->addr16[1], u),
                0, an->addr16[2], u),
                0, an->addr16[3], u),
                0, an->addr16[4], u),
                0, an->addr16[5], u),
                0, an->addr16[6], u),
                0, an->addr16[7], u);
            break;
        }
        break;
    case AF_INET6:
        switch (afn) {
        case AF_INET:
            *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
                pf_cksum_fixup(pf_cksum_fixup(*c,
                ao.addr16[0], an->addr16[0], u),
                ao.addr16[1], an->addr16[1], u),
                ao.addr16[2], 0, u),
                ao.addr16[3], 0, u),
                ao.addr16[4], 0, u),
                ao.addr16[5], 0, u),
                ao.addr16[6], 0, u),
                ao.addr16[7], 0, u);
            break;
        case AF_INET6:
            pf_change_a6(a, c, an, u);
            break;
        }
        break;
    }
}
void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc = 0;

		if (pc != NULL) {
			opc = *pc;
		}
		*ip = np;
		if (pc != NULL) {
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		}
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL) {
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
		}
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
	case AF_INET: {
		u_int32_t	oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
	}
	/* Change outer ip address, fix outer ip or icmpv6 checksum. */
	PF_ACPY(oa, na, af);
	switch (af) {
	case AF_INET:
		*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
		    ooa.addr16[0], oa->addr16[0], 0),
		    ooa.addr16[1], oa->addr16[1], 0);
		break;
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ooa.addr16[0], oa->addr16[0], u),
		    ooa.addr16[1], oa->addr16[1], u),
		    ooa.addr16[2], oa->addr16[2], u),
		    ooa.addr16[3], oa->addr16[3], u),
		    ooa.addr16[4], oa->addr16[4], u),
		    ooa.addr16[5], oa->addr16[5], u),
		    ooa.addr16[6], oa->addr16[6], u),
		    ooa.addr16[7], oa->addr16[7], u);
		break;
	}
}
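/*
 * Summary note (illustrative): pf_change_icmp() has to keep three
 * checksums consistent at once -- the inner transport checksum (*pc),
 * the inner IP header checksum (*h2c), and the outer ICMP/ICMPv6
 * checksum (*ic), which covers the embedded packet and therefore
 * changes whenever either of the inner ones does.
 */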
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(pbuf_t *pbuf, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof (*th), thoptlen = hlen;
	u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(pbuf, off + sizeof (*th), opts, hlen,
	    NULL, NULL, pd->af)) {
		return 0;
	}

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen) {
				olen = hlen;
			}
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof (sack));
					pf_change_a(&sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof (sack));
				}
				copyback = off + sizeof (*th) + thoptlen;
			}
		/* FALLTHROUGH */
		default:
			if (olen < 2) {
				olen = 2;
			}
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback) {
		if (pf_lazy_makewritable(pd, pbuf, copyback) == NULL) {
			return -1;
		}
		pbuf_copy_back(pbuf, off + sizeof (*th), thoptlen, opts);
	}
	return copyback;
}
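/*
 * Worked example (illustrative): with a sequence modulator of
 * dst->seqdiff == 1000, a peer-advertised SACK block [5000, 6000) is
 * rewritten to [4000, 5000) so the blocks stay consistent with the
 * already-modulated sequence numbers; th_sum is fixed up incrementally
 * for each 32-bit edge that changes.
 */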
/*
 * The following functions (pf_send_tcp and pf_send_icmp) are somewhat
 * special in that they originate "spurious" packets rather than
 * filter/NAT existing packets. As such, they're not a great fit for
 * the 'pbuf' shim, which assumes the underlying packet buffers are
 * allocated elsewhere.
 *
 * Since these functions are rarely used, we'll carry on allocating mbufs
 * and passing them to the IP stack for eventual routing.
 */
static void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
{
#pragma unused(eh, ifp)
	struct mbuf	*m;
	int		 len, tlen;
	struct ip	*h = NULL;
	struct ip6_hdr	*h6 = NULL;
	struct tcphdr	*th = NULL;
	char		*opt;
	struct pf_mtag	*pf_mtag;

	/* maximum segment size tcp option */
	tlen = sizeof (struct tcphdr);
	if (mss) {
		tlen += 4;
	}

	switch (af) {
	case AF_INET:
		len = sizeof (struct ip) + tlen;
		break;
	case AF_INET6:
		len = sizeof (struct ip6_hdr) + tlen;
		break;
	default:
		panic("pf_send_tcp: not AF_INET or AF_INET6!");
		return;
	}

	/* create outgoing mbuf */
	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL) {
		return;
	}

	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		return;
	}

	if (tag) {
		pf_mtag->pftag_flags |= PF_TAG_GENERATED;
	}
	pf_mtag->pftag_tag = rtag;

	if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid)) {
		pf_mtag->pftag_rtableid = r->rtableid;
	}

	/* add hints for ecn */
	pf_mtag->pftag_hdr = mtod(m, struct ip *);
	/* record address family */
	pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
	switch (af) {
	case AF_INET:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
		break;
	case AF_INET6:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
		break;
	}

	/* indicate this is TCP */
	m->m_pkthdr.pkt_proto = IPPROTO_TCP;

	/* Make sure headers are 32-bit aligned */
	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4addr.s_addr;
		h->ip_dst.s_addr = daddr->v4addr.s_addr;

		th = (struct tcphdr *)(void *)((caddr_t)h + sizeof (struct ip));
		break;
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6addr, sizeof (struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6addr, sizeof (struct in6_addr));

		th = (struct tcphdr *)(void *)
		    ((caddr_t)h6 + sizeof (struct ip6_hdr));
		break;
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
#if BYTE_ORDER != BIG_ENDIAN
		HTONS(mss);
#endif
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
	case AF_INET: {
		struct route ro;

		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_v = 4;
		h->ip_hl = sizeof (*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		/*
		 * ip_output() expects ip_len and ip_off to be in host order.
		 */
		h->ip_len = len;
		h->ip_off = (path_mtu_discovery ? IP_DF : 0);
		h->ip_ttl = ttl ? ttl : ip_defttl;
		h->ip_sum = 0;

		bzero(&ro, sizeof (ro));
		ip_output(m, NULL, &ro, 0, NULL, NULL);
		ROUTE_RELEASE(&ro);
		break;
	}
	case AF_INET6: {
		struct route_in6 ro6;

		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof (struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		bzero(&ro6, sizeof (ro6));
		ip6_output(m, NULL, &ro6, 0, NULL, NULL, NULL);
		ROUTE_RELEASE(&ro6);
		break;
	}
	}
}
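/*
 * Typical use (see the RST generation in pf_test_rule below): a blocked
 * TCP segment is answered with
 *     pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
 *         th->th_sport, ntohl(th->th_ack), ack, TH_RST | TH_ACK, 0, 0,
 *         r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
 * i.e. source and destination are swapped and the peer's ACK becomes
 * our sequence number.
 */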
static void
pf_send_icmp(pbuf_t *pbuf, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf	*m0;
	struct pf_mtag	*pf_mtag;

	m0 = pbuf_clone_to_mbuf(pbuf);
	if (m0 == NULL) {
		return;
	}

	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		return;
	}

	pf_mtag->pftag_flags |= PF_TAG_GENERATED;

	if (PF_RTABLEID_IS_VALID(r->rtableid)) {
		pf_mtag->pftag_rtableid = r->rtableid;
	}

	/* add hints for ecn */
	pf_mtag->pftag_hdr = mtod(m0, struct ip *);
	/* record address family */
	pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
	switch (af) {
	case AF_INET:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
		m0->m_pkthdr.pkt_proto = IPPROTO_ICMP;
		break;
	case AF_INET6:
		pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
		m0->m_pkthdr.pkt_proto = IPPROTO_ICMPV6;
		break;
	}

	switch (af) {
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
	}
}
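/*
 * Note (illustrative): icmp_error()/icmp6_error() take ownership of
 * m0 -- they build the error reply from it and free it -- so no
 * cleanup is needed here on the success path.
 */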
/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) {
			match++;
		}
		break;
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		    (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		    (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		    (b->addr32[3] & m->addr32[3]))) {
			match++;
		}
		break;
	}
	if (match) {
		if (n) {
			return 0;
		}
		return 1;
	} else {
		if (n) {
			return 1;
		}
		return 0;
	}
}
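/*
 * Example (illustrative): with af == AF_INET, m = 255.255.255.0,
 * a = 192.0.2.77 and b = 192.0.2.0 match (n == 0 returns 1), because
 * both addresses agree on the 24 masked bits.
 */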
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0])) {
			return 0;
		}
		break;
	case AF_INET6: {
		int	i;

		/* check a >= b */
		for (i = 0; i < 4; ++i) {
			if (a->addr32[i] > b->addr32[i]) {
				break;
			} else if (a->addr32[i] < b->addr32[i]) {
				return 0;
			}
		}
		/* check a <= e */
		for (i = 0; i < 4; ++i) {
			if (a->addr32[i] < e->addr32[i]) {
				break;
			} else if (a->addr32[i] > e->addr32[i]) {
				return 0;
			}
		}
		break;
	}
	}
	return 1;
}
int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return (p > a1) && (p < a2);
	case PF_OP_XRG:
		return (p < a1) || (p > a2);
	case PF_OP_RG:
		return (p >= a1) && (p <= a2);
	case PF_OP_EQ:
		return p == a1;
	case PF_OP_NE:
		return p != a1;
	case PF_OP_LT:
		return p < a1;
	case PF_OP_LE:
		return p <= a1;
	case PF_OP_GT:
		return p > a1;
	case PF_OP_GE:
		return p >= a1;
	}
	return 0; /* never reached */
}
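/*
 * Operator note (illustrative): these comparisons give the pf.conf
 * rule operators their meaning -- '><' (PF_OP_IRG) is the exclusive
 * range, '<>' (PF_OP_XRG) matches outside the range, and ':'
 * (PF_OP_RG) is the inclusive range a1 <= p <= a2.
 */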
int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(a1);
	NTOHS(a2);
	NTOHS(p);
#endif
	return pf_match(op, a1, a2, p);
}
int
pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
    union pf_state_xport *sx)
{
	int	d = 1;

	if (sx) {
		switch (proto) {
		case IPPROTO_GRE:
			if (proto_variant == PF_GRE_PPTP_VARIANT) {
				d = (rx->call_id == sx->call_id);
			}
			break;

		case IPPROTO_ESP:
			d = (rx->spi == sx->spi);
			break;

		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			if (rx->range.op) {
				d = pf_match_port(rx->range.op,
				    rx->range.port[0], rx->range.port[1],
				    sx->port);
			}
			break;

		default:
			break;
		}
	}

	return d;
}
int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) {
		return 0;
	}
	return pf_match(op, a1, a2, u);
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) {
		return 0;
	}
	return pf_match(op, a1, a2, g);
}
static int
pf_match_tag(struct pf_rule *r, struct pf_mtag *pf_mtag, int *tag)
{
	if (*tag == -1) {
		*tag = pf_mtag->pftag_tag;
	}

	return (!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag);
}
int
pf_tag_packet(pbuf_t *pbuf, struct pf_mtag *pf_mtag, int tag,
    unsigned int rtableid, struct pf_pdesc *pd)
{
	if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid) &&
	    (pd == NULL || !(pd->pktflags & PKTF_FLOW_ID))) {
		return 0;
	}

	if (pf_mtag == NULL && (pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) {
		return 1;
	}

	if (tag > 0) {
		pf_mtag->pftag_tag = tag;
	}
	if (PF_RTABLEID_IS_VALID(rtableid)) {
		pf_mtag->pftag_rtableid = rtableid;
	}
	if (pd != NULL && (pd->pktflags & PKTF_FLOW_ID)) {
		*pbuf->pb_flowsrc = pd->flowsrc;
		*pbuf->pb_flowid = pd->flowhash;
		*pbuf->pb_flags |= pd->pktflags;
		*pbuf->pb_proto = pd->proto;
	}

	return 0;
}
static void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe	*f;

	(*r)->anchor->match = 0;
	if (match) {
		*match = 0;
	}
	if (*depth >= (int)sizeof (pf_anchor_stack) /
	    (int)sizeof (pf_anchor_stack[0])) {
		printf("pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (*depth == 0 && a != NULL) {
		*a = *r;
	}
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
		    NULL) {
			*r = NULL;
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}
static int
pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe	*f;
	int quick = 0;

	do {
		if (*depth <= 0) {
			break;
		}
		f = pf_anchor_stack + *depth - 1;
		if (f->parent != NULL && f->child != NULL) {
			if (f->child->match ||
			    (match != NULL && *match)) {
				f->r->anchor->match = 1;
				if (match) {
					*match = 0;
				}
			}
			f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
			if (f->child != NULL) {
				*rs = &f->child->ruleset;
				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
				if (*r == NULL) {
					continue;
				} else {
					break;
				}
			}
		}
		(*depth)--;
		if (*depth == 0 && a != NULL) {
			*a = NULL;
		}
		*rs = f->rs;
		if (f->r->anchor->match || (match != NULL && *match)) {
			quick = f->r->quick;
		}
		*r = TAILQ_NEXT(f->r, entries);
	} while (*r == NULL);

	return quick;
}
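/*
 * Anchor traversal note (illustrative): pf_step_into_anchor() and
 * pf_step_out_of_anchor() form an explicit stack machine. Wildcard
 * anchors keep a parent/child cursor and walk every child ruleset via
 * RB_NEXT before the frame is popped; a matching child propagates its
 * result into f->r->anchor->match so 'quick' semantics survive the
 * unwind.
 */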
void
pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		break;
	case AF_INET6:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
		    ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
		    ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
		    ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
		break;
	}
}
void
pf_addr_inc(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
		break;
	case AF_INET6:
		if (addr->addr32[3] == 0xffffffff) {
			addr->addr32[3] = 0;
			if (addr->addr32[2] == 0xffffffff) {
				addr->addr32[2] = 0;
				if (addr->addr32[1] == 0xffffffff) {
					addr->addr32[1] = 0;
					addr->addr32[0] =
					    htonl(ntohl(addr->addr32[0]) + 1);
				} else {
					addr->addr32[1] =
					    htonl(ntohl(addr->addr32[1]) + 1);
				}
			} else {
				addr->addr32[2] =
				    htonl(ntohl(addr->addr32[2]) + 1);
			}
		} else {
			addr->addr32[3] =
			    htonl(ntohl(addr->addr32[3]) + 1);
		}
		break;
	}
}
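/*
 * Example (illustrative): incrementing 2001:db8::ffff:ffff:ffff yields
 * 2001:db8::1:0:0 -- addr32[3] wraps to zero and the carry bumps
 * addr32[2] -- with each limb round-tripped through ntohl()/htonl()
 * because pf_addr stores addresses in network byte order.
 */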
#define mix(a, b, c) \
	do {					\
		a -= b; a -= c; a ^= (c >> 13);	\
		b -= c; b -= a; b ^= (a << 8);	\
		c -= a; c -= b; c ^= (b >> 13);	\
		a -= b; a -= c; a ^= (c >> 12);	\
		b -= c; b -= a; b ^= (a << 16);	\
		c -= a; c -= b; c ^= (b >> 5);	\
		a -= b; a -= c; a ^= (c >> 3);	\
		b -= c; b -= a; b ^= (a << 10);	\
		c -= a; c -= b; c ^= (b >> 15);	\
	} while (0)
/*
 * hash function based on bridge_hash in if_bridge.c
 */
static void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t	a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
	case AF_INET6:
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		c += key->key32[1];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		c += key->key32[2];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		c += key->key32[3];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
	}
}
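/*
 * Note (illustrative): the mix() macro above is the classic Bob
 * Jenkins avalanche step. With the pool key folded in, equal source
 * addresses always hash to the same value, which is what makes
 * PF_POOL_SRCHASH mappings stable for a given source host.
 */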
int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	unsigned char		 hash[16];
	struct pf_pool		*rpool = &r->rpool;
	struct pf_addr		*raddr = &rpool->cur->addr.v.a.addr;
	struct pf_addr		*rmask = &rpool->cur->addr.v.a.mask;
	struct pf_pooladdr	*acur = rpool->cur;
	struct pf_src_node	 k;

	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		k.af = af;
		PF_ACPY(&k.addr, saddr, af);
		if (r->rule_flag & PFRULE_RULESRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR) {
			k.rule.ptr = r;
		} else {
			k.rule.ptr = NULL;
		}
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
		if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, rpool->af)) {
			PF_ACPY(naddr, &(*sn)->raddr, rpool->af);
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf_map_addr: src tracking maps ");
				pf_print_host(&k.addr, 0, af);
				printf(" to ");
				pf_print_host(naddr, 0, rpool->af);
				printf("\n");
			}
			return 0;
		}
	}

	if (rpool->cur->addr.type == PF_ADDR_NOROUTE) {
		return 1;
	}
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		if (rpool->cur->addr.p.dyn == NULL) {
			return 1;
		}
		switch (rpool->af) {
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN) {
				return 1;
			}
			raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN) {
				return 1;
			}
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) {
			return 1; /* unsupported */
		}
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, rpool->af);
		break;
	case PF_POOL_BITMASK:
		ASSERT(af == rpool->af);
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, rpool->af)) {
			switch (af) {
			case AF_INET:
				rpool->counter.addr32[0] = htonl(random());
				break;
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff) {
					rpool->counter.addr32[3] =
					    RandomULong();
				}
				if (rmask->addr32[2] != 0xffffffff) {
					rpool->counter.addr32[2] =
					    RandomULong();
				}
				if (rmask->addr32[1] != 0xffffffff) {
					rpool->counter.addr32[1] =
					    RandomULong();
				}
				if (rmask->addr32[0] != 0xffffffff) {
					rpool->counter.addr32[0] =
					    RandomULong();
				}
				break;
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter,
			    rpool->af);
			PF_ACPY(init_addr, naddr, rpool->af);
		} else {
			PF_AINC(&rpool->counter, rpool->af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter,
			    rpool->af);
		}
		break;
	case PF_POOL_SRCHASH:
		ASSERT(af == rpool->af);
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		pf_hash(saddr, (struct pf_addr *)(void *)&hash,
		    &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask,
		    (struct pf_addr *)(void *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				goto get_addr;
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (rpool->cur->addr.p.dyn != NULL &&
			    !pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				goto get_addr;
			}
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter,
		    rpool->af)) {
			goto get_addr;
		}

	try_next:
		if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL) {
			rpool->cur = TAILQ_FIRST(&rpool->list);
		}
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				/* table contains no address of type
				 * 'rpool->af' */
				if (rpool->cur != acur) {
					goto try_next;
				}
				return 1;
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (rpool->cur->addr.p.dyn == NULL) {
				return 1;
			}
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				/* table contains no address of type
				 * 'rpool->af' */
				if (rpool->cur != acur) {
					goto try_next;
				}
				return 1;
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, rpool->af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, rpool->af);
		if (init_addr != NULL && PF_AZERO(init_addr, rpool->af)) {
			PF_ACPY(init_addr, naddr, rpool->af);
		}
		PF_AINC(&rpool->counter, rpool->af);
		break;
	}

	if (*sn != NULL) {
		PF_ACPY(&(*sn)->raddr, naddr, rpool->af);
	}

	if (pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		printf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, rpool->af);
		printf("\n");
	}

	return 0;
}
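/*
 * Pool-type summary (illustrative pf.conf, not from this source):
 *     nat on en0 from 10.0.0.0/8 to any \
 *         -> { 198.51.100.1, 198.51.100.2 } round-robin sticky-address
 * round-robin walks rpool->list via rpool->counter; sticky-address is
 * the PF_POOL_STICKYADDR path above, which replays a source node's
 * cached raddr so one internal host keeps one external address.
 */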
static int
pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r,
    struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, struct pf_addr *naddr,
    union pf_state_xport *nxport, struct pf_src_node **sn
    )
{
#pragma unused(kif)
	struct pf_state_key_cmp	key;
	struct pf_addr		init_addr;
	unsigned int		cut;
	sa_family_t		af = pd->af;
	u_int8_t		proto = pd->proto;
	unsigned int		low = r->rpool.proxy_port[0];
	unsigned int		high = r->rpool.proxy_port[1];

	bzero(&init_addr, sizeof (init_addr));
	if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) {
		return 1;
	}

	if (proto == IPPROTO_ICMP) {
		low = 1;
		high = 65535;
	}

	if (!nxport) {
		return 0; /* No output necessary. */
	}
	/*--- Special mapping rules for UDP ---*/
	if (proto == IPPROTO_UDP) {
		/*--- Never float IKE source port ---*/
		if (ntohs(sxport->port) == PF_IKE_PORT) {
			nxport->port = sxport->port;
			return 0;
		}

		/*--- Apply exterior mapping options ---*/
		if (r->extmap > PF_EXTMAP_APD) {
			struct pf_state	*s;

			TAILQ_FOREACH(s, &state_list, entry_list) {
				struct pf_state_key	*sk = s->state_key;
				if (!sk) {
					continue;
				}
				if (s->nat_rule.ptr != r) {
					continue;
				}
				if (sk->proto != IPPROTO_UDP ||
				    sk->af_lan != af) {
					continue;
				}
				if (sk->lan.xport.port != sxport->port) {
					continue;
				}
				if (PF_ANEQ(&sk->lan.addr, saddr, af)) {
					continue;
				}
				if (r->extmap < PF_EXTMAP_EI &&
				    PF_ANEQ(&sk->ext_lan.addr, daddr, af)) {
					continue;
				}

				nxport->port = sk->gwy.xport.port;
				return 0;
			}
		}
	} else if (proto == IPPROTO_TCP) {
		struct pf_state	*s;

		/*
		 * APPLE MODIFICATION: <rdar://problem/6546358>
		 * Fix allows....NAT to use a single binding for TCP session
		 * with same source IP and source port
		 */
		TAILQ_FOREACH(s, &state_list, entry_list) {
			struct pf_state_key	*sk = s->state_key;
			if (!sk) {
				continue;
			}
			if (s->nat_rule.ptr != r) {
				continue;
			}
			if (sk->proto != IPPROTO_TCP || sk->af_lan != af) {
				continue;
			}
			if (sk->lan.xport.port != sxport->port) {
				continue;
			}
			if (!(PF_AEQ(&sk->lan.addr, saddr, af))) {
				continue;
			}
			nxport->port = sk->gwy.xport.port;
			return 0;
		}
	}
	do {
		key.af_gwy = af;
		key.proto = proto;
		PF_ACPY(&key.ext_gwy.addr, daddr, key.af_gwy);
		PF_ACPY(&key.gwy.addr, naddr, key.af_gwy);
		switch (proto) {
		case IPPROTO_UDP:
			key.proto_variant = r->extfilter;
			break;
		default:
			key.proto_variant = 0;
			break;
		}
		if (dxport) {
			key.ext_gwy.xport = *dxport;
		} else {
			memset(&key.ext_gwy.xport, 0,
			    sizeof (key.ext_gwy.xport));
		}
		/*
		 * port search; start random, step;
		 * similar 2 portloop in in_pcbbind
		 */
		if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
		    proto == IPPROTO_ICMP)) {
			if (dxport) {
				key.gwy.xport = *dxport;
			} else {
				memset(&key.gwy.xport, 0,
				    sizeof (key.gwy.xport));
			}
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				return 0;
			}
		} else if (low == 0 && high == 0) {
			key.gwy.xport = *nxport;
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL
			    ) {
				return 0;
			}
		} else if (low == high) {
			key.gwy.xport.port = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL
			    ) {
				nxport->port = htons(low);
				return 0;
			}
		} else {
			unsigned int tmp;

			if (low > high) {
				tmp = low;
				low = high;
				high = tmp;
			}
			/* low < high */
			cut = htonl(random()) % (1 + high - low) + low;
			/* low <= cut <= high */
			for (tmp = cut; tmp <= high; ++(tmp)) {
				key.gwy.xport.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) == NULL
				    ) {
					nxport->port = htons(tmp);
					return 0;
				}
			}
			for (tmp = cut - 1; tmp >= low; --(tmp)) {
				key.gwy.xport.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) == NULL
				    ) {
					nxport->port = htons(tmp);
					return 0;
				}
			}
		}

		switch (r->rpool.opts & PF_POOL_TYPEMASK) {
		case PF_POOL_RANDOM:
		case PF_POOL_ROUNDROBIN:
			if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) {
				return 1;
			}
			break;
		case PF_POOL_NONE:
		case PF_POOL_SRCHASH:
		case PF_POOL_BITMASK:
		default:
			return 1;
		}
	} while (!PF_AEQ(&init_addr, naddr, af));

	return 1; /* none available */
}
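/*
 * Port-search strategy (illustrative): pick a random 'cut' in
 * [low, high], probe upward from cut to high, then downward from
 * cut - 1 to low, taking the first port for which pf_find_state_all()
 * reports no collision. This mirrors the randomised bind loop in
 * in_pcbbind while keeping the scan bounded.
 */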
static struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, pbuf_t *pbuf, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr,
    union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, int rs_num)
{
	struct pf_rule		*r, *rm = NULL;
	struct pf_ruleset	*ruleset = NULL;
	int			 tag = -1;
	unsigned int		 rtableid = IFSCOPE_NONE;
	int			 asd = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
	while (r && rm == NULL) {
		struct pf_rule_addr	*src = NULL, *dst = NULL;
		struct pf_addr_wrap	*xdst = NULL;
		struct pf_addr_wrap	*xsrc = NULL;
		union pf_rule_xport	rdrxport;

		if (r->action == PF_BINAT && direction == PF_IN) {
			src = &r->dst;
			if (r->rpool.cur != NULL) {
				xdst = &r->rpool.cur->addr;
			}
		} else if (r->action == PF_RDR && direction == PF_OUT) {
			dst = &r->src;
			src = &r->dst;
			if (r->rpool.cur != NULL) {
				rdrxport.range.op = PF_OP_EQ;
				rdrxport.range.port[0] =
				    htons(r->rpool.proxy_port[0]);
				xsrc = &r->rpool.cur->addr;
			}
		} else {
			src = &r->src;
			dst = &r->dst;
		}

		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot) {
			r = r->skip[PF_SKIP_IFP].ptr;
		} else if (r->direction && r->direction != direction) {
			r = r->skip[PF_SKIP_DIR].ptr;
		} else if (r->af && r->af != pd->af) {
			r = r->skip[PF_SKIP_AF].ptr;
		} else if (r->proto && r->proto != pd->proto) {
			r = r->skip[PF_SKIP_PROTO].ptr;
		} else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL)) {
			r = TAILQ_NEXT(r, entries);
		} else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif)) {
			r = TAILQ_NEXT(r, entries);
		} else if (xsrc && (!rdrxport.range.port[0] ||
		    !pf_match_xport(r->proto, r->proto_variant, &rdrxport,
		    sxport))) {
			r = TAILQ_NEXT(r, entries);
		} else if (!xsrc && !pf_match_xport(r->proto,
		    r->proto_variant, &src->xport, sxport)) {
			r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
			    PF_SKIP_DST_PORT].ptr;
		} else if (dst != NULL &&
		    PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) {
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		} else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
		    0, NULL)) {
			r = TAILQ_NEXT(r, entries);
		} else if (dst && !pf_match_xport(r->proto, r->proto_variant,
		    &dst->xport, dxport)) {
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		} else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
		    IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, pbuf,
		    off, pd->hdr.tcp), r->os_fingerprint))) {
			r = TAILQ_NEXT(r, entries);
		} else {
			if (r->tag) {
				tag = r->tag;
			}
			if (PF_RTABLEID_IS_VALID(r->rtableid)) {
				rtableid = r->rtableid;
			}
			if (r->anchor == NULL) {
				rm = r;
			} else {
				pf_step_into_anchor(&asd, &ruleset, rs_num,
				    &r, NULL, NULL);
			}
		}
		if (r == NULL) {
			pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
			    NULL, NULL);
		}
	}
	if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, NULL)) {
		return NULL;
	}
	if (rm != NULL && (rm->action == PF_NONAT ||
	    rm->action == PF_NORDR || rm->action == PF_NOBINAT ||
	    rm->action == PF_NONAT64)) {
		return NULL;
	}
	return rm;
}
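/*
 * Note (illustrative): "no nat"/"no rdr"/"no binat" rules participate
 * in matching like any other translation rule, but a hit deliberately
 * yields no rule (NULL), which is how address ranges are exempted from
 * translation.
 */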
/*
 * Get address translation information for NAT/BINAT/RDR
 * pd		: pf packet descriptor
 * pbuf		: pbuf holding the packet
 * off		: offset to protocol header
 * direction	: direction of packet
 * kif		: pf interface info obtained from the packet's recv interface
 * sn		: source node pointer (output)
 * saddr	: packet source address
 * sxport	: packet source port
 * daddr	: packet destination address
 * dxport	: packet destination port
 * nsxport	: translated source port (output)
 *
 * Translated source & destination address are updated in pd->nsaddr &
 * pd->ndaddr
 */
static struct pf_rule *
pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off,
    int direction, struct pfi_kif *kif, struct pf_src_node **sn,
    struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, union pf_state_xport *nsxport
    )
{
	struct pf_rule	*r = NULL;

	if (direction == PF_OUT) {
		r = pf_match_translation(pd, pbuf, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_BINAT);
		if (r == NULL) {
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_RDR);
		}
		if (r == NULL) {
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_NAT);
		}
	} else {
		r = pf_match_translation(pd, pbuf, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_RDR);
		if (r == NULL) {
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_BINAT);
		}
		if (r == NULL) {
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_NAT);
		}
	}

	if (r != NULL) {
		struct pf_addr *nsaddr = &pd->naddr;
		struct pf_addr *ndaddr = &pd->ndaddr;

		*nsaddr = *saddr;
		*ndaddr = *daddr;

		switch (r->action) {
		case PF_NONAT:
		case PF_NOBINAT:
		case PF_NORDR:
		case PF_NONAT64:
			return NULL;
		case PF_NAT:
		case PF_NAT64:
			/*
			 * we do NAT64 on incoming path and we call ip_input
			 * which asserts receive interface to be not NULL.
			 * The below check is to prevent NAT64 action on any
			 * packet generated by local entity using synthesized
			 * IPv6 address.
			 */
			if ((r->action == PF_NAT64) && (direction == PF_OUT)) {
				return NULL;
			}

			if (pf_get_sport(pd, kif, r, saddr, sxport, daddr,
			    dxport, nsaddr, nsxport, sn
			    )) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: NAT proxy port allocation "
				    "(%u-%u) failed\n",
				    r->rpool.proxy_port[0],
				    r->rpool.proxy_port[1]));
				return NULL;
			}
			/*
			 * For NAT64 the destination IPv4 address is derived
			 * from the last 32 bits of synthesized IPv6 address
			 */
			if (r->action == PF_NAT64) {
				ndaddr->v4addr.s_addr = daddr->addr32[3];
				pd->naf = r->af;
			}
			break;
		case PF_BINAT:
			switch (direction) {
			case PF_OUT:
				if (r->rpool.cur->addr.type ==
				    PF_ADDR_DYNIFTL) {
					if (r->rpool.cur->addr.p.dyn == NULL) {
						return NULL;
					}
					switch (pd->af) {
					case AF_INET:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt4 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr4,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask4,
						    saddr, AF_INET);
						break;
					case AF_INET6:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt6 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr6,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask6,
						    saddr, AF_INET6);
						break;
					}
				} else {
					PF_POOLMASK(nsaddr,
					    &r->rpool.cur->addr.v.a.addr,
					    &r->rpool.cur->addr.v.a.mask,
					    saddr, pd->af);
				}
				break;
			case PF_IN:
				if (r->src.addr.type == PF_ADDR_DYNIFTL) {
					if (r->src.addr.p.dyn == NULL) {
						return NULL;
					}
					switch (pd->af) {
					case AF_INET:
						if (r->src.addr.p.dyn->
						    pfid_acnt4 < 1) {
							return NULL;
						}
						PF_POOLMASK(ndaddr,
						    &r->src.addr.p.dyn->
						    pfid_addr4,
						    &r->src.addr.p.dyn->
						    pfid_mask4,
						    daddr, AF_INET);
						break;
					case AF_INET6:
						if (r->src.addr.p.dyn->
						    pfid_acnt6 < 1) {
							return NULL;
						}
						PF_POOLMASK(ndaddr,
						    &r->src.addr.p.dyn->
						    pfid_addr6,
						    &r->src.addr.p.dyn->
						    pfid_mask6,
						    daddr, AF_INET6);
						break;
					}
				} else {
					PF_POOLMASK(ndaddr,
					    &r->src.addr.v.a.addr,
					    &r->src.addr.v.a.mask, daddr,
					    pd->af);
				}
				break;
			}
			break;
		case PF_RDR:
			switch (direction) {
			case PF_OUT:
				if (r->dst.addr.type == PF_ADDR_DYNIFTL) {
					if (r->dst.addr.p.dyn == NULL) {
						return NULL;
					}
					switch (pd->af) {
					case AF_INET:
						if (r->dst.addr.p.dyn->
						    pfid_acnt4 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->dst.addr.p.dyn->
						    pfid_addr4,
						    &r->dst.addr.p.dyn->
						    pfid_mask4,
						    daddr, AF_INET);
						break;
					case AF_INET6:
						if (r->dst.addr.p.dyn->
						    pfid_acnt6 < 1) {
							return NULL;
						}
						PF_POOLMASK(nsaddr,
						    &r->dst.addr.p.dyn->
						    pfid_addr6,
						    &r->dst.addr.p.dyn->
						    pfid_mask6,
						    daddr, AF_INET6);
						break;
					}
				} else {
					PF_POOLMASK(nsaddr,
					    &r->dst.addr.v.a.addr,
					    &r->dst.addr.v.a.mask,
					    daddr, pd->af);
				}
				if (nsxport && r->dst.xport.range.port[0]) {
					nsxport->port =
					    r->dst.xport.range.port[0];
				}
				break;
			case PF_IN:
				if (pf_map_addr(pd->af, r, saddr,
				    ndaddr, NULL, sn)) {
					return NULL;
				}
				if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
				    PF_POOL_BITMASK) {
					PF_POOLMASK(ndaddr, ndaddr,
					    &r->rpool.cur->addr.v.a.mask, daddr,
					    pd->af);
				}

				if (nsxport && dxport) {
					if (r->rpool.proxy_port[1]) {
						u_int32_t	tmp_nport;

						tmp_nport =
						    ((ntohs(dxport->port) -
						    ntohs(r->dst.xport.range.
						    port[0])) %
						    (r->rpool.proxy_port[1] -
						    r->rpool.proxy_port[0] +
						    1)) +
						    r->rpool.proxy_port[0];

						/* wrap around if necessary */
						if (tmp_nport > 65535) {
							tmp_nport -= 65535;
						}
						nsxport->port =
						    htons((u_int16_t)tmp_nport);
					} else if (r->rpool.proxy_port[0]) {
						nsxport->port = htons(r->rpool.
						    proxy_port[0]);
					}
				}
				break;
			}
			break;
		default:
			return NULL;
		}
	}

	return r;
}
static int
pf_socket_lookup(int direction, struct pf_pdesc *pd)
{
	struct pf_addr		*saddr, *daddr;
	u_int16_t		 sport, dport;
	struct inpcbinfo	*pi;
	int			 inp = 0;

	if (pd == NULL) {
		return -1;
	}
	pd->lookup.uid = UID_MAX;
	pd->lookup.gid = GID_MAX;
	pd->lookup.pid = NO_PID;

	switch (pd->proto) {
	case IPPROTO_TCP:
		if (pd->hdr.tcp == NULL) {
			return -1;
		}
		sport = pd->hdr.tcp->th_sport;
		dport = pd->hdr.tcp->th_dport;
		pi = &tcbinfo;
		break;
	case IPPROTO_UDP:
		if (pd->hdr.udp == NULL) {
			return -1;
		}
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		pi = &udbinfo;
		break;
	default:
		return -1;
	}
	if (direction == PF_IN) {
		saddr = pd->src;
		daddr = pd->dst;
	} else {
		u_int16_t	p;

		p = sport;
		sport = dport;
		dport = p;
		saddr = pd->dst;
		daddr = pd->src;
	}
	switch (pd->af) {
	case AF_INET:
		inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport, daddr->v4addr, dport,
		    0, &pd->lookup.uid, &pd->lookup.gid, NULL);
		if (inp == 0) {
			struct in6_addr s6, d6;

			memset(&s6, 0, sizeof (s6));
			s6.s6_addr16[5] = htons(0xffff);
			memcpy(&s6.s6_addr32[3], &saddr->v4addr,
			    sizeof (saddr->v4addr));

			memset(&d6, 0, sizeof (d6));
			d6.s6_addr16[5] = htons(0xffff);
			memcpy(&d6.s6_addr32[3], &daddr->v4addr,
			    sizeof (daddr->v4addr));

			inp = in6_pcblookup_hash_exists(pi, &s6, sport,
			    &d6, dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0) {
				inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport,
				    daddr->v4addr, dport, INPLOOKUP_WILDCARD, &pd->lookup.uid, &pd->lookup.gid, NULL);
				if (inp == 0) {
					inp = in6_pcblookup_hash_exists(pi, &s6, sport,
					    &d6, dport, INPLOOKUP_WILDCARD,
					    &pd->lookup.uid, &pd->lookup.gid, NULL);
					if (inp == 0) {
						return -1;
					}
				}
			}
		}
		break;
	case AF_INET6:
		inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr, sport, &daddr->v6addr,
		    dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
		if (inp == 0) {
			inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr, sport,
			    &daddr->v6addr, dport, INPLOOKUP_WILDCARD,
			    &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0) {
				return -1;
			}
		}
		break;
	default:
		return -1;
	}

	return 1;
}
static u_int8_t
pf_get_wscale(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af)
{
	int		 hlen;
	u_int8_t	 hdr[60];
	u_int8_t	*opt, optlen;
	u_int8_t	 wscale = 0;

	hlen = th_off << 2;	/* hlen <= sizeof (hdr) */
	if (hlen <= (int)sizeof (struct tcphdr)) {
		return 0;
	}
	if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) {
		return 0;
	}
	opt = hdr + sizeof (struct tcphdr);
	hlen -= sizeof (struct tcphdr);
	while (hlen >= 3) {
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_WINDOW:
			wscale = opt[2];
			if (wscale > TCP_MAX_WINSHIFT) {
				wscale = TCP_MAX_WINSHIFT;
			}
			wscale |= PF_WSCALE_FLAG;
		/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2) {
				optlen = 2;
			}
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return wscale;
}
static u_int16_t
pf_get_mss(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af)
{
	int		 hlen;
	u_int8_t	 hdr[60];
	u_int8_t	*opt, optlen;
	u_int16_t	 mss = tcp_mssdflt;

	hlen = th_off << 2;	/* hlen <= sizeof (hdr) */
	if (hlen <= (int)sizeof (struct tcphdr)) {
		return 0;
	}
	if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) {
		return 0;
	}
	opt = hdr + sizeof (struct tcphdr);
	hlen -= sizeof (struct tcphdr);
	while (hlen >= TCPOLEN_MAXSEG) {
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_MAXSEG:
			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
#if BYTE_ORDER != BIG_ENDIAN
			NTOHS(mss);
#endif
		/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2) {
				optlen = 2;
			}
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return mss;
}
static u_int16_t
pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
{
	struct route		 ro;
	struct sockaddr_in	*dst;
	struct sockaddr_in6	*dst6;
	struct route_in6	 ro6;
	struct rtentry		*rt = NULL;
	int			 hlen;
	u_int16_t		 mss = tcp_mssdflt;

	switch (af) {
	case AF_INET:
		hlen = sizeof (struct ip);
		bzero(&ro, sizeof (ro));
		dst = (struct sockaddr_in *)(void *)&ro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof (*dst);
		dst->sin_addr = addr->v4addr;
		rtalloc(&ro);
		rt = ro.ro_rt;
		break;
	case AF_INET6:
		hlen = sizeof (struct ip6_hdr);
		bzero(&ro6, sizeof (ro6));
		dst6 = (struct sockaddr_in6 *)(void *)&ro6.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof (*dst6);
		dst6->sin6_addr = addr->v6addr;
		rtalloc((struct route *)&ro6);
		rt = ro6.ro_rt;
		break;
	default:
		panic("pf_calc_mss: not AF_INET or AF_INET6!");
		return 0;
	}

	if (rt && rt->rt_ifp) {
		/* This is relevant only for PF SYN Proxy */
		int interface_mtu = rt->rt_ifp->if_mtu;

		if (af == AF_INET &&
		    INTF_ADJUST_MTU_FOR_CLAT46(rt->rt_ifp)) {
			interface_mtu = IN6_LINKMTU(rt->rt_ifp);
			/* Further adjust the size for CLAT46 expansion */
			interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
		}
		mss = interface_mtu - hlen - sizeof (struct tcphdr);
		mss = max(tcp_mssdflt, mss);
		rtfree(rt);
	}
	mss = min(mss, offer);
	mss = max(mss, 64);	/* sanity - at least max opt space */
	return mss;
}
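/*
 * Example (illustrative): for an IPv4 route over a 1500-byte MTU
 * interface, mss = 1500 - 20 (IP) - 20 (TCP) = 1460, then clamped to
 * the peer's offer and floored at 64 bytes.
 */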
static void
pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr, sa_family_t af)
{
	struct pf_rule	*r = s->rule.ptr;

	s->rt_kif = NULL;
	if (!r->rt || r->rt == PF_FASTROUTE) {
		return;
	}
	if ((af == AF_INET) || (af == AF_INET6)) {
		pf_map_addr(af, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
	}
}
static void
pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
{
	s->state_key = sk;
	sk->refcnt++;

	/* list is sorted, if-bound states before floating */
	if (tail) {
		TAILQ_INSERT_TAIL(&sk->states, s, next);
	} else {
		TAILQ_INSERT_HEAD(&sk->states, s, next);
	}
}
static void
pf_detach_state(struct pf_state *s, int flags)
{
	struct pf_state_key	*sk = s->state_key;

	if (sk == NULL) {
		return;
	}

	s->state_key = NULL;
	TAILQ_REMOVE(&sk->states, s, next);
	if (--sk->refcnt == 0) {
		if (!(flags & PF_DT_SKIP_EXTGWY)) {
			RB_REMOVE(pf_state_tree_ext_gwy,
			    &pf_statetbl_ext_gwy, sk);
		}
		if (!(flags & PF_DT_SKIP_LANEXT)) {
			RB_REMOVE(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext, sk);
		}
		if (sk->app_state) {
			pool_put(&pf_app_state_pl, sk->app_state);
		}
		pool_put(&pf_state_key_pl, sk);
	}
}
struct pf_state_key *
pf_alloc_state_key(struct pf_state *s, struct pf_state_key *psk)
{
	struct pf_state_key	*sk;

	if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL) {
		return NULL;
	}
	bzero(sk, sizeof (*sk));
	TAILQ_INIT(&sk->states);
	pf_attach_state(sk, s, 0);

	/* initialize state key from psk, if provided */
	if (psk != NULL) {
		bcopy(&psk->lan, &sk->lan, sizeof (sk->lan));
		bcopy(&psk->gwy, &sk->gwy, sizeof (sk->gwy));
		bcopy(&psk->ext_lan, &sk->ext_lan, sizeof (sk->ext_lan));
		bcopy(&psk->ext_gwy, &sk->ext_gwy, sizeof (sk->ext_gwy));
		sk->af_lan = psk->af_lan;
		sk->af_gwy = psk->af_gwy;
		sk->proto = psk->proto;
		sk->direction = psk->direction;
		sk->proto_variant = psk->proto_variant;
		VERIFY(psk->app_state == NULL);
		sk->flowsrc = psk->flowsrc;
		sk->flowhash = psk->flowhash;
		/* don't touch tree entries, states and refcnt on sk */
	}

	return sk;
}
static u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	MD5_CTX		 ctx;
	u_int32_t	 digest[4];

	if (pf_tcp_secret_init == 0) {
		read_frandom(pf_tcp_secret, sizeof (pf_tcp_secret));
		MD5Init(&pf_tcp_secret_ctx);
		MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
		    sizeof (pf_tcp_secret));
		pf_tcp_secret_init = 1;
	}
	ctx = pf_tcp_secret_ctx;

	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
	if (pd->af == AF_INET6) {
		MD5Update(&ctx, (char *)&pd->src->v6addr,
		    sizeof (struct in6_addr));
		MD5Update(&ctx, (char *)&pd->dst->v6addr,
		    sizeof (struct in6_addr));
	} else {
		MD5Update(&ctx, (char *)&pd->src->v4addr,
		    sizeof (struct in_addr));
		MD5Update(&ctx, (char *)&pd->dst->v4addr,
		    sizeof (struct in_addr));
	}
	MD5Final((u_char *)digest, &ctx);
	pf_tcp_iss_off += 4096;
	return digest[0] + random() + pf_tcp_iss_off;
}
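/*
 * ISN scheme note (illustrative, RFC 6528 style): MD5 over the
 * connection 4-tuple keyed by a boot-time random secret, plus a random
 * component and a monotonically advancing offset (+4096 per ISN), so
 * synproxy sequence numbers are neither guessable nor reused.
 */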
/*
 * This routine is called to perform address family translation on the
 * inner IP header (that may come as payload) of an ICMP(v4addr/6) error
 * response.
 */
static int
pf_change_icmp_af(pbuf_t *pbuf, int off,
    struct pf_pdesc *pd, struct pf_pdesc *pd2, struct pf_addr *src,
    struct pf_addr *dst, sa_family_t af, sa_family_t naf)
{
	struct ip		*ip4 = NULL;
	struct ip6_hdr		*ip6 = NULL;
	void			*hdr;
	int			 hlen, olen;

	if (af == naf || (af != AF_INET && af != AF_INET6) ||
	    (naf != AF_INET && naf != AF_INET6)) {
		return -1;
	}

	/* old header */
	olen = pd2->off - off;
	/* new header */
	hlen = naf == AF_INET ? sizeof (*ip4) : sizeof (*ip6);

	/* Modify the pbuf to accommodate the new header */
	hdr = pbuf_resize_segment(pbuf, off, olen, hlen);
	if (hdr == NULL) {
		return -1;
	}

	/* translate inner ip/ip6 header */
	switch (naf) {
	case AF_INET:
		ip4 = hdr;
		bzero(ip4, sizeof (*ip4));
		ip4->ip_v = IPVERSION;
		ip4->ip_hl = sizeof (*ip4) >> 2;
		ip4->ip_len = htons(sizeof (*ip4) + pd2->tot_len - olen);
		ip4->ip_id = rfc6864 ? 0 : htons(ip_randomid());
		ip4->ip_off = htons(IP_DF);
		ip4->ip_ttl = pd2->ttl;
		if (pd2->proto == IPPROTO_ICMPV6) {
			ip4->ip_p = IPPROTO_ICMP;
		} else {
			ip4->ip_p = pd2->proto;
		}
		ip4->ip_src = src->v4addr;
		ip4->ip_dst = dst->v4addr;
		ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2);
		break;
	case AF_INET6:
		ip6 = hdr;
		bzero(ip6, sizeof (*ip6));
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(pd2->tot_len - olen);
		if (pd2->proto == IPPROTO_ICMP) {
			ip6->ip6_nxt = IPPROTO_ICMPV6;
		} else {
			ip6->ip6_nxt = pd2->proto;
		}
		if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM) {
			ip6->ip6_hlim = IPV6_DEFHLIM;
		} else {
			ip6->ip6_hlim = pd2->ttl;
		}
		ip6->ip6_src = src->v6addr;
		ip6->ip6_dst = dst->v6addr;
		break;
	}

	/* adjust payload offset and total packet length */
	pd2->off += hlen - olen;
	pd->tot_len += hlen - olen;

	return 0;
}
#define PTR_IP(field)	((int32_t)offsetof(struct ip, field))
#define PTR_IP6(field)	((int32_t)offsetof(struct ip6_hdr, field))

static int
pf_translate_icmp_af(int af, void *arg)
{
	struct icmp		*icmp4;
	struct icmp6_hdr	*icmp6;
	u_int32_t		 mtu;
	int32_t			 ptr = -1;
	u_int8_t		 type;
	u_int8_t		 code;

	switch (af) {
	case AF_INET:
		icmp6 = arg;
		type = icmp6->icmp6_type;
		code = icmp6->icmp6_code;
		mtu = ntohl(icmp6->icmp6_mtu);

		switch (type) {
		case ICMP6_ECHO_REQUEST:
			type = ICMP_ECHO;
			break;
		case ICMP6_ECHO_REPLY:
			type = ICMP_ECHOREPLY;
			break;
		case ICMP6_DST_UNREACH:
			type = ICMP_UNREACH;
			switch (code) {
			case ICMP6_DST_UNREACH_NOROUTE:
			case ICMP6_DST_UNREACH_BEYONDSCOPE:
			case ICMP6_DST_UNREACH_ADDR:
				code = ICMP_UNREACH_HOST;
				break;
			case ICMP6_DST_UNREACH_ADMIN:
				code = ICMP_UNREACH_HOST_PROHIB;
				break;
			case ICMP6_DST_UNREACH_NOPORT:
				code = ICMP_UNREACH_PORT;
				break;
			default:
				return -1;
			}
			break;
		case ICMP6_PACKET_TOO_BIG:
			type = ICMP_UNREACH;
			code = ICMP_UNREACH_NEEDFRAG;
			mtu -= 20;
			break;
		case ICMP6_TIME_EXCEEDED:
			type = ICMP_TIMXCEED;
			break;
		case ICMP6_PARAM_PROB:
			switch (code) {
			case ICMP6_PARAMPROB_HEADER:
				type = ICMP_PARAMPROB;
				code = ICMP_PARAMPROB_ERRATPTR;
				ptr = ntohl(icmp6->icmp6_pptr);

				if (ptr == PTR_IP6(ip6_vfc)) {
					; /* preserve */
				} else if (ptr == PTR_IP6(ip6_vfc) + 1) {
					ptr = PTR_IP(ip_tos);
				} else if (ptr == PTR_IP6(ip6_plen) ||
				    ptr == PTR_IP6(ip6_plen) + 1) {
					ptr = PTR_IP(ip_len);
				} else if (ptr == PTR_IP6(ip6_nxt)) {
					ptr = PTR_IP(ip_p);
				} else if (ptr == PTR_IP6(ip6_hlim)) {
					ptr = PTR_IP(ip_ttl);
				} else if (ptr >= PTR_IP6(ip6_src) &&
				    ptr < PTR_IP6(ip6_dst)) {
					ptr = PTR_IP(ip_src);
				} else if (ptr >= PTR_IP6(ip6_dst) &&
				    ptr < (int32_t)sizeof (struct ip6_hdr)) {
					ptr = PTR_IP(ip_dst);
				} else {
					return -1;
				}
				break;
			case ICMP6_PARAMPROB_NEXTHEADER:
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_PROTOCOL;
				break;
			default:
				return -1;
			}
			break;
		default:
			return -1;
		}
		icmp6->icmp6_type = type;
		icmp6->icmp6_code = code;
		/* aligns well with a icmpv4 nextmtu */
		icmp6->icmp6_mtu = htonl(mtu);
		/* icmpv4 pptr is a one most significant byte */
		if (ptr >= 0) {
			icmp6->icmp6_pptr = htonl(ptr << 24);
		}
		break;

	case AF_INET6:
		icmp4 = arg;
		type = icmp4->icmp_type;
		code = icmp4->icmp_code;
		mtu = ntohs(icmp4->icmp_nextmtu);

		switch (type) {
		case ICMP_ECHO:
			type = ICMP6_ECHO_REQUEST;
			break;
		case ICMP_ECHOREPLY:
			type = ICMP6_ECHO_REPLY;
			break;
		case ICMP_UNREACH:
			type = ICMP6_DST_UNREACH;
			switch (code) {
			case ICMP_UNREACH_NET:
			case ICMP_UNREACH_HOST:
			case ICMP_UNREACH_NET_UNKNOWN:
			case ICMP_UNREACH_HOST_UNKNOWN:
			case ICMP_UNREACH_ISOLATED:
			case ICMP_UNREACH_TOSNET:
			case ICMP_UNREACH_TOSHOST:
				code = ICMP6_DST_UNREACH_NOROUTE;
				break;
			case ICMP_UNREACH_PORT:
				code = ICMP6_DST_UNREACH_NOPORT;
				break;
			case ICMP_UNREACH_NET_PROHIB:
			case ICMP_UNREACH_HOST_PROHIB:
			case ICMP_UNREACH_FILTER_PROHIB:
			case ICMP_UNREACH_PRECEDENCE_CUTOFF:
				code = ICMP6_DST_UNREACH_ADMIN;
				break;
			case ICMP_UNREACH_PROTOCOL:
				type = ICMP6_PARAM_PROB;
				code = ICMP6_PARAMPROB_NEXTHEADER;
				ptr = offsetof(struct ip6_hdr, ip6_nxt);
				break;
			case ICMP_UNREACH_NEEDFRAG:
				type = ICMP6_PACKET_TOO_BIG;
				code = 0;
				mtu += 20;
				break;
			default:
				return -1;
			}
			break;
		case ICMP_TIMXCEED:
			type = ICMP6_TIME_EXCEEDED;
			break;
		case ICMP_PARAMPROB:
			type = ICMP6_PARAM_PROB;
			switch (code) {
			case ICMP_PARAMPROB_ERRATPTR:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			case ICMP_PARAMPROB_LENGTH:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			default:
				return -1;
			}

			ptr = icmp4->icmp_pptr;
			if (ptr == 0 || ptr == PTR_IP(ip_tos)) {
				; /* preserve */
			} else if (ptr == PTR_IP(ip_len) ||
			    ptr == PTR_IP(ip_len) + 1) {
				ptr = PTR_IP6(ip6_plen);
			} else if (ptr == PTR_IP(ip_ttl)) {
				ptr = PTR_IP6(ip6_hlim);
			} else if (ptr == PTR_IP(ip_p)) {
				ptr = PTR_IP6(ip6_nxt);
			} else if (ptr >= PTR_IP(ip_src) &&
			    ptr < PTR_IP(ip_dst)) {
				ptr = PTR_IP6(ip6_src);
			} else if (ptr >= PTR_IP(ip_dst) &&
			    ptr < (int32_t)sizeof (struct ip)) {
				ptr = PTR_IP6(ip6_dst);
			} else {
				return -1;
			}
			break;
		default:
			return -1;
		}
		icmp4->icmp_type = type;
		icmp4->icmp_code = code;
		icmp4->icmp_nextmtu = htons(mtu);
		if (ptr >= 0) {
			icmp4->icmp_void = htonl(ptr);
		}
		break;
	}

	return 0;
}
/* Note: frees pbuf if PF_NAT64 is returned */
static int
pf_nat64_ipv6(pbuf_t *pbuf, int off, struct pf_pdesc *pd)
{
	struct ip	*ip4;
	struct mbuf	*m;

	/*
	 * ip_input asserts for rcvif to be not NULL
	 * That may not be true for two corner cases
	 * 1. If for some reason a local app sends DNS
	 * AAAA query to local host
	 * 2. If IPv6 stack in kernel internally generates a
	 * message destined for a synthesized IPv6 end-point.
	 */
	if (pbuf->pb_ifp == NULL) {
		return PF_DROP;
	}

	ip4 = (struct ip *)pbuf_resize_segment(pbuf, 0, off, sizeof (*ip4));
	if (ip4 == NULL) {
		return PF_DROP;
	}

	ip4->ip_v = 4;
	ip4->ip_hl = 5;
	ip4->ip_tos = pd->tos & htonl(0x0ff00000);
	ip4->ip_len = htons(sizeof (*ip4) + (pd->tot_len - off));
	ip4->ip_id = 0;
	ip4->ip_off = htons(IP_DF);
	ip4->ip_ttl = pd->ttl;
	ip4->ip_p = pd->proto;
	ip4->ip_sum = 0;
	ip4->ip_src = pd->naddr.v4addr;
	ip4->ip_dst = pd->ndaddr.v4addr;
	ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2);

	/* recalculate icmp checksums */
	if (pd->proto == IPPROTO_ICMP) {
		struct icmp	*icmp;
		int		 hlen = sizeof (*ip4);

		icmp = (struct icmp *)pbuf_contig_segment(pbuf, hlen,
		    ICMP_MINLEN);
		if (icmp == NULL) {
			return PF_DROP;
		}

		icmp->icmp_cksum = 0;
		icmp->icmp_cksum = pbuf_inet_cksum(pbuf, 0, hlen,
		    ntohs(ip4->ip_len) - hlen);
	}

	if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) {
		ip_input(m);
	}

	return PF_NAT64;
}
static int
pf_nat64_ipv4(pbuf_t *pbuf, int off, struct pf_pdesc *pd)
{
	struct ip6_hdr	*ip6;
	struct mbuf	*m;

	if (pbuf->pb_ifp == NULL) {
		return PF_DROP;
	}

	ip6 = (struct ip6_hdr *)pbuf_resize_segment(pbuf, 0, off,
	    sizeof (*ip6));
	if (ip6 == NULL) {
		return PF_DROP;
	}

	ip6->ip6_vfc = htonl((6 << 28) | (pd->tos << 20));
	ip6->ip6_plen = htons(pd->tot_len - off);
	ip6->ip6_nxt = pd->proto;
	ip6->ip6_hlim = pd->ttl;
	ip6->ip6_src = pd->naddr.v6addr;
	ip6->ip6_dst = pd->ndaddr.v6addr;

	/* recalculate icmp6 checksums */
	if (pd->proto == IPPROTO_ICMPV6) {
		struct icmp6_hdr	*icmp6;
		int			 hlen = sizeof (*ip6);

		icmp6 = (struct icmp6_hdr *)pbuf_contig_segment(pbuf, hlen,
		    sizeof (*icmp6));
		if (icmp6 == NULL) {
			return PF_DROP;
		}

		icmp6->icmp6_cksum = 0;
		icmp6->icmp6_cksum = pbuf_inet6_cksum(pbuf,
		    IPPROTO_ICMPV6, hlen,
		    ntohs(ip6->ip6_plen));
	} else if (pd->proto == IPPROTO_UDP) {
		struct udphdr	*uh;
		int		 hlen = sizeof (*ip6);

		uh = (struct udphdr *)pbuf_contig_segment(pbuf, hlen,
		    sizeof (*uh));
		if (uh == NULL) {
			return PF_DROP;
		}

		if (uh->uh_sum == 0) {
			uh->uh_sum = pbuf_inet6_cksum(pbuf, IPPROTO_UDP,
			    hlen, ntohs(ip6->ip6_plen));
		}
	}

	if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) {
		ip6_input(m);
	}

	return PF_NAT64;
}
static int
pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
    struct pfi_kif *kif, pbuf_t *pbuf, int off, void *h,
    struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
    struct ifqueue *ifq)
{
	struct pf_rule		*nr = NULL;
	struct pf_addr		*saddr = pd->src, *daddr = pd->dst;
	sa_family_t		 af = pd->af;
	struct pf_rule		*r, *a = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_src_node	*nsn = NULL;
	struct tcphdr		*th = pd->hdr.tcp;
	struct udphdr		*uh = pd->hdr.udp;
	u_short			 reason;
	int			 rewrite = 0, hdrlen = 0;
	int			 tag = -1;
	unsigned int		 rtableid = IFSCOPE_NONE;
	int			 asd = 0;
	int			 match = 0;
	int			 state_icmp = 0;
	u_int16_t		 mss = tcp_mssdflt;
	u_int8_t		 icmptype = 0, icmpcode = 0;

	struct pf_grev1_hdr	*grev1 = pd->hdr.grev1;
	union pf_state_xport bxport, bdxport, nxport, sxport, dxport;
	struct pf_state_key	 psk;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (direction == PF_IN && pf_check_congestion(ifq)) {
		REASON_SET(&reason, PFRES_CONGEST);
		return PF_DROP;
	}

	sxport.spi = 0;
	dxport.spi = 0;
	nxport.spi = 0;

	switch (pd->proto) {
	case IPPROTO_TCP:
		sxport.port = th->th_sport;
		dxport.port = th->th_dport;
		hdrlen = sizeof (*th);
		break;
	case IPPROTO_UDP:
		sxport.port = uh->uh_sport;
		dxport.port = uh->uh_dport;
		hdrlen = sizeof (*uh);
		break;
	case IPPROTO_ICMP:
		if (pd->af != AF_INET) {
			break;
		}
		sxport.port = dxport.port = pd->hdr.icmp->icmp_id;
		hdrlen = ICMP_MINLEN;
		icmptype = pd->hdr.icmp->icmp_type;
		icmpcode = pd->hdr.icmp->icmp_code;

		if (ICMP_ERRORTYPE(icmptype)) {
			state_icmp++;
		}
		break;
	case IPPROTO_ICMPV6:
		if (pd->af != AF_INET6) {
			break;
		}
		sxport.port = dxport.port = pd->hdr.icmp6->icmp6_id;
		hdrlen = sizeof (*pd->hdr.icmp6);
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpcode = pd->hdr.icmp6->icmp6_code;

		if (ICMP6_ERRORTYPE(icmptype)) {
			state_icmp++;
		}
		break;
	case IPPROTO_GRE:
		if (pd->proto_variant == PF_GRE_PPTP_VARIANT) {
			sxport.call_id = dxport.call_id =
			    pd->hdr.grev1->call_id;
			hdrlen = sizeof (*pd->hdr.grev1);
		}
		break;
	case IPPROTO_ESP:
		sxport.spi = 0;
		dxport.spi = pd->hdr.esp->spi;
		hdrlen = sizeof (*pd->hdr.esp);
		break;
	}

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);

	if (direction == PF_OUT) {
		bxport = nxport = sxport;
		bdxport = dxport;
	} else {
		bxport = nxport = dxport;
		bdxport = sxport;
	}

	/* check packet for BINAT/NAT/RDR */
	if ((nr = pf_get_translation_aux(pd, pbuf, off, direction, kif, &nsn,
	    saddr, &sxport, daddr, &dxport, &nxport
	    )) != NULL) {
		int ua;
		u_int16_t dport;

		if (pd->af != pd->naf) {
			ua = 0;
		} else {
			ua = 1;
		}

		PF_ACPY(&pd->baddr, saddr, af);
		PF_ACPY(&pd->bdaddr, daddr, af);

		switch (pd->proto) {
		case IPPROTO_TCP:
			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_ap(direction, pd->mp, saddr,
				    &th->th_sport, pd->ip_sum, &th->th_sum,
				    &pd->naddr, nxport.port, 0, af,
				    pd->naf, ua);
				sxport.port = th->th_sport;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af) ||
			    (nr && (nr->action == PF_RDR) &&
			    (th->th_dport != nxport.port))) {
				if (nr && nr->action == PF_RDR) {
					dport = nxport.port;
				} else {
					dport = th->th_dport;
				}
				pf_change_ap(direction, pd->mp, daddr,
				    &th->th_dport, pd->ip_sum,
				    &th->th_sum, &pd->ndaddr,
				    dport, 0, af, pd->naf, ua);
				dxport.port = th->th_dport;
			}
			rewrite++;
			break;

		case IPPROTO_UDP:
			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_ap(direction, pd->mp, saddr,
				    &uh->uh_sport, pd->ip_sum,
				    &uh->uh_sum, &pd->naddr,
				    nxport.port, 1, af, pd->naf, ua);
				sxport.port = uh->uh_sport;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af) ||
			    (nr && (nr->action == PF_RDR) &&
			    (uh->uh_dport != nxport.port))) {
				if (nr && nr->action == PF_RDR) {
					dport = nxport.port;
				} else {
					dport = uh->uh_dport;
				}
				pf_change_ap(direction, pd->mp, daddr,
				    &uh->uh_dport, pd->ip_sum,
				    &uh->uh_sum, &pd->ndaddr,
				    dport, 0, af, pd->naf, ua);
				dxport.port = uh->uh_dport;
			}
			rewrite++;
			break;

		case IPPROTO_ICMP:
			if (pd->af != AF_INET) {
				break;
			}
			/*
			 * pd->af != pd->naf not handled yet here and would be
			 * needed for NAT46 needed to support XLAT.
			 * Will cross the bridge when it comes.
			 */
			if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_a(&saddr->v4addr.s_addr, pd->ip_sum,
				    pd->naddr.v4addr.s_addr, 0);
				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
				    pd->hdr.icmp->icmp_cksum, sxport.port,
				    nxport.port, 0);
				pd->hdr.icmp->icmp_id = nxport.port;
			}

			if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
				pf_change_a(&daddr->v4addr.s_addr, pd->ip_sum,
				    pd->ndaddr.v4addr.s_addr, 0);
			}
			++rewrite;
			break;

		case IPPROTO_ICMPV6:
			if (pd->af != AF_INET6) {
				break;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_addr(saddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &pd->naddr, 0, pd->af, pd->naf);
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
				pf_change_addr(daddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &pd->ndaddr, 0, pd->af, pd->naf);
			}

			if (pd->af != pd->naf) {
				if (pf_translate_icmp_af(AF_INET,
				    pd->hdr.icmp6)) {
					return PF_DROP;
				}
				pd->proto = IPPROTO_ICMP;
			}
			rewrite++;
			break;

		case IPPROTO_GRE:
			if ((direction == PF_IN) &&
			    (pd->proto_variant == PF_GRE_PPTP_VARIANT)) {
				grev1->call_id = nxport.call_id;
			}

			switch (pd->af) {
			case AF_INET:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					pf_change_a(&saddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->naddr.v4addr.s_addr, 0);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					pf_change_a(&daddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->ndaddr.v4addr.s_addr, 0);
				}
				break;
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					PF_ACPY(saddr, &pd->naddr, AF_INET6);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					PF_ACPY(daddr, &pd->ndaddr, AF_INET6);
				}
				break;
			}
			++rewrite;
			break;

		case IPPROTO_ESP:
			if (direction == PF_OUT) {
				bxport.spi = 0;
			}

			switch (pd->af) {
			case AF_INET:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					pf_change_a(&saddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->naddr.v4addr.s_addr, 0);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					pf_change_a(&daddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->ndaddr.v4addr.s_addr, 0);
				}
				break;
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					PF_ACPY(saddr, &pd->naddr, AF_INET6);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					PF_ACPY(daddr, &pd->ndaddr, AF_INET6);
				}
				break;
			}
			break;

		default:
			switch (pd->af) {
			case AF_INET:
				if ((pd->naf != AF_INET) ||
				    (PF_ANEQ(saddr, &pd->naddr, pd->af))) {
					pf_change_addr(saddr, pd->ip_sum,
					    &pd->naddr, 0, af, pd->naf);
				}

				if ((pd->naf != AF_INET) ||
				    (PF_ANEQ(daddr, &pd->ndaddr, pd->af))) {
					pf_change_addr(daddr, pd->ip_sum,
					    &pd->ndaddr, 0, af, pd->naf);
				}
				break;
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					PF_ACPY(saddr, &pd->naddr, af);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					PF_ACPY(daddr, &pd->ndaddr, af);
				}
				break;
			}
			break;
		}

		if (nr->natpass) {
			r = NULL;
		}
		pd->nat_rule = nr;
		pd->af = pd->naf;
	}

	if (nr && nr->tag > 0) {
		tag = nr->tag;
	}

	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot) {
			r = r->skip[PF_SKIP_IFP].ptr;
		} else if (r->direction && r->direction != direction) {
			r = r->skip[PF_SKIP_DIR].ptr;
		} else if (r->af && r->af != pd->af) {
			r = r->skip[PF_SKIP_AF].ptr;
		} else if (r->proto && r->proto != pd->proto) {
			r = r->skip[PF_SKIP_PROTO].ptr;
		} else if (PF_MISMATCHAW(&r->src.addr, saddr, pd->af,
		    r->src.neg, kif)) {
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		}
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->proto == pd->proto &&
		    (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
		    r->src.xport.range.op &&
		    !pf_match_port(r->src.xport.range.op,
		    r->src.xport.range.port[0], r->src.xport.range.port[1],
		    th->th_sport)) {
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		} else if (PF_MISMATCHAW(&r->dst.addr, daddr, pd->af,
		    r->dst.neg, NULL)) {
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		}
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->proto == pd->proto &&
		    (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
		    r->dst.xport.range.op &&
		    !pf_match_port(r->dst.xport.range.op,
		    r->dst.xport.range.port[0], r->dst.xport.range.port[1],
		    th->th_dport)) {
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		}
		/* icmp only. type always 0 in other cases */
		else if (r->type && r->type != icmptype + 1) {
			r = TAILQ_NEXT(r, entries);
		}
		/* icmp only. type always 0 in other cases */
		else if (r->code && r->code != icmpcode + 1) {
			r = TAILQ_NEXT(r, entries);
		} else if ((r->rule_flag & PFRULE_TOS) && r->tos &&
		    !(r->tos & pd->tos)) {
			r = TAILQ_NEXT(r, entries);
		} else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
		    !(r->tos & (pd->tos & DSCP_MASK))) {
			r = TAILQ_NEXT(r, entries);
		} else if ((r->rule_flag & PFRULE_SC) && r->tos &&
		    ((r->tos & SCIDX_MASK) != pd->sc)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->rule_flag & PFRULE_FRAGMENT) {
			r = TAILQ_NEXT(r, entries);
		} else if (pd->proto == IPPROTO_TCP &&
		    (r->flagset & th->th_flags) != r->flags) {
			r = TAILQ_NEXT(r, entries);
		}
		/* tcp/udp only. uid.op always 0 in other cases */
		else if (r->uid.op && (pd->lookup.done || ((void)(pd->lookup.done =
		    pf_socket_lookup(direction, pd)), 1)) &&
		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
		    pd->lookup.uid)) {
			r = TAILQ_NEXT(r, entries);
		}
		/* tcp/udp only. gid.op always 0 in other cases */
		else if (r->gid.op && (pd->lookup.done || ((void)(pd->lookup.done =
		    pf_socket_lookup(direction, pd)), 1)) &&
		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
		    pd->lookup.gid)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) {
			r = TAILQ_NEXT(r, entries);
		} else if (r->os_fingerprint != PF_OSFP_ANY &&
		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
		    pf_osfp_fingerprint(pd, pbuf, off, th),
		    r->os_fingerprint))) {
			r = TAILQ_NEXT(r, entries);
		} else {
			if (r->tag) {
				tag = r->tag;
			}
			if (PF_RTABLEID_IS_VALID(r->rtableid)) {
				rtableid = r->rtableid;
			}
			if (r->anchor == NULL) {
				match = 1;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				if ((*rm)->quick) {
					break;
				}
				r = TAILQ_NEXT(r, entries);
			} else {
				pf_step_into_anchor(&asd, &ruleset,
				    PF_RULESET_FILTER, &r, &a, &match);
			}
		}
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    PF_RULESET_FILTER, &r, &a, &match)) {
			break;
		}
	}
	r = *rm;
	a = *am;
	ruleset = *rsm;

	REASON_SET(&reason, PFRES_MATCH);

	if (r->log || (nr != NULL && nr->log)) {
		if (rewrite > 0) {
			if (rewrite < off + hdrlen) {
				rewrite = off + hdrlen;
			}

			if (pf_lazy_makewritable(pd, pbuf, rewrite) == NULL) {
				REASON_SET(&reason, PFRES_MEMORY);
				return PF_DROP;
			}

			pbuf_copy_back(pbuf, off, hdrlen, pd->hdr.any);
		}
		PFLOG_PACKET(kif, h, pbuf, pd->af, direction, reason,
		    r->log ? r : nr, a, ruleset, pd);
	}

	if ((r->action == PF_DROP) &&
	    ((r->rule_flag & PFRULE_RETURNRST) ||
	    (r->rule_flag & PFRULE_RETURNICMP) ||
	    (r->rule_flag & PFRULE_RETURN))) {
		/* undo NAT changes, if they have taken place */
		/* XXX For NAT64 we are not reverting the changes */
		if (nr != NULL && nr->action != PF_NAT64) {
			if (direction == PF_OUT) {
				pd->af = af;
				switch (pd->proto) {
				case IPPROTO_TCP:
					pf_change_ap(direction, pd->mp, saddr,
					    &th->th_sport, pd->ip_sum,
					    &th->th_sum, &pd->baddr,
					    bxport.port, 0, af, pd->af, 1);
					sxport.port = th->th_sport;
					rewrite++;
					break;
				case IPPROTO_UDP:
					pf_change_ap(direction, pd->mp, saddr,
					    &pd->hdr.udp->uh_sport, pd->ip_sum,
					    &pd->hdr.udp->uh_sum, &pd->baddr,
					    bxport.port, 1, af, pd->af, 1);
					sxport.port = pd->hdr.udp->uh_sport;
					rewrite++;
					break;
				case IPPROTO_ICMP:
				case IPPROTO_ICMPV6:
					/* nothing! */
					break;
				case IPPROTO_GRE:
					PF_ACPY(&pd->baddr, saddr, af);
					++rewrite;
					switch (af) {
					case AF_INET:
						pf_change_a(&saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr,
						    AF_INET6);
						break;
					}
					break;
				case IPPROTO_ESP:
					PF_ACPY(&pd->baddr, saddr, af);
					switch (af) {
					case AF_INET:
						pf_change_a(&saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr,
						    AF_INET6);
						break;
					}
					break;
				default:
					switch (af) {
					case AF_INET:
						pf_change_a(&saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr, af);
						break;
					}
				}
			} else {
				switch (pd->proto) {
				case IPPROTO_TCP:
					pf_change_ap(direction, pd->mp, daddr,
					    &th->th_dport, pd->ip_sum,
					    &th->th_sum, &pd->bdaddr,
					    bdxport.port, 0, af, pd->af, 1);
					dxport.port = th->th_dport;
					rewrite++;
					break;
				case IPPROTO_UDP:
					pf_change_ap(direction, pd->mp, daddr,
					    &pd->hdr.udp->uh_dport, pd->ip_sum,
					    &pd->hdr.udp->uh_sum, &pd->bdaddr,
					    bdxport.port, 1, af, pd->af, 1);
					dxport.port = pd->hdr.udp->uh_dport;
					rewrite++;
					break;
				case IPPROTO_ICMP:
				case IPPROTO_ICMPV6:
					/* nothing! */
					break;
				case IPPROTO_GRE:
					if (pd->proto_variant ==
					    PF_GRE_PPTP_VARIANT) {
						grev1->call_id =
						    bdxport.call_id;
					}
					++rewrite;
					switch (af) {
					case AF_INET:
						pf_change_a(&daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr,
						    AF_INET6);
						break;
					}
					break;
				case IPPROTO_ESP:
					switch (af) {
					case AF_INET:
						pf_change_a(&daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr,
						    AF_INET6);
						break;
					}
					break;
				default:
					switch (af) {
					case AF_INET:
						pf_change_a(&daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr, af);
						break;
					}
				}
			}
		}
		if (pd->proto == IPPROTO_TCP &&
		    ((r->rule_flag & PFRULE_RETURNRST) ||
		    (r->rule_flag & PFRULE_RETURN)) &&
		    !(th->th_flags & TH_RST)) {
			u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
			int len = 0;
			struct ip *h4;
			struct ip6_hdr *h6;

			switch (pd->af) {
			case AF_INET:
				h4 = pbuf->pb_data;
				len = ntohs(h4->ip_len) - off;
				break;
			case AF_INET6:
				h6 = pbuf->pb_data;
				len = ntohs(h6->ip6_plen) -
				    (off - sizeof (*h6));
				break;
			}

			if (pf_check_proto_cksum(pbuf, off, len, IPPROTO_TCP,
			    pd->af)) {
				REASON_SET(&reason, PFRES_PROTCKSUM);
			} else {
				if (th->th_flags & TH_SYN) {
					ack++;
				}
				if (th->th_flags & TH_FIN) {
					ack++;
				}
				pf_send_tcp(r, pd->af, pd->dst,
				    pd->src, th->th_dport, th->th_sport,
				    ntohl(th->th_ack), ack, TH_RST | TH_ACK, 0, 0,
				    r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
			}
		} else if (pd->proto != IPPROTO_ICMP && pd->af == AF_INET &&
		    pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
		    r->return_icmp) {
			pf_send_icmp(pbuf, r->return_icmp >> 8,
			    r->return_icmp & 255, pd->af, r);
		} else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
		    pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
		    r->return_icmp6) {
			pf_send_icmp(pbuf, r->return_icmp6 >> 8,
			    r->return_icmp6 & 255, pd->af, r);
		}
	}

	if (r->action == PF_DROP) {
		return PF_DROP;
	}

	/* prepare state key, for flowhash and/or the state (if created) */
	bzero(&psk, sizeof (psk));
	psk.proto = pd->proto;
	psk.direction = direction;
	if (pd->proto == IPPROTO_UDP) {
		if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
		    ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
			psk.proto_variant = PF_EXTFILTER_APD;
		} else {
			psk.proto_variant = nr ? nr->extfilter : r->extfilter;
			if (psk.proto_variant < PF_EXTFILTER_APD) {
				psk.proto_variant = PF_EXTFILTER_APD;
			}
		}
	} else if (pd->proto == IPPROTO_GRE) {
		psk.proto_variant = pd->proto_variant;
	}
	if (direction == PF_OUT) {
		psk.af_gwy = af;
		PF_ACPY(&psk.gwy.addr, saddr, af);
		PF_ACPY(&psk.ext_gwy.addr, daddr, af);
		switch (pd->proto) {
		case IPPROTO_ESP:
			psk.gwy.xport.spi = 0;
			psk.ext_gwy.xport.spi = pd->hdr.esp->spi;
			break;
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			/*
			 * NAT64 requires protocol translation between ICMPv4
			 * and ICMPv6. TCP and UDP do not require protocol
			 * translation. To avoid adding complexity just to
			 * handle ICMP(v4addr/v6addr), we always lookup for
			 * proto = IPPROTO_ICMP on both LAN and WAN side
			 */
			psk.proto = IPPROTO_ICMP;
			psk.gwy.xport.port = nxport.port;
			psk.ext_gwy.xport.spi = 0;
			break;
		default:
			psk.gwy.xport = sxport;
			psk.ext_gwy.xport = dxport;
			break;
		}
		psk.af_lan = af;
		if (nr != NULL) {
			PF_ACPY(&psk.lan.addr, &pd->baddr, af);
			psk.lan.xport = bxport;
			PF_ACPY(&psk.ext_lan.addr, &pd->bdaddr, af);
			psk.ext_lan.xport = bdxport;
		} else {
			PF_ACPY(&psk.lan.addr, &psk.gwy.addr, af);
			psk.lan.xport = psk.gwy.xport;
			PF_ACPY(&psk.ext_lan.addr, &psk.ext_gwy.addr, af);
			psk.ext_lan.xport = psk.ext_gwy.xport;
		}
	} else {
		if (nr && nr
->action
== PF_NAT64
) {
5451 PF_ACPY(&psk
.lan
.addr
, &pd
->baddr
, af
);
5452 PF_ACPY(&psk
.ext_lan
.addr
, &pd
->bdaddr
, af
);
5454 PF_ACPY(&psk
.lan
.addr
, daddr
, af
);
5455 PF_ACPY(&psk
.ext_lan
.addr
, saddr
, af
);
5457 switch (pd
->proto
) {
5459 case IPPROTO_ICMPV6
:
5461 * NAT64 requires protocol translation between ICMPv4
5462 * and ICMPv6. TCP and UDP do not require protocol
5463 * translation. To avoid adding complexity just to
5464 * handle ICMP(v4addr/v6addr), we always lookup for
5465 * proto = IPPROTO_ICMP on both LAN and WAN side
5467 psk
.proto
= IPPROTO_ICMP
;
5468 if (nr
&& nr
->action
== PF_NAT64
) {
5469 psk
.lan
.xport
= bxport
;
5470 psk
.ext_lan
.xport
= bxport
;
5472 psk
.lan
.xport
= nxport
;
5473 psk
.ext_lan
.xport
.spi
= 0;
5477 psk
.ext_lan
.xport
.spi
= 0;
5478 psk
.lan
.xport
.spi
= pd
->hdr
.esp
->spi
;
5482 if (nr
->action
== PF_NAT64
) {
5483 psk
.lan
.xport
= bxport
;
5484 psk
.ext_lan
.xport
= bdxport
;
5486 psk
.lan
.xport
= dxport
;
5487 psk
.ext_lan
.xport
= sxport
;
5490 psk
.lan
.xport
= dxport
;
5491 psk
.ext_lan
.xport
= sxport
;
5495 psk
.af_gwy
= pd
->naf
;
5497 if (nr
->action
== PF_NAT64
) {
5498 PF_ACPY(&psk
.gwy
.addr
, &pd
->naddr
, pd
->naf
);
5499 PF_ACPY(&psk
.ext_gwy
.addr
, &pd
->ndaddr
,
5501 if ((pd
->proto
== IPPROTO_ICMPV6
) ||
5502 (pd
->proto
== IPPROTO_ICMP
)) {
5503 psk
.gwy
.xport
= nxport
;
5504 psk
.ext_gwy
.xport
= nxport
;
5506 psk
.gwy
.xport
= sxport
;
5507 psk
.ext_gwy
.xport
= dxport
;
5510 PF_ACPY(&psk
.gwy
.addr
, &pd
->bdaddr
, af
);
5511 psk
.gwy
.xport
= bdxport
;
5512 PF_ACPY(&psk
.ext_gwy
.addr
, saddr
, af
);
5513 psk
.ext_gwy
.xport
= sxport
;
5516 PF_ACPY(&psk
.gwy
.addr
, &psk
.lan
.addr
, af
);
5517 psk
.gwy
.xport
= psk
.lan
.xport
;
5518 PF_ACPY(&psk
.ext_gwy
.addr
, &psk
.ext_lan
.addr
, af
);
5519 psk
.ext_gwy
.xport
= psk
.ext_lan
.xport
;
5522 if (pd
->pktflags
& PKTF_FLOW_ID
) {
5523 /* flow hash was already computed outside of PF */
5524 psk
.flowsrc
= pd
->flowsrc
;
5525 psk
.flowhash
= pd
->flowhash
;
5527 /* compute flow hash and store it in state key */
5528 psk
.flowsrc
= FLOWSRC_PF
;
5529 psk
.flowhash
= pf_calc_state_key_flowhash(&psk
);
5530 pd
->flowsrc
= psk
.flowsrc
;
5531 pd
->flowhash
= psk
.flowhash
;
5532 pd
->pktflags
|= PKTF_FLOW_ID
;
5533 pd
->pktflags
&= ~PKTF_FLOW_ADV
;
5536 if (pf_tag_packet(pbuf
, pd
->pf_mtag
, tag
, rtableid
, pd
)) {
5537 REASON_SET(&reason
, PFRES_MEMORY
);
5541 if (!state_icmp
&& (r
->keep_state
|| nr
!= NULL
||
5542 (pd
->flags
& PFDESC_TCP_NORM
))) {
5543 /* create new state */
5544 struct pf_state
*s
= NULL
;
5545 struct pf_state_key
*sk
= NULL
;
5546 struct pf_src_node
*sn
= NULL
;
5547 struct pf_ike_hdr ike
;
5549 if (pd
->proto
== IPPROTO_UDP
) {
5550 size_t plen
= pbuf
->pb_packet_len
- off
- sizeof(*uh
);
5552 if (ntohs(uh
->uh_sport
) == PF_IKE_PORT
&&
5553 ntohs(uh
->uh_dport
) == PF_IKE_PORT
&&
5554 plen
>= PF_IKE_PACKET_MINSIZE
) {
5555 if (plen
> PF_IKE_PACKET_MINSIZE
) {
5556 plen
= PF_IKE_PACKET_MINSIZE
;
5558 pbuf_copy_data(pbuf
, off
+ sizeof(*uh
), plen
,
5563 if (nr
!= NULL
&& pd
->proto
== IPPROTO_ESP
&&
5564 direction
== PF_OUT
) {
5565 struct pf_state_key_cmp sk0
;
5566 struct pf_state
*s0
;
5570 * This squelches state creation if the external
5571 * address matches an existing incomplete state with a
5572 * different internal address. Only one 'blocking'
5573 * partial state is allowed for each external address.
5575 memset(&sk0
, 0, sizeof(sk0
));
5576 sk0
.af_gwy
= pd
->af
;
5577 sk0
.proto
= IPPROTO_ESP
;
5578 PF_ACPY(&sk0
.gwy
.addr
, saddr
, sk0
.af_gwy
);
5579 PF_ACPY(&sk0
.ext_gwy
.addr
, daddr
, sk0
.af_gwy
);
5580 s0
= pf_find_state(kif
, &sk0
, PF_IN
);
5582 if (s0
&& PF_ANEQ(&s0
->state_key
->lan
.addr
,
5589 /* check maximums */
5590 if (r
->max_states
&& (r
->states
>= r
->max_states
)) {
5591 pf_status
.lcounters
[LCNT_STATES
]++;
5592 REASON_SET(&reason
, PFRES_MAXSTATES
);
5595 /* src node for filter rule */
5596 if ((r
->rule_flag
& PFRULE_SRCTRACK
||
5597 r
->rpool
.opts
& PF_POOL_STICKYADDR
) &&
5598 pf_insert_src_node(&sn
, r
, saddr
, af
) != 0) {
5599 REASON_SET(&reason
, PFRES_SRCLIMIT
);
5602 /* src node for translation rule */
5603 if (nr
!= NULL
&& (nr
->rpool
.opts
& PF_POOL_STICKYADDR
) &&
5604 ((direction
== PF_OUT
&&
5605 nr
->action
!= PF_RDR
&&
5606 pf_insert_src_node(&nsn
, nr
, &pd
->baddr
, af
) != 0) ||
5607 (pf_insert_src_node(&nsn
, nr
, saddr
, af
) != 0))) {
5608 REASON_SET(&reason
, PFRES_SRCLIMIT
);
5611 s
= pool_get(&pf_state_pl
, PR_WAITOK
);
5613 REASON_SET(&reason
, PFRES_MEMORY
);
5615 if (sn
!= NULL
&& sn
->states
== 0 && sn
->expire
== 0) {
5616 RB_REMOVE(pf_src_tree
, &tree_src_tracking
, sn
);
5617 pf_status
.scounters
[SCNT_SRC_NODE_REMOVALS
]++;
5618 pf_status
.src_nodes
--;
5619 pool_put(&pf_src_tree_pl
, sn
);
5621 if (nsn
!= sn
&& nsn
!= NULL
&& nsn
->states
== 0 &&
5623 RB_REMOVE(pf_src_tree
, &tree_src_tracking
, nsn
);
5624 pf_status
.scounters
[SCNT_SRC_NODE_REMOVALS
]++;
5625 pf_status
.src_nodes
--;
5626 pool_put(&pf_src_tree_pl
, nsn
);
5629 if (sk
->app_state
) {
5630 pool_put(&pf_app_state_pl
,
5633 pool_put(&pf_state_key_pl
, sk
);
5637 bzero(s
, sizeof(*s
));
5638 TAILQ_INIT(&s
->unlink_hooks
);
5640 s
->nat_rule
.ptr
= nr
;
5642 STATE_INC_COUNTERS(s
);
5643 s
->allow_opts
= r
->allow_opts
;
5644 s
->log
= r
->log
& PF_LOG_ALL
;
5646 s
->log
|= nr
->log
& PF_LOG_ALL
;
5648 switch (pd
->proto
) {
5650 s
->src
.seqlo
= ntohl(th
->th_seq
);
5651 s
->src
.seqhi
= s
->src
.seqlo
+ pd
->p_len
+ 1;
5652 if ((th
->th_flags
& (TH_SYN
| TH_ACK
)) ==
5653 TH_SYN
&& r
->keep_state
== PF_STATE_MODULATE
) {
5654 /* Generate sequence number modulator */
5655 if ((s
->src
.seqdiff
= pf_tcp_iss(pd
) -
5656 s
->src
.seqlo
) == 0) {
5659 pf_change_a(&th
->th_seq
, &th
->th_sum
,
5660 htonl(s
->src
.seqlo
+ s
->src
.seqdiff
), 0);
5661 rewrite
= off
+ sizeof(*th
);
5665 if (th
->th_flags
& TH_SYN
) {
5667 s
->src
.wscale
= pf_get_wscale(pbuf
, off
,
5670 s
->src
.max_win
= MAX(ntohs(th
->th_win
), 1);
5671 if (s
->src
.wscale
& PF_WSCALE_MASK
) {
5672 /* Remove scale factor from initial window */
5673 int win
= s
->src
.max_win
;
5674 win
+= 1 << (s
->src
.wscale
& PF_WSCALE_MASK
);
5675 s
->src
.max_win
= (win
- 1) >>
5676 (s
->src
.wscale
& PF_WSCALE_MASK
);
5678 if (th
->th_flags
& TH_FIN
) {
5683 s
->src
.state
= TCPS_SYN_SENT
;
5684 s
->dst
.state
= TCPS_CLOSED
;
5685 s
->timeout
= PFTM_TCP_FIRST_PACKET
;
5688 s
->src
.state
= PFUDPS_SINGLE
;
5689 s
->dst
.state
= PFUDPS_NO_TRAFFIC
;
5690 s
->timeout
= PFTM_UDP_FIRST_PACKET
;
5693 case IPPROTO_ICMPV6
:
5694 s
->timeout
= PFTM_ICMP_FIRST_PACKET
;
5697 s
->src
.state
= PFGRE1S_INITIATING
;
5698 s
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
5699 s
->timeout
= PFTM_GREv1_INITIATING
;
5702 s
->src
.state
= PFESPS_INITIATING
;
5703 s
->dst
.state
= PFESPS_NO_TRAFFIC
;
5704 s
->timeout
= PFTM_ESP_FIRST_PACKET
;
5707 s
->src
.state
= PFOTHERS_SINGLE
;
5708 s
->dst
.state
= PFOTHERS_NO_TRAFFIC
;
5709 s
->timeout
= PFTM_OTHER_FIRST_PACKET
;
5712 s
->creation
= pf_time_second();
5713 s
->expire
= pf_time_second();
5717 s
->src_node
->states
++;
5718 VERIFY(s
->src_node
->states
!= 0);
5721 PF_ACPY(&nsn
->raddr
, &pd
->naddr
, af
);
5722 s
->nat_src_node
= nsn
;
5723 s
->nat_src_node
->states
++;
5724 VERIFY(s
->nat_src_node
->states
!= 0);
5726 if (pd
->proto
== IPPROTO_TCP
) {
5727 if ((pd
->flags
& PFDESC_TCP_NORM
) &&
5728 pf_normalize_tcp_init(pbuf
, off
, pd
, th
, &s
->src
,
5730 REASON_SET(&reason
, PFRES_MEMORY
);
5731 pf_src_tree_remove_state(s
);
5732 STATE_DEC_COUNTERS(s
);
5733 pool_put(&pf_state_pl
, s
);
5736 if ((pd
->flags
& PFDESC_TCP_NORM
) && s
->src
.scrub
&&
5737 pf_normalize_tcp_stateful(pbuf
, off
, pd
, &reason
,
5738 th
, s
, &s
->src
, &s
->dst
, &rewrite
)) {
5739 /* This really shouldn't happen!!! */
5740 DPFPRINTF(PF_DEBUG_URGENT
,
5741 ("pf_normalize_tcp_stateful failed on "
5743 pf_normalize_tcp_cleanup(s
);
5744 pf_src_tree_remove_state(s
);
5745 STATE_DEC_COUNTERS(s
);
5746 pool_put(&pf_state_pl
, s
);
5751 /* allocate state key and import values from psk */
5752 if ((sk
= pf_alloc_state_key(s
, &psk
)) == NULL
) {
5753 REASON_SET(&reason
, PFRES_MEMORY
);
5755 * XXXSCW: This will leak the freshly-allocated
5756 * state structure 's'. Although it should
5757 * eventually be aged-out and removed.
5762 pf_set_rt_ifp(s
, saddr
, af
); /* needs s->state_key set */
5764 pbuf
= pd
->mp
; // XXXSCW: Why?
5766 if (sk
->app_state
== 0) {
5767 switch (pd
->proto
) {
5769 u_int16_t dport
= (direction
== PF_OUT
) ?
5770 sk
->ext_gwy
.xport
.port
: sk
->gwy
.xport
.port
;
5773 ntohs(dport
) == PF_PPTP_PORT
) {
5774 struct pf_app_state
*as
;
5776 as
= pool_get(&pf_app_state_pl
,
5784 bzero(as
, sizeof(*as
));
5785 as
->handler
= pf_pptp_handler
;
5786 as
->compare_lan_ext
= 0;
5787 as
->compare_ext_gwy
= 0;
5788 as
->u
.pptp
.grev1_state
= 0;
5790 (void) hook_establish(&s
->unlink_hooks
,
5791 0, (hook_fn_t
) pf_pptp_unlink
, s
);
5798 ntohs(uh
->uh_sport
) == PF_IKE_PORT
&&
5799 ntohs(uh
->uh_dport
) == PF_IKE_PORT
) {
5800 struct pf_app_state
*as
;
5802 as
= pool_get(&pf_app_state_pl
,
5810 bzero(as
, sizeof(*as
));
5811 as
->compare_lan_ext
= pf_ike_compare
;
5812 as
->compare_ext_gwy
= pf_ike_compare
;
5813 as
->u
.ike
.cookie
= ike
.initiator_cookie
;
5824 if (pf_insert_state(BOUND_IFACE(r
, kif
), s
)) {
5825 if (pd
->proto
== IPPROTO_TCP
) {
5826 pf_normalize_tcp_cleanup(s
);
5828 REASON_SET(&reason
, PFRES_STATEINS
);
5829 pf_src_tree_remove_state(s
);
5830 STATE_DEC_COUNTERS(s
);
5831 pool_put(&pf_state_pl
, s
);
5840 if (pd
->proto
== IPPROTO_TCP
&&
5841 (th
->th_flags
& (TH_SYN
| TH_ACK
)) == TH_SYN
&&
5842 r
->keep_state
== PF_STATE_SYNPROXY
) {
5843 int ua
= (sk
->af_lan
== sk
->af_gwy
) ? 1 : 0;
5844 s
->src
.state
= PF_TCPS_PROXY_SRC
;
5846 if (direction
== PF_OUT
) {
5847 pf_change_ap(direction
, pd
->mp
, saddr
,
5848 &th
->th_sport
, pd
->ip_sum
,
5849 &th
->th_sum
, &pd
->baddr
,
5850 bxport
.port
, 0, af
, pd
->af
, ua
);
5851 sxport
.port
= th
->th_sport
;
5853 pf_change_ap(direction
, pd
->mp
, daddr
,
5854 &th
->th_dport
, pd
->ip_sum
,
5855 &th
->th_sum
, &pd
->baddr
,
5856 bxport
.port
, 0, af
, pd
->af
, ua
);
5857 sxport
.port
= th
->th_dport
;
5860 s
->src
.seqhi
= htonl(random());
5861 /* Find mss option */
5862 mss
= pf_get_mss(pbuf
, off
, th
->th_off
, af
);
5863 mss
= pf_calc_mss(saddr
, af
, mss
);
5864 mss
= pf_calc_mss(daddr
, af
, mss
);
5866 pf_send_tcp(r
, af
, daddr
, saddr
, th
->th_dport
,
5867 th
->th_sport
, s
->src
.seqhi
, ntohl(th
->th_seq
) + 1,
5868 TH_SYN
| TH_ACK
, 0, s
->src
.mss
, 0, 1, 0, NULL
, NULL
);
5869 REASON_SET(&reason
, PFRES_SYNPROXY
);
5870 return PF_SYNPROXY_DROP
;
5873 if (sk
->app_state
&& sk
->app_state
->handler
) {
5876 switch (pd
->proto
) {
5878 offx
+= th
->th_off
<< 2;
5881 offx
+= pd
->hdr
.udp
->uh_ulen
<< 2;
5884 /* ALG handlers only apply to TCP and UDP rules */
5889 sk
->app_state
->handler(s
, direction
, offx
,
5892 REASON_SET(&reason
, PFRES_MEMORY
);
5895 pbuf
= pd
->mp
; // XXXSCW: Why?
5900 /* copy back packet headers if we performed NAT operations */
5902 if (rewrite
< off
+ hdrlen
) {
5903 rewrite
= off
+ hdrlen
;
5906 if (pf_lazy_makewritable(pd
, pd
->mp
, rewrite
) == NULL
) {
5907 REASON_SET(&reason
, PFRES_MEMORY
);
5911 pbuf_copy_back(pbuf
, off
, hdrlen
, pd
->hdr
.any
);
5912 if (af
== AF_INET6
&& pd
->naf
== AF_INET
) {
5913 return pf_nat64_ipv6(pbuf
, off
, pd
);
5914 } else if (af
== AF_INET
&& pd
->naf
== AF_INET6
) {
5915 return pf_nat64_ipv4(pbuf
, off
, pd
);
5922 boolean_t is_nlc_enabled_glb
= FALSE
;
5924 static inline boolean_t
5925 pf_is_dummynet_enabled(void)
5928 if (__probable(!PF_IS_ENABLED
)) {
5932 if (__probable(!DUMMYNET_LOADED
)) {
5936 if (__probable(TAILQ_EMPTY(pf_main_ruleset
.
5937 rules
[PF_RULESET_DUMMYNET
].active
.ptr
))) {
5944 #endif /* DUMMYNET */
5949 * When pf_test_dummynet() returns PF_PASS, the rule matching parameter "rm"
5950 * remains unchanged, meaning the packet did not match a dummynet rule.
5951 * when the packet does match a dummynet rule, pf_test_dummynet() returns
5952 * PF_PASS and zero out the mbuf rule as the packet is effectively siphoned
5956 pf_test_dummynet(struct pf_rule
**rm
, int direction
, struct pfi_kif
*kif
,
5957 pbuf_t
**pbuf0
, struct pf_pdesc
*pd
, struct ip_fw_args
*fwa
)
5959 pbuf_t
*pbuf
= *pbuf0
;
5960 struct pf_rule
*am
= NULL
;
5961 struct pf_ruleset
*rsm
= NULL
;
5962 struct pf_addr
*saddr
= pd
->src
, *daddr
= pd
->dst
;
5963 sa_family_t af
= pd
->af
;
5964 struct pf_rule
*r
, *a
= NULL
;
5965 struct pf_ruleset
*ruleset
= NULL
;
5966 struct tcphdr
*th
= pd
->hdr
.tcp
;
5970 unsigned int rtableid
= IFSCOPE_NONE
;
5973 u_int8_t icmptype
= 0, icmpcode
= 0;
5974 struct ip_fw_args dnflow
;
5975 struct pf_rule
*prev_matching_rule
= fwa
? fwa
->fwa_pf_rule
: NULL
;
5976 int found_prev_rule
= (prev_matching_rule
) ? 0 : 1;
5978 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
5980 if (!pf_is_dummynet_enabled()) {
5984 bzero(&dnflow
, sizeof(dnflow
));
5988 /* Fragments don't gave protocol headers */
5989 if (!(pd
->flags
& PFDESC_IP_FRAG
)) {
5990 switch (pd
->proto
) {
5992 dnflow
.fwa_id
.flags
= pd
->hdr
.tcp
->th_flags
;
5993 dnflow
.fwa_id
.dst_port
= ntohs(pd
->hdr
.tcp
->th_dport
);
5994 dnflow
.fwa_id
.src_port
= ntohs(pd
->hdr
.tcp
->th_sport
);
5995 hdrlen
= sizeof(*th
);
5998 dnflow
.fwa_id
.dst_port
= ntohs(pd
->hdr
.udp
->uh_dport
);
5999 dnflow
.fwa_id
.src_port
= ntohs(pd
->hdr
.udp
->uh_sport
);
6000 hdrlen
= sizeof(*pd
->hdr
.udp
);
6004 if (af
!= AF_INET
) {
6007 hdrlen
= ICMP_MINLEN
;
6008 icmptype
= pd
->hdr
.icmp
->icmp_type
;
6009 icmpcode
= pd
->hdr
.icmp
->icmp_code
;
6012 case IPPROTO_ICMPV6
:
6013 if (af
!= AF_INET6
) {
6016 hdrlen
= sizeof(*pd
->hdr
.icmp6
);
6017 icmptype
= pd
->hdr
.icmp6
->icmp6_type
;
6018 icmpcode
= pd
->hdr
.icmp6
->icmp6_code
;
6021 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
) {
6022 hdrlen
= sizeof(*pd
->hdr
.grev1
);
6026 hdrlen
= sizeof(*pd
->hdr
.esp
);
6031 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_DUMMYNET
].active
.ptr
);
6035 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
) {
6036 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
6037 } else if (r
->direction
&& r
->direction
!= direction
) {
6038 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
6039 } else if (r
->af
&& r
->af
!= af
) {
6040 r
= r
->skip
[PF_SKIP_AF
].ptr
;
6041 } else if (r
->proto
&& r
->proto
!= pd
->proto
) {
6042 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
6043 } else if (PF_MISMATCHAW(&r
->src
.addr
, saddr
, af
,
6045 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
6047 /* tcp/udp only. port_op always 0 in other cases */
6048 else if (r
->proto
== pd
->proto
&&
6049 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
6050 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6051 ((r
->src
.xport
.range
.op
&&
6052 !pf_match_port(r
->src
.xport
.range
.op
,
6053 r
->src
.xport
.range
.port
[0], r
->src
.xport
.range
.port
[1],
6055 r
= r
->skip
[PF_SKIP_SRC_PORT
].ptr
;
6056 } else if (PF_MISMATCHAW(&r
->dst
.addr
, daddr
, af
,
6057 r
->dst
.neg
, NULL
)) {
6058 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
6060 /* tcp/udp only. port_op always 0 in other cases */
6061 else if (r
->proto
== pd
->proto
&&
6062 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
6063 r
->dst
.xport
.range
.op
&&
6064 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6065 !pf_match_port(r
->dst
.xport
.range
.op
,
6066 r
->dst
.xport
.range
.port
[0], r
->dst
.xport
.range
.port
[1],
6068 r
= r
->skip
[PF_SKIP_DST_PORT
].ptr
;
6070 /* icmp only. type always 0 in other cases */
6072 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6073 r
->type
!= icmptype
+ 1)) {
6074 r
= TAILQ_NEXT(r
, entries
);
6076 /* icmp only. type always 0 in other cases */
6078 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6079 r
->code
!= icmpcode
+ 1)) {
6080 r
= TAILQ_NEXT(r
, entries
);
6081 } else if (r
->tos
&& !(r
->tos
== pd
->tos
)) {
6082 r
= TAILQ_NEXT(r
, entries
);
6083 } else if (r
->rule_flag
& PFRULE_FRAGMENT
) {
6084 r
= TAILQ_NEXT(r
, entries
);
6085 } else if (pd
->proto
== IPPROTO_TCP
&&
6086 ((pd
->flags
& PFDESC_IP_FRAG
) ||
6087 (r
->flagset
& th
->th_flags
) != r
->flags
)) {
6088 r
= TAILQ_NEXT(r
, entries
);
6089 } else if (r
->prob
&& r
->prob
<= (RandomULong() % (UINT_MAX
- 1) + 1)) {
6090 r
= TAILQ_NEXT(r
, entries
);
6091 } else if (r
->match_tag
&& !pf_match_tag(r
, pd
->pf_mtag
, &tag
)) {
6092 r
= TAILQ_NEXT(r
, entries
);
6095 * Need to go past the previous dummynet matching rule
6097 if (r
->anchor
== NULL
) {
6098 if (found_prev_rule
) {
6102 if (PF_RTABLEID_IS_VALID(r
->rtableid
)) {
6103 rtableid
= r
->rtableid
;
6112 } else if (r
== prev_matching_rule
) {
6113 found_prev_rule
= 1;
6115 r
= TAILQ_NEXT(r
, entries
);
6117 pf_step_into_anchor(&asd
, &ruleset
,
6118 PF_RULESET_DUMMYNET
, &r
, &a
, &match
);
6121 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
6122 PF_RULESET_DUMMYNET
, &r
, &a
, &match
)) {
6134 REASON_SET(&reason
, PFRES_DUMMYNET
);
6137 PFLOG_PACKET(kif
, h
, pbuf
, af
, direction
, reason
, r
,
6141 if (r
->action
== PF_NODUMMYNET
) {
6142 int dirndx
= (direction
== PF_OUT
);
6144 r
->packets
[dirndx
]++;
6145 r
->bytes
[dirndx
] += pd
->tot_len
;
6149 if (pf_tag_packet(pbuf
, pd
->pf_mtag
, tag
, rtableid
, pd
)) {
6150 REASON_SET(&reason
, PFRES_MEMORY
);
6155 if (r
->dnpipe
&& ip_dn_io_ptr
!= NULL
) {
6157 int dirndx
= (direction
== PF_OUT
);
6159 r
->packets
[dirndx
]++;
6160 r
->bytes
[dirndx
] += pd
->tot_len
;
6162 dnflow
.fwa_cookie
= r
->dnpipe
;
6163 dnflow
.fwa_pf_rule
= r
;
6164 dnflow
.fwa_id
.proto
= pd
->proto
;
6165 dnflow
.fwa_flags
= r
->dntype
;
6168 dnflow
.fwa_id
.addr_type
= 4;
6169 dnflow
.fwa_id
.src_ip
= ntohl(saddr
->v4addr
.s_addr
);
6170 dnflow
.fwa_id
.dst_ip
= ntohl(daddr
->v4addr
.s_addr
);
6173 dnflow
.fwa_id
.addr_type
= 6;
6174 dnflow
.fwa_id
.src_ip6
= saddr
->v6addr
;
6175 dnflow
.fwa_id
.dst_ip6
= saddr
->v6addr
;
6180 dnflow
.fwa_oif
= fwa
->fwa_oif
;
6181 dnflow
.fwa_oflags
= fwa
->fwa_oflags
;
6183 * Note that fwa_ro, fwa_dst and fwa_ipoa are
6184 * actually in a union so the following does work
6185 * for both IPv4 and IPv6
6187 dnflow
.fwa_ro
= fwa
->fwa_ro
;
6188 dnflow
.fwa_dst
= fwa
->fwa_dst
;
6189 dnflow
.fwa_ipoa
= fwa
->fwa_ipoa
;
6190 dnflow
.fwa_ro6_pmtu
= fwa
->fwa_ro6_pmtu
;
6191 dnflow
.fwa_origifp
= fwa
->fwa_origifp
;
6192 dnflow
.fwa_mtu
= fwa
->fwa_mtu
;
6193 dnflow
.fwa_unfragpartlen
= fwa
->fwa_unfragpartlen
;
6194 dnflow
.fwa_exthdrs
= fwa
->fwa_exthdrs
;
6197 if (af
== AF_INET
) {
6198 struct ip
*iphdr
= pbuf
->pb_data
;
6199 NTOHS(iphdr
->ip_len
);
6200 NTOHS(iphdr
->ip_off
);
6203 * Don't need to unlock pf_lock as NET_THREAD_HELD_PF
6204 * allows for recursive behavior
6206 m
= pbuf_to_mbuf(pbuf
, TRUE
);
6209 dnflow
.fwa_cookie
, (af
== AF_INET
) ?
6210 ((direction
== PF_IN
) ? DN_TO_IP_IN
: DN_TO_IP_OUT
) :
6211 ((direction
== PF_IN
) ? DN_TO_IP6_IN
: DN_TO_IP6_OUT
),
6216 * The packet is siphoned out by dummynet so return a NULL
6217 * pbuf so the caller can still return success.
6226 #endif /* DUMMYNET */
6229 pf_test_fragment(struct pf_rule
**rm
, int direction
, struct pfi_kif
*kif
,
6230 pbuf_t
*pbuf
, void *h
, struct pf_pdesc
*pd
, struct pf_rule
**am
,
6231 struct pf_ruleset
**rsm
)
6234 struct pf_rule
*r
, *a
= NULL
;
6235 struct pf_ruleset
*ruleset
= NULL
;
6236 sa_family_t af
= pd
->af
;
6242 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_FILTER
].active
.ptr
);
6245 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
) {
6246 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
6247 } else if (r
->direction
&& r
->direction
!= direction
) {
6248 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
6249 } else if (r
->af
&& r
->af
!= af
) {
6250 r
= r
->skip
[PF_SKIP_AF
].ptr
;
6251 } else if (r
->proto
&& r
->proto
!= pd
->proto
) {
6252 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
6253 } else if (PF_MISMATCHAW(&r
->src
.addr
, pd
->src
, af
,
6255 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
6256 } else if (PF_MISMATCHAW(&r
->dst
.addr
, pd
->dst
, af
,
6257 r
->dst
.neg
, NULL
)) {
6258 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
6259 } else if ((r
->rule_flag
& PFRULE_TOS
) && r
->tos
&&
6260 !(r
->tos
& pd
->tos
)) {
6261 r
= TAILQ_NEXT(r
, entries
);
6262 } else if ((r
->rule_flag
& PFRULE_DSCP
) && r
->tos
&&
6263 !(r
->tos
& (pd
->tos
& DSCP_MASK
))) {
6264 r
= TAILQ_NEXT(r
, entries
);
6265 } else if ((r
->rule_flag
& PFRULE_SC
) && r
->tos
&&
6266 ((r
->tos
& SCIDX_MASK
) != pd
->sc
)) {
6267 r
= TAILQ_NEXT(r
, entries
);
6268 } else if (r
->os_fingerprint
!= PF_OSFP_ANY
) {
6269 r
= TAILQ_NEXT(r
, entries
);
6270 } else if (pd
->proto
== IPPROTO_UDP
&&
6271 (r
->src
.xport
.range
.op
|| r
->dst
.xport
.range
.op
)) {
6272 r
= TAILQ_NEXT(r
, entries
);
6273 } else if (pd
->proto
== IPPROTO_TCP
&&
6274 (r
->src
.xport
.range
.op
|| r
->dst
.xport
.range
.op
||
6276 r
= TAILQ_NEXT(r
, entries
);
6277 } else if ((pd
->proto
== IPPROTO_ICMP
||
6278 pd
->proto
== IPPROTO_ICMPV6
) &&
6279 (r
->type
|| r
->code
)) {
6280 r
= TAILQ_NEXT(r
, entries
);
6281 } else if (r
->prob
&& r
->prob
<= (RandomULong() % (UINT_MAX
- 1) + 1)) {
6282 r
= TAILQ_NEXT(r
, entries
);
6283 } else if (r
->match_tag
&& !pf_match_tag(r
, pd
->pf_mtag
, &tag
)) {
6284 r
= TAILQ_NEXT(r
, entries
);
6286 if (r
->anchor
== NULL
) {
6294 r
= TAILQ_NEXT(r
, entries
);
6296 pf_step_into_anchor(&asd
, &ruleset
,
6297 PF_RULESET_FILTER
, &r
, &a
, &match
);
6300 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
6301 PF_RULESET_FILTER
, &r
, &a
, &match
)) {
6309 REASON_SET(&reason
, PFRES_MATCH
);
6312 PFLOG_PACKET(kif
, h
, pbuf
, af
, direction
, reason
, r
, a
, ruleset
,
6316 if (r
->action
!= PF_PASS
) {
6320 if (pf_tag_packet(pbuf
, pd
->pf_mtag
, tag
, -1, NULL
)) {
6321 REASON_SET(&reason
, PFRES_MEMORY
);
6329 pf_pptp_handler(struct pf_state
*s
, int direction
, int off
,
6330 struct pf_pdesc
*pd
, struct pfi_kif
*kif
)
6332 #pragma unused(direction)
6334 struct pf_pptp_state
*pptps
;
6335 struct pf_pptp_ctrl_msg cm
;
6337 struct pf_state
*gs
;
6339 u_int16_t
*pac_call_id
;
6340 u_int16_t
*pns_call_id
;
6341 u_int16_t
*spoof_call_id
;
6342 u_int8_t
*pac_state
;
6343 u_int8_t
*pns_state
;
6344 enum { PF_PPTP_PASS
, PF_PPTP_INSERT_GRE
, PF_PPTP_REMOVE_GRE
} op
;
6346 struct pf_state_key
*sk
;
6347 struct pf_state_key
*gsk
;
6348 struct pf_app_state
*gas
;
6351 pptps
= &sk
->app_state
->u
.pptp
;
6352 gs
= pptps
->grev1_state
;
6355 gs
->expire
= pf_time_second();
6359 plen
= min(sizeof(cm
), pbuf
->pb_packet_len
- off
);
6360 if (plen
< PF_PPTP_CTRL_MSG_MINSIZE
) {
6363 tlen
= plen
- PF_PPTP_CTRL_MSG_MINSIZE
;
6364 pbuf_copy_data(pbuf
, off
, plen
, &cm
);
6366 if (ntohl(cm
.hdr
.magic
) != PF_PPTP_MAGIC_NUMBER
) {
6369 if (ntohs(cm
.hdr
.type
) != 1) {
6373 #define TYPE_LEN_CHECK(_type, _name) \
6374 case PF_PPTP_CTRL_TYPE_##_type: \
6375 if (tlen < sizeof(struct pf_pptp_ctrl_##_name)) \
6379 switch (cm
.ctrl
.type
) {
6380 TYPE_LEN_CHECK(START_REQ
, start_req
);
6381 TYPE_LEN_CHECK(START_RPY
, start_rpy
);
6382 TYPE_LEN_CHECK(STOP_REQ
, stop_req
);
6383 TYPE_LEN_CHECK(STOP_RPY
, stop_rpy
);
6384 TYPE_LEN_CHECK(ECHO_REQ
, echo_req
);
6385 TYPE_LEN_CHECK(ECHO_RPY
, echo_rpy
);
6386 TYPE_LEN_CHECK(CALL_OUT_REQ
, call_out_req
);
6387 TYPE_LEN_CHECK(CALL_OUT_RPY
, call_out_rpy
);
6388 TYPE_LEN_CHECK(CALL_IN_1ST
, call_in_1st
);
6389 TYPE_LEN_CHECK(CALL_IN_2ND
, call_in_2nd
);
6390 TYPE_LEN_CHECK(CALL_IN_3RD
, call_in_3rd
);
6391 TYPE_LEN_CHECK(CALL_CLR
, call_clr
);
6392 TYPE_LEN_CHECK(CALL_DISC
, call_disc
);
6393 TYPE_LEN_CHECK(ERROR
, error
);
6394 TYPE_LEN_CHECK(SET_LINKINFO
, set_linkinfo
);
6398 #undef TYPE_LEN_CHECK
6401 gs
= pool_get(&pf_state_pl
, PR_WAITOK
);
6406 memcpy(gs
, s
, sizeof(*gs
));
6408 memset(&gs
->entry_id
, 0, sizeof(gs
->entry_id
));
6409 memset(&gs
->entry_list
, 0, sizeof(gs
->entry_list
));
6411 TAILQ_INIT(&gs
->unlink_hooks
);
6414 gs
->pfsync_time
= 0;
6415 gs
->packets
[0] = gs
->packets
[1] = 0;
6416 gs
->bytes
[0] = gs
->bytes
[1] = 0;
6417 gs
->timeout
= PFTM_UNLINKED
;
6418 gs
->id
= gs
->creatorid
= 0;
6419 gs
->src
.state
= gs
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
6420 gs
->src
.scrub
= gs
->dst
.scrub
= 0;
6422 gas
= pool_get(&pf_app_state_pl
, PR_NOWAIT
);
6424 pool_put(&pf_state_pl
, gs
);
6428 gsk
= pf_alloc_state_key(gs
, NULL
);
6430 pool_put(&pf_app_state_pl
, gas
);
6431 pool_put(&pf_state_pl
, gs
);
6435 memcpy(&gsk
->lan
, &sk
->lan
, sizeof(gsk
->lan
));
6436 memcpy(&gsk
->gwy
, &sk
->gwy
, sizeof(gsk
->gwy
));
6437 memcpy(&gsk
->ext_lan
, &sk
->ext_lan
, sizeof(gsk
->ext_lan
));
6438 memcpy(&gsk
->ext_gwy
, &sk
->ext_gwy
, sizeof(gsk
->ext_gwy
));
6439 gsk
->af_lan
= sk
->af_lan
;
6440 gsk
->af_gwy
= sk
->af_gwy
;
6441 gsk
->proto
= IPPROTO_GRE
;
6442 gsk
->proto_variant
= PF_GRE_PPTP_VARIANT
;
6443 gsk
->app_state
= gas
;
6444 gsk
->lan
.xport
.call_id
= 0;
6445 gsk
->gwy
.xport
.call_id
= 0;
6446 gsk
->ext_lan
.xport
.call_id
= 0;
6447 gsk
->ext_gwy
.xport
.call_id
= 0;
6448 gsk
->flowsrc
= FLOWSRC_PF
;
6449 gsk
->flowhash
= pf_calc_state_key_flowhash(gsk
);
6450 memset(gas
, 0, sizeof(*gas
));
6451 gas
->u
.grev1
.pptp_state
= s
;
6452 STATE_INC_COUNTERS(gs
);
6453 pptps
->grev1_state
= gs
;
6454 (void) hook_establish(&gs
->unlink_hooks
, 0,
6455 (hook_fn_t
) pf_grev1_unlink
, gs
);
6457 gsk
= gs
->state_key
;
6460 switch (sk
->direction
) {
6462 pns_call_id
= &gsk
->ext_lan
.xport
.call_id
;
6463 pns_state
= &gs
->dst
.state
;
6464 pac_call_id
= &gsk
->lan
.xport
.call_id
;
6465 pac_state
= &gs
->src
.state
;
6469 pns_call_id
= &gsk
->lan
.xport
.call_id
;
6470 pns_state
= &gs
->src
.state
;
6471 pac_call_id
= &gsk
->ext_lan
.xport
.call_id
;
6472 pac_state
= &gs
->dst
.state
;
6476 DPFPRINTF(PF_DEBUG_URGENT
,
6477 ("pf_pptp_handler: bad directional!\n"));
6484 ct
= ntohs(cm
.ctrl
.type
);
6487 case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ
:
6488 *pns_call_id
= cm
.msg
.call_out_req
.call_id
;
6489 *pns_state
= PFGRE1S_INITIATING
;
6490 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6491 spoof_call_id
= &cm
.msg
.call_out_req
.call_id
;
6495 case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY
:
6496 *pac_call_id
= cm
.msg
.call_out_rpy
.call_id
;
6497 if (s
->nat_rule
.ptr
) {
6499 (pac_call_id
== &gsk
->lan
.xport
.call_id
) ?
6500 &cm
.msg
.call_out_rpy
.call_id
:
6501 &cm
.msg
.call_out_rpy
.peer_call_id
;
6503 if (gs
->timeout
== PFTM_UNLINKED
) {
6504 *pac_state
= PFGRE1S_INITIATING
;
6505 op
= PF_PPTP_INSERT_GRE
;
6509 case PF_PPTP_CTRL_TYPE_CALL_IN_1ST
:
6510 *pns_call_id
= cm
.msg
.call_in_1st
.call_id
;
6511 *pns_state
= PFGRE1S_INITIATING
;
6512 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6513 spoof_call_id
= &cm
.msg
.call_in_1st
.call_id
;
6517 case PF_PPTP_CTRL_TYPE_CALL_IN_2ND
:
6518 *pac_call_id
= cm
.msg
.call_in_2nd
.call_id
;
6519 *pac_state
= PFGRE1S_INITIATING
;
6520 if (s
->nat_rule
.ptr
) {
6522 (pac_call_id
== &gsk
->lan
.xport
.call_id
) ?
6523 &cm
.msg
.call_in_2nd
.call_id
:
6524 &cm
.msg
.call_in_2nd
.peer_call_id
;
6528 case PF_PPTP_CTRL_TYPE_CALL_IN_3RD
:
6529 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6530 spoof_call_id
= &cm
.msg
.call_in_3rd
.call_id
;
6532 if (cm
.msg
.call_in_3rd
.call_id
!= *pns_call_id
) {
6535 if (gs
->timeout
== PFTM_UNLINKED
) {
6536 op
= PF_PPTP_INSERT_GRE
;
6540 case PF_PPTP_CTRL_TYPE_CALL_CLR
:
6541 if (cm
.msg
.call_clr
.call_id
!= *pns_call_id
) {
6542 op
= PF_PPTP_REMOVE_GRE
;
6546 case PF_PPTP_CTRL_TYPE_CALL_DISC
:
6547 if (cm
.msg
.call_clr
.call_id
!= *pac_call_id
) {
6548 op
= PF_PPTP_REMOVE_GRE
;
6552 case PF_PPTP_CTRL_TYPE_ERROR
:
6553 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
) {
6554 spoof_call_id
= &cm
.msg
.error
.peer_call_id
;
6558 case PF_PPTP_CTRL_TYPE_SET_LINKINFO
:
6559 if (s
->nat_rule
.ptr
&& pac_call_id
== &gsk
->lan
.xport
.call_id
) {
6560 spoof_call_id
= &cm
.msg
.set_linkinfo
.peer_call_id
;
6569 if (!gsk
->gwy
.xport
.call_id
&& gsk
->lan
.xport
.call_id
) {
6570 gsk
->gwy
.xport
.call_id
= gsk
->lan
.xport
.call_id
;
6571 if (spoof_call_id
) {
6572 u_int16_t call_id
= 0;
6574 struct pf_state_key_cmp key
;
6576 key
.af_gwy
= gsk
->af_gwy
;
6577 key
.proto
= IPPROTO_GRE
;
6578 key
.proto_variant
= PF_GRE_PPTP_VARIANT
;
6579 PF_ACPY(&key
.gwy
.addr
, &gsk
->gwy
.addr
, key
.af_gwy
);
6580 PF_ACPY(&key
.ext_gwy
.addr
, &gsk
->ext_gwy
.addr
, key
.af_gwy
);
6581 key
.gwy
.xport
.call_id
= gsk
->gwy
.xport
.call_id
;
6582 key
.ext_gwy
.xport
.call_id
= gsk
->ext_gwy
.xport
.call_id
;
6584 call_id
= htonl(random());
6587 while (pf_find_state_all(&key
, PF_IN
, 0)) {
6588 call_id
= ntohs(call_id
);
6590 if (--call_id
== 0) {
6593 call_id
= htons(call_id
);
6595 key
.gwy
.xport
.call_id
= call_id
;
6598 DPFPRINTF(PF_DEBUG_URGENT
,
6599 ("pf_pptp_handler: failed to spoof "
6601 key
.gwy
.xport
.call_id
= 0;
6606 gsk
->gwy
.xport
.call_id
= call_id
;
6612 if (spoof_call_id
&& gsk
->lan
.xport
.call_id
!= gsk
->gwy
.xport
.call_id
) {
6613 if (*spoof_call_id
== gsk
->gwy
.xport
.call_id
) {
6614 *spoof_call_id
= gsk
->lan
.xport
.call_id
;
6615 th
->th_sum
= pf_cksum_fixup(th
->th_sum
,
6616 gsk
->gwy
.xport
.call_id
, gsk
->lan
.xport
.call_id
, 0);
6618 *spoof_call_id
= gsk
->gwy
.xport
.call_id
;
6619 th
->th_sum
= pf_cksum_fixup(th
->th_sum
,
6620 gsk
->lan
.xport
.call_id
, gsk
->gwy
.xport
.call_id
, 0);
6623 if (pf_lazy_makewritable(pd
, pbuf
, off
+ plen
) == NULL
) {
6624 pptps
->grev1_state
= NULL
;
6625 STATE_DEC_COUNTERS(gs
);
6626 pool_put(&pf_state_pl
, gs
);
6629 pbuf_copy_back(pbuf
, off
, plen
, &cm
);
6633 case PF_PPTP_REMOVE_GRE
:
6634 gs
->timeout
= PFTM_PURGE
;
6635 gs
->src
.state
= gs
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
6636 gsk
->lan
.xport
.call_id
= 0;
6637 gsk
->gwy
.xport
.call_id
= 0;
6638 gsk
->ext_lan
.xport
.call_id
= 0;
6639 gsk
->ext_gwy
.xport
.call_id
= 0;
6640 gs
->id
= gs
->creatorid
= 0;
6643 case PF_PPTP_INSERT_GRE
:
6644 gs
->creation
= pf_time_second();
6645 gs
->expire
= pf_time_second();
6646 gs
->timeout
= PFTM_TCP_ESTABLISHED
;
6647 if (gs
->src_node
!= NULL
) {
6648 ++gs
->src_node
->states
;
6649 VERIFY(gs
->src_node
->states
!= 0);
6651 if (gs
->nat_src_node
!= NULL
) {
6652 ++gs
->nat_src_node
->states
;
6653 VERIFY(gs
->nat_src_node
->states
!= 0);
6655 pf_set_rt_ifp(gs
, &sk
->lan
.addr
, sk
->af_lan
);
6656 if (pf_insert_state(BOUND_IFACE(s
->rule
.ptr
, kif
), gs
)) {
6659 * FIX ME: insertion can fail when multiple PNS
6660 * behind the same NAT open calls to the same PAC
6661 * simultaneously because spoofed call ID numbers
6662 * are chosen before states are inserted. This is
6663 * hard to fix and happens infrequently enough that
6664 * users will normally try again and this ALG will
6665 * succeed. Failures are expected to be rare enough
6666 * that fixing this is a low priority.
6668 pptps
->grev1_state
= NULL
;
6669 pd
->lmw
= -1; /* Force PF_DROP on PFRES_MEMORY */
6670 pf_src_tree_remove_state(gs
);
6671 STATE_DEC_COUNTERS(gs
);
6672 pool_put(&pf_state_pl
, gs
);
6673 DPFPRINTF(PF_DEBUG_URGENT
, ("pf_pptp_handler: error "
6674 "inserting GREv1 state.\n"));
6684 pf_pptp_unlink(struct pf_state
*s
)
6686 struct pf_app_state
*as
= s
->state_key
->app_state
;
6687 struct pf_state
*grev1s
= as
->u
.pptp
.grev1_state
;
6690 struct pf_app_state
*gas
= grev1s
->state_key
->app_state
;
6692 if (grev1s
->timeout
< PFTM_MAX
) {
6693 grev1s
->timeout
= PFTM_PURGE
;
6695 gas
->u
.grev1
.pptp_state
= NULL
;
6696 as
->u
.pptp
.grev1_state
= NULL
;
6701 pf_grev1_unlink(struct pf_state
*s
)
6703 struct pf_app_state
*as
= s
->state_key
->app_state
;
6704 struct pf_state
*pptps
= as
->u
.grev1
.pptp_state
;
6707 struct pf_app_state
*pas
= pptps
->state_key
->app_state
;
6709 pas
->u
.pptp
.grev1_state
= NULL
;
6710 as
->u
.grev1
.pptp_state
= NULL
;
6715 pf_ike_compare(struct pf_app_state
*a
, struct pf_app_state
*b
)
6717 int64_t d
= a
->u
.ike
.cookie
- b
->u
.ike
.cookie
;
6718 return (d
> 0) ? 1 : ((d
< 0) ? -1 : 0);
6722 pf_do_nat64(struct pf_state_key
*sk
, struct pf_pdesc
*pd
, pbuf_t
*pbuf
,
6725 if (pd
->af
== AF_INET
) {
6726 if (pd
->af
!= sk
->af_lan
) {
6727 pd
->ndaddr
= sk
->lan
.addr
;
6728 pd
->naddr
= sk
->ext_lan
.addr
;
6730 pd
->naddr
= sk
->gwy
.addr
;
6731 pd
->ndaddr
= sk
->ext_gwy
.addr
;
6733 return pf_nat64_ipv4(pbuf
, off
, pd
);
6734 } else if (pd
->af
== AF_INET6
) {
6735 if (pd
->af
!= sk
->af_lan
) {
6736 pd
->ndaddr
= sk
->lan
.addr
;
6737 pd
->naddr
= sk
->ext_lan
.addr
;
6739 pd
->naddr
= sk
->gwy
.addr
;
6740 pd
->ndaddr
= sk
->ext_gwy
.addr
;
6742 return pf_nat64_ipv6(pbuf
, off
, pd
);
6748 pf_test_state_tcp(struct pf_state
**state
, int direction
, struct pfi_kif
*kif
,
6749 pbuf_t
*pbuf
, int off
, void *h
, struct pf_pdesc
*pd
,
6753 struct pf_state_key_cmp key
;
6754 struct tcphdr
*th
= pd
->hdr
.tcp
;
6755 u_int16_t win
= ntohs(th
->th_win
);
6756 u_int32_t ack
, end
, seq
, orig_seq
;
6760 struct pf_state_peer
*src
, *dst
;
6761 struct pf_state_key
*sk
;
6764 key
.proto
= IPPROTO_TCP
;
6765 key
.af_lan
= key
.af_gwy
= pd
->af
;
6768 * For NAT64 the first time rule search and state creation
6769 * is done on the incoming side only.
6770 * Once the state gets created, NAT64's LAN side (ipv6) will
6771 * not be able to find the state in ext-gwy tree as that normally
6772 * is intended to be looked up for incoming traffic from the
6774 * Therefore to handle NAT64 case we init keys here for both
6775 * lan-ext as well as ext-gwy trees.
6776 * In the state lookup we attempt a lookup on both trees if
6777 * first one does not return any result and return a match if
6778 * the match state's was created by NAT64 rule.
6780 PF_ACPY(&key
.ext_gwy
.addr
, pd
->src
, key
.af_gwy
);
6781 PF_ACPY(&key
.gwy
.addr
, pd
->dst
, key
.af_gwy
);
6782 key
.ext_gwy
.xport
.port
= th
->th_sport
;
6783 key
.gwy
.xport
.port
= th
->th_dport
;
6785 PF_ACPY(&key
.lan
.addr
, pd
->src
, key
.af_lan
);
6786 PF_ACPY(&key
.ext_lan
.addr
, pd
->dst
, key
.af_lan
);
6787 key
.lan
.xport
.port
= th
->th_sport
;
6788 key
.ext_lan
.xport
.port
= th
->th_dport
;
6792 sk
= (*state
)->state_key
;
6794 * In case of NAT64 the translation is first applied on the LAN
6795 * side. Therefore for stack's address family comparison
6796 * we use sk->af_lan.
6798 if ((direction
== sk
->direction
) && (pd
->af
== sk
->af_lan
)) {
6799 src
= &(*state
)->src
;
6800 dst
= &(*state
)->dst
;
6802 src
= &(*state
)->dst
;
6803 dst
= &(*state
)->src
;
6806 if (src
->state
== PF_TCPS_PROXY_SRC
) {
6807 if (direction
!= sk
->direction
) {
6808 REASON_SET(reason
, PFRES_SYNPROXY
);
6809 return PF_SYNPROXY_DROP
;
6811 if (th
->th_flags
& TH_SYN
) {
6812 if (ntohl(th
->th_seq
) != src
->seqlo
) {
6813 REASON_SET(reason
, PFRES_SYNPROXY
);
6816 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, pd
->dst
,
6817 pd
->src
, th
->th_dport
, th
->th_sport
,
6818 src
->seqhi
, ntohl(th
->th_seq
) + 1,
6819 TH_SYN
| TH_ACK
, 0, src
->mss
, 0, 1,
6821 REASON_SET(reason
, PFRES_SYNPROXY
);
6822 return PF_SYNPROXY_DROP
;
6823 } else if (!(th
->th_flags
& TH_ACK
) ||
6824 (ntohl(th
->th_ack
) != src
->seqhi
+ 1) ||
6825 (ntohl(th
->th_seq
) != src
->seqlo
+ 1)) {
6826 REASON_SET(reason
, PFRES_SYNPROXY
);
6828 } else if ((*state
)->src_node
!= NULL
&&
6829 pf_src_connlimit(state
)) {
6830 REASON_SET(reason
, PFRES_SRCLIMIT
);
6833 src
->state
= PF_TCPS_PROXY_DST
;
6836 if (src
->state
== PF_TCPS_PROXY_DST
) {
6837 struct pf_state_host
*psrc
, *pdst
;
6839 if (direction
== PF_OUT
) {
6841 pdst
= &sk
->ext_gwy
;
6843 psrc
= &sk
->ext_lan
;
6846 if (direction
== sk
->direction
) {
6847 if (((th
->th_flags
& (TH_SYN
| TH_ACK
)) != TH_ACK
) ||
6848 (ntohl(th
->th_ack
) != src
->seqhi
+ 1) ||
6849 (ntohl(th
->th_seq
) != src
->seqlo
+ 1)) {
6850 REASON_SET(reason
, PFRES_SYNPROXY
);
6853 src
->max_win
= MAX(ntohs(th
->th_win
), 1);
6854 if (dst
->seqhi
== 1) {
6855 dst
->seqhi
= htonl(random());
6857 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, &psrc
->addr
,
6858 &pdst
->addr
, psrc
->xport
.port
, pdst
->xport
.port
,
6859 dst
->seqhi
, 0, TH_SYN
, 0,
6860 src
->mss
, 0, 0, (*state
)->tag
, NULL
, NULL
);
6861 REASON_SET(reason
, PFRES_SYNPROXY
);
6862 return PF_SYNPROXY_DROP
;
6863 } else if (((th
->th_flags
& (TH_SYN
| TH_ACK
)) !=
6864 (TH_SYN
| TH_ACK
)) ||
6865 (ntohl(th
->th_ack
) != dst
->seqhi
+ 1)) {
6866 REASON_SET(reason
, PFRES_SYNPROXY
);
6869 dst
->max_win
= MAX(ntohs(th
->th_win
), 1);
6870 dst
->seqlo
= ntohl(th
->th_seq
);
6871 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, pd
->dst
,
6872 pd
->src
, th
->th_dport
, th
->th_sport
,
6873 ntohl(th
->th_ack
), ntohl(th
->th_seq
) + 1,
6874 TH_ACK
, src
->max_win
, 0, 0, 0,
6875 (*state
)->tag
, NULL
, NULL
);
6876 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, &psrc
->addr
,
6877 &pdst
->addr
, psrc
->xport
.port
, pdst
->xport
.port
,
6878 src
->seqhi
+ 1, src
->seqlo
+ 1,
6879 TH_ACK
, dst
->max_win
, 0, 0, 1,
6881 src
->seqdiff
= dst
->seqhi
-
6883 dst
->seqdiff
= src
->seqhi
-
6885 src
->seqhi
= src
->seqlo
+
6887 dst
->seqhi
= dst
->seqlo
+
6889 src
->wscale
= dst
->wscale
= 0;
6890 src
->state
= dst
->state
=
6892 REASON_SET(reason
, PFRES_SYNPROXY
);
6893 return PF_SYNPROXY_DROP
;
6897 if (((th
->th_flags
& (TH_SYN
| TH_ACK
)) == TH_SYN
) &&
6898 dst
->state
>= TCPS_FIN_WAIT_2
&&
6899 src
->state
>= TCPS_FIN_WAIT_2
) {
6900 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
6901 printf("pf: state reuse ");
6902 pf_print_state(*state
);
6903 pf_print_flags(th
->th_flags
);
6906 /* XXX make sure it's the same direction ?? */
6907 src
->state
= dst
->state
= TCPS_CLOSED
;
6908 pf_unlink_state(*state
);
6913 if ((th
->th_flags
& TH_SYN
) == 0) {
6914 sws
= (src
->wscale
& PF_WSCALE_FLAG
) ?
6915 (src
->wscale
& PF_WSCALE_MASK
) : TCP_MAX_WINSHIFT
;
6916 dws
= (dst
->wscale
& PF_WSCALE_FLAG
) ?
6917 (dst
->wscale
& PF_WSCALE_MASK
) : TCP_MAX_WINSHIFT
;
6923 * Sequence tracking algorithm from Guido van Rooij's paper:
6924 * http://www.madison-gurkha.com/publications/tcp_filtering/
6928 orig_seq
= seq
= ntohl(th
->th_seq
);
6929 if (src
->seqlo
== 0) {
6930 /* First packet from this end. Set its state */
6932 if ((pd
->flags
& PFDESC_TCP_NORM
|| dst
->scrub
) &&
6933 src
->scrub
== NULL
) {
6934 if (pf_normalize_tcp_init(pbuf
, off
, pd
, th
, src
, dst
)) {
6935 REASON_SET(reason
, PFRES_MEMORY
);
6940 /* Deferred generation of sequence number modulator */
6941 if (dst
->seqdiff
&& !src
->seqdiff
) {
6942 /* use random iss for the TCP server */
6943 while ((src
->seqdiff
= random() - seq
) == 0) {
6946 ack
= ntohl(th
->th_ack
) - dst
->seqdiff
;
6947 pf_change_a(&th
->th_seq
, &th
->th_sum
, htonl(seq
+
6949 pf_change_a(&th
->th_ack
, &th
->th_sum
, htonl(ack
), 0);
6950 copyback
= off
+ sizeof(*th
);
6952 ack
= ntohl(th
->th_ack
);
6955 end
= seq
+ pd
->p_len
;
6956 if (th
->th_flags
& TH_SYN
) {
6958 if (dst
->wscale
& PF_WSCALE_FLAG
) {
6959 src
->wscale
= pf_get_wscale(pbuf
, off
,
6960 th
->th_off
, pd
->af
);
6961 if (src
->wscale
& PF_WSCALE_FLAG
) {
6963 * Remove scale factor from initial
6966 sws
= src
->wscale
& PF_WSCALE_MASK
;
6967 win
= ((u_int32_t
)win
+ (1 << sws
) - 1)
6969 dws
= dst
->wscale
& PF_WSCALE_MASK
;
6972 * Window scale negotiation has failed,
6973 * therefore we must restore the window
6974 * scale in the state record that we
6975 * optimistically removed in
6976 * pf_test_rule(). Care is required to
6977 * prevent arithmetic overflow from
6978 * zeroing the window when it's
6979 * truncated down to 16-bits.
6981 u_int32_t max_win
= dst
->max_win
;
6983 dst
->wscale
& PF_WSCALE_MASK
;
6984 dst
->max_win
= MIN(0xffff, max_win
);
6985 /* in case of a retrans SYN|ACK */
6990 if (th
->th_flags
& TH_FIN
) {
6995 if (src
->state
< TCPS_SYN_SENT
) {
6996 src
->state
= TCPS_SYN_SENT
;
7000 * May need to slide the window (seqhi may have been set by
7001 * the crappy stack check or if we picked up the connection
7002 * after establishment)
7004 if (src
->seqhi
== 1 ||
7005 SEQ_GEQ(end
+ MAX(1, (u_int32_t
)dst
->max_win
<< dws
),
7007 src
->seqhi
= end
+ MAX(1, (u_int32_t
)dst
->max_win
<< dws
);
7009 if (win
> src
->max_win
) {
7013 ack
= ntohl(th
->th_ack
) - dst
->seqdiff
;
7015 /* Modulate sequence numbers */
7016 pf_change_a(&th
->th_seq
, &th
->th_sum
, htonl(seq
+
7018 pf_change_a(&th
->th_ack
, &th
->th_sum
, htonl(ack
), 0);
7019 copyback
= off
+ sizeof(*th
);
7021 end
= seq
+ pd
->p_len
;
7022 if (th
->th_flags
& TH_SYN
) {
7025 if (th
->th_flags
& TH_FIN
) {
7030 if ((th
->th_flags
& TH_ACK
) == 0) {
7031 /* Let it pass through the ack skew check */
7033 } else if ((ack
== 0 &&
7034 (th
->th_flags
& (TH_ACK
| TH_RST
)) == (TH_ACK
| TH_RST
)) ||
7035 /* broken tcp stacks do not set ack */
7036 (dst
->state
< TCPS_SYN_SENT
)) {
7038 * Many stacks (ours included) will set the ACK number in an
7039 * FIN|ACK if the SYN times out -- no sequence to ACK.
7045 /* Ease sequencing restrictions on no data packets */
7050 ackskew
= dst
->seqlo
- ack
;
7054 * Need to demodulate the sequence numbers in any TCP SACK options
7055 * (Selective ACK). We could optionally validate the SACK values
7056 * against the current ACK window, either forwards or backwards, but
7057 * I'm not confident that SACK has been implemented properly
7058 * everywhere. It wouldn't surprise me if several stacks accidently
7059 * SACK too far backwards of previously ACKed data. There really aren't
7060 * any security implications of bad SACKing unless the target stack
7061 * doesn't validate the option length correctly. Someone trying to
7062 * spoof into a TCP connection won't bother blindly sending SACK
7065 if (dst
->seqdiff
&& (th
->th_off
<< 2) > (int)sizeof(struct tcphdr
)) {
7066 copyback
= pf_modulate_sack(pbuf
, off
, pd
, th
, dst
);
7067 if (copyback
== -1) {
7068 REASON_SET(reason
, PFRES_MEMORY
);
7072 pbuf
= pd
->mp
; // XXXSCW: Why?
7076 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
7077 if (SEQ_GEQ(src
->seqhi
, end
) &&
7078 /* Last octet inside other's window space */
7079 SEQ_GEQ(seq
, src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
)) &&
7080 /* Retrans: not more than one window back */
7081 (ackskew
>= -MAXACKWINDOW
) &&
7082 /* Acking not more than one reassembled fragment backwards */
7083 (ackskew
<= (MAXACKWINDOW
<< sws
)) &&
7084 /* Acking not more than one window forward */
7085 ((th
->th_flags
& TH_RST
) == 0 || orig_seq
== src
->seqlo
||
7086 (orig_seq
== src
->seqlo
+ 1) || (orig_seq
+ 1 == src
->seqlo
) ||
7087 (pd
->flags
& PFDESC_IP_REAS
) == 0)) {
7088 /* Require an exact/+1 sequence match on resets when possible */
7090 if (dst
->scrub
|| src
->scrub
) {
7091 if (pf_normalize_tcp_stateful(pbuf
, off
, pd
, reason
, th
,
7092 *state
, src
, dst
, ©back
)) {
7096 pbuf
= pd
->mp
; // XXXSCW: Why?
7099 /* update max window */
7100 if (src
->max_win
< win
) {
7103 /* synchronize sequencing */
7104 if (SEQ_GT(end
, src
->seqlo
)) {
7107 /* slide the window of what the other end can send */
7108 if (SEQ_GEQ(ack
+ ((u_int32_t
)win
<< sws
), dst
->seqhi
)) {
7109 dst
->seqhi
= ack
+ MAX(((u_int32_t
)win
<< sws
), 1);
7113 if (th
->th_flags
& TH_SYN
) {
7114 if (src
->state
< TCPS_SYN_SENT
) {
7115 src
->state
= TCPS_SYN_SENT
;
7118 if (th
->th_flags
& TH_FIN
) {
7119 if (src
->state
< TCPS_CLOSING
) {
7120 src
->state
= TCPS_CLOSING
;
7123 if (th
->th_flags
& TH_ACK
) {
7124 if (dst
->state
== TCPS_SYN_SENT
) {
7125 dst
->state
= TCPS_ESTABLISHED
;
7126 if (src
->state
== TCPS_ESTABLISHED
&&
7127 (*state
)->src_node
!= NULL
&&
7128 pf_src_connlimit(state
)) {
7129 REASON_SET(reason
, PFRES_SRCLIMIT
);
7132 } else if (dst
->state
== TCPS_CLOSING
) {
7133 dst
->state
= TCPS_FIN_WAIT_2
;
7136 if (th
->th_flags
& TH_RST
) {
7137 src
->state
= dst
->state
= TCPS_TIME_WAIT
;
7140 /* update expire time */
7141 (*state
)->expire
= pf_time_second();
7142 if (src
->state
>= TCPS_FIN_WAIT_2
&&
7143 dst
->state
>= TCPS_FIN_WAIT_2
) {
7144 (*state
)->timeout
= PFTM_TCP_CLOSED
;
7145 } else if (src
->state
>= TCPS_CLOSING
&&
7146 dst
->state
>= TCPS_CLOSING
) {
7147 (*state
)->timeout
= PFTM_TCP_FIN_WAIT
;
7148 } else if (src
->state
< TCPS_ESTABLISHED
||
7149 dst
->state
< TCPS_ESTABLISHED
) {
7150 (*state
)->timeout
= PFTM_TCP_OPENING
;
7151 } else if (src
->state
>= TCPS_CLOSING
||
7152 dst
->state
>= TCPS_CLOSING
) {
7153 (*state
)->timeout
= PFTM_TCP_CLOSING
;
7155 (*state
)->timeout
= PFTM_TCP_ESTABLISHED
;
7158 /* Fall through to PASS packet */
7159 } else if ((dst
->state
< TCPS_SYN_SENT
||
7160 dst
->state
>= TCPS_FIN_WAIT_2
|| src
->state
>= TCPS_FIN_WAIT_2
) &&
7161 SEQ_GEQ(src
->seqhi
+ MAXACKWINDOW
, end
) &&
7162 /* Within a window forward of the originating packet */
7163 SEQ_GEQ(seq
, src
->seqlo
- MAXACKWINDOW
)) {
7164 /* Within a window backward of the originating packet */
7167 * This currently handles three situations:
7168 * 1) Stupid stacks will shotgun SYNs before their peer
7170 * 2) When PF catches an already established stream (the
7171 * firewall rebooted, the state table was flushed, routes
7173 * 3) Packets get funky immediately after the connection
7174 * closes (this should catch Solaris spurious ACK|FINs
7175 * that web servers like to spew after a close)
7177 * This must be a little more careful than the above code
7178 * since packet floods will also be caught here. We don't
7179 * update the TTL here to mitigate the damage of a packet
7180 * flood and so the same code can handle awkward establishment
7181 * and a loosened connection close.
7182 * In the establishment case, a correct peer response will
7183 * validate the connection, go through the normal state code
7184 * and keep updating the state TTL.
7187 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
7188 printf("pf: loose state match: ");
7189 pf_print_state(*state
);
7190 pf_print_flags(th
->th_flags
);
7191 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
7192 "pkts=%llu:%llu dir=%s,%s\n", seq
, orig_seq
, ack
,
7193 pd
->p_len
, ackskew
, (*state
)->packets
[0],
7194 (*state
)->packets
[1],
7195 direction
== PF_IN
? "in" : "out",
7196 direction
== sk
->direction
?
7200 if (dst
->scrub
|| src
->scrub
) {
7201 if (pf_normalize_tcp_stateful(pbuf
, off
, pd
, reason
, th
,
7202 *state
, src
, dst
, ©back
)) {
7205 pbuf
= pd
->mp
; // XXXSCW: Why?
7208 /* update max window */
7209 if (src
->max_win
< win
) {
7212 /* synchronize sequencing */
7213 if (SEQ_GT(end
, src
->seqlo
)) {
7216 /* slide the window of what the other end can send */
7217 if (SEQ_GEQ(ack
+ ((u_int32_t
)win
<< sws
), dst
->seqhi
)) {
7218 dst
->seqhi
= ack
+ MAX(((u_int32_t
)win
<< sws
), 1);
7222 * Cannot set dst->seqhi here since this could be a shotgunned
7223 * SYN and not an already established connection.
7226 if (th
->th_flags
& TH_FIN
) {
7227 if (src
->state
< TCPS_CLOSING
) {
7228 src
->state
= TCPS_CLOSING
;
7231 if (th
->th_flags
& TH_RST
) {
7232 src
->state
= dst
->state
= TCPS_TIME_WAIT
;
7235 /* Fall through to PASS packet */
7237 if (dst
->state
== TCPS_SYN_SENT
&&
7238 src
->state
== TCPS_SYN_SENT
) {
7239 /* Send RST for state mismatches during handshake */
7240 if (!(th
->th_flags
& TH_RST
)) {
7241 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
,
7242 pd
->dst
, pd
->src
, th
->th_dport
,
7243 th
->th_sport
, ntohl(th
->th_ack
), 0,
7245 (*state
)->rule
.ptr
->return_ttl
, 1, 0,
7246 pd
->eh
, kif
->pfik_ifp
);
7251 } else if (pf_status
.debug
>= PF_DEBUG_MISC
) {
7252 printf("pf: BAD state: ");
7253 pf_print_state(*state
);
7254 pf_print_flags(th
->th_flags
);
7255 printf("\n seq=%u (%u) ack=%u len=%u ackskew=%d "
7256 "sws=%u dws=%u pkts=%llu:%llu dir=%s,%s\n",
7257 seq
, orig_seq
, ack
, pd
->p_len
, ackskew
,
7258 (unsigned int)sws
, (unsigned int)dws
,
7259 (*state
)->packets
[0], (*state
)->packets
[1],
7260 direction
== PF_IN
? "in" : "out",
7261 direction
== sk
->direction
?
7263 printf("pf: State failure on: %c %c %c %c | %c %c\n",
7264 SEQ_GEQ(src
->seqhi
, end
) ? ' ' : '1',
7266 src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
)) ?
7268 (ackskew
>= -MAXACKWINDOW
) ? ' ' : '3',
7269 (ackskew
<= (MAXACKWINDOW
<< sws
)) ? ' ' : '4',
7270 SEQ_GEQ(src
->seqhi
+ MAXACKWINDOW
, end
) ?' ' :'5',
7271 SEQ_GEQ(seq
, src
->seqlo
- MAXACKWINDOW
) ?' ' :'6');
7273 REASON_SET(reason
, PFRES_BADSTATE
);
7277 /* Any packets which have gotten here are to be passed */
7279 if (sk
->app_state
&&
7280 sk
->app_state
->handler
) {
7281 sk
->app_state
->handler(*state
, direction
,
7282 off
+ (th
->th_off
<< 2), pd
, kif
);
7284 REASON_SET(reason
, PFRES_MEMORY
);
7287 pbuf
= pd
->mp
; // XXXSCW: Why?
7290 /* translate source/destination address, if necessary */
7291 if (STATE_TRANSLATE(sk
)) {
7292 pd
->naf
= (pd
->af
== sk
->af_lan
) ? sk
->af_gwy
: sk
->af_lan
;
7294 if (direction
== PF_OUT
) {
7295 pf_change_ap(direction
, pd
->mp
, pd
->src
, &th
->th_sport
,
7296 pd
->ip_sum
, &th
->th_sum
, &sk
->gwy
.addr
,
7297 sk
->gwy
.xport
.port
, 0, pd
->af
, pd
->naf
, 1);
7299 if (pd
->af
!= pd
->naf
) {
7300 if (pd
->af
== sk
->af_gwy
) {
7301 pf_change_ap(direction
, pd
->mp
, pd
->dst
,
7302 &th
->th_dport
, pd
->ip_sum
,
7303 &th
->th_sum
, &sk
->lan
.addr
,
7304 sk
->lan
.xport
.port
, 0,
7305 pd
->af
, pd
->naf
, 0);
7307 pf_change_ap(direction
, pd
->mp
, pd
->src
,
7308 &th
->th_sport
, pd
->ip_sum
,
7309 &th
->th_sum
, &sk
->ext_lan
.addr
,
7310 th
->th_sport
, 0, pd
->af
,
7313 pf_change_ap(direction
, pd
->mp
, pd
->dst
,
7314 &th
->th_dport
, pd
->ip_sum
,
7315 &th
->th_sum
, &sk
->ext_gwy
.addr
,
7316 th
->th_dport
, 0, pd
->af
,
7319 pf_change_ap(direction
, pd
->mp
, pd
->src
,
7320 &th
->th_sport
, pd
->ip_sum
,
7321 &th
->th_sum
, &sk
->gwy
.addr
,
7322 sk
->gwy
.xport
.port
, 0, pd
->af
,
7326 pf_change_ap(direction
, pd
->mp
, pd
->dst
,
7327 &th
->th_dport
, pd
->ip_sum
,
7328 &th
->th_sum
, &sk
->lan
.addr
,
7329 sk
->lan
.xport
.port
, 0, pd
->af
,
7334 copyback
= off
+ sizeof(*th
);
7338 if (pf_lazy_makewritable(pd
, pbuf
, copyback
) == NULL
) {
7339 REASON_SET(reason
, PFRES_MEMORY
);
7343 /* Copyback sequence modulation or stateful scrub changes */
7344 pbuf_copy_back(pbuf
, off
, sizeof(*th
), th
);
7346 if (sk
->af_lan
!= sk
->af_gwy
) {
7347 return pf_do_nat64(sk
, pd
, pbuf
, off
);
static int
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
    pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason)
{
#pragma unused(h)
	struct pf_state_peer *src, *dst;
	struct pf_state_key_cmp key;
	struct pf_state_key *sk;
	struct udphdr *uh = pd->hdr.udp;
	struct pf_app_state as;
	int action, extfilter;

	key.app_state = 0;
	key.proto_variant = PF_EXTFILTER_APD;

	key.proto = IPPROTO_UDP;
	key.af_lan = key.af_gwy = pd->af;

	/*
	 * For NAT64 the first time rule search and state creation
	 * is done on the incoming side only.
	 * Once the state gets created, NAT64's LAN side (ipv6) will
	 * not be able to find the state in ext-gwy tree as that normally
	 * is intended to be looked up for incoming traffic from the
	 * external side.
	 * Therefore to handle NAT64 case we init keys here for both
	 * lan-ext as well as ext-gwy trees.
	 * In the state lookup we attempt a lookup on both trees if
	 * first one does not return any result and return a match if
	 * the match state's was created by NAT64 rule.
	 */
	PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
	PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
	key.ext_gwy.xport.port = uh->uh_sport;
	key.gwy.xport.port = uh->uh_dport;

	PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
	PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
	key.lan.xport.port = uh->uh_sport;
	key.ext_lan.xport.port = uh->uh_dport;

	if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
	    ntohs(uh->uh_dport) == PF_IKE_PORT) {
		struct pf_ike_hdr ike;
		size_t plen = pbuf->pb_packet_len - off - sizeof(*uh);
		if (plen < PF_IKE_PACKET_MINSIZE) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: IKE message too small.\n"));
			return PF_DROP;
		}

		if (plen > sizeof(ike)) {
			plen = sizeof(ike);
		}
		pbuf_copy_data(pbuf, off + sizeof(*uh), plen, &ike);

		if (ike.initiator_cookie) {
			key.app_state = &as;
			as.compare_lan_ext = pf_ike_compare;
			as.compare_ext_gwy = pf_ike_compare;
			as.u.ike.cookie = ike.initiator_cookie;
		} else {
			/*
			 * <http://tools.ietf.org/html/\
			 * draft-ietf-ipsec-nat-t-ike-01>
			 * Support non-standard NAT-T implementations that
			 * push the ESP packet over the top of the IKE packet.
			 * Do not drop packet.
			 */
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: IKE initiator cookie = 0.\n"));
		}
	}

	*state = pf_find_state(kif, &key, direction);

	if (!key.app_state && *state == 0) {
		key.proto_variant = PF_EXTFILTER_AD;
		*state = pf_find_state(kif, &key, direction);
	}

	if (!key.app_state && *state == 0) {
		key.proto_variant = PF_EXTFILTER_EI;
		*state = pf_find_state(kif, &key, direction);
	}

	/* similar to STATE_LOOKUP() */
	if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
		pd->flowsrc = (*state)->state_key->flowsrc;
		pd->flowhash = (*state)->state_key->flowhash;
		if (pd->flowhash != 0) {
			pd->pktflags |= PKTF_FLOW_ID;
			pd->pktflags &= ~PKTF_FLOW_ADV;
		}
	}

	if (pf_state_lookup_aux(state, kif, direction, &action)) {
		return action;
	}

	sk = (*state)->state_key;

	/*
	 * In case of NAT64 the translation is first applied on the LAN
	 * side. Therefore for stack's address family comparison
	 * we use sk->af_lan.
	 */
	if ((direction == sk->direction) && (pd->af == sk->af_lan)) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFUDPS_SINGLE) {
		src->state = PFUDPS_SINGLE;
	}
	if (dst->state == PFUDPS_SINGLE) {
		dst->state = PFUDPS_MULTIPLE;
	}

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) {
		(*state)->timeout = PFTM_UDP_MULTIPLE;
	} else {
		(*state)->timeout = PFTM_UDP_SINGLE;
	}

	extfilter = sk->proto_variant;
	if (extfilter > PF_EXTFILTER_APD) {
		if (direction == PF_OUT) {
			sk->ext_lan.xport.port = key.ext_lan.xport.port;
			if (extfilter > PF_EXTFILTER_AD) {
				PF_ACPY(&sk->ext_lan.addr, &key.ext_lan.addr,
				    key.af_lan);
			}
		} else {
			sk->ext_gwy.xport.port = key.ext_gwy.xport.port;
			if (extfilter > PF_EXTFILTER_AD) {
				PF_ACPY(&sk->ext_gwy.addr, &key.ext_gwy.addr,
				    key.af_gwy);
			}
		}
	}

	if (sk->app_state && sk->app_state->handler) {
		sk->app_state->handler(*state, direction, off + uh->uh_ulen,
		    pd, kif);
		if (pd->lmw < 0) {
			REASON_SET(reason, PFRES_MEMORY);
			return PF_DROP;
		}
		pbuf = pd->mp; // XXXSCW: Why?
	}

	/* translate source/destination address, if necessary */
	if (STATE_TRANSLATE(sk)) {
		if (pf_lazy_makewritable(pd, pbuf, off + sizeof(*uh)) ==
		    NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return PF_DROP;
		}

		pd->naf = (pd->af == sk->af_lan) ? sk->af_gwy : sk->af_lan;

		if (direction == PF_OUT) {
			pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
			    pd->ip_sum, &uh->uh_sum, &sk->gwy.addr,
			    sk->gwy.xport.port, 1, pd->af, pd->naf, 1);
		} else {
			if (pd->af != pd->naf) {
				if (pd->af == sk->af_gwy) {
					pf_change_ap(direction, pd->mp, pd->dst,
					    &uh->uh_dport, pd->ip_sum,
					    &uh->uh_sum, &sk->lan.addr,
					    sk->lan.xport.port, 1,
					    pd->af, pd->naf, 0);

					pf_change_ap(direction, pd->mp, pd->src,
					    &uh->uh_sport, pd->ip_sum,
					    &uh->uh_sum, &sk->ext_lan.addr,
					    uh->uh_sport, 1, pd->af,
					    pd->naf, 0);
				} else {
					pf_change_ap(direction, pd->mp, pd->dst,
					    &uh->uh_dport, pd->ip_sum,
					    &uh->uh_sum, &sk->ext_gwy.addr,
					    uh->uh_dport, 1, pd->af,
					    pd->naf, 0);

					pf_change_ap(direction, pd->mp, pd->src,
					    &uh->uh_sport, pd->ip_sum,
					    &uh->uh_sum, &sk->gwy.addr,
					    sk->gwy.xport.port, 1, pd->af,
					    pd->naf, 0);
				}
			} else {
				pf_change_ap(direction, pd->mp, pd->dst,
				    &uh->uh_dport, pd->ip_sum,
				    &uh->uh_sum, &sk->lan.addr,
				    sk->lan.xport.port, 1,
				    pd->af, pd->naf, 1);
			}
		}

		pbuf_copy_back(pbuf, off, sizeof(*uh), uh);
		if (sk->af_lan != sk->af_gwy) {
			return pf_do_nat64(sk, pd, pbuf, off);
		}
	}
	return PF_PASS;
}
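/*
 * The three PF_EXTFILTER_* lookups above implement progressively looser
 * UDP NAT filtering (in RFC 4787 terms):
 *   PF_EXTFILTER_APD - address-and-port-dependent: the external host
 *                      and port must both match the state key;
 *   PF_EXTFILTER_AD  - address-dependent: retried with the external
 *                      port wildcarded;
 *   PF_EXTFILTER_EI  - endpoint-independent: retried with the external
 *                      endpoint wildcarded as well.
 * The strictest variant is searched first; only when it misses (and no
 * IKE app state narrows the search) do the looser variants get a turn.
 */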
static int
pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
    pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason)
{
#pragma unused(h)
	struct pf_addr *saddr = pd->src, *daddr = pd->dst;
	struct in_addr srcv4_inaddr = saddr->v4addr;
	u_int16_t icmpid = 0, *icmpsum = NULL;
	u_int8_t icmptype = 0;
	int state_icmp = 0;
	struct pf_state_key_cmp key;
	struct pf_state_key *sk;
	struct pf_app_state as;

	key.app_state = 0;

	switch (pd->proto) {
	case IPPROTO_ICMP:
		icmptype = pd->hdr.icmp->icmp_type;
		icmpid = pd->hdr.icmp->icmp_id;
		icmpsum = &pd->hdr.icmp->icmp_cksum;

		if (ICMP_ERRORTYPE(icmptype)) {
			state_icmp++;
		}
		break;
	case IPPROTO_ICMPV6:
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpid = pd->hdr.icmp6->icmp6_id;
		icmpsum = &pd->hdr.icmp6->icmp6_cksum;

		if (ICMP6_ERRORTYPE(icmptype)) {
			state_icmp++;
		}
		break;
	}

	if (!state_icmp) {
		/*
		 * ICMP query/reply message not related to a TCP/UDP packet.
		 * Search for an ICMP state.
		 */
		/*
		 * NAT64 requires protocol translation between ICMPv4
		 * and ICMPv6. TCP and UDP do not require protocol
		 * translation. To avoid adding complexity just to
		 * handle ICMP(v4addr/v6addr), we always lookup for
		 * proto = IPPROTO_ICMP on both LAN and WAN side
		 */
		key.proto = IPPROTO_ICMP;
		key.af_lan = key.af_gwy = pd->af;

		PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
		key.ext_gwy.xport.port = 0;
		key.gwy.xport.port = icmpid;

		PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
		PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
		key.lan.xport.port = icmpid;
		key.ext_lan.xport.port = 0;

		STATE_LOOKUP();

		sk = (*state)->state_key;
		(*state)->expire = pf_time_second();
		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;

		/* translate source/destination address, if necessary */
		if (STATE_TRANSLATE(sk)) {
			pd->naf = (pd->af == sk->af_lan) ?
			    sk->af_gwy : sk->af_lan;
			if (direction == PF_OUT) {
				switch (pd->af) {
				case AF_INET:
					pf_change_a(&saddr->v4addr.s_addr,
					    pd->ip_sum,
					    sk->gwy.addr.v4addr.s_addr, 0);
					pd->hdr.icmp->icmp_cksum =
					    pf_cksum_fixup(
					    pd->hdr.icmp->icmp_cksum, icmpid,
					    sk->gwy.xport.port, 0);
					pd->hdr.icmp->icmp_id =
					    sk->gwy.xport.port;
					if (pf_lazy_makewritable(pd, pbuf,
					    off + ICMP_MINLEN) == NULL) {
						return PF_DROP;
					}
					pbuf_copy_back(pbuf, off, ICMP_MINLEN,
					    pd->hdr.icmp);
					break;
				case AF_INET6:
					pf_change_a6(saddr,
					    &pd->hdr.icmp6->icmp6_cksum,
					    &sk->gwy.addr, 0);
					if (pf_lazy_makewritable(pd, pbuf,
					    off + sizeof(struct icmp6_hdr)) ==
					    NULL) {
						return PF_DROP;
					}
					pbuf_copy_back(pbuf, off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6);
					break;
				}
			} else {
				switch (pd->af) {
				case AF_INET:
					if (pd->naf != AF_INET) {
						if (pf_translate_icmp_af(
						    AF_INET6, pd->hdr.icmp)) {
							return PF_DROP;
						}
						pd->proto = IPPROTO_ICMPV6;
					} else {
						pf_change_a(&daddr->v4addr.s_addr,
						    pd->ip_sum,
						    sk->lan.addr.v4addr.s_addr, 0);
						pd->hdr.icmp->icmp_cksum =
						    pf_cksum_fixup(
						    pd->hdr.icmp->icmp_cksum,
						    icmpid, sk->lan.xport.port, 0);
						pd->hdr.icmp->icmp_id =
						    sk->lan.xport.port;
					}
					if (pf_lazy_makewritable(pd, pbuf,
					    off + ICMP_MINLEN) == NULL) {
						return PF_DROP;
					}
					pbuf_copy_back(pbuf, off, ICMP_MINLEN,
					    pd->hdr.icmp);
					if (sk->af_lan != sk->af_gwy) {
						return pf_do_nat64(sk, pd,
						    pbuf, off);
					}
					break;
				case AF_INET6:
					if (pd->naf != AF_INET6) {
						if (pf_translate_icmp_af(
						    AF_INET, pd->hdr.icmp6)) {
							return PF_DROP;
						}
						pd->proto = IPPROTO_ICMP;
					} else {
						pf_change_a6(daddr,
						    &pd->hdr.icmp6->icmp6_cksum,
						    &sk->lan.addr, 0);
					}
					if (pf_lazy_makewritable(pd, pbuf,
					    off + sizeof(struct icmp6_hdr)) ==
					    NULL) {
						return PF_DROP;
					}
					pbuf_copy_back(pbuf, off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6);
					if (sk->af_lan != sk->af_gwy) {
						return pf_do_nat64(sk, pd,
						    pbuf, off);
					}
					break;
				}
			}
		}

		return PF_PASS;
	} else {
		/*
		 * ICMP error message in response to a TCP/UDP packet.
		 * Extract the inner TCP/UDP header and search for that state.
		 */
		struct pf_pdesc pd2; /* For inner (original) header */
		struct ip h2;
		struct ip6_hdr h2_6;
		int terminal = 0;
		int ipoff2 = 0;
		int off2 = 0;

		memset(&pd2, 0, sizeof(pd2));

		pd2.af = pd->af;
		switch (pd->af) {
		case AF_INET:
			/* offset of h2 in mbuf chain */
			ipoff2 = off + ICMP_MINLEN;

			if (!pf_pull_hdr(pbuf, ipoff2, &h2, sizeof(h2),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(ip)\n"));
				return PF_DROP;
			}
			/*
			 * ICMP error messages don't refer to non-first
			 * fragments
			 */
			if (h2.ip_off & htons(IP_OFFMASK)) {
				REASON_SET(reason, PFRES_FRAG);
				return PF_DROP;
			}

			/* offset of protocol header that follows h2 */
			off2 = ipoff2 + (h2.ip_hl << 2);
			pd2.off = ipoff2 + (h2.ip_hl << 2);

			pd2.proto = h2.ip_p;
			pd2.src = (struct pf_addr *)&h2.ip_src;
			pd2.dst = (struct pf_addr *)&h2.ip_dst;
			pd2.ip_sum = &h2.ip_sum;
			break;
		case AF_INET6:
			ipoff2 = off + sizeof(struct icmp6_hdr);

			if (!pf_pull_hdr(pbuf, ipoff2, &h2_6, sizeof(h2_6),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(ip6)\n"));
				return PF_DROP;
			}
			pd2.proto = h2_6.ip6_nxt;
			pd2.src = (struct pf_addr *)(uintptr_t)&h2_6.ip6_src;
			pd2.dst = (struct pf_addr *)(uintptr_t)&h2_6.ip6_dst;
			off2 = ipoff2 + sizeof(h2_6);
			do {
				switch (pd2.proto) {
				case IPPROTO_FRAGMENT:
					/*
					 * ICMPv6 error messages for
					 * non-first fragments
					 */
					REASON_SET(reason, PFRES_FRAG);
					return PF_DROP;
				case IPPROTO_AH:
				case IPPROTO_HOPOPTS:
				case IPPROTO_ROUTING:
				case IPPROTO_DSTOPTS: {
					/* get next header and header length */
					struct ip6_ext opt6;

					if (!pf_pull_hdr(pbuf, off2, &opt6,
					    sizeof(opt6), NULL, reason,
					    pd2.af)) {
						DPFPRINTF(PF_DEBUG_MISC,
						    ("pf: ICMPv6 short opt\n"));
						return PF_DROP;
					}
					if (pd2.proto == IPPROTO_AH) {
						off2 += (opt6.ip6e_len + 2) * 4;
					} else {
						off2 += (opt6.ip6e_len + 1) * 8;
					}
					pd2.proto = opt6.ip6e_nxt;
					/* goto the next header */
					break;
				}
				default:
					terminal++;
					break;
				}
			} while (!terminal);
			pd2.off = off2;
			break;
		}

		switch (pd2.proto) {
		case IPPROTO_TCP: {
			struct tcphdr th;
			u_int32_t seq;
			struct pf_state_peer *src, *dst;
			u_int8_t dws;
			int copyback = 0;

			/*
			 * Only the first 8 bytes of the TCP header can be
			 * expected. Don't access any TCP header fields after
			 * th_seq, an ackskew test is not possible.
			 */
			if (!pf_pull_hdr(pbuf, off2, &th, 8, NULL, reason,
			    pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(tcp)\n"));
				return PF_DROP;
			}

			key.proto = IPPROTO_TCP;
			key.af_gwy = pd2.af;
			PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
			PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
			key.ext_gwy.xport.port = th.th_dport;
			key.gwy.xport.port = th.th_sport;

			key.af_lan = pd2.af;
			PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
			PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
			key.lan.xport.port = th.th_dport;
			key.ext_lan.xport.port = th.th_sport;

			STATE_LOOKUP();

			sk = (*state)->state_key;
			if ((direction == sk->direction) &&
			    ((sk->af_lan == sk->af_gwy) ||
			    (pd2.af == sk->af_lan))) {
				src = &(*state)->dst;
				dst = &(*state)->src;
			} else {
				src = &(*state)->src;
				dst = &(*state)->dst;
			}

			if (src->wscale && (dst->wscale & PF_WSCALE_FLAG)) {
				dws = dst->wscale & PF_WSCALE_MASK;
			} else {
				dws = TCP_MAX_WINSHIFT;
			}

			/* Demodulate sequence number */
			seq = ntohl(th.th_seq) - src->seqdiff;
			if (src->seqdiff) {
				pf_change_a(&th.th_seq, icmpsum,
				    htonl(seq), 0);
				copyback = 1;
			}

			if (!SEQ_GEQ(src->seqhi, seq) ||
			    !SEQ_GEQ(seq,
			    src->seqlo - ((u_int32_t)dst->max_win << dws))) {
				if (pf_status.debug >= PF_DEBUG_MISC) {
					printf("pf: BAD ICMP %d:%d ",
					    icmptype, pd->hdr.icmp->icmp_code);
					pf_print_host(pd->src, 0, pd->af);
					printf(" -> ");
					pf_print_host(pd->dst, 0, pd->af);
					printf(" state: ");
					pf_print_state(*state);
					printf(" seq=%u\n", seq);
				}
				REASON_SET(reason, PFRES_BADSTATE);
				return PF_DROP;
			}

			pd->naf = pd2.naf = (pd2.af == sk->af_lan) ?
			    sk->af_gwy : sk->af_lan;

			if (STATE_TRANSLATE(sk)) {
				/* NAT64 case */
				if (sk->af_lan != sk->af_gwy) {
					struct pf_state_host *saddr2, *daddr2;

					if (pd2.naf == sk->af_lan) {
						saddr2 = &sk->lan;
						daddr2 = &sk->ext_lan;
					} else {
						saddr2 = &sk->ext_gwy;
						daddr2 = &sk->gwy;
					}

					/* translate ICMP message types and codes */
					if (pf_translate_icmp_af(pd->naf,
					    pd->hdr.icmp)) {
						return PF_DROP;
					}

					if (pf_lazy_makewritable(pd, pbuf,
					    off2 + 8) == NULL) {
						return PF_DROP;
					}

					pbuf_copy_back(pbuf, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6);

					/*
					 * translate inner ip header within the
					 * ICMP message
					 */
					if (pf_change_icmp_af(pbuf, ipoff2, pd,
					    &pd2, &saddr2->addr, &daddr2->addr,
					    pd->af, pd->naf)) {
						return PF_DROP;
					}

					if (pd->naf == AF_INET) {
						pd->proto = IPPROTO_ICMP;
					} else {
						pd->proto = IPPROTO_ICMPV6;
					}

					/*
					 * translate inner tcp header within
					 * the ICMP message
					 */
					pf_change_ap(direction, NULL, pd2.src,
					    &th.th_sport, pd2.ip_sum,
					    &th.th_sum, &daddr2->addr,
					    saddr2->xport.port, 0, pd2.af,
					    pd2.naf, 0);

					pf_change_ap(direction, NULL, pd2.dst,
					    &th.th_dport, pd2.ip_sum,
					    &th.th_sum, &saddr2->addr,
					    daddr2->xport.port, 0, pd2.af,
					    pd2.naf, 0);

					pbuf_copy_back(pbuf, pd2.off, 8, &th);

					/* translate outer ip header */
					PF_ACPY(&pd->naddr, &daddr2->addr,
					    pd->naf);
					PF_ACPY(&pd->ndaddr, &saddr2->addr,
					    pd->naf);
					if (pd->af == AF_INET) {
						memcpy(&pd->naddr.addr32[3],
						    &srcv4_inaddr,
						    sizeof(pd->naddr.addr32[3]));
						return pf_nat64_ipv4(pbuf, off,
						    pd);
					} else {
						return pf_nat64_ipv6(pbuf, off,
						    pd);
					}
				}
				if (direction == PF_IN) {
					pf_change_icmp(pd2.src, &th.th_sport,
					    daddr, &sk->lan.addr,
					    sk->lan.xport.port, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);
				} else {
					pf_change_icmp(pd2.dst, &th.th_dport,
					    saddr, &sk->gwy.addr,
					    sk->gwy.xport.port, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);
				}
				copyback = 1;
			}

			if (copyback) {
				if (pf_lazy_makewritable(pd, pbuf, off2 + 8) ==
				    NULL) {
					return PF_DROP;
				}
				switch (pd2.af) {
				case AF_INET:
					pbuf_copy_back(pbuf, off, ICMP_MINLEN,
					    pd->hdr.icmp);
					pbuf_copy_back(pbuf, ipoff2, sizeof(h2),
					    &h2);
					break;
				case AF_INET6:
					pbuf_copy_back(pbuf, off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6);
					pbuf_copy_back(pbuf, ipoff2,
					    sizeof(h2_6), &h2_6);
					break;
				}
				pbuf_copy_back(pbuf, off2, 8, &th);
			}

			return PF_PASS;
		}
		case IPPROTO_UDP: {
			struct udphdr uh;
			int dx, action;

			if (!pf_pull_hdr(pbuf, off2, &uh, sizeof(uh),
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(udp)\n"));
				return PF_DROP;
			}

			key.af_gwy = pd2.af;
			PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
			PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
			key.ext_gwy.xport.port = uh.uh_dport;
			key.gwy.xport.port = uh.uh_sport;

			key.af_lan = pd2.af;
			PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
			PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
			key.lan.xport.port = uh.uh_dport;
			key.ext_lan.xport.port = uh.uh_sport;

			key.proto = IPPROTO_UDP;
			key.proto_variant = PF_EXTFILTER_APD;
			dx = direction;

			if (ntohs(uh.uh_sport) == PF_IKE_PORT &&
			    ntohs(uh.uh_dport) == PF_IKE_PORT) {
				struct pf_ike_hdr ike;
				size_t plen = pbuf->pb_packet_len - off2 -
				    sizeof(uh);
				if (direction == PF_IN &&
				    plen < 8 /* PF_IKE_PACKET_MINSIZE */) {
					DPFPRINTF(PF_DEBUG_MISC, ("pf: "
					    "ICMP error, embedded IKE message "
					    "too small.\n"));
					return PF_DROP;
				}

				if (plen > sizeof(ike)) {
					plen = sizeof(ike);
				}
				pbuf_copy_data(pbuf, off + sizeof(uh), plen,
				    &ike);

				key.app_state = &as;
				as.compare_lan_ext = pf_ike_compare;
				as.compare_ext_gwy = pf_ike_compare;
				as.u.ike.cookie = ike.initiator_cookie;
			}

			*state = pf_find_state(kif, &key, dx);

			if (key.app_state && *state == 0) {
				key.app_state = 0;
				*state = pf_find_state(kif, &key, dx);
			}

			if (*state == 0) {
				key.proto_variant = PF_EXTFILTER_AD;
				*state = pf_find_state(kif, &key, dx);
			}

			if (*state == 0) {
				key.proto_variant = PF_EXTFILTER_EI;
				*state = pf_find_state(kif, &key, dx);
			}

			/* similar to STATE_LOOKUP() */
			if (*state != NULL && pd != NULL &&
			    !(pd->pktflags & PKTF_FLOW_ID)) {
				pd->flowsrc = (*state)->state_key->flowsrc;
				pd->flowhash = (*state)->state_key->flowhash;
				if (pd->flowhash != 0) {
					pd->pktflags |= PKTF_FLOW_ID;
					pd->pktflags &= ~PKTF_FLOW_ADV;
				}
			}

			if (pf_state_lookup_aux(state, kif, direction,
			    &action)) {
				return action;
			}

			sk = (*state)->state_key;
			pd->naf = pd2.naf = (pd2.af == sk->af_lan) ?
			    sk->af_gwy : sk->af_lan;

			if (STATE_TRANSLATE(sk)) {
				/* NAT64 case */
				if (sk->af_lan != sk->af_gwy) {
					struct pf_state_host *saddr2, *daddr2;

					if (pd2.naf == sk->af_lan) {
						saddr2 = &sk->lan;
						daddr2 = &sk->ext_lan;
					} else {
						saddr2 = &sk->ext_gwy;
						daddr2 = &sk->gwy;
					}

					/* translate ICMP message */
					if (pf_translate_icmp_af(pd->naf,
					    pd->hdr.icmp)) {
						return PF_DROP;
					}
					if (pf_lazy_makewritable(pd, pbuf,
					    off2 + 8) == NULL) {
						return PF_DROP;
					}

					pbuf_copy_back(pbuf, pd->off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6);

					/*
					 * translate inner ip header within the
					 * ICMP message
					 */
					if (pf_change_icmp_af(pbuf, ipoff2, pd,
					    &pd2, &saddr2->addr, &daddr2->addr,
					    pd->af, pd->naf)) {
						return PF_DROP;
					}

					if (pd->naf == AF_INET) {
						pd->proto = IPPROTO_ICMP;
					} else {
						pd->proto = IPPROTO_ICMPV6;
					}

					/*
					 * translate inner udp header within
					 * the ICMP message
					 */
					pf_change_ap(direction, NULL, pd2.src,
					    &uh.uh_sport, pd2.ip_sum,
					    &uh.uh_sum, &daddr2->addr,
					    saddr2->xport.port, 0, pd2.af,
					    pd2.naf, 0);

					pf_change_ap(direction, NULL, pd2.dst,
					    &uh.uh_dport, pd2.ip_sum,
					    &uh.uh_sum, &saddr2->addr,
					    daddr2->xport.port, 0, pd2.af,
					    pd2.naf, 0);

					pbuf_copy_back(pbuf, pd2.off,
					    sizeof(uh), &uh);

					/* translate outer ip header */
					PF_ACPY(&pd->naddr, &daddr2->addr,
					    pd->naf);
					PF_ACPY(&pd->ndaddr, &saddr2->addr,
					    pd->naf);
					if (pd->af == AF_INET) {
						memcpy(&pd->naddr.addr32[3],
						    &srcv4_inaddr,
						    sizeof(pd->naddr.addr32[3]));
						return pf_nat64_ipv4(pbuf, off,
						    pd);
					} else {
						return pf_nat64_ipv6(pbuf, off,
						    pd);
					}
				}

				if (direction == PF_IN) {
					pf_change_icmp(pd2.src, &uh.uh_sport,
					    daddr, &sk->lan.addr,
					    sk->lan.xport.port, &uh.uh_sum,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 1, pd2.af);
				} else {
					pf_change_icmp(pd2.dst, &uh.uh_dport,
					    saddr, &sk->gwy.addr,
					    sk->gwy.xport.port, &uh.uh_sum,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 1, pd2.af);
				}
				if (pf_lazy_makewritable(pd, pbuf,
				    off2 + sizeof(uh)) == NULL) {
					return PF_DROP;
				}
				switch (pd2.af) {
				case AF_INET:
					pbuf_copy_back(pbuf, off, ICMP_MINLEN,
					    pd->hdr.icmp);
					pbuf_copy_back(pbuf, ipoff2,
					    sizeof(h2), &h2);
					break;
				case AF_INET6:
					pbuf_copy_back(pbuf, off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6);
					pbuf_copy_back(pbuf, ipoff2,
					    sizeof(h2_6), &h2_6);
					break;
				}
				pbuf_copy_back(pbuf, off2, sizeof(uh), &uh);
			}

			return PF_PASS;
		}
		case IPPROTO_ICMP: {
			struct icmp iih;

			if (!pf_pull_hdr(pbuf, off2, &iih, ICMP_MINLEN,
			    NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short i"
				    "(icmp)\n"));
				return PF_DROP;
			}

			key.proto = IPPROTO_ICMP;
			if (direction == PF_IN) {
				key.af_gwy = pd2.af;
				PF_ACPY(&key.ext_gwy.addr, pd2.dst,
				    key.af_gwy);
				PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
				key.ext_gwy.xport.port = 0;
				key.gwy.xport.port = iih.icmp_id;
			} else {
				key.af_lan = pd2.af;
				PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
				PF_ACPY(&key.ext_lan.addr, pd2.src,
				    key.af_lan);
				key.lan.xport.port = iih.icmp_id;
				key.ext_lan.xport.port = 0;
			}

			STATE_LOOKUP();

			sk = (*state)->state_key;
			if (STATE_TRANSLATE(sk)) {
				if (direction == PF_IN) {
					pf_change_icmp(pd2.src, &iih.icmp_id,
					    daddr, &sk->lan.addr,
					    sk->lan.xport.port, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET);
				} else {
					pf_change_icmp(pd2.dst, &iih.icmp_id,
					    saddr, &sk->gwy.addr,
					    sk->gwy.xport.port, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET);
				}
				if (pf_lazy_makewritable(pd, pbuf,
				    off2 + ICMP_MINLEN) == NULL) {
					return PF_DROP;
				}
				pbuf_copy_back(pbuf, off, ICMP_MINLEN,
				    pd->hdr.icmp);
				pbuf_copy_back(pbuf, ipoff2, sizeof(h2), &h2);
				pbuf_copy_back(pbuf, off2, ICMP_MINLEN, &iih);
			}

			return PF_PASS;
		}
		case IPPROTO_ICMPV6: {
			struct icmp6_hdr iih;

			if (!pf_pull_hdr(pbuf, off2, &iih,
			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: ICMP error message too short "
				    "(icmp6)\n"));
				return PF_DROP;
			}

			key.proto = IPPROTO_ICMPV6;
			if (direction == PF_IN) {
				key.af_gwy = pd2.af;
				PF_ACPY(&key.ext_gwy.addr, pd2.dst,
				    key.af_gwy);
				PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
				key.ext_gwy.xport.port = 0;
				key.gwy.xport.port = iih.icmp6_id;
			} else {
				key.af_lan = pd2.af;
				PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
				PF_ACPY(&key.ext_lan.addr, pd2.src,
				    key.af_lan);
				key.lan.xport.port = iih.icmp6_id;
				key.ext_lan.xport.port = 0;
			}

			STATE_LOOKUP();

			sk = (*state)->state_key;
			if (STATE_TRANSLATE(sk)) {
				if (direction == PF_IN) {
					pf_change_icmp(pd2.src, &iih.icmp6_id,
					    daddr, &sk->lan.addr,
					    sk->lan.xport.port, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET6);
				} else {
					pf_change_icmp(pd2.dst, &iih.icmp6_id,
					    saddr, &sk->gwy.addr,
					    sk->gwy.xport.port, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, AF_INET6);
				}
				if (pf_lazy_makewritable(pd, pbuf, off2 +
				    sizeof(struct icmp6_hdr)) == NULL) {
					return PF_DROP;
				}
				pbuf_copy_back(pbuf, off,
				    sizeof(struct icmp6_hdr), pd->hdr.icmp6);
				pbuf_copy_back(pbuf, ipoff2, sizeof(h2_6),
				    &h2_6);
				pbuf_copy_back(pbuf, off2,
				    sizeof(struct icmp6_hdr), &iih);
			}

			return PF_PASS;
		}
		default: {
			key.proto = pd2.proto;
			if (direction == PF_IN) {
				key.af_gwy = pd2.af;
				PF_ACPY(&key.ext_gwy.addr, pd2.dst,
				    key.af_gwy);
				PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
				key.ext_gwy.xport.port = 0;
				key.gwy.xport.port = 0;
			} else {
				key.af_lan = pd2.af;
				PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
				PF_ACPY(&key.ext_lan.addr, pd2.src,
				    key.af_lan);
				key.lan.xport.port = 0;
				key.ext_lan.xport.port = 0;
			}

			STATE_LOOKUP();

			sk = (*state)->state_key;
			if (STATE_TRANSLATE(sk)) {
				if (direction == PF_IN) {
					pf_change_icmp(pd2.src, NULL, daddr,
					    &sk->lan.addr, 0, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);
				} else {
					pf_change_icmp(pd2.dst, NULL, saddr,
					    &sk->gwy.addr, 0, NULL,
					    pd2.ip_sum, icmpsum,
					    pd->ip_sum, 0, pd2.af);
				}
				switch (pd2.af) {
				case AF_INET:
					if (pf_lazy_makewritable(pd, pbuf,
					    ipoff2 + sizeof(h2)) == NULL) {
						return PF_DROP;
					}
					/*
					 * Xnu was missing the following...
					 */
					pbuf_copy_back(pbuf, off, ICMP_MINLEN,
					    pd->hdr.icmp);
					pbuf_copy_back(pbuf, ipoff2,
					    sizeof(h2), &h2);
					break;
				case AF_INET6:
					if (pf_lazy_makewritable(pd, pbuf,
					    ipoff2 + sizeof(h2_6)) == NULL) {
						return PF_DROP;
					}
					pbuf_copy_back(pbuf, off,
					    sizeof(struct icmp6_hdr),
					    pd->hdr.icmp6);
					pbuf_copy_back(pbuf, ipoff2,
					    sizeof(h2_6), &h2_6);
					break;
				}
			}

			return PF_PASS;
		}
		}
	}
}
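/*
 * For ICMP error messages the state is looked up from the *embedded*
 * (original) packet, so source and destination are deliberately swapped
 * when building the key above: the error travels in the opposite
 * direction of the flow that triggered it. Only the first 8 bytes of
 * the embedded transport header are guaranteed to be present (RFC 792),
 * which is why the TCP case cannot do an ackskew test and demodulates
 * th_seq alone before the sequence-window sanity check.
 */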
static int
pf_test_state_grev1(struct pf_state **state, int direction,
    struct pfi_kif *kif, int off, struct pf_pdesc *pd)
{
	struct pf_state_peer *src;
	struct pf_state_peer *dst;
	struct pf_state_key_cmp key;
	struct pf_grev1_hdr *grev1 = pd->hdr.grev1;

	key.app_state = 0;
	key.proto = IPPROTO_GRE;
	key.proto_variant = PF_GRE_PPTP_VARIANT;
	if (direction == PF_IN) {
		key.af_gwy = pd->af;
		PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
		key.gwy.xport.call_id = grev1->call_id;
	} else {
		key.af_lan = pd->af;
		PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
		PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
		key.ext_lan.xport.call_id = grev1->call_id;
	}

	STATE_LOOKUP();

	if (direction == (*state)->state_key->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFGRE1S_INITIATING) {
		src->state = PFGRE1S_INITIATING;
	}

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state >= PFGRE1S_INITIATING &&
	    dst->state >= PFGRE1S_INITIATING) {
		if ((*state)->timeout != PFTM_TCP_ESTABLISHED) {
			(*state)->timeout = PFTM_GREv1_ESTABLISHED;
		}
		src->state = PFGRE1S_ESTABLISHED;
		dst->state = PFGRE1S_ESTABLISHED;
	} else {
		(*state)->timeout = PFTM_GREv1_INITIATING;
	}

	if ((*state)->state_key->app_state) {
		(*state)->state_key->app_state->u.grev1.pptp_state->expire =
		    pf_time_second();
	}

	/* translate source/destination address, if necessary */
	if (STATE_GRE_TRANSLATE((*state)->state_key)) {
		if (direction == PF_OUT) {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->src->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->gwy.addr.v4addr.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
				    pd->af);
				break;
			}
		} else {
			grev1->call_id =
			    (*state)->state_key->lan.xport.call_id;

			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->dst->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->lan.addr.v4addr.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
				    pd->af);
				break;
			}
		}

		if (pf_lazy_makewritable(pd, pd->mp, off + sizeof(*grev1)) ==
		    NULL) {
			return PF_DROP;
		}
		pbuf_copy_back(pd->mp, off, sizeof(*grev1), grev1);
	}

	return PF_PASS;
}
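/*
 * GREv1 (PPTP data channel) states are keyed on the GRE call ID rather
 * than on ports: gwy.xport.call_id for inbound lookups and
 * ext_lan.xport.call_id for outbound ones. When a companion PPTP
 * control-channel app state exists, its expiry is refreshed above so
 * the control and data states age together.
 */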
static int
pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
    int off, struct pf_pdesc *pd)
{
#pragma unused(off)
	struct pf_state_peer *src;
	struct pf_state_peer *dst;
	struct pf_state_key_cmp key;
	struct pf_esp_hdr *esp = pd->hdr.esp;
	int action;

	memset(&key, 0, sizeof(key));
	key.proto = IPPROTO_ESP;
	if (direction == PF_IN) {
		key.af_gwy = pd->af;
		PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
		key.gwy.xport.spi = esp->spi;
	} else {
		key.af_lan = pd->af;
		PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
		PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
		key.ext_lan.xport.spi = esp->spi;
	}

	*state = pf_find_state(kif, &key, direction);

	if (*state == 0) {
		struct pf_state *s;

		/*
		 * No matching state. Look for a blocking state. If we find
		 * one, then use that state and move it so that it's keyed to
		 * the SPI in the current packet.
		 */
		if (direction == PF_IN) {
			key.gwy.xport.spi = 0;

			s = pf_find_state(kif, &key, direction);
			if (s != NULL) {
				struct pf_state_key *sk = s->state_key;

				RB_REMOVE(pf_state_tree_ext_gwy,
				    &pf_statetbl_ext_gwy, sk);
				sk->lan.xport.spi = sk->gwy.xport.spi =
				    esp->spi;

				if (RB_INSERT(pf_state_tree_ext_gwy,
				    &pf_statetbl_ext_gwy, sk)) {
					pf_detach_state(s, PF_DT_SKIP_EXTGWY);
				} else {
					*state = s;
				}
			}
		} else {
			key.ext_lan.xport.spi = 0;

			s = pf_find_state(kif, &key, direction);
			if (s != NULL) {
				struct pf_state_key *sk = s->state_key;

				RB_REMOVE(pf_state_tree_lan_ext,
				    &pf_statetbl_lan_ext, sk);
				sk->ext_lan.xport.spi = esp->spi;

				if (RB_INSERT(pf_state_tree_lan_ext,
				    &pf_statetbl_lan_ext, sk)) {
					pf_detach_state(s, PF_DT_SKIP_LANEXT);
				} else {
					*state = s;
				}
			}
		}

		if (s != NULL) {
			if (*state == 0) {
#if NPFSYNC
				if (s->creatorid == pf_status.hostid) {
					pfsync_delete_state(s);
				}
#endif
				s->timeout = PFTM_UNLINKED;
				hook_runloop(&s->unlink_hooks,
				    HOOK_REMOVE | HOOK_FREE);
				pf_src_tree_remove_state(s);
				pf_free_state(s);
				return PF_DROP;
			}
		}
	}

	/* similar to STATE_LOOKUP() */
	if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
		pd->flowsrc = (*state)->state_key->flowsrc;
		pd->flowhash = (*state)->state_key->flowhash;
		if (pd->flowhash != 0) {
			pd->pktflags |= PKTF_FLOW_ID;
			pd->pktflags &= ~PKTF_FLOW_ADV;
		}
	}

	if (pf_state_lookup_aux(state, kif, direction, &action)) {
		return action;
	}

	if (direction == (*state)->state_key->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFESPS_INITIATING) {
		src->state = PFESPS_INITIATING;
	}

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state >= PFESPS_INITIATING &&
	    dst->state >= PFESPS_INITIATING) {
		(*state)->timeout = PFTM_ESP_ESTABLISHED;
		src->state = PFESPS_ESTABLISHED;
		dst->state = PFESPS_ESTABLISHED;
	} else {
		(*state)->timeout = PFTM_ESP_INITIATING;
	}

	/* translate source/destination address, if necessary */
	if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
		if (direction == PF_OUT) {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->src->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->gwy.addr.v4addr.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->src,
				    &(*state)->state_key->gwy.addr, pd->af);
				break;
			}
		} else {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->dst->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->lan.addr.v4addr.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->dst,
				    &(*state)->state_key->lan.addr, pd->af);
				break;
			}
		}
	}

	return PF_PASS;
}
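/*
 * ESP states are keyed on the SPI. Because the peer's SPI is not known
 * until the first packet of the exchange arrives, a rule can leave a
 * "blocking" state keyed with SPI 0; the lookup above re-keys such a
 * state to the SPI actually seen by removing it from its tree, patching
 * the SPI, and re-inserting it. A collision on re-insert detaches and
 * frees the duplicate state instead.
 */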
static int
pf_test_state_other(struct pf_state **state, int direction,
    struct pfi_kif *kif, struct pf_pdesc *pd)
{
	struct pf_state_peer *src, *dst;
	struct pf_state_key_cmp key;

	key.app_state = 0;
	key.proto = pd->proto;
	if (direction == PF_IN) {
		key.af_gwy = pd->af;
		PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
		key.ext_gwy.xport.port = 0;
		key.gwy.xport.port = 0;
	} else {
		key.af_lan = pd->af;
		PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
		PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
		key.lan.xport.port = 0;
		key.ext_lan.xport.port = 0;
	}

	STATE_LOOKUP();

	if (direction == (*state)->state_key->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFOTHERS_SINGLE) {
		src->state = PFOTHERS_SINGLE;
	}
	if (dst->state == PFOTHERS_SINGLE) {
		dst->state = PFOTHERS_MULTIPLE;
	}

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state == PFOTHERS_MULTIPLE &&
	    dst->state == PFOTHERS_MULTIPLE) {
		(*state)->timeout = PFTM_OTHER_MULTIPLE;
	} else {
		(*state)->timeout = PFTM_OTHER_SINGLE;
	}

	/* translate source/destination address, if necessary */
	if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
		if (direction == PF_OUT) {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->src->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->gwy.addr.v4addr.s_addr,
				    0);
				break;
			case AF_INET6:
				PF_ACPY(pd->src,
				    &(*state)->state_key->gwy.addr, pd->af);
				break;
			}
		} else {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->dst->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->lan.addr.v4addr.s_addr,
				    0);
				break;
			case AF_INET6:
				PF_ACPY(pd->dst,
				    &(*state)->state_key->lan.addr, pd->af);
				break;
			}
		}
	}

	return PF_PASS;
}
/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
void *
pf_pull_hdr(pbuf_t *pbuf, int off, void *p, int len,
    u_short *actionp, u_short *reasonp, sa_family_t af)
{
	switch (af) {
	case AF_INET: {
		struct ip *h = pbuf->pb_data;
		u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

		if (fragoff) {
			if (fragoff >= len) {
				ACTION_SET(actionp, PF_PASS);
			} else {
				ACTION_SET(actionp, PF_DROP);
				REASON_SET(reasonp, PFRES_FRAG);
			}
			return NULL;
		}
		if (pbuf->pb_packet_len < (unsigned)(off + len) ||
		    ntohs(h->ip_len) < off + len) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return NULL;
		}
		break;
	}
	case AF_INET6: {
		struct ip6_hdr *h = pbuf->pb_data;

		if (pbuf->pb_packet_len < (unsigned)(off + len) ||
		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
		    (unsigned)(off + len)) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return NULL;
		}
		break;
	}
	}
	pbuf_copy_data(pbuf, off, len, p);
	return p;
}
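/*
 * pf_pull_hdr() copies "len" bytes of header at offset "off" into the
 * caller's buffer "p" and returns p, or returns NULL after setting
 * *actionp / *reasonp: a non-first IPv4 fragment whose data begins past
 * the requested header passes (its ports cannot be inspected anyway),
 * while any other short read drops with PFRES_FRAG or PFRES_SHORT.
 */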
int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
{
	struct sockaddr_in *dst;
	struct sockaddr_in6 *dst6;
	struct route_in6 ro;
	int ret = 0;

	bzero(&ro, sizeof(ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4addr;
		break;
	case AF_INET6:
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6addr;
		break;
	default:
		return 0;
	}

	/* XXX: IFT_ENC is not currently used by anything*/
	/* Skip checks for ipsec interfaces */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) {
		goto out;
	}

	/* XXX: what is the point of this? */
	rtalloc((struct route *)&ro);

out:
	if (ro.ro_rt != NULL) {
		ret = 1;
	}
	ROUTE_RELEASE(&ro);

	return ret;
}
int
pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
{
#pragma unused(aw)
	struct sockaddr_in *dst;
	struct sockaddr_in6 *dst6;
	struct route_in6 ro;
	int ret = 0;

	bzero(&ro, sizeof(ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = addr->v4addr;
		break;
	case AF_INET6:
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof(*dst6);
		dst6->sin6_addr = addr->v6addr;
		break;
	default:
		return 0;
	}

	/* XXX: what is the point of this? */
	rtalloc((struct route *)&ro);

	ROUTE_RELEASE(&ro);

	return ret;
}
static void
pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf *m0, *m1;
	struct route iproute;
	struct route *ro = &iproute;
	struct sockaddr_in *dst;
	struct ip *ip;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;
	int error = 0;
	uint32_t sw_csum;
	int interface_mtu = 0;
	bzero(&iproute, sizeof(iproute));

	if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL) {
		panic("pf_route: invalid parameters");
	}

	if (pd->pf_mtag->pftag_routed++ > 3) {
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		m0 = NULL;
		goto bad;
	}

	/*
	 * Since this is something of an edge case and may involve the
	 * host stack (for routing, at least for now), we convert the
	 * incoming pbuf into an mbuf.
	 */
	if (r->rt == PF_DUPTO) {
		m0 = pbuf_clone_to_mbuf(*pbufp);
	} else if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
		return;
	} else {
		/* We're going to consume this packet */
		m0 = pbuf_to_mbuf(*pbufp, TRUE);
		*pbufp = NULL;
	}

	if (m0 == NULL) {
		goto bad;
	}

	/* We now have the packet in an mbuf (m0) */

	if (m0->m_len < (int)sizeof(struct ip)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route: packet length < sizeof (struct ip)\n"));
		goto bad;
	}

	ip = mtod(m0, struct ip *);

	dst = satosin((void *)&ro->ro_dst);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof(*dst);
	dst->sin_addr = ip->ip_dst;

	if (r->rt == PF_FASTROUTE) {
		rtalloc(ro);
		if (ro->ro_rt == NULL) {
			ipstat.ips_noroute++;
			goto bad;
		}

		ifp = ro->ro_rt->rt_ifp;
		RT_LOCK(ro->ro_rt);
		ro->ro_rt->rt_use++;

		if (ro->ro_rt->rt_flags & RTF_GATEWAY) {
			dst = satosin((void *)ro->ro_rt->rt_gateway);
		}
		RT_UNLOCK(ro->ro_rt);
	} else {
		if (TAILQ_EMPTY(&r->rpool.list)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
			goto bad;
		}
		if (s == NULL) {
			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
			    &naddr, NULL, &sn);
			if (!PF_AZERO(&naddr, AF_INET)) {
				dst->sin_addr.s_addr = naddr.v4addr.s_addr;
			}
			ifp = r->rpool.cur->kif ?
			    r->rpool.cur->kif->pfik_ifp : NULL;
		} else {
			if (!PF_AZERO(&s->rt_addr, AF_INET)) {
				dst->sin_addr.s_addr =
				    s->rt_addr.v4addr.s_addr;
			}
			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
		}
	}
	if (ifp == NULL) {
		goto bad;
	}

	if (oifp != ifp) {
		if (pf_test_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
			goto bad;
		} else if (m0 == NULL) {
			goto done;
		}
		if (m0->m_len < (int)sizeof(struct ip)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route: packet length < sizeof (struct ip)\n"));
			goto bad;
		}
		ip = mtod(m0, struct ip *);
	}

	/* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
	ip_output_checksum(ifp, m0, ((ip->ip_hl) << 2), ntohs(ip->ip_len),
	    &sw_csum);

	interface_mtu = ifp->if_mtu;

	if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
		interface_mtu = IN6_LINKMTU(ifp);
		/* Further adjust the size for CLAT46 expansion */
		interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD;
	}

	if (ntohs(ip->ip_len) <= interface_mtu || TSO_IPV4_OK(ifp, m0) ||
	    (!(ip->ip_off & htons(IP_DF)) &&
	    (ifp->if_hwassist & CSUM_FRAGMENT))) {
		ip->ip_sum = 0;
		if (sw_csum & CSUM_DELAY_IP) {
			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
			sw_csum &= ~CSUM_DELAY_IP;
			m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
		}

		error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt, sintosa(dst));
		goto done;
	}

	/*
	 * Too large for interface; fragment if possible.
	 * Must be able to put at least 8 bytes per fragment.
	 * Balk when DF bit is set or the interface didn't support TSO.
	 */
	if ((ip->ip_off & htons(IP_DF)) ||
	    (m0->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) {
		ipstat.ips_cantfrag++;
		if (r->rt != PF_DUPTO) {
			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
			    interface_mtu);
			goto done;
		} else {
			goto bad;
		}
	}

	m1 = m0;

	/* PR-8933605: send ip_len,ip_off to ip_fragment in host byte order */
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_off);
	NTOHS(ip->ip_len);
#endif
	error = ip_fragment(m0, ifp, interface_mtu, sw_csum);

	if (error) {
		m0 = NULL;
		goto bad;
	}

	for (m0 = m1; m0; m0 = m1) {
		m1 = m0->m_nextpkt;
		m0->m_nextpkt = 0;
		if (error == 0) {
			error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt,
			    sintosa(dst));
		} else {
			m_freem(m0);
		}
	}

	if (error == 0) {
		ipstat.ips_fragmented++;
	}

done:
	ROUTE_RELEASE(&iproute);
	return;

bad:
	if (m0) {
		m_freem(m0);
	}
	goto done;
}
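/*
 * The fragmentation path above hands ip_len/ip_off to ip_fragment() in
 * host byte order (PR-8933605) and then walks the resulting m_nextpkt
 * chain, sending each fragment through ifnet_output() until an error
 * occurs, after which the remaining fragments are freed. Packets with
 * DF set, or already marked for TSO, are rejected with an ICMP
 * "fragmentation needed" error instead.
 */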
static void
pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf *m0;
	struct route_in6 ip6route;
	struct route_in6 *ro;
	struct sockaddr_in6 *dst;
	struct ip6_hdr *ip6;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;
	int error = 0;
	struct pf_mtag *pf_mtag;

	if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL) {
		panic("pf_route6: invalid parameters");
	}

	if (pd->pf_mtag->pftag_routed++ > 3) {
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		m0 = NULL;
		goto bad;
	}

	/*
	 * Since this is something of an edge case and may involve the
	 * host stack (for routing, at least for now), we convert the
	 * incoming pbuf into an mbuf.
	 */
	if (r->rt == PF_DUPTO) {
		m0 = pbuf_clone_to_mbuf(*pbufp);
	} else if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
		return;
	} else {
		/* We're about to consume this packet */
		m0 = pbuf_to_mbuf(*pbufp, TRUE);
		*pbufp = NULL;
	}

	if (m0 == NULL) {
		goto bad;
	}

	if (m0->m_len < (int)sizeof(struct ip6_hdr)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
		goto bad;
	}
	ip6 = mtod(m0, struct ip6_hdr *);

	ro = &ip6route;
	bzero((caddr_t)ro, sizeof(*ro));
	dst = (struct sockaddr_in6 *)&ro->ro_dst;
	dst->sin6_family = AF_INET6;
	dst->sin6_len = sizeof(*dst);
	dst->sin6_addr = ip6->ip6_dst;

	/* Cheat. XXX why only in the v6addr case??? */
	if (r->rt == PF_FASTROUTE) {
		pf_mtag = pf_get_mtag(m0);
		ASSERT(pf_mtag != NULL);
		pf_mtag->pftag_flags |= PF_TAG_GENERATED;
		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
		return;
	}

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
		goto bad;
	}
	if (s == NULL) {
		pf_map_addr(AF_INET6, r,
		    (struct pf_addr *)(uintptr_t)&ip6->ip6_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET6)) {
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &naddr, AF_INET6);
		}
		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		if (!PF_AZERO(&s->rt_addr, AF_INET6)) {
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &s->rt_addr, AF_INET6);
		}
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
	}

	if (ifp == NULL) {
		goto bad;
	}

	if (oifp != ifp) {
		if (pf_test6_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) {
			goto bad;
		} else if (m0 == NULL) {
			goto done;
		}
		if (m0->m_len < (int)sizeof(struct ip6_hdr)) {
			DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
			    "< sizeof (struct ip6_hdr)\n"));
			goto bad;
		}
	}

	pf_mtag = pf_get_mtag(m0);
	/*
	 * send refragmented packets.
	 */
	if ((pf_mtag->pftag_flags & PF_TAG_REFRAGMENTED) != 0) {
		pf_mtag->pftag_flags &= ~PF_TAG_REFRAGMENTED;
		/*
		 * nd6_output() frees packet chain in both success and
		 * failure cases.
		 */
		error = nd6_output(ifp, ifp, m0, dst, NULL, NULL);
		m0 = NULL;
		if (error) {
			DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6:"
			    "dropped refragmented packet\n"));
		}
		goto done;
	}

	ip6 = mtod(m0, struct ip6_hdr *);

	/*
	 * If the packet is too large for the outgoing interface,
	 * send back an icmp6 error.
	 */
	if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr)) {
		dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
	}
	if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
		error = nd6_output(ifp, ifp, m0, dst, NULL, NULL);
	} else {
		in6_ifstat_inc(ifp, ifs6_in_toobig);
		if (r->rt != PF_DUPTO) {
			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
		} else {
			goto bad;
		}
	}

done:
	return;

bad:
	if (m0) {
		m_freem(m0);
	}
	goto done;
}
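/*
 * Unlike pf_route(), the IPv6 path never fragments here: packets that
 * pf itself reassembled carry PF_TAG_REFRAGMENTED and are handed to
 * nd6_output() as an already re-fragmented chain, while anything else
 * that exceeds the link MTU is bounced with ICMP6_PACKET_TOO_BIG so the
 * sender can perform path-MTU discovery.
 */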
/*
 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
 *   off is the offset where the protocol header starts
 *   len is the total length of protocol header plus payload
 * returns 0 when the checksum is valid, otherwise returns 1.
 */
static int
pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p,
    sa_family_t af)
{
	u_int16_t sum;

	switch (p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/*
		 * Optimize for the common case; if the hardware calculated
		 * value doesn't include pseudo-header checksum, or if it
		 * is partially-computed (only 16-bit summation), do it in
		 * software below.
		 */
		if ((*pbuf->pb_csum_flags &
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR) &&
		    (*pbuf->pb_csum_data ^ 0xffff) == 0) {
			return 0;
		}
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		break;
	default:
		return 1;
	}
	if (off < (int)sizeof(struct ip) || len < (int)sizeof(struct udphdr)) {
		return 1;
	}
	if (pbuf->pb_packet_len < (unsigned)(off + len)) {
		return 1;
	}
	switch (af) {
	case AF_INET:
		if (p == IPPROTO_ICMP) {
			if (pbuf->pb_contig_len < (unsigned)off) {
				return 1;
			}
			sum = pbuf_inet_cksum(pbuf, 0, off, len);
		} else {
			if (pbuf->pb_contig_len < (int)sizeof(struct ip)) {
				return 1;
			}
			sum = pbuf_inet_cksum(pbuf, p, off, len);
		}
		break;
	case AF_INET6:
		if (pbuf->pb_contig_len < (int)sizeof(struct ip6_hdr)) {
			return 1;
		}
		sum = pbuf_inet6_cksum(pbuf, p, off, len);
		break;
	default:
		return 1;
	}
	if (sum) {
		switch (p) {
		case IPPROTO_TCP:
			tcpstat.tcps_rcvbadsum++;
			break;
		case IPPROTO_UDP:
			udpstat.udps_badsum++;
			break;
		case IPPROTO_ICMP:
			icmpstat.icps_checksum++;
			break;
		case IPPROTO_ICMPV6:
			icmp6stat.icp6s_checksum++;
			break;
		}
		return 1;
	}
	return 0;
}
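/*
 * The fast path above trusts hardware offload only when the driver
 * reports a fully computed checksum that already covers the
 * pseudo-header (CSUM_DATA_VALID | CSUM_PSEUDO_HDR) and the folded
 * 16-bit value verifies to 0xffff; anything partial is re-summed in
 * software via pbuf_inet_cksum()/pbuf_inet6_cksum(). ICMPv4 is the one
 * protocol summed without a pseudo-header, hence the proto-0 call.
 */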
#define PF_APPLE_UPDATE_PDESC_IPv4()				\
	do {							\
		if (pbuf && pd.mp && pbuf != pd.mp) {		\
			pbuf = pd.mp;				\
			h = pbuf->pb_data;			\
			pd.pf_mtag = pf_get_mtag_pbuf(pbuf);	\
		}						\
	} while (0)

int
pf_test_mbuf(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh, struct ip_fw_args *fwa)
{
	pbuf_t pbuf_store, *pbuf;
	int rv;

	pbuf_init_mbuf(&pbuf_store, *m0, (*m0)->m_pkthdr.rcvif);
	pbuf = &pbuf_store;

	rv = pf_test(dir, ifp, &pbuf, eh, fwa);

	if (pbuf_is_valid(pbuf)) {
		*m0 = pbuf->pb_mbuf;
		pbuf->pb_mbuf = NULL;
		pbuf_destroy(pbuf);
	} else {
		*m0 = NULL;
	}

	return rv;
}

int
pf_test(int dir, struct ifnet *ifp, pbuf_t **pbufp,
    struct ether_header *eh, struct ip_fw_args *fwa)
{
#if !DUMMYNET
#pragma unused(fwa)
#endif
	struct pfi_kif *kif;
	u_short action = PF_PASS, reason = 0, log = 0;
	pbuf_t *pbuf = *pbufp;
	struct ip *h = 0;
	struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_state_key *sk = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	int off, dirndx, pqid = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!pf_status.running) {
		return PF_PASS;
	}

	memset(&pd, 0, sizeof(pd));

	if ((pd.pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: pf_get_mtag_pbuf returned NULL\n"));
		return PF_DROP;
	}

	if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED) {
		return PF_PASS;
	}

	kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
		return PF_DROP;
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP) {
		return PF_PASS;
	}

	if (pbuf->pb_packet_len < (int)sizeof(*h)) {
		REASON_SET(&reason, PFRES_SHORT);
		return PF_DROP;
	}

	/* initialize enough of pd for the done label */
	h = pbuf->pb_data;
	pd.mp = pbuf;
	pd.lmw = 0;
	pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	PF_ACPY(&pd.baddr, pd.src, AF_INET);
	PF_ACPY(&pd.bdaddr, pd.dst, AF_INET);
	pd.ip_sum = &h->ip_sum;
	pd.proto = h->ip_p;
	pd.proto_variant = 0;
	pd.af = AF_INET;
	pd.tos = h->ip_tos;
	pd.ttl = h->ip_ttl;
	pd.tot_len = ntohs(h->ip_len);

#if DUMMYNET
	if (fwa != NULL && fwa->fwa_pf_rule != NULL) {
		goto nonormalize;
	}
#endif /* DUMMYNET */

	/* We do IP header normalization and packet reassembly here */
	action = pf_normalize_ip(pbuf, dir, kif, &reason, &pd);
	if (action != PF_PASS || pd.lmw < 0) {
		action = PF_DROP;
		goto done;
	}

#if DUMMYNET
nonormalize:
#endif /* DUMMYNET */
	/* pf_normalize can mess with pb_data */
	h = pbuf->pb_data;

	off = h->ip_hl << 2;
	if (off < (int)sizeof(*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	PF_ACPY(&pd.baddr, pd.src, AF_INET);
	PF_ACPY(&pd.bdaddr, pd.dst, AF_INET);
	pd.ip_sum = &h->ip_sum;
	pd.proto = h->ip_p;
	pd.proto_variant = 0;
	pd.mp = pbuf;
	pd.lmw = 0;
	pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
	pd.af = AF_INET;
	pd.tos = h->ip_tos;
	pd.ttl = h->ip_ttl;
	pd.sc = MBUF_SCIDX(pbuf_get_service_class(pbuf));
	pd.tot_len = ntohs(h->ip_len);

	if (*pbuf->pb_flags & PKTF_FLOW_ID) {
		pd.flowsrc = *pbuf->pb_flowsrc;
		pd.flowhash = *pbuf->pb_flowid;
		pd.pktflags = *pbuf->pb_flags & PKTF_FLOW_MASK;
	}

	/* handle fragments that didn't get reassembled by normalization */
	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
		pd.flags |= PFDESC_IP_FRAG;
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_fragment(&r, dir, kif, pbuf, h,
		    &pd, &a, &ruleset);
		goto done;
	}

	switch (h->ip_p) {
	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(pbuf, off, &th, sizeof(th),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		if ((th.th_flags & TH_ACK) && pd.p_len == 0) {
			pqid = 1;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_normalize_tcp(dir, kif, pbuf, 0, off, h, &pd);
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_DROP) {
			goto done;
		}
		action = pf_test_state_tcp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(pbuf, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}

	case IPPROTO_ICMP: {
		struct icmp ih;

		pd.hdr.icmp = &ih;
		if (!pf_pull_hdr(pbuf, off, &ih, ICMP_MINLEN,
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_icmp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}

	case IPPROTO_ESP: {
		struct pf_esp_hdr esp;

		pd.hdr.esp = &esp;
		if (!pf_pull_hdr(pbuf, off, &esp, sizeof(esp), &action, &reason,
		    AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_esp(&s, dir, kif, off, &pd);
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}

	case IPPROTO_GRE: {
		struct pf_grev1_hdr grev1;

		pd.hdr.grev1 = &grev1;
		if (!pf_pull_hdr(pbuf, off, &grev1, sizeof(grev1), &action,
		    &reason, AF_INET)) {
			log = (action != PF_PASS);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
		    ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
			if (ntohs(grev1.payload_length) >
			    pbuf->pb_packet_len - off) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				goto done;
			}
			pd.proto_variant = PF_GRE_PPTP_VARIANT;
			action = pf_test_state_grev1(&s, dir, kif, off, &pd);
			if (pd.lmw < 0) {
				goto done;
			}
			PF_APPLE_UPDATE_PDESC_IPv4();
			if (action == PF_PASS) {
#if NPFSYNC
				pfsync_update_state(s);
#endif /* NPFSYNC */
				r = s->rule.ptr;
				a = s->anchor.ptr;
				log = s->log;
				break;
			} else if (s == NULL) {
				action = pf_test_rule(&r, &s, dir, kif, pbuf,
				    off, h, &pd, &a, &ruleset, NULL);
				if (action == PF_PASS) {
					break;
				}
			}
		}

		/* not GREv1/PPTP, so treat as ordinary GRE... */
		OS_FALLTHROUGH;
	}

	default:
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_other(&s, dir, kif, &pd);
		if (pd.lmw < 0) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h,
			    &pd, &a, &ruleset, NULL);
		}
		break;
	}

done:
	if (action == PF_NAT64) {
		*pbufp = NULL;
		return action;
	}

	*pbufp = pd.mp;
	PF_APPLE_UPDATE_PDESC_IPv4();

	if (action != PF_DROP) {
		if (action == PF_PASS && h->ip_hl > 5 &&
		    !((s && s->allow_opts) || r->allow_opts)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_IPOPTIONS);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: dropping packet with ip options [hlen=%u]\n",
			    (unsigned int) h->ip_hl));
		}

		if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
		    (pd.pktflags & PKTF_FLOW_ID)) {
			(void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0,
			    r->rtableid, &pd);
		}

		if (action == PF_PASS) {
#if PF_ECN
			/* add hints for ecn */
			pd.pf_mtag->pftag_hdr = h;
			/* record address family */
			pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET6;
			pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
#endif /* PF_ECN */
			/* record protocol */
			*pbuf->pb_proto = pd.proto;

			/*
			 * connections redirected to loopback should not match sockets
			 * bound specifically to loopback due to security implications,
			 * see tcp_input() and in_pcblookup_listen().
			 */
			if (dir == PF_IN && (pd.proto == IPPROTO_TCP ||
			    pd.proto == IPPROTO_UDP) && s != NULL &&
			    s->nat_rule.ptr != NULL &&
			    (s->nat_rule.ptr->action == PF_RDR ||
			    s->nat_rule.ptr->action == PF_BINAT) &&
			    (ntohl(pd.dst->v4addr.s_addr) >> IN_CLASSA_NSHIFT)
			    == IN_LOOPBACKNET) {
				pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST;
			}
		}
	}

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL) {
			lr = s->nat_rule.ptr;
		} else {
			lr = r;
		}
		PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			sk = s->state_key;
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == sk->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL) {
			struct pf_addr *x;
			/*
			 * XXX: we need to make sure that the addresses
			 * passed to pfr_update_stats() are the same than
			 * the addresses used during matching (pfr_match)
			 */
			if (r == &pf_default_rule) {
				tr = nr;
				x = (sk == NULL || sk->direction == dir) ?
				    &pd.baddr : &pd.naddr;
			} else {
				x = (sk == NULL || sk->direction == dir) ?
				    &pd.naddr : &pd.baddr;
			}
			if (x == &pd.baddr || s == NULL) {
				/* we need to change the address */
				if (dir == PF_OUT) {
					pd.src = x;
				} else {
					pd.dst = x;
				}
			}
		}
		if (tr->src.addr.type == PF_ADDR_TABLE) {
			pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ?
			    pd.src : pd.dst, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->src.neg);
		}
		if (tr->dst.addr.type == PF_ADDR_TABLE) {
			pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.dst : pd.src, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->dst.neg);
		}
	}

	VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf);

	if (*pbufp) {
		if (pd.lmw < 0) {
			REASON_SET(&reason, PFRES_MEMORY);
			action = PF_DROP;
		}

		if (action == PF_DROP) {
			pbuf_destroy(*pbufp);
			*pbufp = NULL;
			return PF_DROP;
		}

		*pbufp = pbuf;
	}

	if (action == PF_SYNPROXY_DROP) {
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		action = PF_PASS;
	} else if (r->rt) {
		/* pf_route can free the pbuf causing *pbufp to become NULL */
		pf_route(pbufp, r, dir, kif->pfik_ifp, s, &pd);
	}

	return action;
}
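/*
 * pf_test6() below mirrors pf_test() for AF_INET6: the same
 * normalize -> dummynet -> state/rule pipeline per protocol, with an
 * extra extension-header walk to locate the transport protocol and an
 * explicit rejection of zero-ip6_plen (jumbogram) packets.
 */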
9957 #define PF_APPLE_UPDATE_PDESC_IPv6() \
9959 if (pbuf && pd.mp && pbuf != pd.mp) { \
9962 h = pbuf->pb_data; \
9966 pf_test6_mbuf(int dir
, struct ifnet
*ifp
, struct mbuf
**m0
,
9967 struct ether_header
*eh
, struct ip_fw_args
*fwa
)
9969 pbuf_t pbuf_store
, *pbuf
;
9972 pbuf_init_mbuf(&pbuf_store
, *m0
, (*m0
)->m_pkthdr
.rcvif
);
9975 rv
= pf_test6(dir
, ifp
, &pbuf
, eh
, fwa
);
9977 if (pbuf_is_valid(pbuf
)) {
9978 *m0
= pbuf
->pb_mbuf
;
9979 pbuf
->pb_mbuf
= NULL
;
9989 pf_test6(int dir
, struct ifnet
*ifp
, pbuf_t
**pbufp
,
9990 struct ether_header
*eh
, struct ip_fw_args
*fwa
)
9995 struct pfi_kif
*kif
;
9996 u_short action
= PF_PASS
, reason
= 0, log
= 0;
9997 pbuf_t
*pbuf
= *pbufp
;
9999 struct pf_rule
*a
= NULL
, *r
= &pf_default_rule
, *tr
, *nr
;
10000 struct pf_state
*s
= NULL
;
10001 struct pf_state_key
*sk
= NULL
;
10002 struct pf_ruleset
*ruleset
= NULL
;
10003 struct pf_pdesc pd
;
10004 int off
, terminal
= 0, dirndx
, rh_cnt
= 0;
10006 boolean_t fwd
= FALSE
;
10008 LCK_MTX_ASSERT(pf_lock
, LCK_MTX_ASSERT_OWNED
);
10010 ASSERT(ifp
!= NULL
);
10011 if ((dir
== PF_OUT
) && (pbuf
->pb_ifp
) && (ifp
!= pbuf
->pb_ifp
)) {
10015 if (!pf_status
.running
) {
10019 memset(&pd
, 0, sizeof(pd
));
10021 if ((pd
.pf_mtag
= pf_get_mtag_pbuf(pbuf
)) == NULL
) {
10022 DPFPRINTF(PF_DEBUG_URGENT
,
10023 ("pf_test6: pf_get_mtag_pbuf returned NULL\n"));
10027 if (pd
.pf_mtag
->pftag_flags
& PF_TAG_GENERATED
) {
10031 kif
= (struct pfi_kif
*)ifp
->if_pf_kif
;
10034 DPFPRINTF(PF_DEBUG_URGENT
,
10035 ("pf_test6: kif == NULL, if_name %s\n", ifp
->if_name
));
10038 if (kif
->pfik_flags
& PFI_IFLAG_SKIP
) {
10042 if (pbuf
->pb_packet_len
< (int)sizeof(*h
)) {
10043 REASON_SET(&reason
, PFRES_SHORT
);
10049 off
= ((caddr_t
)h
- (caddr_t
)pbuf
->pb_data
) + sizeof(struct ip6_hdr
);
10052 pd
.pf_mtag
= pf_get_mtag_pbuf(pbuf
);
10053 pd
.src
= (struct pf_addr
*)(uintptr_t)&h
->ip6_src
;
10054 pd
.dst
= (struct pf_addr
*)(uintptr_t)&h
->ip6_dst
;
10055 PF_ACPY(&pd
.baddr
, pd
.src
, AF_INET6
);
10056 PF_ACPY(&pd
.bdaddr
, pd
.dst
, AF_INET6
);
10060 pd
.proto_variant
= 0;
10062 pd
.ttl
= h
->ip6_hlim
;
10063 pd
.sc
= MBUF_SCIDX(pbuf_get_service_class(pbuf
));
10064 pd
.tot_len
= ntohs(h
->ip6_plen
) + sizeof(struct ip6_hdr
);
10067 if (*pbuf
->pb_flags
& PKTF_FLOW_ID
) {
10068 pd
.flowsrc
= *pbuf
->pb_flowsrc
;
10069 pd
.flowhash
= *pbuf
->pb_flowid
;
10070 pd
.pktflags
= (*pbuf
->pb_flags
& PKTF_FLOW_MASK
);
10074 if (fwa
!= NULL
&& fwa
->fwa_pf_rule
!= NULL
) {
10077 #endif /* DUMMYNET */
10079 /* We do IP header normalization and packet reassembly here */
10080 action
= pf_normalize_ip6(pbuf
, dir
, kif
, &reason
, &pd
);
10081 if (action
!= PF_PASS
|| pd
.lmw
< 0) {
10088 #endif /* DUMMYNET */
10092 * we do not support jumbogram yet. if we keep going, zero ip6_plen
10093 * will do something bad, so drop the packet for now.
10095 if (htons(h
->ip6_plen
) == 0) {
10097 REASON_SET(&reason
, PFRES_NORM
); /*XXX*/
10100 pd
.src
= (struct pf_addr
*)(uintptr_t)&h
->ip6_src
;
10101 pd
.dst
= (struct pf_addr
*)(uintptr_t)&h
->ip6_dst
;
10102 PF_ACPY(&pd
.baddr
, pd
.src
, AF_INET6
);
10103 PF_ACPY(&pd
.bdaddr
, pd
.dst
, AF_INET6
);
10107 pd
.ttl
= h
->ip6_hlim
;
10108 pd
.tot_len
= ntohs(h
->ip6_plen
) + sizeof(struct ip6_hdr
);
10111 off
= ((caddr_t
)h
- (caddr_t
)pbuf
->pb_data
) + sizeof(struct ip6_hdr
);
10112 pd
.proto
= h
->ip6_nxt
;
10113 pd
.proto_variant
= 0;
10116 pd
.pf_mtag
= pf_get_mtag_pbuf(pbuf
);
10119 switch (pd
.proto
) {
10120 case IPPROTO_FRAGMENT
: {
10121 struct ip6_frag ip6f
;
10123 pd
.flags
|= PFDESC_IP_FRAG
;
10124 if (!pf_pull_hdr(pbuf
, off
, &ip6f
, sizeof ip6f
, NULL
,
10126 DPFPRINTF(PF_DEBUG_MISC
,
10127 ("pf: IPv6 short fragment header\n"));
10129 REASON_SET(&reason
, PFRES_SHORT
);
10133 pd
.proto
= ip6f
.ip6f_nxt
;
10135 /* Traffic goes through dummynet first */
10136 action
= pf_test_dummynet(&r
, dir
, kif
, &pbuf
, &pd
,
10138 if (action
== PF_DROP
|| pbuf
== NULL
) {
10142 #endif /* DUMMYNET */
10143 action
= pf_test_fragment(&r
, dir
, kif
, pbuf
, h
, &pd
,
10145 if (action
== PF_DROP
) {
10146 REASON_SET(&reason
, PFRES_FRAG
);
10151 case IPPROTO_ROUTING
:
10156 case IPPROTO_HOPOPTS
:
10157 case IPPROTO_DSTOPTS
: {
10158 /* get next header and header length */
10159 struct ip6_ext opt6
;
10161 if (!pf_pull_hdr(pbuf
, off
, &opt6
, sizeof(opt6
),
10162 NULL
, &reason
, pd
.af
)) {
10163 DPFPRINTF(PF_DEBUG_MISC
,
10164 ("pf: IPv6 short opt\n"));
10169 if (pd
.proto
== IPPROTO_AH
) {
10170 off
+= (opt6
.ip6e_len
+ 2) * 4;
10172 off
+= (opt6
.ip6e_len
+ 1) * 8;
10174 pd
.proto
= opt6
.ip6e_nxt
;
10175 /* goto the next header */
10182 } while (!terminal
);
10185 switch (pd
.proto
) {
10186 case IPPROTO_TCP
: {
10190 if (!pf_pull_hdr(pbuf
, off
, &th
, sizeof(th
),
10191 &action
, &reason
, AF_INET6
)) {
10192 log
= action
!= PF_PASS
;
10195 pd
.p_len
= pd
.tot_len
- off
- (th
.th_off
<< 2);
10197 /* Traffic goes through dummynet first */
10198 action
= pf_test_dummynet(&r
, dir
, kif
, &pbuf
, &pd
, fwa
);
10199 if (action
== PF_DROP
|| pbuf
== NULL
) {
10203 #endif /* DUMMYNET */
10204 action
= pf_normalize_tcp(dir
, kif
, pbuf
, 0, off
, h
, &pd
);
10208 PF_APPLE_UPDATE_PDESC_IPv6();
10209 if (action
== PF_DROP
) {
10212 action
= pf_test_state_tcp(&s
, dir
, kif
, pbuf
, off
, h
, &pd
,
10214 if (action
== PF_NAT64
) {
10220 PF_APPLE_UPDATE_PDESC_IPv6();
10221 if (action
== PF_PASS
) {
10223 pfsync_update_state(s
);
10224 #endif /* NPFSYNC */
10228 } else if (s
== NULL
) {
10229 action
= pf_test_rule(&r
, &s
, dir
, kif
,
10230 pbuf
, off
, h
, &pd
, &a
, &ruleset
, NULL
);
	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(pbuf, off, &uh, sizeof(uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off ||
		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}
	case IPPROTO_ICMPV6: {
		struct icmp6_hdr ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(pbuf, off, &ih, sizeof(ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_icmp(&s, dir, kif,
		    pbuf, off, h, &pd, &reason);
		if (action == PF_NAT64) {
			goto done;
		}
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}
	case IPPROTO_ESP: {
		struct pf_esp_hdr esp;

		pd.hdr.esp = &esp;
		if (!pf_pull_hdr(pbuf, off, &esp, sizeof(esp), &action,
		    &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_esp(&s, dir, kif, off, &pd);
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		}
		break;
	}
	case IPPROTO_GRE: {
		struct pf_grev1_hdr grev1;

		pd.hdr.grev1 = &grev1;
		if (!pf_pull_hdr(pbuf, off, &grev1, sizeof(grev1), &action,
		    &reason, AF_INET6)) {
			log = (action != PF_PASS);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
		    ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
			if (ntohs(grev1.payload_length) >
			    pbuf->pb_packet_len - off) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				goto done;
			}
			action = pf_test_state_grev1(&s, dir, kif, off, &pd);
			PF_APPLE_UPDATE_PDESC_IPv6();
			if (action == PF_PASS) {
#if NPFSYNC
				pfsync_update_state(s);
#endif /* NPFSYNC */
				r = s->rule.ptr;
				a = s->anchor.ptr;
				log = s->log;
				break;
			} else if (s == NULL) {
				action = pf_test_rule(&r, &s, dir, kif, pbuf,
				    off, h, &pd, &a, &ruleset, NULL);
				if (action == PF_PASS) {
					break;
				}
			}
		}

		/* not GREv1/PPTP, so treat as ordinary GRE... */
		OS_FALLTHROUGH; /* XXX is this correct? */
	}
	default:
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return action;
		}
#endif /* DUMMYNET */
		action = pf_test_state_other(&s, dir, kif, &pd);
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL) {
			action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h,
			    &pd, &a, &ruleset, NULL);
		}
		break;
	}
done:
	if (action == PF_NAT64) {
		*pbufp = NULL;
		return action;
	}

	*pbufp = pd.mp;
	PF_APPLE_UPDATE_PDESC_IPv6();

	/* handle dangerous IPv6 extension headers. */
	if (action != PF_DROP) {
		if (action == PF_PASS && rh_cnt &&
		    !((s && s->allow_opts) || r->allow_opts)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_IPOPTIONS);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: dropping packet with dangerous v6addr headers\n"));
		}

		if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
		    (pd.pktflags & PKTF_FLOW_ID)) {
			(void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0,
			    r->rtableid, &pd);
		}

		if (action == PF_PASS) {
#if PF_ECN
			/* add hints for ecn */
			pd.pf_mtag->pftag_hdr = h;
			/* record address family */
			pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET;
			pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
#endif /* PF_ECN */
			/* record protocol */
			*pbuf->pb_proto = pd.proto;
			if (dir == PF_IN && (pd.proto == IPPROTO_TCP ||
			    pd.proto == IPPROTO_UDP) && s != NULL &&
			    s->nat_rule.ptr != NULL &&
			    (s->nat_rule.ptr->action == PF_RDR ||
			    s->nat_rule.ptr->action == PF_BINAT) &&
			    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6addr)) {
				pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST;
			}
		}
	}
	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL) {
			lr = s->nat_rule.ptr;
		} else {
			lr = r;
		}
		PFLOG_PACKET(kif, h, pbuf, AF_INET6, dir, reason, lr, a,
		    ruleset, &pd);
	}

	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
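	/*
	 * pfik_bytes/pfik_packets are indexed [af][dir][result]: the first
	 * index selects the address family (0 for IPv4, 1 for IPv6 as
	 * here), the second is 0 for inbound and 1 for outbound, and the
	 * third is 0 for passed and 1 for blocked packets.
	 */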
	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			sk = s->state_key;
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == sk->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL) {
			struct pf_addr *x;
			/*
			 * XXX: we need to make sure that the addresses
			 * passed to pfr_update_stats() are the same as
			 * the addresses used during matching (pfr_match)
			 */
			if (r == &pf_default_rule) {
				tr = nr;
				x = (s == NULL || sk->direction == dir) ?
				    &pd.baddr : &pd.naddr;
			} else {
				x = (s == NULL || sk->direction == dir) ?
				    &pd.naddr : &pd.baddr;
			}
			if (x == &pd.baddr || s == NULL) {
				if (dir == PF_OUT) {
					pd.src = x;
				} else {
					pd.dst = x;
				}
			}
		}
		if (tr->src.addr.type == PF_ADDR_TABLE) {
			pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.src : pd.dst, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->src.neg);
		}
		if (tr->dst.addr.type == PF_ADDR_TABLE) {
			pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.dst : pd.src, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->dst.neg);
		}
	}
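	/*
	 * When the packet only matched the default rule, the table stats
	 * are charged to the NAT rule instead (tr = nr), and the pre- vs.
	 * post-translation address (pd.baddr vs. pd.naddr) is chosen so
	 * that pfr_update_stats() sees the same address that the table
	 * lookup originally matched on.
	 */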
	VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf);
	if (*pbufp) {
		if (pd.lmw < 0) {
			REASON_SET(&reason, PFRES_MEMORY);
			action = PF_DROP;
		}

		if (action == PF_DROP) {
			pbuf_destroy(*pbufp);
			*pbufp = NULL;
			return PF_DROP;
		}

		*pbufp = pbuf;
	}

	if (action == PF_SYNPROXY_DROP) {
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		action = PF_PASS;
	} else if (r->rt) {
		/* pf_route6 can free the mbuf causing *pbufp to become NULL */
		pf_route6(pbufp, r, dir, kif->pfik_ifp, s, &pd);
	}

	/* if reassembled packet passed, create new fragments */
	struct pf_fragment_tag *ftag = NULL;
	if ((action == PF_PASS) && (*pbufp != NULL) && (fwd) &&
	    ((ftag = pf_find_fragment_tag_pbuf(*pbufp)) != NULL)) {
		action = pf_refragment6(ifp, pbufp, ftag);
	}
	return action;
}
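/*
 * Note on refragmentation: pf reassembles IPv6 fragments so that rules
 * and states can be evaluated against the complete datagram, but a
 * forwarded packet is re-fragmented on the way out so the receiver still
 * sees the fragment boundaries the sender produced; the reassembly
 * bookkeeping travels with the packet in the pf_fragment_tag examined
 * above.
 */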
static int
pf_check_congestion(struct ifqueue *ifq)
{
#pragma unused(ifq)
	return 0;
}

void
pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
    int flags, const char *wchan, void *palloc)
{
#pragma unused(align, ioff, flags, palloc)

	bzero(pp, sizeof(*pp));
	pp->pool_zone = zone_create(wchan, size, ZC_DESTRUCTIBLE);
	pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
	pp->pool_name = wchan;
}

/* Zones currently cannot be destroyed */
void
pool_destroy(struct pool *pp)
{
#pragma unused(pp)
}

void
pool_sethiwat(struct pool *pp, int n)
{
	pp->pool_hiwat = n;     /* Currently unused */
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{
#pragma unused(warnmess, ratecap)
	pp->pool_limit = n;
}
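/*
 * The pool_*() functions above emulate just enough of the OpenBSD
 * pool(9) allocator interface for the ported pf code: each pool is
 * backed by an xnu zone, the high-water mark is tracked but otherwise
 * unused, and only the hard limit is enforced (in pool_get() below).
 */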
void *
pool_get(struct pool *pp, int flags)
{
	void *buf;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (pp->pool_count > pp->pool_limit) {
		DPFPRINTF(PF_DEBUG_NOISY,
		    ("pf: pool %s hard limit reached (%d)\n",
		    pp->pool_name != NULL ? pp->pool_name : "unknown",
		    pp->pool_limit));
		pp->pool_fails++;
		return NULL;
	}

	buf = zalloc_flags(pp->pool_zone,
	    (flags & PR_WAITOK) ? Z_WAITOK : Z_NOWAIT);
	if (buf != NULL) {
		pp->pool_count++;
		VERIFY(pp->pool_count != 0);
	}
	return buf;
}

void
pool_put(struct pool *pp, void *v)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	zfree(pp->pool_zone, v);
	VERIFY(pp->pool_count != 0);
	pp->pool_count--;
}
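/*
 * Illustrative example of how pf uses this shim elsewhere in the file
 * (pf_state_pl and "pfstatepl" are among the pools pf creates at attach
 * time; the exact call sites live outside this excerpt):
 *
 *	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0,
 *	    "pfstatepl", NULL);
 *	...
 *	struct pf_state *st = pool_get(&pf_state_pl, PR_NOWAIT);
 *	if (st != NULL) {
 *		...
 *		pool_put(&pf_state_pl, st);
 *	}
 */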
struct pf_mtag *
pf_find_mtag_pbuf(pbuf_t *pbuf)
{
	return pbuf->pb_pftag;
}

struct pf_mtag *
pf_find_mtag(struct mbuf *m)
{
	return m_pftag(m);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	return pf_find_mtag(m);
}

struct pf_mtag *
pf_get_mtag_pbuf(pbuf_t *pbuf)
{
	return pf_find_mtag_pbuf(pbuf);
}
struct pf_fragment_tag *
pf_copy_fragment_tag(struct mbuf *m, struct pf_fragment_tag *ftag, int how)
{
	struct m_tag *tag;
	struct pf_mtag *pftag = pf_find_mtag(m);

	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF_REASS,
	    sizeof(*ftag), how, m);
	if (tag == NULL) {
		return NULL;
	} else {
		m_tag_prepend(m, tag);
		tag = tag + 1;
	}
	bcopy(ftag, tag, sizeof(*ftag));
	pftag->pftag_flags |= PF_TAG_REASSEMBLED;
	return (struct pf_fragment_tag *)tag;
}

struct pf_fragment_tag *
pf_find_fragment_tag(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;
	struct pf_mtag *pftag = pf_find_mtag(m);

	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF_REASS,
	    NULL);
	VERIFY((tag == NULL) || (pftag->pftag_flags & PF_TAG_REASSEMBLED));
	if (tag != NULL) {
		tag = tag + 1;
	}
	ftag = (struct pf_fragment_tag *)tag;
	return ftag;
}

struct pf_fragment_tag *
pf_find_fragment_tag_pbuf(pbuf_t *pbuf)
{
	struct pf_mtag *mtag = pf_find_mtag_pbuf(pbuf);

	return (mtag->pftag_flags & PF_TAG_REASSEMBLED) ?
	       pbuf->pb_pf_fragtag : NULL;
}
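/*
 * The "tag + 1" arithmetic above relies on the mbuf tag layout: the
 * payload of an m_tag is stored immediately after the struct m_tag
 * header, so advancing the pointer by one element yields the embedded
 * struct pf_fragment_tag.
 */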
uint64_t
pf_time_second(void)
{
	struct timeval t;

	microuptime(&t);
	return t.tv_sec;
}

uint64_t
pf_calendar_time_second(void)
{
	struct timeval t;

	getmicrotime(&t);
	return t.tv_sec;
}
static void *
hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
{
	struct hook_desc *hd;

	hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
	if (hd == NULL) {
		return NULL;
	}

	hd->hd_fn = fn;
	hd->hd_arg = arg;
	if (tail) {
		TAILQ_INSERT_TAIL(head, hd, hd_list);
	} else {
		TAILQ_INSERT_HEAD(head, hd, hd_list);
	}

	return hd;
}
static void
hook_runloop(struct hook_desc_head *head, int flags)
{
	struct hook_desc *hd;

	if (!(flags & HOOK_REMOVE)) {
		if (!(flags & HOOK_ABORT)) {
			TAILQ_FOREACH(hd, head, hd_list)
			hd->hd_fn(hd->hd_arg);
		}
	} else {
		while (!!(hd = TAILQ_FIRST(head))) {
			TAILQ_REMOVE(head, hd, hd_list);
			if (!(flags & HOOK_ABORT)) {
				hd->hd_fn(hd->hd_arg);
			}
			if (flags & HOOK_FREE) {
				_FREE(hd, M_DEVBUF);
			}
		}
	}
}
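/*
 * hook_runloop() semantics, as implemented above: without HOOK_REMOVE
 * the hooks are simply invoked in list order (unless HOOK_ABORT
 * suppresses the calls); with HOOK_REMOVE each hook is unlinked first,
 * then invoked unless aborted, and finally freed when HOOK_FREE is set.
 * HOOK_ABORT together with HOOK_REMOVE/HOOK_FREE therefore tears down
 * the list without running any callbacks.
 */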