/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
/* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/mcache.h>

#include <libkern/crypto/md5.h>
#include <libkern/libkern.h>

#include <mach/thread_act.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <net/if_ether.h>
#include <net/ethernet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <net/if_pfsync.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#ifndef NO_APPLE_EXTENSIONS
#define DPFPRINTF(n, x)	(pf_status.debug >= (n) ? printf x : ((void)0))
#else
#define DPFPRINTF(n, x)	if (pf_status.debug >= (n)) printf x
#endif

/* XXX: should be in header somewhere */
#define satosin(sa)	((struct sockaddr_in *)(sa))
#define sintosa(sin)	((struct sockaddr *)(sin))

/*
 * On Mac OS X, the rtableid value is treated as the interface scope
 * value that is equivalent to the interface index used for scoped
 * routing.  A valid scope value is anything but IFSCOPE_NONE (0),
 * as per definition of ifindex which is a positive, non-zero number.
 * The other BSDs treat a negative rtableid value as invalid, hence
 * the test against INT_MAX to handle userland apps which initialize
 * the field with a negative number.
 */
#define PF_RTABLEID_IS_VALID(r) \
	((r) > IFSCOPE_NONE && (r) <= INT_MAX)
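
/*
 * Illustrative example (values are hypothetical, not from the original
 * source): a state created by a rule scoped to an interface whose
 * ifindex is 4 would carry rtableid 4, which PF_RTABLEID_IS_VALID()
 * accepts, while the common userland initializations of 0
 * (IFSCOPE_NONE) or a negative value are rejected.
 */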
lck_rw_t *pf_perim_lock;

struct pf_state_tree_lan_ext	 pf_statetbl_lan_ext;
struct pf_state_tree_ext_gwy	 pf_statetbl_ext_gwy;

struct pf_palist	 pf_pabuf;
struct pf_status	 pf_status;

struct pf_altqqueue	 pf_altqs[2];
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
u_int32_t		 ticket_altqs_active;
u_int32_t		 ticket_altqs_inactive;
int			 altqs_inactive_open;

u_int32_t		 ticket_pabuf;

static MD5_CTX		 pf_tcp_secret_ctx;
static u_char		 pf_tcp_secret[16];
static int		 pf_tcp_secret_init;
static int		 pf_tcp_iss_off;
static struct pf_anchor_stackframe {
	struct pf_ruleset		*rs;
	struct pf_rule			*r;
	struct pf_anchor_node		*parent;
	struct pf_anchor		*child;
} pf_anchor_stack[64];

struct pool		 pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
struct pool		 pf_state_pl, pf_state_key_pl;

struct pool		 pf_altq_pl;
#ifndef NO_APPLE_EXTENSIONS
typedef void (*hook_fn_t)(void *);

struct hook_desc {
	TAILQ_ENTRY(hook_desc) hd_list;
	hook_fn_t hd_fn;
	void *hd_arg;
};

#define HOOK_REMOVE	0x01
#define HOOK_FREE	0x02
#define HOOK_ABORT	0x04

static void *hook_establish(struct hook_desc_head *, int,
    hook_fn_t, void *);
static void hook_runloop(struct hook_desc_head *, int flags);

struct pool pf_app_state_pl;
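
/*
 * Note: hook_establish()/hook_runloop() implement a small per-state
 * callback list.  Code that attaches auxiliary resources to a state
 * (for example the PPTP/GRE companion handling further below) registers
 * a cleanup function, and pf_unlink_state() later drains the list with
 * HOOK_REMOVE|HOOK_FREE so those resources are released exactly once.
 */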
204 static void pf_print_addr(struct pf_addr
*addr
, sa_family_t af
);
205 static void pf_print_sk_host(struct pf_state_host
*, u_int8_t
, int,
209 static void pf_print_host(struct pf_addr
*, u_int16_t
, u_int8_t
);
211 static void pf_init_threshold(struct pf_threshold
*, u_int32_t
,
213 static void pf_add_threshold(struct pf_threshold
*);
214 static int pf_check_threshold(struct pf_threshold
*);
216 static void pf_change_ap(int, struct mbuf
*, struct pf_addr
*,
217 u_int16_t
*, u_int16_t
*, u_int16_t
*,
218 struct pf_addr
*, u_int16_t
, u_int8_t
, sa_family_t
);
219 static int pf_modulate_sack(struct mbuf
*, int, struct pf_pdesc
*,
220 struct tcphdr
*, struct pf_state_peer
*);
222 static void pf_change_a6(struct pf_addr
*, u_int16_t
*,
223 struct pf_addr
*, u_int8_t
);
225 static void pf_change_icmp(struct pf_addr
*, u_int16_t
*,
226 struct pf_addr
*, struct pf_addr
*, u_int16_t
,
227 u_int16_t
*, u_int16_t
*, u_int16_t
*,
228 u_int16_t
*, u_int8_t
, sa_family_t
);
229 static void pf_send_tcp(const struct pf_rule
*, sa_family_t
,
230 const struct pf_addr
*, const struct pf_addr
*,
231 u_int16_t
, u_int16_t
, u_int32_t
, u_int32_t
,
232 u_int8_t
, u_int16_t
, u_int16_t
, u_int8_t
, int,
233 u_int16_t
, struct ether_header
*, struct ifnet
*);
234 static void pf_send_icmp(struct mbuf
*, u_int8_t
, u_int8_t
,
235 sa_family_t
, struct pf_rule
*);
236 #ifndef NO_APPLE_EXTENSIONS
237 static struct pf_rule
*pf_match_translation(struct pf_pdesc
*, struct mbuf
*,
238 int, int, struct pfi_kif
*, struct pf_addr
*,
239 union pf_state_xport
*, struct pf_addr
*,
240 union pf_state_xport
*, int);
241 static struct pf_rule
*pf_get_translation_aux(struct pf_pdesc
*,
242 struct mbuf
*, int, int, struct pfi_kif
*,
243 struct pf_src_node
**, struct pf_addr
*,
244 union pf_state_xport
*, struct pf_addr
*,
245 union pf_state_xport
*, struct pf_addr
*,
246 union pf_state_xport
*);
248 struct pf_rule
*pf_match_translation(struct pf_pdesc
*, struct mbuf
*,
249 int, int, struct pfi_kif
*,
250 struct pf_addr
*, u_int16_t
, struct pf_addr
*,
252 struct pf_rule
*pf_get_translation(struct pf_pdesc
*, struct mbuf
*,
253 int, int, struct pfi_kif
*, struct pf_src_node
**,
254 struct pf_addr
*, u_int16_t
,
255 struct pf_addr
*, u_int16_t
,
256 struct pf_addr
*, u_int16_t
*);
258 static void pf_attach_state(struct pf_state_key
*,
259 struct pf_state
*, int);
260 static void pf_detach_state(struct pf_state
*, int);
261 static u_int32_t
pf_tcp_iss(struct pf_pdesc
*);
262 static int pf_test_rule(struct pf_rule
**, struct pf_state
**,
263 int, struct pfi_kif
*, struct mbuf
*, int,
264 void *, struct pf_pdesc
*, struct pf_rule
**,
265 struct pf_ruleset
**, struct ifqueue
*);
266 static int pf_test_fragment(struct pf_rule
**, int,
267 struct pfi_kif
*, struct mbuf
*, void *,
268 struct pf_pdesc
*, struct pf_rule
**,
269 struct pf_ruleset
**);
270 static int pf_test_state_tcp(struct pf_state
**, int,
271 struct pfi_kif
*, struct mbuf
*, int,
272 void *, struct pf_pdesc
*, u_short
*);
273 #ifndef NO_APPLE_EXTENSIONS
274 static int pf_test_state_udp(struct pf_state
**, int,
275 struct pfi_kif
*, struct mbuf
*, int,
276 void *, struct pf_pdesc
*, u_short
*);
278 static int pf_test_state_udp(struct pf_state
**, int,
279 struct pfi_kif
*, struct mbuf
*, int,
280 void *, struct pf_pdesc
*);
282 static int pf_test_state_icmp(struct pf_state
**, int,
283 struct pfi_kif
*, struct mbuf
*, int,
284 void *, struct pf_pdesc
*, u_short
*);
285 static int pf_test_state_other(struct pf_state
**, int,
286 struct pfi_kif
*, struct pf_pdesc
*);
287 static int pf_match_tag(struct mbuf
*, struct pf_rule
*,
288 struct pf_mtag
*, int *);
289 static void pf_hash(struct pf_addr
*, struct pf_addr
*,
290 struct pf_poolhashkey
*, sa_family_t
);
291 static int pf_map_addr(u_int8_t
, struct pf_rule
*,
292 struct pf_addr
*, struct pf_addr
*,
293 struct pf_addr
*, struct pf_src_node
**);
294 #ifndef NO_APPLE_EXTENSIONS
295 static int pf_get_sport(struct pf_pdesc
*, struct pfi_kif
*,
296 struct pf_rule
*, struct pf_addr
*,
297 union pf_state_xport
*, struct pf_addr
*,
298 union pf_state_xport
*, struct pf_addr
*,
299 union pf_state_xport
*, struct pf_src_node
**);
301 int pf_get_sport(sa_family_t
, u_int8_t
, struct pf_rule
*,
302 struct pf_addr
*, struct pf_addr
*, u_int16_t
,
303 struct pf_addr
*, u_int16_t
*, u_int16_t
, u_int16_t
,
304 struct pf_src_node
**);
306 static void pf_route(struct mbuf
**, struct pf_rule
*, int,
307 struct ifnet
*, struct pf_state
*,
310 static void pf_route6(struct mbuf
**, struct pf_rule
*, int,
311 struct ifnet
*, struct pf_state
*,
314 static u_int8_t
pf_get_wscale(struct mbuf
*, int, u_int16_t
,
316 static u_int16_t
pf_get_mss(struct mbuf
*, int, u_int16_t
,
318 static u_int16_t
pf_calc_mss(struct pf_addr
*, sa_family_t
,
320 static void pf_set_rt_ifp(struct pf_state
*,
322 static int pf_check_proto_cksum(struct mbuf
*, int, int,
323 u_int8_t
, sa_family_t
);
324 static int pf_addr_wrap_neq(struct pf_addr_wrap
*,
325 struct pf_addr_wrap
*);
326 static struct pf_state
*pf_find_state(struct pfi_kif
*,
327 struct pf_state_key_cmp
*, u_int
);
328 static int pf_src_connlimit(struct pf_state
**);
329 static void pf_stateins_err(const char *, struct pf_state
*,
331 static int pf_check_congestion(struct ifqueue
*);
333 #ifndef NO_APPLE_EXTENSIONS
335 static const char *pf_pptp_ctrl_type_name(u_int16_t code
);
337 static void pf_pptp_handler(struct pf_state
*, int, int,
338 struct pf_pdesc
*, struct pfi_kif
*);
339 static void pf_pptp_unlink(struct pf_state
*);
340 static void pf_grev1_unlink(struct pf_state
*);
341 static int pf_test_state_grev1(struct pf_state
**, int,
342 struct pfi_kif
*, int, struct pf_pdesc
*);
343 static int pf_ike_compare(struct pf_app_state
*,
344 struct pf_app_state
*);
345 static int pf_test_state_esp(struct pf_state
**, int,
346 struct pfi_kif
*, int, struct pf_pdesc
*);
extern struct pool pfr_ktable_pl;
extern struct pool pfr_kentry_pl;
extern int path_mtu_discovery;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_app_state_pl, PFAPPSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT }
};
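
/*
 * Note: these entries seed the default hard limits (hiwat marks) for
 * the corresponding memory pools; userland can raise or lower them at
 * runtime through the pf ioctl limit interface.
 */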
362 #ifndef NO_APPLE_EXTENSIONS
364 pf_lazy_makewritable(struct pf_pdesc
*pd
, struct mbuf
*m
, int len
)
372 if (m_makewritable(&m
, 0, len
, M_DONTWAIT
))
375 if (len
>= 0 && m
!= pd
->mp
) {
377 pd
->pf_mtag
= pf_find_mtag(m
);
381 struct ip
*h
= mtod(m
, struct ip
*);
382 pd
->src
= (struct pf_addr
*)&h
->ip_src
;
383 pd
->dst
= (struct pf_addr
*)&h
->ip_dst
;
384 pd
->ip_sum
= &h
->ip_sum
;
389 struct ip6_hdr
*h
= mtod(m
, struct ip6_hdr
*);
390 pd
->src
= (struct pf_addr
*)&h
->ip6_src
;
391 pd
->dst
= (struct pf_addr
*)&h
->ip6_dst
;
399 return (len
< 0 ? 0 : m
);
403 pf_state_lookup_aux(struct pf_state
**state
, struct pfi_kif
*kif
,
404 int direction
, int *action
)
406 if (*state
== NULL
|| (*state
)->timeout
== PFTM_PURGE
) {
411 if (direction
== PF_OUT
&&
412 (((*state
)->rule
.ptr
->rt
== PF_ROUTETO
&&
413 (*state
)->rule
.ptr
->direction
== PF_OUT
) ||
414 ((*state
)->rule
.ptr
->rt
== PF_REPLYTO
&&
415 (*state
)->rule
.ptr
->direction
== PF_IN
)) &&
416 (*state
)->rt_kif
!= NULL
&& (*state
)->rt_kif
!= kif
) {
424 #define STATE_LOOKUP() \
427 *state = pf_find_state(kif, &key, direction); \
428 if (pf_state_lookup_aux(state, kif, direction, &action)) \
432 #define STATE_ADDR_TRANSLATE(sk) \
433 (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
434 ((sk)->af == AF_INET6 && \
435 ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
436 (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
437 (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3]))
439 #define STATE_TRANSLATE(sk) \
440 (STATE_ADDR_TRANSLATE(sk) || \
441 (sk)->lan.xport.port != (sk)->gwy.xport.port)
443 #define STATE_GRE_TRANSLATE(sk) \
444 (STATE_ADDR_TRANSLATE(sk) || \
445 (sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)
448 #define STATE_LOOKUP() \
450 *state = pf_find_state(kif, &key, direction); \
451 if (*state == NULL || (*state)->timeout == PFTM_PURGE) \
453 if (direction == PF_OUT && \
454 (((*state)->rule.ptr->rt == PF_ROUTETO && \
455 (*state)->rule.ptr->direction == PF_OUT) || \
456 ((*state)->rule.ptr->rt == PF_REPLYTO && \
457 (*state)->rule.ptr->direction == PF_IN)) && \
458 (*state)->rt_kif != NULL && \
459 (*state)->rt_kif != kif) \
463 #define STATE_TRANSLATE(sk) \
464 (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
465 ((sk)->af == AF_INET6 && \
466 ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
467 (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
468 (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])) || \
469 (sk)->lan.port != (sk)->gwy.port
472 #define BOUND_IFACE(r, k) \
473 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
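
/*
 * BOUND_IFACE() decides which kif a freshly created state is bound to:
 * rules flagged PFRULE_IFBOUND pin the state to the interface it was
 * created on, everything else attaches to pfi_all so the state floats
 * across interfaces (pf_find_state() keeps if-bound entries ahead of
 * floating ones on each key's state list).
 */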
475 #define STATE_INC_COUNTERS(s) \
477 s->rule.ptr->states++; \
478 VERIFY(s->rule.ptr->states != 0); \
479 if (s->anchor.ptr != NULL) { \
480 s->anchor.ptr->states++; \
481 VERIFY(s->anchor.ptr->states != 0); \
483 if (s->nat_rule.ptr != NULL) { \
484 s->nat_rule.ptr->states++; \
485 VERIFY(s->nat_rule.ptr->states != 0); \
489 #define STATE_DEC_COUNTERS(s) \
491 if (s->nat_rule.ptr != NULL) { \
492 VERIFY(s->nat_rule.ptr->states > 0); \
493 s->nat_rule.ptr->states--; \
495 if (s->anchor.ptr != NULL) { \
496 VERIFY(s->anchor.ptr->states > 0); \
497 s->anchor.ptr->states--; \
499 VERIFY(s->rule.ptr->states > 0); \
500 s->rule.ptr->states--; \
503 static __inline
int pf_src_compare(struct pf_src_node
*, struct pf_src_node
*);
504 static __inline
int pf_state_compare_lan_ext(struct pf_state_key
*,
505 struct pf_state_key
*);
506 static __inline
int pf_state_compare_ext_gwy(struct pf_state_key
*,
507 struct pf_state_key
*);
508 static __inline
int pf_state_compare_id(struct pf_state
*,
511 struct pf_src_tree tree_src_tracking
;
513 struct pf_state_tree_id tree_id
;
514 struct pf_state_queue state_list
;
516 RB_GENERATE(pf_src_tree
, pf_src_node
, entry
, pf_src_compare
);
517 RB_GENERATE(pf_state_tree_lan_ext
, pf_state_key
,
518 entry_lan_ext
, pf_state_compare_lan_ext
);
519 RB_GENERATE(pf_state_tree_ext_gwy
, pf_state_key
,
520 entry_ext_gwy
, pf_state_compare_ext_gwy
);
521 RB_GENERATE(pf_state_tree_id
, pf_state
,
522 entry_id
, pf_state_compare_id
);
#define PF_DT_SKIP_LANEXT	0x01
#define PF_DT_SKIP_EXTGWY	0x02

#ifndef NO_APPLE_EXTENSIONS
static const u_int16_t PF_PPTP_PORT = 1723;
static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
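
/*
 * Background: PF_PPTP_PORT identifies the TCP control connection of a
 * PPTP session; pf inspects it (see pf_pptp_handler below) so that the
 * call IDs negotiated there can be matched against, and translated in,
 * the companion GREv1 data flow.
 */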
537 struct pf_pptp_ctrl_hdr
{
539 u_int16_t reserved_0
;
542 struct pf_pptp_ctrl_generic
{
546 #define PF_PPTP_CTRL_TYPE_START_REQ 1
547 struct pf_pptp_ctrl_start_req
{
548 u_int16_t protocol_version
;
549 u_int16_t reserved_1
;
550 u_int32_t framing_capabilities
;
551 u_int32_t bearer_capabilities
;
552 u_int16_t maximum_channels
;
553 u_int16_t firmware_revision
;
554 u_int8_t host_name
[64];
555 u_int8_t vendor_string
[64];
558 #define PF_PPTP_CTRL_TYPE_START_RPY 2
559 struct pf_pptp_ctrl_start_rpy
{
560 u_int16_t protocol_version
;
561 u_int8_t result_code
;
563 u_int32_t framing_capabilities
;
564 u_int32_t bearer_capabilities
;
565 u_int16_t maximum_channels
;
566 u_int16_t firmware_revision
;
567 u_int8_t host_name
[64];
568 u_int8_t vendor_string
[64];
571 #define PF_PPTP_CTRL_TYPE_STOP_REQ 3
572 struct pf_pptp_ctrl_stop_req
{
575 u_int16_t reserved_2
;
578 #define PF_PPTP_CTRL_TYPE_STOP_RPY 4
579 struct pf_pptp_ctrl_stop_rpy
{
582 u_int16_t reserved_1
;
585 #define PF_PPTP_CTRL_TYPE_ECHO_REQ 5
586 struct pf_pptp_ctrl_echo_req
{
587 u_int32_t identifier
;
590 #define PF_PPTP_CTRL_TYPE_ECHO_RPY 6
591 struct pf_pptp_ctrl_echo_rpy
{
592 u_int32_t identifier
;
593 u_int8_t result_code
;
595 u_int16_t reserved_1
;
598 #define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7
599 struct pf_pptp_ctrl_call_out_req
{
601 u_int16_t call_sernum
;
603 u_int32_t bearer_type
;
604 u_int32_t framing_type
;
605 u_int16_t rxwindow_size
;
606 u_int16_t proc_delay
;
607 u_int8_t phone_num
[64];
608 u_int8_t sub_addr
[64];
611 #define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8
612 struct pf_pptp_ctrl_call_out_rpy
{
614 u_int16_t peer_call_id
;
615 u_int8_t result_code
;
617 u_int16_t cause_code
;
618 u_int32_t connect_speed
;
619 u_int16_t rxwindow_size
;
620 u_int16_t proc_delay
;
621 u_int32_t phy_channel_id
;
624 #define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9
625 struct pf_pptp_ctrl_call_in_1st
{
627 u_int16_t call_sernum
;
628 u_int32_t bearer_type
;
629 u_int32_t phy_channel_id
;
630 u_int16_t dialed_number_len
;
631 u_int16_t dialing_number_len
;
632 u_int8_t dialed_num
[64];
633 u_int8_t dialing_num
[64];
634 u_int8_t sub_addr
[64];
637 #define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10
638 struct pf_pptp_ctrl_call_in_2nd
{
640 u_int16_t peer_call_id
;
641 u_int8_t result_code
;
643 u_int16_t rxwindow_size
;
645 u_int16_t reserved_1
;
648 #define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11
649 struct pf_pptp_ctrl_call_in_3rd
{
651 u_int16_t reserved_1
;
652 u_int32_t connect_speed
;
653 u_int16_t rxwindow_size
;
655 u_int32_t framing_type
;
658 #define PF_PPTP_CTRL_TYPE_CALL_CLR 12
659 struct pf_pptp_ctrl_call_clr
{
661 u_int16_t reserved_1
;
664 #define PF_PPTP_CTRL_TYPE_CALL_DISC 13
665 struct pf_pptp_ctrl_call_disc
{
667 u_int8_t result_code
;
669 u_int16_t cause_code
;
670 u_int16_t reserved_1
;
671 u_int8_t statistics
[128];
674 #define PF_PPTP_CTRL_TYPE_ERROR 14
675 struct pf_pptp_ctrl_error
{
676 u_int16_t peer_call_id
;
677 u_int16_t reserved_1
;
678 u_int32_t crc_errors
;
681 u_int32_t buf_errors
;
682 u_int32_t tim_errors
;
683 u_int32_t align_errors
;
686 #define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15
687 struct pf_pptp_ctrl_set_linkinfo
{
688 u_int16_t peer_call_id
;
689 u_int16_t reserved_1
;
static const char *pf_pptp_ctrl_type_name(u_int16_t code)
{
	if (code < PF_PPTP_CTRL_TYPE_START_REQ ||
	    code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) {
		static char reserved[] = "reserved-00";

		sprintf(&reserved[9], "%02x", code);
		return (reserved);
	} else {
		static const char *name[] = {
			"start_req", "start_rpy", "stop_req", "stop_rpy",
			"echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
			"call_in_1st", "call_in_2nd", "call_in_3rd",
			"call_clr", "call_disc", "error", "set_linkinfo"
		};

		return (name[code - 1]);
	}
}
718 static const size_t PF_PPTP_CTRL_MSG_MINSIZE
=
719 sizeof (struct pf_pptp_hdr
) +
720 sizeof (struct pf_pptp_ctrl_hdr
) +
721 MIN(sizeof (struct pf_pptp_ctrl_start_req
),
722 MIN(sizeof (struct pf_pptp_ctrl_start_rpy
),
723 MIN(sizeof (struct pf_pptp_ctrl_stop_req
),
724 MIN(sizeof (struct pf_pptp_ctrl_stop_rpy
),
725 MIN(sizeof (struct pf_pptp_ctrl_echo_req
),
726 MIN(sizeof (struct pf_pptp_ctrl_echo_rpy
),
727 MIN(sizeof (struct pf_pptp_ctrl_call_out_req
),
728 MIN(sizeof (struct pf_pptp_ctrl_call_out_rpy
),
729 MIN(sizeof (struct pf_pptp_ctrl_call_in_1st
),
730 MIN(sizeof (struct pf_pptp_ctrl_call_in_2nd
),
731 MIN(sizeof (struct pf_pptp_ctrl_call_in_3rd
),
732 MIN(sizeof (struct pf_pptp_ctrl_call_clr
),
733 MIN(sizeof (struct pf_pptp_ctrl_call_disc
),
734 MIN(sizeof (struct pf_pptp_ctrl_error
),
735 sizeof (struct pf_pptp_ctrl_set_linkinfo
)
738 union pf_pptp_ctrl_msg_union
{
739 struct pf_pptp_ctrl_start_req start_req
;
740 struct pf_pptp_ctrl_start_rpy start_rpy
;
741 struct pf_pptp_ctrl_stop_req stop_req
;
742 struct pf_pptp_ctrl_stop_rpy stop_rpy
;
743 struct pf_pptp_ctrl_echo_req echo_req
;
744 struct pf_pptp_ctrl_echo_rpy echo_rpy
;
745 struct pf_pptp_ctrl_call_out_req call_out_req
;
746 struct pf_pptp_ctrl_call_out_rpy call_out_rpy
;
747 struct pf_pptp_ctrl_call_in_1st call_in_1st
;
748 struct pf_pptp_ctrl_call_in_2nd call_in_2nd
;
749 struct pf_pptp_ctrl_call_in_3rd call_in_3rd
;
750 struct pf_pptp_ctrl_call_clr call_clr
;
751 struct pf_pptp_ctrl_call_disc call_disc
;
752 struct pf_pptp_ctrl_error error
;
753 struct pf_pptp_ctrl_set_linkinfo set_linkinfo
;
757 struct pf_pptp_ctrl_msg
{
758 struct pf_pptp_hdr hdr
;
759 struct pf_pptp_ctrl_hdr ctrl
;
760 union pf_pptp_ctrl_msg_union msg
;
763 #define PF_GRE_FLAG_CHECKSUM_PRESENT 0x8000
764 #define PF_GRE_FLAG_VERSION_MASK 0x0007
765 #define PF_GRE_PPP_ETHERTYPE 0x880B
767 struct pf_grev1_hdr
{
769 u_int16_t protocol_type
;
770 u_int16_t payload_length
;
778 static const u_int16_t PF_IKE_PORT
= 500;
781 u_int64_t initiator_cookie
, responder_cookie
;
782 u_int8_t next_payload
, version
, exchange_type
, flags
;
783 u_int32_t message_id
, length
;
786 #define PF_IKE_PACKET_MINSIZE (sizeof (struct pf_ike_hdr))
788 #define PF_IKEv1_EXCHTYPE_BASE 1
789 #define PF_IKEv1_EXCHTYPE_ID_PROTECT 2
790 #define PF_IKEv1_EXCHTYPE_AUTH_ONLY 3
791 #define PF_IKEv1_EXCHTYPE_AGGRESSIVE 4
792 #define PF_IKEv1_EXCHTYPE_INFORMATIONAL 5
793 #define PF_IKEv2_EXCHTYPE_SA_INIT 34
794 #define PF_IKEv2_EXCHTYPE_AUTH 35
795 #define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA 36
796 #define PF_IKEv2_EXCHTYPE_INFORMATIONAL 37
798 #define PF_IKEv1_FLAG_E 0x01
799 #define PF_IKEv1_FLAG_C 0x02
800 #define PF_IKEv1_FLAG_A 0x04
801 #define PF_IKEv2_FLAG_I 0x08
802 #define PF_IKEv2_FLAG_V 0x10
803 #define PF_IKEv2_FLAG_R 0x20
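
/*
 * Context: these IKE constants cover just enough of the ISAKMP/IKEv2
 * header (cookies, exchange type, flags) for pf to recognize UDP/500
 * key-exchange traffic when tracking the ESP flows that such exchanges
 * establish; they are not a full IKE parser.
 */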
813 pf_src_compare(struct pf_src_node
*a
, struct pf_src_node
*b
)
817 if (a
->rule
.ptr
> b
->rule
.ptr
)
819 if (a
->rule
.ptr
< b
->rule
.ptr
)
821 if ((diff
= a
->af
- b
->af
) != 0)
826 if (a
->addr
.addr32
[0] > b
->addr
.addr32
[0])
828 if (a
->addr
.addr32
[0] < b
->addr
.addr32
[0])
834 if (a
->addr
.addr32
[3] > b
->addr
.addr32
[3])
836 if (a
->addr
.addr32
[3] < b
->addr
.addr32
[3])
838 if (a
->addr
.addr32
[2] > b
->addr
.addr32
[2])
840 if (a
->addr
.addr32
[2] < b
->addr
.addr32
[2])
842 if (a
->addr
.addr32
[1] > b
->addr
.addr32
[1])
844 if (a
->addr
.addr32
[1] < b
->addr
.addr32
[1])
846 if (a
->addr
.addr32
[0] > b
->addr
.addr32
[0])
848 if (a
->addr
.addr32
[0] < b
->addr
.addr32
[0])
857 pf_state_compare_lan_ext(struct pf_state_key
*a
, struct pf_state_key
*b
)
860 #ifndef NO_APPLE_EXTENSIONS
864 if ((diff
= a
->proto
- b
->proto
) != 0)
866 if ((diff
= a
->af
- b
->af
) != 0)
869 #ifndef NO_APPLE_EXTENSIONS
870 extfilter
= PF_EXTFILTER_APD
;
875 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
880 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
882 if ((diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
887 if ((diff
= a
->proto_variant
- b
->proto_variant
))
889 extfilter
= a
->proto_variant
;
890 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
892 if ((extfilter
< PF_EXTFILTER_AD
) &&
893 (diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
898 if (a
->proto_variant
== PF_GRE_PPTP_VARIANT
&&
899 a
->proto_variant
== b
->proto_variant
) {
900 if (!!(diff
= a
->ext
.xport
.call_id
-
901 b
->ext
.xport
.call_id
))
907 if (!!(diff
= a
->ext
.xport
.spi
- b
->ext
.xport
.spi
))
919 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
921 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
923 #ifndef NO_APPLE_EXTENSIONS
924 if (extfilter
< PF_EXTFILTER_EI
) {
925 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
927 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
931 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
933 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
940 #ifndef NO_APPLE_EXTENSIONS
941 if (a
->lan
.addr
.addr32
[3] > b
->lan
.addr
.addr32
[3])
943 if (a
->lan
.addr
.addr32
[3] < b
->lan
.addr
.addr32
[3])
945 if (a
->lan
.addr
.addr32
[2] > b
->lan
.addr
.addr32
[2])
947 if (a
->lan
.addr
.addr32
[2] < b
->lan
.addr
.addr32
[2])
949 if (a
->lan
.addr
.addr32
[1] > b
->lan
.addr
.addr32
[1])
951 if (a
->lan
.addr
.addr32
[1] < b
->lan
.addr
.addr32
[1])
953 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
955 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
957 if (extfilter
< PF_EXTFILTER_EI
||
958 !PF_AZERO(&b
->ext
.addr
, AF_INET6
)) {
959 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
961 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
963 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
965 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
967 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
969 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
971 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
973 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
977 if (a
->lan
.addr
.addr32
[3] > b
->lan
.addr
.addr32
[3])
979 if (a
->lan
.addr
.addr32
[3] < b
->lan
.addr
.addr32
[3])
981 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
983 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
985 if (a
->lan
.addr
.addr32
[2] > b
->lan
.addr
.addr32
[2])
987 if (a
->lan
.addr
.addr32
[2] < b
->lan
.addr
.addr32
[2])
989 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
991 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
993 if (a
->lan
.addr
.addr32
[1] > b
->lan
.addr
.addr32
[1])
995 if (a
->lan
.addr
.addr32
[1] < b
->lan
.addr
.addr32
[1])
997 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
999 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1001 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
1003 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
1005 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1007 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1014 #ifndef NO_APPLE_EXTENSIONS
1015 if (a
->app_state
&& b
->app_state
) {
1016 if (a
->app_state
->compare_lan_ext
&&
1017 b
->app_state
->compare_lan_ext
) {
1018 diff
= (const char *)b
->app_state
->compare_lan_ext
-
1019 (const char *)a
->app_state
->compare_lan_ext
;
1022 diff
= a
->app_state
->compare_lan_ext(a
->app_state
,
1029 if ((diff
= a
->lan
.port
- b
->lan
.port
) != 0)
1031 if ((diff
= a
->ext
.port
- b
->ext
.port
) != 0)
1039 pf_state_compare_ext_gwy(struct pf_state_key
*a
, struct pf_state_key
*b
)
1042 #ifndef NO_APPLE_EXTENSIONS
1046 if ((diff
= a
->proto
- b
->proto
) != 0)
1049 if ((diff
= a
->af
- b
->af
) != 0)
1052 #ifndef NO_APPLE_EXTENSIONS
1053 extfilter
= PF_EXTFILTER_APD
;
1057 case IPPROTO_ICMPV6
:
1058 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1063 if ((diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
1065 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1070 if ((diff
= a
->proto_variant
- b
->proto_variant
))
1072 extfilter
= a
->proto_variant
;
1073 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1075 if ((extfilter
< PF_EXTFILTER_AD
) &&
1076 (diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
1081 if (a
->proto_variant
== PF_GRE_PPTP_VARIANT
&&
1082 a
->proto_variant
== b
->proto_variant
) {
1083 if (!!(diff
= a
->gwy
.xport
.call_id
-
1084 b
->gwy
.xport
.call_id
))
1090 if (!!(diff
= a
->gwy
.xport
.spi
- b
->gwy
.xport
.spi
))
1102 #ifndef NO_APPLE_EXTENSIONS
1103 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1105 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1107 if (extfilter
< PF_EXTFILTER_EI
) {
1108 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1110 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1114 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1116 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1118 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1120 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1127 #ifndef NO_APPLE_EXTENSIONS
1128 if (a
->gwy
.addr
.addr32
[3] > b
->gwy
.addr
.addr32
[3])
1130 if (a
->gwy
.addr
.addr32
[3] < b
->gwy
.addr
.addr32
[3])
1132 if (a
->gwy
.addr
.addr32
[2] > b
->gwy
.addr
.addr32
[2])
1134 if (a
->gwy
.addr
.addr32
[2] < b
->gwy
.addr
.addr32
[2])
1136 if (a
->gwy
.addr
.addr32
[1] > b
->gwy
.addr
.addr32
[1])
1138 if (a
->gwy
.addr
.addr32
[1] < b
->gwy
.addr
.addr32
[1])
1140 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1142 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1144 if (extfilter
< PF_EXTFILTER_EI
||
1145 !PF_AZERO(&b
->ext
.addr
, AF_INET6
)) {
1146 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
1148 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
1150 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
1152 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
1154 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
1156 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1158 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1160 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1164 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
1166 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
1168 if (a
->gwy
.addr
.addr32
[3] > b
->gwy
.addr
.addr32
[3])
1170 if (a
->gwy
.addr
.addr32
[3] < b
->gwy
.addr
.addr32
[3])
1172 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
1174 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
1176 if (a
->gwy
.addr
.addr32
[2] > b
->gwy
.addr
.addr32
[2])
1178 if (a
->gwy
.addr
.addr32
[2] < b
->gwy
.addr
.addr32
[2])
1180 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
1182 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1184 if (a
->gwy
.addr
.addr32
[1] > b
->gwy
.addr
.addr32
[1])
1186 if (a
->gwy
.addr
.addr32
[1] < b
->gwy
.addr
.addr32
[1])
1188 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1190 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1192 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1194 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1201 #ifndef NO_APPLE_EXTENSIONS
1202 if (a
->app_state
&& b
->app_state
) {
1203 if (a
->app_state
->compare_ext_gwy
&&
1204 b
->app_state
->compare_ext_gwy
) {
1205 diff
= (const char *)b
->app_state
->compare_ext_gwy
-
1206 (const char *)a
->app_state
->compare_ext_gwy
;
1209 diff
= a
->app_state
->compare_ext_gwy(a
->app_state
,
1216 if ((diff
= a
->ext
.port
- b
->ext
.port
) != 0)
1218 if ((diff
= a
->gwy
.port
- b
->gwy
.port
) != 0)
1226 pf_state_compare_id(struct pf_state
*a
, struct pf_state
*b
)
1232 if (a
->creatorid
> b
->creatorid
)
1234 if (a
->creatorid
< b
->creatorid
)
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}

struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
}
1268 static struct pf_state
*
1269 pf_find_state(struct pfi_kif
*kif
, struct pf_state_key_cmp
*key
, u_int dir
)
1271 struct pf_state_key
*sk
= NULL
;
1274 pf_status
.fcounters
[FCNT_STATE_SEARCH
]++;
1278 sk
= RB_FIND(pf_state_tree_lan_ext
, &pf_statetbl_lan_ext
,
1279 (struct pf_state_key
*)key
);
1282 sk
= RB_FIND(pf_state_tree_ext_gwy
, &pf_statetbl_ext_gwy
,
1283 (struct pf_state_key
*)key
);
1286 panic("pf_find_state");
1289 /* list is sorted, if-bound states before floating ones */
1291 TAILQ_FOREACH(s
, &sk
->states
, next
)
1292 if (s
->kif
== pfi_all
|| s
->kif
== kif
)
1299 pf_find_state_all(struct pf_state_key_cmp
*key
, u_int dir
, int *more
)
1301 struct pf_state_key
*sk
= NULL
;
1302 struct pf_state
*s
, *ret
= NULL
;
1304 pf_status
.fcounters
[FCNT_STATE_SEARCH
]++;
1308 sk
= RB_FIND(pf_state_tree_lan_ext
,
1309 &pf_statetbl_lan_ext
, (struct pf_state_key
*)key
);
1312 sk
= RB_FIND(pf_state_tree_ext_gwy
,
1313 &pf_statetbl_ext_gwy
, (struct pf_state_key
*)key
);
1316 panic("pf_find_state_all");
1320 ret
= TAILQ_FIRST(&sk
->states
);
1324 TAILQ_FOREACH(s
, &sk
->states
, next
)
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = pf_time_second();
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = pf_time_second(), diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
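
/*
 * The threshold above is a decaying counter in fixed point: every hit
 * adds PF_THRESHOLD_MULT, pf_add_threshold() first ages the count in
 * proportion to the time elapsed since the last update, and
 * pf_check_threshold() trips once the count exceeds
 * limit * PF_THRESHOLD_MULT, approximating "at most <limit> new
 * connections per <seconds>" without floating point.
 */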
1362 pf_src_connlimit(struct pf_state
**state
)
1366 (*state
)->src_node
->conn
++;
1367 VERIFY((*state
)->src_node
->conn
!= 0);
1368 (*state
)->src
.tcp_est
= 1;
1369 pf_add_threshold(&(*state
)->src_node
->conn_rate
);
1371 if ((*state
)->rule
.ptr
->max_src_conn
&&
1372 (*state
)->rule
.ptr
->max_src_conn
<
1373 (*state
)->src_node
->conn
) {
1374 pf_status
.lcounters
[LCNT_SRCCONN
]++;
1378 if ((*state
)->rule
.ptr
->max_src_conn_rate
.limit
&&
1379 pf_check_threshold(&(*state
)->src_node
->conn_rate
)) {
1380 pf_status
.lcounters
[LCNT_SRCCONNRATE
]++;
1387 if ((*state
)->rule
.ptr
->overload_tbl
) {
1389 u_int32_t killed
= 0;
1391 pf_status
.lcounters
[LCNT_OVERLOAD_TABLE
]++;
1392 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1393 printf("pf_src_connlimit: blocking address ");
1394 pf_print_host(&(*state
)->src_node
->addr
, 0,
1395 (*state
)->state_key
->af
);
1398 bzero(&p
, sizeof (p
));
1399 p
.pfra_af
= (*state
)->state_key
->af
;
1400 switch ((*state
)->state_key
->af
) {
1404 p
.pfra_ip4addr
= (*state
)->src_node
->addr
.v4
;
1410 p
.pfra_ip6addr
= (*state
)->src_node
->addr
.v6
;
1415 pfr_insert_kentry((*state
)->rule
.ptr
->overload_tbl
,
1416 &p
, pf_calendar_time_second());
1418 /* kill existing states if that's required. */
1419 if ((*state
)->rule
.ptr
->flush
) {
1420 struct pf_state_key
*sk
;
1421 struct pf_state
*st
;
1423 pf_status
.lcounters
[LCNT_OVERLOAD_FLUSH
]++;
1424 RB_FOREACH(st
, pf_state_tree_id
, &tree_id
) {
1427 * Kill states from this source. (Only those
1428 * from the same rule if PF_FLUSH_GLOBAL is not
1432 (*state
)->state_key
->af
&&
1433 (((*state
)->state_key
->direction
==
1435 PF_AEQ(&(*state
)->src_node
->addr
,
1436 &sk
->lan
.addr
, sk
->af
)) ||
1437 ((*state
)->state_key
->direction
== PF_IN
&&
1438 PF_AEQ(&(*state
)->src_node
->addr
,
1439 &sk
->ext
.addr
, sk
->af
))) &&
1440 ((*state
)->rule
.ptr
->flush
&
1442 (*state
)->rule
.ptr
== st
->rule
.ptr
)) {
1443 st
->timeout
= PFTM_PURGE
;
1444 st
->src
.state
= st
->dst
.state
=
1449 if (pf_status
.debug
>= PF_DEBUG_MISC
)
1450 printf(", %u states killed", killed
);
1452 if (pf_status
.debug
>= PF_DEBUG_MISC
)
1456 /* kill this state */
1457 (*state
)->timeout
= PFTM_PURGE
;
1458 (*state
)->src
.state
= (*state
)->dst
.state
= TCPS_CLOSED
;
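
/*
 * Summary of the overload handling above: once a source exceeds
 * max-src-conn or max-src-conn-rate its address is added to the rule's
 * overload table, and if the rule requests flushing, matching states
 * from that source (all of them with PF_FLUSH_GLOBAL, otherwise only
 * those created by the same rule) are marked PFTM_PURGE so the purge
 * thread tears them down; finally the offending state itself is closed.
 */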
1463 pf_insert_src_node(struct pf_src_node
**sn
, struct pf_rule
*rule
,
1464 struct pf_addr
*src
, sa_family_t af
)
1466 struct pf_src_node k
;
1470 PF_ACPY(&k
.addr
, src
, af
);
1471 if (rule
->rule_flag
& PFRULE_RULESRCTRACK
||
1472 rule
->rpool
.opts
& PF_POOL_STICKYADDR
)
1476 pf_status
.scounters
[SCNT_SRC_NODE_SEARCH
]++;
1477 *sn
= RB_FIND(pf_src_tree
, &tree_src_tracking
, &k
);
1480 if (!rule
->max_src_nodes
||
1481 rule
->src_nodes
< rule
->max_src_nodes
)
1482 (*sn
) = pool_get(&pf_src_tree_pl
, PR_WAITOK
);
1484 pf_status
.lcounters
[LCNT_SRCNODES
]++;
1487 bzero(*sn
, sizeof (struct pf_src_node
));
1489 pf_init_threshold(&(*sn
)->conn_rate
,
1490 rule
->max_src_conn_rate
.limit
,
1491 rule
->max_src_conn_rate
.seconds
);
1494 if (rule
->rule_flag
& PFRULE_RULESRCTRACK
||
1495 rule
->rpool
.opts
& PF_POOL_STICKYADDR
)
1496 (*sn
)->rule
.ptr
= rule
;
1498 (*sn
)->rule
.ptr
= NULL
;
1499 PF_ACPY(&(*sn
)->addr
, src
, af
);
1500 if (RB_INSERT(pf_src_tree
,
1501 &tree_src_tracking
, *sn
) != NULL
) {
1502 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1503 printf("pf: src_tree insert failed: ");
1504 pf_print_host(&(*sn
)->addr
, 0, af
);
1507 pool_put(&pf_src_tree_pl
, *sn
);
1510 (*sn
)->creation
= pf_time_second();
1511 (*sn
)->ruletype
= rule
->action
;
1512 if ((*sn
)->rule
.ptr
!= NULL
)
1513 (*sn
)->rule
.ptr
->src_nodes
++;
1514 pf_status
.scounters
[SCNT_SRC_NODE_INSERT
]++;
1515 pf_status
.src_nodes
++;
1517 if (rule
->max_src_states
&&
1518 (*sn
)->states
>= rule
->max_src_states
) {
1519 pf_status
.lcounters
[LCNT_SRCSTATES
]++;
1527 pf_stateins_err(const char *tree
, struct pf_state
*s
, struct pfi_kif
*kif
)
1529 struct pf_state_key
*sk
= s
->state_key
;
1531 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1532 #ifndef NO_APPLE_EXTENSIONS
1533 printf("pf: state insert failed: %s %s ", tree
, kif
->pfik_name
);
1534 switch (sk
->proto
) {
1544 case IPPROTO_ICMPV6
:
1548 printf("PROTO=%u", sk
->proto
);
1552 pf_print_sk_host(&sk
->lan
, sk
->af
, sk
->proto
,
1555 pf_print_sk_host(&sk
->gwy
, sk
->af
, sk
->proto
,
1558 pf_print_sk_host(&sk
->ext
, sk
->af
, sk
->proto
,
1561 printf("pf: state insert failed: %s %s", tree
, kif
->pfik_name
);
1563 pf_print_host(&sk
->lan
.addr
, sk
->lan
.port
,
1566 pf_print_host(&sk
->gwy
.addr
, sk
->gwy
.port
,
1569 pf_print_host(&sk
->ext
.addr
, sk
->ext
.port
,
1572 if (s
->sync_flags
& PFSTATE_FROMSYNC
)
1573 printf(" (from sync)");
1579 pf_insert_state(struct pfi_kif
*kif
, struct pf_state
*s
)
1581 struct pf_state_key
*cur
;
1582 struct pf_state
*sp
;
1584 VERIFY(s
->state_key
!= NULL
);
1587 if ((cur
= RB_INSERT(pf_state_tree_lan_ext
, &pf_statetbl_lan_ext
,
1588 s
->state_key
)) != NULL
) {
1589 /* key exists. check for same kif, if none, add to key */
1590 TAILQ_FOREACH(sp
, &cur
->states
, next
)
1591 if (sp
->kif
== kif
) { /* collision! */
1592 pf_stateins_err("tree_lan_ext", s
, kif
);
1594 PF_DT_SKIP_LANEXT
|PF_DT_SKIP_EXTGWY
);
1597 pf_detach_state(s
, PF_DT_SKIP_LANEXT
|PF_DT_SKIP_EXTGWY
);
1598 pf_attach_state(cur
, s
, kif
== pfi_all
? 1 : 0);
1601 /* if cur != NULL, we already found a state key and attached to it */
1602 if (cur
== NULL
&& (cur
= RB_INSERT(pf_state_tree_ext_gwy
,
1603 &pf_statetbl_ext_gwy
, s
->state_key
)) != NULL
) {
1604 /* must not happen. we must have found the sk above! */
1605 pf_stateins_err("tree_ext_gwy", s
, kif
);
1606 pf_detach_state(s
, PF_DT_SKIP_EXTGWY
);
1610 if (s
->id
== 0 && s
->creatorid
== 0) {
1611 s
->id
= htobe64(pf_status
.stateid
++);
1612 s
->creatorid
= pf_status
.hostid
;
1614 if (RB_INSERT(pf_state_tree_id
, &tree_id
, s
) != NULL
) {
1615 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1616 printf("pf: state insert failed: "
1617 "id: %016llx creatorid: %08x",
1618 be64toh(s
->id
), ntohl(s
->creatorid
));
1619 if (s
->sync_flags
& PFSTATE_FROMSYNC
)
1620 printf(" (from sync)");
1623 pf_detach_state(s
, 0);
1626 TAILQ_INSERT_TAIL(&state_list
, s
, entry_list
);
1627 pf_status
.fcounters
[FCNT_STATE_INSERT
]++;
1629 VERIFY(pf_status
.states
!= 0);
1630 pfi_kif_ref(kif
, PFI_KIF_REF_STATE
);
1632 pfsync_insert_state(s
);
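
/*
 * A state is reachable through two RB trees keyed on its state key:
 * pf_statetbl_lan_ext for traffic seen on the internal (lan/gwy) side
 * and pf_statetbl_ext_gwy for the external side, plus tree_id keyed by
 * (id, creatorid) for pfsync.  If any insertion collides, the partial
 * attachment is rolled back via pf_detach_state() before failing.
 */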
1638 pf_purge_thread_fn(void *v
, wait_result_t w
)
1640 #pragma unused(v, w)
1641 u_int32_t nloops
= 0;
1645 (void) tsleep(pf_purge_thread_fn
, PWAIT
, "pftm", t
* hz
);
1647 lck_rw_lock_shared(pf_perim_lock
);
1648 lck_mtx_lock(pf_lock
);
1650 /* purge everything if not running */
1651 if (!pf_status
.running
) {
1652 pf_purge_expired_states(pf_status
.states
);
1653 pf_purge_expired_fragments();
1654 pf_purge_expired_src_nodes();
1656 /* terminate thread (we don't currently do this) */
1657 if (pf_purge_thread
== NULL
) {
1658 lck_mtx_unlock(pf_lock
);
1659 lck_rw_done(pf_perim_lock
);
1661 thread_deallocate(current_thread());
1662 thread_terminate(current_thread());
1666 /* if there's nothing left, sleep w/o timeout */
1667 if (pf_status
.states
== 0 &&
1668 pf_normalize_isempty() &&
1669 RB_EMPTY(&tree_src_tracking
))
1672 lck_mtx_unlock(pf_lock
);
1673 lck_rw_done(pf_perim_lock
);
1676 } else if (t
== 0) {
1677 /* Set timeout to 1 second */
1681 /* process a fraction of the state table every second */
1682 pf_purge_expired_states(1 + (pf_status
.states
1683 / pf_default_rule
.timeout
[PFTM_INTERVAL
]));
1685 /* purge other expired types every PFTM_INTERVAL seconds */
1686 if (++nloops
>= pf_default_rule
.timeout
[PFTM_INTERVAL
]) {
1687 pf_purge_expired_fragments();
1688 pf_purge_expired_src_nodes();
1692 lck_mtx_unlock(pf_lock
);
1693 lck_rw_done(pf_perim_lock
);
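
/*
 * Pacing note: the purge thread wakes roughly once a second and only
 * scans states / PFTM_INTERVAL entries per pass, so a complete sweep of
 * the state table (and of fragments and source nodes) takes about
 * PFTM_INTERVAL seconds while keeping each pass cheap.
 */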
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	t;
	u_int32_t	start, end, states;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (pf_time_second());
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	VERIFY(state->timeout != PFTM_UNLINKED);
	VERIFY(state->timeout < PFTM_MAX);
	t = state->rule.ptr->timeout[state->timeout];
	if (!t)
		t = pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + t * (end - states) /
			    (end - start));
		else
			return (pf_time_second());
	}
	return (state->expire + t);
}
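
/*
 * Adaptive expiry: below PFTM_ADAPTIVE_START states keep their full
 * timeout; between start and end the remaining lifetime shrinks
 * linearly with the number of allocated states; at or beyond
 * PFTM_ADAPTIVE_END states are expired immediately.
 */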
void
pf_purge_expired_src_nodes(void)
{
	struct pf_src_node	*cur, *next;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states <= 0 && cur->expire <= pf_time_second()) {
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			pf_status.src_nodes--;
			pool_put(&pf_src_tree_pl, cur);
		}
	}
}
1762 pf_src_tree_remove_state(struct pf_state
*s
)
1766 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1768 if (s
->src_node
!= NULL
) {
1769 if (s
->src
.tcp_est
) {
1770 VERIFY(s
->src_node
->conn
> 0);
1771 --s
->src_node
->conn
;
1773 VERIFY(s
->src_node
->states
> 0);
1774 if (--s
->src_node
->states
<= 0) {
1775 t
= s
->rule
.ptr
->timeout
[PFTM_SRC_NODE
];
1777 t
= pf_default_rule
.timeout
[PFTM_SRC_NODE
];
1778 s
->src_node
->expire
= pf_time_second() + t
;
1781 if (s
->nat_src_node
!= s
->src_node
&& s
->nat_src_node
!= NULL
) {
1782 VERIFY(s
->nat_src_node
->states
> 0);
1783 if (--s
->nat_src_node
->states
<= 0) {
1784 t
= s
->rule
.ptr
->timeout
[PFTM_SRC_NODE
];
1786 t
= pf_default_rule
.timeout
[PFTM_SRC_NODE
];
1787 s
->nat_src_node
->expire
= pf_time_second() + t
;
1790 s
->src_node
= s
->nat_src_node
= NULL
;
1794 pf_unlink_state(struct pf_state
*cur
)
1796 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1798 #ifndef NO_APPLE_EXTENSIONS
1799 if (cur
->src
.state
== PF_TCPS_PROXY_DST
) {
1800 pf_send_tcp(cur
->rule
.ptr
, cur
->state_key
->af
,
1801 &cur
->state_key
->ext
.addr
, &cur
->state_key
->lan
.addr
,
1802 cur
->state_key
->ext
.xport
.port
,
1803 cur
->state_key
->lan
.xport
.port
,
1804 cur
->src
.seqhi
, cur
->src
.seqlo
+ 1,
1805 TH_RST
|TH_ACK
, 0, 0, 0, 1, cur
->tag
, NULL
, NULL
);
1808 hook_runloop(&cur
->unlink_hooks
, HOOK_REMOVE
|HOOK_FREE
);
1810 if (cur
->src
.state
== PF_TCPS_PROXY_DST
) {
1811 pf_send_tcp(cur
->rule
.ptr
, cur
->state_key
->af
,
1812 &cur
->state_key
->ext
.addr
, &cur
->state_key
->lan
.addr
,
1813 cur
->state_key
->ext
.port
, cur
->state_key
->lan
.port
,
1814 cur
->src
.seqhi
, cur
->src
.seqlo
+ 1,
1815 TH_RST
|TH_ACK
, 0, 0, 0, 1, cur
->tag
, NULL
, NULL
);
1818 RB_REMOVE(pf_state_tree_id
, &tree_id
, cur
);
1820 if (cur
->creatorid
== pf_status
.hostid
)
1821 pfsync_delete_state(cur
);
1823 cur
->timeout
= PFTM_UNLINKED
;
1824 pf_src_tree_remove_state(cur
);
1825 pf_detach_state(cur
, 0);
1828 /* callers should be at splpf and hold the
1829 * write_lock on pf_consistency_lock */
1831 pf_free_state(struct pf_state
*cur
)
1833 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1835 if (pfsyncif
!= NULL
&&
1836 (pfsyncif
->sc_bulk_send_next
== cur
||
1837 pfsyncif
->sc_bulk_terminator
== cur
))
1840 VERIFY(cur
->timeout
== PFTM_UNLINKED
);
1841 VERIFY(cur
->rule
.ptr
->states
> 0);
1842 if (--cur
->rule
.ptr
->states
<= 0 &&
1843 cur
->rule
.ptr
->src_nodes
<= 0)
1844 pf_rm_rule(NULL
, cur
->rule
.ptr
);
1845 if (cur
->nat_rule
.ptr
!= NULL
) {
1846 VERIFY(cur
->nat_rule
.ptr
->states
> 0);
1847 if (--cur
->nat_rule
.ptr
->states
<= 0 &&
1848 cur
->nat_rule
.ptr
->src_nodes
<= 0)
1849 pf_rm_rule(NULL
, cur
->nat_rule
.ptr
);
1851 if (cur
->anchor
.ptr
!= NULL
) {
1852 VERIFY(cur
->anchor
.ptr
->states
> 0);
1853 if (--cur
->anchor
.ptr
->states
<= 0)
1854 pf_rm_rule(NULL
, cur
->anchor
.ptr
);
1856 pf_normalize_tcp_cleanup(cur
);
1857 pfi_kif_unref(cur
->kif
, PFI_KIF_REF_STATE
);
1858 TAILQ_REMOVE(&state_list
, cur
, entry_list
);
1860 pf_tag_unref(cur
->tag
);
1861 pool_put(&pf_state_pl
, cur
);
1862 pf_status
.fcounters
[FCNT_STATE_REMOVALS
]++;
1863 VERIFY(pf_status
.states
> 0);
1868 pf_purge_expired_states(u_int32_t maxcheck
)
1870 static struct pf_state
*cur
= NULL
;
1871 struct pf_state
*next
;
1873 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1875 while (maxcheck
--) {
1876 /* wrap to start of list when we hit the end */
1878 cur
= TAILQ_FIRST(&state_list
);
1880 break; /* list empty */
1883 /* get next state, as cur may get deleted */
1884 next
= TAILQ_NEXT(cur
, entry_list
);
1886 if (cur
->timeout
== PFTM_UNLINKED
) {
1888 } else if (pf_state_expires(cur
) <= pf_time_second()) {
1889 /* unlink and free expired state */
1890 pf_unlink_state(cur
);
1898 pf_tbladdr_setup(struct pf_ruleset
*rs
, struct pf_addr_wrap
*aw
)
1900 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1902 if (aw
->type
!= PF_ADDR_TABLE
)
1904 if ((aw
->p
.tbl
= pfr_attach_table(rs
, aw
->v
.tblname
)) == NULL
)
1910 pf_tbladdr_remove(struct pf_addr_wrap
*aw
)
1912 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1914 if (aw
->type
!= PF_ADDR_TABLE
|| aw
->p
.tbl
== NULL
)
1916 pfr_detach_table(aw
->p
.tbl
);
1921 pf_tbladdr_copyout(struct pf_addr_wrap
*aw
)
1923 struct pfr_ktable
*kt
= aw
->p
.tbl
;
1925 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1927 if (aw
->type
!= PF_ADDR_TABLE
|| kt
== NULL
)
1929 if (!(kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) && kt
->pfrkt_root
!= NULL
)
1930 kt
= kt
->pfrkt_root
;
1932 aw
->p
.tblcnt
= (kt
->pfrkt_flags
& PFR_TFLAG_ACTIVE
) ?
1936 #ifndef NO_APPLE_EXTENSIONS
1938 pf_print_addr(struct pf_addr
*addr
, sa_family_t af
)
1943 u_int32_t a
= ntohl(addr
->addr32
[0]);
1944 printf("%u.%u.%u.%u", (a
>>24)&255, (a
>>16)&255,
1952 u_int8_t i
, curstart
= 255, curend
= 0,
1953 maxstart
= 0, maxend
= 0;
1954 for (i
= 0; i
< 8; i
++) {
1955 if (!addr
->addr16
[i
]) {
1956 if (curstart
== 255)
1962 if ((curend
- curstart
) >
1963 (maxend
- maxstart
)) {
1964 maxstart
= curstart
;
1971 for (i
= 0; i
< 8; i
++) {
1972 if (i
>= maxstart
&& i
<= maxend
) {
1981 b
= ntohs(addr
->addr16
[i
]);
1994 pf_print_sk_host(struct pf_state_host
*sh
, sa_family_t af
, int proto
,
1995 u_int8_t proto_variant
)
1997 pf_print_addr(&sh
->addr
, af
);
2002 printf("[%08x]", ntohl(sh
->xport
.spi
));
2006 if (proto_variant
== PF_GRE_PPTP_VARIANT
)
2007 printf("[%u]", ntohs(sh
->xport
.call_id
));
2012 printf("[%u]", ntohs(sh
->xport
.port
));
2022 pf_print_host(struct pf_addr
*addr
, u_int16_t p
, sa_family_t af
)
2024 #ifndef NO_APPLE_EXTENSIONS
2025 pf_print_addr(addr
, af
);
2027 printf("[%u]", ntohs(p
));
2032 u_int32_t a
= ntohl(addr
->addr32
[0]);
2033 printf("%u.%u.%u.%u", (a
>>24)&255, (a
>>16)&255,
2045 u_int8_t i
, curstart
= 255, curend
= 0,
2046 maxstart
= 0, maxend
= 0;
2047 for (i
= 0; i
< 8; i
++) {
2048 if (!addr
->addr16
[i
]) {
2049 if (curstart
== 255)
2055 if ((curend
- curstart
) >
2056 (maxend
- maxstart
)) {
2057 maxstart
= curstart
;
2064 for (i
= 0; i
< 8; i
++) {
2065 if (i
>= maxstart
&& i
<= maxend
) {
2074 b
= ntohs(addr
->addr16
[i
]);
2092 pf_print_state(struct pf_state
*s
)
2094 struct pf_state_key
*sk
= s
->state_key
;
2095 switch (sk
->proto
) {
2096 #ifndef NO_APPLE_EXTENSIONS
2101 printf("GRE%u ", sk
->proto_variant
);
2113 case IPPROTO_ICMPV6
:
2117 printf("%u ", sk
->proto
);
2120 #ifndef NO_APPLE_EXTENSIONS
2121 pf_print_sk_host(&sk
->lan
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2123 pf_print_sk_host(&sk
->gwy
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2125 pf_print_sk_host(&sk
->ext
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2127 pf_print_host(&sk
->lan
.addr
, sk
->lan
.port
, sk
->af
);
2129 pf_print_host(&sk
->gwy
.addr
, sk
->gwy
.port
, sk
->af
);
2131 pf_print_host(&sk
->ext
.addr
, sk
->ext
.port
, sk
->af
);
2133 printf(" [lo=%u high=%u win=%u modulator=%u", s
->src
.seqlo
,
2134 s
->src
.seqhi
, s
->src
.max_win
, s
->src
.seqdiff
);
2135 if (s
->src
.wscale
&& s
->dst
.wscale
)
2136 printf(" wscale=%u", s
->src
.wscale
& PF_WSCALE_MASK
);
2138 printf(" [lo=%u high=%u win=%u modulator=%u", s
->dst
.seqlo
,
2139 s
->dst
.seqhi
, s
->dst
.max_win
, s
->dst
.seqdiff
);
2140 if (s
->src
.wscale
&& s
->dst
.wscale
)
2141 printf(" wscale=%u", s
->dst
.wscale
& PF_WSCALE_MASK
);
2143 printf(" %u:%u", s
->src
.state
, s
->dst
.state
);
2147 pf_print_flags(u_int8_t f
)
2169 #define PF_SET_SKIP_STEPS(i) \
2171 while (head[i] != cur) { \
2172 head[i]->skip[i].ptr = cur; \
2173 head[i] = TAILQ_NEXT(head[i], entries); \
2178 pf_calc_skip_steps(struct pf_rulequeue
*rules
)
2180 struct pf_rule
*cur
, *prev
, *head
[PF_SKIP_COUNT
];
2183 cur
= TAILQ_FIRST(rules
);
2185 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
2187 while (cur
!= NULL
) {
2189 if (cur
->kif
!= prev
->kif
|| cur
->ifnot
!= prev
->ifnot
)
2190 PF_SET_SKIP_STEPS(PF_SKIP_IFP
);
2191 if (cur
->direction
!= prev
->direction
)
2192 PF_SET_SKIP_STEPS(PF_SKIP_DIR
);
2193 if (cur
->af
!= prev
->af
)
2194 PF_SET_SKIP_STEPS(PF_SKIP_AF
);
2195 if (cur
->proto
!= prev
->proto
)
2196 PF_SET_SKIP_STEPS(PF_SKIP_PROTO
);
2197 if (cur
->src
.neg
!= prev
->src
.neg
||
2198 pf_addr_wrap_neq(&cur
->src
.addr
, &prev
->src
.addr
))
2199 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR
);
2200 #ifndef NO_APPLE_EXTENSIONS
2202 union pf_rule_xport
*cx
= &cur
->src
.xport
;
2203 union pf_rule_xport
*px
= &prev
->src
.xport
;
2205 switch (cur
->proto
) {
2208 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2211 if (prev
->proto
== IPPROTO_GRE
||
2212 prev
->proto
== IPPROTO_ESP
||
2213 cx
->range
.op
!= px
->range
.op
||
2214 cx
->range
.port
[0] != px
->range
.port
[0] ||
2215 cx
->range
.port
[1] != px
->range
.port
[1])
2216 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2221 if (cur
->src
.port
[0] != prev
->src
.port
[0] ||
2222 cur
->src
.port
[1] != prev
->src
.port
[1] ||
2223 cur
->src
.port_op
!= prev
->src
.port_op
)
2224 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2226 if (cur
->dst
.neg
!= prev
->dst
.neg
||
2227 pf_addr_wrap_neq(&cur
->dst
.addr
, &prev
->dst
.addr
))
2228 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR
);
2229 #ifndef NO_APPLE_EXTENSIONS
2231 union pf_rule_xport
*cx
= &cur
->dst
.xport
;
2232 union pf_rule_xport
*px
= &prev
->dst
.xport
;
2234 switch (cur
->proto
) {
2236 if (cur
->proto
!= prev
->proto
||
2237 cx
->call_id
!= px
->call_id
)
2238 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2241 if (cur
->proto
!= prev
->proto
||
2243 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2246 if (prev
->proto
== IPPROTO_GRE
||
2247 prev
->proto
== IPPROTO_ESP
||
2248 cx
->range
.op
!= px
->range
.op
||
2249 cx
->range
.port
[0] != px
->range
.port
[0] ||
2250 cx
->range
.port
[1] != px
->range
.port
[1])
2251 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2256 if (cur
->dst
.port
[0] != prev
->dst
.port
[0] ||
2257 cur
->dst
.port
[1] != prev
->dst
.port
[1] ||
2258 cur
->dst
.port_op
!= prev
->dst
.port_op
)
2259 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2263 cur
= TAILQ_NEXT(cur
, entries
);
2265 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
2266 PF_SET_SKIP_STEPS(i
);
2270 pf_addr_wrap_neq(struct pf_addr_wrap
*aw1
, struct pf_addr_wrap
*aw2
)
2272 if (aw1
->type
!= aw2
->type
)
2274 switch (aw1
->type
) {
2275 case PF_ADDR_ADDRMASK
:
2277 if (PF_ANEQ(&aw1
->v
.a
.addr
, &aw2
->v
.a
.addr
, 0))
2279 if (PF_ANEQ(&aw1
->v
.a
.mask
, &aw2
->v
.a
.mask
, 0))
2282 case PF_ADDR_DYNIFTL
:
2283 return (aw1
->p
.dyn
->pfid_kt
!= aw2
->p
.dyn
->pfid_kt
);
2284 case PF_ADDR_NOROUTE
:
2285 case PF_ADDR_URPFFAILED
:
2288 return (aw1
->p
.tbl
!= aw2
->p
.tbl
);
2289 case PF_ADDR_RTLABEL
:
2290 return (aw1
->v
.rtlabel
!= aw2
->v
.rtlabel
);
2292 printf("invalid address type: %d\n", aw1
->type
);
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0);
	l = cksum + old - new;
	l = (l >> 16) + (l & 0xffff);
	l = l & 0xffff;
	if (udp && !l)
		return (0xffff);
	return (l);
}
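
/*
 * pf_cksum_fixup() is the standard incremental Internet-checksum
 * update (in the spirit of RFC 1624): remove the old 16-bit word, add
 * the new one and fold the carry, so NAT rewrites of addresses and
 * ports never need to recompute a checksum over the whole packet.
 */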
2313 pf_change_ap(int dir
, struct mbuf
*m
, struct pf_addr
*a
, u_int16_t
*p
,
2314 u_int16_t
*ic
, u_int16_t
*pc
, struct pf_addr
*an
, u_int16_t pn
,
2315 u_int8_t u
, sa_family_t af
)
2320 PF_ACPY(&ao
, a
, af
);
2328 *ic
= pf_cksum_fixup(pf_cksum_fixup(*ic
,
2329 ao
.addr16
[0], an
->addr16
[0], 0),
2330 ao
.addr16
[1], an
->addr16
[1], 0);
2333 * If the packet is originated from an ALG on the NAT gateway
2334 * (source address is loopback or local), in which case the
2335 * TCP/UDP checksum field contains the pseudo header checksum
2336 * that's not yet complemented.
2338 if (dir
== PF_OUT
&& m
!= NULL
&&
2339 (m
->m_flags
& M_PKTHDR
) &&
2340 (m
->m_pkthdr
.csum_flags
& (CSUM_TCP
| CSUM_UDP
))) {
2341 /* Pseudo-header checksum does not include ports */
2342 *pc
= ~pf_cksum_fixup(pf_cksum_fixup(~*pc
,
2343 ao
.addr16
[0], an
->addr16
[0], u
),
2344 ao
.addr16
[1], an
->addr16
[1], u
);
2346 *pc
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc
,
2347 ao
.addr16
[0], an
->addr16
[0], u
),
2348 ao
.addr16
[1], an
->addr16
[1], u
),
2356 * If the packet is originated from an ALG on the NAT gateway
2357 * (source address is loopback or local), in which case the
2358 * TCP/UDP checksum field contains the pseudo header checksum
2359 * that's not yet complemented.
2361 if (dir
== PF_OUT
&& m
!= NULL
&&
2362 (m
->m_flags
& M_PKTHDR
) &&
2363 (m
->m_pkthdr
.csum_flags
& (CSUM_TCPIPV6
| CSUM_UDPIPV6
))) {
2364 /* Pseudo-header checksum does not include ports */
2365 *pc
= ~pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2366 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2367 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(~*pc
,
2368 ao
.addr16
[0], an
->addr16
[0], u
),
2369 ao
.addr16
[1], an
->addr16
[1], u
),
2370 ao
.addr16
[2], an
->addr16
[2], u
),
2371 ao
.addr16
[3], an
->addr16
[3], u
),
2372 ao
.addr16
[4], an
->addr16
[4], u
),
2373 ao
.addr16
[5], an
->addr16
[5], u
),
2374 ao
.addr16
[6], an
->addr16
[6], u
),
2375 ao
.addr16
[7], an
->addr16
[7], u
),
2378 *pc
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2379 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2380 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc
,
2381 ao
.addr16
[0], an
->addr16
[0], u
),
2382 ao
.addr16
[1], an
->addr16
[1], u
),
2383 ao
.addr16
[2], an
->addr16
[2], u
),
2384 ao
.addr16
[3], an
->addr16
[3], u
),
2385 ao
.addr16
[4], an
->addr16
[4], u
),
2386 ao
.addr16
[5], an
->addr16
[5], u
),
2387 ao
.addr16
[6], an
->addr16
[6], u
),
2388 ao
.addr16
[7], an
->addr16
[7], u
),
/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof (ao));
	memcpy(a, &an, sizeof (u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}
void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
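
/*
 * Illustrative sketch: pf_change_a6() above rewrites a 128-bit address and
 * repairs the transport checksum by running each of the eight 16-bit words
 * through the incremental fixup.  A loop-based equivalent of that unrolled
 * chain is shown below; pf_example_change_a6() is a hypothetical name and
 * the folding follows the RFC 1624 sketch earlier in this file.
 */
static void
pf_example_change_a6(u_int16_t addr_w[8], const u_int16_t new_w[8],
    u_int16_t *cksum)
{
	u_int32_t sum = (u_int16_t)~*cksum;	/* uncomplemented running sum */
	int i;

	for (i = 0; i < 8; i++) {
		sum += (u_int16_t)~addr_w[i];	/* remove the old word */
		sum += new_w[i];		/* account for the new word */
		addr_w[i] = new_w[i];		/* install it */
	}
	while (sum >> 16)			/* fold the accumulated carries */
		sum = (sum >> 16) + (sum & 0xffff);
	*cksum = (u_int16_t)~sum;
}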
2433 pf_change_icmp(struct pf_addr
*ia
, u_int16_t
*ip
, struct pf_addr
*oa
,
2434 struct pf_addr
*na
, u_int16_t np
, u_int16_t
*pc
, u_int16_t
*h2c
,
2435 u_int16_t
*ic
, u_int16_t
*hc
, u_int8_t u
, sa_family_t af
)
2437 struct pf_addr oia
, ooa
;
2439 PF_ACPY(&oia
, ia
, af
);
2440 PF_ACPY(&ooa
, oa
, af
);
2442 /* Change inner protocol port, fix inner protocol checksum. */
2444 u_int16_t oip
= *ip
;
2451 *pc
= pf_cksum_fixup(*pc
, oip
, *ip
, u
);
2452 *ic
= pf_cksum_fixup(*ic
, oip
, *ip
, 0);
2454 *ic
= pf_cksum_fixup(*ic
, opc
, *pc
, 0);
2456 /* Change inner ip address, fix inner ip and icmp checksums. */
2457 PF_ACPY(ia
, na
, af
);
2461 u_int32_t oh2c
= *h2c
;
2463 *h2c
= pf_cksum_fixup(pf_cksum_fixup(*h2c
,
2464 oia
.addr16
[0], ia
->addr16
[0], 0),
2465 oia
.addr16
[1], ia
->addr16
[1], 0);
2466 *ic
= pf_cksum_fixup(pf_cksum_fixup(*ic
,
2467 oia
.addr16
[0], ia
->addr16
[0], 0),
2468 oia
.addr16
[1], ia
->addr16
[1], 0);
2469 *ic
= pf_cksum_fixup(*ic
, oh2c
, *h2c
, 0);
2475 *ic
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2476 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2477 pf_cksum_fixup(pf_cksum_fixup(*ic
,
2478 oia
.addr16
[0], ia
->addr16
[0], u
),
2479 oia
.addr16
[1], ia
->addr16
[1], u
),
2480 oia
.addr16
[2], ia
->addr16
[2], u
),
2481 oia
.addr16
[3], ia
->addr16
[3], u
),
2482 oia
.addr16
[4], ia
->addr16
[4], u
),
2483 oia
.addr16
[5], ia
->addr16
[5], u
),
2484 oia
.addr16
[6], ia
->addr16
[6], u
),
2485 oia
.addr16
[7], ia
->addr16
[7], u
);
2489 /* Change outer ip address, fix outer ip or icmpv6 checksum. */
2490 PF_ACPY(oa
, na
, af
);
2494 *hc
= pf_cksum_fixup(pf_cksum_fixup(*hc
,
2495 ooa
.addr16
[0], oa
->addr16
[0], 0),
2496 ooa
.addr16
[1], oa
->addr16
[1], 0);
2501 *ic
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2502 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2503 pf_cksum_fixup(pf_cksum_fixup(*ic
,
2504 ooa
.addr16
[0], oa
->addr16
[0], u
),
2505 ooa
.addr16
[1], oa
->addr16
[1], u
),
2506 ooa
.addr16
[2], oa
->addr16
[2], u
),
2507 ooa
.addr16
[3], oa
->addr16
[3], u
),
2508 ooa
.addr16
[4], oa
->addr16
[4], u
),
2509 ooa
.addr16
[5], oa
->addr16
[5], u
),
2510 ooa
.addr16
[6], oa
->addr16
[6], u
),
2511 ooa
.addr16
[7], oa
->addr16
[7], u
);
2519 * Need to modulate the sequence numbers in the TCP SACK option
2520 * (credits to Krzysztof Pfaff for report and patch)
2523 pf_modulate_sack(struct mbuf
*m
, int off
, struct pf_pdesc
*pd
,
2524 struct tcphdr
*th
, struct pf_state_peer
*dst
)
2526 int hlen
= (th
->th_off
<< 2) - sizeof (*th
), thoptlen
= hlen
;
2527 u_int8_t opts
[MAX_TCPOPTLEN
], *opt
= opts
;
2528 int copyback
= 0, i
, olen
;
2529 struct sackblk sack
;
2531 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2532 if (hlen
< TCPOLEN_SACKLEN
||
2533 !pf_pull_hdr(m
, off
+ sizeof (*th
), opts
, hlen
, NULL
, NULL
, pd
->af
))
2536 while (hlen
>= TCPOLEN_SACKLEN
) {
2539 case TCPOPT_EOL
: /* FALLTHROUGH */
2547 if (olen
>= TCPOLEN_SACKLEN
) {
2548 for (i
= 2; i
+ TCPOLEN_SACK
<= olen
;
2549 i
+= TCPOLEN_SACK
) {
2550 memcpy(&sack
, &opt
[i
], sizeof (sack
));
2551 pf_change_a(&sack
.start
, &th
->th_sum
,
2552 htonl(ntohl(sack
.start
) -
2554 pf_change_a(&sack
.end
, &th
->th_sum
,
2555 htonl(ntohl(sack
.end
) -
2557 memcpy(&opt
[i
], &sack
, sizeof (sack
));
2559 #ifndef NO_APPLE_EXTENSIONS
2560 copyback
= off
+ sizeof (*th
) + thoptlen
;
2574 #ifndef NO_APPLE_EXTENSIONS
2576 m
= pf_lazy_makewritable(pd
, m
, copyback
);
2579 m_copyback(m
, off
+ sizeof (*th
), thoptlen
, opts
);
2583 m_copyback(m
, off
+ sizeof (*th
), thoptlen
, opts
);
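
/*
 * Illustrative sketch: pf_modulate_sack() above walks the TCP options and
 * shifts every SACK edge by the peer's sequence-number modulator, the same
 * offset applied to the main sequence numbers, so the receiver never sees
 * the unmodulated values.  The hypothetical helper below shows the per-edge
 * adjustment in isolation: edges travel in network byte order and the
 * offset is applied in host order, matching the htonl(ntohl(...) - seqdiff)
 * calls above.
 */
static u_int32_t
pf_example_modulate_edge(u_int32_t edge_net, u_int32_t seqdiff)
{
	return (htonl(ntohl(edge_net) - seqdiff));
}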
2589 pf_send_tcp(const struct pf_rule
*r
, sa_family_t af
,
2590 const struct pf_addr
*saddr
, const struct pf_addr
*daddr
,
2591 u_int16_t sport
, u_int16_t dport
, u_int32_t seq
, u_int32_t ack
,
2592 u_int8_t flags
, u_int16_t win
, u_int16_t mss
, u_int8_t ttl
, int tag
,
2593 u_int16_t rtag
, struct ether_header
*eh
, struct ifnet
*ifp
)
2595 #pragma unused(eh, ifp)
2599 struct ip
*h
= NULL
;
2602 struct ip6_hdr
*h6
= NULL
;
2604 struct tcphdr
*th
= NULL
;
2606 struct pf_mtag
*pf_mtag
;
2608 /* maximum segment size tcp option */
2609 tlen
= sizeof (struct tcphdr
);
2616 len
= sizeof (struct ip
) + tlen
;
2621 len
= sizeof (struct ip6_hdr
) + tlen
;
2625 panic("pf_send_tcp: not AF_INET or AF_INET6!");
2629 /* create outgoing mbuf */
2630 m
= m_gethdr(M_DONTWAIT
, MT_HEADER
);
2634 if ((pf_mtag
= pf_get_mtag(m
)) == NULL
) {
2640 pf_mtag
->flags
|= PF_TAG_GENERATED
;
2641 pf_mtag
->tag
= rtag
;
2643 if (r
!= NULL
&& PF_RTABLEID_IS_VALID(r
->rtableid
))
2644 pf_mtag
->rtableid
= r
->rtableid
;
2647 if (r
!= NULL
&& r
->qid
) {
2648 pf_mtag
->qid
= r
->qid
;
2649 /* add hints for ecn */
2650 pf_mtag
->hdr
= mtod(m
, struct ip
*);
2653 m
->m_data
+= max_linkhdr
;
2654 m
->m_pkthdr
.len
= m
->m_len
= len
;
2655 m
->m_pkthdr
.rcvif
= NULL
;
2656 bzero(m
->m_data
, len
);
2660 h
= mtod(m
, struct ip
*);
2662 /* IP header fields included in the TCP checksum */
2663 h
->ip_p
= IPPROTO_TCP
;
2664 h
->ip_len
= htons(tlen
);
2665 h
->ip_src
.s_addr
= saddr
->v4
.s_addr
;
2666 h
->ip_dst
.s_addr
= daddr
->v4
.s_addr
;
2668 th
= (struct tcphdr
*)((caddr_t
)h
+ sizeof (struct ip
));
2673 h6
= mtod(m
, struct ip6_hdr
*);
2675 /* IP header fields included in the TCP checksum */
2676 h6
->ip6_nxt
= IPPROTO_TCP
;
2677 h6
->ip6_plen
= htons(tlen
);
2678 memcpy(&h6
->ip6_src
, &saddr
->v6
, sizeof (struct in6_addr
));
2679 memcpy(&h6
->ip6_dst
, &daddr
->v6
, sizeof (struct in6_addr
));
2681 th
= (struct tcphdr
*)((caddr_t
)h6
+ sizeof (struct ip6_hdr
));
2687 th
->th_sport
= sport
;
2688 th
->th_dport
= dport
;
2689 th
->th_seq
= htonl(seq
);
2690 th
->th_ack
= htonl(ack
);
2691 th
->th_off
= tlen
>> 2;
2692 th
->th_flags
= flags
;
2693 th
->th_win
= htons(win
);
2696 opt
= (char *)(th
+ 1);
2697 opt
[0] = TCPOPT_MAXSEG
;
2699 #if BYTE_ORDER != BIG_ENDIAN
2702 bcopy((caddr_t
)&mss
, (caddr_t
)(opt
+ 2), 2);
2711 th
->th_sum
= in_cksum(m
, len
);
2713 /* Finish the IP header */
2715 h
->ip_hl
= sizeof (*h
) >> 2;
2716 h
->ip_tos
= IPTOS_LOWDELAY
;
2718 * ip_output() expects ip_len and ip_off to be in host order.
2721 h
->ip_off
= (path_mtu_discovery
? IP_DF
: 0);
2722 h
->ip_ttl
= ttl
? ttl
: ip_defttl
;
2725 bzero(&ro
, sizeof (ro
));
2726 ip_output(m
, NULL
, &ro
, 0, NULL
, NULL
);
2727 if (ro
.ro_rt
!= NULL
)
2734 struct route_in6 ro6
;
2737 th
->th_sum
= in6_cksum(m
, IPPROTO_TCP
,
2738 sizeof (struct ip6_hdr
), tlen
);
2740 h6
->ip6_vfc
|= IPV6_VERSION
;
2741 h6
->ip6_hlim
= IPV6_DEFHLIM
;
2743 bzero(&ro6
, sizeof (ro6
));
2744 ip6_output(m
, NULL
, &ro6
, 0, NULL
, NULL
, NULL
);
2745 if (ro6
.ro_rt
!= NULL
)
2754 pf_send_icmp(struct mbuf
*m
, u_int8_t type
, u_int8_t code
, sa_family_t af
,
2758 struct pf_mtag
*pf_mtag
;
2760 m0
= m_copy(m
, 0, M_COPYALL
);
2764 if ((pf_mtag
= pf_get_mtag(m0
)) == NULL
)
2767 pf_mtag
->flags
|= PF_TAG_GENERATED
;
2769 if (PF_RTABLEID_IS_VALID(r
->rtableid
))
2770 pf_mtag
->rtableid
= r
->rtableid
;
2774 pf_mtag
->qid
= r
->qid
;
2775 /* add hints for ecn */
2776 pf_mtag
->hdr
= mtod(m0
, struct ip
*);
2782 icmp_error(m0
, type
, code
, 0, 0);
2787 icmp6_error(m0
, type
, code
, 0);
/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#if INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		    (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		    (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		    (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
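
/*
 * Illustrative sketch: pf_match_addr() compares a and b only in the bits
 * selected by the mask m, and n inverts the sense of the result.  The
 * hypothetical IPv4-only helper below shows the same test for a single
 * 32-bit word, e.g. a /24 mask restricts the comparison to the top 24 bits.
 */
static int
pf_example_match_v4(u_int32_t a, u_int32_t b, u_int32_t mask, u_int8_t neg)
{
	int equal = ((a & mask) == (b & mask));

	return (neg ? !equal : equal);		/* n != 0 means "match if different" */
}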
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	int	i;

	switch (af) {
#if INET
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
#endif /* INET6 */
	}
	return (1);
}

int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}
int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(a1);
	NTOHS(a2);
	NTOHS(p);
#endif
	return (pf_match(op, a1, a2, p));
}
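
/*
 * Illustrative sketch: pf_match_port() byte-swaps its arguments on
 * little-endian machines before calling pf_match(), since ports arrive in
 * network order while the range operators compare host-order values.  A
 * hypothetical equivalent that always converts explicitly:
 */
static int
pf_example_match_port(u_int8_t op, u_int16_t lo, u_int16_t hi, u_int16_t port)
{
	return (pf_match(op, ntohs(lo), ntohs(hi), ntohs(port)));
}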
2914 #ifndef NO_APPLE_EXTENSIONS
2916 pf_match_xport(u_int8_t proto
, u_int8_t proto_variant
, union pf_rule_xport
*rx
,
2917 union pf_state_xport
*sx
)
2924 if (proto_variant
== PF_GRE_PPTP_VARIANT
)
2925 d
= (rx
->call_id
== sx
->call_id
);
2929 d
= (rx
->spi
== sx
->spi
);
2935 case IPPROTO_ICMPV6
:
2937 d
= pf_match_port(rx
->range
.op
,
2938 rx
->range
.port
[0], rx
->range
.port
[1],
int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}
2968 pf_match_tag(struct mbuf
*m
, struct pf_rule
*r
, struct pf_mtag
*pf_mtag
,
2973 *tag
= pf_mtag
->tag
;
2975 return ((!r
->match_tag_not
&& r
->match_tag
== *tag
) ||
2976 (r
->match_tag_not
&& r
->match_tag
!= *tag
));
2980 pf_tag_packet(struct mbuf
*m
, struct pf_mtag
*pf_mtag
, int tag
,
2981 unsigned int rtableid
)
2983 if (tag
<= 0 && !PF_RTABLEID_IS_VALID(rtableid
))
2986 if (pf_mtag
== NULL
&& (pf_mtag
= pf_get_mtag(m
)) == NULL
)
2991 if (PF_RTABLEID_IS_VALID(rtableid
))
2992 pf_mtag
->rtableid
= rtableid
;
2998 pf_step_into_anchor(int *depth
, struct pf_ruleset
**rs
, int n
,
2999 struct pf_rule
**r
, struct pf_rule
**a
, int *match
)
3001 struct pf_anchor_stackframe
*f
;
3003 (*r
)->anchor
->match
= 0;
3006 if (*depth
>= (int)sizeof (pf_anchor_stack
) /
3007 (int)sizeof (pf_anchor_stack
[0])) {
3008 printf("pf_step_into_anchor: stack overflow\n");
3009 *r
= TAILQ_NEXT(*r
, entries
);
3011 } else if (*depth
== 0 && a
!= NULL
)
3013 f
= pf_anchor_stack
+ (*depth
)++;
3016 if ((*r
)->anchor_wildcard
) {
3017 f
->parent
= &(*r
)->anchor
->children
;
3018 if ((f
->child
= RB_MIN(pf_anchor_node
, f
->parent
)) ==
3023 *rs
= &f
->child
->ruleset
;
3027 *rs
= &(*r
)->anchor
->ruleset
;
3029 *r
= TAILQ_FIRST((*rs
)->rules
[n
].active
.ptr
);
3033 pf_step_out_of_anchor(int *depth
, struct pf_ruleset
**rs
, int n
,
3034 struct pf_rule
**r
, struct pf_rule
**a
, int *match
)
3036 struct pf_anchor_stackframe
*f
;
3042 f
= pf_anchor_stack
+ *depth
- 1;
3043 if (f
->parent
!= NULL
&& f
->child
!= NULL
) {
3044 if (f
->child
->match
||
3045 (match
!= NULL
&& *match
)) {
3046 f
->r
->anchor
->match
= 1;
3049 f
->child
= RB_NEXT(pf_anchor_node
, f
->parent
, f
->child
);
3050 if (f
->child
!= NULL
) {
3051 *rs
= &f
->child
->ruleset
;
3052 *r
= TAILQ_FIRST((*rs
)->rules
[n
].active
.ptr
);
3060 if (*depth
== 0 && a
!= NULL
)
3063 if (f
->r
->anchor
->match
|| (match
!= NULL
&& *match
))
3064 quick
= f
->r
->quick
;
3065 *r
= TAILQ_NEXT(f
->r
, entries
);
3066 } while (*r
== NULL
);
void
pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
		    ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
		    ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
		    ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
		break;
#endif /* INET6 */
	}
}
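
/*
 * Illustrative sketch: pf_poolmask() keeps the pool address in the masked
 * bits and the original address in the unmasked bits, one 32-bit word at a
 * time: naddr = (raddr & rmask) | (~rmask & saddr).  With the hypothetical
 * values raddr = 198.51.100.0, rmask = 255.255.255.0 and saddr = 10.1.2.3
 * the helper below yields 198.51.100.3 -- the host bits of the source
 * survive the translation.
 */
static u_int32_t
pf_example_poolmask_v4(u_int32_t raddr, u_int32_t rmask, u_int32_t saddr)
{
	return ((raddr & rmask) | (~rmask & saddr));
}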
void
pf_addr_inc(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (addr->addr32[3] == 0xffffffff) {
			addr->addr32[3] = 0;
			if (addr->addr32[2] == 0xffffffff) {
				addr->addr32[2] = 0;
				if (addr->addr32[1] == 0xffffffff) {
					addr->addr32[1] = 0;
					addr->addr32[0] =
					    htonl(ntohl(addr->addr32[0]) + 1);
				} else
					addr->addr32[1] =
					    htonl(ntohl(addr->addr32[1]) + 1);
			} else
				addr->addr32[2] =
				    htonl(ntohl(addr->addr32[2]) + 1);
		} else
			addr->addr32[3] =
			    htonl(ntohl(addr->addr32[3]) + 1);
		break;
#endif /* INET6 */
	}
}
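
/*
 * Illustrative sketch: pf_addr_inc() advances an address by one in network
 * byte order and, for IPv6, ripples the carry from the lowest 32-bit word
 * towards the highest.  The hypothetical helper below expresses the same
 * carry walk as a loop over the four words (w[0] is the most significant).
 */
static void
pf_example_addr_inc_v6(u_int32_t w[4])
{
	int i;

	for (i = 3; i >= 0; i--) {
		w[i] = htonl(ntohl(w[i]) + 1);
		if (w[i] != 0)			/* no wrap, the carry stops here */
			break;
	}
}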
#define mix(a, b, c) \
	do {					\
		a -= b; a -= c; a ^= (c >> 13);	\
		b -= c; b -= a; b ^= (a << 8);	\
		c -= a; c -= b; c ^= (b >> 13);	\
		a -= b; a -= c; a ^= (c >> 12);	\
		b -= c; b -= a; b ^= (a << 16);	\
		c -= a; c -= b; c ^= (b >> 5);	\
		a -= b; a -= c; a ^= (c >> 3);	\
		b -= c; b -= a; b ^= (a << 10);	\
		c -= a; c -= b; c ^= (b >> 15);	\
	} while (0)

/*
 * hash function based on bridge_hash in if_bridge.c
 */
void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t	a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
#if INET
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
#endif /* INET6 */
	}
}
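
/*
 * Illustrative sketch: pf_hash() folds the source address and the pool key
 * through the mix() rounds above into a deterministic per-source value;
 * PF_POOL_SRCHASH then masks that value into the pool in pf_map_addr().
 * The hypothetical helper below shows how such a hash could select one of
 * pool_size addresses -- the kernel applies PF_POOLMASK rather than a
 * modulo, the modulo here only keeps the sketch short (pool_size > 0).
 */
static u_int32_t
pf_example_srchash_index(struct pf_addr *saddr, struct pf_poolhashkey *key,
    sa_family_t af, u_int32_t pool_size)
{
	struct pf_addr hash;

	pf_hash(saddr, &hash, key, af);
	return (hash.addr32[0] % pool_size);	/* stable index for this source */
}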
3186 pf_map_addr(sa_family_t af
, struct pf_rule
*r
, struct pf_addr
*saddr
,
3187 struct pf_addr
*naddr
, struct pf_addr
*init_addr
, struct pf_src_node
**sn
)
3189 unsigned char hash
[16];
3190 struct pf_pool
*rpool
= &r
->rpool
;
3191 struct pf_addr
*raddr
= &rpool
->cur
->addr
.v
.a
.addr
;
3192 struct pf_addr
*rmask
= &rpool
->cur
->addr
.v
.a
.mask
;
3193 struct pf_pooladdr
*acur
= rpool
->cur
;
3194 struct pf_src_node k
;
3196 if (*sn
== NULL
&& r
->rpool
.opts
& PF_POOL_STICKYADDR
&&
3197 (r
->rpool
.opts
& PF_POOL_TYPEMASK
) != PF_POOL_NONE
) {
3199 PF_ACPY(&k
.addr
, saddr
, af
);
3200 if (r
->rule_flag
& PFRULE_RULESRCTRACK
||
3201 r
->rpool
.opts
& PF_POOL_STICKYADDR
)
3205 pf_status
.scounters
[SCNT_SRC_NODE_SEARCH
]++;
3206 *sn
= RB_FIND(pf_src_tree
, &tree_src_tracking
, &k
);
3207 if (*sn
!= NULL
&& !PF_AZERO(&(*sn
)->raddr
, af
)) {
3208 PF_ACPY(naddr
, &(*sn
)->raddr
, af
);
3209 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
3210 printf("pf_map_addr: src tracking maps ");
3211 pf_print_host(&k
.addr
, 0, af
);
3213 pf_print_host(naddr
, 0, af
);
3220 if (rpool
->cur
->addr
.type
== PF_ADDR_NOROUTE
)
3222 if (rpool
->cur
->addr
.type
== PF_ADDR_DYNIFTL
) {
3226 if (rpool
->cur
->addr
.p
.dyn
->pfid_acnt4
< 1 &&
3227 (rpool
->opts
& PF_POOL_TYPEMASK
) !=
3230 raddr
= &rpool
->cur
->addr
.p
.dyn
->pfid_addr4
;
3231 rmask
= &rpool
->cur
->addr
.p
.dyn
->pfid_mask4
;
3236 if (rpool
->cur
->addr
.p
.dyn
->pfid_acnt6
< 1 &&
3237 (rpool
->opts
& PF_POOL_TYPEMASK
) !=
3240 raddr
= &rpool
->cur
->addr
.p
.dyn
->pfid_addr6
;
3241 rmask
= &rpool
->cur
->addr
.p
.dyn
->pfid_mask6
;
3245 } else if (rpool
->cur
->addr
.type
== PF_ADDR_TABLE
) {
3246 if ((rpool
->opts
& PF_POOL_TYPEMASK
) != PF_POOL_ROUNDROBIN
)
3247 return (1); /* unsupported */
3249 raddr
= &rpool
->cur
->addr
.v
.a
.addr
;
3250 rmask
= &rpool
->cur
->addr
.v
.a
.mask
;
3253 switch (rpool
->opts
& PF_POOL_TYPEMASK
) {
3255 PF_ACPY(naddr
, raddr
, af
);
3257 case PF_POOL_BITMASK
:
3258 PF_POOLMASK(naddr
, raddr
, rmask
, saddr
, af
);
3260 case PF_POOL_RANDOM
:
3261 if (init_addr
!= NULL
&& PF_AZERO(init_addr
, af
)) {
3265 rpool
->counter
.addr32
[0] = htonl(random());
3270 if (rmask
->addr32
[3] != 0xffffffff)
3271 rpool
->counter
.addr32
[3] =
3275 if (rmask
->addr32
[2] != 0xffffffff)
3276 rpool
->counter
.addr32
[2] =
3280 if (rmask
->addr32
[1] != 0xffffffff)
3281 rpool
->counter
.addr32
[1] =
3285 if (rmask
->addr32
[0] != 0xffffffff)
3286 rpool
->counter
.addr32
[0] =
3291 PF_POOLMASK(naddr
, raddr
, rmask
, &rpool
->counter
, af
);
3292 PF_ACPY(init_addr
, naddr
, af
);
3295 PF_AINC(&rpool
->counter
, af
);
3296 PF_POOLMASK(naddr
, raddr
, rmask
, &rpool
->counter
, af
);
3299 case PF_POOL_SRCHASH
:
3300 pf_hash(saddr
, (struct pf_addr
*)&hash
, &rpool
->key
, af
);
3301 PF_POOLMASK(naddr
, raddr
, rmask
, (struct pf_addr
*)&hash
, af
);
3303 case PF_POOL_ROUNDROBIN
:
3304 if (rpool
->cur
->addr
.type
== PF_ADDR_TABLE
) {
3305 if (!pfr_pool_get(rpool
->cur
->addr
.p
.tbl
,
3306 &rpool
->tblidx
, &rpool
->counter
,
3307 &raddr
, &rmask
, af
))
3309 } else if (rpool
->cur
->addr
.type
== PF_ADDR_DYNIFTL
) {
3310 if (!pfr_pool_get(rpool
->cur
->addr
.p
.dyn
->pfid_kt
,
3311 &rpool
->tblidx
, &rpool
->counter
,
3312 &raddr
, &rmask
, af
))
3314 } else if (pf_match_addr(0, raddr
, rmask
, &rpool
->counter
, af
))
3318 if ((rpool
->cur
= TAILQ_NEXT(rpool
->cur
, entries
)) == NULL
)
3319 rpool
->cur
= TAILQ_FIRST(&rpool
->list
);
3320 if (rpool
->cur
->addr
.type
== PF_ADDR_TABLE
) {
3322 if (pfr_pool_get(rpool
->cur
->addr
.p
.tbl
,
3323 &rpool
->tblidx
, &rpool
->counter
,
3324 &raddr
, &rmask
, af
)) {
3325 /* table contains no address of type 'af' */
3326 if (rpool
->cur
!= acur
)
3330 } else if (rpool
->cur
->addr
.type
== PF_ADDR_DYNIFTL
) {
3332 if (pfr_pool_get(rpool
->cur
->addr
.p
.dyn
->pfid_kt
,
3333 &rpool
->tblidx
, &rpool
->counter
,
3334 &raddr
, &rmask
, af
)) {
3335 /* table contains no address of type 'af' */
3336 if (rpool
->cur
!= acur
)
3341 raddr
= &rpool
->cur
->addr
.v
.a
.addr
;
3342 rmask
= &rpool
->cur
->addr
.v
.a
.mask
;
3343 PF_ACPY(&rpool
->counter
, raddr
, af
);
3347 PF_ACPY(naddr
, &rpool
->counter
, af
);
3348 if (init_addr
!= NULL
&& PF_AZERO(init_addr
, af
))
3349 PF_ACPY(init_addr
, naddr
, af
);
3350 PF_AINC(&rpool
->counter
, af
);
3354 PF_ACPY(&(*sn
)->raddr
, naddr
, af
);
3356 if (pf_status
.debug
>= PF_DEBUG_MISC
&&
3357 (rpool
->opts
& PF_POOL_TYPEMASK
) != PF_POOL_NONE
) {
3358 printf("pf_map_addr: selected address ");
3359 pf_print_host(naddr
, 0, af
);
3366 #ifndef NO_APPLE_EXTENSIONS
3368 pf_get_sport(struct pf_pdesc
*pd
, struct pfi_kif
*kif
, struct pf_rule
*r
,
3369 struct pf_addr
*saddr
, union pf_state_xport
*sxport
, struct pf_addr
*daddr
,
3370 union pf_state_xport
*dxport
, struct pf_addr
*naddr
,
3371 union pf_state_xport
*nxport
, struct pf_src_node
**sn
)
3374 pf_get_sport(sa_family_t af
, u_int8_t proto
, struct pf_rule
*r
,
3375 struct pf_addr
*saddr
, struct pf_addr
*daddr
, u_int16_t dport
,
3376 struct pf_addr
*naddr
, u_int16_t
*nport
, u_int16_t low
, u_int16_t high
,
3377 struct pf_src_node
**sn
)
3381 struct pf_state_key_cmp key
;
3382 struct pf_addr init_addr
;
3383 #ifndef NO_APPLE_EXTENSIONS
3385 sa_family_t af
= pd
->af
;
3386 u_int8_t proto
= pd
->proto
;
3387 unsigned int low
= r
->rpool
.proxy_port
[0];
3388 unsigned int high
= r
->rpool
.proxy_port
[1];
3393 bzero(&init_addr
, sizeof (init_addr
));
3394 if (pf_map_addr(af
, r
, saddr
, naddr
, &init_addr
, sn
))
3397 if (proto
== IPPROTO_ICMP
) {
3402 #ifndef NO_APPLE_EXTENSIONS
3404 return (0); /* No output necessary. */
3406 /*--- Special mapping rules for UDP ---*/
3407 if (proto
== IPPROTO_UDP
) {
3409 /*--- Never float IKE source port ---*/
3410 if (ntohs(sxport
->port
) == PF_IKE_PORT
) {
3411 nxport
->port
= sxport
->port
;
3415 /*--- Apply exterior mapping options ---*/
3416 if (r
->extmap
> PF_EXTMAP_APD
) {
3419 TAILQ_FOREACH(s
, &state_list
, entry_list
) {
3420 struct pf_state_key
*sk
= s
->state_key
;
3423 if (s
->nat_rule
.ptr
!= r
)
3425 if (sk
->proto
!= IPPROTO_UDP
|| sk
->af
!= af
)
3427 if (sk
->lan
.xport
.port
!= sxport
->port
)
3429 if (PF_ANEQ(&sk
->lan
.addr
, saddr
, af
))
3431 if (r
->extmap
< PF_EXTMAP_EI
&&
3432 PF_ANEQ(&sk
->ext
.addr
, daddr
, af
))
3435 nxport
->port
= sk
->gwy
.xport
.port
;
3439 } else if (proto
== IPPROTO_TCP
) {
3442 * APPLE MODIFICATION: <rdar://problem/6546358>
3443 * Fix allows....NAT to use a single binding for TCP session
3444 * with same source IP and source port
3446 TAILQ_FOREACH(s
, &state_list
, entry_list
) {
3447 struct pf_state_key
* sk
= s
->state_key
;
3450 if (s
->nat_rule
.ptr
!= r
)
3452 if (sk
->proto
!= IPPROTO_TCP
|| sk
->af
!= af
)
3454 if (sk
->lan
.xport
.port
!= sxport
->port
)
3456 if (!(PF_AEQ(&sk
->lan
.addr
, saddr
, af
)))
3458 nxport
->port
= sk
->gwy
.xport
.port
;
3466 PF_ACPY(&key
.ext
.addr
, daddr
, key
.af
);
3467 PF_ACPY(&key
.gwy
.addr
, naddr
, key
.af
);
3468 #ifndef NO_APPLE_EXTENSIONS
3471 key
.proto_variant
= r
->extfilter
;
3474 key
.proto_variant
= 0;
3478 key
.ext
.xport
= *dxport
;
3480 memset(&key
.ext
.xport
, 0, sizeof (key
.ext
.xport
));
3482 key
.ext
.port
= dport
;
3485 * port search; start random, step;
3486 * similar 2 portloop in in_pcbbind
3488 if (!(proto
== IPPROTO_TCP
|| proto
== IPPROTO_UDP
||
3489 proto
== IPPROTO_ICMP
)) {
3490 #ifndef NO_APPLE_EXTENSIONS
3492 key
.gwy
.xport
= *dxport
;
3494 memset(&key
.gwy
.xport
, 0,
3495 sizeof (key
.ext
.xport
));
3497 key
.gwy
.port
= dport
;
3499 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
)
3501 } else if (low
== 0 && high
== 0) {
3502 #ifndef NO_APPLE_EXTENSIONS
3503 key
.gwy
.xport
= *nxport
;
3505 key
.gwy
.port
= *nport
;
3507 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
)
3509 } else if (low
== high
) {
3510 #ifndef NO_APPLE_EXTENSIONS
3511 key
.gwy
.xport
.port
= htons(low
);
3512 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
) {
3513 nxport
->port
= htons(low
);
3517 key
.gwy
.port
= htons(low
);
3518 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
) {
3519 *nport
= htons(low
);
3524 #ifndef NO_APPLE_EXTENSIONS
3535 cut
= htonl(random()) % (1 + high
- low
) + low
;
3536 /* low <= cut <= high */
3537 for (tmp
= cut
; tmp
<= high
; ++(tmp
)) {
3538 #ifndef NO_APPLE_EXTENSIONS
3539 key
.gwy
.xport
.port
= htons(tmp
);
3540 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3542 nxport
->port
= htons(tmp
);
3546 key
.gwy
.port
= htons(tmp
);
3547 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3549 *nport
= htons(tmp
);
3554 for (tmp
= cut
- 1; tmp
>= low
; --(tmp
)) {
3555 #ifndef NO_APPLE_EXTENSIONS
3556 key
.gwy
.xport
.port
= htons(tmp
);
3557 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3559 nxport
->port
= htons(tmp
);
3563 key
.gwy
.port
= htons(tmp
);
3564 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3566 *nport
= htons(tmp
);
3573 switch (r
->rpool
.opts
& PF_POOL_TYPEMASK
) {
3574 case PF_POOL_RANDOM
:
3575 case PF_POOL_ROUNDROBIN
:
3576 if (pf_map_addr(af
, r
, saddr
, naddr
, &init_addr
, sn
))
3580 case PF_POOL_SRCHASH
:
3581 case PF_POOL_BITMASK
:
3585 } while (!PF_AEQ(&init_addr
, naddr
, af
));
3587 return (1); /* none available */
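
/*
 * Illustrative sketch: the proxy-port search above starts at a random point
 * in [low, high], probes upward to high and then downward to low, and takes
 * the first port that has no existing state (the in-kernel test is
 * pf_find_state_all() against the candidate state key).  The helper below
 * is a hypothetical standalone version of that search order; the
 * port_in_use callback stands in for the state lookup.
 */
static int
pf_example_pick_port(u_int16_t low, u_int16_t high,
    int (*port_in_use)(u_int16_t), u_int16_t *chosen)
{
	u_int32_t cut, tmp;

	cut = (random() % (1 + high - low)) + low;	/* low <= cut <= high */
	for (tmp = cut; tmp <= high; ++tmp) {		/* probe upward first */
		if (!port_in_use((u_int16_t)tmp)) {
			*chosen = htons((u_int16_t)tmp);
			return (0);
		}
	}
	for (tmp = cut - 1; tmp + 1 > low; --tmp) {	/* then downward */
		if (!port_in_use((u_int16_t)tmp)) {
			*chosen = htons((u_int16_t)tmp);
			return (0);
		}
	}
	return (1);					/* none available */
}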
3590 #ifndef NO_APPLE_EXTENSIONS
3591 static struct pf_rule
*
3592 pf_match_translation(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
,
3593 int direction
, struct pfi_kif
*kif
, struct pf_addr
*saddr
,
3594 union pf_state_xport
*sxport
, struct pf_addr
*daddr
,
3595 union pf_state_xport
*dxport
, int rs_num
)
3598 pf_match_translation(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
,
3599 int direction
, struct pfi_kif
*kif
, struct pf_addr
*saddr
, u_int16_t sport
,
3600 struct pf_addr
*daddr
, u_int16_t dport
, int rs_num
)
3603 struct pf_rule
*r
, *rm
= NULL
;
3604 struct pf_ruleset
*ruleset
= NULL
;
3606 unsigned int rtableid
= IFSCOPE_NONE
;
3609 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[rs_num
].active
.ptr
);
3610 while (r
&& rm
== NULL
) {
3611 struct pf_rule_addr
*src
= NULL
, *dst
= NULL
;
3612 struct pf_addr_wrap
*xdst
= NULL
;
3613 #ifndef NO_APPLE_EXTENSIONS
3614 struct pf_addr_wrap
*xsrc
= NULL
;
3615 union pf_rule_xport rdrxport
;
3618 if (r
->action
== PF_BINAT
&& direction
== PF_IN
) {
3620 if (r
->rpool
.cur
!= NULL
)
3621 xdst
= &r
->rpool
.cur
->addr
;
3622 #ifndef NO_APPLE_EXTENSIONS
3623 } else if (r
->action
== PF_RDR
&& direction
== PF_OUT
) {
3626 if (r
->rpool
.cur
!= NULL
) {
3627 rdrxport
.range
.op
= PF_OP_EQ
;
3628 rdrxport
.range
.port
[0] =
3629 htons(r
->rpool
.proxy_port
[0]);
3630 xsrc
= &r
->rpool
.cur
->addr
;
3639 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
)
3640 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
3641 else if (r
->direction
&& r
->direction
!= direction
)
3642 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
3643 else if (r
->af
&& r
->af
!= pd
->af
)
3644 r
= r
->skip
[PF_SKIP_AF
].ptr
;
3645 else if (r
->proto
&& r
->proto
!= pd
->proto
)
3646 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
3647 #ifndef NO_APPLE_EXTENSIONS
3648 else if (xsrc
&& PF_MISMATCHAW(xsrc
, saddr
, pd
->af
, 0, NULL
))
3649 r
= TAILQ_NEXT(r
, entries
);
3650 else if (!xsrc
&& PF_MISMATCHAW(&src
->addr
, saddr
, pd
->af
,
3652 r
= TAILQ_NEXT(r
, entries
);
3653 else if (xsrc
&& (!rdrxport
.range
.port
[0] ||
3654 !pf_match_xport(r
->proto
, r
->proto_variant
, &rdrxport
,
3656 r
= TAILQ_NEXT(r
, entries
);
3657 else if (!xsrc
&& !pf_match_xport(r
->proto
,
3658 r
->proto_variant
, &src
->xport
, sxport
))
3660 else if (PF_MISMATCHAW(&src
->addr
, saddr
, pd
->af
,
3662 r
= r
->skip
[src
== &r
->src
? PF_SKIP_SRC_ADDR
:
3663 PF_SKIP_DST_ADDR
].ptr
;
3664 else if (src
->port_op
&& !pf_match_port(src
->port_op
,
3665 src
->port
[0], src
->port
[1], sport
))
3667 r
= r
->skip
[src
== &r
->src
? PF_SKIP_SRC_PORT
:
3668 PF_SKIP_DST_PORT
].ptr
;
3669 else if (dst
!= NULL
&&
3670 PF_MISMATCHAW(&dst
->addr
, daddr
, pd
->af
, dst
->neg
, NULL
))
3671 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
3672 else if (xdst
!= NULL
&& PF_MISMATCHAW(xdst
, daddr
, pd
->af
,
3674 r
= TAILQ_NEXT(r
, entries
);
3675 #ifndef NO_APPLE_EXTENSIONS
3676 else if (dst
&& !pf_match_xport(r
->proto
, r
->proto_variant
,
3677 &dst
->xport
, dxport
))
3679 else if (dst
!= NULL
&& dst
->port_op
&&
3680 !pf_match_port(dst
->port_op
, dst
->port
[0],
3681 dst
->port
[1], dport
))
3683 r
= r
->skip
[PF_SKIP_DST_PORT
].ptr
;
3684 else if (r
->match_tag
&& !pf_match_tag(m
, r
, pd
->pf_mtag
, &tag
))
3685 r
= TAILQ_NEXT(r
, entries
);
3686 else if (r
->os_fingerprint
!= PF_OSFP_ANY
&& (pd
->proto
!=
3687 IPPROTO_TCP
|| !pf_osfp_match(pf_osfp_fingerprint(pd
, m
,
3688 off
, pd
->hdr
.tcp
), r
->os_fingerprint
)))
3689 r
= TAILQ_NEXT(r
, entries
);
3693 if (PF_RTABLEID_IS_VALID(r
->rtableid
))
3694 rtableid
= r
->rtableid
;
3695 if (r
->anchor
== NULL
) {
3698 pf_step_into_anchor(&asd
, &ruleset
, rs_num
,
3702 pf_step_out_of_anchor(&asd
, &ruleset
, rs_num
, &r
,
3705 if (pf_tag_packet(m
, pd
->pf_mtag
, tag
, rtableid
))
3707 if (rm
!= NULL
&& (rm
->action
== PF_NONAT
||
3708 rm
->action
== PF_NORDR
|| rm
->action
== PF_NOBINAT
))
3713 #ifndef NO_APPLE_EXTENSIONS
3714 static struct pf_rule
*
3715 pf_get_translation_aux(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
,
3716 int direction
, struct pfi_kif
*kif
, struct pf_src_node
**sn
,
3717 struct pf_addr
*saddr
, union pf_state_xport
*sxport
, struct pf_addr
*daddr
,
3718 union pf_state_xport
*dxport
, struct pf_addr
*naddr
,
3719 union pf_state_xport
*nxport
)
3722 pf_get_translation(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
, int direction
,
3723 struct pfi_kif
*kif
, struct pf_src_node
**sn
,
3724 struct pf_addr
*saddr
, u_int16_t sport
,
3725 struct pf_addr
*daddr
, u_int16_t dport
,
3726 struct pf_addr
*naddr
, u_int16_t
*nport
)
3729 struct pf_rule
*r
= NULL
;
3731 #ifndef NO_APPLE_EXTENSIONS
3732 if (direction
== PF_OUT
) {
3733 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3734 sxport
, daddr
, dxport
, PF_RULESET_BINAT
);
3736 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3737 saddr
, sxport
, daddr
, dxport
, PF_RULESET_RDR
);
3739 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3740 saddr
, sxport
, daddr
, dxport
, PF_RULESET_NAT
);
3742 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3743 sxport
, daddr
, dxport
, PF_RULESET_RDR
);
3745 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3746 saddr
, sxport
, daddr
, dxport
, PF_RULESET_BINAT
);
3749 if (direction
== PF_OUT
) {
3750 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3751 sport
, daddr
, dport
, PF_RULESET_BINAT
);
3753 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3754 saddr
, sport
, daddr
, dport
, PF_RULESET_NAT
);
3756 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3757 sport
, daddr
, dport
, PF_RULESET_RDR
);
3759 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3760 saddr
, sport
, daddr
, dport
, PF_RULESET_BINAT
);
3765 switch (r
->action
) {
3771 #ifndef NO_APPLE_EXTENSIONS
3772 if (pf_get_sport(pd
, kif
, r
, saddr
, sxport
, daddr
,
3773 dxport
, naddr
, nxport
, sn
)) {
3775 if (pf_get_sport(pd
->af
, pd
->proto
, r
, saddr
,
3776 daddr
, dport
, naddr
, nport
, r
->rpool
.proxy_port
[0],
3777 r
->rpool
.proxy_port
[1], sn
)) {
3779 DPFPRINTF(PF_DEBUG_MISC
,
3780 ("pf: NAT proxy port allocation "
3782 r
->rpool
.proxy_port
[0],
3783 r
->rpool
.proxy_port
[1]));
3788 switch (direction
) {
3790 if (r
->rpool
.cur
->addr
.type
==
3795 if (r
->rpool
.cur
->addr
.p
.dyn
->
3799 &r
->rpool
.cur
->addr
.p
.dyn
->
3801 &r
->rpool
.cur
->addr
.p
.dyn
->
3808 if (r
->rpool
.cur
->addr
.p
.dyn
->
3812 &r
->rpool
.cur
->addr
.p
.dyn
->
3814 &r
->rpool
.cur
->addr
.p
.dyn
->
3822 &r
->rpool
.cur
->addr
.v
.a
.addr
,
3823 &r
->rpool
.cur
->addr
.v
.a
.mask
,
3828 if (r
->src
.addr
.type
== PF_ADDR_DYNIFTL
) {
3832 if (r
->src
.addr
.p
.dyn
->
3836 &r
->src
.addr
.p
.dyn
->
3838 &r
->src
.addr
.p
.dyn
->
3845 if (r
->src
.addr
.p
.dyn
->
3849 &r
->src
.addr
.p
.dyn
->
3851 &r
->src
.addr
.p
.dyn
->
3859 &r
->src
.addr
.v
.a
.addr
,
3860 &r
->src
.addr
.v
.a
.mask
, daddr
,
3866 #ifndef NO_APPLE_EXTENSIONS
3867 switch (direction
) {
3869 if (r
->dst
.addr
.type
== PF_ADDR_DYNIFTL
) {
3873 if (r
->dst
.addr
.p
.dyn
->
3877 &r
->dst
.addr
.p
.dyn
->
3879 &r
->dst
.addr
.p
.dyn
->
3886 if (r
->dst
.addr
.p
.dyn
->
3890 &r
->dst
.addr
.p
.dyn
->
3892 &r
->dst
.addr
.p
.dyn
->
3900 &r
->dst
.addr
.v
.a
.addr
,
3901 &r
->dst
.addr
.v
.a
.mask
,
3904 if (nxport
&& r
->dst
.xport
.range
.port
[0])
3906 r
->dst
.xport
.range
.port
[0];
3909 if (pf_map_addr(pd
->af
, r
, saddr
,
3912 if ((r
->rpool
.opts
& PF_POOL_TYPEMASK
) ==
3914 PF_POOLMASK(naddr
, naddr
,
3915 &r
->rpool
.cur
->addr
.v
.a
.mask
, daddr
,
3918 if (nxport
&& dxport
) {
3919 if (r
->rpool
.proxy_port
[1]) {
3920 u_int32_t tmp_nport
;
3923 ((ntohs(dxport
->port
) -
3924 ntohs(r
->dst
.xport
.range
.
3926 (r
->rpool
.proxy_port
[1] -
3927 r
->rpool
.proxy_port
[0] +
3928 1)) + r
->rpool
.proxy_port
[0];
3930 /* wrap around if necessary */
3931 if (tmp_nport
> 65535)
3934 htons((u_int16_t
)tmp_nport
);
3935 } else if (r
->rpool
.proxy_port
[0]) {
3936 nxport
->port
= htons(r
->rpool
.
3943 if (pf_map_addr(pd
->af
, r
, saddr
, naddr
, NULL
, sn
))
3945 if ((r
->rpool
.opts
& PF_POOL_TYPEMASK
) ==
3947 PF_POOLMASK(naddr
, naddr
,
3948 &r
->rpool
.cur
->addr
.v
.a
.mask
, daddr
,
3951 if (r
->rpool
.proxy_port
[1]) {
3952 u_int32_t tmp_nport
;
3954 tmp_nport
= ((ntohs(dport
) -
3955 ntohs(r
->dst
.port
[0])) %
3956 (r
->rpool
.proxy_port
[1] -
3957 r
->rpool
.proxy_port
[0] + 1)) +
3958 r
->rpool
.proxy_port
[0];
3960 /* wrap around if necessary */
3961 if (tmp_nport
> 65535)
3963 *nport
= htons((u_int16_t
)tmp_nport
);
3964 } else if (r
->rpool
.proxy_port
[0])
3965 *nport
= htons(r
->rpool
.proxy_port
[0]);
3978 pf_socket_lookup(int direction
, struct pf_pdesc
*pd
)
3980 struct pf_addr
*saddr
, *daddr
;
3981 u_int16_t sport
, dport
;
3982 struct inpcbinfo
*pi
;
3987 pd
->lookup
.uid
= UID_MAX
;
3988 pd
->lookup
.gid
= GID_MAX
;
3989 pd
->lookup
.pid
= NO_PID
;
3991 switch (pd
->proto
) {
3993 if (pd
->hdr
.tcp
== NULL
)
3995 sport
= pd
->hdr
.tcp
->th_sport
;
3996 dport
= pd
->hdr
.tcp
->th_dport
;
4000 if (pd
->hdr
.udp
== NULL
)
4002 sport
= pd
->hdr
.udp
->uh_sport
;
4003 dport
= pd
->hdr
.udp
->uh_dport
;
4009 if (direction
== PF_IN
) {
4024 inp
= in_pcblookup_hash_exists(pi
, saddr
->v4
, sport
, daddr
->v4
, dport
,
4025 0, &pd
->lookup
.uid
, &pd
->lookup
.gid
, NULL
);
4028 struct in6_addr s6
, d6
;
4030 memset(&s6
, 0, sizeof (s6
));
4031 s6
.s6_addr16
[5] = htons(0xffff);
4032 memcpy(&s6
.s6_addr32
[3], &saddr
->v4
,
4033 sizeof (saddr
->v4
));
4035 memset(&d6
, 0, sizeof (d6
));
4036 d6
.s6_addr16
[5] = htons(0xffff);
4037 memcpy(&d6
.s6_addr32
[3], &daddr
->v4
,
4038 sizeof (daddr
->v4
));
4040 inp
= in6_pcblookup_hash_exists(pi
, &s6
, sport
,
4041 &d6
, dport
, 0, &pd
->lookup
.uid
, &pd
->lookup
.gid
, NULL
);
4043 inp
= in_pcblookup_hash_exists(pi
, saddr
->v4
, sport
,
4044 daddr
->v4
, dport
, INPLOOKUP_WILDCARD
, &pd
->lookup
.uid
, &pd
->lookup
.gid
, NULL
);
4046 inp
= in6_pcblookup_hash_exists(pi
, &s6
, sport
,
4047 &d6
, dport
, INPLOOKUP_WILDCARD
,
4048 &pd
->lookup
.uid
, &pd
->lookup
.gid
, NULL
);
4056 inp
= in_pcblookup_hash_exists(pi
, saddr
->v4
, sport
,
4057 daddr
->v4
, dport
, INPLOOKUP_WILDCARD
,
4058 &pd
->lookup
.uid
, &pd
->lookup
.gid
, NULL
);
4067 inp
= in6_pcblookup_hash_exists(pi
, &saddr
->v6
, sport
, &daddr
->v6
,
4068 dport
, 0, &pd
->lookup
.uid
, &pd
->lookup
.gid
, NULL
);
4070 inp
= in6_pcblookup_hash_exists(pi
, &saddr
->v6
, sport
,
4071 &daddr
->v6
, dport
, INPLOOKUP_WILDCARD
,
4072 &pd
->lookup
.uid
, &pd
->lookup
.gid
, NULL
);
4087 pf_get_wscale(struct mbuf
*m
, int off
, u_int16_t th_off
, sa_family_t af
)
4091 u_int8_t
*opt
, optlen
;
4092 u_int8_t wscale
= 0;
4094 hlen
= th_off
<< 2; /* hlen <= sizeof (hdr) */
4095 if (hlen
<= (int)sizeof (struct tcphdr
))
4097 if (!pf_pull_hdr(m
, off
, hdr
, hlen
, NULL
, NULL
, af
))
4099 opt
= hdr
+ sizeof (struct tcphdr
);
4100 hlen
-= sizeof (struct tcphdr
);
4110 if (wscale
> TCP_MAX_WINSHIFT
)
4111 wscale
= TCP_MAX_WINSHIFT
;
4112 wscale
|= PF_WSCALE_FLAG
;
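
/*
 * Illustrative sketch: pf_get_wscale() above and pf_get_mss() below both
 * copy the TCP header plus options into a local buffer and walk the option
 * list -- EOL ends the walk, NOP advances by one byte, and every other
 * option advances by its own length byte.  The hypothetical helper below
 * shows that walk in isolation, returning a pointer to the payload of one
 * requested option kind.
 */
static int
pf_example_find_tcpopt(const u_int8_t *opt, int len, u_int8_t kind,
    const u_int8_t **val, u_int8_t *vlen)
{
	while (len >= 2) {
		if (*opt == TCPOPT_EOL)		/* end of option list */
			break;
		if (*opt == TCPOPT_NOP) {	/* single byte of padding */
			opt++;
			len--;
			continue;
		}
		if (opt[1] < 2 || opt[1] > len)	/* malformed length octet */
			break;
		if (*opt == kind) {
			*val = opt + 2;
			*vlen = opt[1] - 2;
			return (1);
		}
		len -= opt[1];
		opt += opt[1];
	}
	return (0);
}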
4127 pf_get_mss(struct mbuf
*m
, int off
, u_int16_t th_off
, sa_family_t af
)
4131 u_int8_t
*opt
, optlen
;
4132 u_int16_t mss
= tcp_mssdflt
;
4134 hlen
= th_off
<< 2; /* hlen <= sizeof (hdr) */
4135 if (hlen
<= (int)sizeof (struct tcphdr
))
4137 if (!pf_pull_hdr(m
, off
, hdr
, hlen
, NULL
, NULL
, af
))
4139 opt
= hdr
+ sizeof (struct tcphdr
);
4140 hlen
-= sizeof (struct tcphdr
);
4141 while (hlen
>= TCPOLEN_MAXSEG
) {
4149 bcopy((caddr_t
)(opt
+ 2), (caddr_t
)&mss
, 2);
4150 #if BYTE_ORDER != BIG_ENDIAN
4167 pf_calc_mss(struct pf_addr
*addr
, sa_family_t af
, u_int16_t offer
)
4170 struct sockaddr_in
*dst
;
4174 struct sockaddr_in6
*dst6
;
4175 struct route_in6 ro6
;
4177 struct rtentry
*rt
= NULL
;
4179 u_int16_t mss
= tcp_mssdflt
;
4184 hlen
= sizeof (struct ip
);
4185 bzero(&ro
, sizeof (ro
));
4186 dst
= (struct sockaddr_in
*)&ro
.ro_dst
;
4187 dst
->sin_family
= AF_INET
;
4188 dst
->sin_len
= sizeof (*dst
);
4189 dst
->sin_addr
= addr
->v4
;
4196 hlen
= sizeof (struct ip6_hdr
);
4197 bzero(&ro6
, sizeof (ro6
));
4198 dst6
= (struct sockaddr_in6
*)&ro6
.ro_dst
;
4199 dst6
->sin6_family
= AF_INET6
;
4200 dst6
->sin6_len
= sizeof (*dst6
);
4201 dst6
->sin6_addr
= addr
->v6
;
4202 rtalloc((struct route
*)&ro
);
4207 panic("pf_calc_mss: not AF_INET or AF_INET6!");
4211 if (rt
&& rt
->rt_ifp
) {
4212 mss
= rt
->rt_ifp
->if_mtu
- hlen
- sizeof (struct tcphdr
);
4213 mss
= max(tcp_mssdflt
, mss
);
4216 mss
= min(mss
, offer
);
4217 mss
= max(mss
, 64); /* sanity - at least max opt space */
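
/*
 * Illustrative sketch: pf_calc_mss() above derives the MSS to advertise
 * from the MTU of the route's interface, MTU minus the IP and TCP header
 * sizes, clamped between tcp_mssdflt and the peer's offer: a 1500-byte
 * Ethernet MTU with a 20-byte IPv4 header gives 1500 - 20 - 20 = 1460.
 * The hypothetical helper below shows only that arithmetic and assumes
 * if_mtu is at least large enough to hold both headers.
 */
static u_int16_t
pf_example_mss_from_mtu(u_int32_t if_mtu, u_int32_t ip_hlen, u_int16_t offer)
{
	u_int32_t mss = if_mtu - ip_hlen - sizeof (struct tcphdr);

	if (mss < (u_int32_t)tcp_mssdflt)	/* never below the default */
		mss = tcp_mssdflt;
	if (mss > offer)			/* never above what the peer offered */
		mss = offer;
	if (mss < 64)				/* sanity - at least max opt space */
		mss = 64;
	return ((u_int16_t)mss);
}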
4222 pf_set_rt_ifp(struct pf_state
*s
, struct pf_addr
*saddr
)
4224 struct pf_rule
*r
= s
->rule
.ptr
;
4227 if (!r
->rt
|| r
->rt
== PF_FASTROUTE
)
4229 switch (s
->state_key
->af
) {
4232 pf_map_addr(AF_INET
, r
, saddr
, &s
->rt_addr
, NULL
,
4234 s
->rt_kif
= r
->rpool
.cur
->kif
;
4239 pf_map_addr(AF_INET6
, r
, saddr
, &s
->rt_addr
, NULL
,
4241 s
->rt_kif
= r
->rpool
.cur
->kif
;
void
pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
{
	s->state_key = sk;
	sk->refcnt++;

	/* list is sorted, if-bound states before floating */
	if (tail)
		TAILQ_INSERT_TAIL(&sk->states, s, next);
	else
		TAILQ_INSERT_HEAD(&sk->states, s, next);
}

void
pf_detach_state(struct pf_state *s, int flags)
{
	struct pf_state_key	*sk = s->state_key;

	if (sk == NULL)
		return;

	s->state_key = NULL;
	TAILQ_REMOVE(&sk->states, s, next);
	if (--sk->refcnt == 0) {
		if (!(flags & PF_DT_SKIP_EXTGWY))
			RB_REMOVE(pf_state_tree_ext_gwy,
			    &pf_statetbl_ext_gwy, sk);
		if (!(flags & PF_DT_SKIP_LANEXT))
			RB_REMOVE(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext, sk);
#ifndef NO_APPLE_EXTENSIONS
		if (sk->app_state)
			pool_put(&pf_app_state_pl, sk->app_state);
#endif
		pool_put(&pf_state_key_pl, sk);
	}
}

struct pf_state_key *
pf_alloc_state_key(struct pf_state *s)
{
	struct pf_state_key	*sk;

	if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL)
		return (NULL);
	bzero(sk, sizeof (*sk));
	TAILQ_INIT(&sk->states);
	pf_attach_state(sk, s, 0);
	return (sk);
}
u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	MD5_CTX ctx;
	u_int32_t digest[4];

	if (pf_tcp_secret_init == 0) {
		read_random(pf_tcp_secret, sizeof (pf_tcp_secret));
		MD5Init(&pf_tcp_secret_ctx);
		MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
		    sizeof (pf_tcp_secret));
		pf_tcp_secret_init = 1;
	}
	ctx = pf_tcp_secret_ctx;

	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
	if (pd->af == AF_INET6) {
		MD5Update(&ctx, (char *)&pd->src->v6, sizeof (struct in6_addr));
		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof (struct in6_addr));
	} else {
		MD5Update(&ctx, (char *)&pd->src->v4, sizeof (struct in_addr));
		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof (struct in_addr));
	}
	MD5Final((u_char *)digest, &ctx);
	pf_tcp_iss_off += 4096;
	return (digest[0] + random() + pf_tcp_iss_off);
}
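
/*
 * Illustrative sketch: pf_tcp_iss() above derives the modulated initial
 * sequence number from an MD5 digest of a boot-time random secret plus the
 * connection's ports and addresses, then adds a random component and a
 * monotonically bumped offset so successive connections keep moving forward
 * in sequence space.  The hypothetical helper below shows the same shape
 * for an opaque tuple; it only assumes the MD5 primitives already included
 * from libkern/crypto/md5.h.
 */
static u_int32_t
pf_example_isn(MD5_CTX secret_ctx, void *tuple, unsigned int tuple_len,
    u_int32_t *iss_off)
{
	u_int32_t digest[4];

	/* secret_ctx arrives by value: a private copy already seeded with the secret */
	MD5Update(&secret_ctx, tuple, tuple_len);
	MD5Final((u_char *)digest, &secret_ctx);

	*iss_off += 4096;			/* keep successive ISNs distinct */
	return (digest[0] + random() + *iss_off);
}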
4329 pf_test_rule(struct pf_rule
**rm
, struct pf_state
**sm
, int direction
,
4330 struct pfi_kif
*kif
, struct mbuf
*m
, int off
, void *h
,
4331 struct pf_pdesc
*pd
, struct pf_rule
**am
, struct pf_ruleset
**rsm
,
4332 struct ifqueue
*ifq
)
4335 struct pf_rule
*nr
= NULL
;
4336 struct pf_addr
*saddr
= pd
->src
, *daddr
= pd
->dst
;
4337 #ifdef NO_APPLE_EXTENSIONS
4338 u_int16_t bport
, nport
= 0;
4340 sa_family_t af
= pd
->af
;
4341 struct pf_rule
*r
, *a
= NULL
;
4342 struct pf_ruleset
*ruleset
= NULL
;
4343 struct pf_src_node
*nsn
= NULL
;
4344 struct tcphdr
*th
= pd
->hdr
.tcp
;
4346 int rewrite
= 0, hdrlen
= 0;
4348 unsigned int rtableid
= IFSCOPE_NONE
;
4352 u_int16_t mss
= tcp_mssdflt
;
4353 #ifdef NO_APPLE_EXTENSIONS
4354 u_int16_t sport
, dport
;
4356 u_int8_t icmptype
= 0, icmpcode
= 0;
4358 #ifndef NO_APPLE_EXTENSIONS
4359 struct pf_grev1_hdr
*grev1
= pd
->hdr
.grev1
;
4360 union pf_state_xport bxport
, nxport
, sxport
, dxport
;
4363 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
4365 if (direction
== PF_IN
&& pf_check_congestion(ifq
)) {
4366 REASON_SET(&reason
, PFRES_CONGEST
);
4370 #ifndef NO_APPLE_EXTENSIONS
4376 sport
= dport
= hdrlen
= 0;
4379 switch (pd
->proto
) {
4381 #ifndef NO_APPLE_EXTENSIONS
4382 sxport
.port
= th
->th_sport
;
4383 dxport
.port
= th
->th_dport
;
4385 sport
= th
->th_sport
;
4386 dport
= th
->th_dport
;
4388 hdrlen
= sizeof (*th
);
4391 #ifndef NO_APPLE_EXTENSIONS
4392 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4393 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4395 sport
= pd
->hdr
.udp
->uh_sport
;
4396 dport
= pd
->hdr
.udp
->uh_dport
;
4398 hdrlen
= sizeof (*pd
->hdr
.udp
);
4402 if (pd
->af
!= AF_INET
)
4404 #ifndef NO_APPLE_EXTENSIONS
4405 sxport
.port
= dxport
.port
= pd
->hdr
.icmp
->icmp_id
;
4406 hdrlen
= ICMP_MINLEN
;
4408 sport
= dport
= pd
->hdr
.icmp
->icmp_id
;
4410 icmptype
= pd
->hdr
.icmp
->icmp_type
;
4411 icmpcode
= pd
->hdr
.icmp
->icmp_code
;
4413 if (icmptype
== ICMP_UNREACH
||
4414 icmptype
== ICMP_SOURCEQUENCH
||
4415 icmptype
== ICMP_REDIRECT
||
4416 icmptype
== ICMP_TIMXCEED
||
4417 icmptype
== ICMP_PARAMPROB
)
4422 case IPPROTO_ICMPV6
:
4423 if (pd
->af
!= AF_INET6
)
4425 #ifndef NO_APPLE_EXTENSIONS
4426 sxport
.port
= dxport
.port
= pd
->hdr
.icmp6
->icmp6_id
;
4428 sport
= dport
= pd
->hdr
.icmp6
->icmp6_id
;
4430 hdrlen
= sizeof (*pd
->hdr
.icmp6
);
4431 icmptype
= pd
->hdr
.icmp6
->icmp6_type
;
4432 icmpcode
= pd
->hdr
.icmp6
->icmp6_code
;
4434 if (icmptype
== ICMP6_DST_UNREACH
||
4435 icmptype
== ICMP6_PACKET_TOO_BIG
||
4436 icmptype
== ICMP6_TIME_EXCEEDED
||
4437 icmptype
== ICMP6_PARAM_PROB
)
4441 #ifndef NO_APPLE_EXTENSIONS
4443 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
) {
4444 sxport
.call_id
= dxport
.call_id
=
4445 pd
->hdr
.grev1
->call_id
;
4446 hdrlen
= sizeof (*pd
->hdr
.grev1
);
4451 dxport
.spi
= pd
->hdr
.esp
->spi
;
4452 hdrlen
= sizeof (*pd
->hdr
.esp
);
4457 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_FILTER
].active
.ptr
);
4459 if (direction
== PF_OUT
) {
4460 #ifndef NO_APPLE_EXTENSIONS
4461 bxport
= nxport
= sxport
;
4462 /* check outgoing packet for BINAT/NAT */
4463 if ((nr
= pf_get_translation_aux(pd
, m
, off
, PF_OUT
, kif
, &nsn
,
4464 saddr
, &sxport
, daddr
, &dxport
, &pd
->naddr
, &nxport
)) !=
4467 bport
= nport
= sport
;
4468 /* check outgoing packet for BINAT/NAT */
4469 if ((nr
= pf_get_translation(pd
, m
, off
, PF_OUT
, kif
, &nsn
,
4470 saddr
, sport
, daddr
, dport
, &pd
->naddr
, &nport
)) != NULL
) {
4472 PF_ACPY(&pd
->baddr
, saddr
, af
);
4473 switch (pd
->proto
) {
4475 #ifndef NO_APPLE_EXTENSIONS
4476 pf_change_ap(direction
, pd
->mp
, saddr
,
4477 &th
->th_sport
, pd
->ip_sum
, &th
->th_sum
,
4478 &pd
->naddr
, nxport
.port
, 0, af
);
4479 sxport
.port
= th
->th_sport
;
4481 pf_change_ap(saddr
, &th
->th_sport
, pd
->ip_sum
,
4482 &th
->th_sum
, &pd
->naddr
, nport
, 0, af
);
4483 sport
= th
->th_sport
;
4488 #ifndef NO_APPLE_EXTENSIONS
4489 pf_change_ap(direction
, pd
->mp
, saddr
,
4490 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4491 &pd
->hdr
.udp
->uh_sum
, &pd
->naddr
,
4492 nxport
.port
, 1, af
);
4493 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4495 pf_change_ap(saddr
, &pd
->hdr
.udp
->uh_sport
,
4496 pd
->ip_sum
, &pd
->hdr
.udp
->uh_sum
,
4497 &pd
->naddr
, nport
, 1, af
);
4498 sport
= pd
->hdr
.udp
->uh_sport
;
4504 pf_change_a(&saddr
->v4
.s_addr
, pd
->ip_sum
,
4505 pd
->naddr
.v4
.s_addr
, 0);
4506 #ifndef NO_APPLE_EXTENSIONS
4507 pd
->hdr
.icmp
->icmp_cksum
= pf_cksum_fixup(
4508 pd
->hdr
.icmp
->icmp_cksum
, sxport
.port
,
4510 pd
->hdr
.icmp
->icmp_id
= nxport
.port
;
4513 pd
->hdr
.icmp
->icmp_cksum
= pf_cksum_fixup(
4514 pd
->hdr
.icmp
->icmp_cksum
, sport
, nport
, 0);
4515 pd
->hdr
.icmp
->icmp_id
= nport
;
4516 m_copyback(m
, off
, ICMP_MINLEN
, pd
->hdr
.icmp
);
4521 case IPPROTO_ICMPV6
:
4522 pf_change_a6(saddr
, &pd
->hdr
.icmp6
->icmp6_cksum
,
4527 #ifndef NO_APPLE_EXTENSIONS
4532 pf_change_a(&saddr
->v4
.s_addr
,
4533 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4538 PF_ACPY(saddr
, &pd
->naddr
, AF_INET6
);
4549 pf_change_a(&saddr
->v4
.s_addr
,
4550 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4555 PF_ACPY(saddr
, &pd
->naddr
, AF_INET6
);
4565 pf_change_a(&saddr
->v4
.s_addr
,
4566 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4571 PF_ACPY(saddr
, &pd
->naddr
, af
);
4583 #ifndef NO_APPLE_EXTENSIONS
4584 bxport
.port
= nxport
.port
= dxport
.port
;
4585 /* check incoming packet for BINAT/RDR */
4586 if ((nr
= pf_get_translation_aux(pd
, m
, off
, PF_IN
, kif
, &nsn
,
4587 saddr
, &sxport
, daddr
, &dxport
, &pd
->naddr
, &nxport
)) !=
4590 bport
= nport
= dport
;
4591 /* check incoming packet for BINAT/RDR */
4592 if ((nr
= pf_get_translation(pd
, m
, off
, PF_IN
, kif
, &nsn
,
4593 saddr
, sport
, daddr
, dport
, &pd
->naddr
, &nport
)) != NULL
) {
4595 PF_ACPY(&pd
->baddr
, daddr
, af
);
4596 switch (pd
->proto
) {
4598 #ifndef NO_APPLE_EXTENSIONS
4599 pf_change_ap(direction
, pd
->mp
, daddr
,
4600 &th
->th_dport
, pd
->ip_sum
, &th
->th_sum
,
4601 &pd
->naddr
, nxport
.port
, 0, af
);
4602 dxport
.port
= th
->th_dport
;
4604 pf_change_ap(daddr
, &th
->th_dport
, pd
->ip_sum
,
4605 &th
->th_sum
, &pd
->naddr
, nport
, 0, af
);
4606 dport
= th
->th_dport
;
4611 #ifndef NO_APPLE_EXTENSIONS
4612 pf_change_ap(direction
, pd
->mp
, daddr
,
4613 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4614 &pd
->hdr
.udp
->uh_sum
, &pd
->naddr
,
4615 nxport
.port
, 1, af
);
4616 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4618 pf_change_ap(direction
, daddr
,
4619 &pd
->hdr
.udp
->uh_dport
,
4620 pd
->ip_sum
, &pd
->hdr
.udp
->uh_sum
,
4621 &pd
->naddr
, nport
, 1, af
);
4622 dport
= pd
->hdr
.udp
->uh_dport
;
4628 pf_change_a(&daddr
->v4
.s_addr
, pd
->ip_sum
,
4629 pd
->naddr
.v4
.s_addr
, 0);
4633 case IPPROTO_ICMPV6
:
4634 pf_change_a6(daddr
, &pd
->hdr
.icmp6
->icmp6_cksum
,
4639 #ifndef NO_APPLE_EXTENSIONS
4641 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
)
4642 grev1
->call_id
= nxport
.call_id
;
4647 pf_change_a(&daddr
->v4
.s_addr
,
4648 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4653 PF_ACPY(daddr
, &pd
->naddr
, AF_INET6
);
4663 pf_change_a(&daddr
->v4
.s_addr
,
4664 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4669 PF_ACPY(daddr
, &pd
->naddr
, AF_INET6
);
4679 pf_change_a(&daddr
->v4
.s_addr
,
4680 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4685 PF_ACPY(daddr
, &pd
->naddr
, af
);
4698 #ifndef NO_APPLE_EXTENSIONS
4699 if (nr
&& nr
->tag
> 0)
4705 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
)
4706 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
4707 else if (r
->direction
&& r
->direction
!= direction
)
4708 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
4709 else if (r
->af
&& r
->af
!= af
)
4710 r
= r
->skip
[PF_SKIP_AF
].ptr
;
4711 else if (r
->proto
&& r
->proto
!= pd
->proto
)
4712 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
4713 else if (PF_MISMATCHAW(&r
->src
.addr
, saddr
, af
,
4715 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
4716 /* tcp/udp only. port_op always 0 in other cases */
4717 #ifndef NO_APPLE_EXTENSIONS
4718 else if (r
->proto
== pd
->proto
&&
4719 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
4720 r
->src
.xport
.range
.op
&&
4721 !pf_match_port(r
->src
.xport
.range
.op
,
4722 r
->src
.xport
.range
.port
[0], r
->src
.xport
.range
.port
[1],
4725 else if (r
->src
.port_op
&& !pf_match_port(r
->src
.port_op
,
4726 r
->src
.port
[0], r
->src
.port
[1], th
->th_sport
))
4728 r
= r
->skip
[PF_SKIP_SRC_PORT
].ptr
;
4729 else if (PF_MISMATCHAW(&r
->dst
.addr
, daddr
, af
,
4731 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
4732 /* tcp/udp only. port_op always 0 in other cases */
4733 #ifndef NO_APPLE_EXTENSIONS
4734 else if (r
->proto
== pd
->proto
&&
4735 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
4736 r
->dst
.xport
.range
.op
&&
4737 !pf_match_port(r
->dst
.xport
.range
.op
,
4738 r
->dst
.xport
.range
.port
[0], r
->dst
.xport
.range
.port
[1],
4741 else if (r
->dst
.port_op
&& !pf_match_port(r
->dst
.port_op
,
4742 r
->dst
.port
[0], r
->dst
.port
[1], th
->th_dport
))
4744 r
= r
->skip
[PF_SKIP_DST_PORT
].ptr
;
4745 /* icmp only. type always 0 in other cases */
4746 else if (r
->type
&& r
->type
!= icmptype
+ 1)
4747 r
= TAILQ_NEXT(r
, entries
);
4748 /* icmp only. type always 0 in other cases */
4749 else if (r
->code
&& r
->code
!= icmpcode
+ 1)
4750 r
= TAILQ_NEXT(r
, entries
);
4751 else if (r
->tos
&& !(r
->tos
== pd
->tos
))
4752 r
= TAILQ_NEXT(r
, entries
);
4753 else if (r
->rule_flag
& PFRULE_FRAGMENT
)
4754 r
= TAILQ_NEXT(r
, entries
);
4755 else if (pd
->proto
== IPPROTO_TCP
&&
4756 (r
->flagset
& th
->th_flags
) != r
->flags
)
4757 r
= TAILQ_NEXT(r
, entries
);
4758 /* tcp/udp only. uid.op always 0 in other cases */
4759 else if (r
->uid
.op
&& (pd
->lookup
.done
|| (pd
->lookup
.done
=
4760 pf_socket_lookup(direction
, pd
), 1)) &&
4761 !pf_match_uid(r
->uid
.op
, r
->uid
.uid
[0], r
->uid
.uid
[1],
4763 r
= TAILQ_NEXT(r
, entries
);
4764 /* tcp/udp only. gid.op always 0 in other cases */
4765 else if (r
->gid
.op
&& (pd
->lookup
.done
|| (pd
->lookup
.done
=
4766 pf_socket_lookup(direction
, pd
), 1)) &&
4767 !pf_match_gid(r
->gid
.op
, r
->gid
.gid
[0], r
->gid
.gid
[1],
4769 r
= TAILQ_NEXT(r
, entries
);
4770 else if (r
->prob
&& r
->prob
<= (random() % (UINT_MAX
- 1) + 1))
4771 r
= TAILQ_NEXT(r
, entries
);
4772 else if (r
->match_tag
&& !pf_match_tag(m
, r
, pd
->pf_mtag
, &tag
))
4773 r
= TAILQ_NEXT(r
, entries
);
4774 else if (r
->os_fingerprint
!= PF_OSFP_ANY
&&
4775 (pd
->proto
!= IPPROTO_TCP
|| !pf_osfp_match(
4776 pf_osfp_fingerprint(pd
, m
, off
, th
),
4777 r
->os_fingerprint
)))
4778 r
= TAILQ_NEXT(r
, entries
);
4782 if (PF_RTABLEID_IS_VALID(r
->rtableid
))
4783 rtableid
= r
->rtableid
;
4784 if (r
->anchor
== NULL
) {
4791 r
= TAILQ_NEXT(r
, entries
);
4793 pf_step_into_anchor(&asd
, &ruleset
,
4794 PF_RULESET_FILTER
, &r
, &a
, &match
);
4796 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
4797 PF_RULESET_FILTER
, &r
, &a
, &match
))
4804 REASON_SET(&reason
, PFRES_MATCH
);
4806 if (r
->log
|| (nr
!= NULL
&& nr
->log
)) {
4807 #ifndef NO_APPLE_EXTENSIONS
4809 if (rewrite
< off
+ hdrlen
)
4810 rewrite
= off
+ hdrlen
;
4812 m
= pf_lazy_makewritable(pd
, m
, rewrite
);
4814 REASON_SET(&reason
, PFRES_MEMORY
);
4818 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
4822 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
4824 PFLOG_PACKET(kif
, h
, m
, af
, direction
, reason
, r
->log
? r
: nr
,
4828 if ((r
->action
== PF_DROP
) &&
4829 ((r
->rule_flag
& PFRULE_RETURNRST
) ||
4830 (r
->rule_flag
& PFRULE_RETURNICMP
) ||
4831 (r
->rule_flag
& PFRULE_RETURN
))) {
4832 /* undo NAT changes, if they have taken place */
4834 if (direction
== PF_OUT
) {
4835 switch (pd
->proto
) {
4837 #ifndef NO_APPLE_EXTENSIONS
4838 pf_change_ap(direction
, pd
->mp
, saddr
,
4839 &th
->th_sport
, pd
->ip_sum
,
4840 &th
->th_sum
, &pd
->baddr
,
4841 bxport
.port
, 0, af
);
4842 sxport
.port
= th
->th_sport
;
4844 pf_change_ap(saddr
, &th
->th_sport
,
4845 pd
->ip_sum
, &th
->th_sum
,
4846 &pd
->baddr
, bport
, 0, af
);
4847 sport
= th
->th_sport
;
4852 #ifndef NO_APPLE_EXTENSIONS
4853 pf_change_ap(direction
, pd
->mp
, saddr
,
4854 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4855 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4856 bxport
.port
, 1, af
);
4857 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4860 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4861 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4863 sport
= pd
->hdr
.udp
->uh_sport
;
4869 case IPPROTO_ICMPV6
:
4873 #ifndef NO_APPLE_EXTENSIONS
4875 PF_ACPY(&pd
->baddr
, saddr
, af
);
4880 pf_change_a(&saddr
->v4
.s_addr
,
4882 pd
->baddr
.v4
.s_addr
, 0);
4887 PF_ACPY(saddr
, &pd
->baddr
,
4894 PF_ACPY(&pd
->baddr
, saddr
, af
);
4898 pf_change_a(&saddr
->v4
.s_addr
,
4900 pd
->baddr
.v4
.s_addr
, 0);
4905 PF_ACPY(saddr
, &pd
->baddr
,
4915 pf_change_a(&saddr
->v4
.s_addr
,
4917 pd
->baddr
.v4
.s_addr
, 0);
4920 PF_ACPY(saddr
, &pd
->baddr
, af
);
4925 switch (pd
->proto
) {
4927 #ifndef NO_APPLE_EXTENSIONS
4928 pf_change_ap(direction
, pd
->mp
, daddr
,
4929 &th
->th_dport
, pd
->ip_sum
,
4930 &th
->th_sum
, &pd
->baddr
,
4931 bxport
.port
, 0, af
);
4932 dxport
.port
= th
->th_dport
;
4934 pf_change_ap(daddr
, &th
->th_dport
,
4935 pd
->ip_sum
, &th
->th_sum
,
4936 &pd
->baddr
, bport
, 0, af
);
4937 dport
= th
->th_dport
;
4942 #ifndef NO_APPLE_EXTENSIONS
4943 pf_change_ap(direction
, pd
->mp
, daddr
,
4944 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4945 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4946 bxport
.port
, 1, af
);
4947 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4950 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4951 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4953 dport
= pd
->hdr
.udp
->uh_dport
;
4959 case IPPROTO_ICMPV6
:
4963 #ifndef NO_APPLE_EXTENSIONS
4965 if (pd
->proto_variant
==
4966 PF_GRE_PPTP_VARIANT
)
4967 grev1
->call_id
= bxport
.call_id
;
4972 pf_change_a(&daddr
->v4
.s_addr
,
4974 pd
->baddr
.v4
.s_addr
, 0);
4979 PF_ACPY(daddr
, &pd
->baddr
,
4989 pf_change_a(&daddr
->v4
.s_addr
,
4991 pd
->baddr
.v4
.s_addr
, 0);
4996 PF_ACPY(daddr
, &pd
->baddr
,
5006 pf_change_a(&daddr
->v4
.s_addr
,
5008 pd
->baddr
.v4
.s_addr
, 0);
5012 PF_ACPY(daddr
, &pd
->baddr
, af
);
5019 if (pd
->proto
== IPPROTO_TCP
&&
5020 ((r
->rule_flag
& PFRULE_RETURNRST
) ||
5021 (r
->rule_flag
& PFRULE_RETURN
)) &&
5022 !(th
->th_flags
& TH_RST
)) {
5023 u_int32_t ack
= ntohl(th
->th_seq
) + pd
->p_len
;
5032 h4
= mtod(m
, struct ip
*);
5033 len
= ntohs(h4
->ip_len
) - off
;
5037 h6
= mtod(m
, struct ip6_hdr
*);
5038 len
= ntohs(h6
->ip6_plen
) -
5039 (off
- sizeof (*h6
));
5044 if (pf_check_proto_cksum(m
, off
, len
, IPPROTO_TCP
, af
))
5045 REASON_SET(&reason
, PFRES_PROTCKSUM
);
5047 if (th
->th_flags
& TH_SYN
)
5049 if (th
->th_flags
& TH_FIN
)
5051 pf_send_tcp(r
, af
, pd
->dst
,
5052 pd
->src
, th
->th_dport
, th
->th_sport
,
5053 ntohl(th
->th_ack
), ack
, TH_RST
|TH_ACK
, 0, 0,
5054 r
->return_ttl
, 1, 0, pd
->eh
, kif
->pfik_ifp
);
5056 } else if (pd
->proto
!= IPPROTO_ICMP
&& af
== AF_INET
&&
5057 #ifndef NO_APPLE_EXTENSIONS
5058 pd
->proto
!= IPPROTO_ESP
&& pd
->proto
!= IPPROTO_AH
&&
5061 pf_send_icmp(m
, r
->return_icmp
>> 8,
5062 r
->return_icmp
& 255, af
, r
);
5063 else if (pd
->proto
!= IPPROTO_ICMPV6
&& af
== AF_INET6
&&
5064 #ifndef NO_APPLE_EXTENSIONS
5065 pd
->proto
!= IPPROTO_ESP
&& pd
->proto
!= IPPROTO_AH
&&
5068 pf_send_icmp(m
, r
->return_icmp6
>> 8,
5069 r
->return_icmp6
& 255, af
, r
);
5072 if (r
->action
== PF_DROP
)
5075 if (pf_tag_packet(m
, pd
->pf_mtag
, tag
, rtableid
)) {
5076 REASON_SET(&reason
, PFRES_MEMORY
);
5080 if (!state_icmp
&& (r
->keep_state
|| nr
!= NULL
||
5081 (pd
->flags
& PFDESC_TCP_NORM
))) {
5082 /* create new state */
5083 struct pf_state
*s
= NULL
;
5084 struct pf_state_key
*sk
= NULL
;
5085 struct pf_src_node
*sn
= NULL
;
5086 #ifndef NO_APPLE_EXTENSIONS
5087 struct pf_ike_hdr ike
;
5089 if (pd
->proto
== IPPROTO_UDP
) {
5090 struct udphdr
*uh
= pd
->hdr
.udp
;
5091 size_t plen
= m
->m_pkthdr
.len
- off
- sizeof (*uh
);
5093 if (ntohs(uh
->uh_sport
) == PF_IKE_PORT
&&
5094 ntohs(uh
->uh_dport
) == PF_IKE_PORT
&&
5095 plen
>= PF_IKE_PACKET_MINSIZE
) {
5096 if (plen
> PF_IKE_PACKET_MINSIZE
)
5097 plen
= PF_IKE_PACKET_MINSIZE
;
5098 m_copydata(m
, off
+ sizeof (*uh
), plen
, &ike
);
5102 if (nr
!= NULL
&& pd
->proto
== IPPROTO_ESP
&&
5103 direction
== PF_OUT
) {
5104 struct pf_state_key_cmp sk0
;
5105 struct pf_state
*s0
;
5109 * This squelches state creation if the external
5110 * address matches an existing incomplete state with a
5111 * different internal address. Only one 'blocking'
5112 * partial state is allowed for each external address.
5114 memset(&sk0
, 0, sizeof (sk0
));
5116 sk0
.proto
= IPPROTO_ESP
;
5117 PF_ACPY(&sk0
.gwy
.addr
, saddr
, sk0
.af
);
5118 PF_ACPY(&sk0
.ext
.addr
, daddr
, sk0
.af
);
5119 s0
= pf_find_state(kif
, &sk0
, PF_IN
);
5121 if (s0
&& PF_ANEQ(&s0
->state_key
->lan
.addr
,
		/* check maximums */
		if (r->max_states && (r->states >= r->max_states)) {
			pf_status.lcounters[LCNT_STATES]++;
			REASON_SET(&reason, PFRES_MAXSTATES);
		/* src node for filter rule */
		if ((r->rule_flag & PFRULE_SRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR) &&
		    pf_insert_src_node(&sn, r, saddr, af) != 0) {
			REASON_SET(&reason, PFRES_SRCLIMIT);
		/* src node for translation rule */
		if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
		    ((direction == PF_OUT &&
#ifndef NO_APPLE_EXTENSIONS
		    nr->action != PF_RDR &&
		    pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
		    (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
			REASON_SET(&reason, PFRES_SRCLIMIT);
		s = pool_get(&pf_state_pl, PR_WAITOK);
			REASON_SET(&reason, PFRES_MEMORY);
			if (sn != NULL && sn->states == 0 && sn->expire == 0) {
				RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
				pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
				pf_status.src_nodes--;
				pool_put(&pf_src_tree_pl, sn);
			if (nsn != sn && nsn != NULL && nsn->states == 0 &&
				RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
				pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
				pf_status.src_nodes--;
				pool_put(&pf_src_tree_pl, nsn);
#ifndef NO_APPLE_EXTENSIONS
					pool_put(&pf_app_state_pl,
				pool_put(&pf_state_key_pl, sk);
		bzero(s, sizeof (*s));
#ifndef NO_APPLE_EXTENSIONS
		TAILQ_INIT(&s->unlink_hooks);
		s->nat_rule.ptr = nr;
		STATE_INC_COUNTERS(s);
		s->allow_opts = r->allow_opts;
		s->log = r->log & PF_LOG_ALL;
			s->log |= nr->log & PF_LOG_ALL;
		switch (pd->proto) {
			s->src.seqlo = ntohl(th->th_seq);
			s->src.seqhi = s->src.seqlo + pd->p_len + 1;
			if ((th->th_flags & (TH_SYN|TH_ACK)) ==
			    TH_SYN && r->keep_state == PF_STATE_MODULATE) {
				/* Generate sequence number modulator */
				if ((s->src.seqdiff = pf_tcp_iss(pd) -
				pf_change_a(&th->th_seq, &th->th_sum,
				    htonl(s->src.seqlo + s->src.seqdiff), 0);
				rewrite = off + sizeof (*th);
			if (th->th_flags & TH_SYN) {
				s->src.wscale = pf_get_wscale(m, off,
				s->src.max_win = MAX(ntohs(th->th_win), 1);
				if (s->src.wscale & PF_WSCALE_MASK) {
					/* Remove scale factor from initial window */
					int win = s->src.max_win;
					win += 1 << (s->src.wscale & PF_WSCALE_MASK);
					s->src.max_win = (win - 1) >>
					    (s->src.wscale & PF_WSCALE_MASK);
			if (th->th_flags & TH_FIN)
			s->src.state = TCPS_SYN_SENT;
			s->dst.state = TCPS_CLOSED;
			s->timeout = PFTM_TCP_FIRST_PACKET;
			s->src.state = PFUDPS_SINGLE;
			s->dst.state = PFUDPS_NO_TRAFFIC;
			s->timeout = PFTM_UDP_FIRST_PACKET;
		case IPPROTO_ICMPV6:
			s->timeout = PFTM_ICMP_FIRST_PACKET;
#ifndef NO_APPLE_EXTENSIONS
			s->src.state = PFGRE1S_INITIATING;
			s->dst.state = PFGRE1S_NO_TRAFFIC;
			s->timeout = PFTM_GREv1_INITIATING;
			s->src.state = PFESPS_INITIATING;
			s->dst.state = PFESPS_NO_TRAFFIC;
			s->timeout = PFTM_ESP_FIRST_PACKET;
			s->src.state = PFOTHERS_SINGLE;
			s->dst.state = PFOTHERS_NO_TRAFFIC;
			s->timeout = PFTM_OTHER_FIRST_PACKET;
5257 s
->creation
= pf_time_second();
5258 s
->expire
= pf_time_second();
5262 s
->src_node
->states
++;
5263 VERIFY(s
->src_node
->states
!= 0);
5266 PF_ACPY(&nsn
->raddr
, &pd
->naddr
, af
);
5267 s
->nat_src_node
= nsn
;
5268 s
->nat_src_node
->states
++;
5269 VERIFY(s
->nat_src_node
->states
!= 0);
5271 if (pd
->proto
== IPPROTO_TCP
) {
5272 if ((pd
->flags
& PFDESC_TCP_NORM
) &&
5273 pf_normalize_tcp_init(m
, off
, pd
, th
, &s
->src
,
5275 REASON_SET(&reason
, PFRES_MEMORY
);
5276 pf_src_tree_remove_state(s
);
5277 STATE_DEC_COUNTERS(s
);
5278 pool_put(&pf_state_pl
, s
);
5281 if ((pd
->flags
& PFDESC_TCP_NORM
) && s
->src
.scrub
&&
5282 pf_normalize_tcp_stateful(m
, off
, pd
, &reason
,
5283 th
, s
, &s
->src
, &s
->dst
, &rewrite
)) {
5284 /* This really shouldn't happen!!! */
5285 DPFPRINTF(PF_DEBUG_URGENT
,
5286 ("pf_normalize_tcp_stateful failed on "
5288 pf_normalize_tcp_cleanup(s
);
5289 pf_src_tree_remove_state(s
);
5290 STATE_DEC_COUNTERS(s
);
5291 pool_put(&pf_state_pl
, s
);
5296 if ((sk
= pf_alloc_state_key(s
)) == NULL
) {
5297 REASON_SET(&reason
, PFRES_MEMORY
);
5301 sk
->proto
= pd
->proto
;
5302 sk
->direction
= direction
;
5304 #ifndef NO_APPLE_EXTENSIONS
5305 if (pd
->proto
== IPPROTO_UDP
) {
5306 if (ntohs(pd
->hdr
.udp
->uh_sport
) == PF_IKE_PORT
&&
5307 ntohs(pd
->hdr
.udp
->uh_dport
) == PF_IKE_PORT
) {
5308 sk
->proto_variant
= PF_EXTFILTER_APD
;
5310 sk
->proto_variant
= nr
? nr
->extfilter
:
5312 if (sk
->proto_variant
< PF_EXTFILTER_APD
)
5313 sk
->proto_variant
= PF_EXTFILTER_APD
;
5315 } else if (pd
->proto
== IPPROTO_GRE
) {
5316 sk
->proto_variant
= pd
->proto_variant
;
5319 if (direction
== PF_OUT
) {
5320 PF_ACPY(&sk
->gwy
.addr
, saddr
, af
);
5321 PF_ACPY(&sk
->ext
.addr
, daddr
, af
);
5322 switch (pd
->proto
) {
5323 #ifndef NO_APPLE_EXTENSIONS
5325 sk
->gwy
.xport
= sxport
;
5326 sk
->ext
.xport
= dxport
;
5329 sk
->gwy
.xport
.spi
= 0;
5330 sk
->ext
.xport
.spi
= pd
->hdr
.esp
->spi
;
5335 case IPPROTO_ICMPV6
:
5337 #ifndef NO_APPLE_EXTENSIONS
5338 sk
->gwy
.xport
.port
= nxport
.port
;
5339 sk
->ext
.xport
.spi
= 0;
5341 sk
->gwy
.port
= nport
;
5346 #ifndef NO_APPLE_EXTENSIONS
5347 sk
->gwy
.xport
= sxport
;
5348 sk
->ext
.xport
= dxport
;
5351 sk
->gwy
.port
= sport
;
5352 sk
->ext
.port
= dport
;
5355 #ifndef NO_APPLE_EXTENSIONS
5357 PF_ACPY(&sk
->lan
.addr
, &pd
->baddr
, af
);
5358 sk
->lan
.xport
= bxport
;
5360 PF_ACPY(&sk
->lan
.addr
, &sk
->gwy
.addr
, af
);
5361 sk
->lan
.xport
= sk
->gwy
.xport
;
5365 PF_ACPY(&sk
->lan
.addr
, &pd
->baddr
, af
);
5366 sk
->lan
.port
= bport
;
5368 PF_ACPY(&sk
->lan
.addr
, &sk
->gwy
.addr
, af
);
5369 sk
->lan
.port
= sk
->gwy
.port
;
5373 PF_ACPY(&sk
->lan
.addr
, daddr
, af
);
5374 PF_ACPY(&sk
->ext
.addr
, saddr
, af
);
5375 switch (pd
->proto
) {
5378 case IPPROTO_ICMPV6
:
5380 #ifndef NO_APPLE_EXTENSIONS
5381 sk
->lan
.xport
= nxport
;
5382 sk
->ext
.xport
.spi
= 0;
5384 sk
->lan
.port
= nport
;
5388 #ifndef NO_APPLE_EXTENSIONS
5390 sk
->ext
.xport
.spi
= 0;
5391 sk
->lan
.xport
.spi
= pd
->hdr
.esp
->spi
;
5394 sk
->lan
.xport
= dxport
;
5395 sk
->ext
.xport
= sxport
;
5399 sk
->lan
.port
= dport
;
5400 sk
->ext
.port
= sport
;
5403 #ifndef NO_APPLE_EXTENSIONS
5405 PF_ACPY(&sk
->gwy
.addr
, &pd
->baddr
, af
);
5406 sk
->gwy
.xport
= bxport
;
5408 PF_ACPY(&sk
->gwy
.addr
, &sk
->lan
.addr
, af
);
5409 sk
->gwy
.xport
= sk
->lan
.xport
;
5414 PF_ACPY(&sk
->gwy
.addr
, &pd
->baddr
, af
);
5415 sk
->gwy
.port
= bport
;
5417 PF_ACPY(&sk
->gwy
.addr
, &sk
->lan
.addr
, af
);
5418 sk
->gwy
.port
= sk
->lan
.port
;
5423 pf_set_rt_ifp(s
, saddr
); /* needs s->state_key set */
5425 #ifndef NO_APPLE_EXTENSIONS
5428 if (sk
->app_state
== 0) {
5429 switch (pd
->proto
) {
5431 u_int16_t dport
= (direction
== PF_OUT
) ?
5432 sk
->ext
.xport
.port
: sk
->gwy
.xport
.port
;
5435 ntohs(dport
) == PF_PPTP_PORT
) {
5436 struct pf_app_state
*as
;
5438 as
= pool_get(&pf_app_state_pl
,
5446 bzero(as
, sizeof (*as
));
5447 as
->handler
= pf_pptp_handler
;
5448 as
->compare_lan_ext
= 0;
5449 as
->compare_ext_gwy
= 0;
5450 as
->u
.pptp
.grev1_state
= 0;
5452 (void) hook_establish(&s
->unlink_hooks
,
5453 0, (hook_fn_t
) pf_pptp_unlink
, s
);
5459 struct udphdr
*uh
= pd
->hdr
.udp
;
5462 ntohs(uh
->uh_sport
) == PF_IKE_PORT
&&
5463 ntohs(uh
->uh_dport
) == PF_IKE_PORT
) {
5464 struct pf_app_state
*as
;
5466 as
= pool_get(&pf_app_state_pl
,
5474 bzero(as
, sizeof (*as
));
5475 as
->compare_lan_ext
= pf_ike_compare
;
5476 as
->compare_ext_gwy
= pf_ike_compare
;
5477 as
->u
.ike
.cookie
= ike
.initiator_cookie
;
5489 if (pf_insert_state(BOUND_IFACE(r
, kif
), s
)) {
5490 if (pd
->proto
== IPPROTO_TCP
)
5491 pf_normalize_tcp_cleanup(s
);
5492 REASON_SET(&reason
, PFRES_STATEINS
);
5493 pf_src_tree_remove_state(s
);
5494 STATE_DEC_COUNTERS(s
);
5495 pool_put(&pf_state_pl
, s
);
5503 if (pd
->proto
== IPPROTO_TCP
&&
5504 (th
->th_flags
& (TH_SYN
|TH_ACK
)) == TH_SYN
&&
5505 r
->keep_state
== PF_STATE_SYNPROXY
) {
5506 s
->src
.state
= PF_TCPS_PROXY_SRC
;
5508 #ifndef NO_APPLE_EXTENSIONS
5509 if (direction
== PF_OUT
) {
5510 pf_change_ap(direction
, pd
->mp
, saddr
,
5511 &th
->th_sport
, pd
->ip_sum
,
5512 &th
->th_sum
, &pd
->baddr
,
5513 bxport
.port
, 0, af
);
5514 sxport
.port
= th
->th_sport
;
5516 pf_change_ap(direction
, pd
->mp
, daddr
,
5517 &th
->th_dport
, pd
->ip_sum
,
5518 &th
->th_sum
, &pd
->baddr
,
5519 bxport
.port
, 0, af
);
5520 sxport
.port
= th
->th_dport
;
5523 if (direction
== PF_OUT
) {
5524 pf_change_ap(saddr
, &th
->th_sport
,
5525 pd
->ip_sum
, &th
->th_sum
, &pd
->baddr
,
5527 sport
= th
->th_sport
;
5529 pf_change_ap(daddr
, &th
->th_dport
,
5530 pd
->ip_sum
, &th
->th_sum
, &pd
->baddr
,
5532 sport
= th
->th_dport
;
5536 s
->src
.seqhi
= htonl(random());
5537 /* Find mss option */
5538 mss
= pf_get_mss(m
, off
, th
->th_off
, af
);
5539 mss
= pf_calc_mss(saddr
, af
, mss
);
5540 mss
= pf_calc_mss(daddr
, af
, mss
);
5542 pf_send_tcp(r
, af
, daddr
, saddr
, th
->th_dport
,
5543 th
->th_sport
, s
->src
.seqhi
, ntohl(th
->th_seq
) + 1,
5544 TH_SYN
|TH_ACK
, 0, s
->src
.mss
, 0, 1, 0, NULL
, NULL
);
5545 REASON_SET(&reason
, PFRES_SYNPROXY
);
5546 return (PF_SYNPROXY_DROP
);
5549 #ifndef NO_APPLE_EXTENSIONS
5550 if (sk
->app_state
&& sk
->app_state
->handler
) {
5553 switch (pd
->proto
) {
5555 offx
+= th
->th_off
<< 2;
5558 offx
+= pd
->hdr
.udp
->uh_ulen
<< 2;
5561 /* ALG handlers only apply to TCP and UDP rules */
5566 sk
->app_state
->handler(s
, direction
, offx
,
5569 REASON_SET(&reason
, PFRES_MEMORY
);
5578 /* copy back packet headers if we performed NAT operations */
5579 #ifndef NO_APPLE_EXTENSIONS
5581 if (rewrite
< off
+ hdrlen
)
5582 rewrite
= off
+ hdrlen
;
5584 m
= pf_lazy_makewritable(pd
, pd
->mp
, rewrite
);
5586 REASON_SET(&reason
, PFRES_MEMORY
);
5590 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
5594 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
5601 pf_test_fragment(struct pf_rule
**rm
, int direction
, struct pfi_kif
*kif
,
5602 struct mbuf
*m
, void *h
, struct pf_pdesc
*pd
, struct pf_rule
**am
,
5603 struct pf_ruleset
**rsm
)
5606 struct pf_rule
*r
, *a
= NULL
;
5607 struct pf_ruleset
*ruleset
= NULL
;
5608 sa_family_t af
= pd
->af
;
5614 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_FILTER
].active
.ptr
);
5617 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
)
5618 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
5619 else if (r
->direction
&& r
->direction
!= direction
)
5620 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
5621 else if (r
->af
&& r
->af
!= af
)
5622 r
= r
->skip
[PF_SKIP_AF
].ptr
;
5623 else if (r
->proto
&& r
->proto
!= pd
->proto
)
5624 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
5625 else if (PF_MISMATCHAW(&r
->src
.addr
, pd
->src
, af
,
5627 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
5628 else if (PF_MISMATCHAW(&r
->dst
.addr
, pd
->dst
, af
,
5630 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
5631 else if (r
->tos
&& !(r
->tos
== pd
->tos
))
5632 r
= TAILQ_NEXT(r
, entries
);
5633 else if (r
->os_fingerprint
!= PF_OSFP_ANY
)
5634 r
= TAILQ_NEXT(r
, entries
);
5635 #ifndef NO_APPLE_EXTENSIONS
5636 else if (pd
->proto
== IPPROTO_UDP
&&
5637 (r
->src
.xport
.range
.op
|| r
->dst
.xport
.range
.op
))
5638 r
= TAILQ_NEXT(r
, entries
);
5639 else if (pd
->proto
== IPPROTO_TCP
&&
5640 (r
->src
.xport
.range
.op
|| r
->dst
.xport
.range
.op
||
5642 r
= TAILQ_NEXT(r
, entries
);
5644 else if (pd
->proto
== IPPROTO_UDP
&&
5645 (r
->src
.port_op
|| r
->dst
.port_op
))
5646 r
= TAILQ_NEXT(r
, entries
);
5647 else if (pd
->proto
== IPPROTO_TCP
&&
5648 (r
->src
.port_op
|| r
->dst
.port_op
|| r
->flagset
))
5649 r
= TAILQ_NEXT(r
, entries
);
5651 else if ((pd
->proto
== IPPROTO_ICMP
||
5652 pd
->proto
== IPPROTO_ICMPV6
) &&
5653 (r
->type
|| r
->code
))
5654 r
= TAILQ_NEXT(r
, entries
);
5655 else if (r
->prob
&& r
->prob
<= (random() % (UINT_MAX
- 1) + 1))
5656 r
= TAILQ_NEXT(r
, entries
);
5657 else if (r
->match_tag
&& !pf_match_tag(m
, r
, pd
->pf_mtag
, &tag
))
5658 r
= TAILQ_NEXT(r
, entries
);
5660 if (r
->anchor
== NULL
) {
5667 r
= TAILQ_NEXT(r
, entries
);
5669 pf_step_into_anchor(&asd
, &ruleset
,
5670 PF_RULESET_FILTER
, &r
, &a
, &match
);
5672 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
5673 PF_RULESET_FILTER
, &r
, &a
, &match
))
5680 REASON_SET(&reason
, PFRES_MATCH
);
5683 PFLOG_PACKET(kif
, h
, m
, af
, direction
, reason
, r
, a
, ruleset
,
5686 if (r
->action
!= PF_PASS
)
5689 if (pf_tag_packet(m
, pd
->pf_mtag
, tag
, -1)) {
5690 REASON_SET(&reason
, PFRES_MEMORY
);
#ifndef NO_APPLE_EXTENSIONS
pf_pptp_handler(struct pf_state *s, int direction, int off,
    struct pf_pdesc *pd, struct pfi_kif *kif)
#pragma unused(direction)
	struct pf_pptp_state *pptps;
	struct pf_pptp_ctrl_msg cm;
	struct pf_state *gs;
	u_int16_t *pac_call_id;
	u_int16_t *pns_call_id;
	u_int16_t *spoof_call_id;
	u_int8_t *pac_state;
	u_int8_t *pns_state;
	enum { PF_PPTP_PASS, PF_PPTP_INSERT_GRE, PF_PPTP_REMOVE_GRE } op;
	struct pf_state_key *sk;
	struct pf_state_key *gsk;
	struct pf_app_state *gas;

	pptps = &sk->app_state->u.pptp;
	gs = pptps->grev1_state;
		gs->expire = pf_time_second();

	plen = min(sizeof (cm), m->m_pkthdr.len - off);
	if (plen < PF_PPTP_CTRL_MSG_MINSIZE)
	m_copydata(m, off, plen, &cm);

	if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
	if (ntohs(cm.hdr.type) != 1)
		gs = pool_get(&pf_state_pl, PR_WAITOK);
5744 memcpy(gs
, s
, sizeof (*gs
));
5746 memset(&gs
->entry_id
, 0, sizeof (gs
->entry_id
));
5747 memset(&gs
->entry_list
, 0, sizeof (gs
->entry_list
));
5749 TAILQ_INIT(&gs
->unlink_hooks
);
5752 gs
->pfsync_time
= 0;
5753 gs
->packets
[0] = gs
->packets
[1] = 0;
5754 gs
->bytes
[0] = gs
->bytes
[1] = 0;
5755 gs
->timeout
= PFTM_UNLINKED
;
5756 gs
->id
= gs
->creatorid
= 0;
5757 gs
->src
.state
= gs
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
5758 gs
->src
.scrub
= gs
->dst
.scrub
= 0;
5760 gas
= pool_get(&pf_app_state_pl
, PR_NOWAIT
);
5762 pool_put(&pf_state_pl
, gs
);
5766 gsk
= pf_alloc_state_key(gs
);
5768 pool_put(&pf_app_state_pl
, gas
);
5769 pool_put(&pf_state_pl
, gs
);
5773 memcpy(&gsk
->lan
, &sk
->lan
, sizeof (gsk
->lan
));
5774 memcpy(&gsk
->gwy
, &sk
->gwy
, sizeof (gsk
->gwy
));
5775 memcpy(&gsk
->ext
, &sk
->ext
, sizeof (gsk
->ext
));
5777 gsk
->proto
= IPPROTO_GRE
;
5778 gsk
->proto_variant
= PF_GRE_PPTP_VARIANT
;
5779 gsk
->app_state
= gas
;
5780 gsk
->lan
.xport
.call_id
= 0;
5781 gsk
->gwy
.xport
.call_id
= 0;
5782 gsk
->ext
.xport
.call_id
= 0;
5783 memset(gas
, 0, sizeof (*gas
));
5784 gas
->u
.grev1
.pptp_state
= s
;
5785 STATE_INC_COUNTERS(gs
);
5786 pptps
->grev1_state
= gs
;
5787 (void) hook_establish(&gs
->unlink_hooks
, 0,
5788 (hook_fn_t
) pf_grev1_unlink
, gs
);
5790 gsk
= gs
->state_key
;
5793 switch (sk
->direction
) {
5795 pns_call_id
= &gsk
->ext
.xport
.call_id
;
5796 pns_state
= &gs
->dst
.state
;
5797 pac_call_id
= &gsk
->lan
.xport
.call_id
;
5798 pac_state
= &gs
->src
.state
;
5802 pns_call_id
= &gsk
->lan
.xport
.call_id
;
5803 pns_state
= &gs
->src
.state
;
5804 pac_call_id
= &gsk
->ext
.xport
.call_id
;
5805 pac_state
= &gs
->dst
.state
;
5809 DPFPRINTF(PF_DEBUG_URGENT
,
5810 ("pf_pptp_handler: bad directional!\n"));
5817 ct
= ntohs(cm
.ctrl
.type
);
5820 case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ
:
5821 *pns_call_id
= cm
.msg
.call_out_req
.call_id
;
5822 *pns_state
= PFGRE1S_INITIATING
;
5823 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
)
5824 spoof_call_id
= &cm
.msg
.call_out_req
.call_id
;
5827 case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY
:
5828 *pac_call_id
= cm
.msg
.call_out_rpy
.call_id
;
5829 if (s
->nat_rule
.ptr
)
5831 (pac_call_id
== &gsk
->lan
.xport
.call_id
) ?
5832 &cm
.msg
.call_out_rpy
.call_id
:
5833 &cm
.msg
.call_out_rpy
.peer_call_id
;
5834 if (gs
->timeout
== PFTM_UNLINKED
) {
5835 *pac_state
= PFGRE1S_INITIATING
;
5836 op
= PF_PPTP_INSERT_GRE
;
5840 case PF_PPTP_CTRL_TYPE_CALL_IN_1ST
:
5841 *pns_call_id
= cm
.msg
.call_in_1st
.call_id
;
5842 *pns_state
= PFGRE1S_INITIATING
;
5843 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
)
5844 spoof_call_id
= &cm
.msg
.call_in_1st
.call_id
;
5847 case PF_PPTP_CTRL_TYPE_CALL_IN_2ND
:
5848 *pac_call_id
= cm
.msg
.call_in_2nd
.call_id
;
5849 *pac_state
= PFGRE1S_INITIATING
;
5850 if (s
->nat_rule
.ptr
)
5852 (pac_call_id
== &gsk
->lan
.xport
.call_id
) ?
5853 &cm
.msg
.call_in_2nd
.call_id
:
5854 &cm
.msg
.call_in_2nd
.peer_call_id
;
5857 case PF_PPTP_CTRL_TYPE_CALL_IN_3RD
:
5858 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
)
5859 spoof_call_id
= &cm
.msg
.call_in_3rd
.call_id
;
5860 if (cm
.msg
.call_in_3rd
.call_id
!= *pns_call_id
) {
5863 if (gs
->timeout
== PFTM_UNLINKED
)
5864 op
= PF_PPTP_INSERT_GRE
;
5867 case PF_PPTP_CTRL_TYPE_CALL_CLR
:
5868 if (cm
.msg
.call_clr
.call_id
!= *pns_call_id
)
5869 op
= PF_PPTP_REMOVE_GRE
;
5872 case PF_PPTP_CTRL_TYPE_CALL_DISC
:
5873 if (cm
.msg
.call_clr
.call_id
!= *pac_call_id
)
5874 op
= PF_PPTP_REMOVE_GRE
;
5877 case PF_PPTP_CTRL_TYPE_ERROR
:
5878 if (s
->nat_rule
.ptr
&& pns_call_id
== &gsk
->lan
.xport
.call_id
)
5879 spoof_call_id
= &cm
.msg
.error
.peer_call_id
;
5882 case PF_PPTP_CTRL_TYPE_SET_LINKINFO
:
5883 if (s
->nat_rule
.ptr
&& pac_call_id
== &gsk
->lan
.xport
.call_id
)
5884 spoof_call_id
= &cm
.msg
.set_linkinfo
.peer_call_id
;
	if (!gsk->gwy.xport.call_id && gsk->lan.xport.call_id) {
		gsk->gwy.xport.call_id = gsk->lan.xport.call_id;
		if (spoof_call_id) {
			u_int16_t call_id = 0;
			struct pf_state_key_cmp key;

			key.proto = IPPROTO_GRE;
			key.proto_variant = PF_GRE_PPTP_VARIANT;
			PF_ACPY(&key.gwy.addr, &gsk->gwy.addr, key.af);
			PF_ACPY(&key.ext.addr, &gsk->ext.addr, key.af);
			key.gwy.xport.call_id = gsk->gwy.xport.call_id;
			key.ext.xport.call_id = gsk->ext.xport.call_id;
				call_id = htonl(random());

			while (pf_find_state_all(&key, PF_IN, 0)) {
				call_id = ntohs(call_id);
					if (--call_id == 0) call_id = 0xffff;
				call_id = htons(call_id);

				key.gwy.xport.call_id = call_id;
					DPFPRINTF(PF_DEBUG_URGENT,
					    ("pf_pptp_handler: failed to spoof "
					key.gwy.xport.call_id = 0;
			gsk->gwy.xport.call_id = call_id;
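			/*
			 * Illustrative sketch (not part of the original
			 * source): the loop above probes for a free GREv1
			 * call ID by starting from a random value and
			 * decrementing with wrap-around until no existing
			 * state matches.  is_call_id_in_use() below is a
			 * hypothetical stand-in for pf_find_state_all().
			 */
#if 0	/* example only, not compiled */
			u_int16_t
			pick_free_call_id(int (*is_call_id_in_use)(u_int16_t))
			{
				u_int16_t id = (u_int16_t)random();
				u_int16_t tries;

				for (tries = 0; tries < 0xffff; tries++) {
					if (id != 0 && !is_call_id_in_use(id))
						return (id);	/* unused, non-zero */
					if (--id == 0)
						id = 0xffff;	/* wrap around, skip 0 */
				}
				return (0);	/* exhausted: caller gives up */
			}
#endif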
5933 if (spoof_call_id
&& gsk
->lan
.xport
.call_id
!= gsk
->gwy
.xport
.call_id
) {
5934 if (*spoof_call_id
== gsk
->gwy
.xport
.call_id
) {
5935 *spoof_call_id
= gsk
->lan
.xport
.call_id
;
5936 th
->th_sum
= pf_cksum_fixup(th
->th_sum
,
5937 gsk
->gwy
.xport
.call_id
, gsk
->lan
.xport
.call_id
, 0);
5939 *spoof_call_id
= gsk
->gwy
.xport
.call_id
;
5940 th
->th_sum
= pf_cksum_fixup(th
->th_sum
,
5941 gsk
->lan
.xport
.call_id
, gsk
->gwy
.xport
.call_id
, 0);
5944 m
= pf_lazy_makewritable(pd
, m
, off
+ plen
);
5946 pptps
->grev1_state
= NULL
;
5947 STATE_DEC_COUNTERS(gs
);
5948 pool_put(&pf_state_pl
, gs
);
5951 m_copyback(m
, off
, plen
, &cm
);
5955 case PF_PPTP_REMOVE_GRE
:
5956 gs
->timeout
= PFTM_PURGE
;
5957 gs
->src
.state
= gs
->dst
.state
= PFGRE1S_NO_TRAFFIC
;
5958 gsk
->lan
.xport
.call_id
= 0;
5959 gsk
->gwy
.xport
.call_id
= 0;
5960 gsk
->ext
.xport
.call_id
= 0;
5961 gs
->id
= gs
->creatorid
= 0;
5964 case PF_PPTP_INSERT_GRE
:
5965 gs
->creation
= pf_time_second();
5966 gs
->expire
= pf_time_second();
5967 gs
->timeout
= PFTM_TCP_ESTABLISHED
;
5968 if (gs
->src_node
!= NULL
) {
5969 ++gs
->src_node
->states
;
5970 VERIFY(gs
->src_node
->states
!= 0);
5972 if (gs
->nat_src_node
!= NULL
) {
5973 ++gs
->nat_src_node
->states
;
5974 VERIFY(gs
->nat_src_node
->states
!= 0);
5976 pf_set_rt_ifp(gs
, &sk
->lan
.addr
);
5977 if (pf_insert_state(BOUND_IFACE(s
->rule
.ptr
, kif
), gs
)) {
5981 * FIX ME: insertion can fail when multiple PNS
5982 * behind the same NAT open calls to the same PAC
5983 * simultaneously because spoofed call ID numbers
5984 * are chosen before states are inserted. This is
5985 * hard to fix and happens infrequently enough that
5986 * users will normally try again and this ALG will
5987 * succeed. Failures are expected to be rare enough
5988 * that fixing this is a low priority.
5990 pptps
->grev1_state
= NULL
;
5991 pd
->lmw
= -1; /* Force PF_DROP on PFRES_MEMORY */
5992 pf_src_tree_remove_state(gs
);
5993 STATE_DEC_COUNTERS(gs
);
5994 pool_put(&pf_state_pl
, gs
);
5995 DPFPRINTF(PF_DEBUG_URGENT
, ("pf_pptp_handler: error "
5996 "inserting GREv1 state.\n"));
pf_pptp_unlink(struct pf_state *s)
	struct pf_app_state *as = s->state_key->app_state;
	struct pf_state *grev1s = as->u.pptp.grev1_state;
		struct pf_app_state *gas = grev1s->state_key->app_state;

		if (grev1s->timeout < PFTM_MAX)
			grev1s->timeout = PFTM_PURGE;
		gas->u.grev1.pptp_state = NULL;
		as->u.pptp.grev1_state = NULL;

pf_grev1_unlink(struct pf_state *s)
	struct pf_app_state *as = s->state_key->app_state;
	struct pf_state *pptps = as->u.grev1.pptp_state;
		struct pf_app_state *pas = pptps->state_key->app_state;

		pas->u.pptp.grev1_state = NULL;
		as->u.grev1.pptp_state = NULL;

pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b)
	int64_t d = a->u.ike.cookie - b->u.ike.cookie;
	return ((d > 0) ? 1 : ((d < 0) ? -1 : 0));
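/*
 * Usage sketch (drawn from the callers later in this file, not new
 * behaviour): pf_ike_compare() is installed as the app-state comparison
 * hook so that two IKE exchanges sharing the same address/port pair are
 * distinguished by their initiator cookie.
 */
#if 0	/* example only, not compiled */
	struct pf_app_state as;

	memset(&as, 0, sizeof (as));
	as.compare_lan_ext = pf_ike_compare;	/* orders states by u.ike.cookie */
	as.compare_ext_gwy = pf_ike_compare;
	as.u.ike.cookie = ike.initiator_cookie;	/* cookie taken from the packet */
	key.app_state = &as;			/* consulted by pf_find_state() */
#endif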
6044 pf_test_state_tcp(struct pf_state
**state
, int direction
, struct pfi_kif
*kif
,
6045 struct mbuf
*m
, int off
, void *h
, struct pf_pdesc
*pd
,
6049 struct pf_state_key_cmp key
;
6050 struct tcphdr
*th
= pd
->hdr
.tcp
;
6051 u_int16_t win
= ntohs(th
->th_win
);
6052 u_int32_t ack
, end
, seq
, orig_seq
;
6056 struct pf_state_peer
*src
, *dst
;
6058 #ifndef NO_APPLE_EXTENSIONS
6062 key
.proto
= IPPROTO_TCP
;
6063 if (direction
== PF_IN
) {
6064 PF_ACPY(&key
.ext
.addr
, pd
->src
, key
.af
);
6065 PF_ACPY(&key
.gwy
.addr
, pd
->dst
, key
.af
);
6066 #ifndef NO_APPLE_EXTENSIONS
6067 key
.ext
.xport
.port
= th
->th_sport
;
6068 key
.gwy
.xport
.port
= th
->th_dport
;
6070 key
.ext
.port
= th
->th_sport
;
6071 key
.gwy
.port
= th
->th_dport
;
6074 PF_ACPY(&key
.lan
.addr
, pd
->src
, key
.af
);
6075 PF_ACPY(&key
.ext
.addr
, pd
->dst
, key
.af
);
6076 #ifndef NO_APPLE_EXTENSIONS
6077 key
.lan
.xport
.port
= th
->th_sport
;
6078 key
.ext
.xport
.port
= th
->th_dport
;
6080 key
.lan
.port
= th
->th_sport
;
6081 key
.ext
.port
= th
->th_dport
;
6087 if (direction
== (*state
)->state_key
->direction
) {
6088 src
= &(*state
)->src
;
6089 dst
= &(*state
)->dst
;
6091 src
= &(*state
)->dst
;
6092 dst
= &(*state
)->src
;
6095 if ((*state
)->src
.state
== PF_TCPS_PROXY_SRC
) {
6096 if (direction
!= (*state
)->state_key
->direction
) {
6097 REASON_SET(reason
, PFRES_SYNPROXY
);
6098 return (PF_SYNPROXY_DROP
);
6100 if (th
->th_flags
& TH_SYN
) {
6101 if (ntohl(th
->th_seq
) != (*state
)->src
.seqlo
) {
6102 REASON_SET(reason
, PFRES_SYNPROXY
);
6105 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, pd
->dst
,
6106 pd
->src
, th
->th_dport
, th
->th_sport
,
6107 (*state
)->src
.seqhi
, ntohl(th
->th_seq
) + 1,
6108 TH_SYN
|TH_ACK
, 0, (*state
)->src
.mss
, 0, 1,
6110 REASON_SET(reason
, PFRES_SYNPROXY
);
6111 return (PF_SYNPROXY_DROP
);
6112 } else if (!(th
->th_flags
& TH_ACK
) ||
6113 (ntohl(th
->th_ack
) != (*state
)->src
.seqhi
+ 1) ||
6114 (ntohl(th
->th_seq
) != (*state
)->src
.seqlo
+ 1)) {
6115 REASON_SET(reason
, PFRES_SYNPROXY
);
6117 } else if ((*state
)->src_node
!= NULL
&&
6118 pf_src_connlimit(state
)) {
6119 REASON_SET(reason
, PFRES_SRCLIMIT
);
6122 (*state
)->src
.state
= PF_TCPS_PROXY_DST
;
6124 if ((*state
)->src
.state
== PF_TCPS_PROXY_DST
) {
6125 struct pf_state_host
*psrc
, *pdst
;
6127 if (direction
== PF_OUT
) {
6128 psrc
= &(*state
)->state_key
->gwy
;
6129 pdst
= &(*state
)->state_key
->ext
;
6131 psrc
= &(*state
)->state_key
->ext
;
6132 pdst
= &(*state
)->state_key
->lan
;
6134 if (direction
== (*state
)->state_key
->direction
) {
6135 if (((th
->th_flags
& (TH_SYN
|TH_ACK
)) != TH_ACK
) ||
6136 (ntohl(th
->th_ack
) != (*state
)->src
.seqhi
+ 1) ||
6137 (ntohl(th
->th_seq
) != (*state
)->src
.seqlo
+ 1)) {
6138 REASON_SET(reason
, PFRES_SYNPROXY
);
6141 (*state
)->src
.max_win
= MAX(ntohs(th
->th_win
), 1);
6142 if ((*state
)->dst
.seqhi
== 1)
6143 (*state
)->dst
.seqhi
= htonl(random());
6144 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, &psrc
->addr
,
6145 #ifndef NO_APPLE_EXTENSIONS
6146 &pdst
->addr
, psrc
->xport
.port
, pdst
->xport
.port
,
6148 &pdst
->addr
, psrc
->port
, pdst
->port
,
6150 (*state
)->dst
.seqhi
, 0, TH_SYN
, 0,
6151 (*state
)->src
.mss
, 0, 0, (*state
)->tag
, NULL
, NULL
);
6152 REASON_SET(reason
, PFRES_SYNPROXY
);
6153 return (PF_SYNPROXY_DROP
);
6154 } else if (((th
->th_flags
& (TH_SYN
|TH_ACK
)) !=
6156 (ntohl(th
->th_ack
) != (*state
)->dst
.seqhi
+ 1)) {
6157 REASON_SET(reason
, PFRES_SYNPROXY
);
6160 (*state
)->dst
.max_win
= MAX(ntohs(th
->th_win
), 1);
6161 (*state
)->dst
.seqlo
= ntohl(th
->th_seq
);
6162 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, pd
->dst
,
6163 pd
->src
, th
->th_dport
, th
->th_sport
,
6164 ntohl(th
->th_ack
), ntohl(th
->th_seq
) + 1,
6165 TH_ACK
, (*state
)->src
.max_win
, 0, 0, 0,
6166 (*state
)->tag
, NULL
, NULL
);
6167 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
, &psrc
->addr
,
6168 #ifndef NO_APPLE_EXTENSIONS
6169 &pdst
->addr
, psrc
->xport
.port
, pdst
->xport
.port
,
6171 &pdst
->addr
, psrc
->port
, pdst
->port
,
6173 (*state
)->src
.seqhi
+ 1, (*state
)->src
.seqlo
+ 1,
6174 TH_ACK
, (*state
)->dst
.max_win
, 0, 0, 1,
			(*state)->src.seqdiff = (*state)->dst.seqhi -
			    (*state)->src.seqlo;
			(*state)->dst.seqdiff = (*state)->src.seqhi -
			    (*state)->dst.seqlo;
			(*state)->src.seqhi = (*state)->src.seqlo +
			    (*state)->dst.max_win;
			(*state)->dst.seqhi = (*state)->dst.seqlo +
			    (*state)->src.max_win;
			(*state)->src.wscale = (*state)->dst.wscale = 0;
			(*state)->src.state = (*state)->dst.state =
			REASON_SET(reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
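			/*
			 * Worked note (not from the original source): during
			 * the proxied handshake the firewall committed to its
			 * own initial sequence numbers (src.seqhi toward the
			 * client, dst.seqhi toward the server), while the real
			 * peers chose src.seqlo and dst.seqlo.  The differences
			 * stored above,
			 *
			 *	src.seqdiff = dst.seqhi - src.seqlo
			 *	dst.seqdiff = src.seqhi - dst.seqlo
			 *
			 * are the constant offsets later applied to every
			 * segment, so each side keeps seeing the sequence
			 * space it negotiated with the proxy.
			 */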
6192 if (((th
->th_flags
& (TH_SYN
|TH_ACK
)) == TH_SYN
) &&
6193 dst
->state
>= TCPS_FIN_WAIT_2
&&
6194 src
->state
>= TCPS_FIN_WAIT_2
) {
6195 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
6196 printf("pf: state reuse ");
6197 pf_print_state(*state
);
6198 pf_print_flags(th
->th_flags
);
6201 /* XXX make sure it's the same direction ?? */
6202 (*state
)->src
.state
= (*state
)->dst
.state
= TCPS_CLOSED
;
6203 pf_unlink_state(*state
);
6208 if (src
->wscale
&& dst
->wscale
&& !(th
->th_flags
& TH_SYN
)) {
6209 sws
= src
->wscale
& PF_WSCALE_MASK
;
6210 dws
= dst
->wscale
& PF_WSCALE_MASK
;
6215 * Sequence tracking algorithm from Guido van Rooij's paper:
6216 * http://www.madison-gurkha.com/publications/tcp_filtering/
6220 orig_seq
= seq
= ntohl(th
->th_seq
);
6221 if (src
->seqlo
== 0) {
6222 /* First packet from this end. Set its state */
6224 if ((pd
->flags
& PFDESC_TCP_NORM
|| dst
->scrub
) &&
6225 src
->scrub
== NULL
) {
6226 if (pf_normalize_tcp_init(m
, off
, pd
, th
, src
, dst
)) {
6227 REASON_SET(reason
, PFRES_MEMORY
);
6232 /* Deferred generation of sequence number modulator */
6233 if (dst
->seqdiff
&& !src
->seqdiff
) {
6234 /* use random iss for the TCP server */
6235 while ((src
->seqdiff
= random() - seq
) == 0)
6237 ack
= ntohl(th
->th_ack
) - dst
->seqdiff
;
6238 pf_change_a(&th
->th_seq
, &th
->th_sum
, htonl(seq
+
6240 pf_change_a(&th
->th_ack
, &th
->th_sum
, htonl(ack
), 0);
6241 copyback
= off
+ sizeof (*th
);
6243 ack
= ntohl(th
->th_ack
);
6246 end
= seq
+ pd
->p_len
;
6247 if (th
->th_flags
& TH_SYN
) {
6249 if (dst
->wscale
& PF_WSCALE_FLAG
) {
6250 src
->wscale
= pf_get_wscale(m
, off
, th
->th_off
,
6252 if (src
->wscale
& PF_WSCALE_FLAG
) {
6254 * Remove scale factor from initial
6257 sws
= src
->wscale
& PF_WSCALE_MASK
;
6258 win
= ((u_int32_t
)win
+ (1 << sws
) - 1)
6260 dws
= dst
->wscale
& PF_WSCALE_MASK
;
#ifndef NO_APPLE_MODIFICATION
				/*
				 * Window scale negotiation has failed,
				 * therefore we must restore the window
				 * scale in the state record that we
				 * optimistically removed in
				 * pf_test_rule().  Care is required to
				 * prevent arithmetic overflow from
				 * zeroing the window when it's
				 * truncated down to 16-bits. --jhw
				 */
				u_int32_t max_win = dst->max_win;
				    dst->wscale & PF_WSCALE_MASK;
				dst->max_win = MIN(0xffff, max_win);
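				/*
				 * Worked example (not from the original
				 * source): with dst->max_win == 512 and a
				 * restored scale factor of 9, 512 << 9 ==
				 * 0x40000; truncating that to the 16-bit
				 * max_win field would leave 0 (a closed
				 * window), so the MIN(0xffff, ...) clamp
				 * above stores 0xffff instead.
				 */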
				/* fixup other window */
				dst->max_win <<= dst->wscale &
			/* in case of a retrans SYN|ACK */
6289 if (th
->th_flags
& TH_FIN
)
6293 if (src
->state
< TCPS_SYN_SENT
)
6294 src
->state
= TCPS_SYN_SENT
;
6297 * May need to slide the window (seqhi may have been set by
6298 * the crappy stack check or if we picked up the connection
6299 * after establishment)
6301 #ifndef NO_APPLE_MODIFICATIONS
6302 if (src
->seqhi
== 1 ||
6303 SEQ_GEQ(end
+ MAX(1, (u_int32_t
)dst
->max_win
<< dws
),
6305 src
->seqhi
= end
+ MAX(1, (u_int32_t
)dst
->max_win
<< dws
);
6307 if (src
->seqhi
== 1 ||
6308 SEQ_GEQ(end
+ MAX(1, dst
->max_win
<< dws
), src
->seqhi
))
6309 src
->seqhi
= end
+ MAX(1, dst
->max_win
<< dws
);
6311 if (win
> src
->max_win
)
6315 ack
= ntohl(th
->th_ack
) - dst
->seqdiff
;
6317 /* Modulate sequence numbers */
6318 pf_change_a(&th
->th_seq
, &th
->th_sum
, htonl(seq
+
6320 pf_change_a(&th
->th_ack
, &th
->th_sum
, htonl(ack
), 0);
6321 copyback
= off
+ sizeof (*th
);
6323 end
= seq
+ pd
->p_len
;
6324 if (th
->th_flags
& TH_SYN
)
6326 if (th
->th_flags
& TH_FIN
)
6330 if ((th
->th_flags
& TH_ACK
) == 0) {
6331 /* Let it pass through the ack skew check */
6333 } else if ((ack
== 0 &&
6334 (th
->th_flags
& (TH_ACK
|TH_RST
)) == (TH_ACK
|TH_RST
)) ||
6335 /* broken tcp stacks do not set ack */
6336 (dst
->state
< TCPS_SYN_SENT
)) {
6338 * Many stacks (ours included) will set the ACK number in an
6339 * FIN|ACK if the SYN times out -- no sequence to ACK.
	/* Ease sequencing restrictions on no data packets */
	ackskew = dst->seqlo - ack;

	/*
	 * Need to demodulate the sequence numbers in any TCP SACK options
	 * (Selective ACK). We could optionally validate the SACK values
	 * against the current ACK window, either forwards or backwards, but
	 * I'm not confident that SACK has been implemented properly
	 * everywhere. It wouldn't surprise me if several stacks accidently
	 * SACK too far backwards of previously ACKed data. There really aren't
	 * any security implications of bad SACKing unless the target stack
	 * doesn't validate the option length correctly. Someone trying to
	 * spoof into a TCP connection won't bother blindly sending SACK
	 */
	if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) {
#ifndef NO_APPLE_EXTENSIONS
		copyback = pf_modulate_sack(m, off, pd, th, dst);
		if (copyback == -1) {
			REASON_SET(reason, PFRES_MEMORY);
		if (pf_modulate_sack(m, off, pd, th, dst))

#define MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
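/*
 * Illustrative sketch (not part of the original source): the acceptance test
 * applied just below, collected into one predicate.  seq/end/ackskew come
 * from the segment, src/dst are the tracked peers, and sws/dws are their
 * window-scale shifts, as in the surrounding code; the extra exact-sequence
 * requirement for RST segments is omitted here.
 */
#if 0	/* example only, not compiled */
static int
pf_seq_window_ok(u_int32_t seq, u_int32_t end, int ackskew,
    const struct pf_state_peer *src, const struct pf_state_peer *dst,
    u_int32_t sws, u_int32_t dws)
{
	return (SEQ_GEQ(src->seqhi, end) &&	/* ends inside peer's window */
	    /* does not start more than one full window back */
	    SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) &&
	    /* ACK not more than one reassembled fragment backwards */
	    ackskew >= -MAXACKWINDOW &&
	    /* ACK not more than one window forward */
	    ackskew <= (MAXACKWINDOW << sws));
}
#endif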
6382 if (SEQ_GEQ(src
->seqhi
, end
) &&
6383 /* Last octet inside other's window space */
6384 #ifndef NO_APPLE_MODIFICATIONS
6385 SEQ_GEQ(seq
, src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
)) &&
6387 SEQ_GEQ(seq
, src
->seqlo
- (dst
->max_win
<< dws
)) &&
6389 /* Retrans: not more than one window back */
6390 (ackskew
>= -MAXACKWINDOW
) &&
6391 /* Acking not more than one reassembled fragment backwards */
6392 (ackskew
<= (MAXACKWINDOW
<< sws
)) &&
6393 /* Acking not more than one window forward */
6394 ((th
->th_flags
& TH_RST
) == 0 || orig_seq
== src
->seqlo
||
6395 (orig_seq
== src
->seqlo
+ 1) || (orig_seq
+ 1 == src
->seqlo
) ||
6396 (pd
->flags
& PFDESC_IP_REAS
) == 0)) {
6397 /* Require an exact/+1 sequence match on resets when possible */
6399 if (dst
->scrub
|| src
->scrub
) {
6400 if (pf_normalize_tcp_stateful(m
, off
, pd
, reason
, th
,
6401 *state
, src
, dst
, ©back
))
6404 #ifndef NO_APPLE_EXTENSIONS
6409 /* update max window */
6410 if (src
->max_win
< win
)
6412 /* synchronize sequencing */
6413 if (SEQ_GT(end
, src
->seqlo
))
6415 /* slide the window of what the other end can send */
6416 #ifndef NO_APPLE_MODIFICATIONS
6417 if (SEQ_GEQ(ack
+ ((u_int32_t
)win
<< sws
), dst
->seqhi
))
6418 dst
->seqhi
= ack
+ MAX(((u_int32_t
)win
<< sws
), 1);
6420 if (SEQ_GEQ(ack
+ (win
<< sws
), dst
->seqhi
))
6421 dst
->seqhi
= ack
+ MAX((win
<< sws
), 1);
6425 if (th
->th_flags
& TH_SYN
)
6426 if (src
->state
< TCPS_SYN_SENT
)
6427 src
->state
= TCPS_SYN_SENT
;
6428 if (th
->th_flags
& TH_FIN
)
6429 if (src
->state
< TCPS_CLOSING
)
6430 src
->state
= TCPS_CLOSING
;
6431 if (th
->th_flags
& TH_ACK
) {
6432 if (dst
->state
== TCPS_SYN_SENT
) {
6433 dst
->state
= TCPS_ESTABLISHED
;
6434 if (src
->state
== TCPS_ESTABLISHED
&&
6435 (*state
)->src_node
!= NULL
&&
6436 pf_src_connlimit(state
)) {
6437 REASON_SET(reason
, PFRES_SRCLIMIT
);
6440 } else if (dst
->state
== TCPS_CLOSING
)
6441 dst
->state
= TCPS_FIN_WAIT_2
;
6443 if (th
->th_flags
& TH_RST
)
6444 src
->state
= dst
->state
= TCPS_TIME_WAIT
;
6446 /* update expire time */
6447 (*state
)->expire
= pf_time_second();
6448 if (src
->state
>= TCPS_FIN_WAIT_2
&&
6449 dst
->state
>= TCPS_FIN_WAIT_2
)
6450 (*state
)->timeout
= PFTM_TCP_CLOSED
;
6451 else if (src
->state
>= TCPS_CLOSING
&&
6452 dst
->state
>= TCPS_CLOSING
)
6453 (*state
)->timeout
= PFTM_TCP_FIN_WAIT
;
6454 else if (src
->state
< TCPS_ESTABLISHED
||
6455 dst
->state
< TCPS_ESTABLISHED
)
6456 (*state
)->timeout
= PFTM_TCP_OPENING
;
6457 else if (src
->state
>= TCPS_CLOSING
||
6458 dst
->state
>= TCPS_CLOSING
)
6459 (*state
)->timeout
= PFTM_TCP_CLOSING
;
6461 (*state
)->timeout
= PFTM_TCP_ESTABLISHED
;
6463 /* Fall through to PASS packet */
6465 } else if ((dst
->state
< TCPS_SYN_SENT
||
6466 dst
->state
>= TCPS_FIN_WAIT_2
|| src
->state
>= TCPS_FIN_WAIT_2
) &&
6467 SEQ_GEQ(src
->seqhi
+ MAXACKWINDOW
, end
) &&
6468 /* Within a window forward of the originating packet */
6469 SEQ_GEQ(seq
, src
->seqlo
- MAXACKWINDOW
)) {
6470 /* Within a window backward of the originating packet */
6473 * This currently handles three situations:
6474 * 1) Stupid stacks will shotgun SYNs before their peer
6476 * 2) When PF catches an already established stream (the
6477 * firewall rebooted, the state table was flushed, routes
6479 * 3) Packets get funky immediately after the connection
6480 * closes (this should catch Solaris spurious ACK|FINs
6481 * that web servers like to spew after a close)
6483 * This must be a little more careful than the above code
6484 * since packet floods will also be caught here. We don't
6485 * update the TTL here to mitigate the damage of a packet
6486 * flood and so the same code can handle awkward establishment
6487 * and a loosened connection close.
6488 * In the establishment case, a correct peer response will
6489 * validate the connection, go through the normal state code
6490 * and keep updating the state TTL.
6493 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
6494 printf("pf: loose state match: ");
6495 pf_print_state(*state
);
6496 pf_print_flags(th
->th_flags
);
6497 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6498 "pkts=%llu:%llu dir=%s,%s\n", seq
, orig_seq
, ack
,
6499 pd
->p_len
, ackskew
, (*state
)->packets
[0],
6500 (*state
)->packets
[1],
6501 direction
== PF_IN
? "in" : "out",
6502 direction
== (*state
)->state_key
->direction
?
6506 if (dst
->scrub
|| src
->scrub
) {
6507 if (pf_normalize_tcp_stateful(m
, off
, pd
, reason
, th
,
6508 *state
, src
, dst
, ©back
))
6510 #ifndef NO_APPLE_EXTENSIONS
6515 /* update max window */
6516 if (src
->max_win
< win
)
6518 /* synchronize sequencing */
6519 if (SEQ_GT(end
, src
->seqlo
))
6521 /* slide the window of what the other end can send */
6522 #ifndef NO_APPLE_MODIFICATIONS
6523 if (SEQ_GEQ(ack
+ ((u_int32_t
)win
<< sws
), dst
->seqhi
))
6524 dst
->seqhi
= ack
+ MAX(((u_int32_t
)win
<< sws
), 1);
6526 if (SEQ_GEQ(ack
+ (win
<< sws
), dst
->seqhi
))
6527 dst
->seqhi
= ack
+ MAX((win
<< sws
), 1);
6531 * Cannot set dst->seqhi here since this could be a shotgunned
6532 * SYN and not an already established connection.
6535 if (th
->th_flags
& TH_FIN
)
6536 if (src
->state
< TCPS_CLOSING
)
6537 src
->state
= TCPS_CLOSING
;
6538 if (th
->th_flags
& TH_RST
)
6539 src
->state
= dst
->state
= TCPS_TIME_WAIT
;
6541 /* Fall through to PASS packet */
6544 if ((*state
)->dst
.state
== TCPS_SYN_SENT
&&
6545 (*state
)->src
.state
== TCPS_SYN_SENT
) {
6546 /* Send RST for state mismatches during handshake */
6547 if (!(th
->th_flags
& TH_RST
))
6548 pf_send_tcp((*state
)->rule
.ptr
, pd
->af
,
6549 pd
->dst
, pd
->src
, th
->th_dport
,
6550 th
->th_sport
, ntohl(th
->th_ack
), 0,
6552 (*state
)->rule
.ptr
->return_ttl
, 1, 0,
6553 pd
->eh
, kif
->pfik_ifp
);
6557 } else if (pf_status
.debug
>= PF_DEBUG_MISC
) {
6558 printf("pf: BAD state: ");
6559 pf_print_state(*state
);
6560 pf_print_flags(th
->th_flags
);
6561 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6562 "pkts=%llu:%llu dir=%s,%s\n",
6563 seq
, orig_seq
, ack
, pd
->p_len
, ackskew
,
6564 (*state
)->packets
[0], (*state
)->packets
[1],
6565 direction
== PF_IN
? "in" : "out",
6566 direction
== (*state
)->state_key
->direction
?
6568 printf("pf: State failure on: %c %c %c %c | %c %c\n",
6569 SEQ_GEQ(src
->seqhi
, end
) ? ' ' : '1',
6570 #ifndef NO_APPLE_MODIFICATIONS
6572 src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
)) ?
6574 SEQ_GEQ(seq
, src
->seqlo
- (dst
->max_win
<< dws
)) ?
6577 (ackskew
>= -MAXACKWINDOW
) ? ' ' : '3',
6578 (ackskew
<= (MAXACKWINDOW
<< sws
)) ? ' ' : '4',
6579 SEQ_GEQ(src
->seqhi
+ MAXACKWINDOW
, end
) ?' ' :'5',
6580 SEQ_GEQ(seq
, src
->seqlo
- MAXACKWINDOW
) ?' ' :'6');
6582 REASON_SET(reason
, PFRES_BADSTATE
);
6586 /* Any packets which have gotten here are to be passed */
6588 #ifndef NO_APPLE_EXTENSIONS
6589 if ((*state
)->state_key
->app_state
&&
6590 (*state
)->state_key
->app_state
->handler
) {
6591 (*state
)->state_key
->app_state
->handler(*state
, direction
,
6592 off
+ (th
->th_off
<< 2), pd
, kif
);
6594 REASON_SET(reason
, PFRES_MEMORY
);
6600 /* translate source/destination address, if necessary */
6601 if (STATE_TRANSLATE((*state
)->state_key
)) {
6602 if (direction
== PF_OUT
)
6603 pf_change_ap(direction
, pd
->mp
, pd
->src
, &th
->th_sport
,
6604 pd
->ip_sum
, &th
->th_sum
,
6605 &(*state
)->state_key
->gwy
.addr
,
6606 (*state
)->state_key
->gwy
.xport
.port
, 0, pd
->af
);
6608 pf_change_ap(direction
, pd
->mp
, pd
->dst
, &th
->th_dport
,
6609 pd
->ip_sum
, &th
->th_sum
,
6610 &(*state
)->state_key
->lan
.addr
,
6611 (*state
)->state_key
->lan
.xport
.port
, 0, pd
->af
);
6612 copyback
= off
+ sizeof (*th
);
6616 m
= pf_lazy_makewritable(pd
, m
, copyback
);
6618 REASON_SET(reason
, PFRES_MEMORY
);
6622 /* Copyback sequence modulation or stateful scrub changes */
6623 m_copyback(m
, off
, sizeof (*th
), th
);
6626 /* translate source/destination address, if necessary */
6627 if (STATE_TRANSLATE((*state
)->state_key
)) {
6628 if (direction
== PF_OUT
)
6629 pf_change_ap(pd
->src
, pd
->mp
, &th
->th_sport
, pd
->ip_sum
,
6630 &th
->th_sum
, &(*state
)->state_key
->gwy
.addr
,
6631 (*state
)->state_key
->gwy
.port
, 0, pd
->af
);
6633 pf_change_ap(pd
->dst
, pd
->mp
, &th
->th_dport
, pd
->ip_sum
,
6634 &th
->th_sum
, &(*state
)->state_key
->lan
.addr
,
6635 (*state
)->state_key
->lan
.port
, 0, pd
->af
);
6636 m_copyback(m
, off
, sizeof (*th
), th
);
6637 } else if (copyback
) {
6638 /* Copyback sequence modulation or stateful scrub changes */
6639 m_copyback(m
, off
, sizeof (*th
), th
);
6646 #ifndef NO_APPLE_EXTENSIONS
6648 pf_test_state_udp(struct pf_state
**state
, int direction
, struct pfi_kif
*kif
,
6649 struct mbuf
*m
, int off
, void *h
, struct pf_pdesc
*pd
, u_short
*reason
)
6651 pf_test_state_udp(struct pf_state
**state
, int direction
, struct pfi_kif
*kif
,
6652 struct mbuf
*m
, int off
, void *h
, struct pf_pdesc
*pd
)
6656 struct pf_state_peer
*src
, *dst
;
6657 struct pf_state_key_cmp key
;
6658 struct udphdr
*uh
= pd
->hdr
.udp
;
6659 #ifndef NO_APPLE_EXTENSIONS
6660 struct pf_app_state as
;
6661 int dx
, action
, extfilter
;
6663 key
.proto_variant
= PF_EXTFILTER_APD
;
6667 key
.proto
= IPPROTO_UDP
;
6668 if (direction
== PF_IN
) {
6669 PF_ACPY(&key
.ext
.addr
, pd
->src
, key
.af
);
6670 PF_ACPY(&key
.gwy
.addr
, pd
->dst
, key
.af
);
6671 #ifndef NO_APPLE_EXTENSIONS
6672 key
.ext
.xport
.port
= uh
->uh_sport
;
6673 key
.gwy
.xport
.port
= uh
->uh_dport
;
6676 key
.ext
.port
= uh
->uh_sport
;
6677 key
.gwy
.port
= uh
->uh_dport
;
6680 PF_ACPY(&key
.lan
.addr
, pd
->src
, key
.af
);
6681 PF_ACPY(&key
.ext
.addr
, pd
->dst
, key
.af
);
6682 #ifndef NO_APPLE_EXTENSIONS
6683 key
.lan
.xport
.port
= uh
->uh_sport
;
6684 key
.ext
.xport
.port
= uh
->uh_dport
;
6687 key
.lan
.port
= uh
->uh_sport
;
6688 key
.ext
.port
= uh
->uh_dport
;
#ifndef NO_APPLE_EXTENSIONS
	if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
	    ntohs(uh->uh_dport) == PF_IKE_PORT) {
		struct pf_ike_hdr ike;
		size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
		if (plen < PF_IKE_PACKET_MINSIZE) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: IKE message too small.\n"));
		if (plen > sizeof (ike))
			plen = sizeof (ike);
		m_copydata(m, off + sizeof (*uh), plen, &ike);

		if (ike.initiator_cookie) {
			key.app_state = &as;
			as.compare_lan_ext = pf_ike_compare;
			as.compare_ext_gwy = pf_ike_compare;
			as.u.ike.cookie = ike.initiator_cookie;
		} else {
			/*
			 * <http://tools.ietf.org/html/\
			 * draft-ietf-ipsec-nat-t-ike-01>
			 * Support non-standard NAT-T implementations that
			 * push the ESP packet over the top of the IKE packet.
			 * Do not drop packet.
			 */
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: IKE initiator cookie = 0.\n"));
	*state = pf_find_state(kif, &key, dx);
6727 if (!key
.app_state
&& *state
== 0) {
6728 key
.proto_variant
= PF_EXTFILTER_AD
;
6729 *state
= pf_find_state(kif
, &key
, dx
);
6732 if (!key
.app_state
&& *state
== 0) {
6733 key
.proto_variant
= PF_EXTFILTER_EI
;
6734 *state
= pf_find_state(kif
, &key
, dx
);
6737 if (pf_state_lookup_aux(state
, kif
, direction
, &action
))
6743 if (direction
== (*state
)->state_key
->direction
) {
6744 src
= &(*state
)->src
;
6745 dst
= &(*state
)->dst
;
6747 src
= &(*state
)->dst
;
6748 dst
= &(*state
)->src
;
6752 if (src
->state
< PFUDPS_SINGLE
)
6753 src
->state
= PFUDPS_SINGLE
;
6754 if (dst
->state
== PFUDPS_SINGLE
)
6755 dst
->state
= PFUDPS_MULTIPLE
;
6757 /* update expire time */
6758 (*state
)->expire
= pf_time_second();
6759 if (src
->state
== PFUDPS_MULTIPLE
&& dst
->state
== PFUDPS_MULTIPLE
)
6760 (*state
)->timeout
= PFTM_UDP_MULTIPLE
;
6762 (*state
)->timeout
= PFTM_UDP_SINGLE
;
6764 #ifndef NO_APPLE_EXTENSIONS
6765 extfilter
= (*state
)->state_key
->proto_variant
;
6766 if (extfilter
> PF_EXTFILTER_APD
) {
6767 (*state
)->state_key
->ext
.xport
.port
= key
.ext
.xport
.port
;
6768 if (extfilter
> PF_EXTFILTER_AD
)
6769 PF_ACPY(&(*state
)->state_key
->ext
.addr
,
6770 &key
.ext
.addr
, key
.af
);
6773 if ((*state
)->state_key
->app_state
&&
6774 (*state
)->state_key
->app_state
->handler
) {
6775 (*state
)->state_key
->app_state
->handler(*state
, direction
,
6776 off
+ uh
->uh_ulen
, pd
, kif
);
6778 REASON_SET(reason
, PFRES_MEMORY
);
6784 /* translate source/destination address, if necessary */
6785 if (STATE_TRANSLATE((*state
)->state_key
)) {
6786 m
= pf_lazy_makewritable(pd
, m
, off
+ sizeof (*uh
));
6788 REASON_SET(reason
, PFRES_MEMORY
);
6792 if (direction
== PF_OUT
)
6793 pf_change_ap(direction
, pd
->mp
, pd
->src
, &uh
->uh_sport
,
6794 pd
->ip_sum
, &uh
->uh_sum
,
6795 &(*state
)->state_key
->gwy
.addr
,
6796 (*state
)->state_key
->gwy
.xport
.port
, 1, pd
->af
);
6798 pf_change_ap(direction
, pd
->mp
, pd
->dst
, &uh
->uh_dport
,
6799 pd
->ip_sum
, &uh
->uh_sum
,
6800 &(*state
)->state_key
->lan
.addr
,
6801 (*state
)->state_key
->lan
.xport
.port
, 1, pd
->af
);
6802 m_copyback(m
, off
, sizeof (*uh
), uh
);
6805 /* translate source/destination address, if necessary */
6806 if (STATE_TRANSLATE((*state
)->state_key
)) {
6807 if (direction
== PF_OUT
)
6808 pf_change_ap(pd
->src
, &uh
->uh_sport
, pd
->ip_sum
,
6809 &uh
->uh_sum
, &(*state
)->state_key
->gwy
.addr
,
6810 (*state
)->state_key
->gwy
.port
, 1, pd
->af
);
6812 pf_change_ap(pd
->dst
, &uh
->uh_dport
, pd
->ip_sum
,
6813 &uh
->uh_sum
, &(*state
)->state_key
->lan
.addr
,
6814 (*state
)->state_key
->lan
.port
, 1, pd
->af
);
6815 m_copyback(m
, off
, sizeof (*uh
), uh
);
6823 pf_test_state_icmp(struct pf_state
**state
, int direction
, struct pfi_kif
*kif
,
6824 struct mbuf
*m
, int off
, void *h
, struct pf_pdesc
*pd
, u_short
*reason
)
6827 struct pf_addr
*saddr
= pd
->src
, *daddr
= pd
->dst
;
6828 u_int16_t icmpid
= 0, *icmpsum
;
6831 struct pf_state_key_cmp key
;
6833 #ifndef NO_APPLE_EXTENSIONS
6834 struct pf_app_state as
;
6838 switch (pd
->proto
) {
6841 icmptype
= pd
->hdr
.icmp
->icmp_type
;
6842 icmpid
= pd
->hdr
.icmp
->icmp_id
;
6843 icmpsum
= &pd
->hdr
.icmp
->icmp_cksum
;
6845 if (icmptype
== ICMP_UNREACH
||
6846 icmptype
== ICMP_SOURCEQUENCH
||
6847 icmptype
== ICMP_REDIRECT
||
6848 icmptype
== ICMP_TIMXCEED
||
6849 icmptype
== ICMP_PARAMPROB
)
6854 case IPPROTO_ICMPV6
:
6855 icmptype
= pd
->hdr
.icmp6
->icmp6_type
;
6856 icmpid
= pd
->hdr
.icmp6
->icmp6_id
;
6857 icmpsum
= &pd
->hdr
.icmp6
->icmp6_cksum
;
6859 if (icmptype
== ICMP6_DST_UNREACH
||
6860 icmptype
== ICMP6_PACKET_TOO_BIG
||
6861 icmptype
== ICMP6_TIME_EXCEEDED
||
6862 icmptype
== ICMP6_PARAM_PROB
)
6871 * ICMP query/reply message not related to a TCP/UDP packet.
6872 * Search for an ICMP state.
6875 key
.proto
= pd
->proto
;
6876 if (direction
== PF_IN
) {
6877 PF_ACPY(&key
.ext
.addr
, pd
->src
, key
.af
);
6878 PF_ACPY(&key
.gwy
.addr
, pd
->dst
, key
.af
);
6879 #ifndef NO_APPLE_EXTENSIONS
6880 key
.ext
.xport
.port
= 0;
6881 key
.gwy
.xport
.port
= icmpid
;
6884 key
.gwy
.port
= icmpid
;
6887 PF_ACPY(&key
.lan
.addr
, pd
->src
, key
.af
);
6888 PF_ACPY(&key
.ext
.addr
, pd
->dst
, key
.af
);
6889 #ifndef NO_APPLE_EXTENSIONS
6890 key
.lan
.xport
.port
= icmpid
;
6891 key
.ext
.xport
.port
= 0;
6893 key
.lan
.port
= icmpid
;
6900 (*state
)->expire
= pf_time_second();
6901 (*state
)->timeout
= PFTM_ICMP_ERROR_REPLY
;
6903 /* translate source/destination address, if necessary */
6904 if (STATE_TRANSLATE((*state
)->state_key
)) {
6905 if (direction
== PF_OUT
) {
6909 pf_change_a(&saddr
->v4
.s_addr
,
6911 (*state
)->state_key
->gwy
.addr
.v4
.s_addr
, 0);
6912 #ifndef NO_APPLE_EXTENSIONS
6913 pd
->hdr
.icmp
->icmp_cksum
=
6915 pd
->hdr
.icmp
->icmp_cksum
, icmpid
,
6916 (*state
)->state_key
->gwy
.xport
.port
, 0);
6917 pd
->hdr
.icmp
->icmp_id
=
6918 (*state
)->state_key
->gwy
.xport
.port
;
6919 m
= pf_lazy_makewritable(pd
, m
,
6924 pd
->hdr
.icmp
->icmp_cksum
=
6926 pd
->hdr
.icmp
->icmp_cksum
, icmpid
,
6927 (*state
)->state_key
->gwy
.port
, 0);
6928 pd
->hdr
.icmp
->icmp_id
=
6929 (*state
)->state_key
->gwy
.port
;
6931 m_copyback(m
, off
, ICMP_MINLEN
,
6938 &pd
->hdr
.icmp6
->icmp6_cksum
,
6939 &(*state
)->state_key
->gwy
.addr
, 0);
6940 #ifndef NO_APPLE_EXTENSIONS
6941 m
= pf_lazy_makewritable(pd
, m
,
6942 off
+ sizeof (struct icmp6_hdr
));
6947 sizeof (struct icmp6_hdr
),
6956 pf_change_a(&daddr
->v4
.s_addr
,
6958 (*state
)->state_key
->lan
.addr
.v4
.s_addr
, 0);
6959 #ifndef NO_APPLE_EXTENSIONS
6960 pd
->hdr
.icmp
->icmp_cksum
=
6962 pd
->hdr
.icmp
->icmp_cksum
, icmpid
,
6963 (*state
)->state_key
->lan
.xport
.port
, 0);
6964 pd
->hdr
.icmp
->icmp_id
=
6965 (*state
)->state_key
->lan
.xport
.port
;
6966 m
= pf_lazy_makewritable(pd
, m
,
6971 pd
->hdr
.icmp
->icmp_cksum
=
6973 pd
->hdr
.icmp
->icmp_cksum
, icmpid
,
6974 (*state
)->state_key
->lan
.port
, 0);
6975 pd
->hdr
.icmp
->icmp_id
=
6976 (*state
)->state_key
->lan
.port
;
6978 m_copyback(m
, off
, ICMP_MINLEN
,
6985 &pd
->hdr
.icmp6
->icmp6_cksum
,
6986 &(*state
)->state_key
->lan
.addr
, 0);
6987 #ifndef NO_APPLE_EXTENSIONS
6988 m
= pf_lazy_makewritable(pd
, m
,
6989 off
+ sizeof (struct icmp6_hdr
));
6994 sizeof (struct icmp6_hdr
),
7006 * ICMP error message in response to a TCP/UDP packet.
7007 * Extract the inner TCP/UDP header and search for that state.
7010 struct pf_pdesc pd2
;
7015 struct ip6_hdr h2_6
;
7021 memset(&pd2
, 0, sizeof (pd2
));
7027 /* offset of h2 in mbuf chain */
7028 ipoff2
= off
+ ICMP_MINLEN
;
7030 if (!pf_pull_hdr(m
, ipoff2
, &h2
, sizeof (h2
),
7031 NULL
, reason
, pd2
.af
)) {
7032 DPFPRINTF(PF_DEBUG_MISC
,
7033 ("pf: ICMP error message too short "
7038 * ICMP error messages don't refer to non-first
7041 if (h2
.ip_off
& htons(IP_OFFMASK
)) {
7042 REASON_SET(reason
, PFRES_FRAG
);
7046 /* offset of protocol header that follows h2 */
7047 off2
= ipoff2
+ (h2
.ip_hl
<< 2);
7049 pd2
.proto
= h2
.ip_p
;
7050 pd2
.src
= (struct pf_addr
*)&h2
.ip_src
;
7051 pd2
.dst
= (struct pf_addr
*)&h2
.ip_dst
;
7052 pd2
.ip_sum
= &h2
.ip_sum
;
7057 ipoff2
= off
+ sizeof (struct icmp6_hdr
);
7059 if (!pf_pull_hdr(m
, ipoff2
, &h2_6
, sizeof (h2_6
),
7060 NULL
, reason
, pd2
.af
)) {
7061 DPFPRINTF(PF_DEBUG_MISC
,
7062 ("pf: ICMP error message too short "
7066 pd2
.proto
= h2_6
.ip6_nxt
;
7067 pd2
.src
= (struct pf_addr
*)&h2_6
.ip6_src
;
7068 pd2
.dst
= (struct pf_addr
*)&h2_6
.ip6_dst
;
7070 off2
= ipoff2
+ sizeof (h2_6
);
7072 switch (pd2
.proto
) {
7073 case IPPROTO_FRAGMENT
:
7075 * ICMPv6 error messages for
7076 * non-first fragments
7078 REASON_SET(reason
, PFRES_FRAG
);
7081 case IPPROTO_HOPOPTS
:
7082 case IPPROTO_ROUTING
:
7083 case IPPROTO_DSTOPTS
: {
7084 /* get next header and header length */
7085 struct ip6_ext opt6
;
7087 if (!pf_pull_hdr(m
, off2
, &opt6
,
7088 sizeof (opt6
), NULL
, reason
,
7090 DPFPRINTF(PF_DEBUG_MISC
,
7091 ("pf: ICMPv6 short opt\n"));
7094 if (pd2
.proto
== IPPROTO_AH
)
7095 off2
+= (opt6
.ip6e_len
+ 2) * 4;
7097 off2
+= (opt6
.ip6e_len
+ 1) * 8;
7098 pd2
.proto
= opt6
.ip6e_nxt
;
7099 /* goto the next header */
7106 } while (!terminal
);
	switch (pd2.proto) {
		struct pf_state_peer *src, *dst;

		/*
		 * Only the first 8 bytes of the TCP header can be
		 * expected. Don't access any TCP header fields after
		 * th_seq, an ackskew test is not possible.
		 */
		if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: ICMP error message too short "

		key.proto = IPPROTO_TCP;
		if (direction == PF_IN) {
			PF_ACPY(&key.ext.addr, pd2.dst, key.af);
			PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
			key.ext.xport.port = th.th_dport;
			key.gwy.xport.port = th.th_sport;
			key.ext.port = th.th_dport;
			key.gwy.port = th.th_sport;
			PF_ACPY(&key.lan.addr, pd2.dst, key.af);
			PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
			key.lan.xport.port = th.th_dport;
			key.ext.xport.port = th.th_sport;
			key.lan.port = th.th_dport;
			key.ext.port = th.th_sport;
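			/*
			 * Illustrative note (not from the original source):
			 * the quoted TCP header describes the packet this
			 * host originally sent, i.e. it travels opposite to
			 * the ICMP error itself.  For an inbound error about
			 * an outbound connection gwy:5000 -> ext:80, the
			 * embedded header carries sport 5000 / dport 80, so
			 * the key takes gwy.xport.port from th.th_sport and
			 * ext.xport.port from th.th_dport -- the mirror of a
			 * normal inbound TCP lookup.
			 */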
7158 if (direction
== (*state
)->state_key
->direction
) {
7159 src
= &(*state
)->dst
;
7160 dst
= &(*state
)->src
;
7162 src
= &(*state
)->src
;
7163 dst
= &(*state
)->dst
;
7166 if (src
->wscale
&& dst
->wscale
)
7167 dws
= dst
->wscale
& PF_WSCALE_MASK
;
7171 /* Demodulate sequence number */
7172 seq
= ntohl(th
.th_seq
) - src
->seqdiff
;
7174 pf_change_a(&th
.th_seq
, icmpsum
,
7179 if (!SEQ_GEQ(src
->seqhi
, seq
) ||
7180 #ifndef NO_APPLE_MODIFICATION
7182 src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
))) {
7184 !SEQ_GEQ(seq
, src
->seqlo
- (dst
->max_win
<< dws
))) {
7186 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
7187 printf("pf: BAD ICMP %d:%d ",
7188 icmptype
, pd
->hdr
.icmp
->icmp_code
);
7189 pf_print_host(pd
->src
, 0, pd
->af
);
7191 pf_print_host(pd
->dst
, 0, pd
->af
);
7193 pf_print_state(*state
);
7194 printf(" seq=%u\n", seq
);
7196 REASON_SET(reason
, PFRES_BADSTATE
);
7200 if (STATE_TRANSLATE((*state
)->state_key
)) {
7201 if (direction
== PF_IN
) {
7202 pf_change_icmp(pd2
.src
, &th
.th_sport
,
7203 daddr
, &(*state
)->state_key
->lan
.addr
,
7204 #ifndef NO_APPLE_EXTENSIONS
7205 (*state
)->state_key
->lan
.xport
.port
, NULL
,
7207 (*state
)->state_key
->lan
.port
, NULL
,
7209 pd2
.ip_sum
, icmpsum
,
7210 pd
->ip_sum
, 0, pd2
.af
);
7212 pf_change_icmp(pd2
.dst
, &th
.th_dport
,
7213 saddr
, &(*state
)->state_key
->gwy
.addr
,
7214 #ifndef NO_APPLE_EXTENSIONS
7215 (*state
)->state_key
->gwy
.xport
.port
, NULL
,
7217 (*state
)->state_key
->gwy
.port
, NULL
,
7219 pd2
.ip_sum
, icmpsum
,
7220 pd
->ip_sum
, 0, pd2
.af
);
7226 #ifndef NO_APPLE_EXTENSIONS
7227 m
= pf_lazy_makewritable(pd
, m
, off2
+ 8);
7234 m_copyback(m
, off
, ICMP_MINLEN
,
7236 m_copyback(m
, ipoff2
, sizeof (h2
),
7243 sizeof (struct icmp6_hdr
),
7245 m_copyback(m
, ipoff2
, sizeof (h2_6
),
7250 m_copyback(m
, off2
, 8, &th
);
7258 #ifndef NO_APPLE_EXTENSIONS
7261 if (!pf_pull_hdr(m
, off2
, &uh
, sizeof (uh
),
7262 NULL
, reason
, pd2
.af
)) {
7263 DPFPRINTF(PF_DEBUG_MISC
,
7264 ("pf: ICMP error message too short "
7270 key
.proto
= IPPROTO_UDP
;
7271 if (direction
== PF_IN
) {
7272 PF_ACPY(&key
.ext
.addr
, pd2
.dst
, key
.af
);
7273 PF_ACPY(&key
.gwy
.addr
, pd2
.src
, key
.af
);
7274 #ifndef NO_APPLE_EXTENSIONS
7275 key
.ext
.xport
.port
= uh
.uh_dport
;
7276 key
.gwy
.xport
.port
= uh
.uh_sport
;
7279 key
.ext
.port
= uh
.uh_dport
;
7280 key
.gwy
.port
= uh
.uh_sport
;
7283 PF_ACPY(&key
.lan
.addr
, pd2
.dst
, key
.af
);
7284 PF_ACPY(&key
.ext
.addr
, pd2
.src
, key
.af
);
7285 #ifndef NO_APPLE_EXTENSIONS
7286 key
.lan
.xport
.port
= uh
.uh_dport
;
7287 key
.ext
.xport
.port
= uh
.uh_sport
;
7290 key
.lan
.port
= uh
.uh_dport
;
7291 key
.ext
.port
= uh
.uh_sport
;
7295 #ifndef NO_APPLE_EXTENSIONS
7296 key
.proto_variant
= PF_EXTFILTER_APD
;
7298 if (ntohs(uh
.uh_sport
) == PF_IKE_PORT
&&
7299 ntohs(uh
.uh_dport
) == PF_IKE_PORT
) {
7300 struct pf_ike_hdr ike
;
7302 m
->m_pkthdr
.len
- off2
- sizeof (uh
);
7303 if (direction
== PF_IN
&&
7304 plen
< 8 /* PF_IKE_PACKET_MINSIZE */) {
7305 DPFPRINTF(PF_DEBUG_MISC
, ("pf: "
7306 "ICMP error, embedded IKE message "
7311 if (plen
> sizeof (ike
))
7312 plen
= sizeof (ike
);
7313 m_copydata(m
, off
+ sizeof (uh
), plen
, &ike
);
7315 key
.app_state
= &as
;
7316 as
.compare_lan_ext
= pf_ike_compare
;
7317 as
.compare_ext_gwy
= pf_ike_compare
;
7318 as
.u
.ike
.cookie
= ike
.initiator_cookie
;
7321 *state
= pf_find_state(kif
, &key
, dx
);
7323 if (key
.app_state
&& *state
== 0) {
7325 *state
= pf_find_state(kif
, &key
, dx
);
7329 key
.proto_variant
= PF_EXTFILTER_AD
;
7330 *state
= pf_find_state(kif
, &key
, dx
);
7334 key
.proto_variant
= PF_EXTFILTER_EI
;
7335 *state
= pf_find_state(kif
, &key
, dx
);
7338 if (pf_state_lookup_aux(state
, kif
, direction
, &action
))
7344 if (STATE_TRANSLATE((*state
)->state_key
)) {
7345 if (direction
== PF_IN
) {
7346 pf_change_icmp(pd2
.src
, &uh
.uh_sport
,
7347 daddr
, &(*state
)->state_key
->lan
.addr
,
7348 #ifndef NO_APPLE_EXTENSIONS
7349 (*state
)->state_key
->lan
.xport
.port
, &uh
.uh_sum
,
7351 (*state
)->state_key
->lan
.port
, &uh
.uh_sum
,
7353 pd2
.ip_sum
, icmpsum
,
7354 pd
->ip_sum
, 1, pd2
.af
);
7356 pf_change_icmp(pd2
.dst
, &uh
.uh_dport
,
7357 saddr
, &(*state
)->state_key
->gwy
.addr
,
7358 #ifndef NO_APPLE_EXTENSIONS
7359 (*state
)->state_key
->gwy
.xport
.port
, &uh
.uh_sum
,
7361 (*state
)->state_key
->gwy
.port
, &uh
.uh_sum
,
7363 pd2
.ip_sum
, icmpsum
,
7364 pd
->ip_sum
, 1, pd2
.af
);
7366 #ifndef NO_APPLE_EXTENSIONS
7367 m
= pf_lazy_makewritable(pd
, m
,
7368 off2
+ sizeof (uh
));
7375 m_copyback(m
, off
, ICMP_MINLEN
,
7377 m_copyback(m
, ipoff2
, sizeof (h2
), &h2
);
7383 sizeof (struct icmp6_hdr
),
7385 m_copyback(m
, ipoff2
, sizeof (h2_6
),
7390 m_copyback(m
, off2
, sizeof (uh
), &uh
);
        case IPPROTO_ICMP: {
            if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short i"

            key.proto = IPPROTO_ICMP;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = 0;
                key.gwy.xport.port = iih.icmp_id;
#else
                key.gwy.port = iih.icmp_id;
#endif
            } else {
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = iih.icmp_id;
                key.ext.xport.port = 0;
#else
                key.lan.port = iih.icmp_id;
#endif
            }

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &iih.icmp_id,
                        daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->lan.xport.port, NULL,
#else
                        (*state)->state_key->lan.port, NULL,
#endif
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET);
                } else {
                    pf_change_icmp(pd2.dst, &iih.icmp_id,
                        saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->gwy.xport.port, NULL,
#else
                        (*state)->state_key->gwy.port, NULL,
#endif
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET);
                }
#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m, off2 + ICMP_MINLEN);
                m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
                m_copyback(m, ipoff2, sizeof (h2), &h2);
                m_copyback(m, off2, ICMP_MINLEN, &iih);
            }
        case IPPROTO_ICMPV6: {
            struct icmp6_hdr iih;

            if (!pf_pull_hdr(m, off2, &iih,
                sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "

            key.proto = IPPROTO_ICMPV6;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = 0;
                key.gwy.xport.port = iih.icmp6_id;
#else
                key.gwy.port = iih.icmp6_id;
#endif
            } else {
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = iih.icmp6_id;
                key.ext.xport.port = 0;
#else
                key.lan.port = iih.icmp6_id;
#endif
            }

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &iih.icmp6_id,
                        daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->lan.xport.port, NULL,
#else
                        (*state)->state_key->lan.port, NULL,
#endif
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET6);
                } else {
                    pf_change_icmp(pd2.dst, &iih.icmp6_id,
                        saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->gwy.xport.port, NULL,
#else
                        (*state)->state_key->gwy.port, NULL,
#endif
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET6);
                }
#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m, off2 +
                    sizeof (struct icmp6_hdr));
                m_copyback(m, off, sizeof (struct icmp6_hdr),
                m_copyback(m, ipoff2, sizeof (h2_6), &h2_6);
                m_copyback(m, off2, sizeof (struct icmp6_hdr),
                    &iih);
            }
            key.proto = pd2.proto;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = 0;
                key.gwy.xport.port = 0;
            } else {
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = 0;
                key.ext.xport.port = 0;
            }

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, NULL,
                        daddr, &(*state)->state_key->lan.addr,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                } else {
                    pf_change_icmp(pd2.dst, NULL,
                        saddr, &(*state)->state_key->gwy.addr,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                }

#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m,
                    ipoff2 + sizeof (h2));
                m_copyback(m, off, ICMP_MINLEN,
                    pd->hdr.icmp);
                m_copyback(m, ipoff2, sizeof (h2), &h2);

#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m,
                    ipoff2 + sizeof (h2_6));
                    sizeof (struct icmp6_hdr),
                m_copyback(m, ipoff2, sizeof (h2_6),
                    &h2_6);
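
/*
 * pf_test_state_grev1() is part of the Apple extensions: it looks up
 * GREv1/PPTP packets in the state table by call ID, advances the per-peer
 * state and expiry timers, and rewrites addresses and call IDs when the
 * state is subject to translation.
 */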
#ifndef NO_APPLE_EXTENSIONS
pf_test_state_grev1(struct pf_state **state, int direction,
    struct pfi_kif *kif, int off, struct pf_pdesc *pd)
{
    struct pf_state_peer *src;
    struct pf_state_peer *dst;
    struct pf_state_key_cmp key;
    struct pf_grev1_hdr *grev1 = pd->hdr.grev1;

    key.proto = IPPROTO_GRE;
    key.proto_variant = PF_GRE_PPTP_VARIANT;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
        key.gwy.xport.call_id = grev1->call_id;
    } else {
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
        key.ext.xport.call_id = grev1->call_id;
    }

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    if (src->state < PFGRE1S_INITIATING)
        src->state = PFGRE1S_INITIATING;

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state >= PFGRE1S_INITIATING &&
        dst->state >= PFGRE1S_INITIATING) {
        if ((*state)->timeout != PFTM_TCP_ESTABLISHED)
            (*state)->timeout = PFTM_GREv1_ESTABLISHED;
        src->state = PFGRE1S_ESTABLISHED;
        dst->state = PFGRE1S_ESTABLISHED;
    } else {
        (*state)->timeout = PFTM_GREv1_INITIATING;
    }

    if ((*state)->state_key->app_state)
        (*state)->state_key->app_state->u.grev1.pptp_state->expire =
            pf_time_second();

    /* translate source/destination address, if necessary */
    if (STATE_GRE_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT) {
            pf_change_a(&pd->src->v4.s_addr,
                pd->ip_sum,
                (*state)->state_key->gwy.addr.v4.s_addr, 0);

            PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
                pd->af);
        } else {
            grev1->call_id = (*state)->state_key->lan.xport.call_id;

            pf_change_a(&pd->dst->v4.s_addr,
                pd->ip_sum,
                (*state)->state_key->lan.addr.v4.s_addr, 0);

            PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
                pd->af);
        }

        m = pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1));
        m_copyback(m, off, sizeof (*grev1), grev1);
    }
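
/*
 * pf_test_state_esp() matches ESP packets by SPI. If no exact match is
 * found it looks for a "blocking" state keyed to SPI 0, re-keys that state
 * to the SPI seen in the current packet, and tears down any state that
 * would collide after the re-insert.
 */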
pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
    int off, struct pf_pdesc *pd)
{
    struct pf_state_peer *src;
    struct pf_state_peer *dst;
    struct pf_state_key_cmp key;
    struct pf_esp_hdr *esp = pd->hdr.esp;

    memset(&key, 0, sizeof (key));
    key.proto = IPPROTO_ESP;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
        key.gwy.xport.spi = esp->spi;
    } else {
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
        key.ext.xport.spi = esp->spi;
    }

    *state = pf_find_state(kif, &key, direction);

        /*
         * No matching state. Look for a blocking state. If we find
         * one, then use that state and move it so that it's keyed to
         * the SPI in the current packet.
         */
        if (direction == PF_IN) {
            key.gwy.xport.spi = 0;

            s = pf_find_state(kif, &key, direction);
                struct pf_state_key *sk = s->state_key;

                RB_REMOVE(pf_state_tree_ext_gwy,
                    &pf_statetbl_ext_gwy, sk);
                sk->lan.xport.spi = sk->gwy.xport.spi =
                    esp->spi;

                if (RB_INSERT(pf_state_tree_ext_gwy,
                    &pf_statetbl_ext_gwy, sk))
                    pf_detach_state(s, PF_DT_SKIP_EXTGWY);
        } else {
            key.ext.xport.spi = 0;

            s = pf_find_state(kif, &key, direction);
                struct pf_state_key *sk = s->state_key;

                RB_REMOVE(pf_state_tree_lan_ext,
                    &pf_statetbl_lan_ext, sk);
                sk->ext.xport.spi = esp->spi;

                if (RB_INSERT(pf_state_tree_lan_ext,
                    &pf_statetbl_lan_ext, sk))
                    pf_detach_state(s, PF_DT_SKIP_LANEXT);
        }

            if (s->creatorid == pf_status.hostid)
                pfsync_delete_state(s);

            s->timeout = PFTM_UNLINKED;
            hook_runloop(&s->unlink_hooks,
                HOOK_REMOVE|HOOK_FREE);
            pf_src_tree_remove_state(s);

    if (pf_state_lookup_aux(state, kif, direction, &action))

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    if (src->state < PFESPS_INITIATING)
        src->state = PFESPS_INITIATING;

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state >= PFESPS_INITIATING &&
        dst->state >= PFESPS_INITIATING) {
        (*state)->timeout = PFTM_ESP_ESTABLISHED;
        src->state = PFESPS_ESTABLISHED;
        dst->state = PFESPS_ESTABLISHED;
    } else {
        (*state)->timeout = PFTM_ESP_INITIATING;
    }

    /* translate source/destination address, if necessary */
    if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT) {
            pf_change_a(&pd->src->v4.s_addr,
                pd->ip_sum,
                (*state)->state_key->gwy.addr.v4.s_addr, 0);

            PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
                pd->af);
        } else {
            pf_change_a(&pd->dst->v4.s_addr,
                pd->ip_sum,
                (*state)->state_key->lan.addr.v4.s_addr, 0);

            PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
                pd->af);
        }
    }
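
/*
 * pf_test_state_other() handles protocols with no dedicated state logic:
 * the lookup is keyed on addresses only (ports zeroed), peers are tracked
 * as SINGLE vs. MULTIPLE to pick the timeout, and addresses are rewritten
 * when the state calls for translation.
 */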
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct pf_pdesc *pd)
{
    struct pf_state_peer *src, *dst;
    struct pf_state_key_cmp key;

#ifndef NO_APPLE_EXTENSIONS
    key.proto = pd->proto;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.ext.xport.port = 0;
        key.gwy.xport.port = 0;
    } else {
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.lan.xport.port = 0;
        key.ext.xport.port = 0;
    }

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    if (src->state < PFOTHERS_SINGLE)
        src->state = PFOTHERS_SINGLE;
    if (dst->state == PFOTHERS_SINGLE)
        dst->state = PFOTHERS_MULTIPLE;

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
        (*state)->timeout = PFTM_OTHER_MULTIPLE;
    else
        (*state)->timeout = PFTM_OTHER_SINGLE;

    /* translate source/destination address, if necessary */
#ifndef NO_APPLE_EXTENSIONS
    if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
#else
    if (STATE_TRANSLATE((*state)->state_key)) {
#endif
        if (direction == PF_OUT) {
            pf_change_a(&pd->src->v4.s_addr,
                pd->ip_sum,
                (*state)->state_key->gwy.addr.v4.s_addr,
                0);

            PF_ACPY(pd->src,
                &(*state)->state_key->gwy.addr, pd->af);
        } else {
            pf_change_a(&pd->dst->v4.s_addr,
                pd->ip_sum,
                (*state)->state_key->lan.addr.v4.s_addr,
                0);

            PF_ACPY(pd->dst,
                &(*state)->state_key->lan.addr, pd->af);
        }
    }
/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
    u_short *actionp, u_short *reasonp, sa_family_t af)
{
        struct ip *h = mtod(m, struct ip *);
        u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

            if (fragoff >= len) {
                ACTION_SET(actionp, PF_PASS);
            } else {
                ACTION_SET(actionp, PF_DROP);
                REASON_SET(reasonp, PFRES_FRAG);
            }
        if (m->m_pkthdr.len < off + len ||
            ntohs(h->ip_len) < off + len) {
            ACTION_SET(actionp, PF_DROP);
            REASON_SET(reasonp, PFRES_SHORT);

        struct ip6_hdr *h = mtod(m, struct ip6_hdr *);

        if (m->m_pkthdr.len < off + len ||
            (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) <
            (unsigned)(off + len)) {
            ACTION_SET(actionp, PF_DROP);
            REASON_SET(reasonp, PFRES_SHORT);

    m_copydata(m, off, len, p);
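
/*
 * pf_routable() reports whether a route to the given address exists; the
 * check is skipped for IPsec (IFT_ENC) interfaces.
 */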
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
{
    struct sockaddr_in *dst;
    struct sockaddr_in6 *dst6;
    struct route_in6 ro;

    bzero(&ro, sizeof (ro));
        dst = satosin(&ro.ro_dst);
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof (*dst);
        dst->sin_addr = addr->v4;

        dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
        dst6->sin6_family = AF_INET6;
        dst6->sin6_len = sizeof (*dst6);
        dst6->sin6_addr = addr->v6;

    /* XXX: IFT_ENC is not currently used by anything*/
    /* Skip checks for ipsec interfaces */
    if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)

    rtalloc((struct route *)&ro);

    if (ro.ro_rt != NULL)
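
/*
 * pf_rtlabel_match() performs a route lookup for the address; on this
 * platform it appears to reduce to checking that a route exists, since
 * route labels from the OpenBSD original are not used here.
 */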
pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
{
    struct sockaddr_in *dst;
    struct sockaddr_in6 *dst6;
    struct route_in6 ro;

    bzero(&ro, sizeof (ro));
        dst = satosin(&ro.ro_dst);
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof (*dst);
        dst->sin_addr = addr->v4;

        dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
        dst6->sin6_family = AF_INET6;
        dst6->sin6_len = sizeof (*dst6);
        dst6->sin6_addr = addr->v6;

    rtalloc((struct route *)&ro);

    if (ro.ro_rt != NULL) {
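
/*
 * pf_route() implements route-to/reply-to/dup-to for IPv4: it picks the
 * output interface from the rule's address pool (or the state's cached
 * route), re-runs pf_test() on the outgoing copy, fixes up checksums the
 * hardware will not cover, and fragments the packet when it exceeds the
 * interface MTU and IP_DF is not set.
 */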
pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
    struct mbuf *m0, *m1;
    struct route iproute;
    struct route *ro = NULL;
    struct sockaddr_in *dst;
    struct ifnet *ifp = NULL;
    struct pf_addr naddr;
    struct pf_src_node *sn = NULL;

    if (m == NULL || *m == NULL || r == NULL ||
        (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
        panic("pf_route: invalid parameters");

    if (pd->pf_mtag->routed++ > 3) {

    if (r->rt == PF_DUPTO) {
        if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)

        if ((r->rt == PF_REPLYTO) == (r->direction == dir))

    if (m0->m_len < (int)sizeof (struct ip)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route: m0->m_len < sizeof (struct ip)\n"));

    ip = mtod(m0, struct ip *);

    bzero((caddr_t)ro, sizeof (*ro));
    dst = satosin(&ro->ro_dst);
    dst->sin_family = AF_INET;
    dst->sin_len = sizeof (*dst);
    dst->sin_addr = ip->ip_dst;

    if (r->rt == PF_FASTROUTE) {
        if (ro->ro_rt == 0) {
            ipstat.ips_noroute++;

        ifp = ro->ro_rt->rt_ifp;
        ro->ro_rt->rt_use++;

        if (ro->ro_rt->rt_flags & RTF_GATEWAY)
            dst = satosin(ro->ro_rt->rt_gateway);
        RT_UNLOCK(ro->ro_rt);
    } else {
        if (TAILQ_EMPTY(&r->rpool.list)) {
            DPFPRINTF(PF_DEBUG_URGENT,
                ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));

        pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
        if (!PF_AZERO(&naddr, AF_INET))
            dst->sin_addr.s_addr = naddr.v4.s_addr;
        ifp = r->rpool.cur->kif ?
            r->rpool.cur->kif->pfik_ifp : NULL;

        if (!PF_AZERO(&s->rt_addr, AF_INET))
            dst->sin_addr.s_addr =
                s->rt_addr.v4.s_addr;
        ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;

    if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
    else if (m0 == NULL)

    if (m0->m_len < (int)sizeof (struct ip)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route: m0->m_len < sizeof (struct ip)\n"));

    ip = mtod(m0, struct ip *);

    /* Copied from ip_output. */

    /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
    m0->m_pkthdr.csum_flags |= CSUM_IP;
    sw_csum = m0->m_pkthdr.csum_flags &
        ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

    if (ifp->if_hwassist & CSUM_TCP_SUM16) {
        /*
         * Special case code for GMACE
         * frames that can be checksumed by GMACE SUM16 HW:
         * frame >64, no fragments, no UDP
         */
        if (apple_hwcksum_tx && (m0->m_pkthdr.csum_flags & CSUM_TCP) &&
            (ntohs(ip->ip_len) > 50) &&
            (ntohs(ip->ip_len) <= ifp->if_mtu)) {
            /*
             * Apple GMAC HW, expects:
             * STUFF_OFFSET << 16 | START_OFFSET
             */
            /* IP+Enet header length */
            u_short offset = ((ip->ip_hl) << 2) + 14;
            u_short csumprev = m0->m_pkthdr.csum_data & 0xffff;
            m0->m_pkthdr.csum_flags = CSUM_DATA_VALID |
                CSUM_TCP_SUM16; /* for GMAC */
            m0->m_pkthdr.csum_data = (csumprev + offset) << 16;
            m0->m_pkthdr.csum_data += offset;
            /* do IP hdr chksum in software */
            sw_csum = CSUM_DELAY_IP;
        } else {
            /* let the software handle any UDP or TCP checksums */
            sw_csum |= (CSUM_DELAY_DATA & m0->m_pkthdr.csum_flags);
        }
    } else if (apple_hwcksum_tx == 0) {
        sw_csum |= (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
            m0->m_pkthdr.csum_flags;
    }

    if (sw_csum & CSUM_DELAY_DATA) {
        in_delayed_cksum(m0);
        sw_csum &= ~CSUM_DELAY_DATA;
        m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
    }

    if (apple_hwcksum_tx != 0) {
        m0->m_pkthdr.csum_flags &=
            IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
    } else {
        m0->m_pkthdr.csum_flags = 0;
    }

    if (ntohs(ip->ip_len) <= ifp->if_mtu ||
        (ifp->if_hwassist & CSUM_FRAGMENT)) {
        if (sw_csum & CSUM_DELAY_IP)
            ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
        error = ifnet_output(ifp, PF_INET, m0, ro, sintosa(dst));
    }

    /*
     * Too large for interface; fragment if possible.
     * Must be able to put at least 8 bytes per fragment.
     */
    if (ip->ip_off & htons(IP_DF)) {
        ipstat.ips_cantfrag++;
        if (r->rt != PF_DUPTO) {
            icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,

    /* PR-8933605: send ip_len,ip_off to ip_fragment in host byte order */
#if BYTE_ORDER != BIG_ENDIAN
    error = ip_fragment(m0, ifp, ifp->if_mtu, sw_csum);

    for (m0 = m1; m0; m0 = m1) {
        error = ifnet_output(ifp, PF_INET, m0, ro,

    ipstat.ips_fragmented++;

    if (r->rt != PF_DUPTO)

    if (ro == &iproute && ro->ro_rt)
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
    struct route_in6 ip6route;
    struct route_in6 *ro;
    struct sockaddr_in6 *dst;
    struct ip6_hdr *ip6;
    struct ifnet *ifp = NULL;
    struct pf_addr naddr;
    struct pf_src_node *sn = NULL;

    if (m == NULL || *m == NULL || r == NULL ||
        (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
        panic("pf_route6: invalid parameters");

    if (pd->pf_mtag->routed++ > 3) {

    if (r->rt == PF_DUPTO) {
        if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)

        if ((r->rt == PF_REPLYTO) == (r->direction == dir))

    if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));

    ip6 = mtod(m0, struct ip6_hdr *);

    bzero((caddr_t)ro, sizeof (*ro));
    dst = (struct sockaddr_in6 *)&ro->ro_dst;
    dst->sin6_family = AF_INET6;
    dst->sin6_len = sizeof (*dst);
    dst->sin6_addr = ip6->ip6_dst;

    /* Cheat. XXX why only in the v6 case??? */
    if (r->rt == PF_FASTROUTE) {
        struct pf_mtag *pf_mtag;

        if ((pf_mtag = pf_get_mtag(m0)) == NULL)

        pf_mtag->flags |= PF_TAG_GENERATED;
        ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);

    if (TAILQ_EMPTY(&r->rpool.list)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));

    pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
    if (!PF_AZERO(&naddr, AF_INET6))
        PF_ACPY((struct pf_addr *)&dst->sin6_addr,
    ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;

    if (!PF_AZERO(&s->rt_addr, AF_INET6))
        PF_ACPY((struct pf_addr *)&dst->sin6_addr,
            &s->rt_addr, AF_INET6);
    ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;

    if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
    else if (m0 == NULL)

    if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
        DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
            "< sizeof (struct ip6_hdr)\n"));

    ip6 = mtod(m0, struct ip6_hdr *);

    /*
     * If the packet is too large for the outgoing interface,
     * send back an icmp6 error.
     */
    if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
        dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
    if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
        error = nd6_output(ifp, ifp, m0, dst, NULL);
    } else {
        in6_ifstat_inc(ifp, ifs6_in_toobig);
        if (r->rt != PF_DUPTO)
            icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);

    if (r->rt != PF_DUPTO)
/*
 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
 * off is the offset where the protocol header starts
 * len is the total length of protocol header plus payload
 * returns 0 when the checksum is valid, otherwise returns 1.
 */
pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
    /*
     * Optimize for the common case; if the hardware calculated
     * value doesn't include pseudo-header checksum, or if it
     * is partially-computed (only 16-bit summation), do it in
     */
    if (apple_hwcksum_rx && (m->m_pkthdr.csum_flags &
        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
        (m->m_pkthdr.csum_data ^ 0xffff) == 0) {

    case IPPROTO_ICMPV6:

    if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr))

    if (m->m_pkthdr.len < off + len)

        if (p == IPPROTO_ICMP) {
            sum = in_cksum(m, len);

            if (m->m_len < (int)sizeof (struct ip))
            sum = inet_cksum(m, p, off, len);

        if (m->m_len < (int)sizeof (struct ip6_hdr))
        sum = inet6_cksum(m, p, off, len);

        tcpstat.tcps_rcvbadsum++;
        udpstat.udps_badsum++;
        icmpstat.icps_checksum++;
    case IPPROTO_ICMPV6:
        icmp6stat.icp6s_checksum++;
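
/*
 * pf_lazy_makewritable() may substitute a new, writable mbuf chain in
 * pd.mp; this macro re-syncs the local mbuf and IPv4 header pointers
 * after any call that can trigger such a substitution.
 */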
#ifndef NO_APPLE_EXTENSIONS
#define PF_APPLE_UPDATE_PDESC_IPv4()				\
	if (m && pd.mp && m != pd.mp) {				\
		h = mtod(m, struct ip *);			\
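
/*
 * pf_test() is the IPv4 entry point: normalize/reassemble the packet,
 * dispatch on the transport protocol to the per-protocol state tests (or
 * pf_test_rule() when no state exists), then apply option checks,
 * tagging, ALTQ hints, logging, statistics and route-to handling.
 */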
pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh)
{
    struct pfi_kif *kif;
    u_short action, reason = 0, log = 0;
    struct mbuf *m = *m0;
    struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
    struct pf_state *s = NULL;
    struct pf_state_key *sk = NULL;
    struct pf_ruleset *ruleset = NULL;
    int off, dirndx, pqid = 0;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!pf_status.running)

    memset(&pd, 0, sizeof (pd));

    if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test: pf_get_mtag returned NULL\n"));

    if (pd.pf_mtag->flags & PF_TAG_GENERATED)

    kif = (struct pfi_kif *)ifp->if_pf_kif;
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));

    if (kif->pfik_flags & PFI_IFLAG_SKIP)

    if ((m->m_flags & M_PKTHDR) == 0)
        panic("non-M_PKTHDR is passed to pf_test");
#endif /* DIAGNOSTIC */

    if (m->m_pkthdr.len < (int)sizeof (*h)) {
        REASON_SET(&reason, PFRES_SHORT);

    /* We do IP header normalization and packet reassembly here */
    if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {

    m = *m0;	/* pf_normalize messes with m0 */
    h = mtod(m, struct ip *);

    off = h->ip_hl << 2;
    if (off < (int)sizeof (*h)) {
        REASON_SET(&reason, PFRES_SHORT);

    pd.src = (struct pf_addr *)&h->ip_src;
    pd.dst = (struct pf_addr *)&h->ip_dst;
    PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
    pd.ip_sum = &h->ip_sum;
#ifndef NO_APPLE_EXTENSIONS
    pd.proto_variant = 0;
    pd.tot_len = ntohs(h->ip_len);

    /* handle fragments that didn't get reassembled by normalization */
    if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
        action = pf_test_fragment(&r, dir, kif, m, h,
        if (!pf_pull_hdr(m, off, &th, sizeof (th),
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;

        pd.p_len = pd.tot_len - off - (th.th_off << 2);
        if ((th.th_flags & TH_ACK) && pd.p_len == 0)

        action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_DROP)

        action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);

        if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;

        if (uh.uh_dport == 0 ||
            ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
            ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
            REASON_SET(&reason, PFRES_SHORT);

#ifndef NO_APPLE_EXTENSIONS
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
        PF_APPLE_UPDATE_PDESC_IPv4();
#else
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
#endif
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);

    case IPPROTO_ICMP: {
        if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;

        action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);
#ifndef NO_APPLE_EXTENSIONS
        struct pf_esp_hdr esp;

        if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
            log = action != PF_PASS;

        action = pf_test_state_esp(&s, dir, kif, off, &pd);
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);

        struct pf_grev1_hdr grev1;
        pd.hdr.grev1 = &grev1;
        if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
            &reason, AF_INET)) {
            log = (action != PF_PASS);

        if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
            ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
            if (ntohs(grev1.payload_length) >
                m->m_pkthdr.len - off) {
                REASON_SET(&reason, PFRES_SHORT);

            pd.proto_variant = PF_GRE_PPTP_VARIANT;
            action = pf_test_state_grev1(&s, dir, kif, off, &pd);
            if (pd.lmw < 0) goto done;
            PF_APPLE_UPDATE_PDESC_IPv4();
            if (action == PF_PASS) {
                pfsync_update_state(s);
#endif /* NPFSYNC */
            } else if (s == NULL) {
                action = pf_test_rule(&r, &s, dir, kif, m, off,
                    h, &pd, &a, &ruleset, &ipintrq);
                if (action == PF_PASS)

        /* not GREv1/PPTP, so treat as ordinary GRE... */

        action = pf_test_state_other(&s, dir, kif, &pd);
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, h,
                &pd, &a, &ruleset, &ipintrq);
#ifndef NO_APPLE_EXTENSIONS
    PF_APPLE_UPDATE_PDESC_IPv4();

    if (action == PF_PASS && h->ip_hl > 5 &&
        !((s && s->allow_opts) || r->allow_opts)) {
        REASON_SET(&reason, PFRES_IPOPTIONS);
        DPFPRINTF(PF_DEBUG_MISC,
            ("pf: dropping packet with ip options [hlen=%u]\n",
            (unsigned int) h->ip_hl));

    if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
        (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,

    if (action == PF_PASS && r->qid) {
        if (pqid || (pd.tos & IPTOS_LOWDELAY))
            pd.pf_mtag->qid = r->pqid;
        else
            pd.pf_mtag->qid = r->qid;
        /* add hints for ecn */
        pd.pf_mtag->hdr = h;

    /*
     * connections redirected to loopback should not match sockets
     * bound specifically to loopback due to security implications,
     * see tcp_input() and in_pcblookup_listen().
     */
    if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
        pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
        (s->nat_rule.ptr->action == PF_RDR ||
        s->nat_rule.ptr->action == PF_BINAT) &&
        (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
        pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;

    if (s != NULL && s->nat_rule.ptr != NULL &&
        s->nat_rule.ptr->log & PF_LOG_ALL)
        lr = s->nat_rule.ptr;
    PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,

    kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
    kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

    if (action == PF_PASS || r->action == PF_DROP) {
        dirndx = (dir == PF_OUT);
        r->packets[dirndx]++;
        r->bytes[dirndx] += pd.tot_len;
        a->packets[dirndx]++;
        a->bytes[dirndx] += pd.tot_len;
        if (s->nat_rule.ptr != NULL) {
            s->nat_rule.ptr->packets[dirndx]++;
            s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
        if (s->src_node != NULL) {
            s->src_node->packets[dirndx]++;
            s->src_node->bytes[dirndx] += pd.tot_len;
        if (s->nat_src_node != NULL) {
            s->nat_src_node->packets[dirndx]++;
            s->nat_src_node->bytes[dirndx] += pd.tot_len;
        dirndx = (dir == sk->direction) ? 0 : 1;
        s->packets[dirndx]++;
        s->bytes[dirndx] += pd.tot_len;

    nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
    /*
     * XXX: we need to make sure that the addresses
     * passed to pfr_update_stats() are the same than
     * the addresses used during matching (pfr_match)
     */
    if (r == &pf_default_rule) {
        x = (sk == NULL || sk->direction == dir) ?
            &pd.baddr : &pd.naddr;
        x = (sk == NULL || sk->direction == dir) ?
            &pd.naddr : &pd.baddr;
    if (x == &pd.baddr || s == NULL) {
        /* we need to change the address */

    if (tr->src.addr.type == PF_ADDR_TABLE)
        pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
            sk->direction == dir) ?
            pd.src : pd.dst, pd.af,
            pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
    if (tr->dst.addr.type == PF_ADDR_TABLE)
        pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
            sk->direction == dir) ? pd.dst : pd.src, pd.af,
            pd.tot_len, dir == PF_OUT, r->action == PF_PASS,

#ifndef NO_APPLE_EXTENSIONS
    VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
        REASON_SET(&reason, PFRES_MEMORY);

    if (action == PF_DROP) {

    if (action == PF_SYNPROXY_DROP) {

    /* pf_route can free the mbuf causing *m0 to become NULL */
    pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
#ifndef NO_APPLE_EXTENSIONS
#define PF_APPLE_UPDATE_PDESC_IPv6()				\
	if (m && pd.mp && m != pd.mp) {				\
		h = mtod(m, struct ip6_hdr *);			\
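
/*
 * pf_test6() mirrors pf_test() for IPv6, with an additional walk over the
 * extension-header chain (fragment, routing, hop-by-hop/destination
 * options) to find the transport header and to reject dangerous headers
 * such as type 0 routing headers.
 */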
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh)
{
    struct pfi_kif *kif;
    u_short action, reason = 0, log = 0;
    struct mbuf *m = *m0, *n = NULL;
    struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
    struct pf_state *s = NULL;
    struct pf_state_key *sk = NULL;
    struct pf_ruleset *ruleset = NULL;
    int off, terminal = 0, dirndx, rh_cnt = 0;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!pf_status.running)

    memset(&pd, 0, sizeof (pd));

    if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test6: pf_get_mtag returned NULL\n"));

    if (pd.pf_mtag->flags & PF_TAG_GENERATED)

    kif = (struct pfi_kif *)ifp->if_pf_kif;
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name));

    if (kif->pfik_flags & PFI_IFLAG_SKIP)

    if ((m->m_flags & M_PKTHDR) == 0)
        panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

    h = mtod(m, struct ip6_hdr *);

    if (m->m_pkthdr.len < (int)sizeof (*h)) {
        REASON_SET(&reason, PFRES_SHORT);

    /* We do IP header normalization and packet reassembly here */
    if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {

    m = *m0;	/* pf_normalize messes with m0 */
    h = mtod(m, struct ip6_hdr *);

    /*
     * we do not support jumbogram yet. if we keep going, zero ip6_plen
     * will do something bad, so drop the packet for now.
     */
    if (htons(h->ip6_plen) == 0) {
        REASON_SET(&reason, PFRES_NORM);	/*XXX*/

    pd.src = (struct pf_addr *)&h->ip6_src;
    pd.dst = (struct pf_addr *)&h->ip6_dst;
    PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
    pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr);

    off = ((caddr_t)h - m->m_data) + sizeof (struct ip6_hdr);
    pd.proto = h->ip6_nxt;
#ifndef NO_APPLE_EXTENSIONS
    pd.proto_variant = 0;

        case IPPROTO_FRAGMENT:
            action = pf_test_fragment(&r, dir, kif, m, h,
            if (action == PF_DROP)
                REASON_SET(&reason, PFRES_FRAG);
        case IPPROTO_ROUTING: {
            struct ip6_rthdr rthdr;

                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 more than one rthdr\n"));
                REASON_SET(&reason, PFRES_IPOPTIONS);

            if (!pf_pull_hdr(m, off, &rthdr, sizeof (rthdr), NULL,
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 short rthdr\n"));
                REASON_SET(&reason, PFRES_SHORT);

            if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 rthdr0\n"));
                REASON_SET(&reason, PFRES_IPOPTIONS);

        case IPPROTO_HOPOPTS:
        case IPPROTO_DSTOPTS: {
            /* get next header and header length */
            struct ip6_ext opt6;

            if (!pf_pull_hdr(m, off, &opt6, sizeof (opt6),
                NULL, &reason, pd.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 short opt\n"));

            if (pd.proto == IPPROTO_AH)
                off += (opt6.ip6e_len + 2) * 4;
            else
                off += (opt6.ip6e_len + 1) * 8;
            pd.proto = opt6.ip6e_nxt;
            /* goto the next header */
    } while (!terminal);
    /* if there's no routing header, use unmodified mbuf for checksumming */

        if (!pf_pull_hdr(m, off, &th, sizeof (th),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;

        pd.p_len = pd.tot_len - off - (th.th_off << 2);
        action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv6();
        if (action == PF_DROP)

        action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv6();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);

        if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;

        if (uh.uh_dport == 0 ||
            ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
            ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
            REASON_SET(&reason, PFRES_SHORT);

#ifndef NO_APPLE_EXTENSIONS
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
        PF_APPLE_UPDATE_PDESC_IPv6();
#else
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
#endif
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);

    case IPPROTO_ICMPV6: {
        struct icmp6_hdr ih;

        if (!pf_pull_hdr(m, off, &ih, sizeof (ih),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;

        action = pf_test_state_icmp(&s, dir, kif,
            m, off, h, &pd, &reason);
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv6();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);

#ifndef NO_APPLE_EXTENSIONS
        struct pf_esp_hdr esp;

        if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
            log = action != PF_PASS;

        action = pf_test_state_esp(&s, dir, kif, off, &pd);
        PF_APPLE_UPDATE_PDESC_IPv6();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);

        struct pf_grev1_hdr grev1;

        pd.hdr.grev1 = &grev1;
        if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
            &reason, AF_INET6)) {
            log = (action != PF_PASS);

        if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
            ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
            if (ntohs(grev1.payload_length) >
                m->m_pkthdr.len - off) {
                REASON_SET(&reason, PFRES_SHORT);

            action = pf_test_state_grev1(&s, dir, kif, off, &pd);
            PF_APPLE_UPDATE_PDESC_IPv6();
            if (action == PF_PASS) {
                pfsync_update_state(s);
#endif /* NPFSYNC */
            } else if (s == NULL) {
                action = pf_test_rule(&r, &s, dir, kif, m, off,
                    h, &pd, &a, &ruleset, &ip6intrq);
                if (action == PF_PASS)

        /* not GREv1/PPTP, so treat as ordinary GRE... */

        action = pf_test_state_other(&s, dir, kif, &pd);
#ifndef NO_APPLE_EXTENSIONS
        PF_APPLE_UPDATE_PDESC_IPv6();
        if (action == PF_PASS) {
            pfsync_update_state(s);
#endif /* NPFSYNC */
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, h,
                &pd, &a, &ruleset, &ip6intrq);
#ifndef NO_APPLE_EXTENSIONS
    PF_APPLE_UPDATE_PDESC_IPv6();

    /* handle dangerous IPv6 extension headers. */
    if (action == PF_PASS && rh_cnt &&
        !((s && s->allow_opts) || r->allow_opts)) {
        REASON_SET(&reason, PFRES_IPOPTIONS);
        DPFPRINTF(PF_DEBUG_MISC,
            ("pf: dropping packet with dangerous v6 headers\n"));

    if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
        (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,

    if (action == PF_PASS && r->qid) {
        if (pd.tos & IPTOS_LOWDELAY)
            pd.pf_mtag->qid = r->pqid;
        else
            pd.pf_mtag->qid = r->qid;
        /* add hints for ecn */
        pd.pf_mtag->hdr = h;

    if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
        pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
        (s->nat_rule.ptr->action == PF_RDR ||
        s->nat_rule.ptr->action == PF_BINAT) &&
        IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
        pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;

    if (s != NULL && s->nat_rule.ptr != NULL &&
        s->nat_rule.ptr->log & PF_LOG_ALL)
        lr = s->nat_rule.ptr;
    PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,

    kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
    kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

    if (action == PF_PASS || r->action == PF_DROP) {
        dirndx = (dir == PF_OUT);
        r->packets[dirndx]++;
        r->bytes[dirndx] += pd.tot_len;
        a->packets[dirndx]++;
        a->bytes[dirndx] += pd.tot_len;
        if (s->nat_rule.ptr != NULL) {
            s->nat_rule.ptr->packets[dirndx]++;
            s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
        if (s->src_node != NULL) {
            s->src_node->packets[dirndx]++;
            s->src_node->bytes[dirndx] += pd.tot_len;
        if (s->nat_src_node != NULL) {
            s->nat_src_node->packets[dirndx]++;
            s->nat_src_node->bytes[dirndx] += pd.tot_len;
        dirndx = (dir == sk->direction) ? 0 : 1;
        s->packets[dirndx]++;
        s->bytes[dirndx] += pd.tot_len;

    nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
    /*
     * XXX: we need to make sure that the addresses
     * passed to pfr_update_stats() are the same than
     * the addresses used during matching (pfr_match)
     */
    if (r == &pf_default_rule) {
        x = (s == NULL || sk->direction == dir) ?
            &pd.baddr : &pd.naddr;
        x = (s == NULL || sk->direction == dir) ?
            &pd.naddr : &pd.baddr;
    if (x == &pd.baddr || s == NULL) {

    if (tr->src.addr.type == PF_ADDR_TABLE)
        pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
            sk->direction == dir) ? pd.src : pd.dst, pd.af,
            pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
    if (tr->dst.addr.type == PF_ADDR_TABLE)
        pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
            sk->direction == dir) ? pd.dst : pd.src, pd.af,
            pd.tot_len, dir == PF_OUT, r->action == PF_PASS,

    if (action == PF_SYNPROXY_DROP) {
        /* pf_route6 can free the mbuf causing *m0 to become NULL */
        pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

#ifndef NO_APPLE_EXTENSIONS
    VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
        REASON_SET(&reason, PFRES_MEMORY);

    if (action == PF_DROP) {

    if (action == PF_SYNPROXY_DROP) {

    if (action == PF_PASS) {
        h = mtod(m, struct ip6_hdr *);

        /* pf_route6 can free the mbuf causing *m0 to become NULL */
        pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

    if (action != PF_SYNPROXY_DROP && r->rt)
        /* pf_route6 can free the mbuf causing *m0 to become NULL */
        pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

    if (action == PF_PASS) {
        h = mtod(m, struct ip6_hdr *);

    if (action == PF_SYNPROXY_DROP) {
pf_check_congestion(struct ifqueue *ifq)
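
/*
 * The pool_*() routines below are thin shims that map the OpenBSD pool
 * allocator API onto xnu zones, so the imported pf code can be left
 * largely untouched.
 */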
pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
    int flags, const char *wchan, void *palloc)
{
#pragma unused(align, ioff, flags, palloc)
    bzero(pp, sizeof (*pp));
    pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
    if (pp->pool_zone != NULL) {
        zone_change(pp->pool_zone, Z_EXPAND, TRUE);
        zone_change(pp->pool_zone, Z_CALLERACCT, FALSE);
        pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
        pp->pool_name = wchan;
/* Zones cannot be currently destroyed */
pool_destroy(struct pool *pp)

pool_sethiwat(struct pool *pp, int n)
{
    pp->pool_hiwat = n;	/* Currently unused */

pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{
#pragma unused(warnmess, ratecap)
pool_get(struct pool *pp, int flags)
{
    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (pp->pool_count > pp->pool_limit) {
        DPFPRINTF(PF_DEBUG_NOISY,
            ("pf: pool %s hard limit reached (%d)\n",
            pp->pool_name != NULL ? pp->pool_name : "unknown",

    buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
        VERIFY(pp->pool_count != 0);
pool_put(struct pool *pp, void *v)
{
    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    zfree(pp->pool_zone, v);
    VERIFY(pp->pool_count != 0);
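
/*
 * pf_find_mtag()/pf_get_mtag() return the pf metadata attached to an
 * mbuf: either an m_tag allocated on demand or, when PF_PKTHDR is
 * defined, the pf_mtag embedded directly in the packet header.
 */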
pf_find_mtag(struct mbuf *m)
{
    if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
        KERNEL_TAG_TYPE_PF, NULL)) == NULL)

    return ((struct pf_mtag *)(mtag + 1));
#else
    if (!(m->m_flags & M_PKTHDR))

    return (&m->m_pkthdr.pf_mtag);
#endif /* PF_PKTHDR */
pf_get_mtag(struct mbuf *m)
{
    if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
        mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
            sizeof (struct pf_mtag), M_NOWAIT, m);
        bzero(mtag + 1, sizeof (struct pf_mtag));
        m_tag_prepend(m, mtag);

    return ((struct pf_mtag *)(mtag + 1));
#else
    return (pf_find_mtag(m));
#endif /* PF_PKTHDR */
pf_time_second(void)

pf_calendar_time_second(void)
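
/*
 * hook_establish()/hook_runloop() manage per-state unlink hook lists:
 * callbacks registered here are run (and optionally freed) when a state
 * is torn down, e.g. via hook_runloop() in pf_test_state_esp().
 */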
hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
{
    struct hook_desc *hd;

    hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);

    if (tail)
        TAILQ_INSERT_TAIL(head, hd, hd_list);
    else
        TAILQ_INSERT_HEAD(head, hd, hd_list);
hook_runloop(struct hook_desc_head *head, int flags)
{
    struct hook_desc *hd;

    if (!(flags & HOOK_REMOVE)) {
        if (!(flags & HOOK_ABORT))
            TAILQ_FOREACH(hd, head, hd_list)
                hd->hd_fn(hd->hd_arg);
    } else {
        while (!!(hd = TAILQ_FIRST(head))) {
            TAILQ_REMOVE(head, hd, hd_list);
            if (!(flags & HOOK_ABORT))
                hd->hd_fn(hd->hd_arg);
            if (flags & HOOK_FREE)
                _FREE(hd, M_DEVBUF);