/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* $apfw: git commit 7c8016ea91f7b68950cf41729c92dd8e3e423ba7 $ */
/* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/mcache.h>

#include <libkern/crypto/md5.h>
#include <libkern/libkern.h>

#include <mach/thread_act.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <net/if_ether.h>
#include <net/ethernet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#ifndef NO_APPLE_EXTENSIONS
#define	DPFPRINTF(n, x)	(pf_status.debug >= (n) ? printf x : ((void)0))
#else
#define	DPFPRINTF(n, x)	if (pf_status.debug >= (n)) printf x
#endif
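/*
 * Usage sketch (illustrative only, not part of the original file): because
 * the macro expands to "printf x" directly, the format string and its
 * arguments must be wrapped in an extra set of parentheses, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC,
 *	    ("pf: state insert failed\n"));
 */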
/* XXX: should be in header somewhere */
#define	satosin(sa)	((struct sockaddr_in *)(sa))
#define	sintosa(sin)	((struct sockaddr *)(sin))
/*
 * On Mac OS X, the rtableid value is treated as the interface scope
 * value that is equivalent to the interface index used for scoped
 * routing.  A valid scope value is anything but IFSCOPE_NONE (0),
 * as per definition of ifindex which is a positive, non-zero number.
 * The other BSDs treat a negative rtableid value as invalid, hence
 * the test against INT_MAX to handle userland apps which initialize
 * the field with a negative number.
 */
#define	PF_RTABLEID_IS_VALID(r) \
	((r) > IFSCOPE_NONE && (r) <= INT_MAX)
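/*
 * Illustrative sketch (hypothetical caller, not from the original source):
 * a rule whose rtableid was left at 0 or initialized to -1 by userland
 * fails the check, so no interface scope is applied:
 *
 *	unsigned int ifscope = PF_RTABLEID_IS_VALID(r->rtableid) ?
 *	    r->rtableid : IFSCOPE_NONE;
 */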
lck_mtx_t *pf_lock;
lck_rw_t *pf_perim_lock;

struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;

struct pf_palist pf_pabuf;
struct pf_status pf_status;

struct pf_altqqueue pf_altqs[2];
struct pf_altqqueue *pf_altqs_active;
struct pf_altqqueue *pf_altqs_inactive;
u_int32_t ticket_altqs_active;
u_int32_t ticket_altqs_inactive;
int altqs_inactive_open;
u_int32_t ticket_pabuf;

static MD5_CTX pf_tcp_secret_ctx;
static u_char pf_tcp_secret[16];
static int pf_tcp_secret_init;
static int pf_tcp_iss_off;

static struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_rule		*r;
	struct pf_anchor_node	*parent;
	struct pf_anchor	*child;
} pf_anchor_stack[64];

struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
struct pool pf_state_pl, pf_state_key_pl;
struct pool pf_altq_pl;
#ifndef NO_APPLE_EXTENSIONS
typedef void (*hook_fn_t)(void *);

struct hook_desc {
	TAILQ_ENTRY(hook_desc) hd_list;
	hook_fn_t hd_fn;
	void *hd_arg;
};

#define	HOOK_REMOVE	0x01
#define	HOOK_FREE	0x02
#define	HOOK_ABORT	0x04

static void *hook_establish(struct hook_desc_head *, int,
	hook_fn_t, void *);
static void hook_runloop(struct hook_desc_head *, int flags);

struct pool pf_app_state_pl;
static void pf_print_addr(struct pf_addr *addr, sa_family_t af);
static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int,
	u_int8_t);
static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
static void pf_init_threshold(struct pf_threshold *, u_int32_t,
	u_int32_t);
static void pf_add_threshold(struct pf_threshold *);
static int pf_check_threshold(struct pf_threshold *);
static void pf_change_ap(int, struct mbuf *, struct pf_addr *,
	u_int16_t *, u_int16_t *, u_int16_t *,
	struct pf_addr *, u_int16_t, u_int8_t, sa_family_t);
static int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
	struct tcphdr *, struct pf_state_peer *);
static void pf_change_a6(struct pf_addr *, u_int16_t *,
	struct pf_addr *, u_int8_t);
static void pf_change_icmp(struct pf_addr *, u_int16_t *,
	struct pf_addr *, struct pf_addr *, u_int16_t,
	u_int16_t *, u_int16_t *, u_int16_t *,
	u_int16_t *, u_int8_t, sa_family_t);
static void pf_send_tcp(const struct pf_rule *, sa_family_t,
	const struct pf_addr *, const struct pf_addr *,
	u_int16_t, u_int16_t, u_int32_t, u_int32_t,
	u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
	u_int16_t, struct ether_header *, struct ifnet *);
static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
	sa_family_t, struct pf_rule *);
#ifndef NO_APPLE_EXTENSIONS
static struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
	int, int, struct pfi_kif *, struct pf_addr *,
	union pf_state_xport *, struct pf_addr *,
	union pf_state_xport *, int);
static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *,
	struct mbuf *, int, int, struct pfi_kif *,
	struct pf_src_node **, struct pf_addr *,
	union pf_state_xport *, struct pf_addr *,
	union pf_state_xport *, struct pf_addr *,
	union pf_state_xport *);
#else
struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
	int, int, struct pfi_kif *,
	struct pf_addr *, u_int16_t, struct pf_addr *,
	u_int16_t, int);
struct pf_rule *pf_get_translation(struct pf_pdesc *, struct mbuf *,
	int, int, struct pfi_kif *, struct pf_src_node **,
	struct pf_addr *, u_int16_t,
	struct pf_addr *, u_int16_t,
	struct pf_addr *, u_int16_t *);
#endif
static void pf_attach_state(struct pf_state_key *,
	struct pf_state *, int);
static void pf_detach_state(struct pf_state *, int);
static u_int32_t pf_tcp_iss(struct pf_pdesc *);
static int pf_test_rule(struct pf_rule **, struct pf_state **,
	int, struct pfi_kif *, struct mbuf *, int,
	void *, struct pf_pdesc *, struct pf_rule **,
	struct pf_ruleset **, struct ifqueue *);
static int pf_test_fragment(struct pf_rule **, int,
	struct pfi_kif *, struct mbuf *, void *,
	struct pf_pdesc *, struct pf_rule **,
	struct pf_ruleset **);
static int pf_test_state_tcp(struct pf_state **, int,
	struct pfi_kif *, struct mbuf *, int,
	void *, struct pf_pdesc *, u_short *);
static int pf_test_state_udp(struct pf_state **, int,
	struct pfi_kif *, struct mbuf *, int,
	void *, struct pf_pdesc *, u_short *);
static int pf_test_state_icmp(struct pf_state **, int,
	struct pfi_kif *, struct mbuf *, int,
	void *, struct pf_pdesc *, u_short *);
static int pf_test_state_other(struct pf_state **, int,
	struct pfi_kif *, struct pf_pdesc *);
static int pf_match_tag(struct mbuf *, struct pf_rule *,
	struct pf_mtag *, int *);
static void pf_step_into_anchor(int *, struct pf_ruleset **, int,
	struct pf_rule **, struct pf_rule **, int *);
static int pf_step_out_of_anchor(int *, struct pf_ruleset **,
	int, struct pf_rule **, struct pf_rule **,
	int *);
static void pf_hash(struct pf_addr *, struct pf_addr *,
	struct pf_poolhashkey *, sa_family_t);
static int pf_map_addr(u_int8_t, struct pf_rule *,
	struct pf_addr *, struct pf_addr *,
	struct pf_addr *, struct pf_src_node **);
#ifndef NO_APPLE_EXTENSIONS
static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *,
	struct pf_rule *, struct pf_addr *,
	union pf_state_xport *, struct pf_addr *,
	union pf_state_xport *, struct pf_addr *,
	union pf_state_xport *, struct pf_src_node **);
#else
int pf_get_sport(sa_family_t, u_int8_t, struct pf_rule *,
	struct pf_addr *, struct pf_addr *, u_int16_t,
	struct pf_addr *, u_int16_t *, u_int16_t, u_int16_t,
	struct pf_src_node **);
#endif
static void pf_route(struct mbuf **, struct pf_rule *, int,
	struct ifnet *, struct pf_state *,
	struct pf_pdesc *);
static void pf_route6(struct mbuf **, struct pf_rule *, int,
	struct ifnet *, struct pf_state *,
	struct pf_pdesc *);
static u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
	sa_family_t);
static u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
	sa_family_t);
static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
	u_int16_t);
static void pf_set_rt_ifp(struct pf_state *,
	struct pf_addr *);
static int pf_check_proto_cksum(struct mbuf *, int, int,
	u_int8_t, sa_family_t);
static int pf_addr_wrap_neq(struct pf_addr_wrap *,
	struct pf_addr_wrap *);
static struct pf_state *pf_find_state(struct pfi_kif *,
	struct pf_state_key_cmp *, u_int);
static int pf_src_connlimit(struct pf_state **);
static void pf_stateins_err(const char *, struct pf_state *,
	struct pfi_kif *);
static int pf_check_congestion(struct ifqueue *);

#ifndef NO_APPLE_EXTENSIONS
static const char *pf_pptp_ctrl_type_name(u_int16_t code);
static void pf_pptp_handler(struct pf_state *, int, int,
	struct pf_pdesc *, struct pfi_kif *);
static void pf_pptp_unlink(struct pf_state *);
static int pf_test_state_grev1(struct pf_state **, int,
	struct pfi_kif *, int, struct pf_pdesc *);
static int pf_ike_compare(struct pf_app_state *,
	struct pf_app_state *);
static int pf_test_state_esp(struct pf_state **, int,
	struct pfi_kif *, int, struct pf_pdesc *);
#endif
extern struct pool pfr_ktable_pl;
extern struct pool pfr_kentry_pl;
extern int path_mtu_discovery;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_app_state_pl, PFAPPSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT }
};
#ifndef NO_APPLE_EXTENSIONS
static void *
pf_lazy_makewritable(struct pf_pdesc *pd, struct mbuf *m, int len)
{
	if (pd->lmw < 0)
		return (0);

	VERIFY(m == pd->mp);

	if (len > pd->lmw) {
		if (m_makewritable(&m, 0, len, M_DONTWAIT))
			len = -1;
		pd->lmw = len;
		if (len >= 0 && m != pd->mp) {
			pd->mp = m;

			switch (pd->af) {
			case AF_INET: {
				struct ip *h = mtod(m, struct ip *);
				pd->src = (struct pf_addr *)&h->ip_src;
				pd->dst = (struct pf_addr *)&h->ip_dst;
				pd->ip_sum = &h->ip_sum;
				break;
			}
			case AF_INET6: {
				struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
				pd->src = (struct pf_addr *)&h->ip6_src;
				pd->dst = (struct pf_addr *)&h->ip6_dst;
				break;
			}
			}
		}
	}

	return (len < 0 ? 0 : m);
}
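/*
 * Typical-caller sketch (illustrative, not part of the original source):
 * handlers call this before modifying a header and treat a NULL return as
 * "could not obtain a writable copy of the mbuf":
 *
 *	m = pf_lazy_makewritable(pd, m, off + sizeof (*th));
 *	if (!m)
 *		return (PF_DROP);
 */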
static int
pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
    int direction, int *action)
{
	if (*state == NULL || (*state)->timeout == PFTM_PURGE) {
		*action = PF_DROP;
		return (1);
	}

	if (direction == PF_OUT &&
	    (((*state)->rule.ptr->rt == PF_ROUTETO &&
	    (*state)->rule.ptr->direction == PF_OUT) ||
	    ((*state)->rule.ptr->rt == PF_REPLYTO &&
	    (*state)->rule.ptr->direction == PF_IN)) &&
	    (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
		*action = PF_PASS;
		return (1);
	}

	return (0);
}
#define	STATE_LOOKUP()							 \
	do {								 \
		int action;						 \
		*state = pf_find_state(kif, &key, direction);		 \
		if (pf_state_lookup_aux(state, kif, direction, &action)) \
			return (action);				 \
	} while (0)

#define	STATE_ADDR_TRANSLATE(sk)					\
	(sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] ||		\
	((sk)->af == AF_INET6 &&					\
	((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] ||	\
	(sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] ||	\
	(sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3]))

#define	STATE_TRANSLATE(sk)						\
	(STATE_ADDR_TRANSLATE(sk) ||					\
	(sk)->lan.xport.port != (sk)->gwy.xport.port)

#define	STATE_GRE_TRANSLATE(sk)						\
	(STATE_ADDR_TRANSLATE(sk) ||					\
	(sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)

#else

#define	STATE_LOOKUP()							\
	do {								\
		*state = pf_find_state(kif, &key, direction);		\
		if (*state == NULL || (*state)->timeout == PFTM_PURGE)	\
			return (PF_DROP);				\
		if (direction == PF_OUT &&				\
		    (((*state)->rule.ptr->rt == PF_ROUTETO &&		\
		    (*state)->rule.ptr->direction == PF_OUT) ||		\
		    ((*state)->rule.ptr->rt == PF_REPLYTO &&		\
		    (*state)->rule.ptr->direction == PF_IN)) &&		\
		    (*state)->rt_kif != NULL &&				\
		    (*state)->rt_kif != kif)				\
			return (PF_PASS);				\
	} while (0)

#define	STATE_TRANSLATE(sk)						\
	(sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] ||	\
	((sk)->af == AF_INET6 &&					\
	((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] ||	\
	(sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] ||	\
	(sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])) ||	\
	(sk)->lan.port != (sk)->gwy.port

#endif

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define	STATE_INC_COUNTERS(s)					\
	do {							\
		s->rule.ptr->states++;				\
		VERIFY(s->rule.ptr->states != 0);		\
		if (s->anchor.ptr != NULL) {			\
			s->anchor.ptr->states++;		\
			VERIFY(s->anchor.ptr->states != 0);	\
		}						\
		if (s->nat_rule.ptr != NULL) {			\
			s->nat_rule.ptr->states++;		\
			VERIFY(s->nat_rule.ptr->states != 0);	\
		}						\
	} while (0)

#define	STATE_DEC_COUNTERS(s)					\
	do {							\
		if (s->nat_rule.ptr != NULL) {			\
			VERIFY(s->nat_rule.ptr->states > 0);	\
			s->nat_rule.ptr->states--;		\
		}						\
		if (s->anchor.ptr != NULL) {			\
			VERIFY(s->anchor.ptr->states > 0);	\
			s->anchor.ptr->states--;		\
		}						\
		VERIFY(s->rule.ptr->states > 0);		\
		s->rule.ptr->states--;				\
	} while (0)
static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
static __inline int pf_state_compare_lan_ext(struct pf_state_key *,
	struct pf_state_key *);
static __inline int pf_state_compare_ext_gwy(struct pf_state_key *,
	struct pf_state_key *);
static __inline int pf_state_compare_id(struct pf_state *,
	struct pf_state *);

struct pf_src_tree tree_src_tracking;

struct pf_state_tree_id tree_id;
struct pf_state_queue state_list;

RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
RB_GENERATE(pf_state_tree_lan_ext, pf_state_key,
    entry_lan_ext, pf_state_compare_lan_ext);
RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key,
    entry_ext_gwy, pf_state_compare_ext_gwy);
RB_GENERATE(pf_state_tree_id, pf_state,
    entry_id, pf_state_compare_id);
#define	PF_DT_SKIP_LANEXT	0x01
#define	PF_DT_SKIP_EXTGWY	0x02

#ifndef NO_APPLE_EXTENSIONS
static const u_int16_t PF_PPTP_PORT = 1723;
static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;

struct pf_pptp_hdr {
	u_int16_t	length;
	u_int16_t	type;
	u_int32_t	magic;
};

struct pf_pptp_ctrl_hdr {
	u_int16_t	type;
	u_int16_t	reserved_0;
};

struct pf_pptp_ctrl_generic {
	u_int16_t	data[0];
};

#define PF_PPTP_CTRL_TYPE_START_REQ	1
struct pf_pptp_ctrl_start_req {
	u_int16_t	protocol_version;
	u_int16_t	reserved_1;
	u_int32_t	framing_capabilities;
	u_int32_t	bearer_capabilities;
	u_int16_t	maximum_channels;
	u_int16_t	firmware_revision;
	u_int8_t	host_name[64];
	u_int8_t	vendor_string[64];
};

#define PF_PPTP_CTRL_TYPE_START_RPY	2
struct pf_pptp_ctrl_start_rpy {
	u_int16_t	protocol_version;
	u_int8_t	result_code;
	u_int8_t	error_code;
	u_int32_t	framing_capabilities;
	u_int32_t	bearer_capabilities;
	u_int16_t	maximum_channels;
	u_int16_t	firmware_revision;
	u_int8_t	host_name[64];
	u_int8_t	vendor_string[64];
};

#define PF_PPTP_CTRL_TYPE_STOP_REQ	3
struct pf_pptp_ctrl_stop_req {
	u_int8_t	reason;
	u_int8_t	reserved_1;
	u_int16_t	reserved_2;
};

#define PF_PPTP_CTRL_TYPE_STOP_RPY	4
struct pf_pptp_ctrl_stop_rpy {
	u_int8_t	reason;
	u_int8_t	error_code;
	u_int16_t	reserved_1;
};

#define PF_PPTP_CTRL_TYPE_ECHO_REQ	5
struct pf_pptp_ctrl_echo_req {
	u_int32_t	identifier;
};

#define PF_PPTP_CTRL_TYPE_ECHO_RPY	6
struct pf_pptp_ctrl_echo_rpy {
	u_int32_t	identifier;
	u_int8_t	result_code;
	u_int8_t	error_code;
	u_int16_t	reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ	7
struct pf_pptp_ctrl_call_out_req {
	u_int16_t	call_id;
	u_int16_t	call_sernum;
	u_int32_t	bearer_type;
	u_int32_t	framing_type;
	u_int16_t	rxwindow_size;
	u_int16_t	proc_delay;
	u_int8_t	phone_num[64];
	u_int8_t	sub_addr[64];
};

#define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY	8
struct pf_pptp_ctrl_call_out_rpy {
	u_int16_t	call_id;
	u_int16_t	peer_call_id;
	u_int8_t	result_code;
	u_int8_t	error_code;
	u_int16_t	cause_code;
	u_int32_t	connect_speed;
	u_int16_t	rxwindow_size;
	u_int16_t	proc_delay;
	u_int32_t	phy_channel_id;
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_1ST	9
struct pf_pptp_ctrl_call_in_1st {
	u_int16_t	call_id;
	u_int16_t	call_sernum;
	u_int32_t	bearer_type;
	u_int32_t	phy_channel_id;
	u_int16_t	dialed_number_len;
	u_int16_t	dialing_number_len;
	u_int8_t	dialed_num[64];
	u_int8_t	dialing_num[64];
	u_int8_t	sub_addr[64];
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_2ND	10
struct pf_pptp_ctrl_call_in_2nd {
	u_int16_t	call_id;
	u_int16_t	peer_call_id;
	u_int8_t	result_code;
	u_int8_t	error_code;
	u_int16_t	rxwindow_size;
	u_int16_t	reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_IN_3RD	11
struct pf_pptp_ctrl_call_in_3rd {
	u_int16_t	call_id;
	u_int16_t	reserved_1;
	u_int32_t	connect_speed;
	u_int16_t	rxwindow_size;
	u_int32_t	framing_type;
};

#define PF_PPTP_CTRL_TYPE_CALL_CLR	12
struct pf_pptp_ctrl_call_clr {
	u_int16_t	call_id;
	u_int16_t	reserved_1;
};

#define PF_PPTP_CTRL_TYPE_CALL_DISC	13
struct pf_pptp_ctrl_call_disc {
	u_int16_t	call_id;
	u_int8_t	result_code;
	u_int8_t	error_code;
	u_int16_t	cause_code;
	u_int16_t	reserved_1;
	u_int8_t	statistics[128];
};

#define PF_PPTP_CTRL_TYPE_ERROR	14
struct pf_pptp_ctrl_error {
	u_int16_t	peer_call_id;
	u_int16_t	reserved_1;
	u_int32_t	crc_errors;
	u_int32_t	fr_errors;
	u_int32_t	hw_errors;
	u_int32_t	buf_errors;
	u_int32_t	tim_errors;
	u_int32_t	align_errors;
};

#define PF_PPTP_CTRL_TYPE_SET_LINKINFO	15
struct pf_pptp_ctrl_set_linkinfo {
	u_int16_t	peer_call_id;
	u_int16_t	reserved_1;
};

static const char *pf_pptp_ctrl_type_name(u_int16_t code)
{
	code = ntohs(code);

	if (code < PF_PPTP_CTRL_TYPE_START_REQ ||
	    code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) {
		static char reserved[] = "reserved-00";

		sprintf(&reserved[9], "%02x", code);
		return (reserved);
	} else {
		static const char *name[] = {
			"start_req", "start_rpy", "stop_req", "stop_rpy",
			"echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
			"call_in_1st", "call_in_2nd", "call_in_3rd",
			"call_clr", "call_disc", "error", "set_linkinfo"
		};

		return (name[code - 1]);
	}
}

static const size_t PF_PPTP_CTRL_MSG_MINSIZE =
	sizeof (struct pf_pptp_hdr) +
	sizeof (struct pf_pptp_ctrl_hdr) +
	MIN(sizeof (struct pf_pptp_ctrl_start_req),
	MIN(sizeof (struct pf_pptp_ctrl_start_rpy),
	MIN(sizeof (struct pf_pptp_ctrl_stop_req),
	MIN(sizeof (struct pf_pptp_ctrl_stop_rpy),
	MIN(sizeof (struct pf_pptp_ctrl_echo_req),
	MIN(sizeof (struct pf_pptp_ctrl_echo_rpy),
	MIN(sizeof (struct pf_pptp_ctrl_call_out_req),
	MIN(sizeof (struct pf_pptp_ctrl_call_out_rpy),
	MIN(sizeof (struct pf_pptp_ctrl_call_in_1st),
	MIN(sizeof (struct pf_pptp_ctrl_call_in_2nd),
	MIN(sizeof (struct pf_pptp_ctrl_call_in_3rd),
	MIN(sizeof (struct pf_pptp_ctrl_call_clr),
	MIN(sizeof (struct pf_pptp_ctrl_call_disc),
	MIN(sizeof (struct pf_pptp_ctrl_error),
	sizeof (struct pf_pptp_ctrl_set_linkinfo)
	))))))))))))));

union pf_pptp_ctrl_msg_union {
	struct pf_pptp_ctrl_start_req		start_req;
	struct pf_pptp_ctrl_start_rpy		start_rpy;
	struct pf_pptp_ctrl_stop_req		stop_req;
	struct pf_pptp_ctrl_stop_rpy		stop_rpy;
	struct pf_pptp_ctrl_echo_req		echo_req;
	struct pf_pptp_ctrl_echo_rpy		echo_rpy;
	struct pf_pptp_ctrl_call_out_req	call_out_req;
	struct pf_pptp_ctrl_call_out_rpy	call_out_rpy;
	struct pf_pptp_ctrl_call_in_1st		call_in_1st;
	struct pf_pptp_ctrl_call_in_2nd		call_in_2nd;
	struct pf_pptp_ctrl_call_in_3rd		call_in_3rd;
	struct pf_pptp_ctrl_call_clr		call_clr;
	struct pf_pptp_ctrl_call_disc		call_disc;
	struct pf_pptp_ctrl_error		error;
	struct pf_pptp_ctrl_set_linkinfo	set_linkinfo;
};

struct pf_pptp_ctrl_msg {
	struct pf_pptp_hdr		hdr;
	struct pf_pptp_ctrl_hdr		ctrl;
	union pf_pptp_ctrl_msg_union	msg;
};

#define PF_GRE_FLAG_CHECKSUM_PRESENT	0x8000
#define PF_GRE_FLAG_VERSION_MASK	0x0007
#define PF_GRE_PPP_ETHERTYPE		0x880B

struct pf_grev1_hdr {
	u_int16_t flags;
	u_int16_t protocol_type;
	u_int16_t payload_length;
	u_int16_t call_id;
};
static const u_int16_t PF_IKE_PORT = 500;

struct pf_ike_hdr {
	u_int64_t initiator_cookie, responder_cookie;
	u_int8_t next_payload, version, exchange_type, flags;
	u_int32_t message_id, length;
};

#define PF_IKE_PACKET_MINSIZE	(sizeof (struct pf_ike_hdr))

#define PF_IKEv1_EXCHTYPE_BASE			 1
#define PF_IKEv1_EXCHTYPE_ID_PROTECT		 2
#define PF_IKEv1_EXCHTYPE_AUTH_ONLY		 3
#define PF_IKEv1_EXCHTYPE_AGGRESSIVE		 4
#define PF_IKEv1_EXCHTYPE_INFORMATIONAL		 5
#define PF_IKEv2_EXCHTYPE_SA_INIT		34
#define PF_IKEv2_EXCHTYPE_AUTH			35
#define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA	36
#define PF_IKEv2_EXCHTYPE_INFORMATIONAL		37

#define PF_IKEv1_FLAG_E		0x01
#define PF_IKEv1_FLAG_C		0x02
#define PF_IKEv1_FLAG_A		0x04
#define PF_IKEv2_FLAG_I		0x08
#define PF_IKEv2_FLAG_V		0x10
#define PF_IKEv2_FLAG_R		0x20
static __inline int
pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
{
	int	diff;

	if (a->rule.ptr > b->rule.ptr)
		return (1);
	if (a->rule.ptr < b->rule.ptr)
		return (-1);
	if ((diff = a->af - b->af) != 0)
		return (diff);
	switch (a->af) {
#if INET
	case AF_INET:
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (a->addr.addr32[3] > b->addr.addr32[3])
			return (1);
		if (a->addr.addr32[3] < b->addr.addr32[3])
			return (-1);
		if (a->addr.addr32[2] > b->addr.addr32[2])
			return (1);
		if (a->addr.addr32[2] < b->addr.addr32[2])
			return (-1);
		if (a->addr.addr32[1] > b->addr.addr32[1])
			return (1);
		if (a->addr.addr32[1] < b->addr.addr32[1])
			return (-1);
		if (a->addr.addr32[0] > b->addr.addr32[0])
			return (1);
		if (a->addr.addr32[0] < b->addr.addr32[0])
			return (-1);
		break;
#endif /* INET6 */
	}
	return (0);
}
854 pf_state_compare_lan_ext(struct pf_state_key
*a
, struct pf_state_key
*b
)
857 #ifndef NO_APPLE_EXTENSIONS
861 if ((diff
= a
->proto
- b
->proto
) != 0)
863 if ((diff
= a
->af
- b
->af
) != 0)
866 #ifndef NO_APPLE_EXTENSIONS
867 extfilter
= PF_EXTFILTER_APD
;
872 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
877 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
879 if ((diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
884 if ((diff
= a
->proto_variant
- b
->proto_variant
))
886 extfilter
= a
->proto_variant
;
887 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
889 if ((extfilter
< PF_EXTFILTER_AD
) &&
890 (diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
895 if (a
->proto_variant
== PF_GRE_PPTP_VARIANT
&&
896 a
->proto_variant
== b
->proto_variant
) {
897 if (!!(diff
= a
->ext
.xport
.call_id
-
898 b
->ext
.xport
.call_id
))
904 if (!!(diff
= a
->ext
.xport
.spi
- b
->ext
.xport
.spi
))
916 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
918 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
920 #ifndef NO_APPLE_EXTENSIONS
921 if (extfilter
< PF_EXTFILTER_EI
) {
922 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
924 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
928 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
930 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
937 #ifndef NO_APPLE_EXTENSIONS
938 if (a
->lan
.addr
.addr32
[3] > b
->lan
.addr
.addr32
[3])
940 if (a
->lan
.addr
.addr32
[3] < b
->lan
.addr
.addr32
[3])
942 if (a
->lan
.addr
.addr32
[2] > b
->lan
.addr
.addr32
[2])
944 if (a
->lan
.addr
.addr32
[2] < b
->lan
.addr
.addr32
[2])
946 if (a
->lan
.addr
.addr32
[1] > b
->lan
.addr
.addr32
[1])
948 if (a
->lan
.addr
.addr32
[1] < b
->lan
.addr
.addr32
[1])
950 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
952 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
954 if (extfilter
< PF_EXTFILTER_EI
||
955 !PF_AZERO(&b
->ext
.addr
, AF_INET6
)) {
956 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
958 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
960 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
962 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
964 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
966 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
968 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
970 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
974 if (a
->lan
.addr
.addr32
[3] > b
->lan
.addr
.addr32
[3])
976 if (a
->lan
.addr
.addr32
[3] < b
->lan
.addr
.addr32
[3])
978 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
980 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
982 if (a
->lan
.addr
.addr32
[2] > b
->lan
.addr
.addr32
[2])
984 if (a
->lan
.addr
.addr32
[2] < b
->lan
.addr
.addr32
[2])
986 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
988 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
990 if (a
->lan
.addr
.addr32
[1] > b
->lan
.addr
.addr32
[1])
992 if (a
->lan
.addr
.addr32
[1] < b
->lan
.addr
.addr32
[1])
994 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
996 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
998 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
1000 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
1002 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1004 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1011 #ifndef NO_APPLE_EXTENSIONS
1012 if (a
->app_state
&& b
->app_state
) {
1013 if (a
->app_state
->compare_lan_ext
&&
1014 b
->app_state
->compare_lan_ext
) {
1015 diff
= (const char *)b
->app_state
->compare_lan_ext
-
1016 (const char *)a
->app_state
->compare_lan_ext
;
1019 diff
= a
->app_state
->compare_lan_ext(a
->app_state
,
1026 if ((diff
= a
->lan
.port
- b
->lan
.port
) != 0)
1028 if ((diff
= a
->ext
.port
- b
->ext
.port
) != 0)
1036 pf_state_compare_ext_gwy(struct pf_state_key
*a
, struct pf_state_key
*b
)
1039 #ifndef NO_APPLE_EXTENSIONS
1043 if ((diff
= a
->proto
- b
->proto
) != 0)
1046 if ((diff
= a
->af
- b
->af
) != 0)
1049 #ifndef NO_APPLE_EXTENSIONS
1050 extfilter
= PF_EXTFILTER_APD
;
1054 case IPPROTO_ICMPV6
:
1055 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1060 if ((diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
1062 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1067 if ((diff
= a
->proto_variant
- b
->proto_variant
))
1069 extfilter
= a
->proto_variant
;
1070 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1072 if ((extfilter
< PF_EXTFILTER_AD
) &&
1073 (diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
1078 if (a
->proto_variant
== PF_GRE_PPTP_VARIANT
&&
1079 a
->proto_variant
== b
->proto_variant
) {
1080 if (!!(diff
= a
->gwy
.xport
.call_id
-
1081 b
->gwy
.xport
.call_id
))
1087 if (!!(diff
= a
->gwy
.xport
.spi
- b
->gwy
.xport
.spi
))
1099 #ifndef NO_APPLE_EXTENSIONS
1100 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1102 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1104 if (extfilter
< PF_EXTFILTER_EI
) {
1105 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1107 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1111 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1113 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1115 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1117 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1124 #ifndef NO_APPLE_EXTENSIONS
1125 if (a
->gwy
.addr
.addr32
[3] > b
->gwy
.addr
.addr32
[3])
1127 if (a
->gwy
.addr
.addr32
[3] < b
->gwy
.addr
.addr32
[3])
1129 if (a
->gwy
.addr
.addr32
[2] > b
->gwy
.addr
.addr32
[2])
1131 if (a
->gwy
.addr
.addr32
[2] < b
->gwy
.addr
.addr32
[2])
1133 if (a
->gwy
.addr
.addr32
[1] > b
->gwy
.addr
.addr32
[1])
1135 if (a
->gwy
.addr
.addr32
[1] < b
->gwy
.addr
.addr32
[1])
1137 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1139 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1141 if (extfilter
< PF_EXTFILTER_EI
||
1142 !PF_AZERO(&b
->ext
.addr
, AF_INET6
)) {
1143 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
1145 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
1147 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
1149 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
1151 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
1153 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1155 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1157 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1161 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
1163 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
1165 if (a
->gwy
.addr
.addr32
[3] > b
->gwy
.addr
.addr32
[3])
1167 if (a
->gwy
.addr
.addr32
[3] < b
->gwy
.addr
.addr32
[3])
1169 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
1171 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
1173 if (a
->gwy
.addr
.addr32
[2] > b
->gwy
.addr
.addr32
[2])
1175 if (a
->gwy
.addr
.addr32
[2] < b
->gwy
.addr
.addr32
[2])
1177 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
1179 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1181 if (a
->gwy
.addr
.addr32
[1] > b
->gwy
.addr
.addr32
[1])
1183 if (a
->gwy
.addr
.addr32
[1] < b
->gwy
.addr
.addr32
[1])
1185 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1187 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1189 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1191 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1198 #ifndef NO_APPLE_EXTENSIONS
1199 if (a
->app_state
&& b
->app_state
) {
1200 if (a
->app_state
->compare_ext_gwy
&&
1201 b
->app_state
->compare_ext_gwy
) {
1202 diff
= (const char *)b
->app_state
->compare_ext_gwy
-
1203 (const char *)a
->app_state
->compare_ext_gwy
;
1206 diff
= a
->app_state
->compare_ext_gwy(a
->app_state
,
1213 if ((diff
= a
->ext
.port
- b
->ext
.port
) != 0)
1215 if ((diff
= a
->gwy
.port
- b
->gwy
.port
) != 0)
static __inline int
pf_state_compare_id(struct pf_state *a, struct pf_state *b)
{
	if (a->id > b->id)
		return (1);
	if (a->id < b->id)
		return (-1);
	if (a->creatorid > b->creatorid)
		return (1);
	if (a->creatorid < b->creatorid)
		return (-1);

	return (0);
}

void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
struct pf_state *
pf_find_state_byid(struct pf_state_cmp *key)
{
	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
}

static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_state_key	*sk = NULL;
	struct pf_state		*s;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	switch (dir) {
	case PF_OUT:
		sk = RB_FIND(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
		    (struct pf_state_key *)key);
		break;
	case PF_IN:
		sk = RB_FIND(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy,
		    (struct pf_state_key *)key);
		break;
	default:
		panic("pf_find_state");
	}

	/* list is sorted, if-bound states before floating ones */
	if (sk != NULL)
		TAILQ_FOREACH(s, &sk->states, next)
			if (s->kif == pfi_all || s->kif == kif)
				return (s);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key	*sk = NULL;
	struct pf_state		*s, *ret = NULL;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	switch (dir) {
	case PF_OUT:
		sk = RB_FIND(pf_state_tree_lan_ext,
		    &pf_statetbl_lan_ext, (struct pf_state_key *)key);
		break;
	case PF_IN:
		sk = RB_FIND(pf_state_tree_ext_gwy,
		    &pf_statetbl_ext_gwy, (struct pf_state_key *)key);
		break;
	default:
		panic("pf_find_state_all");
	}

	if (sk != NULL) {
		ret = TAILQ_FIRST(&sk->states);
		if (more == NULL)
			return (ret);

		TAILQ_FOREACH(s, &sk->states, next)
			(*more)++;
	}

	return (ret);
}
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = pf_time_second();
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = pf_time_second(), diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
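/*
 * Worked example (illustrative, not part of the original source): for a
 * rule with "max-src-conn-rate 10/5", pf_init_threshold() stores
 * limit = 10 * PF_THRESHOLD_MULT and seconds = 5.  Each new connection
 * adds PF_THRESHOLD_MULT to count after first decaying the old count by
 * count * diff / seconds; e.g. a count of 8000 seen again 2 seconds
 * later decays by 8000 * 2 / 5 = 3200 before the new connection is
 * counted.  pf_check_threshold() trips once count exceeds limit.
 */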
1359 pf_src_connlimit(struct pf_state
**state
)
1363 (*state
)->src_node
->conn
++;
1364 VERIFY((*state
)->src_node
->conn
!= 0);
1365 (*state
)->src
.tcp_est
= 1;
1366 pf_add_threshold(&(*state
)->src_node
->conn_rate
);
1368 if ((*state
)->rule
.ptr
->max_src_conn
&&
1369 (*state
)->rule
.ptr
->max_src_conn
<
1370 (*state
)->src_node
->conn
) {
1371 pf_status
.lcounters
[LCNT_SRCCONN
]++;
1375 if ((*state
)->rule
.ptr
->max_src_conn_rate
.limit
&&
1376 pf_check_threshold(&(*state
)->src_node
->conn_rate
)) {
1377 pf_status
.lcounters
[LCNT_SRCCONNRATE
]++;
1384 if ((*state
)->rule
.ptr
->overload_tbl
) {
1386 u_int32_t killed
= 0;
1388 pf_status
.lcounters
[LCNT_OVERLOAD_TABLE
]++;
1389 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1390 printf("pf_src_connlimit: blocking address ");
1391 pf_print_host(&(*state
)->src_node
->addr
, 0,
1392 (*state
)->state_key
->af
);
1395 bzero(&p
, sizeof (p
));
1396 p
.pfra_af
= (*state
)->state_key
->af
;
1397 switch ((*state
)->state_key
->af
) {
1401 p
.pfra_ip4addr
= (*state
)->src_node
->addr
.v4
;
1407 p
.pfra_ip6addr
= (*state
)->src_node
->addr
.v6
;
1412 pfr_insert_kentry((*state
)->rule
.ptr
->overload_tbl
,
1413 &p
, pf_time_second());
1415 /* kill existing states if that's required. */
1416 if ((*state
)->rule
.ptr
->flush
) {
1417 struct pf_state_key
*sk
;
1418 struct pf_state
*st
;
1420 pf_status
.lcounters
[LCNT_OVERLOAD_FLUSH
]++;
1421 RB_FOREACH(st
, pf_state_tree_id
, &tree_id
) {
1424 * Kill states from this source. (Only those
1425 * from the same rule if PF_FLUSH_GLOBAL is not
1429 (*state
)->state_key
->af
&&
1430 (((*state
)->state_key
->direction
==
1432 PF_AEQ(&(*state
)->src_node
->addr
,
1433 &sk
->lan
.addr
, sk
->af
)) ||
1434 ((*state
)->state_key
->direction
== PF_IN
&&
1435 PF_AEQ(&(*state
)->src_node
->addr
,
1436 &sk
->ext
.addr
, sk
->af
))) &&
1437 ((*state
)->rule
.ptr
->flush
&
1439 (*state
)->rule
.ptr
== st
->rule
.ptr
)) {
1440 st
->timeout
= PFTM_PURGE
;
1441 st
->src
.state
= st
->dst
.state
=
1446 if (pf_status
.debug
>= PF_DEBUG_MISC
)
1447 printf(", %u states killed", killed
);
1449 if (pf_status
.debug
>= PF_DEBUG_MISC
)
1453 /* kill this state */
1454 (*state
)->timeout
= PFTM_PURGE
;
1455 (*state
)->src
.state
= (*state
)->dst
.state
= TCPS_CLOSED
;
1460 pf_insert_src_node(struct pf_src_node
**sn
, struct pf_rule
*rule
,
1461 struct pf_addr
*src
, sa_family_t af
)
1463 struct pf_src_node k
;
1467 PF_ACPY(&k
.addr
, src
, af
);
1468 if (rule
->rule_flag
& PFRULE_RULESRCTRACK
||
1469 rule
->rpool
.opts
& PF_POOL_STICKYADDR
)
1473 pf_status
.scounters
[SCNT_SRC_NODE_SEARCH
]++;
1474 *sn
= RB_FIND(pf_src_tree
, &tree_src_tracking
, &k
);
1477 if (!rule
->max_src_nodes
||
1478 rule
->src_nodes
< rule
->max_src_nodes
)
1479 (*sn
) = pool_get(&pf_src_tree_pl
, PR_WAITOK
);
1481 pf_status
.lcounters
[LCNT_SRCNODES
]++;
1484 bzero(*sn
, sizeof (struct pf_src_node
));
1486 pf_init_threshold(&(*sn
)->conn_rate
,
1487 rule
->max_src_conn_rate
.limit
,
1488 rule
->max_src_conn_rate
.seconds
);
1491 if (rule
->rule_flag
& PFRULE_RULESRCTRACK
||
1492 rule
->rpool
.opts
& PF_POOL_STICKYADDR
)
1493 (*sn
)->rule
.ptr
= rule
;
1495 (*sn
)->rule
.ptr
= NULL
;
1496 PF_ACPY(&(*sn
)->addr
, src
, af
);
1497 if (RB_INSERT(pf_src_tree
,
1498 &tree_src_tracking
, *sn
) != NULL
) {
1499 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1500 printf("pf: src_tree insert failed: ");
1501 pf_print_host(&(*sn
)->addr
, 0, af
);
1504 pool_put(&pf_src_tree_pl
, *sn
);
1507 (*sn
)->creation
= pf_time_second();
1508 (*sn
)->ruletype
= rule
->action
;
1509 if ((*sn
)->rule
.ptr
!= NULL
)
1510 (*sn
)->rule
.ptr
->src_nodes
++;
1511 pf_status
.scounters
[SCNT_SRC_NODE_INSERT
]++;
1512 pf_status
.src_nodes
++;
1514 if (rule
->max_src_states
&&
1515 (*sn
)->states
>= rule
->max_src_states
) {
1516 pf_status
.lcounters
[LCNT_SRCSTATES
]++;
1524 pf_stateins_err(const char *tree
, struct pf_state
*s
, struct pfi_kif
*kif
)
1526 struct pf_state_key
*sk
= s
->state_key
;
1528 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1529 #ifndef NO_APPLE_EXTENSIONS
1530 printf("pf: state insert failed: %s %s ", tree
, kif
->pfik_name
);
1531 switch (sk
->proto
) {
1541 case IPPROTO_ICMPV6
:
1545 printf("PROTO=%u", sk
->proto
);
1549 pf_print_sk_host(&sk
->lan
, sk
->af
, sk
->proto
,
1552 pf_print_sk_host(&sk
->gwy
, sk
->af
, sk
->proto
,
1555 pf_print_sk_host(&sk
->ext
, sk
->af
, sk
->proto
,
1558 printf("pf: state insert failed: %s %s", tree
, kif
->pfik_name
);
1560 pf_print_host(&sk
->lan
.addr
, sk
->lan
.port
,
1563 pf_print_host(&sk
->gwy
.addr
, sk
->gwy
.port
,
1566 pf_print_host(&sk
->ext
.addr
, sk
->ext
.port
,
1569 if (s
->sync_flags
& PFSTATE_FROMSYNC
)
1570 printf(" (from sync)");
1576 pf_insert_state(struct pfi_kif
*kif
, struct pf_state
*s
)
1578 struct pf_state_key
*cur
;
1579 struct pf_state
*sp
;
1581 VERIFY(s
->state_key
!= NULL
);
1584 if ((cur
= RB_INSERT(pf_state_tree_lan_ext
, &pf_statetbl_lan_ext
,
1585 s
->state_key
)) != NULL
) {
1586 /* key exists. check for same kif, if none, add to key */
1587 TAILQ_FOREACH(sp
, &cur
->states
, next
)
1588 if (sp
->kif
== kif
) { /* collision! */
1589 pf_stateins_err("tree_lan_ext", s
, kif
);
1591 PF_DT_SKIP_LANEXT
|PF_DT_SKIP_EXTGWY
);
1594 pf_detach_state(s
, PF_DT_SKIP_LANEXT
|PF_DT_SKIP_EXTGWY
);
1595 pf_attach_state(cur
, s
, kif
== pfi_all
? 1 : 0);
1598 /* if cur != NULL, we already found a state key and attached to it */
1599 if (cur
== NULL
&& (cur
= RB_INSERT(pf_state_tree_ext_gwy
,
1600 &pf_statetbl_ext_gwy
, s
->state_key
)) != NULL
) {
1601 /* must not happen. we must have found the sk above! */
1602 pf_stateins_err("tree_ext_gwy", s
, kif
);
1603 pf_detach_state(s
, PF_DT_SKIP_EXTGWY
);
1607 if (s
->id
== 0 && s
->creatorid
== 0) {
1608 s
->id
= htobe64(pf_status
.stateid
++);
1609 s
->creatorid
= pf_status
.hostid
;
1611 if (RB_INSERT(pf_state_tree_id
, &tree_id
, s
) != NULL
) {
1612 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1613 printf("pf: state insert failed: "
1614 "id: %016llx creatorid: %08x",
1615 be64toh(s
->id
), ntohl(s
->creatorid
));
1616 if (s
->sync_flags
& PFSTATE_FROMSYNC
)
1617 printf(" (from sync)");
1620 pf_detach_state(s
, 0);
1623 TAILQ_INSERT_TAIL(&state_list
, s
, entry_list
);
1624 pf_status
.fcounters
[FCNT_STATE_INSERT
]++;
1626 VERIFY(pf_status
.states
!= 0);
1627 pfi_kif_ref(kif
, PFI_KIF_REF_STATE
);
1629 pfsync_insert_state(s
);
void
pf_purge_thread_fn(void *v, wait_result_t w)
{
#pragma unused(v, w)
	u_int32_t nloops = 0;
	int t = 0;

	for (;;) {
		(void) tsleep(pf_purge_thread_fn, PWAIT, "pftm", t * hz);

		lck_rw_lock_shared(pf_perim_lock);
		lck_mtx_lock(pf_lock);

		/* purge everything if not running */
		if (!pf_status.running) {
			pf_purge_expired_states(pf_status.states);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();

			/* terminate thread (we don't currently do this) */
			if (pf_purge_thread == NULL) {
				lck_mtx_unlock(pf_lock);
				lck_rw_done(pf_perim_lock);

				thread_deallocate(current_thread());
				thread_terminate(current_thread());
				/* NOTREACHED */
			}

			/* if there's nothing left, sleep w/o timeout */
			if (pf_status.states == 0 &&
			    pf_normalize_isempty() &&
			    RB_EMPTY(&tree_src_tracking))
				t = 0;

			lck_mtx_unlock(pf_lock);
			lck_rw_done(pf_perim_lock);
			continue;
		} else if (t == 0) {
			/* Set timeout to 1 second */
			t = 1;
		}

		/* process a fraction of the state table every second */
		pf_purge_expired_states(1 + (pf_status.states
		    / pf_default_rule.timeout[PFTM_INTERVAL]));

		/* purge other expired types every PFTM_INTERVAL seconds */
		if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();
			nloops = 0;
		}

		lck_mtx_unlock(pf_lock);
		lck_rw_done(pf_perim_lock);
	}
}
u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	t;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (pf_time_second());
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	VERIFY(state->timeout != PFTM_UNLINKED);
	VERIFY(state->timeout < PFTM_MAX);
	t = state->rule.ptr->timeout[state->timeout];
	if (!t)
		t = pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + t * (end - states) /
			    (end - start));
		else
			return (pf_time_second());
	}
	return (state->expire + t);
}
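/*
 * Worked example (illustrative, not from the original source): with
 * adaptive.start 6000, adaptive.end 12000 and a base timeout t = 60s,
 * a table holding 9000 states scales the remaining lifetime to
 * 60 * (12000 - 9000) / (12000 - 6000) = 30 seconds past "expire";
 * at or beyond 12000 states the state is treated as already expired.
 */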
void
pf_purge_expired_src_nodes(void)
{
	struct pf_src_node	*cur, *next;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states <= 0 && cur->expire <= pf_time_second()) {
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			pf_status.src_nodes--;
			pool_put(&pf_src_tree_pl, cur);
		}
	}
}
void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t t;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (s->src_node != NULL) {
		if (s->src.tcp_est) {
			VERIFY(s->src_node->conn > 0);
			--s->src_node->conn;
		}
		VERIFY(s->src_node->states > 0);
		if (--s->src_node->states <= 0) {
			t = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!t)
				t = pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = pf_time_second() + t;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		VERIFY(s->nat_src_node->states > 0);
		if (--s->nat_src_node->states <= 0) {
			t = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!t)
				t = pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = pf_time_second() + t;
		}
	}

	s->src_node = s->nat_src_node = NULL;
}
void
pf_unlink_state(struct pf_state *cur)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

#ifndef NO_APPLE_EXTENSIONS
	if (cur->src.state == PF_TCPS_PROXY_DST) {
		pf_send_tcp(cur->rule.ptr, cur->state_key->af,
		    &cur->state_key->ext.addr, &cur->state_key->lan.addr,
		    cur->state_key->ext.xport.port,
		    cur->state_key->lan.xport.port,
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}

	hook_runloop(&cur->unlink_hooks, HOOK_REMOVE|HOOK_FREE);
#else
	if (cur->src.state == PF_TCPS_PROXY_DST) {
		pf_send_tcp(cur->rule.ptr, cur->state_key->af,
		    &cur->state_key->ext.addr, &cur->state_key->lan.addr,
		    cur->state_key->ext.port, cur->state_key->lan.port,
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}
#endif
	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
#if NPFSYNC
	if (cur->creatorid == pf_status.hostid)
		pfsync_delete_state(cur);
#endif
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur, 0);
}
/* callers should be at splpf and hold the
 * write_lock on pf_consistency_lock */
void
pf_free_state(struct pf_state *cur)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

#if NPFSYNC
	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	    pfsyncif->sc_bulk_terminator == cur))
		return;
#endif
	VERIFY(cur->timeout == PFTM_UNLINKED);
	VERIFY(cur->rule.ptr->states > 0);
	if (--cur->rule.ptr->states <= 0 &&
	    cur->rule.ptr->src_nodes <= 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->nat_rule.ptr != NULL) {
		VERIFY(cur->nat_rule.ptr->states > 0);
		if (--cur->nat_rule.ptr->states <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0)
			pf_rm_rule(NULL, cur->nat_rule.ptr);
	}
	if (cur->anchor.ptr != NULL) {
		VERIFY(cur->anchor.ptr->states > 0);
		if (--cur->anchor.ptr->states <= 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	}
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
	TAILQ_REMOVE(&state_list, cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	pool_put(&pf_state_pl, cur);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	VERIFY(pf_status.states > 0);
	pf_status.states--;
}
void
pf_purge_expired_states(u_int32_t maxcheck)
{
	static struct pf_state	*cur = NULL;
	struct pf_state		*next;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	while (maxcheck--) {
		/* wrap to start of list when we hit the end */
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list);
			if (cur == NULL)
				break;	/* list empty */
		}

		/* get next state, as cur may get deleted */
		next = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= pf_time_second()) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			pf_free_state(cur);
		}
		cur = next;
	}
}
int
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE)
		return (0);
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
		return (1);
	return (0);
}

void
pf_tbladdr_remove(struct pf_addr_wrap *aw)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
		return;
	pfr_detach_table(aw->p.tbl);
	aw->p.tbl = NULL;
}

void
pf_tbladdr_copyout(struct pf_addr_wrap *aw)
{
	struct pfr_ktable *kt = aw->p.tbl;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
		return;
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tbl = NULL;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
	    kt->pfrkt_cnt : -1;
}
1933 #ifndef NO_APPLE_EXTENSIONS
1935 pf_print_addr(struct pf_addr
*addr
, sa_family_t af
)
1940 u_int32_t a
= ntohl(addr
->addr32
[0]);
1941 printf("%u.%u.%u.%u", (a
>>24)&255, (a
>>16)&255,
1949 u_int8_t i
, curstart
= 255, curend
= 0,
1950 maxstart
= 0, maxend
= 0;
1951 for (i
= 0; i
< 8; i
++) {
1952 if (!addr
->addr16
[i
]) {
1953 if (curstart
== 255)
1959 if ((curend
- curstart
) >
1960 (maxend
- maxstart
)) {
1961 maxstart
= curstart
;
1968 for (i
= 0; i
< 8; i
++) {
1969 if (i
>= maxstart
&& i
<= maxend
) {
1978 b
= ntohs(addr
->addr16
[i
]);
1991 pf_print_sk_host(struct pf_state_host
*sh
, sa_family_t af
, int proto
,
1992 u_int8_t proto_variant
)
1994 pf_print_addr(&sh
->addr
, af
);
1999 printf("[%08x]", ntohl(sh
->xport
.spi
));
2003 if (proto_variant
== PF_GRE_PPTP_VARIANT
)
2004 printf("[%u]", ntohs(sh
->xport
.call_id
));
2009 printf("[%u]", ntohs(sh
->xport
.port
));
2019 pf_print_host(struct pf_addr
*addr
, u_int16_t p
, sa_family_t af
)
2021 #ifndef NO_APPLE_EXTENSIONS
2022 pf_print_addr(addr
, af
);
2024 printf("[%u]", ntohs(p
));
2029 u_int32_t a
= ntohl(addr
->addr32
[0]);
2030 printf("%u.%u.%u.%u", (a
>>24)&255, (a
>>16)&255,
2042 u_int8_t i
, curstart
= 255, curend
= 0,
2043 maxstart
= 0, maxend
= 0;
2044 for (i
= 0; i
< 8; i
++) {
2045 if (!addr
->addr16
[i
]) {
2046 if (curstart
== 255)
2052 if ((curend
- curstart
) >
2053 (maxend
- maxstart
)) {
2054 maxstart
= curstart
;
2061 for (i
= 0; i
< 8; i
++) {
2062 if (i
>= maxstart
&& i
<= maxend
) {
2071 b
= ntohs(addr
->addr16
[i
]);
2089 pf_print_state(struct pf_state
*s
)
2091 struct pf_state_key
*sk
= s
->state_key
;
2092 switch (sk
->proto
) {
2093 #ifndef NO_APPLE_EXTENSIONS
2098 printf("GRE%u ", sk
->proto_variant
);
2110 case IPPROTO_ICMPV6
:
2114 printf("%u ", sk
->proto
);
2117 #ifndef NO_APPLE_EXTENSIONS
2118 pf_print_sk_host(&sk
->lan
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2120 pf_print_sk_host(&sk
->gwy
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2122 pf_print_sk_host(&sk
->ext
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2124 pf_print_host(&sk
->lan
.addr
, sk
->lan
.port
, sk
->af
);
2126 pf_print_host(&sk
->gwy
.addr
, sk
->gwy
.port
, sk
->af
);
2128 pf_print_host(&sk
->ext
.addr
, sk
->ext
.port
, sk
->af
);
2130 printf(" [lo=%u high=%u win=%u modulator=%u", s
->src
.seqlo
,
2131 s
->src
.seqhi
, s
->src
.max_win
, s
->src
.seqdiff
);
2132 if (s
->src
.wscale
&& s
->dst
.wscale
)
2133 printf(" wscale=%u", s
->src
.wscale
& PF_WSCALE_MASK
);
2135 printf(" [lo=%u high=%u win=%u modulator=%u", s
->dst
.seqlo
,
2136 s
->dst
.seqhi
, s
->dst
.max_win
, s
->dst
.seqdiff
);
2137 if (s
->src
.wscale
&& s
->dst
.wscale
)
2138 printf(" wscale=%u", s
->dst
.wscale
& PF_WSCALE_MASK
);
2140 printf(" %u:%u", s
->src
.state
, s
->dst
.state
);
2144 pf_print_flags(u_int8_t f
)
2166 #define PF_SET_SKIP_STEPS(i) \
2168 while (head[i] != cur) { \
2169 head[i]->skip[i].ptr = cur; \
2170 head[i] = TAILQ_NEXT(head[i], entries); \
2175 pf_calc_skip_steps(struct pf_rulequeue
*rules
)
2177 struct pf_rule
*cur
, *prev
, *head
[PF_SKIP_COUNT
];
2180 cur
= TAILQ_FIRST(rules
);
2182 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
2184 while (cur
!= NULL
) {
2186 if (cur
->kif
!= prev
->kif
|| cur
->ifnot
!= prev
->ifnot
)
2187 PF_SET_SKIP_STEPS(PF_SKIP_IFP
);
2188 if (cur
->direction
!= prev
->direction
)
2189 PF_SET_SKIP_STEPS(PF_SKIP_DIR
);
2190 if (cur
->af
!= prev
->af
)
2191 PF_SET_SKIP_STEPS(PF_SKIP_AF
);
2192 if (cur
->proto
!= prev
->proto
)
2193 PF_SET_SKIP_STEPS(PF_SKIP_PROTO
);
2194 if (cur
->src
.neg
!= prev
->src
.neg
||
2195 pf_addr_wrap_neq(&cur
->src
.addr
, &prev
->src
.addr
))
2196 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR
);
2197 #ifndef NO_APPLE_EXTENSIONS
2199 union pf_rule_xport
*cx
= &cur
->src
.xport
;
2200 union pf_rule_xport
*px
= &prev
->src
.xport
;
2202 switch (cur
->proto
) {
2205 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2208 if (prev
->proto
== IPPROTO_GRE
||
2209 prev
->proto
== IPPROTO_ESP
||
2210 cx
->range
.op
!= px
->range
.op
||
2211 cx
->range
.port
[0] != px
->range
.port
[0] ||
2212 cx
->range
.port
[1] != px
->range
.port
[1])
2213 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2218 if (cur
->src
.port
[0] != prev
->src
.port
[0] ||
2219 cur
->src
.port
[1] != prev
->src
.port
[1] ||
2220 cur
->src
.port_op
!= prev
->src
.port_op
)
2221 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2223 if (cur
->dst
.neg
!= prev
->dst
.neg
||
2224 pf_addr_wrap_neq(&cur
->dst
.addr
, &prev
->dst
.addr
))
2225 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR
);
2226 #ifndef NO_APPLE_EXTENSIONS
2228 union pf_rule_xport
*cx
= &cur
->dst
.xport
;
2229 union pf_rule_xport
*px
= &prev
->dst
.xport
;
2231 switch (cur
->proto
) {
2233 if (cur
->proto
!= prev
->proto
||
2234 cx
->call_id
!= px
->call_id
)
2235 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2238 if (cur
->proto
!= prev
->proto
||
2240 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2243 if (prev
->proto
== IPPROTO_GRE
||
2244 prev
->proto
== IPPROTO_ESP
||
2245 cx
->range
.op
!= px
->range
.op
||
2246 cx
->range
.port
[0] != px
->range
.port
[0] ||
2247 cx
->range
.port
[1] != px
->range
.port
[1])
2248 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2253 if (cur
->dst
.port
[0] != prev
->dst
.port
[0] ||
2254 cur
->dst
.port
[1] != prev
->dst
.port
[1] ||
2255 cur
->dst
.port_op
!= prev
->dst
.port_op
)
2256 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2260 cur
= TAILQ_NEXT(cur
, entries
);
2262 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
2263 PF_SET_SKIP_STEPS(i
);
2267 pf_addr_wrap_neq(struct pf_addr_wrap
*aw1
, struct pf_addr_wrap
*aw2
)
2269 if (aw1
->type
!= aw2
->type
)
2271 switch (aw1
->type
) {
2272 case PF_ADDR_ADDRMASK
:
2274 if (PF_ANEQ(&aw1
->v
.a
.addr
, &aw2
->v
.a
.addr
, 0))
2276 if (PF_ANEQ(&aw1
->v
.a
.mask
, &aw2
->v
.a
.mask
, 0))
2279 case PF_ADDR_DYNIFTL
:
2280 return (aw1
->p
.dyn
->pfid_kt
!= aw2
->p
.dyn
->pfid_kt
);
2281 case PF_ADDR_NOROUTE
:
2282 case PF_ADDR_URPFFAILED
:
2285 return (aw1
->p
.tbl
!= aw2
->p
.tbl
);
2286 case PF_ADDR_RTLABEL
:
2287 return (aw1
->v
.rtlabel
!= aw2
->v
.rtlabel
);
2289 printf("invalid address type: %d\n", aw1
->type
);
static u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0);
	l = cksum + old - new;
	l = (l >> 16) + (l & 0xffff);
	l = l & 0xffff;
	if (udp && !l)
		return (0xffff);
	return (l);
}
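/*
 * Worked example (illustrative, not part of the original source): rewriting
 * a 16-bit word from old = 0x1234 to new = 0x2345 under an existing
 * checksum of 0xabcd gives l = 0xabcd + 0x1234 - 0x2345 = 0x9abc; there is
 * no carry to fold, so the fixed-up checksum is 0x9abc.  For UDP the
 * special values are preserved: an all-zero checksum ("no checksum") stays
 * zero, and a zero result is folded back to 0xffff.
 */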
2310 pf_change_ap(int dir
, struct mbuf
*m
, struct pf_addr
*a
, u_int16_t
*p
,
2311 u_int16_t
*ic
, u_int16_t
*pc
, struct pf_addr
*an
, u_int16_t pn
,
2312 u_int8_t u
, sa_family_t af
)
2317 PF_ACPY(&ao
, a
, af
);
2325 *ic
= pf_cksum_fixup(pf_cksum_fixup(*ic
,
2326 ao
.addr16
[0], an
->addr16
[0], 0),
2327 ao
.addr16
[1], an
->addr16
[1], 0);
2330 * If the packet is originated from an ALG on the NAT gateway
2331 * (source address is loopback or local), in which case the
2332 * TCP/UDP checksum field contains the pseudo header checksum
2333 * that's not yet complemented.
2335 if (dir
== PF_OUT
&& m
!= NULL
&&
2336 (m
->m_flags
& M_PKTHDR
) &&
2337 (m
->m_pkthdr
.csum_flags
& (CSUM_TCP
| CSUM_UDP
))) {
2338 /* Pseudo-header checksum does not include ports */
2339 *pc
= ~pf_cksum_fixup(pf_cksum_fixup(~*pc
,
2340 ao
.addr16
[0], an
->addr16
[0], u
),
2341 ao
.addr16
[1], an
->addr16
[1], u
);
2343 *pc
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc
,
2344 ao
.addr16
[0], an
->addr16
[0], u
),
2345 ao
.addr16
[1], an
->addr16
[1], u
),
2352 *pc
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2353 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2354 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc
,
2355 ao
.addr16
[0], an
->addr16
[0], u
),
2356 ao
.addr16
[1], an
->addr16
[1], u
),
2357 ao
.addr16
[2], an
->addr16
[2], u
),
2358 ao
.addr16
[3], an
->addr16
[3], u
),
2359 ao
.addr16
[4], an
->addr16
[4], u
),
2360 ao
.addr16
[5], an
->addr16
[5], u
),
2361 ao
.addr16
[6], an
->addr16
[6], u
),
2362 ao
.addr16
[7], an
->addr16
[7], u
),
/* Changes a u_int32_t.  Uses a void * so there are no align restrictions */
static void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof (ao));
	memcpy(a, &an, sizeof (u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}
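/*
 * Note: ao / 65536 and ao % 65536 are simply the high and low 16-bit
 * halves of the old 32-bit value (likewise an for the new one), so the
 * checksum is patched in the same 16-bit units the Internet checksum is
 * computed over; byte order is irrelevant here as long as the old and
 * new values are split the same way.
 */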
#if INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
2406 pf_change_icmp(struct pf_addr
*ia
, u_int16_t
*ip
, struct pf_addr
*oa
,
2407 struct pf_addr
*na
, u_int16_t np
, u_int16_t
*pc
, u_int16_t
*h2c
,
2408 u_int16_t
*ic
, u_int16_t
*hc
, u_int8_t u
, sa_family_t af
)
2410 struct pf_addr oia
, ooa
;
2412 PF_ACPY(&oia
, ia
, af
);
2413 PF_ACPY(&ooa
, oa
, af
);
2415 /* Change inner protocol port, fix inner protocol checksum. */
2417 u_int16_t oip
= *ip
;
2424 *pc
= pf_cksum_fixup(*pc
, oip
, *ip
, u
);
2425 *ic
= pf_cksum_fixup(*ic
, oip
, *ip
, 0);
2427 *ic
= pf_cksum_fixup(*ic
, opc
, *pc
, 0);
2429 /* Change inner ip address, fix inner ip and icmp checksums. */
2430 PF_ACPY(ia
, na
, af
);
2434 u_int32_t oh2c
= *h2c
;
2436 *h2c
= pf_cksum_fixup(pf_cksum_fixup(*h2c
,
2437 oia
.addr16
[0], ia
->addr16
[0], 0),
2438 oia
.addr16
[1], ia
->addr16
[1], 0);
2439 *ic
= pf_cksum_fixup(pf_cksum_fixup(*ic
,
2440 oia
.addr16
[0], ia
->addr16
[0], 0),
2441 oia
.addr16
[1], ia
->addr16
[1], 0);
2442 *ic
= pf_cksum_fixup(*ic
, oh2c
, *h2c
, 0);
2448 *ic
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2449 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2450 pf_cksum_fixup(pf_cksum_fixup(*ic
,
2451 oia
.addr16
[0], ia
->addr16
[0], u
),
2452 oia
.addr16
[1], ia
->addr16
[1], u
),
2453 oia
.addr16
[2], ia
->addr16
[2], u
),
2454 oia
.addr16
[3], ia
->addr16
[3], u
),
2455 oia
.addr16
[4], ia
->addr16
[4], u
),
2456 oia
.addr16
[5], ia
->addr16
[5], u
),
2457 oia
.addr16
[6], ia
->addr16
[6], u
),
2458 oia
.addr16
[7], ia
->addr16
[7], u
);
2462 /* Change outer ip address, fix outer ip or icmpv6 checksum. */
2463 PF_ACPY(oa
, na
, af
);
2467 *hc
= pf_cksum_fixup(pf_cksum_fixup(*hc
,
2468 ooa
.addr16
[0], oa
->addr16
[0], 0),
2469 ooa
.addr16
[1], oa
->addr16
[1], 0);
2474 *ic
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2475 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2476 pf_cksum_fixup(pf_cksum_fixup(*ic
,
2477 ooa
.addr16
[0], oa
->addr16
[0], u
),
2478 ooa
.addr16
[1], oa
->addr16
[1], u
),
2479 ooa
.addr16
[2], oa
->addr16
[2], u
),
2480 ooa
.addr16
[3], oa
->addr16
[3], u
),
2481 ooa
.addr16
[4], oa
->addr16
[4], u
),
2482 ooa
.addr16
[5], oa
->addr16
[5], u
),
2483 ooa
.addr16
[6], oa
->addr16
[6], u
),
2484 ooa
.addr16
[7], oa
->addr16
[7], u
);
2492 * Need to modulate the sequence numbers in the TCP SACK option
2493 * (credits to Krzysztof Pfaff for report and patch)
2496 pf_modulate_sack(struct mbuf
*m
, int off
, struct pf_pdesc
*pd
,
2497 struct tcphdr
*th
, struct pf_state_peer
*dst
)
2499 int hlen
= (th
->th_off
<< 2) - sizeof (*th
), thoptlen
= hlen
;
2500 u_int8_t opts
[MAX_TCPOPTLEN
], *opt
= opts
;
2501 int copyback
= 0, i
, olen
;
2502 struct sackblk sack
;
2504 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2505 if (hlen
< TCPOLEN_SACKLEN
||
2506 !pf_pull_hdr(m
, off
+ sizeof (*th
), opts
, hlen
, NULL
, NULL
, pd
->af
))
2509 while (hlen
>= TCPOLEN_SACKLEN
) {
2512 case TCPOPT_EOL
: /* FALLTHROUGH */
2520 if (olen
>= TCPOLEN_SACKLEN
) {
2521 for (i
= 2; i
+ TCPOLEN_SACK
<= olen
;
2522 i
+= TCPOLEN_SACK
) {
2523 memcpy(&sack
, &opt
[i
], sizeof (sack
));
2524 pf_change_a(&sack
.start
, &th
->th_sum
,
2525 htonl(ntohl(sack
.start
) -
2527 pf_change_a(&sack
.end
, &th
->th_sum
,
2528 htonl(ntohl(sack
.end
) -
2530 memcpy(&opt
[i
], &sack
, sizeof (sack
));
2532 #ifndef NO_APPLE_EXTENSIONS
2533 copyback
= off
+ sizeof (*th
) + thoptlen
;
2547 #ifndef NO_APPLE_EXTENSIONS
2549 m
= pf_lazy_makewritable(pd
, m
, copyback
);
2552 m_copyback(m
, off
+ sizeof (*th
), thoptlen
, opts
);
2556 m_copyback(m
, off
+ sizeof (*th
), thoptlen
, opts
);
2562 pf_send_tcp(const struct pf_rule
*r
, sa_family_t af
,
2563 const struct pf_addr
*saddr
, const struct pf_addr
*daddr
,
2564 u_int16_t sport
, u_int16_t dport
, u_int32_t seq
, u_int32_t ack
,
2565 u_int8_t flags
, u_int16_t win
, u_int16_t mss
, u_int8_t ttl
, int tag
,
2566 u_int16_t rtag
, struct ether_header
*eh
, struct ifnet
*ifp
)
2568 #pragma unused(eh, ifp)
2572 struct ip
*h
= NULL
;
2575 struct ip6_hdr
*h6
= NULL
;
2577 struct tcphdr
*th
= NULL
;
2579 struct pf_mtag
*pf_mtag
;
2581 /* maximum segment size tcp option */
2582 tlen
= sizeof (struct tcphdr
);
2589 len
= sizeof (struct ip
) + tlen
;
2594 len
= sizeof (struct ip6_hdr
) + tlen
;
2598 panic("pf_send_tcp: not AF_INET or AF_INET6!");
2602 /* create outgoing mbuf */
2603 m
= m_gethdr(M_DONTWAIT
, MT_HEADER
);
2607 if ((pf_mtag
= pf_get_mtag(m
)) == NULL
) {
2613 pf_mtag
->flags
|= PF_TAG_GENERATED
;
2614 pf_mtag
->tag
= rtag
;
2616 if (r
!= NULL
&& PF_RTABLEID_IS_VALID(r
->rtableid
))
2617 pf_mtag
->rtableid
= r
->rtableid
;
2620 if (r
!= NULL
&& r
->qid
) {
2621 pf_mtag
->qid
= r
->qid
;
2622 /* add hints for ecn */
2623 pf_mtag
->hdr
= mtod(m
, struct ip
*);
2626 m
->m_data
+= max_linkhdr
;
2627 m
->m_pkthdr
.len
= m
->m_len
= len
;
2628 m
->m_pkthdr
.rcvif
= NULL
;
2629 bzero(m
->m_data
, len
);
2633 h
= mtod(m
, struct ip
*);
2635 /* IP header fields included in the TCP checksum */
2636 h
->ip_p
= IPPROTO_TCP
;
2637 h
->ip_len
= htons(tlen
);
2638 h
->ip_src
.s_addr
= saddr
->v4
.s_addr
;
2639 h
->ip_dst
.s_addr
= daddr
->v4
.s_addr
;
2641 th
= (struct tcphdr
*)((caddr_t
)h
+ sizeof (struct ip
));
2646 h6
= mtod(m
, struct ip6_hdr
*);
2648 /* IP header fields included in the TCP checksum */
2649 h6
->ip6_nxt
= IPPROTO_TCP
;
2650 h6
->ip6_plen
= htons(tlen
);
2651 memcpy(&h6
->ip6_src
, &saddr
->v6
, sizeof (struct in6_addr
));
2652 memcpy(&h6
->ip6_dst
, &daddr
->v6
, sizeof (struct in6_addr
));
2654 th
= (struct tcphdr
*)((caddr_t
)h6
+ sizeof (struct ip6_hdr
));
2660 th
->th_sport
= sport
;
2661 th
->th_dport
= dport
;
2662 th
->th_seq
= htonl(seq
);
2663 th
->th_ack
= htonl(ack
);
2664 th
->th_off
= tlen
>> 2;
2665 th
->th_flags
= flags
;
2666 th
->th_win
= htons(win
);
2669 opt
= (char *)(th
+ 1);
2670 opt
[0] = TCPOPT_MAXSEG
;
2672 #if BYTE_ORDER != BIG_ENDIAN
2675 bcopy((caddr_t
)&mss
, (caddr_t
)(opt
+ 2), 2);
2684 th
->th_sum
= in_cksum(m
, len
);
2686 /* Finish the IP header */
2688 h
->ip_hl
= sizeof (*h
) >> 2;
2689 h
->ip_tos
= IPTOS_LOWDELAY
;
2691 * ip_output() expects ip_len and ip_off to be in host order.
2694 h
->ip_off
= (path_mtu_discovery
? IP_DF
: 0);
2695 h
->ip_ttl
= ttl
? ttl
: ip_defttl
;
2698 bzero(&ro
, sizeof (ro
));
2699 ip_output(m
, NULL
, &ro
, 0, NULL
, NULL
);
2700 if (ro
.ro_rt
!= NULL
)
2707 struct route_in6 ro6
;
2710 th
->th_sum
= in6_cksum(m
, IPPROTO_TCP
,
2711 sizeof (struct ip6_hdr
), tlen
);
2713 h6
->ip6_vfc
|= IPV6_VERSION
;
2714 h6
->ip6_hlim
= IPV6_DEFHLIM
;
2716 bzero(&ro6
, sizeof (ro6
));
2717 ip6_output(m
, NULL
, &ro6
, 0, NULL
, NULL
, 0);
2718 if (ro6
.ro_rt
!= NULL
)
2727 pf_send_icmp(struct mbuf
*m
, u_int8_t type
, u_int8_t code
, sa_family_t af
,
2731 struct pf_mtag
*pf_mtag
;
2733 m0
= m_copy(m
, 0, M_COPYALL
);
2737 if ((pf_mtag
= pf_get_mtag(m0
)) == NULL
)
2740 pf_mtag
->flags
|= PF_TAG_GENERATED
;
2742 if (PF_RTABLEID_IS_VALID(r
->rtableid
))
2743 pf_mtag
->rtableid
= r
->rtableid
;
2747 pf_mtag
->qid
= r
->qid
;
2748 /* add hints for ecn */
2749 pf_mtag
->hdr
= mtod(m0
, struct ip
*);
2755 icmp_error(m0
, type
, code
, 0, 0);
2760 icmp6_error(m0
, type
, code
, 0);
/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
#if INET
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		    (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		    (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		    (b->addr32[3] & m->addr32[3])))
			match++;
		break;
#endif /* INET6 */
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}
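/*
 * Example: with n == 0, m = 255.255.255.0 and b = 10.0.1.0, any address
 * a inside 10.0.1.0/24 matches, because (a & m) and (b & m) both reduce
 * to 10.0.1.0; calling with n != 0 inverts the result, which is how
 * negated ("! address") rule operands are evaluated.
 */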
/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	int	i;

	switch (af) {
#if INET
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		/* check a >= b */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		/* check a <= e */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
#endif /* INET6 */
	}
	return (1);
}
int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}
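/*
 * In pf.conf terms, "port 1000 >< 2000" maps to PF_OP_IRG (exclusive
 * range, 1000 < p < 2000), "port 1000:2000" to PF_OP_RRG (inclusive),
 * and "port 1000 <> 2000" to PF_OP_XRG (outside the range).
 * pf_match_port(), pf_match_uid() and pf_match_gid() below all reduce
 * to this helper.
 */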
int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(a1);
	NTOHS(a2);
	NTOHS(p);
#endif
	return (pf_match(op, a1, a2, p));
}
#ifndef NO_APPLE_EXTENSIONS
int
pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
    union pf_state_xport *sx)
{
	int d = !0;

	if (sx) {
		switch (proto) {
		case IPPROTO_GRE:
			if (proto_variant == PF_GRE_PPTP_VARIANT)
				d = (rx->call_id == sx->call_id);
			break;

		case IPPROTO_ESP:
			d = (rx->spi == sx->spi);
			break;

		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			if (rx->range.op)
				d = pf_match_port(rx->range.op,
				    rx->range.port[0], rx->range.port[1],
				    sx->port);
			break;

		default:
			break;
		}
	}

	return (d);
}
#endif
int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}
static int
pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_mtag *pf_mtag,
    int *tag)
{
	if (*tag == -1)
		*tag = pf_mtag->tag;

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));
}

int
pf_tag_packet(struct mbuf *m, struct pf_mtag *pf_mtag, int tag,
    unsigned int rtableid)
{
	if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid))
		return (0);

	if (pf_mtag == NULL && (pf_mtag = pf_get_mtag(m)) == NULL)
		return (1);

	if (tag > 0)
		pf_mtag->tag = tag;
	if (PF_RTABLEID_IS_VALID(rtableid))
		pf_mtag->rtableid = rtableid;

	return (0);
}
2971 pf_step_into_anchor(int *depth
, struct pf_ruleset
**rs
, int n
,
2972 struct pf_rule
**r
, struct pf_rule
**a
, int *match
)
2974 struct pf_anchor_stackframe
*f
;
2976 (*r
)->anchor
->match
= 0;
2979 if (*depth
>= (int)sizeof (pf_anchor_stack
) /
2980 (int)sizeof (pf_anchor_stack
[0])) {
2981 printf("pf_step_into_anchor: stack overflow\n");
2982 *r
= TAILQ_NEXT(*r
, entries
);
2984 } else if (*depth
== 0 && a
!= NULL
)
2986 f
= pf_anchor_stack
+ (*depth
)++;
2989 if ((*r
)->anchor_wildcard
) {
2990 f
->parent
= &(*r
)->anchor
->children
;
2991 if ((f
->child
= RB_MIN(pf_anchor_node
, f
->parent
)) ==
2996 *rs
= &f
->child
->ruleset
;
3000 *rs
= &(*r
)->anchor
->ruleset
;
3002 *r
= TAILQ_FIRST((*rs
)->rules
[n
].active
.ptr
);
3006 pf_step_out_of_anchor(int *depth
, struct pf_ruleset
**rs
, int n
,
3007 struct pf_rule
**r
, struct pf_rule
**a
, int *match
)
3009 struct pf_anchor_stackframe
*f
;
3015 f
= pf_anchor_stack
+ *depth
- 1;
3016 if (f
->parent
!= NULL
&& f
->child
!= NULL
) {
3017 if (f
->child
->match
||
3018 (match
!= NULL
&& *match
)) {
3019 f
->r
->anchor
->match
= 1;
3022 f
->child
= RB_NEXT(pf_anchor_node
, f
->parent
, f
->child
);
3023 if (f
->child
!= NULL
) {
3024 *rs
= &f
->child
->ruleset
;
3025 *r
= TAILQ_FIRST((*rs
)->rules
[n
].active
.ptr
);
3033 if (*depth
== 0 && a
!= NULL
)
3036 if (f
->r
->anchor
->match
|| (match
!= NULL
&& *match
))
3037 quick
= f
->r
->quick
;
3038 *r
= TAILQ_NEXT(f
->r
, entries
);
3039 } while (*r
== NULL
);
void
pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
		    ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
		    ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
		    ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
		break;
#endif /* INET6 */
	}
}
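/*
 * Example: for an IPv4 bitmask pool with raddr = 192.0.2.0 and
 * rmask = 255.255.255.0, a source of 10.1.2.3 yields
 * naddr = (192.0.2.0 & mask) | (~mask & 10.1.2.3) = 192.0.2.3; the pool
 * entry supplies the network bits and the original address supplies the
 * host bits.
 */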
void
pf_addr_inc(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (addr->addr32[3] == 0xffffffff) {
			addr->addr32[3] = 0;
			if (addr->addr32[2] == 0xffffffff) {
				addr->addr32[2] = 0;
				if (addr->addr32[1] == 0xffffffff) {
					addr->addr32[1] = 0;
					addr->addr32[0] =
					    htonl(ntohl(addr->addr32[0]) + 1);
				} else
					addr->addr32[1] =
					    htonl(ntohl(addr->addr32[1]) + 1);
			} else
				addr->addr32[2] =
				    htonl(ntohl(addr->addr32[2]) + 1);
		} else
			addr->addr32[3] =
			    htonl(ntohl(addr->addr32[3]) + 1);
		break;
#endif /* INET6 */
	}
}
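/*
 * Example of the carry handling above: incrementing 2001:db8::ffff:ffff
 * clears addr32[3] and bumps addr32[2], giving 2001:db8::1:0:0; a carry
 * only ripples into the next more significant 32-bit word when the lower
 * word is already 0xffffffff.
 */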
#define mix(a, b, c) \
	do {					\
		a -= b; a -= c; a ^= (c >> 13);	\
		b -= c; b -= a; b ^= (a << 8);	\
		c -= a; c -= b; c ^= (b >> 13);	\
		a -= b; a -= c; a ^= (c >> 12);	\
		b -= c; b -= a; b ^= (a << 16);	\
		c -= a; c -= b; c ^= (b >> 5);	\
		a -= b; a -= c; a ^= (c >> 3);	\
		b -= c; b -= a; b ^= (a << 10);	\
		c -= a; c -= b; c ^= (b >> 15);	\
	} while (0)
/*
 * hash function based on bridge_hash in if_bridge.c
 */
static void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t	a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
#if INET
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
#endif /* INET6 */
	}
}
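/*
 * The keyed hash produced here feeds the PF_POOL_SRCHASH case of
 * pf_map_addr() below, where it is folded into the pool address and mask
 * with PF_POOLMASK(); a given source address therefore always selects the
 * same pool member for as long as the pool key stays the same.
 */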
3159 pf_map_addr(sa_family_t af
, struct pf_rule
*r
, struct pf_addr
*saddr
,
3160 struct pf_addr
*naddr
, struct pf_addr
*init_addr
, struct pf_src_node
**sn
)
3162 unsigned char hash
[16];
3163 struct pf_pool
*rpool
= &r
->rpool
;
3164 struct pf_addr
*raddr
= &rpool
->cur
->addr
.v
.a
.addr
;
3165 struct pf_addr
*rmask
= &rpool
->cur
->addr
.v
.a
.mask
;
3166 struct pf_pooladdr
*acur
= rpool
->cur
;
3167 struct pf_src_node k
;
3169 if (*sn
== NULL
&& r
->rpool
.opts
& PF_POOL_STICKYADDR
&&
3170 (r
->rpool
.opts
& PF_POOL_TYPEMASK
) != PF_POOL_NONE
) {
3172 PF_ACPY(&k
.addr
, saddr
, af
);
3173 if (r
->rule_flag
& PFRULE_RULESRCTRACK
||
3174 r
->rpool
.opts
& PF_POOL_STICKYADDR
)
3178 pf_status
.scounters
[SCNT_SRC_NODE_SEARCH
]++;
3179 *sn
= RB_FIND(pf_src_tree
, &tree_src_tracking
, &k
);
3180 if (*sn
!= NULL
&& !PF_AZERO(&(*sn
)->raddr
, af
)) {
3181 PF_ACPY(naddr
, &(*sn
)->raddr
, af
);
3182 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
3183 printf("pf_map_addr: src tracking maps ");
3184 pf_print_host(&k
.addr
, 0, af
);
3186 pf_print_host(naddr
, 0, af
);
3193 if (rpool
->cur
->addr
.type
== PF_ADDR_NOROUTE
)
3195 if (rpool
->cur
->addr
.type
== PF_ADDR_DYNIFTL
) {
3199 if (rpool
->cur
->addr
.p
.dyn
->pfid_acnt4
< 1 &&
3200 (rpool
->opts
& PF_POOL_TYPEMASK
) !=
3203 raddr
= &rpool
->cur
->addr
.p
.dyn
->pfid_addr4
;
3204 rmask
= &rpool
->cur
->addr
.p
.dyn
->pfid_mask4
;
3209 if (rpool
->cur
->addr
.p
.dyn
->pfid_acnt6
< 1 &&
3210 (rpool
->opts
& PF_POOL_TYPEMASK
) !=
3213 raddr
= &rpool
->cur
->addr
.p
.dyn
->pfid_addr6
;
3214 rmask
= &rpool
->cur
->addr
.p
.dyn
->pfid_mask6
;
3218 } else if (rpool
->cur
->addr
.type
== PF_ADDR_TABLE
) {
3219 if ((rpool
->opts
& PF_POOL_TYPEMASK
) != PF_POOL_ROUNDROBIN
)
3220 return (1); /* unsupported */
3222 raddr
= &rpool
->cur
->addr
.v
.a
.addr
;
3223 rmask
= &rpool
->cur
->addr
.v
.a
.mask
;
3226 switch (rpool
->opts
& PF_POOL_TYPEMASK
) {
3228 PF_ACPY(naddr
, raddr
, af
);
3230 case PF_POOL_BITMASK
:
3231 PF_POOLMASK(naddr
, raddr
, rmask
, saddr
, af
);
3233 case PF_POOL_RANDOM
:
3234 if (init_addr
!= NULL
&& PF_AZERO(init_addr
, af
)) {
3238 rpool
->counter
.addr32
[0] = htonl(random());
3243 if (rmask
->addr32
[3] != 0xffffffff)
3244 rpool
->counter
.addr32
[3] =
3248 if (rmask
->addr32
[2] != 0xffffffff)
3249 rpool
->counter
.addr32
[2] =
3253 if (rmask
->addr32
[1] != 0xffffffff)
3254 rpool
->counter
.addr32
[1] =
3258 if (rmask
->addr32
[0] != 0xffffffff)
3259 rpool
->counter
.addr32
[0] =
3264 PF_POOLMASK(naddr
, raddr
, rmask
, &rpool
->counter
, af
);
3265 PF_ACPY(init_addr
, naddr
, af
);
3268 PF_AINC(&rpool
->counter
, af
);
3269 PF_POOLMASK(naddr
, raddr
, rmask
, &rpool
->counter
, af
);
3272 case PF_POOL_SRCHASH
:
3273 pf_hash(saddr
, (struct pf_addr
*)&hash
, &rpool
->key
, af
);
3274 PF_POOLMASK(naddr
, raddr
, rmask
, (struct pf_addr
*)&hash
, af
);
3276 case PF_POOL_ROUNDROBIN
:
3277 if (rpool
->cur
->addr
.type
== PF_ADDR_TABLE
) {
3278 if (!pfr_pool_get(rpool
->cur
->addr
.p
.tbl
,
3279 &rpool
->tblidx
, &rpool
->counter
,
3280 &raddr
, &rmask
, af
))
3282 } else if (rpool
->cur
->addr
.type
== PF_ADDR_DYNIFTL
) {
3283 if (!pfr_pool_get(rpool
->cur
->addr
.p
.dyn
->pfid_kt
,
3284 &rpool
->tblidx
, &rpool
->counter
,
3285 &raddr
, &rmask
, af
))
3287 } else if (pf_match_addr(0, raddr
, rmask
, &rpool
->counter
, af
))
3291 if ((rpool
->cur
= TAILQ_NEXT(rpool
->cur
, entries
)) == NULL
)
3292 rpool
->cur
= TAILQ_FIRST(&rpool
->list
);
3293 if (rpool
->cur
->addr
.type
== PF_ADDR_TABLE
) {
3295 if (pfr_pool_get(rpool
->cur
->addr
.p
.tbl
,
3296 &rpool
->tblidx
, &rpool
->counter
,
3297 &raddr
, &rmask
, af
)) {
3298 /* table contains no address of type 'af' */
3299 if (rpool
->cur
!= acur
)
3303 } else if (rpool
->cur
->addr
.type
== PF_ADDR_DYNIFTL
) {
3305 if (pfr_pool_get(rpool
->cur
->addr
.p
.dyn
->pfid_kt
,
3306 &rpool
->tblidx
, &rpool
->counter
,
3307 &raddr
, &rmask
, af
)) {
3308 /* table contains no address of type 'af' */
3309 if (rpool
->cur
!= acur
)
3314 raddr
= &rpool
->cur
->addr
.v
.a
.addr
;
3315 rmask
= &rpool
->cur
->addr
.v
.a
.mask
;
3316 PF_ACPY(&rpool
->counter
, raddr
, af
);
3320 PF_ACPY(naddr
, &rpool
->counter
, af
);
3321 if (init_addr
!= NULL
&& PF_AZERO(init_addr
, af
))
3322 PF_ACPY(init_addr
, naddr
, af
);
3323 PF_AINC(&rpool
->counter
, af
);
3327 PF_ACPY(&(*sn
)->raddr
, naddr
, af
);
3329 if (pf_status
.debug
>= PF_DEBUG_MISC
&&
3330 (rpool
->opts
& PF_POOL_TYPEMASK
) != PF_POOL_NONE
) {
3331 printf("pf_map_addr: selected address ");
3332 pf_print_host(naddr
, 0, af
);
3339 #ifndef NO_APPLE_EXTENSIONS
3341 pf_get_sport(struct pf_pdesc
*pd
, struct pfi_kif
*kif
, struct pf_rule
*r
,
3342 struct pf_addr
*saddr
, union pf_state_xport
*sxport
, struct pf_addr
*daddr
,
3343 union pf_state_xport
*dxport
, struct pf_addr
*naddr
,
3344 union pf_state_xport
*nxport
, struct pf_src_node
**sn
)
3347 pf_get_sport(sa_family_t af
, u_int8_t proto
, struct pf_rule
*r
,
3348 struct pf_addr
*saddr
, struct pf_addr
*daddr
, u_int16_t dport
,
3349 struct pf_addr
*naddr
, u_int16_t
*nport
, u_int16_t low
, u_int16_t high
,
3350 struct pf_src_node
**sn
)
3354 struct pf_state_key_cmp key
;
3355 struct pf_addr init_addr
;
3356 #ifndef NO_APPLE_EXTENSIONS
3358 sa_family_t af
= pd
->af
;
3359 u_int8_t proto
= pd
->proto
;
3360 unsigned int low
= r
->rpool
.proxy_port
[0];
3361 unsigned int high
= r
->rpool
.proxy_port
[1];
3366 bzero(&init_addr
, sizeof (init_addr
));
3367 if (pf_map_addr(af
, r
, saddr
, naddr
, &init_addr
, sn
))
3370 if (proto
== IPPROTO_ICMP
) {
3375 #ifndef NO_APPLE_EXTENSIONS
3377 return (0); /* No output necessary. */
3379 /*--- Special mapping rules for UDP ---*/
3380 if (proto
== IPPROTO_UDP
) {
3382 /*--- Never float IKE source port ---*/
3383 if (ntohs(sxport
->port
) == PF_IKE_PORT
) {
3384 nxport
->port
= sxport
->port
;
3388 /*--- Apply exterior mapping options ---*/
3389 if (r
->extmap
> PF_EXTMAP_APD
) {
3392 TAILQ_FOREACH(s
, &state_list
, entry_list
) {
3393 struct pf_state_key
*sk
= s
->state_key
;
3396 if (s
->nat_rule
.ptr
!= r
)
3398 if (sk
->proto
!= IPPROTO_UDP
|| sk
->af
!= af
)
3400 if (sk
->lan
.xport
.port
!= sxport
->port
)
3402 if (PF_ANEQ(&sk
->lan
.addr
, saddr
, af
))
3404 if (r
->extmap
< PF_EXTMAP_EI
&&
3405 PF_ANEQ(&sk
->ext
.addr
, daddr
, af
))
3408 nxport
->port
= sk
->gwy
.xport
.port
;
3412 } else if (proto
== IPPROTO_TCP
) {
3415 * APPLE MODIFICATION: <rdar://problem/6546358>
		 * Fix allows NAT to use a single binding for TCP sessions
		 * with the same source IP and source port
3419 TAILQ_FOREACH(s
, &state_list
, entry_list
) {
3420 struct pf_state_key
* sk
= s
->state_key
;
3423 if (s
->nat_rule
.ptr
!= r
)
3425 if (sk
->proto
!= IPPROTO_TCP
|| sk
->af
!= af
)
3427 if (sk
->lan
.xport
.port
!= sxport
->port
)
3429 if (!(PF_AEQ(&sk
->lan
.addr
, saddr
, af
)))
3431 nxport
->port
= sk
->gwy
.xport
.port
;
3439 PF_ACPY(&key
.ext
.addr
, daddr
, key
.af
);
3440 PF_ACPY(&key
.gwy
.addr
, naddr
, key
.af
);
3441 #ifndef NO_APPLE_EXTENSIONS
3444 key
.proto_variant
= r
->extfilter
;
3447 key
.proto_variant
= 0;
3451 key
.ext
.xport
= *dxport
;
3453 memset(&key
.ext
.xport
, 0, sizeof (key
.ext
.xport
));
3455 key
.ext
.port
= dport
;
	 * port search; start random, step;
	 * similar to the port loop in in_pcbbind
3461 if (!(proto
== IPPROTO_TCP
|| proto
== IPPROTO_UDP
||
3462 proto
== IPPROTO_ICMP
)) {
3463 #ifndef NO_APPLE_EXTENSIONS
3465 key
.gwy
.xport
= *dxport
;
3467 memset(&key
.gwy
.xport
, 0,
3468 sizeof (key
.ext
.xport
));
3470 key
.gwy
.port
= dport
;
3472 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
)
3474 } else if (low
== 0 && high
== 0) {
3475 #ifndef NO_APPLE_EXTENSIONS
3476 key
.gwy
.xport
= *nxport
;
3478 key
.gwy
.port
= *nport
;
3480 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
)
3482 } else if (low
== high
) {
3483 #ifndef NO_APPLE_EXTENSIONS
3484 key
.gwy
.xport
.port
= htons(low
);
3485 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
) {
3486 nxport
->port
= htons(low
);
3490 key
.gwy
.port
= htons(low
);
3491 if (pf_find_state_all(&key
, PF_IN
, NULL
) == NULL
) {
3492 *nport
= htons(low
);
3497 #ifndef NO_APPLE_EXTENSIONS
3508 cut
= htonl(random()) % (1 + high
- low
) + low
;
3509 /* low <= cut <= high */
3510 for (tmp
= cut
; tmp
<= high
; ++(tmp
)) {
3511 #ifndef NO_APPLE_EXTENSIONS
3512 key
.gwy
.xport
.port
= htons(tmp
);
3513 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3515 nxport
->port
= htons(tmp
);
3519 key
.gwy
.port
= htons(tmp
);
3520 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3522 *nport
= htons(tmp
);
3527 for (tmp
= cut
- 1; tmp
>= low
; --(tmp
)) {
3528 #ifndef NO_APPLE_EXTENSIONS
3529 key
.gwy
.xport
.port
= htons(tmp
);
3530 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3532 nxport
->port
= htons(tmp
);
3536 key
.gwy
.port
= htons(tmp
);
3537 if (pf_find_state_all(&key
, PF_IN
, NULL
) ==
3539 *nport
= htons(tmp
);
3546 switch (r
->rpool
.opts
& PF_POOL_TYPEMASK
) {
3547 case PF_POOL_RANDOM
:
3548 case PF_POOL_ROUNDROBIN
:
3549 if (pf_map_addr(af
, r
, saddr
, naddr
, &init_addr
, sn
))
3553 case PF_POOL_SRCHASH
:
3554 case PF_POOL_BITMASK
:
3558 } while (!PF_AEQ(&init_addr
, naddr
, af
));
3560 return (1); /* none available */
3563 #ifndef NO_APPLE_EXTENSIONS
3564 static struct pf_rule
*
3565 pf_match_translation(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
,
3566 int direction
, struct pfi_kif
*kif
, struct pf_addr
*saddr
,
3567 union pf_state_xport
*sxport
, struct pf_addr
*daddr
,
3568 union pf_state_xport
*dxport
, int rs_num
)
3571 pf_match_translation(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
,
3572 int direction
, struct pfi_kif
*kif
, struct pf_addr
*saddr
, u_int16_t sport
,
3573 struct pf_addr
*daddr
, u_int16_t dport
, int rs_num
)
3576 struct pf_rule
*r
, *rm
= NULL
;
3577 struct pf_ruleset
*ruleset
= NULL
;
3579 unsigned int rtableid
= IFSCOPE_NONE
;
3582 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[rs_num
].active
.ptr
);
3583 while (r
&& rm
== NULL
) {
3584 struct pf_rule_addr
*src
= NULL
, *dst
= NULL
;
3585 struct pf_addr_wrap
*xdst
= NULL
;
3586 #ifndef NO_APPLE_EXTENSIONS
3587 struct pf_addr_wrap
*xsrc
= NULL
;
3590 if (r
->action
== PF_BINAT
&& direction
== PF_IN
) {
3592 if (r
->rpool
.cur
!= NULL
)
3593 xdst
= &r
->rpool
.cur
->addr
;
3594 #ifndef NO_APPLE_EXTENSIONS
3595 } else if (r
->action
== PF_RDR
&& direction
== PF_OUT
) {
3598 if (r
->rpool
.cur
!= NULL
)
3599 xsrc
= &r
->rpool
.cur
->addr
;
3607 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
)
3608 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
3609 else if (r
->direction
&& r
->direction
!= direction
)
3610 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
3611 else if (r
->af
&& r
->af
!= pd
->af
)
3612 r
= r
->skip
[PF_SKIP_AF
].ptr
;
3613 else if (r
->proto
&& r
->proto
!= pd
->proto
)
3614 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
3615 #ifndef NO_APPLE_EXTENSIONS
3616 else if (xsrc
&& PF_MISMATCHAW(xsrc
, saddr
, pd
->af
, 0, NULL
))
3617 r
= TAILQ_NEXT(r
, entries
);
3618 else if (!xsrc
&& PF_MISMATCHAW(&src
->addr
, saddr
, pd
->af
,
3620 r
= r
->skip
[src
== &r
->src
? PF_SKIP_SRC_ADDR
:
3621 PF_SKIP_DST_ADDR
].ptr
;
3622 else if (!pf_match_xport(r
->proto
,
3623 r
->proto_variant
, &src
->xport
, sxport
))
3625 else if (PF_MISMATCHAW(&src
->addr
, saddr
, pd
->af
,
3627 r
= r
->skip
[src
== &r
->src
? PF_SKIP_SRC_ADDR
:
3628 PF_SKIP_DST_ADDR
].ptr
;
3629 else if (src
->port_op
&& !pf_match_port(src
->port_op
,
3630 src
->port
[0], src
->port
[1], sport
))
3632 r
= r
->skip
[src
== &r
->src
? PF_SKIP_SRC_PORT
:
3633 PF_SKIP_DST_PORT
].ptr
;
3634 else if (dst
!= NULL
&&
3635 PF_MISMATCHAW(&dst
->addr
, daddr
, pd
->af
, dst
->neg
, NULL
))
3636 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
3637 else if (xdst
!= NULL
&& PF_MISMATCHAW(xdst
, daddr
, pd
->af
,
3639 r
= TAILQ_NEXT(r
, entries
);
3640 #ifndef NO_APPLE_EXTENSIONS
3641 else if (dst
&& !pf_match_xport(r
->proto
, r
->proto_variant
,
3642 &dst
->xport
, dxport
))
3644 else if (dst
!= NULL
&& dst
->port_op
&&
3645 !pf_match_port(dst
->port_op
, dst
->port
[0],
3646 dst
->port
[1], dport
))
3648 r
= r
->skip
[PF_SKIP_DST_PORT
].ptr
;
3649 else if (r
->match_tag
&& !pf_match_tag(m
, r
, pd
->pf_mtag
, &tag
))
3650 r
= TAILQ_NEXT(r
, entries
);
3651 else if (r
->os_fingerprint
!= PF_OSFP_ANY
&& (pd
->proto
!=
3652 IPPROTO_TCP
|| !pf_osfp_match(pf_osfp_fingerprint(pd
, m
,
3653 off
, pd
->hdr
.tcp
), r
->os_fingerprint
)))
3654 r
= TAILQ_NEXT(r
, entries
);
3658 if (PF_RTABLEID_IS_VALID(r
->rtableid
))
3659 rtableid
= r
->rtableid
;
3660 if (r
->anchor
== NULL
) {
3663 pf_step_into_anchor(&asd
, &ruleset
, rs_num
,
3667 pf_step_out_of_anchor(&asd
, &ruleset
, rs_num
, &r
,
3670 if (pf_tag_packet(m
, pd
->pf_mtag
, tag
, rtableid
))
3672 if (rm
!= NULL
&& (rm
->action
== PF_NONAT
||
3673 rm
->action
== PF_NORDR
|| rm
->action
== PF_NOBINAT
))
3678 #ifndef NO_APPLE_EXTENSIONS
3679 static struct pf_rule
*
3680 pf_get_translation_aux(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
,
3681 int direction
, struct pfi_kif
*kif
, struct pf_src_node
**sn
,
3682 struct pf_addr
*saddr
, union pf_state_xport
*sxport
, struct pf_addr
*daddr
,
3683 union pf_state_xport
*dxport
, struct pf_addr
*naddr
,
3684 union pf_state_xport
*nxport
)
3687 pf_get_translation(struct pf_pdesc
*pd
, struct mbuf
*m
, int off
, int direction
,
3688 struct pfi_kif
*kif
, struct pf_src_node
**sn
,
3689 struct pf_addr
*saddr
, u_int16_t sport
,
3690 struct pf_addr
*daddr
, u_int16_t dport
,
3691 struct pf_addr
*naddr
, u_int16_t
*nport
)
3694 struct pf_rule
*r
= NULL
;
3696 #ifndef NO_APPLE_EXTENSIONS
3697 if (direction
== PF_OUT
) {
3698 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3699 sxport
, daddr
, dxport
, PF_RULESET_BINAT
);
3701 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3702 saddr
, sxport
, daddr
, dxport
, PF_RULESET_RDR
);
3704 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3705 saddr
, sxport
, daddr
, dxport
, PF_RULESET_NAT
);
3707 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3708 sxport
, daddr
, dxport
, PF_RULESET_RDR
);
3710 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3711 saddr
, sxport
, daddr
, dxport
, PF_RULESET_BINAT
);
3714 if (direction
== PF_OUT
) {
3715 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3716 sport
, daddr
, dport
, PF_RULESET_BINAT
);
3718 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3719 saddr
, sport
, daddr
, dport
, PF_RULESET_NAT
);
3721 r
= pf_match_translation(pd
, m
, off
, direction
, kif
, saddr
,
3722 sport
, daddr
, dport
, PF_RULESET_RDR
);
3724 r
= pf_match_translation(pd
, m
, off
, direction
, kif
,
3725 saddr
, sport
, daddr
, dport
, PF_RULESET_BINAT
);
3730 switch (r
->action
) {
3736 #ifndef NO_APPLE_EXTENSIONS
3737 if (pf_get_sport(pd
, kif
, r
, saddr
, sxport
, daddr
,
3738 dxport
, naddr
, nxport
, sn
)) {
3740 if (pf_get_sport(pd
->af
, pd
->proto
, r
, saddr
,
3741 daddr
, dport
, naddr
, nport
, r
->rpool
.proxy_port
[0],
3742 r
->rpool
.proxy_port
[1], sn
)) {
3744 DPFPRINTF(PF_DEBUG_MISC
,
3745 ("pf: NAT proxy port allocation "
3747 r
->rpool
.proxy_port
[0],
3748 r
->rpool
.proxy_port
[1]));
3753 switch (direction
) {
3755 if (r
->rpool
.cur
->addr
.type
==
3760 if (r
->rpool
.cur
->addr
.p
.dyn
->
3764 &r
->rpool
.cur
->addr
.p
.dyn
->
3766 &r
->rpool
.cur
->addr
.p
.dyn
->
3773 if (r
->rpool
.cur
->addr
.p
.dyn
->
3777 &r
->rpool
.cur
->addr
.p
.dyn
->
3779 &r
->rpool
.cur
->addr
.p
.dyn
->
3787 &r
->rpool
.cur
->addr
.v
.a
.addr
,
3788 &r
->rpool
.cur
->addr
.v
.a
.mask
,
3793 if (r
->src
.addr
.type
== PF_ADDR_DYNIFTL
) {
3797 if (r
->src
.addr
.p
.dyn
->
3801 &r
->src
.addr
.p
.dyn
->
3803 &r
->src
.addr
.p
.dyn
->
3810 if (r
->src
.addr
.p
.dyn
->
3814 &r
->src
.addr
.p
.dyn
->
3816 &r
->src
.addr
.p
.dyn
->
3824 &r
->src
.addr
.v
.a
.addr
,
3825 &r
->src
.addr
.v
.a
.mask
, daddr
,
3831 #ifndef NO_APPLE_EXTENSIONS
3832 switch (direction
) {
3834 if (r
->dst
.addr
.type
== PF_ADDR_DYNIFTL
) {
3838 if (r
->dst
.addr
.p
.dyn
->
3842 &r
->dst
.addr
.p
.dyn
->
3844 &r
->dst
.addr
.p
.dyn
->
3851 if (r
->dst
.addr
.p
.dyn
->
3855 &r
->dst
.addr
.p
.dyn
->
3857 &r
->dst
.addr
.p
.dyn
->
3865 &r
->dst
.addr
.v
.a
.addr
,
3866 &r
->dst
.addr
.v
.a
.mask
,
3869 if (nxport
&& dxport
)
3873 if (pf_map_addr(pd
->af
, r
, saddr
,
3876 if ((r
->rpool
.opts
& PF_POOL_TYPEMASK
) ==
3878 PF_POOLMASK(naddr
, naddr
,
3879 &r
->rpool
.cur
->addr
.v
.a
.mask
, daddr
,
3882 if (nxport
&& dxport
) {
3883 if (r
->rpool
.proxy_port
[1]) {
3884 u_int32_t tmp_nport
;
3887 ((ntohs(dxport
->port
) -
3888 ntohs(r
->dst
.xport
.range
.
3890 (r
->rpool
.proxy_port
[1] -
3891 r
->rpool
.proxy_port
[0] +
3892 1)) + r
->rpool
.proxy_port
[0];
3894 /* wrap around if necessary */
3895 if (tmp_nport
> 65535)
3898 htons((u_int16_t
)tmp_nport
);
3899 } else if (r
->rpool
.proxy_port
[0]) {
3900 nxport
->port
= htons(r
->rpool
.
3907 if (pf_map_addr(pd
->af
, r
, saddr
, naddr
, NULL
, sn
))
3909 if ((r
->rpool
.opts
& PF_POOL_TYPEMASK
) ==
3911 PF_POOLMASK(naddr
, naddr
,
3912 &r
->rpool
.cur
->addr
.v
.a
.mask
, daddr
,
3915 if (r
->rpool
.proxy_port
[1]) {
3916 u_int32_t tmp_nport
;
3918 tmp_nport
= ((ntohs(dport
) -
3919 ntohs(r
->dst
.port
[0])) %
3920 (r
->rpool
.proxy_port
[1] -
3921 r
->rpool
.proxy_port
[0] + 1)) +
3922 r
->rpool
.proxy_port
[0];
3924 /* wrap around if necessary */
3925 if (tmp_nport
> 65535)
3927 *nport
= htons((u_int16_t
)tmp_nport
);
3928 } else if (r
->rpool
.proxy_port
[0])
3929 *nport
= htons(r
->rpool
.proxy_port
[0]);
3942 pf_socket_lookup(int direction
, struct pf_pdesc
*pd
)
3944 struct pf_addr
*saddr
, *daddr
;
3945 u_int16_t sport
, dport
;
3946 struct inpcbinfo
*pi
;
3947 struct inpcb
*inp
= NULL
;
3951 pd
->lookup
.uid
= UID_MAX
;
3952 pd
->lookup
.gid
= GID_MAX
;
3953 pd
->lookup
.pid
= NO_PID
;
3955 switch (pd
->proto
) {
3957 if (pd
->hdr
.tcp
== NULL
)
3959 sport
= pd
->hdr
.tcp
->th_sport
;
3960 dport
= pd
->hdr
.tcp
->th_dport
;
3964 if (pd
->hdr
.udp
== NULL
)
3966 sport
= pd
->hdr
.udp
->uh_sport
;
3967 dport
= pd
->hdr
.udp
->uh_dport
;
3973 if (direction
== PF_IN
) {
3988 inp
= in_pcblookup_hash(pi
, saddr
->v4
, sport
, daddr
->v4
, dport
,
3992 struct in6_addr s6
, d6
;
3994 memset(&s6
, 0, sizeof (s6
));
3995 s6
.s6_addr16
[5] = htons(0xffff);
3996 memcpy(&s6
.s6_addr32
[3], &saddr
->v4
,
3997 sizeof (saddr
->v4
));
3999 memset(&d6
, 0, sizeof (d6
));
4000 d6
.s6_addr16
[5] = htons(0xffff);
4001 memcpy(&d6
.s6_addr32
[3], &daddr
->v4
,
4002 sizeof (daddr
->v4
));
4004 inp
= in6_pcblookup_hash(pi
, &s6
, sport
,
4005 &d6
, dport
, 0, NULL
);
4007 inp
= in_pcblookup_hash(pi
, saddr
->v4
, sport
,
4008 daddr
->v4
, dport
, INPLOOKUP_WILDCARD
, NULL
);
4010 inp
= in6_pcblookup_hash(pi
, &s6
, sport
,
4011 &d6
, dport
, INPLOOKUP_WILDCARD
,
4020 inp
= in_pcblookup_hash(pi
, saddr
->v4
, sport
,
4021 daddr
->v4
, dport
, INPLOOKUP_WILDCARD
, NULL
);
4030 inp
= in6_pcblookup_hash(pi
, &saddr
->v6
, sport
, &daddr
->v6
,
4033 inp
= in6_pcblookup_hash(pi
, &saddr
->v6
, sport
,
4034 &daddr
->v6
, dport
, INPLOOKUP_WILDCARD
, NULL
);
4046 in_pcb_checkstate(inp
, WNT_RELEASE
, 0);
4052 pf_get_wscale(struct mbuf
*m
, int off
, u_int16_t th_off
, sa_family_t af
)
4056 u_int8_t
*opt
, optlen
;
4057 u_int8_t wscale
= 0;
4059 hlen
= th_off
<< 2; /* hlen <= sizeof (hdr) */
4060 if (hlen
<= (int)sizeof (struct tcphdr
))
4062 if (!pf_pull_hdr(m
, off
, hdr
, hlen
, NULL
, NULL
, af
))
4064 opt
= hdr
+ sizeof (struct tcphdr
);
4065 hlen
-= sizeof (struct tcphdr
);
4075 if (wscale
> TCP_MAX_WINSHIFT
)
4076 wscale
= TCP_MAX_WINSHIFT
;
4077 wscale
|= PF_WSCALE_FLAG
;
4092 pf_get_mss(struct mbuf
*m
, int off
, u_int16_t th_off
, sa_family_t af
)
4096 u_int8_t
*opt
, optlen
;
4097 u_int16_t mss
= tcp_mssdflt
;
4099 hlen
= th_off
<< 2; /* hlen <= sizeof (hdr) */
4100 if (hlen
<= (int)sizeof (struct tcphdr
))
4102 if (!pf_pull_hdr(m
, off
, hdr
, hlen
, NULL
, NULL
, af
))
4104 opt
= hdr
+ sizeof (struct tcphdr
);
4105 hlen
-= sizeof (struct tcphdr
);
4106 while (hlen
>= TCPOLEN_MAXSEG
) {
4114 bcopy((caddr_t
)(opt
+ 2), (caddr_t
)&mss
, 2);
4115 #if BYTE_ORDER != BIG_ENDIAN
4132 pf_calc_mss(struct pf_addr
*addr
, sa_family_t af
, u_int16_t offer
)
4135 struct sockaddr_in
*dst
;
4139 struct sockaddr_in6
*dst6
;
4140 struct route_in6 ro6
;
4142 struct rtentry
*rt
= NULL
;
4144 u_int16_t mss
= tcp_mssdflt
;
4149 hlen
= sizeof (struct ip
);
4150 bzero(&ro
, sizeof (ro
));
4151 dst
= (struct sockaddr_in
*)&ro
.ro_dst
;
4152 dst
->sin_family
= AF_INET
;
4153 dst
->sin_len
= sizeof (*dst
);
4154 dst
->sin_addr
= addr
->v4
;
4161 hlen
= sizeof (struct ip6_hdr
);
4162 bzero(&ro6
, sizeof (ro6
));
4163 dst6
= (struct sockaddr_in6
*)&ro6
.ro_dst
;
4164 dst6
->sin6_family
= AF_INET6
;
4165 dst6
->sin6_len
= sizeof (*dst6
);
4166 dst6
->sin6_addr
= addr
->v6
;
4167 rtalloc((struct route
*)&ro
);
4172 panic("pf_calc_mss: not AF_INET or AF_INET6!");
4176 if (rt
&& rt
->rt_ifp
) {
4177 mss
= rt
->rt_ifp
->if_mtu
- hlen
- sizeof (struct tcphdr
);
4178 mss
= max(tcp_mssdflt
, mss
);
4181 mss
= min(mss
, offer
);
4182 mss
= max(mss
, 64); /* sanity - at least max opt space */
4187 pf_set_rt_ifp(struct pf_state
*s
, struct pf_addr
*saddr
)
4189 struct pf_rule
*r
= s
->rule
.ptr
;
4192 if (!r
->rt
|| r
->rt
== PF_FASTROUTE
)
4194 switch (s
->state_key
->af
) {
4197 pf_map_addr(AF_INET
, r
, saddr
, &s
->rt_addr
, NULL
,
4199 s
->rt_kif
= r
->rpool
.cur
->kif
;
4204 pf_map_addr(AF_INET6
, r
, saddr
, &s
->rt_addr
, NULL
,
4206 s
->rt_kif
= r
->rpool
.cur
->kif
;
4213 pf_attach_state(struct pf_state_key
*sk
, struct pf_state
*s
, int tail
)
4218 /* list is sorted, if-bound states before floating */
4220 TAILQ_INSERT_TAIL(&sk
->states
, s
, next
);
4222 TAILQ_INSERT_HEAD(&sk
->states
, s
, next
);
4226 pf_detach_state(struct pf_state
*s
, int flags
)
4228 struct pf_state_key
*sk
= s
->state_key
;
4233 s
->state_key
= NULL
;
4234 TAILQ_REMOVE(&sk
->states
, s
, next
);
4235 if (--sk
->refcnt
== 0) {
4236 if (!(flags
& PF_DT_SKIP_EXTGWY
))
4237 RB_REMOVE(pf_state_tree_ext_gwy
,
4238 &pf_statetbl_ext_gwy
, sk
);
4239 if (!(flags
& PF_DT_SKIP_LANEXT
))
4240 RB_REMOVE(pf_state_tree_lan_ext
,
4241 &pf_statetbl_lan_ext
, sk
);
4242 #ifndef NO_APPLE_EXTENSIONS
4244 pool_put(&pf_app_state_pl
, sk
->app_state
);
4246 pool_put(&pf_state_key_pl
, sk
);
4250 struct pf_state_key
*
4251 pf_alloc_state_key(struct pf_state
*s
)
4253 struct pf_state_key
*sk
;
4255 if ((sk
= pool_get(&pf_state_key_pl
, PR_WAITOK
)) == NULL
)
4257 bzero(sk
, sizeof (*sk
));
4258 TAILQ_INIT(&sk
->states
);
4259 pf_attach_state(sk
, s
, 0);
static u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	MD5_CTX ctx;
	u_int32_t digest[4];

	if (pf_tcp_secret_init == 0) {
		read_random(pf_tcp_secret, sizeof (pf_tcp_secret));
		MD5Init(&pf_tcp_secret_ctx);
		MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
		    sizeof (pf_tcp_secret));
		pf_tcp_secret_init = 1;
	}
	ctx = pf_tcp_secret_ctx;

	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
	if (pd->af == AF_INET6) {
		MD5Update(&ctx, (char *)&pd->src->v6, sizeof (struct in6_addr));
		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof (struct in6_addr));
	} else {
		MD5Update(&ctx, (char *)&pd->src->v4, sizeof (struct in_addr));
		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof (struct in_addr));
	}
	MD5Final((u_char *)digest, &ctx);
	pf_tcp_iss_off += 4096;
	return (digest[0] + random() + pf_tcp_iss_off);
}
4294 pf_test_rule(struct pf_rule
**rm
, struct pf_state
**sm
, int direction
,
4295 struct pfi_kif
*kif
, struct mbuf
*m
, int off
, void *h
,
4296 struct pf_pdesc
*pd
, struct pf_rule
**am
, struct pf_ruleset
**rsm
,
4297 struct ifqueue
*ifq
)
4300 struct pf_rule
*nr
= NULL
;
4301 struct pf_addr
*saddr
= pd
->src
, *daddr
= pd
->dst
;
4302 #ifdef NO_APPLE_EXTENSIONS
4303 u_int16_t bport
, nport
= 0;
4305 sa_family_t af
= pd
->af
;
4306 struct pf_rule
*r
, *a
= NULL
;
4307 struct pf_ruleset
*ruleset
= NULL
;
4308 struct pf_src_node
*nsn
= NULL
;
4309 struct tcphdr
*th
= pd
->hdr
.tcp
;
4311 int rewrite
= 0, hdrlen
= 0;
4313 unsigned int rtableid
= IFSCOPE_NONE
;
4317 u_int16_t mss
= tcp_mssdflt
;
4318 #ifdef NO_APPLE_EXTENSIONS
4319 u_int16_t sport
, dport
;
4321 u_int8_t icmptype
= 0, icmpcode
= 0;
4323 #ifndef NO_APPLE_EXTENSIONS
4324 struct pf_grev1_hdr
*grev1
= pd
->hdr
.grev1
;
4325 union pf_state_xport bxport
, nxport
, sxport
, dxport
;
4328 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
4330 if (direction
== PF_IN
&& pf_check_congestion(ifq
)) {
4331 REASON_SET(&reason
, PFRES_CONGEST
);
4335 #ifndef NO_APPLE_EXTENSIONS
4341 sport
= dport
= hdrlen
= 0;
4344 switch (pd
->proto
) {
4346 #ifndef NO_APPLE_EXTENSIONS
4347 sxport
.port
= th
->th_sport
;
4348 dxport
.port
= th
->th_dport
;
4350 sport
= th
->th_sport
;
4351 dport
= th
->th_dport
;
4353 hdrlen
= sizeof (*th
);
4356 #ifndef NO_APPLE_EXTENSIONS
4357 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4358 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4360 sport
= pd
->hdr
.udp
->uh_sport
;
4361 dport
= pd
->hdr
.udp
->uh_dport
;
4363 hdrlen
= sizeof (*pd
->hdr
.udp
);
4367 if (pd
->af
!= AF_INET
)
4369 #ifndef NO_APPLE_EXTENSIONS
4370 sxport
.port
= dxport
.port
= pd
->hdr
.icmp
->icmp_id
;
4371 hdrlen
= ICMP_MINLEN
;
4373 sport
= dport
= pd
->hdr
.icmp
->icmp_id
;
4375 icmptype
= pd
->hdr
.icmp
->icmp_type
;
4376 icmpcode
= pd
->hdr
.icmp
->icmp_code
;
4378 if (icmptype
== ICMP_UNREACH
||
4379 icmptype
== ICMP_SOURCEQUENCH
||
4380 icmptype
== ICMP_REDIRECT
||
4381 icmptype
== ICMP_TIMXCEED
||
4382 icmptype
== ICMP_PARAMPROB
)
4387 case IPPROTO_ICMPV6
:
4388 if (pd
->af
!= AF_INET6
)
4390 #ifndef NO_APPLE_EXTENSIONS
4391 sxport
.port
= dxport
.port
= pd
->hdr
.icmp6
->icmp6_id
;
4393 sport
= dport
= pd
->hdr
.icmp6
->icmp6_id
;
4395 hdrlen
= sizeof (*pd
->hdr
.icmp6
);
4396 icmptype
= pd
->hdr
.icmp6
->icmp6_type
;
4397 icmpcode
= pd
->hdr
.icmp6
->icmp6_code
;
4399 if (icmptype
== ICMP6_DST_UNREACH
||
4400 icmptype
== ICMP6_PACKET_TOO_BIG
||
4401 icmptype
== ICMP6_TIME_EXCEEDED
||
4402 icmptype
== ICMP6_PARAM_PROB
)
4406 #ifndef NO_APPLE_EXTENSIONS
4408 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
) {
4409 sxport
.call_id
= dxport
.call_id
=
4410 pd
->hdr
.grev1
->call_id
;
4411 hdrlen
= sizeof (*pd
->hdr
.grev1
);
4416 dxport
.spi
= pd
->hdr
.esp
->spi
;
4417 hdrlen
= sizeof (*pd
->hdr
.esp
);
4422 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_FILTER
].active
.ptr
);
4424 if (direction
== PF_OUT
) {
4425 #ifndef NO_APPLE_EXTENSIONS
4426 bxport
= nxport
= sxport
;
4427 /* check outgoing packet for BINAT/NAT */
4428 if ((nr
= pf_get_translation_aux(pd
, m
, off
, PF_OUT
, kif
, &nsn
,
4429 saddr
, &sxport
, daddr
, &dxport
, &pd
->naddr
, &nxport
)) !=
4432 bport
= nport
= sport
;
4433 /* check outgoing packet for BINAT/NAT */
4434 if ((nr
= pf_get_translation(pd
, m
, off
, PF_OUT
, kif
, &nsn
,
4435 saddr
, sport
, daddr
, dport
, &pd
->naddr
, &nport
)) != NULL
) {
4437 PF_ACPY(&pd
->baddr
, saddr
, af
);
4438 switch (pd
->proto
) {
4440 #ifndef NO_APPLE_EXTENSIONS
4441 pf_change_ap(direction
, pd
->mp
, saddr
,
4442 &th
->th_sport
, pd
->ip_sum
, &th
->th_sum
,
4443 &pd
->naddr
, nxport
.port
, 0, af
);
4444 sxport
.port
= th
->th_sport
;
4446 pf_change_ap(saddr
, &th
->th_sport
, pd
->ip_sum
,
4447 &th
->th_sum
, &pd
->naddr
, nport
, 0, af
);
4448 sport
= th
->th_sport
;
4453 #ifndef NO_APPLE_EXTENSIONS
4454 pf_change_ap(direction
, pd
->mp
, saddr
,
4455 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4456 &pd
->hdr
.udp
->uh_sum
, &pd
->naddr
,
4457 nxport
.port
, 1, af
);
4458 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4460 pf_change_ap(saddr
, &pd
->hdr
.udp
->uh_sport
,
4461 pd
->ip_sum
, &pd
->hdr
.udp
->uh_sum
,
4462 &pd
->naddr
, nport
, 1, af
);
4463 sport
= pd
->hdr
.udp
->uh_sport
;
4469 pf_change_a(&saddr
->v4
.s_addr
, pd
->ip_sum
,
4470 pd
->naddr
.v4
.s_addr
, 0);
4471 #ifndef NO_APPLE_EXTENSIONS
4472 pd
->hdr
.icmp
->icmp_cksum
= pf_cksum_fixup(
4473 pd
->hdr
.icmp
->icmp_cksum
, sxport
.port
,
4475 pd
->hdr
.icmp
->icmp_id
= nxport
.port
;
4478 pd
->hdr
.icmp
->icmp_cksum
= pf_cksum_fixup(
4479 pd
->hdr
.icmp
->icmp_cksum
, sport
, nport
, 0);
4480 pd
->hdr
.icmp
->icmp_id
= nport
;
4481 m_copyback(m
, off
, ICMP_MINLEN
, pd
->hdr
.icmp
);
4486 case IPPROTO_ICMPV6
:
4487 pf_change_a6(saddr
, &pd
->hdr
.icmp6
->icmp6_cksum
,
4492 #ifndef NO_APPLE_EXTENSIONS
4497 pf_change_a(&saddr
->v4
.s_addr
,
4498 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4503 PF_ACPY(saddr
, &pd
->naddr
, AF_INET6
);
4514 pf_change_a(&saddr
->v4
.s_addr
,
4515 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4520 PF_ACPY(saddr
, &pd
->naddr
, AF_INET6
);
4530 pf_change_a(&saddr
->v4
.s_addr
,
4531 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4536 PF_ACPY(saddr
, &pd
->naddr
, af
);
4548 #ifndef NO_APPLE_EXTENSIONS
4549 bxport
.port
= nxport
.port
= dxport
.port
;
4550 /* check incoming packet for BINAT/RDR */
4551 if ((nr
= pf_get_translation_aux(pd
, m
, off
, PF_IN
, kif
, &nsn
,
4552 saddr
, &sxport
, daddr
, &dxport
, &pd
->naddr
, &nxport
)) !=
4555 bport
= nport
= dport
;
4556 /* check incoming packet for BINAT/RDR */
4557 if ((nr
= pf_get_translation(pd
, m
, off
, PF_IN
, kif
, &nsn
,
4558 saddr
, sport
, daddr
, dport
, &pd
->naddr
, &nport
)) != NULL
) {
4560 PF_ACPY(&pd
->baddr
, daddr
, af
);
4561 switch (pd
->proto
) {
4563 #ifndef NO_APPLE_EXTENSIONS
4564 pf_change_ap(direction
, pd
->mp
, daddr
,
4565 &th
->th_dport
, pd
->ip_sum
, &th
->th_sum
,
4566 &pd
->naddr
, nxport
.port
, 0, af
);
4567 dxport
.port
= th
->th_dport
;
4569 pf_change_ap(daddr
, &th
->th_dport
, pd
->ip_sum
,
4570 &th
->th_sum
, &pd
->naddr
, nport
, 0, af
);
4571 dport
= th
->th_dport
;
4576 #ifndef NO_APPLE_EXTENSIONS
4577 pf_change_ap(direction
, pd
->mp
, daddr
,
4578 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4579 &pd
->hdr
.udp
->uh_sum
, &pd
->naddr
,
4580 nxport
.port
, 1, af
);
4581 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4583 pf_change_ap(direction
, daddr
,
4584 &pd
->hdr
.udp
->uh_dport
,
4585 pd
->ip_sum
, &pd
->hdr
.udp
->uh_sum
,
4586 &pd
->naddr
, nport
, 1, af
);
4587 dport
= pd
->hdr
.udp
->uh_dport
;
4593 pf_change_a(&daddr
->v4
.s_addr
, pd
->ip_sum
,
4594 pd
->naddr
.v4
.s_addr
, 0);
4598 case IPPROTO_ICMPV6
:
4599 pf_change_a6(daddr
, &pd
->hdr
.icmp6
->icmp6_cksum
,
4604 #ifndef NO_APPLE_EXTENSIONS
4606 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
)
4607 grev1
->call_id
= nxport
.call_id
;
4612 pf_change_a(&daddr
->v4
.s_addr
,
4613 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4618 PF_ACPY(daddr
, &pd
->naddr
, AF_INET6
);
4628 pf_change_a(&daddr
->v4
.s_addr
,
4629 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4634 PF_ACPY(daddr
, &pd
->naddr
, AF_INET6
);
4644 pf_change_a(&daddr
->v4
.s_addr
,
4645 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4650 PF_ACPY(daddr
, &pd
->naddr
, af
);
4663 #ifndef NO_APPLE_EXTENSIONS
4664 if (nr
&& nr
->tag
> 0)
4670 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
)
4671 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
4672 else if (r
->direction
&& r
->direction
!= direction
)
4673 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
4674 else if (r
->af
&& r
->af
!= af
)
4675 r
= r
->skip
[PF_SKIP_AF
].ptr
;
4676 else if (r
->proto
&& r
->proto
!= pd
->proto
)
4677 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
4678 else if (PF_MISMATCHAW(&r
->src
.addr
, saddr
, af
,
4680 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
4681 /* tcp/udp only. port_op always 0 in other cases */
4682 #ifndef NO_APPLE_EXTENSIONS
4683 else if (r
->proto
== pd
->proto
&&
4684 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
4685 r
->src
.xport
.range
.op
&&
4686 !pf_match_port(r
->src
.xport
.range
.op
,
4687 r
->src
.xport
.range
.port
[0], r
->src
.xport
.range
.port
[1],
4690 else if (r
->src
.port_op
&& !pf_match_port(r
->src
.port_op
,
4691 r
->src
.port
[0], r
->src
.port
[1], th
->th_sport
))
4693 r
= r
->skip
[PF_SKIP_SRC_PORT
].ptr
;
4694 else if (PF_MISMATCHAW(&r
->dst
.addr
, daddr
, af
,
4696 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
4697 /* tcp/udp only. port_op always 0 in other cases */
4698 #ifndef NO_APPLE_EXTENSIONS
4699 else if (r
->proto
== pd
->proto
&&
4700 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
4701 r
->dst
.xport
.range
.op
&&
4702 !pf_match_port(r
->dst
.xport
.range
.op
,
4703 r
->dst
.xport
.range
.port
[0], r
->dst
.xport
.range
.port
[1],
4706 else if (r
->dst
.port_op
&& !pf_match_port(r
->dst
.port_op
,
4707 r
->dst
.port
[0], r
->dst
.port
[1], th
->th_dport
))
4709 r
= r
->skip
[PF_SKIP_DST_PORT
].ptr
;
4710 /* icmp only. type always 0 in other cases */
4711 else if (r
->type
&& r
->type
!= icmptype
+ 1)
4712 r
= TAILQ_NEXT(r
, entries
);
4713 /* icmp only. type always 0 in other cases */
4714 else if (r
->code
&& r
->code
!= icmpcode
+ 1)
4715 r
= TAILQ_NEXT(r
, entries
);
4716 else if (r
->tos
&& !(r
->tos
== pd
->tos
))
4717 r
= TAILQ_NEXT(r
, entries
);
4718 else if (r
->rule_flag
& PFRULE_FRAGMENT
)
4719 r
= TAILQ_NEXT(r
, entries
);
4720 else if (pd
->proto
== IPPROTO_TCP
&&
4721 (r
->flagset
& th
->th_flags
) != r
->flags
)
4722 r
= TAILQ_NEXT(r
, entries
);
4723 /* tcp/udp only. uid.op always 0 in other cases */
4724 else if (r
->uid
.op
&& (pd
->lookup
.done
|| (pd
->lookup
.done
=
4725 pf_socket_lookup(direction
, pd
), 1)) &&
4726 !pf_match_uid(r
->uid
.op
, r
->uid
.uid
[0], r
->uid
.uid
[1],
4728 r
= TAILQ_NEXT(r
, entries
);
4729 /* tcp/udp only. gid.op always 0 in other cases */
4730 else if (r
->gid
.op
&& (pd
->lookup
.done
|| (pd
->lookup
.done
=
4731 pf_socket_lookup(direction
, pd
), 1)) &&
4732 !pf_match_gid(r
->gid
.op
, r
->gid
.gid
[0], r
->gid
.gid
[1],
4734 r
= TAILQ_NEXT(r
, entries
);
4735 else if (r
->prob
&& r
->prob
<= (random() % (UINT_MAX
- 1) + 1))
4736 r
= TAILQ_NEXT(r
, entries
);
4737 else if (r
->match_tag
&& !pf_match_tag(m
, r
, pd
->pf_mtag
, &tag
))
4738 r
= TAILQ_NEXT(r
, entries
);
4739 else if (r
->os_fingerprint
!= PF_OSFP_ANY
&&
4740 (pd
->proto
!= IPPROTO_TCP
|| !pf_osfp_match(
4741 pf_osfp_fingerprint(pd
, m
, off
, th
),
4742 r
->os_fingerprint
)))
4743 r
= TAILQ_NEXT(r
, entries
);
4747 if (PF_RTABLEID_IS_VALID(r
->rtableid
))
4748 rtableid
= r
->rtableid
;
4749 if (r
->anchor
== NULL
) {
4756 r
= TAILQ_NEXT(r
, entries
);
4758 pf_step_into_anchor(&asd
, &ruleset
,
4759 PF_RULESET_FILTER
, &r
, &a
, &match
);
4761 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
4762 PF_RULESET_FILTER
, &r
, &a
, &match
))
4769 REASON_SET(&reason
, PFRES_MATCH
);
4771 if (r
->log
|| (nr
!= NULL
&& nr
->log
)) {
4772 #ifndef NO_APPLE_EXTENSIONS
4774 if (rewrite
< off
+ hdrlen
)
4775 rewrite
= off
+ hdrlen
;
4777 m
= pf_lazy_makewritable(pd
, m
, rewrite
);
4779 REASON_SET(&reason
, PFRES_MEMORY
);
4783 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
4787 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
4789 PFLOG_PACKET(kif
, h
, m
, af
, direction
, reason
, r
->log
? r
: nr
,
4793 if ((r
->action
== PF_DROP
) &&
4794 ((r
->rule_flag
& PFRULE_RETURNRST
) ||
4795 (r
->rule_flag
& PFRULE_RETURNICMP
) ||
4796 (r
->rule_flag
& PFRULE_RETURN
))) {
4797 /* undo NAT changes, if they have taken place */
4799 if (direction
== PF_OUT
) {
4800 switch (pd
->proto
) {
4802 #ifndef NO_APPLE_EXTENSIONS
4803 pf_change_ap(direction
, pd
->mp
, saddr
,
4804 &th
->th_sport
, pd
->ip_sum
,
4805 &th
->th_sum
, &pd
->baddr
,
4806 bxport
.port
, 0, af
);
4807 sxport
.port
= th
->th_sport
;
4809 pf_change_ap(saddr
, &th
->th_sport
,
4810 pd
->ip_sum
, &th
->th_sum
,
4811 &pd
->baddr
, bport
, 0, af
);
4812 sport
= th
->th_sport
;
4817 #ifndef NO_APPLE_EXTENSIONS
4818 pf_change_ap(direction
, pd
->mp
, saddr
,
4819 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4820 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4821 bxport
.port
, 1, af
);
4822 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4825 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4826 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4828 sport
= pd
->hdr
.udp
->uh_sport
;
4834 case IPPROTO_ICMPV6
:
4838 #ifndef NO_APPLE_EXTENSIONS
4840 PF_ACPY(&pd
->baddr
, saddr
, af
);
4845 pf_change_a(&saddr
->v4
.s_addr
,
4847 pd
->baddr
.v4
.s_addr
, 0);
4852 PF_ACPY(saddr
, &pd
->baddr
,
4859 PF_ACPY(&pd
->baddr
, saddr
, af
);
4863 pf_change_a(&saddr
->v4
.s_addr
,
4865 pd
->baddr
.v4
.s_addr
, 0);
4870 PF_ACPY(saddr
, &pd
->baddr
,
4880 pf_change_a(&saddr
->v4
.s_addr
,
4882 pd
->baddr
.v4
.s_addr
, 0);
4885 PF_ACPY(saddr
, &pd
->baddr
, af
);
4890 switch (pd
->proto
) {
4892 #ifndef NO_APPLE_EXTENSIONS
4893 pf_change_ap(direction
, pd
->mp
, daddr
,
4894 &th
->th_dport
, pd
->ip_sum
,
4895 &th
->th_sum
, &pd
->baddr
,
4896 bxport
.port
, 0, af
);
4897 dxport
.port
= th
->th_dport
;
4899 pf_change_ap(daddr
, &th
->th_dport
,
4900 pd
->ip_sum
, &th
->th_sum
,
4901 &pd
->baddr
, bport
, 0, af
);
4902 dport
= th
->th_dport
;
4907 #ifndef NO_APPLE_EXTENSIONS
4908 pf_change_ap(direction
, pd
->mp
, daddr
,
4909 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4910 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4911 bxport
.port
, 1, af
);
4912 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4915 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4916 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4918 dport
= pd
->hdr
.udp
->uh_dport
;
4924 case IPPROTO_ICMPV6
:
4928 #ifndef NO_APPLE_EXTENSIONS
4930 if (pd
->proto_variant
==
4931 PF_GRE_PPTP_VARIANT
)
4932 grev1
->call_id
= bxport
.call_id
;
4937 pf_change_a(&daddr
->v4
.s_addr
,
4939 pd
->baddr
.v4
.s_addr
, 0);
4944 PF_ACPY(daddr
, &pd
->baddr
,
4954 pf_change_a(&daddr
->v4
.s_addr
,
4956 pd
->baddr
.v4
.s_addr
, 0);
4961 PF_ACPY(daddr
, &pd
->baddr
,
4971 pf_change_a(&daddr
->v4
.s_addr
,
4973 pd
->baddr
.v4
.s_addr
, 0);
4977 PF_ACPY(daddr
, &pd
->baddr
, af
);
4984 if (pd
->proto
== IPPROTO_TCP
&&
4985 ((r
->rule_flag
& PFRULE_RETURNRST
) ||
4986 (r
->rule_flag
& PFRULE_RETURN
)) &&
4987 !(th
->th_flags
& TH_RST
)) {
4988 u_int32_t ack
= ntohl(th
->th_seq
) + pd
->p_len
;
4997 h4
= mtod(m
, struct ip
*);
4998 len
= ntohs(h4
->ip_len
) - off
;
5002 h6
= mtod(m
, struct ip6_hdr
*);
5003 len
= ntohs(h6
->ip6_plen
) -
5004 (off
- sizeof (*h6
));
5009 if (pf_check_proto_cksum(m
, off
, len
, IPPROTO_TCP
, af
))
5010 REASON_SET(&reason
, PFRES_PROTCKSUM
);
5012 if (th
->th_flags
& TH_SYN
)
5014 if (th
->th_flags
& TH_FIN
)
5016 pf_send_tcp(r
, af
, pd
->dst
,
5017 pd
->src
, th
->th_dport
, th
->th_sport
,
5018 ntohl(th
->th_ack
), ack
, TH_RST
|TH_ACK
, 0, 0,
5019 r
->return_ttl
, 1, 0, pd
->eh
, kif
->pfik_ifp
);
5021 } else if (pd
->proto
!= IPPROTO_ICMP
&& af
== AF_INET
&&
5022 #ifndef NO_APPLE_EXTENSIONS
5023 pd
->proto
!= IPPROTO_ESP
&& pd
->proto
!= IPPROTO_AH
&&
5026 pf_send_icmp(m
, r
->return_icmp
>> 8,
5027 r
->return_icmp
& 255, af
, r
);
5028 else if (pd
->proto
!= IPPROTO_ICMPV6
&& af
== AF_INET6
&&
5029 #ifndef NO_APPLE_EXTENSIONS
    pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
        pf_send_icmp(m, r->return_icmp6 >> 8,
            r->return_icmp6 & 255, af, r);

    if (r->action == PF_DROP)

    if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
        REASON_SET(&reason, PFRES_MEMORY);

    if (!state_icmp && (r->keep_state || nr != NULL ||
        (pd->flags & PFDESC_TCP_NORM))) {
        /* create new state */
        struct pf_state *s = NULL;
        struct pf_state_key *sk = NULL;
        struct pf_src_node *sn = NULL;
#ifndef NO_APPLE_EXTENSIONS
        struct pf_ike_hdr ike;

        if (pd->proto == IPPROTO_UDP) {
            struct udphdr *uh = pd->hdr.udp;
            size_t plen = m->m_pkthdr.len - off - sizeof (*uh);

            if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
                ntohs(uh->uh_dport) == PF_IKE_PORT &&
                plen >= PF_IKE_PACKET_MINSIZE) {
                if (plen > PF_IKE_PACKET_MINSIZE)
                    plen = PF_IKE_PACKET_MINSIZE;
                m_copydata(m, off + sizeof (*uh), plen, &ike);

        if (nr != NULL && pd->proto == IPPROTO_ESP &&
            direction == PF_OUT) {
            struct pf_state_key_cmp sk0;
            struct pf_state *s0;

            /*
             * This squelches state creation if the external
             * address matches an existing incomplete state with a
             * different internal address.  Only one 'blocking'
             * partial state is allowed for each external address.
             */
            memset(&sk0, 0, sizeof (sk0));
            sk0.proto = IPPROTO_ESP;
            PF_ACPY(&sk0.gwy.addr, saddr, sk0.af);
            PF_ACPY(&sk0.ext.addr, daddr, sk0.af);
            s0 = pf_find_state(kif, &sk0, PF_IN);

            if (s0 && PF_ANEQ(&s0->state_key->lan.addr,

        /* check maximums */
        if (r->max_states && (r->states >= r->max_states)) {
            pf_status.lcounters[LCNT_STATES]++;
            REASON_SET(&reason, PFRES_MAXSTATES);

        /* src node for filter rule */
        if ((r->rule_flag & PFRULE_SRCTRACK ||
            r->rpool.opts & PF_POOL_STICKYADDR) &&
            pf_insert_src_node(&sn, r, saddr, af) != 0) {
            REASON_SET(&reason, PFRES_SRCLIMIT);

        /* src node for translation rule */
        if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
            ((direction == PF_OUT &&
#ifndef NO_APPLE_EXTENSIONS
            nr->action != PF_RDR &&
            pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
            (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
            REASON_SET(&reason, PFRES_SRCLIMIT);

        s = pool_get(&pf_state_pl, PR_WAITOK);
            REASON_SET(&reason, PFRES_MEMORY);
            if (sn != NULL && sn->states == 0 && sn->expire == 0) {
                RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
                pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
                pf_status.src_nodes--;
                pool_put(&pf_src_tree_pl, sn);
            if (nsn != sn && nsn != NULL && nsn->states == 0 &&
                RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
                pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
                pf_status.src_nodes--;
                pool_put(&pf_src_tree_pl, nsn);
#ifndef NO_APPLE_EXTENSIONS
                pool_put(&pf_app_state_pl,
                pool_put(&pf_state_key_pl, sk);

        bzero(s, sizeof (*s));
#ifndef NO_APPLE_EXTENSIONS
        TAILQ_INIT(&s->unlink_hooks);
        s->nat_rule.ptr = nr;
        if (nr && nr->action == PF_RDR && direction == PF_OUT)
        STATE_INC_COUNTERS(s);
        s->allow_opts = r->allow_opts;
        s->log = r->log & PF_LOG_ALL;
            s->log |= nr->log & PF_LOG_ALL;
        switch (pd->proto) {
            s->src.seqlo = ntohl(th->th_seq);
            s->src.seqhi = s->src.seqlo + pd->p_len + 1;
            if ((th->th_flags & (TH_SYN|TH_ACK)) ==
                TH_SYN && r->keep_state == PF_STATE_MODULATE) {
                /* Generate sequence number modulator */
                if ((s->src.seqdiff = pf_tcp_iss(pd) -
                pf_change_a(&th->th_seq, &th->th_sum,
                    htonl(s->src.seqlo + s->src.seqdiff), 0);
                rewrite = off + sizeof (*th);
            if (th->th_flags & TH_SYN) {
                s->src.wscale = pf_get_wscale(m, off,
            s->src.max_win = MAX(ntohs(th->th_win), 1);
            if (s->src.wscale & PF_WSCALE_MASK) {
                /* Remove scale factor from initial window */
                int win = s->src.max_win;
                win += 1 << (s->src.wscale & PF_WSCALE_MASK);
                s->src.max_win = (win - 1) >>
                    (s->src.wscale & PF_WSCALE_MASK);
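                /*
                 * Note: the computation above keeps max_win in
                 * window-scale units -- the SYN's advertised window is
                 * divided by 2^wscale and rounded up, so a nonzero
                 * initial window can never collapse to zero.
                 */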
            if (th->th_flags & TH_FIN)
            s->src.state = TCPS_SYN_SENT;
            s->dst.state = TCPS_CLOSED;
            s->timeout = PFTM_TCP_FIRST_PACKET;
            s->src.state = PFUDPS_SINGLE;
            s->dst.state = PFUDPS_NO_TRAFFIC;
            s->timeout = PFTM_UDP_FIRST_PACKET;
        case IPPROTO_ICMPV6:
            s->timeout = PFTM_ICMP_FIRST_PACKET;
#ifndef NO_APPLE_EXTENSIONS
            s->src.state = PFGRE1S_INITIATING;
            s->dst.state = PFGRE1S_NO_TRAFFIC;
            s->timeout = PFTM_GREv1_INITIATING;
            s->src.state = PFESPS_INITIATING;
            s->dst.state = PFESPS_NO_TRAFFIC;
            s->timeout = PFTM_ESP_FIRST_PACKET;
            s->src.state = PFOTHERS_SINGLE;
            s->dst.state = PFOTHERS_NO_TRAFFIC;
            s->timeout = PFTM_OTHER_FIRST_PACKET;

        s->creation = pf_time_second();
        s->expire = pf_time_second();
            s->src_node->states++;
            VERIFY(s->src_node->states != 0);
            PF_ACPY(&nsn->raddr, &pd->naddr, af);
            s->nat_src_node = nsn;
            s->nat_src_node->states++;
            VERIFY(s->nat_src_node->states != 0);
        if (pd->proto == IPPROTO_TCP) {
            if ((pd->flags & PFDESC_TCP_NORM) &&
                pf_normalize_tcp_init(m, off, pd, th, &s->src,
                REASON_SET(&reason, PFRES_MEMORY);
                pf_src_tree_remove_state(s);
                STATE_DEC_COUNTERS(s);
                pool_put(&pf_state_pl, s);
            if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
                pf_normalize_tcp_stateful(m, off, pd, &reason,
                th, s, &s->src, &s->dst, &rewrite)) {
                /* This really shouldn't happen!!! */
                DPFPRINTF(PF_DEBUG_URGENT,
                    ("pf_normalize_tcp_stateful failed on "
                pf_normalize_tcp_cleanup(s);
                pf_src_tree_remove_state(s);
                STATE_DEC_COUNTERS(s);
                pool_put(&pf_state_pl, s);

        if ((sk = pf_alloc_state_key(s)) == NULL) {
            REASON_SET(&reason, PFRES_MEMORY);

        sk->proto = pd->proto;
        sk->direction = direction;
#ifndef NO_APPLE_EXTENSIONS
        if (pd->proto == IPPROTO_UDP) {
            if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
                ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
                sk->proto_variant = PF_EXTFILTER_APD;
                sk->proto_variant = nr ? nr->extfilter :
                if (sk->proto_variant < PF_EXTFILTER_APD)
                    sk->proto_variant = PF_EXTFILTER_APD;
        } else if (pd->proto == IPPROTO_GRE) {
            sk->proto_variant = pd->proto_variant;

        if (direction == PF_OUT) {
            PF_ACPY(&sk->gwy.addr, saddr, af);
            PF_ACPY(&sk->ext.addr, daddr, af);
            switch (pd->proto) {
#ifndef NO_APPLE_EXTENSIONS
                sk->gwy.xport = sxport;
                sk->ext.xport = dxport;
                sk->gwy.xport.spi = 0;
                sk->ext.xport.spi = pd->hdr.esp->spi;
            case IPPROTO_ICMPV6:
#ifndef NO_APPLE_EXTENSIONS
                sk->gwy.xport.port = nxport.port;
                sk->ext.xport.spi = 0;
                sk->gwy.port = nport;
#ifndef NO_APPLE_EXTENSIONS
                sk->gwy.xport = sxport;
                sk->ext.xport = dxport;
                sk->gwy.port = sport;
                sk->ext.port = dport;
#ifndef NO_APPLE_EXTENSIONS
                PF_ACPY(&sk->lan.addr, &pd->baddr, af);
                sk->lan.xport = bxport;
                PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
                sk->lan.xport = sk->gwy.xport;
                PF_ACPY(&sk->lan.addr, &pd->baddr, af);
                sk->lan.port = bport;
                PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
                sk->lan.port = sk->gwy.port;
            PF_ACPY(&sk->lan.addr, daddr, af);
            PF_ACPY(&sk->ext.addr, saddr, af);
            switch (pd->proto) {
            case IPPROTO_ICMPV6:
#ifndef NO_APPLE_EXTENSIONS
                sk->lan.xport = nxport;
                sk->ext.xport.spi = 0;
                sk->lan.port = nport;
#ifndef NO_APPLE_EXTENSIONS
                sk->ext.xport.spi = 0;
                sk->lan.xport.spi = pd->hdr.esp->spi;
                sk->lan.xport = dxport;
                sk->ext.xport = sxport;
                sk->lan.port = dport;
                sk->ext.port = sport;
#ifndef NO_APPLE_EXTENSIONS
                PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
                sk->gwy.xport = bxport;
                PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
                sk->gwy.xport = sk->lan.xport;
                PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
                sk->gwy.port = bport;
                PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
                sk->gwy.port = sk->lan.port;

        pf_set_rt_ifp(s, saddr);	/* needs s->state_key set */

#ifndef NO_APPLE_EXTENSIONS
        if (sk->app_state == 0) {
            switch (pd->proto) {
                u_int16_t dport = (direction == PF_OUT) ?
                    sk->ext.xport.port : sk->gwy.xport.port;
                    ntohs(dport) == PF_PPTP_PORT) {
                    struct pf_app_state *as;

                    as = pool_get(&pf_app_state_pl,
                    bzero(as, sizeof (*as));
                    as->handler = pf_pptp_handler;
                    as->compare_lan_ext = 0;
                    as->compare_ext_gwy = 0;
                    as->u.pptp.grev1_state = 0;
                    (void) hook_establish(&s->unlink_hooks,
                        0, (hook_fn_t) pf_pptp_unlink, s);
                struct udphdr *uh = pd->hdr.udp;
                    ntohs(uh->uh_sport) == PF_IKE_PORT &&
                    ntohs(uh->uh_dport) == PF_IKE_PORT) {
                    struct pf_app_state *as;

                    as = pool_get(&pf_app_state_pl,
                    bzero(as, sizeof (*as));
                    as->compare_lan_ext = pf_ike_compare;
                    as->compare_ext_gwy = pf_ike_compare;
                    as->u.ike.cookie = ike.initiator_cookie;

        if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
            if (pd->proto == IPPROTO_TCP)
                pf_normalize_tcp_cleanup(s);
            REASON_SET(&reason, PFRES_STATEINS);
            pf_src_tree_remove_state(s);
            STATE_DEC_COUNTERS(s);
            pool_put(&pf_state_pl, s);

        if (pd->proto == IPPROTO_TCP &&
            (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
            r->keep_state == PF_STATE_SYNPROXY) {
            s->src.state = PF_TCPS_PROXY_SRC;
#ifndef NO_APPLE_EXTENSIONS
            if (direction == PF_OUT) {
                pf_change_ap(direction, pd->mp, saddr,
                    &th->th_sport, pd->ip_sum,
                    &th->th_sum, &pd->baddr,
                    bxport.port, 0, af);
                sxport.port = th->th_sport;
                pf_change_ap(direction, pd->mp, daddr,
                    &th->th_dport, pd->ip_sum,
                    &th->th_sum, &pd->baddr,
                    bxport.port, 0, af);
                sxport.port = th->th_dport;
            if (direction == PF_OUT) {
                pf_change_ap(saddr, &th->th_sport,
                    pd->ip_sum, &th->th_sum, &pd->baddr,
                sport = th->th_sport;
                pf_change_ap(daddr, &th->th_dport,
                    pd->ip_sum, &th->th_sum, &pd->baddr,
                sport = th->th_dport;
            s->src.seqhi = htonl(random());
            /* Find mss option */
            mss = pf_get_mss(m, off, th->th_off, af);
            mss = pf_calc_mss(saddr, af, mss);
            mss = pf_calc_mss(daddr, af, mss);
            pf_send_tcp(r, af, daddr, saddr, th->th_dport,
                th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
                TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
            REASON_SET(&reason, PFRES_SYNPROXY);
            return (PF_SYNPROXY_DROP);

#ifndef NO_APPLE_EXTENSIONS
        if (sk->app_state && sk->app_state->handler) {
            switch (pd->proto) {
                offx += th->th_off << 2;
                offx += pd->hdr.udp->uh_ulen << 2;
                /* ALG handlers only apply to TCP and UDP rules */
                sk->app_state->handler(s, direction, offx,
                REASON_SET(&reason, PFRES_MEMORY);

    /* copy back packet headers if we performed NAT operations */
#ifndef NO_APPLE_EXTENSIONS
        if (rewrite < off + hdrlen)
            rewrite = off + hdrlen;
        m = pf_lazy_makewritable(pd, pd->mp, rewrite);
            REASON_SET(&reason, PFRES_MEMORY);
        m_copyback(m, off, hdrlen, pd->hdr.any);
        m_copyback(m, off, hdrlen, pd->hdr.any);
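/*
 * pf_test_fragment() below is a stateless walk of the filter ruleset used
 * when only the network-layer header can be relied on (e.g. fragments that
 * could not be reassembled).  Rules that need transport-level information
 * (ports, TCP flags, ICMP type/code, OS fingerprints) are stepped over, and
 * no state is created.
 */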
pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
    struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
    struct pf_ruleset **rsm)

    struct pf_rule *r, *a = NULL;
    struct pf_ruleset *ruleset = NULL;
    sa_family_t af = pd->af;

    r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
        if (pfi_kif_match(r->kif, kif) == r->ifnot)
            r = r->skip[PF_SKIP_IFP].ptr;
        else if (r->direction && r->direction != direction)
            r = r->skip[PF_SKIP_DIR].ptr;
        else if (r->af && r->af != af)
            r = r->skip[PF_SKIP_AF].ptr;
        else if (r->proto && r->proto != pd->proto)
            r = r->skip[PF_SKIP_PROTO].ptr;
        else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
            r = r->skip[PF_SKIP_SRC_ADDR].ptr;
        else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
            r = r->skip[PF_SKIP_DST_ADDR].ptr;
        else if (r->tos && !(r->tos == pd->tos))
            r = TAILQ_NEXT(r, entries);
        else if (r->os_fingerprint != PF_OSFP_ANY)
            r = TAILQ_NEXT(r, entries);
#ifndef NO_APPLE_EXTENSIONS
        else if (pd->proto == IPPROTO_UDP &&
            (r->src.xport.range.op || r->dst.xport.range.op))
            r = TAILQ_NEXT(r, entries);
        else if (pd->proto == IPPROTO_TCP &&
            (r->src.xport.range.op || r->dst.xport.range.op ||
            r = TAILQ_NEXT(r, entries);
        else if (pd->proto == IPPROTO_UDP &&
            (r->src.port_op || r->dst.port_op))
            r = TAILQ_NEXT(r, entries);
        else if (pd->proto == IPPROTO_TCP &&
            (r->src.port_op || r->dst.port_op || r->flagset))
            r = TAILQ_NEXT(r, entries);
        else if ((pd->proto == IPPROTO_ICMP ||
            pd->proto == IPPROTO_ICMPV6) &&
            (r->type || r->code))
            r = TAILQ_NEXT(r, entries);
        else if (r->prob && r->prob <= (random() % (UINT_MAX - 1) + 1))
            r = TAILQ_NEXT(r, entries);
        else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
            r = TAILQ_NEXT(r, entries);
            if (r->anchor == NULL) {
                r = TAILQ_NEXT(r, entries);
                pf_step_into_anchor(&asd, &ruleset,
                    PF_RULESET_FILTER, &r, &a, &match);
        if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
            PF_RULESET_FILTER, &r, &a, &match))

    REASON_SET(&reason, PFRES_MATCH);

        PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,

    if (r->action != PF_PASS)

    if (pf_tag_packet(m, pd->pf_mtag, tag, -1)) {
        REASON_SET(&reason, PFRES_MEMORY);

#ifndef NO_APPLE_EXTENSIONS
pf_pptp_handler(struct pf_state *s, int direction, int off,
    struct pf_pdesc *pd, struct pfi_kif *kif)
#pragma unused(direction)
    struct pf_pptp_state *as;
    struct pf_pptp_ctrl_msg cm;
    struct pf_state *gs;
    u_int16_t *pac_call_id;
    u_int16_t *pns_call_id;
    u_int16_t *spoof_call_id;
    u_int8_t *pac_state;
    u_int8_t *pns_state;
    enum { PF_PPTP_PASS, PF_PPTP_INSERT_GRE, PF_PPTP_REMOVE_GRE } op;
    struct pf_state_key *sk;
    struct pf_state_key *gsk;

    plen = min(sizeof (cm), m->m_pkthdr.len - off);
    if (plen < PF_PPTP_CTRL_MSG_MINSIZE)

    as = &s->state_key->app_state->u.pptp;
    m_copydata(m, off, plen, &cm);

    if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
    if (ntohs(cm.hdr.type) != 1)

    gs = as->grev1_state;
        gs = pool_get(&pf_state_pl, PR_WAITOK);
        memcpy(gs, s, sizeof (*gs));
        memset(&gs->entry_id, 0, sizeof (gs->entry_id));
        memset(&gs->entry_list, 0, sizeof (gs->entry_list));
        TAILQ_INIT(&gs->unlink_hooks);
        gs->pfsync_time = 0;
        gs->packets[0] = gs->packets[1] = 0;
        gs->bytes[0] = gs->bytes[1] = 0;
        gs->timeout = PFTM_UNLINKED;
        gs->id = gs->creatorid = 0;
        gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
        gs->src.scrub = gs->dst.scrub = 0;

        gsk = pf_alloc_state_key(gs);
            pool_put(&pf_state_pl, gs);

        memcpy(&gsk->lan, &sk->lan, sizeof (gsk->lan));
        memcpy(&gsk->gwy, &sk->gwy, sizeof (gsk->gwy));
        memcpy(&gsk->ext, &sk->ext, sizeof (gsk->ext));
        gsk->proto = IPPROTO_GRE;
        gsk->proto_variant = PF_GRE_PPTP_VARIANT;
        gsk->lan.xport.call_id = 0;
        gsk->gwy.xport.call_id = 0;
        gsk->ext.xport.call_id = 0;
        STATE_INC_COUNTERS(gs);
        as->grev1_state = gs;
        gsk = gs->state_key;

    switch (sk->direction) {
        pns_call_id = &gsk->ext.xport.call_id;
        pns_state = &gs->dst.state;
        pac_call_id = &gsk->lan.xport.call_id;
        pac_state = &gs->src.state;

        pns_call_id = &gsk->lan.xport.call_id;
        pns_state = &gs->src.state;
        pac_call_id = &gsk->ext.xport.call_id;
        pac_state = &gs->dst.state;

        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_pptp_handler: bad directional!\n"));

    ct = ntohs(cm.ctrl.type);

    case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ:
        *pns_call_id = cm.msg.call_out_req.call_id;
        *pns_state = PFGRE1S_INITIATING;
        if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
            spoof_call_id = &cm.msg.call_out_req.call_id;

    case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY:
        *pac_call_id = cm.msg.call_out_rpy.call_id;
        if (s->nat_rule.ptr)
                (pac_call_id == &gsk->lan.xport.call_id) ?
                &cm.msg.call_out_rpy.call_id :
                &cm.msg.call_out_rpy.peer_call_id;
        if (gs->timeout == PFTM_UNLINKED) {
            *pac_state = PFGRE1S_INITIATING;
            op = PF_PPTP_INSERT_GRE;

    case PF_PPTP_CTRL_TYPE_CALL_IN_1ST:
        *pns_call_id = cm.msg.call_in_1st.call_id;
        *pns_state = PFGRE1S_INITIATING;
        if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
            spoof_call_id = &cm.msg.call_in_1st.call_id;

    case PF_PPTP_CTRL_TYPE_CALL_IN_2ND:
        *pac_call_id = cm.msg.call_in_2nd.call_id;
        *pac_state = PFGRE1S_INITIATING;
        if (s->nat_rule.ptr)
                (pac_call_id == &gsk->lan.xport.call_id) ?
                &cm.msg.call_in_2nd.call_id :
                &cm.msg.call_in_2nd.peer_call_id;

    case PF_PPTP_CTRL_TYPE_CALL_IN_3RD:
        if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
            spoof_call_id = &cm.msg.call_in_3rd.call_id;
        if (cm.msg.call_in_3rd.call_id != *pns_call_id) {
        if (gs->timeout == PFTM_UNLINKED)
            op = PF_PPTP_INSERT_GRE;

    case PF_PPTP_CTRL_TYPE_CALL_CLR:
        if (cm.msg.call_clr.call_id != *pns_call_id)
            op = PF_PPTP_REMOVE_GRE;

    case PF_PPTP_CTRL_TYPE_CALL_DISC:
        if (cm.msg.call_clr.call_id != *pac_call_id)
            op = PF_PPTP_REMOVE_GRE;

    case PF_PPTP_CTRL_TYPE_ERROR:
        if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
            spoof_call_id = &cm.msg.error.peer_call_id;

    case PF_PPTP_CTRL_TYPE_SET_LINKINFO:
        if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id)
            spoof_call_id = &cm.msg.set_linkinfo.peer_call_id;

    if (!gsk->gwy.xport.call_id && gsk->lan.xport.call_id) {
        gsk->gwy.xport.call_id = gsk->lan.xport.call_id;
        if (spoof_call_id) {
            u_int16_t call_id = 0;
            struct pf_state_key_cmp key;

            key.proto = IPPROTO_GRE;
            key.proto_variant = PF_GRE_PPTP_VARIANT;
            PF_ACPY(&key.gwy.addr, &gsk->gwy.addr, key.af);
            PF_ACPY(&key.ext.addr, &gsk->ext.addr, key.af);
            key.gwy.xport.call_id = gsk->gwy.xport.call_id;
            key.ext.xport.call_id = gsk->ext.xport.call_id;
            call_id = htonl(random());

            while (pf_find_state_all(&key, PF_IN, 0)) {
                call_id = ntohs(call_id);
                if (--call_id == 0) call_id = 0xffff;
                call_id = htons(call_id);
                key.gwy.xport.call_id = call_id;

                    DPFPRINTF(PF_DEBUG_URGENT,
                        ("pf_pptp_handler: failed to spoof "
                    key.gwy.xport.call_id = 0;

            gsk->gwy.xport.call_id = call_id;

    if (spoof_call_id && gsk->lan.xport.call_id != gsk->gwy.xport.call_id) {
        if (*spoof_call_id == gsk->gwy.xport.call_id) {
            *spoof_call_id = gsk->lan.xport.call_id;
            th->th_sum = pf_cksum_fixup(th->th_sum,
                gsk->gwy.xport.call_id, gsk->lan.xport.call_id, 0);
            *spoof_call_id = gsk->gwy.xport.call_id;
            th->th_sum = pf_cksum_fixup(th->th_sum,
                gsk->lan.xport.call_id, gsk->gwy.xport.call_id, 0);

        m = pf_lazy_makewritable(pd, m, off + plen);
            as->grev1_state = NULL;
            STATE_DEC_COUNTERS(gs);
            pool_put(&pf_state_pl, gs);
        m_copyback(m, off, plen, &cm);

    case PF_PPTP_REMOVE_GRE:
        gs->timeout = PFTM_PURGE;
        gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
        gsk->lan.xport.call_id = 0;
        gsk->gwy.xport.call_id = 0;
        gsk->ext.xport.call_id = 0;
        gs->id = gs->creatorid = 0;

    case PF_PPTP_INSERT_GRE:
        gs->creation = pf_time_second();
        gs->expire = pf_time_second();
        gs->timeout = PFTM_GREv1_FIRST_PACKET;
        if (gs->src_node != NULL) {
            ++gs->src_node->states;
            VERIFY(gs->src_node->states != 0);
        if (gs->nat_src_node != NULL) {
            ++gs->nat_src_node->states;
            VERIFY(gs->nat_src_node->states != 0);
        pf_set_rt_ifp(gs, &sk->lan.addr);
        if (pf_insert_state(BOUND_IFACE(s->rule.ptr, kif), gs)) {
            /*
             * FIX ME: insertion can fail when multiple PNS
             * behind the same NAT open calls to the same PAC
             * simultaneously because spoofed call ID numbers
             * are chosen before states are inserted.  This is
             * hard to fix and happens infrequently enough that
             * users will normally try again and this ALG will
             * succeed.  Failures are expected to be rare enough
             * that fixing this is a low priority.
             */
            as->grev1_state = NULL;
            pf_src_tree_remove_state(gs);
            STATE_DEC_COUNTERS(gs);
            pool_put(&pf_state_pl, gs);
            DPFPRINTF(PF_DEBUG_URGENT, ("pf_pptp_handler: error "
                "inserting GREv1 state.\n"));
pf_pptp_unlink(struct pf_state *s)

    struct pf_app_state *as = s->state_key->app_state;
    struct pf_state *gs = as->u.pptp.grev1_state;

        if (gs->timeout < PFTM_MAX)
            gs->timeout = PFTM_PURGE;
        as->u.pptp.grev1_state = 0;
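/*
 * pf_ike_compare() orders IKE app states by initiator cookie.  It is
 * installed above as both the compare_lan_ext and compare_ext_gwy hooks so
 * that concurrent IKE exchanges between the same pair of endpoints (both on
 * port 500) can be told apart during state lookup.
 */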
pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b)

    int64_t d = a->u.ike.cookie - b->u.ike.cookie;
    return ((d > 0) ? 1 : ((d < 0) ? -1 : 0));
pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct mbuf *m, int off, void *h, struct pf_pdesc *pd,

    struct pf_state_key_cmp key;
    struct tcphdr *th = pd->hdr.tcp;
    u_int16_t win = ntohs(th->th_win);
    u_int32_t ack, end, seq, orig_seq;
    struct pf_state_peer *src, *dst;
#ifndef NO_APPLE_EXTENSIONS

    key.proto = IPPROTO_TCP;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.ext.xport.port = th->th_sport;
        key.gwy.xport.port = th->th_dport;
        key.ext.port = th->th_sport;
        key.gwy.port = th->th_dport;
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.lan.xport.port = th->th_sport;
        key.ext.xport.port = th->th_dport;
        key.lan.port = th->th_sport;
        key.ext.port = th->th_dport;

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
        src = &(*state)->dst;
        dst = &(*state)->src;

    if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
        if (direction != (*state)->state_key->direction) {
            REASON_SET(reason, PFRES_SYNPROXY);
            return (PF_SYNPROXY_DROP);
        if (th->th_flags & TH_SYN) {
            if (ntohl(th->th_seq) != (*state)->src.seqlo) {
                REASON_SET(reason, PFRES_SYNPROXY);
            pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
                pd->src, th->th_dport, th->th_sport,
                (*state)->src.seqhi, ntohl(th->th_seq) + 1,
                TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
            REASON_SET(reason, PFRES_SYNPROXY);
            return (PF_SYNPROXY_DROP);
        } else if (!(th->th_flags & TH_ACK) ||
            (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
            (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
            REASON_SET(reason, PFRES_SYNPROXY);
        } else if ((*state)->src_node != NULL &&
            pf_src_connlimit(state)) {
            REASON_SET(reason, PFRES_SRCLIMIT);
            (*state)->src.state = PF_TCPS_PROXY_DST;

    if ((*state)->src.state == PF_TCPS_PROXY_DST) {
        struct pf_state_host *psrc, *pdst;

        if (direction == PF_OUT) {
            psrc = &(*state)->state_key->gwy;
            pdst = &(*state)->state_key->ext;
            psrc = &(*state)->state_key->ext;
            pdst = &(*state)->state_key->lan;
        if (direction == (*state)->state_key->direction) {
            if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
                (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
                (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
                REASON_SET(reason, PFRES_SYNPROXY);
            (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
            if ((*state)->dst.seqhi == 1)
                (*state)->dst.seqhi = htonl(random());
            pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
#ifndef NO_APPLE_EXTENSIONS
                &pdst->addr, psrc->xport.port, pdst->xport.port,
                &pdst->addr, psrc->port, pdst->port,
                (*state)->dst.seqhi, 0, TH_SYN, 0,
                (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
            REASON_SET(reason, PFRES_SYNPROXY);
            return (PF_SYNPROXY_DROP);
        } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
            (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
            REASON_SET(reason, PFRES_SYNPROXY);
            (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
            (*state)->dst.seqlo = ntohl(th->th_seq);
            pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
                pd->src, th->th_dport, th->th_sport,
                ntohl(th->th_ack), ntohl(th->th_seq) + 1,
                TH_ACK, (*state)->src.max_win, 0, 0, 0,
                (*state)->tag, NULL, NULL);
            pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
#ifndef NO_APPLE_EXTENSIONS
                &pdst->addr, psrc->xport.port, pdst->xport.port,
                &pdst->addr, psrc->port, pdst->port,
                (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
                TH_ACK, (*state)->dst.max_win, 0, 0, 1,
            (*state)->src.seqdiff = (*state)->dst.seqhi -
                (*state)->src.seqlo;
            (*state)->dst.seqdiff = (*state)->src.seqhi -
                (*state)->dst.seqlo;
            (*state)->src.seqhi = (*state)->src.seqlo +
                (*state)->dst.max_win;
            (*state)->dst.seqhi = (*state)->dst.seqlo +
                (*state)->src.max_win;
            (*state)->src.wscale = (*state)->dst.wscale = 0;
            (*state)->src.state = (*state)->dst.state =
            REASON_SET(reason, PFRES_SYNPROXY);
            return (PF_SYNPROXY_DROP);

    if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
        dst->state >= TCPS_FIN_WAIT_2 &&
        src->state >= TCPS_FIN_WAIT_2) {
        if (pf_status.debug >= PF_DEBUG_MISC) {
            printf("pf: state reuse ");
            pf_print_state(*state);
            pf_print_flags(th->th_flags);
        /* XXX make sure it's the same direction ?? */
        (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
        pf_unlink_state(*state);

    if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
        sws = src->wscale & PF_WSCALE_MASK;
        dws = dst->wscale & PF_WSCALE_MASK;

    /*
     * Sequence tracking algorithm from Guido van Rooij's paper:
     * http://www.madison-gurkha.com/publications/tcp_filtering/
     */

    orig_seq = seq = ntohl(th->th_seq);
    if (src->seqlo == 0) {
        /* First packet from this end. Set its state */

        if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
            src->scrub == NULL) {
            if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
                REASON_SET(reason, PFRES_MEMORY);

        /* Deferred generation of sequence number modulator */
        if (dst->seqdiff && !src->seqdiff) {
            /* use random iss for the TCP server */
            while ((src->seqdiff = random() - seq) == 0)
            ack = ntohl(th->th_ack) - dst->seqdiff;
            pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
            pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
            copyback = off + sizeof (*th);
            ack = ntohl(th->th_ack);

        end = seq + pd->p_len;
        if (th->th_flags & TH_SYN) {
            if (dst->wscale & PF_WSCALE_FLAG) {
                src->wscale = pf_get_wscale(m, off, th->th_off,
                if (src->wscale & PF_WSCALE_FLAG) {
                    /*
                     * Remove scale factor from initial
                     */
                    sws = src->wscale & PF_WSCALE_MASK;
                    win = ((u_int32_t)win + (1 << sws) - 1)
                    dws = dst->wscale & PF_WSCALE_MASK;
#ifndef NO_APPLE_MODIFICATION
                    /*
                     * Window scale negotiation has failed,
                     * therefore we must restore the window
                     * scale in the state record that we
                     * optimistically removed in
                     * pf_test_rule().  Care is required to
                     * prevent arithmetic overflow from
                     * zeroing the window when it's
                     * truncated down to 16-bits. --jhw
                     */
                    u_int32_t _win = dst->max_win;
                    _win <<= dst->wscale & PF_WSCALE_MASK;
                    dst->max_win = MIN(0xffff, _win);
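                    /*
                     * For example, max_win 0x2000 with wscale 4 scales
                     * to 0x20000; MIN() pins it at 0xffff instead of
                     * letting a bare 16-bit store wrap it to 0x0000.
                     */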
                    /* fixup other window */
                    dst->max_win <<= dst->wscale &
                /* in case of a retrans SYN|ACK */

        if (th->th_flags & TH_FIN)
        if (src->state < TCPS_SYN_SENT)
            src->state = TCPS_SYN_SENT;

        /*
         * May need to slide the window (seqhi may have been set by
         * the crappy stack check or if we picked up the connection
         * after establishment)
         */
#ifndef NO_APPLE_MODIFICATIONS
        if (src->seqhi == 1 ||
            SEQ_GEQ(end + MAX(1, (u_int32_t)dst->max_win << dws),
            src->seqhi = end + MAX(1, (u_int32_t)dst->max_win << dws);
        if (src->seqhi == 1 ||
            SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
            src->seqhi = end + MAX(1, dst->max_win << dws);
        if (win > src->max_win)

        ack = ntohl(th->th_ack) - dst->seqdiff;
            /* Modulate sequence numbers */
            pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
            pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
            copyback = off + sizeof (*th);
        end = seq + pd->p_len;
        if (th->th_flags & TH_SYN)
        if (th->th_flags & TH_FIN)

    if ((th->th_flags & TH_ACK) == 0) {
        /* Let it pass through the ack skew check */
    } else if ((ack == 0 &&
        (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
        /* broken tcp stacks do not set ack */
        (dst->state < TCPS_SYN_SENT)) {
        /*
         * Many stacks (ours included) will set the ACK number in an
         * FIN|ACK if the SYN times out -- no sequence to ACK.
         */

    /* Ease sequencing restrictions on no data packets */

    ackskew = dst->seqlo - ack;

    /*
     * Need to demodulate the sequence numbers in any TCP SACK options
     * (Selective ACK). We could optionally validate the SACK values
     * against the current ACK window, either forwards or backwards, but
     * I'm not confident that SACK has been implemented properly
     * everywhere. It wouldn't surprise me if several stacks accidently
     * SACK too far backwards of previously ACKed data. There really aren't
     * any security implications of bad SACKing unless the target stack
     * doesn't validate the option length correctly. Someone trying to
     * spoof into a TCP connection won't bother blindly sending SACK
     */
    if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) {
#ifndef NO_APPLE_EXTENSIONS
        copyback = pf_modulate_sack(m, off, pd, th, dst);
        if (copyback == -1) {
            REASON_SET(reason, PFRES_MEMORY);
        if (pf_modulate_sack(m, off, pd, th, dst))

#define MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
    if (SEQ_GEQ(src->seqhi, end) &&
        /* Last octet inside other's window space */
#ifndef NO_APPLE_MODIFICATIONS
        SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) &&
        SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
        /* Retrans: not more than one window back */
        (ackskew >= -MAXACKWINDOW) &&
        /* Acking not more than one reassembled fragment backwards */
        (ackskew <= (MAXACKWINDOW << sws)) &&
        /* Acking not more than one window forward */
        ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
        (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
        (pd->flags & PFDESC_IP_REAS) == 0)) {
        /* Require an exact/+1 sequence match on resets when possible */

        if (dst->scrub || src->scrub) {
            if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
                *state, src, dst, &copyback))
#ifndef NO_APPLE_EXTENSIONS

        /* update max window */
        if (src->max_win < win)
        /* synchronize sequencing */
        if (SEQ_GT(end, src->seqlo))
        /* slide the window of what the other end can send */
#ifndef NO_APPLE_MODIFICATIONS
        if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
            dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
        if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
            dst->seqhi = ack + MAX((win << sws), 1);

        if (th->th_flags & TH_SYN)
            if (src->state < TCPS_SYN_SENT)
                src->state = TCPS_SYN_SENT;
        if (th->th_flags & TH_FIN)
            if (src->state < TCPS_CLOSING)
                src->state = TCPS_CLOSING;
        if (th->th_flags & TH_ACK) {
            if (dst->state == TCPS_SYN_SENT) {
                dst->state = TCPS_ESTABLISHED;
                if (src->state == TCPS_ESTABLISHED &&
                    (*state)->src_node != NULL &&
                    pf_src_connlimit(state)) {
                    REASON_SET(reason, PFRES_SRCLIMIT);
            } else if (dst->state == TCPS_CLOSING)
                dst->state = TCPS_FIN_WAIT_2;
        if (th->th_flags & TH_RST)
            src->state = dst->state = TCPS_TIME_WAIT;

        /* update expire time */
        (*state)->expire = pf_time_second();
        if (src->state >= TCPS_FIN_WAIT_2 &&
            dst->state >= TCPS_FIN_WAIT_2)
            (*state)->timeout = PFTM_TCP_CLOSED;
        else if (src->state >= TCPS_CLOSING &&
            dst->state >= TCPS_CLOSING)
            (*state)->timeout = PFTM_TCP_FIN_WAIT;
        else if (src->state < TCPS_ESTABLISHED ||
            dst->state < TCPS_ESTABLISHED)
            (*state)->timeout = PFTM_TCP_OPENING;
        else if (src->state >= TCPS_CLOSING ||
            dst->state >= TCPS_CLOSING)
            (*state)->timeout = PFTM_TCP_CLOSING;
            (*state)->timeout = PFTM_TCP_ESTABLISHED;

        /* Fall through to PASS packet */

    } else if ((dst->state < TCPS_SYN_SENT ||
        dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) &&
        SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
        /* Within a window forward of the originating packet */
        SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
        /* Within a window backward of the originating packet */

        /*
         * This currently handles three situations:
         *  1) Stupid stacks will shotgun SYNs before their peer
         *  2) When PF catches an already established stream (the
         *     firewall rebooted, the state table was flushed, routes
         *  3) Packets get funky immediately after the connection
         *     closes (this should catch Solaris spurious ACK|FINs
         *     that web servers like to spew after a close)
         * This must be a little more careful than the above code
         * since packet floods will also be caught here. We don't
         * update the TTL here to mitigate the damage of a packet
         * flood and so the same code can handle awkward establishment
         * and a loosened connection close.
         * In the establishment case, a correct peer response will
         * validate the connection, go through the normal state code
         * and keep updating the state TTL.
         */

        if (pf_status.debug >= PF_DEBUG_MISC) {
            printf("pf: loose state match: ");
            pf_print_state(*state);
            pf_print_flags(th->th_flags);
            printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
                "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
                pd->p_len, ackskew, (*state)->packets[0],
                (*state)->packets[1],
                direction == PF_IN ? "in" : "out",
                direction == (*state)->state_key->direction ?

        if (dst->scrub || src->scrub) {
            if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
                *state, src, dst, &copyback))
#ifndef NO_APPLE_EXTENSIONS

        /* update max window */
        if (src->max_win < win)
        /* synchronize sequencing */
        if (SEQ_GT(end, src->seqlo))
        /* slide the window of what the other end can send */
#ifndef NO_APPLE_MODIFICATIONS
        if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
            dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
        if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
            dst->seqhi = ack + MAX((win << sws), 1);

        /*
         * Cannot set dst->seqhi here since this could be a shotgunned
         * SYN and not an already established connection.
         */

        if (th->th_flags & TH_FIN)
            if (src->state < TCPS_CLOSING)
                src->state = TCPS_CLOSING;
        if (th->th_flags & TH_RST)
            src->state = dst->state = TCPS_TIME_WAIT;

        /* Fall through to PASS packet */

        if ((*state)->dst.state == TCPS_SYN_SENT &&
            (*state)->src.state == TCPS_SYN_SENT) {
            /* Send RST for state mismatches during handshake */
            if (!(th->th_flags & TH_RST))
                pf_send_tcp((*state)->rule.ptr, pd->af,
                    pd->dst, pd->src, th->th_dport,
                    th->th_sport, ntohl(th->th_ack), 0,
                    (*state)->rule.ptr->return_ttl, 1, 0,
                    pd->eh, kif->pfik_ifp);
        } else if (pf_status.debug >= PF_DEBUG_MISC) {
            printf("pf: BAD state: ");
            pf_print_state(*state);
            pf_print_flags(th->th_flags);
            printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
                "pkts=%llu:%llu dir=%s,%s\n",
                seq, orig_seq, ack, pd->p_len, ackskew,
                (*state)->packets[0], (*state)->packets[1],
                direction == PF_IN ? "in" : "out",
                direction == (*state)->state_key->direction ?
            printf("pf: State failure on: %c %c %c %c | %c %c\n",
                SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
#ifndef NO_APPLE_MODIFICATIONS
                src->seqlo - ((u_int32_t)dst->max_win << dws)) ?
                SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
                (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
                (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
                SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
                SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
        REASON_SET(reason, PFRES_BADSTATE);

    /* Any packets which have gotten here are to be passed */

#ifndef NO_APPLE_EXTENSIONS
    if ((*state)->state_key->app_state &&
        (*state)->state_key->app_state->handler) {
        (*state)->state_key->app_state->handler(*state, direction,
            off + (th->th_off << 2), pd, kif);
            REASON_SET(reason, PFRES_MEMORY);

    /* translate source/destination address, if necessary */
    if (STATE_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT)
            pf_change_ap(direction, pd->mp, pd->src, &th->th_sport,
                pd->ip_sum, &th->th_sum,
                &(*state)->state_key->gwy.addr,
                (*state)->state_key->gwy.xport.port, 0, pd->af);
            pf_change_ap(direction, pd->mp, pd->dst, &th->th_dport,
                pd->ip_sum, &th->th_sum,
                &(*state)->state_key->lan.addr,
                (*state)->state_key->lan.xport.port, 0, pd->af);
        copyback = off + sizeof (*th);

        m = pf_lazy_makewritable(pd, m, copyback);
            REASON_SET(reason, PFRES_MEMORY);

        /* Copyback sequence modulation or stateful scrub changes */
        m_copyback(m, off, sizeof (*th), th);

    /* translate source/destination address, if necessary */
    if (STATE_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT)
            pf_change_ap(pd->src, pd->mp, &th->th_sport, pd->ip_sum,
                &th->th_sum, &(*state)->state_key->gwy.addr,
                (*state)->state_key->gwy.port, 0, pd->af);
            pf_change_ap(pd->dst, pd->mp, &th->th_dport, pd->ip_sum,
                &th->th_sum, &(*state)->state_key->lan.addr,
                (*state)->state_key->lan.port, 0, pd->af);
        m_copyback(m, off, sizeof (*th), th);
    } else if (copyback) {
        /* Copyback sequence modulation or stateful scrub changes */
        m_copyback(m, off, sizeof (*th), th);
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)

    struct pf_state_peer *src, *dst;
    struct pf_state_key_cmp key;
    struct udphdr *uh = pd->hdr.udp;
#ifndef NO_APPLE_EXTENSIONS
    struct pf_app_state as;
    int dx, action, extfilter;

    key.proto_variant = PF_EXTFILTER_APD;

    key.proto = IPPROTO_UDP;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.ext.xport.port = uh->uh_sport;
        key.gwy.xport.port = uh->uh_dport;
        key.ext.port = uh->uh_sport;
        key.gwy.port = uh->uh_dport;
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.lan.xport.port = uh->uh_sport;
        key.ext.xport.port = uh->uh_dport;
        key.lan.port = uh->uh_sport;
        key.ext.port = uh->uh_dport;

#ifndef NO_APPLE_EXTENSIONS
    if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
        ntohs(uh->uh_dport) == PF_IKE_PORT) {
        struct pf_ike_hdr ike;
        size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
        if (plen < PF_IKE_PACKET_MINSIZE) {
            DPFPRINTF(PF_DEBUG_MISC,
                ("pf: IKE message too small.\n"));

        if (plen > sizeof (ike))
            plen = sizeof (ike);
        m_copydata(m, off + sizeof (*uh), plen, &ike);

        if (ike.initiator_cookie) {
            key.app_state = &as;
            as.compare_lan_ext = pf_ike_compare;
            as.compare_ext_gwy = pf_ike_compare;
            as.u.ike.cookie = ike.initiator_cookie;
            /*
             * <http://tools.ietf.org/html/\
             * draft-ietf-ipsec-nat-t-ike-01>
             * Support non-standard NAT-T implementations that
             * push the ESP packet over the top of the IKE packet.
             * Do not drop packet.
             */
            DPFPRINTF(PF_DEBUG_MISC,
                ("pf: IKE initiator cookie = 0.\n"));

    *state = pf_find_state(kif, &key, dx);

    if (!key.app_state && *state == 0) {
        key.proto_variant = PF_EXTFILTER_AD;
        *state = pf_find_state(kif, &key, dx);

    if (!key.app_state && *state == 0) {
        key.proto_variant = PF_EXTFILTER_EI;
        *state = pf_find_state(kif, &key, dx);

    if (pf_state_lookup_aux(state, kif, direction, &action))

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
        src = &(*state)->dst;
        dst = &(*state)->src;

    if (src->state < PFUDPS_SINGLE)
        src->state = PFUDPS_SINGLE;
    if (dst->state == PFUDPS_SINGLE)
        dst->state = PFUDPS_MULTIPLE;

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
        (*state)->timeout = PFTM_UDP_MULTIPLE;
        (*state)->timeout = PFTM_UDP_SINGLE;

#ifndef NO_APPLE_EXTENSIONS
    extfilter = (*state)->state_key->proto_variant;
    if (extfilter > PF_EXTFILTER_APD) {
        (*state)->state_key->ext.xport.port = key.ext.xport.port;
        if (extfilter > PF_EXTFILTER_AD)
            PF_ACPY(&(*state)->state_key->ext.addr,
                &key.ext.addr, key.af);
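    /*
     * For the looser extfilter variants the lookup above matched with a
     * wildcard external endpoint; the endpoint actually seen on this
     * packet is written back into the state key (port for the
     * address-dependent variant, port and address for the
     * endpoint-independent one).
     */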
    if ((*state)->state_key->app_state &&
        (*state)->state_key->app_state->handler) {
        (*state)->state_key->app_state->handler(*state, direction,
            off + uh->uh_ulen, pd, kif);
            REASON_SET(reason, PFRES_MEMORY);

    /* translate source/destination address, if necessary */
#ifndef NO_APPLE_EXTENSIONS
    if (STATE_TRANSLATE((*state)->state_key)) {
        m = pf_lazy_makewritable(pd, m, off + sizeof (*uh));

        if (direction == PF_OUT)
            pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
                pd->ip_sum, &uh->uh_sum,
                &(*state)->state_key->gwy.addr,
                (*state)->state_key->gwy.xport.port, 1, pd->af);
            pf_change_ap(direction, pd->mp, pd->dst, &uh->uh_dport,
                pd->ip_sum, &uh->uh_sum,
                &(*state)->state_key->lan.addr,
                (*state)->state_key->lan.xport.port, 1, pd->af);
        m_copyback(m, off, sizeof (*uh), uh);

    if (STATE_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT)
            pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
                &uh->uh_sum, &(*state)->state_key->gwy.addr,
                (*state)->state_key->gwy.port, 1, pd->af);
            pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
                &uh->uh_sum, &(*state)->state_key->lan.addr,
                (*state)->state_key->lan.port, 1, pd->af);
        m_copyback(m, off, sizeof (*uh), uh);
pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)

    struct pf_addr *saddr = pd->src, *daddr = pd->dst;
    u_int16_t icmpid = 0, *icmpsum;
    struct pf_state_key_cmp key;
#ifndef NO_APPLE_EXTENSIONS
    struct pf_app_state as;

    switch (pd->proto) {
        icmptype = pd->hdr.icmp->icmp_type;
        icmpid = pd->hdr.icmp->icmp_id;
        icmpsum = &pd->hdr.icmp->icmp_cksum;

        if (icmptype == ICMP_UNREACH ||
            icmptype == ICMP_SOURCEQUENCH ||
            icmptype == ICMP_REDIRECT ||
            icmptype == ICMP_TIMXCEED ||
            icmptype == ICMP_PARAMPROB)
    case IPPROTO_ICMPV6:
        icmptype = pd->hdr.icmp6->icmp6_type;
        icmpid = pd->hdr.icmp6->icmp6_id;
        icmpsum = &pd->hdr.icmp6->icmp6_cksum;

        if (icmptype == ICMP6_DST_UNREACH ||
            icmptype == ICMP6_PACKET_TOO_BIG ||
            icmptype == ICMP6_TIME_EXCEEDED ||
            icmptype == ICMP6_PARAM_PROB)

        /*
         * ICMP query/reply message not related to a TCP/UDP packet.
         * Search for an ICMP state.
         */
        key.proto = pd->proto;
        if (direction == PF_IN) {
            PF_ACPY(&key.ext.addr, pd->src, key.af);
            PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.ext.xport.port = 0;
            key.gwy.xport.port = icmpid;
            key.gwy.port = icmpid;
            PF_ACPY(&key.lan.addr, pd->src, key.af);
            PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.lan.xport.port = icmpid;
            key.ext.xport.port = 0;
            key.lan.port = icmpid;

        (*state)->expire = pf_time_second();
        (*state)->timeout = PFTM_ICMP_ERROR_REPLY;

        /* translate source/destination address, if necessary */
        if (STATE_TRANSLATE((*state)->state_key)) {
            if (direction == PF_OUT) {
                    pf_change_a(&saddr->v4.s_addr,
                        (*state)->state_key->gwy.addr.v4.s_addr, 0);
#ifndef NO_APPLE_EXTENSIONS
                    pd->hdr.icmp->icmp_cksum =
                        pd->hdr.icmp->icmp_cksum, icmpid,
                        (*state)->state_key->gwy.xport.port, 0);
                    pd->hdr.icmp->icmp_id =
                        (*state)->state_key->gwy.xport.port;
                    m = pf_lazy_makewritable(pd, m,
                    pd->hdr.icmp->icmp_cksum =
                        pd->hdr.icmp->icmp_cksum, icmpid,
                        (*state)->state_key->gwy.port, 0);
                    pd->hdr.icmp->icmp_id =
                        (*state)->state_key->gwy.port;
                    m_copyback(m, off, ICMP_MINLEN,
                        &pd->hdr.icmp6->icmp6_cksum,
                        &(*state)->state_key->gwy.addr, 0);
#ifndef NO_APPLE_EXTENSIONS
                    m = pf_lazy_makewritable(pd, m,
                        off + sizeof (struct icmp6_hdr));
                        sizeof (struct icmp6_hdr),
                    pf_change_a(&daddr->v4.s_addr,
                        (*state)->state_key->lan.addr.v4.s_addr, 0);
#ifndef NO_APPLE_EXTENSIONS
                    pd->hdr.icmp->icmp_cksum =
                        pd->hdr.icmp->icmp_cksum, icmpid,
                        (*state)->state_key->lan.xport.port, 0);
                    pd->hdr.icmp->icmp_id =
                        (*state)->state_key->lan.xport.port;
                    m = pf_lazy_makewritable(pd, m,
                    pd->hdr.icmp->icmp_cksum =
                        pd->hdr.icmp->icmp_cksum, icmpid,
                        (*state)->state_key->lan.port, 0);
                    pd->hdr.icmp->icmp_id =
                        (*state)->state_key->lan.port;
                    m_copyback(m, off, ICMP_MINLEN,
                        &pd->hdr.icmp6->icmp6_cksum,
                        &(*state)->state_key->lan.addr, 0);
#ifndef NO_APPLE_EXTENSIONS
                    m = pf_lazy_makewritable(pd, m,
                        off + sizeof (struct icmp6_hdr));
                        sizeof (struct icmp6_hdr),

        /*
         * ICMP error message in response to a TCP/UDP packet.
         * Extract the inner TCP/UDP header and search for that state.
         */
        struct pf_pdesc pd2;
        struct ip6_hdr h2_6;

        memset(&pd2, 0, sizeof (pd2));

            /* offset of h2 in mbuf chain */
            ipoff2 = off + ICMP_MINLEN;

            if (!pf_pull_hdr(m, ipoff2, &h2, sizeof (h2),
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "
            /*
             * ICMP error messages don't refer to non-first
             */
            if (h2.ip_off & htons(IP_OFFMASK)) {
                REASON_SET(reason, PFRES_FRAG);

            /* offset of protocol header that follows h2 */
            off2 = ipoff2 + (h2.ip_hl << 2);

            pd2.proto = h2.ip_p;
            pd2.src = (struct pf_addr *)&h2.ip_src;
            pd2.dst = (struct pf_addr *)&h2.ip_dst;
            pd2.ip_sum = &h2.ip_sum;

            ipoff2 = off + sizeof (struct icmp6_hdr);

            if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof (h2_6),
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "
            pd2.proto = h2_6.ip6_nxt;
            pd2.src = (struct pf_addr *)&h2_6.ip6_src;
            pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
            off2 = ipoff2 + sizeof (h2_6);

                switch (pd2.proto) {
                case IPPROTO_FRAGMENT:
                    /*
                     * ICMPv6 error messages for
                     * non-first fragments
                     */
                    REASON_SET(reason, PFRES_FRAG);
                case IPPROTO_HOPOPTS:
                case IPPROTO_ROUTING:
                case IPPROTO_DSTOPTS: {
                    /* get next header and header length */
                    struct ip6_ext opt6;

                    if (!pf_pull_hdr(m, off2, &opt6,
                        sizeof (opt6), NULL, reason,
                        DPFPRINTF(PF_DEBUG_MISC,
                            ("pf: ICMPv6 short opt\n"));
                    if (pd2.proto == IPPROTO_AH)
                        off2 += (opt6.ip6e_len + 2) * 4;
                        off2 += (opt6.ip6e_len + 1) * 8;
                    pd2.proto = opt6.ip6e_nxt;
                    /* goto the next header */
            } while (!terminal);
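            /*
             * The loop above walks the embedded packet's IPv6
             * extension-header chain: AH lengths are counted in
             * 4-byte units ((ip6e_len + 2) * 4), other options in
             * 8-byte units ((ip6e_len + 1) * 8), until a terminal
             * upper-layer header is reached.
             */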
        switch (pd2.proto) {

            struct pf_state_peer *src, *dst;

            /*
             * Only the first 8 bytes of the TCP header can be
             * expected. Don't access any TCP header fields after
             * th_seq, an ackskew test is not possible.
             */
            if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "

            key.proto = IPPROTO_TCP;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = th.th_dport;
                key.gwy.xport.port = th.th_sport;
                key.ext.port = th.th_dport;
                key.gwy.port = th.th_sport;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = th.th_dport;
                key.ext.xport.port = th.th_sport;
                key.lan.port = th.th_dport;
                key.ext.port = th.th_sport;

            if (direction == (*state)->state_key->direction) {
                src = &(*state)->dst;
                dst = &(*state)->src;
                src = &(*state)->src;
                dst = &(*state)->dst;

            if (src->wscale && dst->wscale)
                dws = dst->wscale & PF_WSCALE_MASK;

                /* Demodulate sequence number */
                seq = ntohl(th.th_seq) - src->seqdiff;
                    pf_change_a(&th.th_seq, icmpsum,

            if (!SEQ_GEQ(src->seqhi, seq) ||
#ifndef NO_APPLE_MODIFICATION
                src->seqlo - ((u_int32_t)dst->max_win << dws))) {
                !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws))) {
                if (pf_status.debug >= PF_DEBUG_MISC) {
                    printf("pf: BAD ICMP %d:%d ",
                        icmptype, pd->hdr.icmp->icmp_code);
                    pf_print_host(pd->src, 0, pd->af);
                    pf_print_host(pd->dst, 0, pd->af);
                    pf_print_state(*state);
                    printf(" seq=%u\n", seq);
                REASON_SET(reason, PFRES_BADSTATE);

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &th.th_sport,
                        daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->lan.xport.port, NULL,
                        (*state)->state_key->lan.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                    pf_change_icmp(pd2.dst, &th.th_dport,
                        saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->gwy.xport.port, NULL,
                        (*state)->state_key->gwy.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);

#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m, off2 + 8);
                    m_copyback(m, off, ICMP_MINLEN,
                    m_copyback(m, ipoff2, sizeof (h2),
                        sizeof (struct icmp6_hdr),
                    m_copyback(m, ipoff2, sizeof (h2_6),
                m_copyback(m, off2, 8, &th);

#ifndef NO_APPLE_EXTENSIONS
            if (!pf_pull_hdr(m, off2, &uh, sizeof (uh),
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "

            key.proto = IPPROTO_UDP;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = uh.uh_dport;
                key.gwy.xport.port = uh.uh_sport;
                key.ext.port = uh.uh_dport;
                key.gwy.port = uh.uh_sport;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = uh.uh_dport;
                key.ext.xport.port = uh.uh_sport;
                key.lan.port = uh.uh_dport;
                key.ext.port = uh.uh_sport;

#ifndef NO_APPLE_EXTENSIONS
            key.proto_variant = PF_EXTFILTER_APD;

            if (ntohs(uh.uh_sport) == PF_IKE_PORT &&
                ntohs(uh.uh_dport) == PF_IKE_PORT) {
                struct pf_ike_hdr ike;
                    m->m_pkthdr.len - off2 - sizeof (uh);
                if (direction == PF_IN &&
                    plen < 8 /* PF_IKE_PACKET_MINSIZE */) {
                    DPFPRINTF(PF_DEBUG_MISC, ("pf: "
                        "ICMP error, embedded IKE message "

                if (plen > sizeof (ike))
                    plen = sizeof (ike);
                m_copydata(m, off + sizeof (uh), plen, &ike);

                key.app_state = &as;
                as.compare_lan_ext = pf_ike_compare;
                as.compare_ext_gwy = pf_ike_compare;
                as.u.ike.cookie = ike.initiator_cookie;

            *state = pf_find_state(kif, &key, dx);

            if (key.app_state && *state == 0) {
                *state = pf_find_state(kif, &key, dx);

                key.proto_variant = PF_EXTFILTER_AD;
                *state = pf_find_state(kif, &key, dx);

                key.proto_variant = PF_EXTFILTER_EI;
                *state = pf_find_state(kif, &key, dx);

            if (pf_state_lookup_aux(state, kif, direction, &action))

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &uh.uh_sport,
                        daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->lan.xport.port, &uh.uh_sum,
                        (*state)->state_key->lan.port, &uh.uh_sum,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 1, pd2.af);
                    pf_change_icmp(pd2.dst, &uh.uh_dport,
                        saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->gwy.xport.port, &uh.uh_sum,
                        (*state)->state_key->gwy.port, &uh.uh_sum,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 1, pd2.af);
#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m,
                    off2 + sizeof (uh));
                    m_copyback(m, off, ICMP_MINLEN,
                    m_copyback(m, ipoff2, sizeof (h2), &h2);
                        sizeof (struct icmp6_hdr),
                    m_copyback(m, ipoff2, sizeof (h2_6),
                m_copyback(m, off2, sizeof (uh), &uh);

        case IPPROTO_ICMP: {
            if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
                NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short i"

            key.proto = IPPROTO_ICMP;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = 0;
                key.gwy.xport.port = iih.icmp_id;
                key.gwy.port = iih.icmp_id;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = iih.icmp_id;
                key.ext.xport.port = 0;
                key.lan.port = iih.icmp_id;

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &iih.icmp_id,
                        daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->lan.xport.port, NULL,
                        (*state)->state_key->lan.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET);
                    pf_change_icmp(pd2.dst, &iih.icmp_id,
                        saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->gwy.xport.port, NULL,
                        (*state)->state_key->gwy.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET);
#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m, off2 + ICMP_MINLEN);
                m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
                m_copyback(m, ipoff2, sizeof (h2), &h2);
                m_copyback(m, off2, ICMP_MINLEN, &iih);

        case IPPROTO_ICMPV6: {
            struct icmp6_hdr iih;

            if (!pf_pull_hdr(m, off2, &iih,
                sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: ICMP error message too short "

            key.proto = IPPROTO_ICMPV6;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = 0;
                key.gwy.xport.port = iih.icmp6_id;
                key.gwy.port = iih.icmp6_id;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = iih.icmp6_id;
                key.ext.xport.port = 0;
                key.lan.port = iih.icmp6_id;

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, &iih.icmp6_id,
                        daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->lan.xport.port, NULL,
                        (*state)->state_key->lan.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET6);
                    pf_change_icmp(pd2.dst, &iih.icmp6_id,
                        saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                        (*state)->state_key->gwy.xport.port, NULL,
                        (*state)->state_key->gwy.port, NULL,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, AF_INET6);
#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m, off2 +
                    sizeof (struct icmp6_hdr));
                m_copyback(m, off, sizeof (struct icmp6_hdr),
                m_copyback(m, ipoff2, sizeof (h2_6), &h2_6);
                m_copyback(m, off2, sizeof (struct icmp6_hdr),

            key.proto = pd2.proto;
            if (direction == PF_IN) {
                PF_ACPY(&key.ext.addr, pd2.dst, key.af);
                PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.ext.xport.port = 0;
                key.gwy.xport.port = 0;
                PF_ACPY(&key.lan.addr, pd2.dst, key.af);
                PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
                key.lan.xport.port = 0;
                key.ext.xport.port = 0;

            if (STATE_TRANSLATE((*state)->state_key)) {
                if (direction == PF_IN) {
                    pf_change_icmp(pd2.src, NULL,
                        daddr, &(*state)->state_key->lan.addr,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);
                    pf_change_icmp(pd2.dst, NULL,
                        saddr, &(*state)->state_key->gwy.addr,
                        pd2.ip_sum, icmpsum,
                        pd->ip_sum, 0, pd2.af);

#ifndef NO_APPLE_EXTENSIONS
                    m = pf_lazy_makewritable(pd, m,
                        ipoff2 + sizeof (h2));
                    m_copyback(m, off, ICMP_MINLEN,
                    m_copyback(m, ipoff2, sizeof (h2), &h2);
#ifndef NO_APPLE_EXTENSIONS
                    m = pf_lazy_makewritable(pd, m,
                        ipoff2 + sizeof (h2_6));
                        sizeof (struct icmp6_hdr),
                    m_copyback(m, ipoff2, sizeof (h2_6),
#ifndef NO_APPLE_EXTENSIONS
static int
pf_test_state_grev1(struct pf_state **state, int direction,
    struct pfi_kif *kif, int off, struct pf_pdesc *pd)
{
	struct pf_state_peer	*src;
	struct pf_state_peer	*dst;
	struct pf_state_key_cmp	 key;
	struct pf_grev1_hdr	*grev1 = pd->hdr.grev1;
	struct mbuf		*m;

#ifndef NO_APPLE_EXTENSIONS
	/* ... */
#endif
	key.proto = IPPROTO_GRE;
	key.proto_variant = PF_GRE_PPTP_VARIANT;
	if (direction == PF_IN) {
		PF_ACPY(&key.ext.addr, pd->src, key.af);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af);
		key.gwy.xport.call_id = grev1->call_id;
	} else {
		PF_ACPY(&key.lan.addr, pd->src, key.af);
		PF_ACPY(&key.ext.addr, pd->dst, key.af);
		key.ext.xport.call_id = grev1->call_id;
	}

	/* ... */

	if (direction == (*state)->state_key->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	if (src->state < PFGRE1S_INITIATING)
		src->state = PFGRE1S_INITIATING;

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state >= PFGRE1S_INITIATING &&
	    dst->state >= PFGRE1S_INITIATING) {
		(*state)->timeout = PFTM_GREv1_ESTABLISHED;
		src->state = PFGRE1S_ESTABLISHED;
		dst->state = PFGRE1S_ESTABLISHED;
	} else {
		(*state)->timeout = PFTM_GREv1_INITIATING;
	}

	/* translate source/destination address, if necessary */
	if (STATE_GRE_TRANSLATE((*state)->state_key)) {
		if (direction == PF_OUT) {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->src->v4.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->gwy.addr.v4.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
				    pd->af);
				break;
			}
			grev1->call_id = (*state)->state_key->lan.xport.call_id;
		} else {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->dst->v4.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->lan.addr.v4.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
				    pd->af);
				break;
			}
			/* ... */
		}

		m = pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1));
		/* ... */
		m_copyback(m, off, sizeof (*grev1), grev1);
	}

	/* ... */
}
static int
pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
    int off, struct pf_pdesc *pd)
{
	struct pf_state_peer	*src;
	struct pf_state_peer	*dst;
	struct pf_state_key_cmp	 key;
	struct pf_esp_hdr	*esp = pd->hdr.esp;
	int			 action;

	memset(&key, 0, sizeof (key));
	/* ... */
	key.proto = IPPROTO_ESP;
	if (direction == PF_IN) {
		PF_ACPY(&key.ext.addr, pd->src, key.af);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af);
		key.gwy.xport.spi = esp->spi;
	} else {
		PF_ACPY(&key.lan.addr, pd->src, key.af);
		PF_ACPY(&key.ext.addr, pd->dst, key.af);
		key.ext.xport.spi = esp->spi;
	}

	*state = pf_find_state(kif, &key, direction);

	if (*state == NULL) {
		struct pf_state *s;

		/*
		 * No matching state.  Look for a blocking state.  If we find
		 * one, then use that state and move it so that it's keyed to
		 * the SPI in the current packet.
		 */
		if (direction == PF_IN) {
			key.gwy.xport.spi = 0;

			s = pf_find_state(kif, &key, direction);
			if (s != NULL) {
				struct pf_state_key *sk = s->state_key;

				RB_REMOVE(pf_state_tree_ext_gwy,
				    &pf_statetbl_ext_gwy, sk);
				sk->lan.xport.spi = sk->gwy.xport.spi =
				    esp->spi;

				if (RB_INSERT(pf_state_tree_ext_gwy,
				    &pf_statetbl_ext_gwy, sk))
					pf_detach_state(s, PF_DT_SKIP_EXTGWY);
				/* ... */
			}
		} else {
			key.ext.xport.spi = 0;

			s = pf_find_state(kif, &key, direction);
			if (s != NULL) {
				struct pf_state_key *sk = s->state_key;

				RB_REMOVE(pf_state_tree_lan_ext,
				    &pf_statetbl_lan_ext, sk);
				sk->ext.xport.spi = esp->spi;

				if (RB_INSERT(pf_state_tree_lan_ext,
				    &pf_statetbl_lan_ext, sk))
					pf_detach_state(s, PF_DT_SKIP_LANEXT);
				/* ... */
			}
		}

		/* ... */
			if (s->creatorid == pf_status.hostid)
				pfsync_delete_state(s);
			/* ... */
			s->timeout = PFTM_UNLINKED;
			hook_runloop(&s->unlink_hooks,
			    HOOK_REMOVE|HOOK_FREE);
			pf_src_tree_remove_state(s);
		/* ... */
	}

	if (pf_state_lookup_aux(state, kif, direction, &action))
		return (action);

	if (direction == (*state)->state_key->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	if (src->state < PFESPS_INITIATING)
		src->state = PFESPS_INITIATING;

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state >= PFESPS_INITIATING &&
	    dst->state >= PFESPS_INITIATING) {
		(*state)->timeout = PFTM_ESP_ESTABLISHED;
		src->state = PFESPS_ESTABLISHED;
		dst->state = PFESPS_ESTABLISHED;
	} else {
		(*state)->timeout = PFTM_ESP_INITIATING;
	}

	/* translate source/destination address, if necessary */
	if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
		if (direction == PF_OUT) {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->src->v4.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->gwy.addr.v4.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
				    pd->af);
				break;
			}
		} else {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->dst->v4.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->lan.addr.v4.s_addr, 0);
				break;
			case AF_INET6:
				PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
				    pd->af);
				break;
			}
		}
	}

	/* ... */
}
static int
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct pf_pdesc *pd)
{
	struct pf_state_peer	*src, *dst;
	struct pf_state_key_cmp	 key;

#ifndef NO_APPLE_EXTENSIONS
	/* ... */
#endif
	key.proto = pd->proto;
	if (direction == PF_IN) {
		PF_ACPY(&key.ext.addr, pd->src, key.af);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
		key.ext.xport.port = 0;
		key.gwy.xport.port = 0;
#endif
	} else {
		PF_ACPY(&key.lan.addr, pd->src, key.af);
		PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
		key.lan.xport.port = 0;
		key.ext.xport.port = 0;
#endif
	}

	/* ... */

	if (direction == (*state)->state_key->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	if (src->state < PFOTHERS_SINGLE)
		src->state = PFOTHERS_SINGLE;
	if (dst->state == PFOTHERS_SINGLE)
		dst->state = PFOTHERS_MULTIPLE;

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
		(*state)->timeout = PFTM_OTHER_MULTIPLE;
	else
		(*state)->timeout = PFTM_OTHER_SINGLE;

	/* translate source/destination address, if necessary */
#ifndef NO_APPLE_EXTENSIONS
	if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
#else
	if (STATE_TRANSLATE((*state)->state_key)) {
#endif
		if (direction == PF_OUT) {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->src->v4.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->gwy.addr.v4.s_addr,
				    0);
				break;
			case AF_INET6:
				PF_ACPY(pd->src,
				    &(*state)->state_key->gwy.addr, pd->af);
				break;
			}
		} else {
			switch (pd->af) {
			case AF_INET:
				pf_change_a(&pd->dst->v4.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->lan.addr.v4.s_addr,
				    0);
				break;
			case AF_INET6:
				PF_ACPY(pd->dst,
				    &(*state)->state_key->lan.addr, pd->af);
				break;
			}
		}
	}

	/* ... */
}
/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
void *
pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
    u_short *actionp, u_short *reasonp, sa_family_t af)
{
	switch (af) {
	case AF_INET: {
		struct ip	*h = mtod(m, struct ip *);
		u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

		if (fragoff) {
			if (fragoff >= len) {
				ACTION_SET(actionp, PF_PASS);
			} else {
				ACTION_SET(actionp, PF_DROP);
				REASON_SET(reasonp, PFRES_FRAG);
			}
			return (NULL);
		}
		if (m->m_pkthdr.len < off + len ||
		    ntohs(h->ip_len) < off + len) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
	case AF_INET6: {
		struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);

		if (m->m_pkthdr.len < off + len ||
		    (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) <
		    (unsigned)(off + len)) {
			ACTION_SET(actionp, PF_DROP);
			REASON_SET(reasonp, PFRES_SHORT);
			return (NULL);
		}
		break;
	}
	}
	m_copydata(m, off, len, p);
	return (p);
}
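/*
 * Illustrative sketch (not part of the original source): the typical calling
 * pattern for pf_pull_hdr(), mirroring the TCP case in pf_test() further
 * below.  A fixed-size protocol header is copied out of the mbuf chain
 * before it is inspected.  The helper name is hypothetical.
 */
#if 0
static int
example_pull_tcp_header(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *action, u_short *reason)
{
	struct tcphdr th;

	/* pf_pull_hdr() returns NULL and sets action/reason on failure */
	if (!pf_pull_hdr(m, off, &th, sizeof (th), action, reason, AF_INET))
		return (-1);

	/* payload length = total length minus IP and TCP header lengths */
	pd->p_len = pd->tot_len - off - (th.th_off << 2);
	return (0);
}
#endif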
int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
{
	struct sockaddr_in	*dst;
#if INET6
	struct sockaddr_in6	*dst6;
	struct route_in6	 ro;
#else
	struct route		 ro;
#endif
	int			 ret = 0;

	bzero(&ro, sizeof (ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof (*dst);
		dst->sin_addr = addr->v4;
		break;
#if INET6
	case AF_INET6:
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof (*dst6);
		dst6->sin6_addr = addr->v6;
		break;
#endif /* INET6 */
	default:
		return (0);
	}

	/* XXX: IFT_ENC is not currently used by anything */
	/* Skip checks for ipsec interfaces */
	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
		return (1);

	rtalloc((struct route *)&ro);

	if (ro.ro_rt != NULL) {
		ret = 1;
		/* ... */
	}

	return (ret);
}
int
pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
{
	struct sockaddr_in	*dst;
#if INET6
	struct sockaddr_in6	*dst6;
	struct route_in6	 ro;
#else
	struct route		 ro;
#endif
	/* ... */

	bzero(&ro, sizeof (ro));
	switch (af) {
	case AF_INET:
		dst = satosin(&ro.ro_dst);
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof (*dst);
		dst->sin_addr = addr->v4;
		break;
#if INET6
	case AF_INET6:
		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof (*dst6);
		dst6->sin6_addr = addr->v6;
		break;
#endif /* INET6 */
	default:
		return (0);
	}

	rtalloc((struct route *)&ro);

	if (ro.ro_rt != NULL) {
		/* ... */
	}
	/* ... */
}
static void
pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf		*m0, *m1;
	struct route		 iproute;
	struct route		*ro = NULL;
	struct sockaddr_in	*dst;
	struct ip		*ip;
	struct ifnet		*ifp = NULL;
	struct pf_addr		 naddr;
	struct pf_src_node	*sn = NULL;
	int			 error = 0;
	int			 sw_csum = 0;

	if (m == NULL || *m == NULL || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
		panic("pf_route: invalid parameters");

	if (pd->pf_mtag->routed++ > 3) {
		/* ... */
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
			return;
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir))
			return;
		/* ... */
	}

	if (m0->m_len < (int)sizeof (struct ip)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route: m0->m_len < sizeof (struct ip)\n"));
		goto bad;
	}

	ip = mtod(m0, struct ip *);

	ro = &iproute;
	bzero((caddr_t)ro, sizeof (*ro));
	dst = satosin(&ro->ro_dst);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof (*dst);
	dst->sin_addr = ip->ip_dst;

	if (r->rt == PF_FASTROUTE) {
		rtalloc(ro);
		if (ro->ro_rt == 0) {
			ipstat.ips_noroute++;
			goto bad;
		}

		ifp = ro->ro_rt->rt_ifp;
		ro->ro_rt->rt_use++;

		if (ro->ro_rt->rt_flags & RTF_GATEWAY)
			dst = satosin(ro->ro_rt->rt_gateway);
	} else {
		if (TAILQ_EMPTY(&r->rpool.list)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
			goto bad;
		}
		if (s == NULL) {
			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
			    &naddr, NULL, &sn);
			if (!PF_AZERO(&naddr, AF_INET))
				dst->sin_addr.s_addr = naddr.v4.s_addr;
			ifp = r->rpool.cur->kif ?
			    r->rpool.cur->kif->pfik_ifp : NULL;
		} else {
			if (!PF_AZERO(&s->rt_addr, AF_INET))
				dst->sin_addr.s_addr =
				    s->rt_addr.v4.s_addr;
			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
		}
	}

	/* ... */

	if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
		goto bad;
	else if (m0 == NULL)
		goto done;

	if (m0->m_len < (int)sizeof (struct ip)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route: m0->m_len < sizeof (struct ip)\n"));
		goto bad;
	}

	ip = mtod(m0, struct ip *);

	/* Copied from ip_output. */

	/* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
	m0->m_pkthdr.csum_flags |= CSUM_IP;
	sw_csum = m0->m_pkthdr.csum_flags &
	    ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

	if (ifp->if_hwassist & CSUM_TCP_SUM16) {
		/*
		 * Special case code for GMACE
		 * frames that can be checksumed by GMACE SUM16 HW:
		 * frame >64, no fragments, no UDP
		 */
		if (apple_hwcksum_tx && (m0->m_pkthdr.csum_flags & CSUM_TCP) &&
		    (ntohs(ip->ip_len) > 50) &&
		    (ntohs(ip->ip_len) <= ifp->if_mtu)) {
			/*
			 * Apple GMAC HW, expects:
			 *	STUFF_OFFSET << 16 | START_OFFSET
			 */
			/* IP+Enet header length */
			u_short offset = ((ip->ip_hl) << 2) + 14;
			u_short csumprev = m0->m_pkthdr.csum_data & 0xffff;
			m0->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_TCP_SUM16; /* for GMAC */
			m0->m_pkthdr.csum_data = (csumprev + offset) << 16;
			m0->m_pkthdr.csum_data += offset;
			/* do IP hdr chksum in software */
			sw_csum = CSUM_DELAY_IP;
		} else {
			/* let the software handle any UDP or TCP checksums */
			sw_csum |= (CSUM_DELAY_DATA & m0->m_pkthdr.csum_flags);
		}
	} else if (apple_hwcksum_tx == 0) {
		sw_csum |= (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
		    m0->m_pkthdr.csum_flags;
	}

	if (sw_csum & CSUM_DELAY_DATA) {
		in_delayed_cksum(m0);
		sw_csum &= ~CSUM_DELAY_DATA;
		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
	}

	if (apple_hwcksum_tx != 0) {
		m0->m_pkthdr.csum_flags &=
		    IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
	} else {
		m0->m_pkthdr.csum_flags = 0;
	}

	if (ntohs(ip->ip_len) <= ifp->if_mtu ||
	    (ifp->if_hwassist & CSUM_FRAGMENT)) {
		/* ... */
		if (sw_csum & CSUM_DELAY_IP)
			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
		error = ifnet_output(ifp, PF_INET, m0, ro, sintosa(dst));
		goto done;
	}

	/*
	 * Too large for interface; fragment if possible.
	 * Must be able to put at least 8 bytes per fragment.
	 */
	if (ip->ip_off & htons(IP_DF)) {
		ipstat.ips_cantfrag++;
		if (r->rt != PF_DUPTO) {
			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
			    ifp->if_mtu);
			goto done;
		}
		goto bad;
	}

	error = ip_fragment(m0, ifp, ifp->if_mtu, sw_csum);
	/* ... */

	for (m0 = m1; m0; m0 = m1) {
		/* ... */
		error = ifnet_output(ifp, PF_INET, m0, ro,
		    sintosa(dst));
		/* ... */
	}

	if (error == 0)
		ipstat.ips_fragmented++;

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	if (ro == &iproute && ro->ro_rt)
		RTFREE(ro->ro_rt);
	return;

bad:
	m_freem(m0);
	goto done;
}
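/*
 * Illustrative sketch (not part of the original source): how the GMAC SUM16
 * path in pf_route() packs csum_data as (STUFF_OFFSET << 16) | START_OFFSET.
 * The start offset is the Ethernet (14 bytes) plus IP header length; the
 * stuff offset adds the checksum-field offset that the stack previously left
 * in the low 16 bits of csum_data.  The helper name is hypothetical.
 */
#if 0
static u_int32_t
example_gmac_csum_data(const struct ip *ip, u_int16_t csum_field_offset)
{
	/* Ethernet header (14 bytes) + IP header length */
	u_short start = (u_short)((ip->ip_hl << 2) + 14);
	u_short stuff = (u_short)(csum_field_offset + start);

	return (((u_int32_t)stuff << 16) | start);
}
#endif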
static void
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
	struct mbuf		*m0;
	struct route_in6	 ip6route;
	struct route_in6	*ro;
	struct sockaddr_in6	*dst;
	struct ip6_hdr		*ip6;
	struct ifnet		*ifp = NULL;
	struct pf_addr		 naddr;
	struct pf_src_node	*sn = NULL;
	int			 error = 0;

	if (m == NULL || *m == NULL || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
		panic("pf_route6: invalid parameters");

	if (pd->pf_mtag->routed++ > 3) {
		/* ... */
	}

	if (r->rt == PF_DUPTO) {
		if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
			return;
	} else {
		if ((r->rt == PF_REPLYTO) == (r->direction == dir))
			return;
		/* ... */
	}

	if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
		goto bad;
	}
	ip6 = mtod(m0, struct ip6_hdr *);

	ro = &ip6route;
	bzero((caddr_t)ro, sizeof (*ro));
	dst = (struct sockaddr_in6 *)&ro->ro_dst;
	dst->sin6_family = AF_INET6;
	dst->sin6_len = sizeof (*dst);
	dst->sin6_addr = ip6->ip6_dst;

	/* Cheat. XXX why only in the v6 case??? */
	if (r->rt == PF_FASTROUTE) {
		struct pf_mtag *pf_mtag;

		if ((pf_mtag = pf_get_mtag(m0)) == NULL)
			goto bad;
		pf_mtag->flags |= PF_TAG_GENERATED;
		ip6_output(m0, NULL, NULL, 0, NULL, NULL, 0);
		return;
	}

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
		goto bad;
	}
	if (s == NULL) {
		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &naddr, AF_INET6);
		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		if (!PF_AZERO(&s->rt_addr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &s->rt_addr, AF_INET6);
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
	}

	/* ... */

	if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
		goto bad;
	else if (m0 == NULL)
		goto done;

	if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
		DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
		    "< sizeof (struct ip6_hdr)\n"));
		goto bad;
	}
	ip6 = mtod(m0, struct ip6_hdr *);

	/*
	 * If the packet is too large for the outgoing interface,
	 * send back an icmp6 error.
	 */
	if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
		dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
	if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
		error = nd6_output(ifp, ifp, m0, dst, NULL, 0);
	} else {
		in6_ifstat_inc(ifp, ifs6_in_toobig);
		if (r->rt != PF_DUPTO)
			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
		else
			goto bad;
	}

done:
	if (r->rt != PF_DUPTO)
		*m = NULL;
	return;

bad:
	m_freem(m0);
	goto done;
}
/*
 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
 *   off is the offset where the protocol header starts
 *   len is the total length of protocol header plus payload
 * returns 0 when the checksum is valid, otherwise returns 1.
 */
static int
pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
    sa_family_t af)
{
	u_int16_t sum;

	/*
	 * Optimize for the common case; if the hardware calculated
	 * value doesn't include pseudo-header checksum, or if it
	 * is partially-computed (only 16-bit summation), do it in
	 * software below.
	 */
	if (apple_hwcksum_rx && (m->m_pkthdr.csum_flags &
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
	    (m->m_pkthdr.csum_data ^ 0xffff) == 0) {
		return (0);
	}

	switch (p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		break;
	default:
		return (1);
	}
	if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr))
		return (1);
	if (m->m_pkthdr.len < off + len)
		return (1);

	switch (af) {
	case AF_INET:
		if (p == IPPROTO_ICMP) {
			/* ... */
			sum = in_cksum(m, len);
			/* ... */
		} else {
			if (m->m_len < (int)sizeof (struct ip))
				return (1);
			sum = inet_cksum(m, p, off, len);
		}
		break;
	case AF_INET6:
		if (m->m_len < (int)sizeof (struct ip6_hdr))
			return (1);
		sum = inet6_cksum(m, p, off, len);
		break;
	default:
		return (1);
	}
	if (sum) {
		switch (p) {
		case IPPROTO_TCP:
			tcpstat.tcps_rcvbadsum++;
			break;
		case IPPROTO_UDP:
			udpstat.udps_badsum++;
			break;
		case IPPROTO_ICMP:
			icmpstat.icps_checksum++;
			break;
		case IPPROTO_ICMPV6:
			icmp6stat.icp6s_checksum++;
			break;
		}
		return (1);
	}
	return (0);
}
#ifndef NO_APPLE_EXTENSIONS
#define PF_APPLE_UPDATE_PDESC_IPv4()				\
	do {							\
		if (m && pd.mp && m != pd.mp) {			\
			m = pd.mp;				\
			h = mtod(m, struct ip *);		\
		}						\
	} while (0)
#endif

int
pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh)
{
	struct pfi_kif		*kif;
	u_short			 action, reason = 0, log = 0;
	struct mbuf		*m = *m0;
	struct ip		*h = NULL;
	struct pf_rule		*a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state		*s = NULL;
	struct pf_state_key	*sk = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_pdesc		 pd;
	int			 off, dirndx, pqid = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof (pd));

	if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: pf_get_mtag returned NULL\n"));
		return (PF_DROP);
	}

	if (pd.pf_mtag->flags & PF_TAG_GENERATED)
		return (PF_PASS);

	kif = (struct pfi_kif *)ifp->if_pf_kif;
	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#if DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test");
#endif /* DIAGNOSTIC */

	if (m->m_pkthdr.len < (int)sizeof (*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip *);

	off = h->ip_hl << 2;
	if (off < (int)sizeof (*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
	pd.ip_sum = &h->ip_sum;
	/* ... */
#ifndef NO_APPLE_EXTENSIONS
	pd.proto_variant = 0;
	/* ... */
#endif
	/* ... */
	pd.tot_len = ntohs(h->ip_len);
	/* ... */

	/* handle fragments that didn't get reassembled by normalization */
	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
		action = pf_test_fragment(&r, dir, kif, m, h,
		    &pd, &a, &ruleset);
		goto done;
	}

	switch (h->ip_p) {

	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof (th),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
			pqid = 1;
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
#endif
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ipintrq);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
		    &reason);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ipintrq);
		break;
	}

	case IPPROTO_ICMP: {
		struct icmp ih;

		pd.hdr.icmp = &ih;
		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
		    &reason);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ipintrq);
		break;
	}

#ifndef NO_APPLE_EXTENSIONS
	case IPPROTO_ESP: {
		struct pf_esp_hdr esp;

		pd.hdr.esp = &esp;
		if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
		    AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_esp(&s, dir, kif, off, &pd);
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ipintrq);
		break;
	}

	case IPPROTO_GRE: {
		struct pf_grev1_hdr grev1;

		pd.hdr.grev1 = &grev1;
		if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
		    &reason, AF_INET)) {
			log = (action != PF_PASS);
			goto done;
		}
		if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
		    ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
			if (ntohs(grev1.payload_length) >
			    m->m_pkthdr.len - off) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				goto done;
			}
			pd.proto_variant = PF_GRE_PPTP_VARIANT;
			action = pf_test_state_grev1(&s, dir, kif, off, &pd);
			if (pd.lmw < 0) goto done;
			PF_APPLE_UPDATE_PDESC_IPv4();
			if (action == PF_PASS) {
#if NPFSYNC
				pfsync_update_state(s);
#endif /* NPFSYNC */
				r = s->rule.ptr;
				a = s->anchor.ptr;
				log = s->log;
				break;
			} else if (s == NULL) {
				action = pf_test_rule(&r, &s, dir, kif, m, off,
				    h, &pd, &a, &ruleset, &ipintrq);
				if (action == PF_PASS)
					break;
			}
		}

		/* not GREv1/PPTP, so treat as ordinary GRE... */
	}
#endif

	default:
		action = pf_test_state_other(&s, dir, kif, &pd);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, &ipintrq);
		break;
	}

done:
#ifndef NO_APPLE_EXTENSIONS
	/* ... */
	PF_APPLE_UPDATE_PDESC_IPv4();
#endif

	if (action == PF_PASS && h->ip_hl > 5 &&
	    !((s && s->allow_opts) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with ip options [hlen=%u]\n",
		    (unsigned int) h->ip_hl));
	}

	if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
		(void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
		    r->rtableid);

	if (action == PF_PASS && r->qid) {
		if (pqid || (pd.tos & IPTOS_LOWDELAY))
			pd.pf_mtag->qid = r->pqid;
		else
			pd.pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pd.pf_mtag->hdr = h;
	}

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see tcp_input() and in_pcblookup_listen().
	 */
	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
		pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			sk = s->state_key;
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == sk->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL) {
			struct pf_addr *x;
			/*
			 * XXX: we need to make sure that the addresses
			 * passed to pfr_update_stats() are the same than
			 * the addresses used during matching (pfr_match)
			 */
			if (r == &pf_default_rule) {
				tr = nr;
				x = (sk == NULL || sk->direction == dir) ?
				    &pd.baddr : &pd.naddr;
			} else {
				x = (sk == NULL || sk->direction == dir) ?
				    &pd.naddr : &pd.baddr;
			}
			if (x == &pd.baddr || s == NULL) {
				/* we need to change the address */
				if (dir == PF_OUT)
					pd.src = x;
				else
					pd.dst = x;
			}
		}
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ?
			    pd.src : pd.dst, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.dst : pd.src, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->dst.neg);
	}

#ifndef NO_APPLE_EXTENSIONS
	VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);

	if (*m0) {
		if (pd.lmw < 0) {
			REASON_SET(&reason, PFRES_MEMORY);
			action = PF_DROP;
		}

		if (action == PF_DROP) {
			m_freem(*m0);
			*m0 = NULL;
			return (PF_DROP);
		}

		*m0 = m;
	}
#endif

	if (action == PF_SYNPROXY_DROP) {
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route can free the mbuf causing *m0 to become NULL */
		pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
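/*
 * Illustrative sketch (not part of the original source): the loopback test
 * used above when tagging redirected connections.  Shifting the address right
 * by IN_CLASSA_NSHIFT (24) keeps only the leading octet, so comparing against
 * IN_LOOPBACKNET (127) matches any 127.0.0.0/8 destination.  The helper name
 * is hypothetical.
 */
#if 0
static int
example_is_loopback_net(struct in_addr a)
{
	return ((ntohl(a.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET);
}
#endif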
#ifndef NO_APPLE_EXTENSIONS
#define PF_APPLE_UPDATE_PDESC_IPv6()				\
	do {							\
		if (m && pd.mp && m != pd.mp) {			\
			m = pd.mp;				\
			h = mtod(m, struct ip6_hdr *);		\
		}						\
	} while (0)
#endif

int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh)
{
	struct pfi_kif		*kif;
	u_short			 action, reason = 0, log = 0;
	struct mbuf		*m = *m0, *n = NULL;
	struct ip6_hdr		*h;
	struct pf_rule		*a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state		*s = NULL;
	struct pf_state_key	*sk = NULL;
	struct pf_ruleset	*ruleset = NULL;
	struct pf_pdesc		 pd;
	int			 off, terminal = 0, dirndx, rh_cnt = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof (pd));

	if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: pf_get_mtag returned NULL\n"));
		return (PF_DROP);
	}

	if (pd.pf_mtag->flags & PF_TAG_GENERATED)
		return (PF_PASS);

	kif = (struct pfi_kif *)ifp->if_pf_kif;
	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

#if DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

	h = mtod(m, struct ip6_hdr *);

	if (m->m_pkthdr.len < (int)sizeof (*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/* We do IP header normalization and packet reassembly here */
	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
		action = PF_DROP;
		goto done;
	}
	m = *m0;	/* pf_normalize messes with m0 */
	h = mtod(m, struct ip6_hdr *);

	/*
	 * we do not support jumbogram yet.  if we keep going, zero ip6_plen
	 * will do something bad, so drop the packet for now.
	 */
	if (htons(h->ip6_plen) == 0) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
		goto done;
	}

	pd.src = (struct pf_addr *)&h->ip6_src;
	pd.dst = (struct pf_addr *)&h->ip6_dst;
	PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
	/* ... */
	pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr);
	/* ... */

	off = ((caddr_t)h - m->m_data) + sizeof (struct ip6_hdr);
	pd.proto = h->ip6_nxt;
#ifndef NO_APPLE_EXTENSIONS
	pd.proto_variant = 0;
	/* ... */
#endif

	do {
		switch (pd.proto) {
		case IPPROTO_FRAGMENT:
			action = pf_test_fragment(&r, dir, kif, m, h,
			    &pd, &a, &ruleset);
			if (action == PF_DROP)
				REASON_SET(&reason, PFRES_FRAG);
			goto done;
		case IPPROTO_ROUTING: {
			struct ip6_rthdr rthdr;

			if (rh_cnt++) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 more than one rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			if (!pf_pull_hdr(m, off, &rthdr, sizeof (rthdr), NULL,
			    &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short rthdr\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				log = 1;
				goto done;
			}
			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 rthdr0\n"));
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
				log = 1;
				goto done;
			}
			/* FALLTHROUGH */
		}
		case IPPROTO_AH:
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct ip6_ext opt6;

			if (!pf_pull_hdr(m, off, &opt6, sizeof (opt6),
			    NULL, &reason, pd.af)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: IPv6 short opt\n"));
				action = PF_DROP;
				log = 1;
				goto done;
			}
			if (pd.proto == IPPROTO_AH)
				off += (opt6.ip6e_len + 2) * 4;
			else
				off += (opt6.ip6e_len + 1) * 8;
			pd.proto = opt6.ip6e_nxt;
			/* goto the next header */
			break;
		}
		default:
			terminal++;
			break;
		}
	} while (!terminal);

	/* if there's no routing header, use unmodified mbuf for checksumming */
	if (!n)
		n = m;

	switch (pd.proto) {

	case IPPROTO_TCP: {
		struct tcphdr th;

		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(m, off, &th, sizeof (th),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv6();
#endif
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
		    &reason);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv6();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ip6intrq);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
		    ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
		    &reason);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv6();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ip6intrq);
		break;
	}

	case IPPROTO_ICMPV6: {
		struct icmp6_hdr ih;

		pd.hdr.icmp6 = &ih;
		if (!pf_pull_hdr(m, off, &ih, sizeof (ih),
		    &action, &reason, AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_icmp(&s, dir, kif,
		    m, off, h, &pd, &reason);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv6();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ip6intrq);
		break;
	}

#ifndef NO_APPLE_EXTENSIONS
	case IPPROTO_ESP: {
		struct pf_esp_hdr esp;

		pd.hdr.esp = &esp;
		if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
		    AF_INET6)) {
			log = action != PF_PASS;
			goto done;
		}
		action = pf_test_state_esp(&s, dir, kif, off, &pd);
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv6();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    m, off, h, &pd, &a, &ruleset, &ip6intrq);
		break;
	}

	case IPPROTO_GRE: {
		struct pf_grev1_hdr grev1;

		pd.hdr.grev1 = &grev1;
		if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
		    &reason, AF_INET6)) {
			log = (action != PF_PASS);
			goto done;
		}
		if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
		    ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
			if (ntohs(grev1.payload_length) >
			    m->m_pkthdr.len - off) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				goto done;
			}
			/* ... */
			action = pf_test_state_grev1(&s, dir, kif, off, &pd);
			if (pd.lmw < 0)
				goto done;
			PF_APPLE_UPDATE_PDESC_IPv6();
			if (action == PF_PASS) {
#if NPFSYNC
				pfsync_update_state(s);
#endif /* NPFSYNC */
				r = s->rule.ptr;
				a = s->anchor.ptr;
				log = s->log;
				break;
			} else if (s == NULL) {
				action = pf_test_rule(&r, &s, dir, kif, m, off,
				    h, &pd, &a, &ruleset, &ip6intrq);
				if (action == PF_PASS)
					break;
			}
		}

		/* not GREv1/PPTP, so treat as ordinary GRE... */
	}
#endif

	default:
		action = pf_test_state_other(&s, dir, kif, &pd);
#ifndef NO_APPLE_EXTENSIONS
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv6();
#endif
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, m, off, h,
			    &pd, &a, &ruleset, &ip6intrq);
		break;
	}

done:
#ifndef NO_APPLE_EXTENSIONS
	/* ... */
	PF_APPLE_UPDATE_PDESC_IPv6();
#endif

	/* ... */

	/* handle dangerous IPv6 extension headers. */
	if (action == PF_PASS && rh_cnt &&
	    !((s && s->allow_opts) || r->allow_opts)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_IPOPTIONS);
		log = 1;
		DPFPRINTF(PF_DEBUG_MISC,
		    ("pf: dropping packet with dangerous v6 headers\n"));
	}

	if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
		(void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
		    r->rtableid);

	if (action == PF_PASS && r->qid) {
		if (pd.tos & IPTOS_LOWDELAY)
			pd.pf_mtag->qid = r->pqid;
		else
			pd.pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pd.pf_mtag->hdr = h;
	}

	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
	    (s->nat_rule.ptr->action == PF_RDR ||
	    s->nat_rule.ptr->action == PF_BINAT) &&
	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
		pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;

	if (log) {
		struct pf_rule *lr;

		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
		    &pd);
	}

	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			sk = s->state_key;
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			dirndx = (dir == sk->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL) {
			struct pf_addr *x;
			/*
			 * XXX: we need to make sure that the addresses
			 * passed to pfr_update_stats() are the same than
			 * the addresses used during matching (pfr_match)
			 */
			if (r == &pf_default_rule) {
				tr = nr;
				x = (s == NULL || sk->direction == dir) ?
				    &pd.baddr : &pd.naddr;
			} else {
				x = (s == NULL || sk->direction == dir) ?
				    &pd.naddr : &pd.baddr;
			}
			if (x == &pd.baddr || s == NULL) {
				/* ... */
				if (dir == PF_OUT)
					pd.src = x;
				else
					pd.dst = x;
			}
		}
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.src : pd.dst, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.dst : pd.src, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->dst.neg);
	}
	/* ... */
	if (action == PF_SYNPROXY_DROP) {
		/* ... */
	}
	/* pf_route6 can free the mbuf causing *m0 to become NULL */
	pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
	/* ... */

#ifndef NO_APPLE_EXTENSIONS
	VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);

	if (*m0) {
		if (pd.lmw < 0) {
			REASON_SET(&reason, PFRES_MEMORY);
			action = PF_DROP;
		}

		if (action == PF_DROP) {
			/* ... */
		}
		/* ... */
	}

	if (action == PF_SYNPROXY_DROP) {
		/* ... */
	} else if (r->rt) {
		if (action == PF_PASS) {
			/* ... */
			h = mtod(m, struct ip6_hdr *);
		}
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
	}
#else
	if (action != PF_SYNPROXY_DROP && r->rt)
		/* pf_route6 can free the mbuf causing *m0 to become NULL */
		pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

	if (action == PF_PASS) {
		/* ... */
		h = mtod(m, struct ip6_hdr *);
	}

	if (action == PF_SYNPROXY_DROP) {
		/* ... */
	}
#endif

	return (action);
}
static int
pf_check_congestion(struct ifqueue *ifq)
{
#pragma unused(ifq)
	return (0);
}

void
pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
    int flags, const char *wchan, void *palloc)
{
#pragma unused(align, ioff, flags, palloc)

	bzero(pp, sizeof (*pp));
	pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
	if (pp->pool_zone != NULL) {
		zone_change(pp->pool_zone, Z_EXPAND, TRUE);
		pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
		pp->pool_name = wchan;
	}
}

/* Zones cannot be currently destroyed */
void
pool_destroy(struct pool *pp)
{
#pragma unused(pp)
}

void
pool_sethiwat(struct pool *pp, int n)
{
	pp->pool_hiwat = n;	/* Currently unused */
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{
#pragma unused(warnmess, ratecap)
	pp->pool_limit = n;
}

void *
pool_get(struct pool *pp, int flags)
{
	void *buf;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (pp->pool_count > pp->pool_limit) {
		DPFPRINTF(PF_DEBUG_NOISY,
		    ("pf: pool %s hard limit reached (%d)\n",
		    pp->pool_name != NULL ? pp->pool_name : "unknown",
		    pp->pool_limit));
		/* ... */
		return (NULL);
	}

	buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
	if (buf != NULL) {
		pp->pool_count++;
		VERIFY(pp->pool_count != 0);
	}
	return (buf);
}

void
pool_put(struct pool *pp, void *v)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	zfree(pp->pool_zone, v);
	VERIFY(pp->pool_count != 0);
	pp->pool_count--;
}

struct pf_mtag *
pf_find_mtag(struct mbuf *m)
{
#if !PF_PKTHDR
	struct m_tag	*mtag;

	if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_PF, NULL)) == NULL)
		return (NULL);

	return ((struct pf_mtag *)(mtag + 1));
#else
	if (!(m->m_flags & M_PKTHDR))
		return (NULL);

	return (&m->m_pkthdr.pf_mtag);
#endif /* PF_PKTHDR */
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
#if !PF_PKTHDR
	struct m_tag	*mtag;

	if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
	    NULL)) == NULL) {
		mtag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
		    sizeof (struct pf_mtag), M_NOWAIT);
		if (mtag == NULL)
			return (NULL);
		bzero(mtag + 1, sizeof (struct pf_mtag));
		m_tag_prepend(m, mtag);
	}
	return ((struct pf_mtag *)(mtag + 1));
#else
	return (pf_find_mtag(m));
#endif /* PF_PKTHDR */
}

uint64_t
pf_time_second(void)
{
	/* ... */
}

uint64_t
pf_calendar_time_second(void)
{
	/* ... */
}

void *
hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
{
	struct hook_desc *hd;

	hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
	if (hd == NULL)
		return (NULL);

	hd->hd_fn = fn;
	hd->hd_arg = arg;
	if (tail)
		TAILQ_INSERT_TAIL(head, hd, hd_list);
	else
		TAILQ_INSERT_HEAD(head, hd, hd_list);

	return (hd);
}

void
hook_runloop(struct hook_desc_head *head, int flags)
{
	struct hook_desc *hd;

	if (!(flags & HOOK_REMOVE)) {
		if (!(flags & HOOK_ABORT))
			TAILQ_FOREACH(hd, head, hd_list)
				hd->hd_fn(hd->hd_arg);
	} else {
		while (!!(hd = TAILQ_FIRST(head))) {
			TAILQ_REMOVE(head, hd, hd_list);
			if (!(flags & HOOK_ABORT))
				hd->hd_fn(hd->hd_arg);
			if (flags & HOOK_FREE)
				_FREE(hd, M_DEVBUF);
		}
	}
}