/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
/* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#include <machine/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/mcache.h>

#include <libkern/crypto/md5.h>
#include <libkern/libkern.h>

#include <mach/thread_act.h>

#include <net/if_types.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/udp_var.h>
#include <netinet/icmp_var.h>
#include <net/if_ether.h>
#include <net/ethernet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#ifndef NO_APPLE_EXTENSIONS
#define	DPFPRINTF(n, x)	(pf_status.debug >= (n) ? printf x : ((void)0))
#else
#define	DPFPRINTF(n, x)	if (pf_status.debug >= (n)) printf x
#endif /* NO_APPLE_EXTENSIONS */

/* XXX: should be in a header somewhere */
#define	satosin(sa)	((struct sockaddr_in *)(sa))
#define	sintosa(sin)	((struct sockaddr *)(sin))
/*
 * On Mac OS X, the rtableid value is treated as the interface scope
 * value that is equivalent to the interface index used for scoped
 * routing.  A valid scope value is anything but IFSCOPE_NONE (0),
 * as per the definition of ifindex, which is a positive, non-zero number.
 * The other BSDs treat a negative rtableid value as invalid, hence
 * the test against INT_MAX to handle userland apps which initialize
 * the field with a negative number.
 */
#define	PF_RTABLEID_IS_VALID(r) \
	((r) > IFSCOPE_NONE && (r) <= INT_MAX)
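
/*
 * Illustrative sketch only (not part of the original source): one way the
 * PF_RTABLEID_IS_VALID() check above could be applied when mapping a rule's
 * rtableid onto an interface scope.  The 'rtableid' field name on struct
 * pf_rule is an assumption made for this example.
 */
#if 0
static __inline unsigned int
pf_example_rule_scope(const struct pf_rule *r)
{
	/* IFSCOPE_NONE (0) and negative userland values are both invalid */
	if (PF_RTABLEID_IS_VALID(r->rtableid))
		return ((unsigned int)r->rtableid);
	return (IFSCOPE_NONE);
}
#endif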
lck_rw_t *pf_perim_lock;

struct pf_state_tree_lan_ext	 pf_statetbl_lan_ext;
struct pf_state_tree_ext_gwy	 pf_statetbl_ext_gwy;

struct pf_palist	 pf_pabuf;
struct pf_status	 pf_status;

struct pf_altqqueue	 pf_altqs[2];
struct pf_altqqueue	*pf_altqs_active;
struct pf_altqqueue	*pf_altqs_inactive;
u_int32_t		 ticket_altqs_active;
u_int32_t		 ticket_altqs_inactive;
int			 altqs_inactive_open;
u_int32_t		 ticket_pabuf;

static MD5_CTX		 pf_tcp_secret_ctx;
static u_char		 pf_tcp_secret[16];
static int		 pf_tcp_secret_init;
static int		 pf_tcp_iss_off;

static struct pf_anchor_stackframe {
	struct pf_ruleset	*rs;
	struct pf_anchor_node	*parent;
	struct pf_anchor	*child;
} pf_anchor_stack[64];

struct pool		 pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
struct pool		 pf_state_pl, pf_state_key_pl;
struct pool		 pf_altq_pl;
#ifndef NO_APPLE_EXTENSIONS
typedef void (*hook_fn_t)(void *);

	TAILQ_ENTRY(hook_desc) hd_list;

#define	HOOK_REMOVE	0x01
#define	HOOK_FREE	0x02
#define	HOOK_ABORT	0x04

static void	*hook_establish(struct hook_desc_head *, int,
static void	hook_runloop(struct hook_desc_head *, int flags);

struct pool	 pf_app_state_pl;
204 static void pf_print_addr(struct pf_addr
*addr
, sa_family_t af
);
205 static void pf_print_sk_host(struct pf_state_host
*, u_int8_t
, int,
209 static void pf_print_host(struct pf_addr
*, u_int16_t
, u_int8_t
);
211 static void pf_init_threshold(struct pf_threshold
*, u_int32_t
,
213 static void pf_add_threshold(struct pf_threshold
*);
214 static int pf_check_threshold(struct pf_threshold
*);
216 static void pf_change_ap(int, struct mbuf
*, struct pf_addr
*,
217 u_int16_t
*, u_int16_t
*, u_int16_t
*,
218 struct pf_addr
*, u_int16_t
, u_int8_t
, sa_family_t
);
219 static int pf_modulate_sack(struct mbuf
*, int, struct pf_pdesc
*,
220 struct tcphdr
*, struct pf_state_peer
*);
222 static void pf_change_a6(struct pf_addr
*, u_int16_t
*,
223 struct pf_addr
*, u_int8_t
);
225 static void pf_change_icmp(struct pf_addr
*, u_int16_t
*,
226 struct pf_addr
*, struct pf_addr
*, u_int16_t
,
227 u_int16_t
*, u_int16_t
*, u_int16_t
*,
228 u_int16_t
*, u_int8_t
, sa_family_t
);
229 static void pf_send_tcp(const struct pf_rule
*, sa_family_t
,
230 const struct pf_addr
*, const struct pf_addr
*,
231 u_int16_t
, u_int16_t
, u_int32_t
, u_int32_t
,
232 u_int8_t
, u_int16_t
, u_int16_t
, u_int8_t
, int,
233 u_int16_t
, struct ether_header
*, struct ifnet
*);
234 static void pf_send_icmp(struct mbuf
*, u_int8_t
, u_int8_t
,
235 sa_family_t
, struct pf_rule
*);
236 #ifndef NO_APPLE_EXTENSIONS
237 static struct pf_rule
*pf_match_translation(struct pf_pdesc
*, struct mbuf
*,
238 int, int, struct pfi_kif
*, struct pf_addr
*,
239 union pf_state_xport
*, struct pf_addr
*,
240 union pf_state_xport
*, int);
241 static struct pf_rule
*pf_get_translation_aux(struct pf_pdesc
*,
242 struct mbuf
*, int, int, struct pfi_kif
*,
243 struct pf_src_node
**, struct pf_addr
*,
244 union pf_state_xport
*, struct pf_addr
*,
245 union pf_state_xport
*, struct pf_addr
*,
246 union pf_state_xport
*);
248 struct pf_rule
*pf_match_translation(struct pf_pdesc
*, struct mbuf
*,
249 int, int, struct pfi_kif
*,
250 struct pf_addr
*, u_int16_t
, struct pf_addr
*,
252 struct pf_rule
*pf_get_translation(struct pf_pdesc
*, struct mbuf
*,
253 int, int, struct pfi_kif
*, struct pf_src_node
**,
254 struct pf_addr
*, u_int16_t
,
255 struct pf_addr
*, u_int16_t
,
256 struct pf_addr
*, u_int16_t
*);
258 static void pf_attach_state(struct pf_state_key
*,
259 struct pf_state
*, int);
260 static void pf_detach_state(struct pf_state
*, int);
261 static u_int32_t
pf_tcp_iss(struct pf_pdesc
*);
262 static int pf_test_rule(struct pf_rule
**, struct pf_state
**,
263 int, struct pfi_kif
*, struct mbuf
*, int,
264 void *, struct pf_pdesc
*, struct pf_rule
**,
265 struct pf_ruleset
**, struct ifqueue
*);
266 static int pf_test_fragment(struct pf_rule
**, int,
267 struct pfi_kif
*, struct mbuf
*, void *,
268 struct pf_pdesc
*, struct pf_rule
**,
269 struct pf_ruleset
**);
270 static int pf_test_state_tcp(struct pf_state
**, int,
271 struct pfi_kif
*, struct mbuf
*, int,
272 void *, struct pf_pdesc
*, u_short
*);
273 #ifndef NO_APPLE_EXTENSIONS
274 static int pf_test_state_udp(struct pf_state
**, int,
275 struct pfi_kif
*, struct mbuf
*, int,
276 void *, struct pf_pdesc
*, u_short
*);
278 static int pf_test_state_udp(struct pf_state
**, int,
279 struct pfi_kif
*, struct mbuf
*, int,
280 void *, struct pf_pdesc
*);
282 static int pf_test_state_icmp(struct pf_state
**, int,
283 struct pfi_kif
*, struct mbuf
*, int,
284 void *, struct pf_pdesc
*, u_short
*);
285 static int pf_test_state_other(struct pf_state
**, int,
286 struct pfi_kif
*, struct pf_pdesc
*);
287 static int pf_match_tag(struct mbuf
*, struct pf_rule
*,
288 struct pf_mtag
*, int *);
289 static void pf_step_into_anchor(int *, struct pf_ruleset
**, int,
290 struct pf_rule
**, struct pf_rule
**, int *);
291 static int pf_step_out_of_anchor(int *, struct pf_ruleset
**,
292 int, struct pf_rule
**, struct pf_rule
**,
294 static void pf_hash(struct pf_addr
*, struct pf_addr
*,
295 struct pf_poolhashkey
*, sa_family_t
);
296 static int pf_map_addr(u_int8_t
, struct pf_rule
*,
297 struct pf_addr
*, struct pf_addr
*,
298 struct pf_addr
*, struct pf_src_node
**);
299 #ifndef NO_APPLE_EXTENSIONS
300 static int pf_get_sport(struct pf_pdesc
*, struct pfi_kif
*,
301 struct pf_rule
*, struct pf_addr
*,
302 union pf_state_xport
*, struct pf_addr
*,
303 union pf_state_xport
*, struct pf_addr
*,
304 union pf_state_xport
*, struct pf_src_node
**);
306 int pf_get_sport(sa_family_t
, u_int8_t
, struct pf_rule
*,
307 struct pf_addr
*, struct pf_addr
*, u_int16_t
,
308 struct pf_addr
*, u_int16_t
*, u_int16_t
, u_int16_t
,
309 struct pf_src_node
**);
311 static void pf_route(struct mbuf
**, struct pf_rule
*, int,
312 struct ifnet
*, struct pf_state
*,
315 static void pf_route6(struct mbuf
**, struct pf_rule
*, int,
316 struct ifnet
*, struct pf_state
*,
319 static u_int8_t
pf_get_wscale(struct mbuf
*, int, u_int16_t
,
321 static u_int16_t
pf_get_mss(struct mbuf
*, int, u_int16_t
,
323 static u_int16_t
pf_calc_mss(struct pf_addr
*, sa_family_t
,
325 static void pf_set_rt_ifp(struct pf_state
*,
327 static int pf_check_proto_cksum(struct mbuf
*, int, int,
328 u_int8_t
, sa_family_t
);
329 static int pf_addr_wrap_neq(struct pf_addr_wrap
*,
330 struct pf_addr_wrap
*);
331 static struct pf_state
*pf_find_state(struct pfi_kif
*,
332 struct pf_state_key_cmp
*, u_int
);
333 static int pf_src_connlimit(struct pf_state
**);
334 static void pf_stateins_err(const char *, struct pf_state
*,
336 static int pf_check_congestion(struct ifqueue
*);
338 #ifndef NO_APPLE_EXTENSIONS
340 static const char *pf_pptp_ctrl_type_name(u_int16_t code
);
342 static void pf_pptp_handler(struct pf_state
*, int, int,
343 struct pf_pdesc
*, struct pfi_kif
*);
344 static void pf_pptp_unlink(struct pf_state
*);
345 static void pf_grev1_unlink(struct pf_state
*);
346 static int pf_test_state_grev1(struct pf_state
**, int,
347 struct pfi_kif
*, int, struct pf_pdesc
*);
348 static int pf_ike_compare(struct pf_app_state
*,
349 struct pf_app_state
*);
350 static int pf_test_state_esp(struct pf_state
**, int,
351 struct pfi_kif
*, int, struct pf_pdesc
*);
extern struct pool pfr_ktable_pl;
extern struct pool pfr_kentry_pl;
extern int path_mtu_discovery;

struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_app_state_pl, PFAPPSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT }
};
#ifndef NO_APPLE_EXTENSIONS
pf_lazy_makewritable(struct pf_pdesc *pd, struct mbuf *m, int len)

	if (m_makewritable(&m, 0, len, M_DONTWAIT))

	if (len >= 0 && m != pd->mp) {

		pd->pf_mtag = pf_find_mtag(m);

			struct ip *h = mtod(m, struct ip *);
			pd->src = (struct pf_addr *)&h->ip_src;
			pd->dst = (struct pf_addr *)&h->ip_dst;
			pd->ip_sum = &h->ip_sum;

			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			pd->src = (struct pf_addr *)&h->ip6_src;
			pd->dst = (struct pf_addr *)&h->ip6_dst;

	return (len < 0 ? 0 : m);
pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
    int direction, int *action)

	if (*state == NULL || (*state)->timeout == PFTM_PURGE) {

	if (direction == PF_OUT &&
	    (((*state)->rule.ptr->rt == PF_ROUTETO &&
	    (*state)->rule.ptr->direction == PF_OUT) ||
	    ((*state)->rule.ptr->rt == PF_REPLYTO &&
	    (*state)->rule.ptr->direction == PF_IN)) &&
	    (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
#define	STATE_LOOKUP()						\
	*state = pf_find_state(kif, &key, direction);		\
	if (pf_state_lookup_aux(state, kif, direction, &action))	\

#define	STATE_ADDR_TRANSLATE(sk)				\
	(sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] ||	\
	((sk)->af == AF_INET6 &&				\
	((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] ||	\
	(sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] ||	\
	(sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3]))

#define	STATE_TRANSLATE(sk)					\
	(STATE_ADDR_TRANSLATE(sk) ||				\
	(sk)->lan.xport.port != (sk)->gwy.xport.port)

#define	STATE_GRE_TRANSLATE(sk)					\
	(STATE_ADDR_TRANSLATE(sk) ||				\
	(sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)

#define	STATE_LOOKUP()						\
	*state = pf_find_state(kif, &key, direction);		\
	if (*state == NULL || (*state)->timeout == PFTM_PURGE)	\
	if (direction == PF_OUT &&				\
	    (((*state)->rule.ptr->rt == PF_ROUTETO &&		\
	    (*state)->rule.ptr->direction == PF_OUT) ||		\
	    ((*state)->rule.ptr->rt == PF_REPLYTO &&		\
	    (*state)->rule.ptr->direction == PF_IN)) &&		\
	    (*state)->rt_kif != NULL &&				\
	    (*state)->rt_kif != kif)				\

#define	STATE_TRANSLATE(sk)					\
	(sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] ||	\
	((sk)->af == AF_INET6 &&				\
	((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] ||	\
	(sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] ||	\
	(sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])) ||	\
	(sk)->lan.port != (sk)->gwy.port

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

#define	STATE_INC_COUNTERS(s)				\
	s->rule.ptr->states++;				\
	VERIFY(s->rule.ptr->states != 0);		\
	if (s->anchor.ptr != NULL) {			\
		s->anchor.ptr->states++;		\
		VERIFY(s->anchor.ptr->states != 0);	\
	if (s->nat_rule.ptr != NULL) {			\
		s->nat_rule.ptr->states++;		\
		VERIFY(s->nat_rule.ptr->states != 0);	\

#define	STATE_DEC_COUNTERS(s)				\
	if (s->nat_rule.ptr != NULL) {			\
		VERIFY(s->nat_rule.ptr->states > 0);	\
		s->nat_rule.ptr->states--;		\
	if (s->anchor.ptr != NULL) {			\
		VERIFY(s->anchor.ptr->states > 0);	\
		s->anchor.ptr->states--;		\
	VERIFY(s->rule.ptr->states > 0);		\
	s->rule.ptr->states--;				\
508 static __inline
int pf_src_compare(struct pf_src_node
*, struct pf_src_node
*);
509 static __inline
int pf_state_compare_lan_ext(struct pf_state_key
*,
510 struct pf_state_key
*);
511 static __inline
int pf_state_compare_ext_gwy(struct pf_state_key
*,
512 struct pf_state_key
*);
513 static __inline
int pf_state_compare_id(struct pf_state
*,
516 struct pf_src_tree tree_src_tracking
;
518 struct pf_state_tree_id tree_id
;
519 struct pf_state_queue state_list
;
521 RB_GENERATE(pf_src_tree
, pf_src_node
, entry
, pf_src_compare
);
522 RB_GENERATE(pf_state_tree_lan_ext
, pf_state_key
,
523 entry_lan_ext
, pf_state_compare_lan_ext
);
524 RB_GENERATE(pf_state_tree_ext_gwy
, pf_state_key
,
525 entry_ext_gwy
, pf_state_compare_ext_gwy
);
526 RB_GENERATE(pf_state_tree_id
, pf_state
,
527 entry_id
, pf_state_compare_id
);
529 #define PF_DT_SKIP_LANEXT 0x01
530 #define PF_DT_SKIP_EXTGWY 0x02
#ifndef NO_APPLE_EXTENSIONS
static const u_int16_t PF_PPTP_PORT = 1723;
static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
542 struct pf_pptp_ctrl_hdr
{
544 u_int16_t reserved_0
;
547 struct pf_pptp_ctrl_generic
{
551 #define PF_PPTP_CTRL_TYPE_START_REQ 1
552 struct pf_pptp_ctrl_start_req
{
553 u_int16_t protocol_version
;
554 u_int16_t reserved_1
;
555 u_int32_t framing_capabilities
;
556 u_int32_t bearer_capabilities
;
557 u_int16_t maximum_channels
;
558 u_int16_t firmware_revision
;
559 u_int8_t host_name
[64];
560 u_int8_t vendor_string
[64];
563 #define PF_PPTP_CTRL_TYPE_START_RPY 2
564 struct pf_pptp_ctrl_start_rpy
{
565 u_int16_t protocol_version
;
566 u_int8_t result_code
;
568 u_int32_t framing_capabilities
;
569 u_int32_t bearer_capabilities
;
570 u_int16_t maximum_channels
;
571 u_int16_t firmware_revision
;
572 u_int8_t host_name
[64];
573 u_int8_t vendor_string
[64];
576 #define PF_PPTP_CTRL_TYPE_STOP_REQ 3
577 struct pf_pptp_ctrl_stop_req
{
580 u_int16_t reserved_2
;
583 #define PF_PPTP_CTRL_TYPE_STOP_RPY 4
584 struct pf_pptp_ctrl_stop_rpy
{
587 u_int16_t reserved_1
;
590 #define PF_PPTP_CTRL_TYPE_ECHO_REQ 5
591 struct pf_pptp_ctrl_echo_req
{
592 u_int32_t identifier
;
595 #define PF_PPTP_CTRL_TYPE_ECHO_RPY 6
596 struct pf_pptp_ctrl_echo_rpy
{
597 u_int32_t identifier
;
598 u_int8_t result_code
;
600 u_int16_t reserved_1
;
603 #define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7
604 struct pf_pptp_ctrl_call_out_req
{
606 u_int16_t call_sernum
;
608 u_int32_t bearer_type
;
609 u_int32_t framing_type
;
610 u_int16_t rxwindow_size
;
611 u_int16_t proc_delay
;
612 u_int8_t phone_num
[64];
613 u_int8_t sub_addr
[64];
616 #define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8
617 struct pf_pptp_ctrl_call_out_rpy
{
619 u_int16_t peer_call_id
;
620 u_int8_t result_code
;
622 u_int16_t cause_code
;
623 u_int32_t connect_speed
;
624 u_int16_t rxwindow_size
;
625 u_int16_t proc_delay
;
626 u_int32_t phy_channel_id
;
629 #define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9
630 struct pf_pptp_ctrl_call_in_1st
{
632 u_int16_t call_sernum
;
633 u_int32_t bearer_type
;
634 u_int32_t phy_channel_id
;
635 u_int16_t dialed_number_len
;
636 u_int16_t dialing_number_len
;
637 u_int8_t dialed_num
[64];
638 u_int8_t dialing_num
[64];
639 u_int8_t sub_addr
[64];
642 #define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10
643 struct pf_pptp_ctrl_call_in_2nd
{
645 u_int16_t peer_call_id
;
646 u_int8_t result_code
;
648 u_int16_t rxwindow_size
;
650 u_int16_t reserved_1
;
653 #define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11
654 struct pf_pptp_ctrl_call_in_3rd
{
656 u_int16_t reserved_1
;
657 u_int32_t connect_speed
;
658 u_int16_t rxwindow_size
;
660 u_int32_t framing_type
;
663 #define PF_PPTP_CTRL_TYPE_CALL_CLR 12
664 struct pf_pptp_ctrl_call_clr
{
666 u_int16_t reserved_1
;
669 #define PF_PPTP_CTRL_TYPE_CALL_DISC 13
670 struct pf_pptp_ctrl_call_disc
{
672 u_int8_t result_code
;
674 u_int16_t cause_code
;
675 u_int16_t reserved_1
;
676 u_int8_t statistics
[128];
679 #define PF_PPTP_CTRL_TYPE_ERROR 14
680 struct pf_pptp_ctrl_error
{
681 u_int16_t peer_call_id
;
682 u_int16_t reserved_1
;
683 u_int32_t crc_errors
;
686 u_int32_t buf_errors
;
687 u_int32_t tim_errors
;
688 u_int32_t align_errors
;
691 #define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15
692 struct pf_pptp_ctrl_set_linkinfo
{
693 u_int16_t peer_call_id
;
694 u_int16_t reserved_1
;
700 static const char *pf_pptp_ctrl_type_name(u_int16_t code
)
704 if (code
< PF_PPTP_CTRL_TYPE_START_REQ
||
705 code
> PF_PPTP_CTRL_TYPE_SET_LINKINFO
) {
706 static char reserved
[] = "reserved-00";
708 sprintf(&reserved
[9], "%02x", code
);
711 static const char *name
[] = {
712 "start_req", "start_rpy", "stop_req", "stop_rpy",
713 "echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
714 "call_in_1st", "call_in_2nd", "call_in_3rd",
715 "call_clr", "call_disc", "error", "set_linkinfo"
718 return (name
[code
- 1]);
723 static const size_t PF_PPTP_CTRL_MSG_MINSIZE
=
724 sizeof (struct pf_pptp_hdr
) +
725 sizeof (struct pf_pptp_ctrl_hdr
) +
726 MIN(sizeof (struct pf_pptp_ctrl_start_req
),
727 MIN(sizeof (struct pf_pptp_ctrl_start_rpy
),
728 MIN(sizeof (struct pf_pptp_ctrl_stop_req
),
729 MIN(sizeof (struct pf_pptp_ctrl_stop_rpy
),
730 MIN(sizeof (struct pf_pptp_ctrl_echo_req
),
731 MIN(sizeof (struct pf_pptp_ctrl_echo_rpy
),
732 MIN(sizeof (struct pf_pptp_ctrl_call_out_req
),
733 MIN(sizeof (struct pf_pptp_ctrl_call_out_rpy
),
734 MIN(sizeof (struct pf_pptp_ctrl_call_in_1st
),
735 MIN(sizeof (struct pf_pptp_ctrl_call_in_2nd
),
736 MIN(sizeof (struct pf_pptp_ctrl_call_in_3rd
),
737 MIN(sizeof (struct pf_pptp_ctrl_call_clr
),
738 MIN(sizeof (struct pf_pptp_ctrl_call_disc
),
739 MIN(sizeof (struct pf_pptp_ctrl_error
),
740 sizeof (struct pf_pptp_ctrl_set_linkinfo
)
743 union pf_pptp_ctrl_msg_union
{
744 struct pf_pptp_ctrl_start_req start_req
;
745 struct pf_pptp_ctrl_start_rpy start_rpy
;
746 struct pf_pptp_ctrl_stop_req stop_req
;
747 struct pf_pptp_ctrl_stop_rpy stop_rpy
;
748 struct pf_pptp_ctrl_echo_req echo_req
;
749 struct pf_pptp_ctrl_echo_rpy echo_rpy
;
750 struct pf_pptp_ctrl_call_out_req call_out_req
;
751 struct pf_pptp_ctrl_call_out_rpy call_out_rpy
;
752 struct pf_pptp_ctrl_call_in_1st call_in_1st
;
753 struct pf_pptp_ctrl_call_in_2nd call_in_2nd
;
754 struct pf_pptp_ctrl_call_in_3rd call_in_3rd
;
755 struct pf_pptp_ctrl_call_clr call_clr
;
756 struct pf_pptp_ctrl_call_disc call_disc
;
757 struct pf_pptp_ctrl_error error
;
758 struct pf_pptp_ctrl_set_linkinfo set_linkinfo
;
762 struct pf_pptp_ctrl_msg
{
763 struct pf_pptp_hdr hdr
;
764 struct pf_pptp_ctrl_hdr ctrl
;
765 union pf_pptp_ctrl_msg_union msg
;
#define	PF_GRE_FLAG_CHECKSUM_PRESENT	0x8000
#define	PF_GRE_FLAG_VERSION_MASK	0x0007
#define	PF_GRE_PPP_ETHERTYPE		0x880B

struct pf_grev1_hdr {
	u_int16_t protocol_type;
	u_int16_t payload_length;
};

static const u_int16_t PF_IKE_PORT = 500;

struct pf_ike_hdr {
	u_int64_t initiator_cookie, responder_cookie;
	u_int8_t next_payload, version, exchange_type, flags;
	u_int32_t message_id, length;
};

#define	PF_IKE_PACKET_MINSIZE	(sizeof (struct pf_ike_hdr))

#define	PF_IKEv1_EXCHTYPE_BASE			1
#define	PF_IKEv1_EXCHTYPE_ID_PROTECT		2
#define	PF_IKEv1_EXCHTYPE_AUTH_ONLY		3
#define	PF_IKEv1_EXCHTYPE_AGGRESSIVE		4
#define	PF_IKEv1_EXCHTYPE_INFORMATIONAL		5
#define	PF_IKEv2_EXCHTYPE_SA_INIT		34
#define	PF_IKEv2_EXCHTYPE_AUTH			35
#define	PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA	36
#define	PF_IKEv2_EXCHTYPE_INFORMATIONAL		37

#define	PF_IKEv1_FLAG_E		0x01
#define	PF_IKEv1_FLAG_C		0x02
#define	PF_IKEv1_FLAG_A		0x04
#define	PF_IKEv2_FLAG_I		0x08
#define	PF_IKEv2_FLAG_V		0x10
#define	PF_IKEv2_FLAG_R		0x20
818 pf_src_compare(struct pf_src_node
*a
, struct pf_src_node
*b
)
822 if (a
->rule
.ptr
> b
->rule
.ptr
)
824 if (a
->rule
.ptr
< b
->rule
.ptr
)
826 if ((diff
= a
->af
- b
->af
) != 0)
831 if (a
->addr
.addr32
[0] > b
->addr
.addr32
[0])
833 if (a
->addr
.addr32
[0] < b
->addr
.addr32
[0])
839 if (a
->addr
.addr32
[3] > b
->addr
.addr32
[3])
841 if (a
->addr
.addr32
[3] < b
->addr
.addr32
[3])
843 if (a
->addr
.addr32
[2] > b
->addr
.addr32
[2])
845 if (a
->addr
.addr32
[2] < b
->addr
.addr32
[2])
847 if (a
->addr
.addr32
[1] > b
->addr
.addr32
[1])
849 if (a
->addr
.addr32
[1] < b
->addr
.addr32
[1])
851 if (a
->addr
.addr32
[0] > b
->addr
.addr32
[0])
853 if (a
->addr
.addr32
[0] < b
->addr
.addr32
[0])
862 pf_state_compare_lan_ext(struct pf_state_key
*a
, struct pf_state_key
*b
)
865 #ifndef NO_APPLE_EXTENSIONS
869 if ((diff
= a
->proto
- b
->proto
) != 0)
871 if ((diff
= a
->af
- b
->af
) != 0)
874 #ifndef NO_APPLE_EXTENSIONS
875 extfilter
= PF_EXTFILTER_APD
;
880 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
885 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
887 if ((diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
892 if ((diff
= a
->proto_variant
- b
->proto_variant
))
894 extfilter
= a
->proto_variant
;
895 if ((diff
= a
->lan
.xport
.port
- b
->lan
.xport
.port
) != 0)
897 if ((extfilter
< PF_EXTFILTER_AD
) &&
898 (diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
903 if (a
->proto_variant
== PF_GRE_PPTP_VARIANT
&&
904 a
->proto_variant
== b
->proto_variant
) {
905 if (!!(diff
= a
->ext
.xport
.call_id
-
906 b
->ext
.xport
.call_id
))
912 if (!!(diff
= a
->ext
.xport
.spi
- b
->ext
.xport
.spi
))
924 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
926 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
928 #ifndef NO_APPLE_EXTENSIONS
929 if (extfilter
< PF_EXTFILTER_EI
) {
930 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
932 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
936 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
938 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
945 #ifndef NO_APPLE_EXTENSIONS
946 if (a
->lan
.addr
.addr32
[3] > b
->lan
.addr
.addr32
[3])
948 if (a
->lan
.addr
.addr32
[3] < b
->lan
.addr
.addr32
[3])
950 if (a
->lan
.addr
.addr32
[2] > b
->lan
.addr
.addr32
[2])
952 if (a
->lan
.addr
.addr32
[2] < b
->lan
.addr
.addr32
[2])
954 if (a
->lan
.addr
.addr32
[1] > b
->lan
.addr
.addr32
[1])
956 if (a
->lan
.addr
.addr32
[1] < b
->lan
.addr
.addr32
[1])
958 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
960 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
962 if (extfilter
< PF_EXTFILTER_EI
||
963 !PF_AZERO(&b
->ext
.addr
, AF_INET6
)) {
964 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
966 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
968 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
970 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
972 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
974 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
976 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
978 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
982 if (a
->lan
.addr
.addr32
[3] > b
->lan
.addr
.addr32
[3])
984 if (a
->lan
.addr
.addr32
[3] < b
->lan
.addr
.addr32
[3])
986 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
988 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
990 if (a
->lan
.addr
.addr32
[2] > b
->lan
.addr
.addr32
[2])
992 if (a
->lan
.addr
.addr32
[2] < b
->lan
.addr
.addr32
[2])
994 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
996 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
998 if (a
->lan
.addr
.addr32
[1] > b
->lan
.addr
.addr32
[1])
1000 if (a
->lan
.addr
.addr32
[1] < b
->lan
.addr
.addr32
[1])
1002 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
1004 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1006 if (a
->lan
.addr
.addr32
[0] > b
->lan
.addr
.addr32
[0])
1008 if (a
->lan
.addr
.addr32
[0] < b
->lan
.addr
.addr32
[0])
1010 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1012 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1019 #ifndef NO_APPLE_EXTENSIONS
1020 if (a
->app_state
&& b
->app_state
) {
1021 if (a
->app_state
->compare_lan_ext
&&
1022 b
->app_state
->compare_lan_ext
) {
1023 diff
= (const char *)b
->app_state
->compare_lan_ext
-
1024 (const char *)a
->app_state
->compare_lan_ext
;
1027 diff
= a
->app_state
->compare_lan_ext(a
->app_state
,
1034 if ((diff
= a
->lan
.port
- b
->lan
.port
) != 0)
1036 if ((diff
= a
->ext
.port
- b
->ext
.port
) != 0)
1044 pf_state_compare_ext_gwy(struct pf_state_key
*a
, struct pf_state_key
*b
)
1047 #ifndef NO_APPLE_EXTENSIONS
1051 if ((diff
= a
->proto
- b
->proto
) != 0)
1054 if ((diff
= a
->af
- b
->af
) != 0)
1057 #ifndef NO_APPLE_EXTENSIONS
1058 extfilter
= PF_EXTFILTER_APD
;
1062 case IPPROTO_ICMPV6
:
1063 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1068 if ((diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
1070 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1075 if ((diff
= a
->proto_variant
- b
->proto_variant
))
1077 extfilter
= a
->proto_variant
;
1078 if ((diff
= a
->gwy
.xport
.port
- b
->gwy
.xport
.port
) != 0)
1080 if ((extfilter
< PF_EXTFILTER_AD
) &&
1081 (diff
= a
->ext
.xport
.port
- b
->ext
.xport
.port
) != 0)
1086 if (a
->proto_variant
== PF_GRE_PPTP_VARIANT
&&
1087 a
->proto_variant
== b
->proto_variant
) {
1088 if (!!(diff
= a
->gwy
.xport
.call_id
-
1089 b
->gwy
.xport
.call_id
))
1095 if (!!(diff
= a
->gwy
.xport
.spi
- b
->gwy
.xport
.spi
))
1107 #ifndef NO_APPLE_EXTENSIONS
1108 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1110 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1112 if (extfilter
< PF_EXTFILTER_EI
) {
1113 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1115 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1119 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1121 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1123 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1125 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1132 #ifndef NO_APPLE_EXTENSIONS
1133 if (a
->gwy
.addr
.addr32
[3] > b
->gwy
.addr
.addr32
[3])
1135 if (a
->gwy
.addr
.addr32
[3] < b
->gwy
.addr
.addr32
[3])
1137 if (a
->gwy
.addr
.addr32
[2] > b
->gwy
.addr
.addr32
[2])
1139 if (a
->gwy
.addr
.addr32
[2] < b
->gwy
.addr
.addr32
[2])
1141 if (a
->gwy
.addr
.addr32
[1] > b
->gwy
.addr
.addr32
[1])
1143 if (a
->gwy
.addr
.addr32
[1] < b
->gwy
.addr
.addr32
[1])
1145 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1147 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1149 if (extfilter
< PF_EXTFILTER_EI
||
1150 !PF_AZERO(&b
->ext
.addr
, AF_INET6
)) {
1151 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
1153 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
1155 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
1157 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
1159 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
1161 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1163 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1165 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1169 if (a
->ext
.addr
.addr32
[3] > b
->ext
.addr
.addr32
[3])
1171 if (a
->ext
.addr
.addr32
[3] < b
->ext
.addr
.addr32
[3])
1173 if (a
->gwy
.addr
.addr32
[3] > b
->gwy
.addr
.addr32
[3])
1175 if (a
->gwy
.addr
.addr32
[3] < b
->gwy
.addr
.addr32
[3])
1177 if (a
->ext
.addr
.addr32
[2] > b
->ext
.addr
.addr32
[2])
1179 if (a
->ext
.addr
.addr32
[2] < b
->ext
.addr
.addr32
[2])
1181 if (a
->gwy
.addr
.addr32
[2] > b
->gwy
.addr
.addr32
[2])
1183 if (a
->gwy
.addr
.addr32
[2] < b
->gwy
.addr
.addr32
[2])
1185 if (a
->ext
.addr
.addr32
[1] > b
->ext
.addr
.addr32
[1])
1187 if (a
->ext
.addr
.addr32
[1] < b
->ext
.addr
.addr32
[1])
1189 if (a
->gwy
.addr
.addr32
[1] > b
->gwy
.addr
.addr32
[1])
1191 if (a
->gwy
.addr
.addr32
[1] < b
->gwy
.addr
.addr32
[1])
1193 if (a
->ext
.addr
.addr32
[0] > b
->ext
.addr
.addr32
[0])
1195 if (a
->ext
.addr
.addr32
[0] < b
->ext
.addr
.addr32
[0])
1197 if (a
->gwy
.addr
.addr32
[0] > b
->gwy
.addr
.addr32
[0])
1199 if (a
->gwy
.addr
.addr32
[0] < b
->gwy
.addr
.addr32
[0])
1206 #ifndef NO_APPLE_EXTENSIONS
1207 if (a
->app_state
&& b
->app_state
) {
1208 if (a
->app_state
->compare_ext_gwy
&&
1209 b
->app_state
->compare_ext_gwy
) {
1210 diff
= (const char *)b
->app_state
->compare_ext_gwy
-
1211 (const char *)a
->app_state
->compare_ext_gwy
;
1214 diff
= a
->app_state
->compare_ext_gwy(a
->app_state
,
1221 if ((diff
= a
->ext
.port
- b
->ext
.port
) != 0)
1223 if ((diff
= a
->gwy
.port
- b
->gwy
.port
) != 0)
1231 pf_state_compare_id(struct pf_state
*a
, struct pf_state
*b
)
1237 if (a
->creatorid
> b
->creatorid
)
1239 if (a
->creatorid
< b
->creatorid
)
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)

		dst->addr32[0] = src->addr32[0];

		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
1266 pf_find_state_byid(struct pf_state_cmp
*key
)
1268 pf_status
.fcounters
[FCNT_STATE_SEARCH
]++;
1270 return (RB_FIND(pf_state_tree_id
, &tree_id
, (struct pf_state
*)key
));
1273 static struct pf_state
*
1274 pf_find_state(struct pfi_kif
*kif
, struct pf_state_key_cmp
*key
, u_int dir
)
1276 struct pf_state_key
*sk
= NULL
;
1279 pf_status
.fcounters
[FCNT_STATE_SEARCH
]++;
1283 sk
= RB_FIND(pf_state_tree_lan_ext
, &pf_statetbl_lan_ext
,
1284 (struct pf_state_key
*)key
);
1287 sk
= RB_FIND(pf_state_tree_ext_gwy
, &pf_statetbl_ext_gwy
,
1288 (struct pf_state_key
*)key
);
1291 panic("pf_find_state");
1294 /* list is sorted, if-bound states before floating ones */
1296 TAILQ_FOREACH(s
, &sk
->states
, next
)
1297 if (s
->kif
== pfi_all
|| s
->kif
== kif
)
1304 pf_find_state_all(struct pf_state_key_cmp
*key
, u_int dir
, int *more
)
1306 struct pf_state_key
*sk
= NULL
;
1307 struct pf_state
*s
, *ret
= NULL
;
1309 pf_status
.fcounters
[FCNT_STATE_SEARCH
]++;
1313 sk
= RB_FIND(pf_state_tree_lan_ext
,
1314 &pf_statetbl_lan_ext
, (struct pf_state_key
*)key
);
1317 sk
= RB_FIND(pf_state_tree_ext_gwy
,
1318 &pf_statetbl_ext_gwy
, (struct pf_state_key
*)key
);
1321 panic("pf_find_state_all");
1325 ret
= TAILQ_FIRST(&sk
->states
);
1329 TAILQ_FOREACH(s
, &sk
->states
, next
)
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)

	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = pf_time_second();

pf_add_threshold(struct pf_threshold *threshold)

	u_int32_t t = pf_time_second(), diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
		threshold->count -= threshold->count * diff /
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;

pf_check_threshold(struct pf_threshold *threshold)

	return (threshold->count > threshold->limit);
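
/*
 * Worked example (illustrative, not from the original source): with a rule
 * limit of 10 connections per 5 seconds, and assuming PF_THRESHOLD_MULT is
 * 1000, pf_init_threshold() sets limit = 10000.  Each pf_add_threshold()
 * call first decays the running count by count * diff / seconds (a linear
 * decay over the window) and then adds 1000 for the new connection.  If 11
 * connections arrive within the same window, the count reaches 11000 >
 * 10000 and pf_check_threshold() reports the limit as exceeded.
 */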
1367 pf_src_connlimit(struct pf_state
**state
)
1371 (*state
)->src_node
->conn
++;
1372 VERIFY((*state
)->src_node
->conn
!= 0);
1373 (*state
)->src
.tcp_est
= 1;
1374 pf_add_threshold(&(*state
)->src_node
->conn_rate
);
1376 if ((*state
)->rule
.ptr
->max_src_conn
&&
1377 (*state
)->rule
.ptr
->max_src_conn
<
1378 (*state
)->src_node
->conn
) {
1379 pf_status
.lcounters
[LCNT_SRCCONN
]++;
1383 if ((*state
)->rule
.ptr
->max_src_conn_rate
.limit
&&
1384 pf_check_threshold(&(*state
)->src_node
->conn_rate
)) {
1385 pf_status
.lcounters
[LCNT_SRCCONNRATE
]++;
1392 if ((*state
)->rule
.ptr
->overload_tbl
) {
1394 u_int32_t killed
= 0;
1396 pf_status
.lcounters
[LCNT_OVERLOAD_TABLE
]++;
1397 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1398 printf("pf_src_connlimit: blocking address ");
1399 pf_print_host(&(*state
)->src_node
->addr
, 0,
1400 (*state
)->state_key
->af
);
1403 bzero(&p
, sizeof (p
));
1404 p
.pfra_af
= (*state
)->state_key
->af
;
1405 switch ((*state
)->state_key
->af
) {
1409 p
.pfra_ip4addr
= (*state
)->src_node
->addr
.v4
;
1415 p
.pfra_ip6addr
= (*state
)->src_node
->addr
.v6
;
1420 pfr_insert_kentry((*state
)->rule
.ptr
->overload_tbl
,
1421 &p
, pf_calendar_time_second());
1423 /* kill existing states if that's required. */
1424 if ((*state
)->rule
.ptr
->flush
) {
1425 struct pf_state_key
*sk
;
1426 struct pf_state
*st
;
1428 pf_status
.lcounters
[LCNT_OVERLOAD_FLUSH
]++;
1429 RB_FOREACH(st
, pf_state_tree_id
, &tree_id
) {
				 * Kill states from this source.  (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is
				 * not set.)
1437 (*state
)->state_key
->af
&&
1438 (((*state
)->state_key
->direction
==
1440 PF_AEQ(&(*state
)->src_node
->addr
,
1441 &sk
->lan
.addr
, sk
->af
)) ||
1442 ((*state
)->state_key
->direction
== PF_IN
&&
1443 PF_AEQ(&(*state
)->src_node
->addr
,
1444 &sk
->ext
.addr
, sk
->af
))) &&
1445 ((*state
)->rule
.ptr
->flush
&
1447 (*state
)->rule
.ptr
== st
->rule
.ptr
)) {
1448 st
->timeout
= PFTM_PURGE
;
1449 st
->src
.state
= st
->dst
.state
=
1454 if (pf_status
.debug
>= PF_DEBUG_MISC
)
1455 printf(", %u states killed", killed
);
1457 if (pf_status
.debug
>= PF_DEBUG_MISC
)
1461 /* kill this state */
1462 (*state
)->timeout
= PFTM_PURGE
;
1463 (*state
)->src
.state
= (*state
)->dst
.state
= TCPS_CLOSED
;
1468 pf_insert_src_node(struct pf_src_node
**sn
, struct pf_rule
*rule
,
1469 struct pf_addr
*src
, sa_family_t af
)
1471 struct pf_src_node k
;
1475 PF_ACPY(&k
.addr
, src
, af
);
1476 if (rule
->rule_flag
& PFRULE_RULESRCTRACK
||
1477 rule
->rpool
.opts
& PF_POOL_STICKYADDR
)
1481 pf_status
.scounters
[SCNT_SRC_NODE_SEARCH
]++;
1482 *sn
= RB_FIND(pf_src_tree
, &tree_src_tracking
, &k
);
1485 if (!rule
->max_src_nodes
||
1486 rule
->src_nodes
< rule
->max_src_nodes
)
1487 (*sn
) = pool_get(&pf_src_tree_pl
, PR_WAITOK
);
1489 pf_status
.lcounters
[LCNT_SRCNODES
]++;
1492 bzero(*sn
, sizeof (struct pf_src_node
));
1494 pf_init_threshold(&(*sn
)->conn_rate
,
1495 rule
->max_src_conn_rate
.limit
,
1496 rule
->max_src_conn_rate
.seconds
);
1499 if (rule
->rule_flag
& PFRULE_RULESRCTRACK
||
1500 rule
->rpool
.opts
& PF_POOL_STICKYADDR
)
1501 (*sn
)->rule
.ptr
= rule
;
1503 (*sn
)->rule
.ptr
= NULL
;
1504 PF_ACPY(&(*sn
)->addr
, src
, af
);
1505 if (RB_INSERT(pf_src_tree
,
1506 &tree_src_tracking
, *sn
) != NULL
) {
1507 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1508 printf("pf: src_tree insert failed: ");
1509 pf_print_host(&(*sn
)->addr
, 0, af
);
1512 pool_put(&pf_src_tree_pl
, *sn
);
1515 (*sn
)->creation
= pf_time_second();
1516 (*sn
)->ruletype
= rule
->action
;
1517 if ((*sn
)->rule
.ptr
!= NULL
)
1518 (*sn
)->rule
.ptr
->src_nodes
++;
1519 pf_status
.scounters
[SCNT_SRC_NODE_INSERT
]++;
1520 pf_status
.src_nodes
++;
1522 if (rule
->max_src_states
&&
1523 (*sn
)->states
>= rule
->max_src_states
) {
1524 pf_status
.lcounters
[LCNT_SRCSTATES
]++;
1532 pf_stateins_err(const char *tree
, struct pf_state
*s
, struct pfi_kif
*kif
)
1534 struct pf_state_key
*sk
= s
->state_key
;
1536 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1537 #ifndef NO_APPLE_EXTENSIONS
1538 printf("pf: state insert failed: %s %s ", tree
, kif
->pfik_name
);
1539 switch (sk
->proto
) {
1549 case IPPROTO_ICMPV6
:
1553 printf("PROTO=%u", sk
->proto
);
1557 pf_print_sk_host(&sk
->lan
, sk
->af
, sk
->proto
,
1560 pf_print_sk_host(&sk
->gwy
, sk
->af
, sk
->proto
,
1563 pf_print_sk_host(&sk
->ext
, sk
->af
, sk
->proto
,
1566 printf("pf: state insert failed: %s %s", tree
, kif
->pfik_name
);
1568 pf_print_host(&sk
->lan
.addr
, sk
->lan
.port
,
1571 pf_print_host(&sk
->gwy
.addr
, sk
->gwy
.port
,
1574 pf_print_host(&sk
->ext
.addr
, sk
->ext
.port
,
1577 if (s
->sync_flags
& PFSTATE_FROMSYNC
)
1578 printf(" (from sync)");
1584 pf_insert_state(struct pfi_kif
*kif
, struct pf_state
*s
)
1586 struct pf_state_key
*cur
;
1587 struct pf_state
*sp
;
1589 VERIFY(s
->state_key
!= NULL
);
1592 if ((cur
= RB_INSERT(pf_state_tree_lan_ext
, &pf_statetbl_lan_ext
,
1593 s
->state_key
)) != NULL
) {
1594 /* key exists. check for same kif, if none, add to key */
1595 TAILQ_FOREACH(sp
, &cur
->states
, next
)
1596 if (sp
->kif
== kif
) { /* collision! */
1597 pf_stateins_err("tree_lan_ext", s
, kif
);
1599 PF_DT_SKIP_LANEXT
|PF_DT_SKIP_EXTGWY
);
1602 pf_detach_state(s
, PF_DT_SKIP_LANEXT
|PF_DT_SKIP_EXTGWY
);
1603 pf_attach_state(cur
, s
, kif
== pfi_all
? 1 : 0);
1606 /* if cur != NULL, we already found a state key and attached to it */
1607 if (cur
== NULL
&& (cur
= RB_INSERT(pf_state_tree_ext_gwy
,
1608 &pf_statetbl_ext_gwy
, s
->state_key
)) != NULL
) {
1609 /* must not happen. we must have found the sk above! */
1610 pf_stateins_err("tree_ext_gwy", s
, kif
);
1611 pf_detach_state(s
, PF_DT_SKIP_EXTGWY
);
1615 if (s
->id
== 0 && s
->creatorid
== 0) {
1616 s
->id
= htobe64(pf_status
.stateid
++);
1617 s
->creatorid
= pf_status
.hostid
;
1619 if (RB_INSERT(pf_state_tree_id
, &tree_id
, s
) != NULL
) {
1620 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
1621 printf("pf: state insert failed: "
1622 "id: %016llx creatorid: %08x",
1623 be64toh(s
->id
), ntohl(s
->creatorid
));
1624 if (s
->sync_flags
& PFSTATE_FROMSYNC
)
1625 printf(" (from sync)");
1628 pf_detach_state(s
, 0);
1631 TAILQ_INSERT_TAIL(&state_list
, s
, entry_list
);
1632 pf_status
.fcounters
[FCNT_STATE_INSERT
]++;
1634 VERIFY(pf_status
.states
!= 0);
1635 pfi_kif_ref(kif
, PFI_KIF_REF_STATE
);
1637 pfsync_insert_state(s
);
1643 pf_purge_thread_fn(void *v
, wait_result_t w
)
1645 #pragma unused(v, w)
1646 u_int32_t nloops
= 0;
1650 (void) tsleep(pf_purge_thread_fn
, PWAIT
, "pftm", t
* hz
);
1652 lck_rw_lock_shared(pf_perim_lock
);
1653 lck_mtx_lock(pf_lock
);
1655 /* purge everything if not running */
1656 if (!pf_status
.running
) {
1657 pf_purge_expired_states(pf_status
.states
);
1658 pf_purge_expired_fragments();
1659 pf_purge_expired_src_nodes();
1661 /* terminate thread (we don't currently do this) */
1662 if (pf_purge_thread
== NULL
) {
1663 lck_mtx_unlock(pf_lock
);
1664 lck_rw_done(pf_perim_lock
);
1666 thread_deallocate(current_thread());
1667 thread_terminate(current_thread());
1671 /* if there's nothing left, sleep w/o timeout */
1672 if (pf_status
.states
== 0 &&
1673 pf_normalize_isempty() &&
1674 RB_EMPTY(&tree_src_tracking
))
1677 lck_mtx_unlock(pf_lock
);
1678 lck_rw_done(pf_perim_lock
);
1681 } else if (t
== 0) {
1682 /* Set timeout to 1 second */
1686 /* process a fraction of the state table every second */
1687 pf_purge_expired_states(1 + (pf_status
.states
1688 / pf_default_rule
.timeout
[PFTM_INTERVAL
]));
1690 /* purge other expired types every PFTM_INTERVAL seconds */
1691 if (++nloops
>= pf_default_rule
.timeout
[PFTM_INTERVAL
]) {
1692 pf_purge_expired_fragments();
1693 pf_purge_expired_src_nodes();
1697 lck_mtx_unlock(pf_lock
);
1698 lck_rw_done(pf_perim_lock
);
1703 pf_state_expires(const struct pf_state
*state
)
1710 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1712 /* handle all PFTM_* > PFTM_MAX here */
1713 if (state
->timeout
== PFTM_PURGE
)
1714 return (pf_time_second());
1715 if (state
->timeout
== PFTM_UNTIL_PACKET
)
1717 VERIFY(state
->timeout
!= PFTM_UNLINKED
);
1718 VERIFY(state
->timeout
< PFTM_MAX
);
1719 t
= state
->rule
.ptr
->timeout
[state
->timeout
];
1721 t
= pf_default_rule
.timeout
[state
->timeout
];
1722 start
= state
->rule
.ptr
->timeout
[PFTM_ADAPTIVE_START
];
1724 end
= state
->rule
.ptr
->timeout
[PFTM_ADAPTIVE_END
];
1725 states
= state
->rule
.ptr
->states
;
1727 start
= pf_default_rule
.timeout
[PFTM_ADAPTIVE_START
];
1728 end
= pf_default_rule
.timeout
[PFTM_ADAPTIVE_END
];
1729 states
= pf_status
.states
;
1731 if (end
&& states
> start
&& start
< end
) {
1733 return (state
->expire
+ t
* (end
- states
) /
1736 return (pf_time_second());
1738 return (state
->expire
+ t
);
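
/*
 * Worked example (illustrative, not from the original source): with
 * adaptive.start = 6000, adaptive.end = 12000 and 9000 states in the table,
 * the scaling factor above is (end - states) / (end - start) = 3000 / 6000,
 * so a nominal 60-second timeout is cut to 30 seconds; once the table holds
 * 'end' states or more, the remaining lifetime drops to zero and the state
 * expires immediately.
 */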
1742 pf_purge_expired_src_nodes(void)
1744 struct pf_src_node
*cur
, *next
;
1746 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1748 for (cur
= RB_MIN(pf_src_tree
, &tree_src_tracking
); cur
; cur
= next
) {
1749 next
= RB_NEXT(pf_src_tree
, &tree_src_tracking
, cur
);
1751 if (cur
->states
<= 0 && cur
->expire
<= pf_time_second()) {
1752 if (cur
->rule
.ptr
!= NULL
) {
1753 cur
->rule
.ptr
->src_nodes
--;
1754 if (cur
->rule
.ptr
->states
<= 0 &&
1755 cur
->rule
.ptr
->max_src_nodes
<= 0)
1756 pf_rm_rule(NULL
, cur
->rule
.ptr
);
1758 RB_REMOVE(pf_src_tree
, &tree_src_tracking
, cur
);
1759 pf_status
.scounters
[SCNT_SRC_NODE_REMOVALS
]++;
1760 pf_status
.src_nodes
--;
1761 pool_put(&pf_src_tree_pl
, cur
);
1767 pf_src_tree_remove_state(struct pf_state
*s
)
1771 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1773 if (s
->src_node
!= NULL
) {
1774 if (s
->src
.tcp_est
) {
1775 VERIFY(s
->src_node
->conn
> 0);
1776 --s
->src_node
->conn
;
1778 VERIFY(s
->src_node
->states
> 0);
1779 if (--s
->src_node
->states
<= 0) {
1780 t
= s
->rule
.ptr
->timeout
[PFTM_SRC_NODE
];
1782 t
= pf_default_rule
.timeout
[PFTM_SRC_NODE
];
1783 s
->src_node
->expire
= pf_time_second() + t
;
1786 if (s
->nat_src_node
!= s
->src_node
&& s
->nat_src_node
!= NULL
) {
1787 VERIFY(s
->nat_src_node
->states
> 0);
1788 if (--s
->nat_src_node
->states
<= 0) {
1789 t
= s
->rule
.ptr
->timeout
[PFTM_SRC_NODE
];
1791 t
= pf_default_rule
.timeout
[PFTM_SRC_NODE
];
1792 s
->nat_src_node
->expire
= pf_time_second() + t
;
1795 s
->src_node
= s
->nat_src_node
= NULL
;
1799 pf_unlink_state(struct pf_state
*cur
)
1801 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1803 #ifndef NO_APPLE_EXTENSIONS
1804 if (cur
->src
.state
== PF_TCPS_PROXY_DST
) {
1805 pf_send_tcp(cur
->rule
.ptr
, cur
->state_key
->af
,
1806 &cur
->state_key
->ext
.addr
, &cur
->state_key
->lan
.addr
,
1807 cur
->state_key
->ext
.xport
.port
,
1808 cur
->state_key
->lan
.xport
.port
,
1809 cur
->src
.seqhi
, cur
->src
.seqlo
+ 1,
1810 TH_RST
|TH_ACK
, 0, 0, 0, 1, cur
->tag
, NULL
, NULL
);
1813 hook_runloop(&cur
->unlink_hooks
, HOOK_REMOVE
|HOOK_FREE
);
1815 if (cur
->src
.state
== PF_TCPS_PROXY_DST
) {
1816 pf_send_tcp(cur
->rule
.ptr
, cur
->state_key
->af
,
1817 &cur
->state_key
->ext
.addr
, &cur
->state_key
->lan
.addr
,
1818 cur
->state_key
->ext
.port
, cur
->state_key
->lan
.port
,
1819 cur
->src
.seqhi
, cur
->src
.seqlo
+ 1,
1820 TH_RST
|TH_ACK
, 0, 0, 0, 1, cur
->tag
, NULL
, NULL
);
1823 RB_REMOVE(pf_state_tree_id
, &tree_id
, cur
);
1825 if (cur
->creatorid
== pf_status
.hostid
)
1826 pfsync_delete_state(cur
);
1828 cur
->timeout
= PFTM_UNLINKED
;
1829 pf_src_tree_remove_state(cur
);
1830 pf_detach_state(cur
, 0);
1833 /* callers should be at splpf and hold the
1834 * write_lock on pf_consistency_lock */
1836 pf_free_state(struct pf_state
*cur
)
1838 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1840 if (pfsyncif
!= NULL
&&
1841 (pfsyncif
->sc_bulk_send_next
== cur
||
1842 pfsyncif
->sc_bulk_terminator
== cur
))
1845 VERIFY(cur
->timeout
== PFTM_UNLINKED
);
1846 VERIFY(cur
->rule
.ptr
->states
> 0);
1847 if (--cur
->rule
.ptr
->states
<= 0 &&
1848 cur
->rule
.ptr
->src_nodes
<= 0)
1849 pf_rm_rule(NULL
, cur
->rule
.ptr
);
1850 if (cur
->nat_rule
.ptr
!= NULL
) {
1851 VERIFY(cur
->nat_rule
.ptr
->states
> 0);
1852 if (--cur
->nat_rule
.ptr
->states
<= 0 &&
1853 cur
->nat_rule
.ptr
->src_nodes
<= 0)
1854 pf_rm_rule(NULL
, cur
->nat_rule
.ptr
);
1856 if (cur
->anchor
.ptr
!= NULL
) {
1857 VERIFY(cur
->anchor
.ptr
->states
> 0);
1858 if (--cur
->anchor
.ptr
->states
<= 0)
1859 pf_rm_rule(NULL
, cur
->anchor
.ptr
);
1861 pf_normalize_tcp_cleanup(cur
);
1862 pfi_kif_unref(cur
->kif
, PFI_KIF_REF_STATE
);
1863 TAILQ_REMOVE(&state_list
, cur
, entry_list
);
1865 pf_tag_unref(cur
->tag
);
1866 pool_put(&pf_state_pl
, cur
);
1867 pf_status
.fcounters
[FCNT_STATE_REMOVALS
]++;
1868 VERIFY(pf_status
.states
> 0);
1873 pf_purge_expired_states(u_int32_t maxcheck
)
1875 static struct pf_state
*cur
= NULL
;
1876 struct pf_state
*next
;
1878 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
1880 while (maxcheck
--) {
1881 /* wrap to start of list when we hit the end */
1883 cur
= TAILQ_FIRST(&state_list
);
1885 break; /* list empty */
1888 /* get next state, as cur may get deleted */
1889 next
= TAILQ_NEXT(cur
, entry_list
);
1891 if (cur
->timeout
== PFTM_UNLINKED
) {
1893 } else if (pf_state_expires(cur
) <= pf_time_second()) {
1894 /* unlink and free expired state */
1895 pf_unlink_state(cur
);
pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE)
	if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)

pf_tbladdr_remove(struct pf_addr_wrap *aw)

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
	pfr_detach_table(aw->p.tbl);

pf_tbladdr_copyout(struct pf_addr_wrap *aw)

	struct pfr_ktable *kt = aw->p.tbl;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (aw->type != PF_ADDR_TABLE || kt == NULL)
	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
		kt = kt->pfrkt_root;
	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1941 #ifndef NO_APPLE_EXTENSIONS
1943 pf_print_addr(struct pf_addr
*addr
, sa_family_t af
)
1948 u_int32_t a
= ntohl(addr
->addr32
[0]);
1949 printf("%u.%u.%u.%u", (a
>>24)&255, (a
>>16)&255,
1957 u_int8_t i
, curstart
= 255, curend
= 0,
1958 maxstart
= 0, maxend
= 0;
1959 for (i
= 0; i
< 8; i
++) {
1960 if (!addr
->addr16
[i
]) {
1961 if (curstart
== 255)
1967 if ((curend
- curstart
) >
1968 (maxend
- maxstart
)) {
1969 maxstart
= curstart
;
1976 for (i
= 0; i
< 8; i
++) {
1977 if (i
>= maxstart
&& i
<= maxend
) {
1986 b
= ntohs(addr
->addr16
[i
]);
1999 pf_print_sk_host(struct pf_state_host
*sh
, sa_family_t af
, int proto
,
2000 u_int8_t proto_variant
)
2002 pf_print_addr(&sh
->addr
, af
);
2007 printf("[%08x]", ntohl(sh
->xport
.spi
));
2011 if (proto_variant
== PF_GRE_PPTP_VARIANT
)
2012 printf("[%u]", ntohs(sh
->xport
.call_id
));
2017 printf("[%u]", ntohs(sh
->xport
.port
));
2027 pf_print_host(struct pf_addr
*addr
, u_int16_t p
, sa_family_t af
)
2029 #ifndef NO_APPLE_EXTENSIONS
2030 pf_print_addr(addr
, af
);
2032 printf("[%u]", ntohs(p
));
2037 u_int32_t a
= ntohl(addr
->addr32
[0]);
2038 printf("%u.%u.%u.%u", (a
>>24)&255, (a
>>16)&255,
2050 u_int8_t i
, curstart
= 255, curend
= 0,
2051 maxstart
= 0, maxend
= 0;
2052 for (i
= 0; i
< 8; i
++) {
2053 if (!addr
->addr16
[i
]) {
2054 if (curstart
== 255)
2060 if ((curend
- curstart
) >
2061 (maxend
- maxstart
)) {
2062 maxstart
= curstart
;
2069 for (i
= 0; i
< 8; i
++) {
2070 if (i
>= maxstart
&& i
<= maxend
) {
2079 b
= ntohs(addr
->addr16
[i
]);
2097 pf_print_state(struct pf_state
*s
)
2099 struct pf_state_key
*sk
= s
->state_key
;
2100 switch (sk
->proto
) {
2101 #ifndef NO_APPLE_EXTENSIONS
2106 printf("GRE%u ", sk
->proto_variant
);
2118 case IPPROTO_ICMPV6
:
2122 printf("%u ", sk
->proto
);
2125 #ifndef NO_APPLE_EXTENSIONS
2126 pf_print_sk_host(&sk
->lan
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2128 pf_print_sk_host(&sk
->gwy
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2130 pf_print_sk_host(&sk
->ext
, sk
->af
, sk
->proto
, sk
->proto_variant
);
2132 pf_print_host(&sk
->lan
.addr
, sk
->lan
.port
, sk
->af
);
2134 pf_print_host(&sk
->gwy
.addr
, sk
->gwy
.port
, sk
->af
);
2136 pf_print_host(&sk
->ext
.addr
, sk
->ext
.port
, sk
->af
);
2138 printf(" [lo=%u high=%u win=%u modulator=%u", s
->src
.seqlo
,
2139 s
->src
.seqhi
, s
->src
.max_win
, s
->src
.seqdiff
);
2140 if (s
->src
.wscale
&& s
->dst
.wscale
)
2141 printf(" wscale=%u", s
->src
.wscale
& PF_WSCALE_MASK
);
2143 printf(" [lo=%u high=%u win=%u modulator=%u", s
->dst
.seqlo
,
2144 s
->dst
.seqhi
, s
->dst
.max_win
, s
->dst
.seqdiff
);
2145 if (s
->src
.wscale
&& s
->dst
.wscale
)
2146 printf(" wscale=%u", s
->dst
.wscale
& PF_WSCALE_MASK
);
2148 printf(" %u:%u", s
->src
.state
, s
->dst
.state
);
2152 pf_print_flags(u_int8_t f
)
2174 #define PF_SET_SKIP_STEPS(i) \
2176 while (head[i] != cur) { \
2177 head[i]->skip[i].ptr = cur; \
2178 head[i] = TAILQ_NEXT(head[i], entries); \
2183 pf_calc_skip_steps(struct pf_rulequeue
*rules
)
2185 struct pf_rule
*cur
, *prev
, *head
[PF_SKIP_COUNT
];
2188 cur
= TAILQ_FIRST(rules
);
2190 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
2192 while (cur
!= NULL
) {
2194 if (cur
->kif
!= prev
->kif
|| cur
->ifnot
!= prev
->ifnot
)
2195 PF_SET_SKIP_STEPS(PF_SKIP_IFP
);
2196 if (cur
->direction
!= prev
->direction
)
2197 PF_SET_SKIP_STEPS(PF_SKIP_DIR
);
2198 if (cur
->af
!= prev
->af
)
2199 PF_SET_SKIP_STEPS(PF_SKIP_AF
);
2200 if (cur
->proto
!= prev
->proto
)
2201 PF_SET_SKIP_STEPS(PF_SKIP_PROTO
);
2202 if (cur
->src
.neg
!= prev
->src
.neg
||
2203 pf_addr_wrap_neq(&cur
->src
.addr
, &prev
->src
.addr
))
2204 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR
);
2205 #ifndef NO_APPLE_EXTENSIONS
2207 union pf_rule_xport
*cx
= &cur
->src
.xport
;
2208 union pf_rule_xport
*px
= &prev
->src
.xport
;
2210 switch (cur
->proto
) {
2213 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2216 if (prev
->proto
== IPPROTO_GRE
||
2217 prev
->proto
== IPPROTO_ESP
||
2218 cx
->range
.op
!= px
->range
.op
||
2219 cx
->range
.port
[0] != px
->range
.port
[0] ||
2220 cx
->range
.port
[1] != px
->range
.port
[1])
2221 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2226 if (cur
->src
.port
[0] != prev
->src
.port
[0] ||
2227 cur
->src
.port
[1] != prev
->src
.port
[1] ||
2228 cur
->src
.port_op
!= prev
->src
.port_op
)
2229 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT
);
2231 if (cur
->dst
.neg
!= prev
->dst
.neg
||
2232 pf_addr_wrap_neq(&cur
->dst
.addr
, &prev
->dst
.addr
))
2233 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR
);
2234 #ifndef NO_APPLE_EXTENSIONS
2236 union pf_rule_xport
*cx
= &cur
->dst
.xport
;
2237 union pf_rule_xport
*px
= &prev
->dst
.xport
;
2239 switch (cur
->proto
) {
2241 if (cur
->proto
!= prev
->proto
||
2242 cx
->call_id
!= px
->call_id
)
2243 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2246 if (cur
->proto
!= prev
->proto
||
2248 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2251 if (prev
->proto
== IPPROTO_GRE
||
2252 prev
->proto
== IPPROTO_ESP
||
2253 cx
->range
.op
!= px
->range
.op
||
2254 cx
->range
.port
[0] != px
->range
.port
[0] ||
2255 cx
->range
.port
[1] != px
->range
.port
[1])
2256 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2261 if (cur
->dst
.port
[0] != prev
->dst
.port
[0] ||
2262 cur
->dst
.port
[1] != prev
->dst
.port
[1] ||
2263 cur
->dst
.port_op
!= prev
->dst
.port_op
)
2264 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT
);
2268 cur
= TAILQ_NEXT(cur
, entries
);
2270 for (i
= 0; i
< PF_SKIP_COUNT
; ++i
)
2271 PF_SET_SKIP_STEPS(i
);
2275 pf_addr_wrap_neq(struct pf_addr_wrap
*aw1
, struct pf_addr_wrap
*aw2
)
2277 if (aw1
->type
!= aw2
->type
)
2279 switch (aw1
->type
) {
2280 case PF_ADDR_ADDRMASK
:
2282 if (PF_ANEQ(&aw1
->v
.a
.addr
, &aw2
->v
.a
.addr
, 0))
2284 if (PF_ANEQ(&aw1
->v
.a
.mask
, &aw2
->v
.a
.mask
, 0))
2287 case PF_ADDR_DYNIFTL
:
2288 return (aw1
->p
.dyn
->pfid_kt
!= aw2
->p
.dyn
->pfid_kt
);
2289 case PF_ADDR_NOROUTE
:
2290 case PF_ADDR_URPFFAILED
:
2293 return (aw1
->p
.tbl
!= aw2
->p
.tbl
);
2294 case PF_ADDR_RTLABEL
:
2295 return (aw1
->v
.rtlabel
!= aw2
->v
.rtlabel
);
2297 printf("invalid address type: %d\n", aw1
->type
);
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)

	l = cksum + old - new;
	l = (l >> 16) + (l & 0xffff);
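
/*
 * Worked example (illustrative, not from the original source): replacing the
 * 16-bit word 0x1234 with 0x5678 in a packet whose checksum field is 0xabcd
 * gives l = 0xabcd + 0x1234 - 0x5678 = 0x6789; there is no carry to fold, so
 * the updated checksum is 0x6789.  The udp argument exists because an
 * all-zero UDP checksum means "no checksum transmitted" and must be treated
 * specially rather than rewritten like an ordinary sum.
 */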
2318 pf_change_ap(int dir
, struct mbuf
*m
, struct pf_addr
*a
, u_int16_t
*p
,
2319 u_int16_t
*ic
, u_int16_t
*pc
, struct pf_addr
*an
, u_int16_t pn
,
2320 u_int8_t u
, sa_family_t af
)
2325 PF_ACPY(&ao
, a
, af
);
2333 *ic
= pf_cksum_fixup(pf_cksum_fixup(*ic
,
2334 ao
.addr16
[0], an
->addr16
[0], 0),
2335 ao
.addr16
[1], an
->addr16
[1], 0);
	 * If the packet originates from an ALG on the NAT gateway
	 * (source address is loopback or local), the TCP/UDP checksum
	 * field contains the pseudo-header checksum, which has not yet
	 * been complemented.
2343 if (dir
== PF_OUT
&& m
!= NULL
&&
2344 (m
->m_flags
& M_PKTHDR
) &&
2345 (m
->m_pkthdr
.csum_flags
& (CSUM_TCP
| CSUM_UDP
))) {
2346 /* Pseudo-header checksum does not include ports */
2347 *pc
= ~pf_cksum_fixup(pf_cksum_fixup(~*pc
,
2348 ao
.addr16
[0], an
->addr16
[0], u
),
2349 ao
.addr16
[1], an
->addr16
[1], u
);
2351 *pc
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc
,
2352 ao
.addr16
[0], an
->addr16
[0], u
),
2353 ao
.addr16
[1], an
->addr16
[1], u
),
	 * If the packet originates from an ALG on the NAT gateway
	 * (source address is loopback or local), the TCP/UDP checksum
	 * field contains the pseudo-header checksum, which has not yet
	 * been complemented.
2366 if (dir
== PF_OUT
&& m
!= NULL
&&
2367 (m
->m_flags
& M_PKTHDR
) &&
2368 (m
->m_pkthdr
.csum_flags
& (CSUM_TCPIPV6
| CSUM_UDPIPV6
))) {
2369 /* Pseudo-header checksum does not include ports */
2370 *pc
= ~pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2371 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2372 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(~*pc
,
2373 ao
.addr16
[0], an
->addr16
[0], u
),
2374 ao
.addr16
[1], an
->addr16
[1], u
),
2375 ao
.addr16
[2], an
->addr16
[2], u
),
2376 ao
.addr16
[3], an
->addr16
[3], u
),
2377 ao
.addr16
[4], an
->addr16
[4], u
),
2378 ao
.addr16
[5], an
->addr16
[5], u
),
2379 ao
.addr16
[6], an
->addr16
[6], u
),
2380 ao
.addr16
[7], an
->addr16
[7], u
),
2383 *pc
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2384 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2385 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc
,
2386 ao
.addr16
[0], an
->addr16
[0], u
),
2387 ao
.addr16
[1], an
->addr16
[1], u
),
2388 ao
.addr16
[2], an
->addr16
[2], u
),
2389 ao
.addr16
[3], an
->addr16
[3], u
),
2390 ao
.addr16
[4], an
->addr16
[4], u
),
2391 ao
.addr16
[5], an
->addr16
[5], u
),
2392 ao
.addr16
[6], an
->addr16
[6], u
),
2393 ao
.addr16
[7], an
->addr16
[7], u
),
2402 /* Changes a u_int32_t. Uses a void * so there are no align restrictions */
2404 pf_change_a(void *a
, u_int16_t
*c
, u_int32_t an
, u_int8_t u
)
2408 memcpy(&ao
, a
, sizeof (ao
));
2409 memcpy(a
, &an
, sizeof (u_int32_t
));
2410 *c
= pf_cksum_fixup(pf_cksum_fixup(*c
, ao
/ 65536, an
/ 65536, u
),
2411 ao
% 65536, an
% 65536, u
);
2416 pf_change_a6(struct pf_addr
*a
, u_int16_t
*c
, struct pf_addr
*an
, u_int8_t u
)
2420 PF_ACPY(&ao
, a
, AF_INET6
);
2421 PF_ACPY(a
, an
, AF_INET6
);
2423 *c
= pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2424 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2425 pf_cksum_fixup(pf_cksum_fixup(*c
,
2426 ao
.addr16
[0], an
->addr16
[0], u
),
2427 ao
.addr16
[1], an
->addr16
[1], u
),
2428 ao
.addr16
[2], an
->addr16
[2], u
),
2429 ao
.addr16
[3], an
->addr16
[3], u
),
2430 ao
.addr16
[4], an
->addr16
[4], u
),
2431 ao
.addr16
[5], an
->addr16
[5], u
),
2432 ao
.addr16
[6], an
->addr16
[6], u
),
2433 ao
.addr16
[7], an
->addr16
[7], u
);
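
/*
 * Explanatory note (added): pf_change_icmp() adjusts a translated address
 * and port that are quoted inside an ICMP error payload.  Three sums have to
 * stay consistent: the quoted inner protocol checksum (*pc), the quoted
 * inner IP header checksum (*h2c), and the outer ICMP/ICMPv6 and IP
 * checksums (*ic, *hc); each is patched incrementally with pf_cksum_fixup()
 * rather than recomputed from scratch.
 */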
void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr	oia, ooa;

	PF_ACPY(&oia, ia, af);
	PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t	oip = *ip;
		u_int32_t	opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
	case AF_INET: {
		u_int32_t	 oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
	}
	/* Change outer ip address, fix outer ip or icmpv6 checksum. */
	PF_ACPY(oa, na, af);
	switch (af) {
	case AF_INET:
		*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
		    ooa.addr16[0], oa->addr16[0], 0),
		    ooa.addr16[1], oa->addr16[1], 0);
		break;
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ooa.addr16[0], oa->addr16[0], u),
		    ooa.addr16[1], oa->addr16[1], u),
		    ooa.addr16[2], oa->addr16[2], u),
		    ooa.addr16[3], oa->addr16[3], u),
		    ooa.addr16[4], oa->addr16[4], u),
		    ooa.addr16[5], oa->addr16[5], u),
		    ooa.addr16[6], oa->addr16[6], u),
		    ooa.addr16[7], oa->addr16[7], u);
		break;
	}
}

/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 */
static int
pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof (*th), thoptlen = hlen;
	u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(m, off + sizeof (*th), opts, hlen, NULL, NULL, pd->af))
		return (0);

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					memcpy(&sack, &opt[i], sizeof (sack));
					pf_change_a(&sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof (sack));
				}
#ifndef NO_APPLE_EXTENSIONS
				copyback = off + sizeof (*th) + thoptlen;
#else
				copyback = 1;
#endif
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

#ifndef NO_APPLE_EXTENSIONS
	if (copyback) {
		m = pf_lazy_makewritable(pd, m, copyback);
		if (!m)
			return (-1);
		m_copyback(m, off + sizeof (*th), thoptlen, opts);
	}
#else
	if (copyback)
		m_copyback(m, off + sizeof (*th), thoptlen, opts);
#endif
	return (copyback);
}

static void
pf_send_tcp(const struct pf_rule *r, sa_family_t af,
    const struct pf_addr *saddr, const struct pf_addr *daddr,
    u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
    u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
    u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
{
#pragma unused(eh, ifp)
	struct mbuf	*m;
	int		 len, tlen;
	struct ip	*h = NULL;
	struct ip6_hdr	*h6 = NULL;
	struct tcphdr	*th = NULL;
	char		*opt;
	struct pf_mtag	*pf_mtag;

	/* maximum segment size tcp option */
	tlen = sizeof (struct tcphdr);
	if (mss)
		tlen += 4;

	switch (af) {
	case AF_INET:
		len = sizeof (struct ip) + tlen;
		break;
	case AF_INET6:
		len = sizeof (struct ip6_hdr) + tlen;
		break;
	default:
		panic("pf_send_tcp: not AF_INET or AF_INET6!");
		return;
	}

	/* create outgoing mbuf */
	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return;

	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
		m_free(m);
		return;
	}
	if (tag)
		pf_mtag->flags |= PF_TAG_GENERATED;
	pf_mtag->tag = rtag;

	if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid))
		pf_mtag->rtableid = r->rtableid;

	if (r != NULL && r->qid) {
		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m, struct ip *);
	}

	m->m_data += max_linkhdr;
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	bzero(m->m_data, len);
	switch (af) {
	case AF_INET:
		h = mtod(m, struct ip *);

		/* IP header fields included in the TCP checksum */
		h->ip_p = IPPROTO_TCP;
		h->ip_len = htons(tlen);
		h->ip_src.s_addr = saddr->v4.s_addr;
		h->ip_dst.s_addr = daddr->v4.s_addr;

		th = (struct tcphdr *)((caddr_t)h + sizeof (struct ip));
		break;
	case AF_INET6:
		h6 = mtod(m, struct ip6_hdr *);

		/* IP header fields included in the TCP checksum */
		h6->ip6_nxt = IPPROTO_TCP;
		h6->ip6_plen = htons(tlen);
		memcpy(&h6->ip6_src, &saddr->v6, sizeof (struct in6_addr));
		memcpy(&h6->ip6_dst, &daddr->v6, sizeof (struct in6_addr));

		th = (struct tcphdr *)((caddr_t)h6 + sizeof (struct ip6_hdr));
		break;
	}

	/* TCP header */
	th->th_sport = sport;
	th->th_dport = dport;
	th->th_seq = htonl(seq);
	th->th_ack = htonl(ack);
	th->th_off = tlen >> 2;
	th->th_flags = flags;
	th->th_win = htons(win);

	if (mss) {
		opt = (char *)(th + 1);
		opt[0] = TCPOPT_MAXSEG;
		opt[1] = 4;
#if BYTE_ORDER != BIG_ENDIAN
		HTONS(mss);
#endif
		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
	}

	switch (af) {
	case AF_INET: {
		struct route ro;

		/* TCP checksum */
		th->th_sum = in_cksum(m, len);

		/* Finish the IP header */
		h->ip_hl = sizeof (*h) >> 2;
		h->ip_tos = IPTOS_LOWDELAY;
		/*
		 * ip_output() expects ip_len and ip_off to be in host order.
		 */
		h->ip_len = len;
		h->ip_off = (path_mtu_discovery ? IP_DF : 0);
		h->ip_ttl = ttl ? ttl : ip_defttl;

		bzero(&ro, sizeof (ro));
		ip_output(m, NULL, &ro, 0, NULL, NULL);
		if (ro.ro_rt != NULL)
			rtfree(ro.ro_rt);
		break;
	}
	case AF_INET6: {
		struct route_in6 ro6;

		/* TCP checksum */
		th->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof (struct ip6_hdr), tlen);

		h6->ip6_vfc |= IPV6_VERSION;
		h6->ip6_hlim = IPV6_DEFHLIM;

		bzero(&ro6, sizeof (ro6));
		ip6_output(m, NULL, &ro6, 0, NULL, NULL, NULL);
		if (ro6.ro_rt != NULL)
			rtfree(ro6.ro_rt);
		break;
	}
	}
}

static void
pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
    struct pf_rule *r)
{
	struct mbuf	*m0;
	struct pf_mtag	*pf_mtag;

	m0 = m_copy(m, 0, M_COPYALL);
	if (m0 == NULL)
		return;

	if ((pf_mtag = pf_get_mtag(m0)) == NULL)
		return;
	pf_mtag->flags |= PF_TAG_GENERATED;

	if (PF_RTABLEID_IS_VALID(r->rtableid))
		pf_mtag->rtableid = r->rtableid;

	if (r->qid) {
		pf_mtag->qid = r->qid;
		/* add hints for ecn */
		pf_mtag->hdr = mtod(m0, struct ip *);
	}

	switch (af) {
	case AF_INET:
		icmp_error(m0, type, code, 0, 0);
		break;
	case AF_INET6:
		icmp6_error(m0, type, code, 0);
		break;
	}
}

/*
 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is != 0, they match if they
 * are different.
 */
int
pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
    struct pf_addr *b, sa_family_t af)
{
	int	match = 0;

	switch (af) {
	case AF_INET:
		if ((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0]))
			match++;
		break;
	case AF_INET6:
		if (((a->addr32[0] & m->addr32[0]) ==
		    (b->addr32[0] & m->addr32[0])) &&
		    ((a->addr32[1] & m->addr32[1]) ==
		    (b->addr32[1] & m->addr32[1])) &&
		    ((a->addr32[2] & m->addr32[2]) ==
		    (b->addr32[2] & m->addr32[2])) &&
		    ((a->addr32[3] & m->addr32[3]) ==
		    (b->addr32[3] & m->addr32[3])))
			match++;
		break;
	}
	if (match) {
		if (n)
			return (0);
		else
			return (1);
	} else {
		if (n)
			return (1);
		else
			return (0);
	}
}

/*
 * Return 1 if b <= a <= e, otherwise return 0.
 */
int
pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
    struct pf_addr *a, sa_family_t af)
{
	int	i;

	switch (af) {
	case AF_INET:
		if ((a->addr32[0] < b->addr32[0]) ||
		    (a->addr32[0] > e->addr32[0]))
			return (0);
		break;
	case AF_INET6:
		/* check if we fall into the lower range */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] > b->addr32[i])
				break;
			else if (a->addr32[i] < b->addr32[i])
				return (0);
		/* check if we fall below the upper range */
		for (i = 0; i < 4; ++i)
			if (a->addr32[i] < e->addr32[i])
				break;
			else if (a->addr32[i] > e->addr32[i])
				return (0);
		break;
	}
	return (1);
}
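
/*
 * Explanatory note (added): pf_match() implements the rule comparison
 * operators used for ports, uids and gids.  PF_OP_IRG matches strictly
 * inside the range (a1 < p < a2), PF_OP_XRG matches strictly outside it,
 * and PF_OP_RRG matches the inclusive range (a1 <= p <= a2); the
 * single-argument operators (EQ, NE, LT, LE, GT, GE) only use a1.
 */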
int
pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
{
	switch (op) {
	case PF_OP_IRG:
		return ((p > a1) && (p < a2));
	case PF_OP_XRG:
		return ((p < a1) || (p > a2));
	case PF_OP_RRG:
		return ((p >= a1) && (p <= a2));
	case PF_OP_EQ:
		return (p == a1);
	case PF_OP_NE:
		return (p != a1);
	case PF_OP_LT:
		return (p < a1);
	case PF_OP_LE:
		return (p <= a1);
	case PF_OP_GT:
		return (p > a1);
	case PF_OP_GE:
		return (p >= a1);
	}
	return (0); /* never reached */
}

int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(a1);
	NTOHS(a2);
	NTOHS(p);
#endif
	return (pf_match(op, a1, a2, p));
}

#ifndef NO_APPLE_EXTENSIONS
int
pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
    union pf_state_xport *sx)
{
	int d = !0;

	if (sx) {
		switch (proto) {
		case IPPROTO_GRE:
			if (proto_variant == PF_GRE_PPTP_VARIANT)
				d = (rx->call_id == sx->call_id);
			break;

		case IPPROTO_ESP:
			d = (rx->spi == sx->spi);
			break;

		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			if (rx->range.op)
				d = pf_match_port(rx->range.op,
				    rx->range.port[0], rx->range.port[1],
				    sx->port);
			break;

		default:
			break;
		}
	}

	return (d);
}
#endif

int
pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
{
	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, u));
}

int
pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
{
	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
		return (0);
	return (pf_match(op, a1, a2, g));
}

static int
pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_mtag *pf_mtag,
    int *tag)
{
#pragma unused(m)
	if (*tag == -1)
		*tag = pf_mtag->tag;

	return ((!r->match_tag_not && r->match_tag == *tag) ||
	    (r->match_tag_not && r->match_tag != *tag));
}

int
pf_tag_packet(struct mbuf *m, struct pf_mtag *pf_mtag, int tag,
    unsigned int rtableid)
{
	if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid))
		return (0);

	if (pf_mtag == NULL && (pf_mtag = pf_get_mtag(m)) == NULL)
		return (1);

	if (tag > 0)
		pf_mtag->tag = tag;
	if (PF_RTABLEID_IS_VALID(rtableid))
		pf_mtag->rtableid = rtableid;

	return (0);
}
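
/*
 * Explanatory note (added): anchor rulesets are evaluated recursively, but
 * the recursion is flattened onto the small fixed-size pf_anchor_stack[].
 * pf_step_into_anchor() pushes a frame (walking wildcard children via the
 * anchor RB-tree) and pf_step_out_of_anchor() pops frames, propagating the
 * child's match and quick flags back to the enclosing ruleset.
 */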
static void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe	*f;

	(*r)->anchor->match = 0;
	if (match)
		*match = 0;
	if (*depth >= (int)sizeof (pf_anchor_stack) /
	    (int)sizeof (pf_anchor_stack[0])) {
		printf("pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (*depth == 0 && a != NULL)
		*a = *r;
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
		    NULL) {
			*r = NULL;
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}

static int
pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe	*f;
	int quick = 0;

	do {
		if (*depth <= 0)
			break;
		f = pf_anchor_stack + *depth - 1;
		if (f->parent != NULL && f->child != NULL) {
			if (f->child->match ||
			    (match != NULL && *match)) {
				f->r->anchor->match = 1;
				*match = 0;
			}
			f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
			if (f->child != NULL) {
				*rs = &f->child->ruleset;
				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
				if (*r == NULL)
					continue;
				else
					break;
			}
		}
		(*depth)--;
		if (*depth == 0 && a != NULL)
			*a = NULL;
		*rs = f->rs;
		if (f->r->anchor->match || (match != NULL && *match))
			quick = f->r->quick;
		*r = TAILQ_NEXT(f->r, entries);
	} while (*r == NULL);

	return (quick);
}

void
pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
    struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		break;
	case AF_INET6:
		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
		    ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
		    ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
		    ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
		    ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
		break;
	}
}

void
pf_addr_inc(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
	case AF_INET:
		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
		break;
	case AF_INET6:
		if (addr->addr32[3] == 0xffffffff) {
			addr->addr32[3] = 0;
			if (addr->addr32[2] == 0xffffffff) {
				addr->addr32[2] = 0;
				if (addr->addr32[1] == 0xffffffff) {
					addr->addr32[1] = 0;
					addr->addr32[0] =
					    htonl(ntohl(addr->addr32[0]) + 1);
				} else
					addr->addr32[1] =
					    htonl(ntohl(addr->addr32[1]) + 1);
			} else
				addr->addr32[2] =
				    htonl(ntohl(addr->addr32[2]) + 1);
		} else
			addr->addr32[3] =
			    htonl(ntohl(addr->addr32[3]) + 1);
		break;
	}
}
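
/*
 * Explanatory note (added): the mix() macro and pf_hash() below are a
 * Bob Jenkins style mixing function (borrowed, as noted, from bridge_hash
 * in if_bridge.c): three 32-bit lanes are repeatedly combined with shifts
 * and subtractions so every input bit influences the words that the
 * source-hash address pool uses.
 */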
#define mix(a, b, c) \
	do {					\
		a -= b; a -= c; a ^= (c >> 13);	\
		b -= c; b -= a; b ^= (a << 8);	\
		c -= a; c -= b; c ^= (b >> 13);	\
		a -= b; a -= c; a ^= (c >> 12);	\
		b -= c; b -= a; b ^= (a << 16);	\
		c -= a; c -= b; c ^= (b >> 5);	\
		a -= b; a -= c; a ^= (c >> 3);	\
		b -= c; b -= a; b ^= (a << 10);	\
		c -= a; c -= b; c ^= (b >> 15);	\
	} while (0)

/*
 * hash function based on bridge_hash in if_bridge.c
 */
static void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t	a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
	case AF_INET6:
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
	}
}
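
/*
 * Explanatory note (added): pf_map_addr() picks the translation (or
 * route-to) address from a rule's address pool.  PF_POOL_NONE and
 * PF_POOL_BITMASK derive the address directly from the pool entry and the
 * source address, PF_POOL_RANDOM and PF_POOL_SRCHASH pick a host within the
 * pool mask, and PF_POOL_ROUNDROBIN walks the pool entries (including
 * tables and dynamic interface addresses) via rpool->counter.  Sticky
 * source tracking short-circuits all of this when an existing source node
 * already maps the source to an address.
 */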
int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	unsigned char		 hash[16];
	struct pf_pool		*rpool = &r->rpool;
	struct pf_addr		*raddr = &rpool->cur->addr.v.a.addr;
	struct pf_addr		*rmask = &rpool->cur->addr.v.a.mask;
	struct pf_pooladdr	*acur = rpool->cur;
	struct pf_src_node	 k;

	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		k.af = af;
		PF_ACPY(&k.addr, saddr, af);
		if (r->rule_flag & PFRULE_RULESRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = r;
		else
			k.rule.ptr = NULL;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
		if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
			PF_ACPY(naddr, &(*sn)->raddr, af);
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf_map_addr: src tracking maps ");
				pf_print_host(&k.addr, 0, af);
				printf(" to ");
				pf_print_host(naddr, 0, af);
				printf("\n");
			}
			return (0);
		}
	}

	if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		switch (af) {
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
			return (1); /* unsupported */
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, af);
		break;
	case PF_POOL_BITMASK:
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, af)) {
			switch (af) {
			case AF_INET:
				rpool->counter.addr32[0] = htonl(random());
				break;
			case AF_INET6:
				if (rmask->addr32[3] != 0xffffffff)
					rpool->counter.addr32[3] =
					    htonl(random());
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					rpool->counter.addr32[2] =
					    htonl(random());
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					rpool->counter.addr32[1] =
					    htonl(random());
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					rpool->counter.addr32[0] =
					    htonl(random());
				break;
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
			PF_ACPY(init_addr, naddr, af);
		} else {
			PF_AINC(&rpool->counter, af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
		}
		break;
	case PF_POOL_SRCHASH:
		pf_hash(saddr, (struct pf_addr *)&hash, &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask, (struct pf_addr *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af))
				goto get_addr;
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (!pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af))
				goto get_addr;
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
			goto get_addr;

	try_next:
		if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
			rpool->cur = TAILQ_FIRST(&rpool->list);
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af)) {
				/* table contains no address of type 'af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, af);
		if (init_addr != NULL && PF_AZERO(init_addr, af))
			PF_ACPY(init_addr, naddr, af);
		PF_AINC(&rpool->counter, af);
		break;
	}
	if (*sn != NULL)
		PF_ACPY(&(*sn)->raddr, naddr, af);

	if (pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		printf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, af);
		printf("\n");
	}

	return (0);
}
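
/*
 * Explanatory note (added): pf_get_sport() chooses the NAT source port.
 * After pf_map_addr() selects the address, the proxy-port range is probed
 * starting at a random cut point and walking up, then down, until
 * pf_find_state_all() reports the candidate gateway port unused; protocols
 * without ports, an empty range and a single-port range are handled as
 * special cases first, and (in the Apple extensions) UDP/IKE and TCP reuse
 * existing bindings where possible.
 */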
#ifndef NO_APPLE_EXTENSIONS
static int
pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r,
    struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, struct pf_addr *naddr,
    union pf_state_xport *nxport, struct pf_src_node **sn)
#else
static int
pf_get_sport(sa_family_t af, u_int8_t proto, struct pf_rule *r,
    struct pf_addr *saddr, struct pf_addr *daddr, u_int16_t dport,
    struct pf_addr *naddr, u_int16_t *nport, u_int16_t low, u_int16_t high,
    struct pf_src_node **sn)
#endif
{
	struct pf_state_key_cmp	key;
	struct pf_addr		init_addr;
#ifndef NO_APPLE_EXTENSIONS
	unsigned int cut;
	sa_family_t af = pd->af;
	u_int8_t proto = pd->proto;
	unsigned int low = r->rpool.proxy_port[0];
	unsigned int high = r->rpool.proxy_port[1];
#else
	u_int16_t		cut;
#endif

	bzero(&init_addr, sizeof (init_addr));
	if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
		return (1);

	if (proto == IPPROTO_ICMP) {
		low = 1;
		high = 65535;
	}

#ifndef NO_APPLE_EXTENSIONS
	if (!nxport)
		return (0); /* No output necessary. */

	/*--- Special mapping rules for UDP ---*/
	if (proto == IPPROTO_UDP) {

		/*--- Never float IKE source port ---*/
		if (ntohs(sxport->port) == PF_IKE_PORT) {
			nxport->port = sxport->port;
			return (0);
		}

		/*--- Apply exterior mapping options ---*/
		if (r->extmap > PF_EXTMAP_APD) {
			struct pf_state *s;

			TAILQ_FOREACH(s, &state_list, entry_list) {
				struct pf_state_key *sk = s->state_key;
				if (!sk)
					continue;
				if (s->nat_rule.ptr != r)
					continue;
				if (sk->proto != IPPROTO_UDP || sk->af != af)
					continue;
				if (sk->lan.xport.port != sxport->port)
					continue;
				if (PF_ANEQ(&sk->lan.addr, saddr, af))
					continue;
				if (r->extmap < PF_EXTMAP_EI &&
				    PF_ANEQ(&sk->ext.addr, daddr, af))
					continue;

				nxport->port = sk->gwy.xport.port;
				return (0);
			}
		}
	} else if (proto == IPPROTO_TCP) {
		struct pf_state *s;
		/*
		 * APPLE MODIFICATION: <rdar://problem/6546358>
		 * Fix allows....NAT to use a single binding for TCP session
		 * with same source IP and source port
		 */
		TAILQ_FOREACH(s, &state_list, entry_list) {
			struct pf_state_key *sk = s->state_key;
			if (!sk)
				continue;
			if (s->nat_rule.ptr != r)
				continue;
			if (sk->proto != IPPROTO_TCP || sk->af != af)
				continue;
			if (sk->lan.xport.port != sxport->port)
				continue;
			if (!(PF_AEQ(&sk->lan.addr, saddr, af)))
				continue;
			nxport->port = sk->gwy.xport.port;
			return (0);
		}
	}
#endif

	do {
		key.af = af;
		key.proto = proto;
		PF_ACPY(&key.ext.addr, daddr, key.af);
		PF_ACPY(&key.gwy.addr, naddr, key.af);
#ifndef NO_APPLE_EXTENSIONS
		switch (proto) {
		case IPPROTO_UDP:
			key.proto_variant = r->extfilter;
			break;
		default:
			key.proto_variant = 0;
			break;
		}
		if (dxport)
			key.ext.xport = *dxport;
		else
			memset(&key.ext.xport, 0, sizeof (key.ext.xport));
#else
		key.ext.port = dport;
#endif

		/*
		 * port search; start random, step;
		 * similar 2 portloop in in_pcbbind
		 */
		if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
		    proto == IPPROTO_ICMP)) {
#ifndef NO_APPLE_EXTENSIONS
			if (dxport)
				key.gwy.xport = *dxport;
			else
				memset(&key.gwy.xport, 0,
				    sizeof (key.ext.xport));
#else
			key.gwy.port = dport;
#endif
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
				return (0);
		} else if (low == 0 && high == 0) {
#ifndef NO_APPLE_EXTENSIONS
			key.gwy.xport = *nxport;
#else
			key.gwy.port = *nport;
#endif
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
				return (0);
		} else if (low == high) {
#ifndef NO_APPLE_EXTENSIONS
			key.gwy.xport.port = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				nxport->port = htons(low);
				return (0);
			}
#else
			key.gwy.port = htons(low);
			if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
				*nport = htons(low);
				return (0);
			}
#endif
		} else {
#ifndef NO_APPLE_EXTENSIONS
			unsigned int tmp;
#else
			u_int16_t tmp;
#endif
			if (low > high) {
				tmp = low;
				low = high;
				high = tmp;
			}
			/* low < high */
			cut = htonl(random()) % (1 + high - low) + low;
			/* low <= cut <= high */
			for (tmp = cut; tmp <= high; ++(tmp)) {
#ifndef NO_APPLE_EXTENSIONS
				key.gwy.xport.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					nxport->port = htons(tmp);
					return (0);
				}
#else
				key.gwy.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					*nport = htons(tmp);
					return (0);
				}
#endif
			}
			for (tmp = cut - 1; tmp >= low; --(tmp)) {
#ifndef NO_APPLE_EXTENSIONS
				key.gwy.xport.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					nxport->port = htons(tmp);
					return (0);
				}
#else
				key.gwy.port = htons(tmp);
				if (pf_find_state_all(&key, PF_IN, NULL) ==
				    NULL) {
					*nport = htons(tmp);
					return (0);
				}
#endif
			}
		}

		switch (r->rpool.opts & PF_POOL_TYPEMASK) {
		case PF_POOL_RANDOM:
		case PF_POOL_ROUNDROBIN:
			if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
				return (1);
			break;
		case PF_POOL_NONE:
		case PF_POOL_SRCHASH:
		case PF_POOL_BITMASK:
		default:
			return (1);
		}
	} while (!PF_AEQ(&init_addr, naddr, af));

	return (1); /* none available */
}

#ifndef NO_APPLE_EXTENSIONS
static struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr,
    union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, int rs_num)
#else
static struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr, u_int16_t sport,
    struct pf_addr *daddr, u_int16_t dport, int rs_num)
#endif
{
	struct pf_rule		*r, *rm = NULL;
	struct pf_ruleset	*ruleset = NULL;
	int			 tag = -1;
	unsigned int		 rtableid = IFSCOPE_NONE;
	int			 asd = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
	while (r && rm == NULL) {
		struct pf_rule_addr	*src = NULL, *dst = NULL;
		struct pf_addr_wrap	*xdst = NULL;
#ifndef NO_APPLE_EXTENSIONS
		struct pf_addr_wrap	*xsrc = NULL;
		union pf_rule_xport	rdrxport;
#endif

		if (r->action == PF_BINAT && direction == PF_IN) {
			src = &r->dst;
			if (r->rpool.cur != NULL)
				xdst = &r->rpool.cur->addr;
#ifndef NO_APPLE_EXTENSIONS
		} else if (r->action == PF_RDR && direction == PF_OUT) {
			dst = &r->src;
			src = &r->dst;
			if (r->rpool.cur != NULL) {
				rdrxport.range.op = PF_OP_EQ;
				rdrxport.range.port[0] =
				    htons(r->rpool.proxy_port[0]);
				xsrc = &r->rpool.cur->addr;
			}
#endif
		} else {
			src = &r->src;
			dst = &r->dst;
		}

		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != pd->af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
#ifndef NO_APPLE_EXTENSIONS
		else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL))
			r = TAILQ_NEXT(r, entries);
		else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif))
			r = TAILQ_NEXT(r, entries);
		else if (xsrc && (!rdrxport.range.port[0] ||
		    !pf_match_xport(r->proto, r->proto_variant, &rdrxport,
		    sxport)))
			r = TAILQ_NEXT(r, entries);
		else if (!xsrc && !pf_match_xport(r->proto,
		    r->proto_variant, &src->xport, sxport))
			r = TAILQ_NEXT(r, entries);
#else
		else if (PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_ADDR :
			    PF_SKIP_DST_ADDR].ptr;
		else if (src->port_op && !pf_match_port(src->port_op,
		    src->port[0], src->port[1], sport))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
			    PF_SKIP_DST_PORT].ptr;
#endif
		else if (dst != NULL &&
		    PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
		    0, NULL))
			r = TAILQ_NEXT(r, entries);
#ifndef NO_APPLE_EXTENSIONS
		else if (dst && !pf_match_xport(r->proto, r->proto_variant,
		    &dst->xport, dxport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
#else
		else if (dst != NULL && dst->port_op &&
		    !pf_match_port(dst->port_op, dst->port[0],
		    dst->port[1], dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
#endif
		else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
		    IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
		    off, pd->hdr.tcp), r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->tag)
				tag = r->tag;
			if (PF_RTABLEID_IS_VALID(r->rtableid))
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				rm = r;
			} else
				pf_step_into_anchor(&asd, &ruleset, rs_num,
				    &r, NULL, NULL);
		}
		if (r == NULL)
			pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
			    NULL, NULL);
	}
	if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid))
		return (NULL);
	if (rm != NULL && (rm->action == PF_NONAT ||
	    rm->action == PF_NORDR || rm->action == PF_NOBINAT))
		return (NULL);
	return (rm);
}

#ifndef NO_APPLE_EXTENSIONS
static struct pf_rule *
pf_get_translation_aux(struct pf_pdesc *pd, struct mbuf *m, int off,
    int direction, struct pfi_kif *kif, struct pf_src_node **sn,
    struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, struct pf_addr *naddr,
    union pf_state_xport *nxport)
#else
static struct pf_rule *
pf_get_translation(struct pf_pdesc *pd, struct mbuf *m, int off, int direction,
    struct pfi_kif *kif, struct pf_src_node **sn,
    struct pf_addr *saddr, u_int16_t sport,
    struct pf_addr *daddr, u_int16_t dport,
    struct pf_addr *naddr, u_int16_t *nport)
#endif
{
	struct pf_rule	*r = NULL;

#ifndef NO_APPLE_EXTENSIONS
	if (direction == PF_OUT) {
		r = pf_match_translation(pd, m, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_BINAT);
		if (r == NULL)
			r = pf_match_translation(pd, m, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_RDR);
		if (r == NULL)
			r = pf_match_translation(pd, m, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_NAT);
	} else {
		r = pf_match_translation(pd, m, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_RDR);
		if (r == NULL)
			r = pf_match_translation(pd, m, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_BINAT);
	}
#else
	if (direction == PF_OUT) {
		r = pf_match_translation(pd, m, off, direction, kif, saddr,
		    sport, daddr, dport, PF_RULESET_BINAT);
		if (r == NULL)
			r = pf_match_translation(pd, m, off, direction, kif,
			    saddr, sport, daddr, dport, PF_RULESET_NAT);
	} else {
		r = pf_match_translation(pd, m, off, direction, kif, saddr,
		    sport, daddr, dport, PF_RULESET_RDR);
		if (r == NULL)
			r = pf_match_translation(pd, m, off, direction, kif,
			    saddr, sport, daddr, dport, PF_RULESET_BINAT);
	}
#endif

	if (r == NULL)
		return (NULL);

	switch (r->action) {
	case PF_NONAT:
	case PF_NOBINAT:
	case PF_NORDR:
		return (NULL);
	case PF_NAT:
#ifndef NO_APPLE_EXTENSIONS
		if (pf_get_sport(pd, kif, r, saddr, sxport, daddr,
		    dxport, naddr, nxport, sn)) {
#else
		if (pf_get_sport(pd->af, pd->proto, r, saddr,
		    daddr, dport, naddr, nport, r->rpool.proxy_port[0],
		    r->rpool.proxy_port[1], sn)) {
#endif
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: NAT proxy port allocation "
			    "(%u-%u) failed\n",
			    r->rpool.proxy_port[0],
			    r->rpool.proxy_port[1]));
			return (NULL);
		}
		break;
	case PF_BINAT:
		switch (direction) {
		case PF_OUT:
			if (r->rpool.cur->addr.type ==
			    PF_ADDR_DYNIFTL) {
				switch (pd->af) {
				case AF_INET:
					if (r->rpool.cur->addr.p.dyn->
					    pfid_acnt4 < 1)
						return (NULL);
					PF_POOLMASK(naddr,
					    &r->rpool.cur->addr.p.dyn->
					    pfid_addr4,
					    &r->rpool.cur->addr.p.dyn->
					    pfid_mask4, saddr, AF_INET);
					break;
				case AF_INET6:
					if (r->rpool.cur->addr.p.dyn->
					    pfid_acnt6 < 1)
						return (NULL);
					PF_POOLMASK(naddr,
					    &r->rpool.cur->addr.p.dyn->
					    pfid_addr6,
					    &r->rpool.cur->addr.p.dyn->
					    pfid_mask6, saddr, AF_INET6);
					break;
				}
			} else {
				PF_POOLMASK(naddr,
				    &r->rpool.cur->addr.v.a.addr,
				    &r->rpool.cur->addr.v.a.mask,
				    saddr, pd->af);
			}
			break;
		case PF_IN:
			if (r->src.addr.type == PF_ADDR_DYNIFTL) {
				switch (pd->af) {
				case AF_INET:
					if (r->src.addr.p.dyn->
					    pfid_acnt4 < 1)
						return (NULL);
					PF_POOLMASK(naddr,
					    &r->src.addr.p.dyn->
					    pfid_addr4,
					    &r->src.addr.p.dyn->
					    pfid_mask4, daddr, AF_INET);
					break;
				case AF_INET6:
					if (r->src.addr.p.dyn->
					    pfid_acnt6 < 1)
						return (NULL);
					PF_POOLMASK(naddr,
					    &r->src.addr.p.dyn->
					    pfid_addr6,
					    &r->src.addr.p.dyn->
					    pfid_mask6, daddr, AF_INET6);
					break;
				}
			} else {
				PF_POOLMASK(naddr,
				    &r->src.addr.v.a.addr,
				    &r->src.addr.v.a.mask, daddr,
				    pd->af);
			}
			break;
		}
		break;
	case PF_RDR: {
#ifndef NO_APPLE_EXTENSIONS
		switch (direction) {
		case PF_OUT:
			if (r->dst.addr.type == PF_ADDR_DYNIFTL) {
				switch (pd->af) {
				case AF_INET:
					if (r->dst.addr.p.dyn->
					    pfid_acnt4 < 1)
						return (NULL);
					PF_POOLMASK(naddr,
					    &r->dst.addr.p.dyn->
					    pfid_addr4,
					    &r->dst.addr.p.dyn->
					    pfid_mask4, daddr, AF_INET);
					break;
				case AF_INET6:
					if (r->dst.addr.p.dyn->
					    pfid_acnt6 < 1)
						return (NULL);
					PF_POOLMASK(naddr,
					    &r->dst.addr.p.dyn->
					    pfid_addr6,
					    &r->dst.addr.p.dyn->
					    pfid_mask6, daddr, AF_INET6);
					break;
				}
			} else {
				PF_POOLMASK(naddr,
				    &r->dst.addr.v.a.addr,
				    &r->dst.addr.v.a.mask,
				    daddr, pd->af);
			}
			if (nxport && r->dst.xport.range.port[0])
				nxport->port =
				    r->dst.xport.range.port[0];
			break;
		case PF_IN:
			if (pf_map_addr(pd->af, r, saddr,
			    naddr, NULL, sn))
				return (NULL);
			if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
			    PF_POOL_BITMASK)
				PF_POOLMASK(naddr, naddr,
				    &r->rpool.cur->addr.v.a.mask, daddr,
				    pd->af);

			if (nxport && dxport) {
				if (r->rpool.proxy_port[1]) {
					u_int32_t tmp_nport;

					tmp_nport =
					    ((ntohs(dxport->port) -
					    ntohs(r->dst.xport.range.
					    port[0])) %
					    (r->rpool.proxy_port[1] -
					    r->rpool.proxy_port[0] +
					    1)) + r->rpool.proxy_port[0];

					/* wrap around if necessary */
					if (tmp_nport > 65535)
						tmp_nport -= 65535;
					nxport->port =
					    htons((u_int16_t)tmp_nport);
				} else if (r->rpool.proxy_port[0]) {
					nxport->port = htons(r->rpool.
					    proxy_port[0]);
				}
			}
			break;
		}
#else
		if (pf_map_addr(pd->af, r, saddr, naddr, NULL, sn))
			return (NULL);
		if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
		    PF_POOL_BITMASK)
			PF_POOLMASK(naddr, naddr,
			    &r->rpool.cur->addr.v.a.mask, daddr,
			    pd->af);

		if (r->rpool.proxy_port[1]) {
			u_int32_t tmp_nport;

			tmp_nport = ((ntohs(dport) -
			    ntohs(r->dst.port[0])) %
			    (r->rpool.proxy_port[1] -
			    r->rpool.proxy_port[0] + 1)) +
			    r->rpool.proxy_port[0];

			/* wrap around if necessary */
			if (tmp_nport > 65535)
				tmp_nport -= 65535;
			*nport = htons((u_int16_t)tmp_nport);
		} else if (r->rpool.proxy_port[0])
			*nport = htons(r->rpool.proxy_port[0]);
#endif
		break;
	}
	default:
		return (NULL);
	}

	return (r);
}

static int
pf_socket_lookup(int direction, struct pf_pdesc *pd)
{
	struct pf_addr		*saddr, *daddr;
	u_int16_t		 sport, dport;
	struct inpcbinfo	*pi;
	int			 inp = 0;

	if (pd == NULL)
		return (-1);
	pd->lookup.uid = UID_MAX;
	pd->lookup.gid = GID_MAX;
	pd->lookup.pid = NO_PID;

	switch (pd->proto) {
	case IPPROTO_TCP:
		if (pd->hdr.tcp == NULL)
			return (-1);
		sport = pd->hdr.tcp->th_sport;
		dport = pd->hdr.tcp->th_dport;
		pi = &tcbinfo;
		break;
	case IPPROTO_UDP:
		if (pd->hdr.udp == NULL)
			return (-1);
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		pi = &udbinfo;
		break;
	default:
		return (-1);
	}
	if (direction == PF_IN) {
		saddr = pd->src;
		daddr = pd->dst;
	} else {
		u_int16_t	p;

		p = sport;
		sport = dport;
		dport = p;
		saddr = pd->dst;
		daddr = pd->src;
	}
	switch (pd->af) {
	case AF_INET:
		inp = in_pcblookup_hash_exists(pi, saddr->v4, sport,
		    daddr->v4, dport, 0, &pd->lookup.uid, &pd->lookup.gid,
		    NULL);
		if (inp == 0) {
			struct in6_addr s6, d6;

			memset(&s6, 0, sizeof (s6));
			s6.s6_addr16[5] = htons(0xffff);
			memcpy(&s6.s6_addr32[3], &saddr->v4,
			    sizeof (saddr->v4));

			memset(&d6, 0, sizeof (d6));
			d6.s6_addr16[5] = htons(0xffff);
			memcpy(&d6.s6_addr32[3], &daddr->v4,
			    sizeof (daddr->v4));

			inp = in6_pcblookup_hash_exists(pi, &s6, sport,
			    &d6, dport, 0, &pd->lookup.uid, &pd->lookup.gid,
			    NULL);
			if (inp == 0) {
				inp = in_pcblookup_hash_exists(pi, saddr->v4,
				    sport, daddr->v4, dport,
				    INPLOOKUP_WILDCARD, &pd->lookup.uid,
				    &pd->lookup.gid, NULL);
				if (inp == 0) {
					inp = in6_pcblookup_hash_exists(pi,
					    &s6, sport, &d6, dport,
					    INPLOOKUP_WILDCARD,
					    &pd->lookup.uid, &pd->lookup.gid,
					    NULL);
					if (inp == 0)
						return (-1);
				}
			}
		}
		if (inp == 0) {
			inp = in_pcblookup_hash_exists(pi, saddr->v4, sport,
			    daddr->v4, dport, INPLOOKUP_WILDCARD,
			    &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0)
				return (-1);
		}
		break;
	case AF_INET6:
		inp = in6_pcblookup_hash_exists(pi, &saddr->v6, sport,
		    &daddr->v6, dport, 0, &pd->lookup.uid, &pd->lookup.gid,
		    NULL);
		if (inp == 0) {
			inp = in6_pcblookup_hash_exists(pi, &saddr->v6, sport,
			    &daddr->v6, dport, INPLOOKUP_WILDCARD,
			    &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0)
				return (-1);
		}
		break;
	default:
		return (-1);
	}

	return (1);
}

static u_int8_t
pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
{
	int		 hlen;
	u_int8_t	 hdr[60];
	u_int8_t	*opt, optlen;
	u_int8_t	 wscale = 0;

	hlen = th_off << 2;		/* hlen <= sizeof (hdr) */
	if (hlen <= (int)sizeof (struct tcphdr))
		return (0);
	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
		return (0);
	opt = hdr + sizeof (struct tcphdr);
	hlen -= sizeof (struct tcphdr);
	while (hlen >= 3) {
		switch (*opt) {
		case TCPOPT_EOL:
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_WINDOW:
			wscale = opt[2];
			if (wscale > TCP_MAX_WINSHIFT)
				wscale = TCP_MAX_WINSHIFT;
			wscale |= PF_WSCALE_FLAG;
			/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2)
				optlen = 2;
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return (wscale);
}

static u_int16_t
pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
{
	int		 hlen;
	u_int8_t	 hdr[60];
	u_int8_t	*opt, optlen;
	u_int16_t	 mss = tcp_mssdflt;

	hlen = th_off << 2;		/* hlen <= sizeof (hdr) */
	if (hlen <= (int)sizeof (struct tcphdr))
		return (0);
	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
		return (0);
	opt = hdr + sizeof (struct tcphdr);
	hlen -= sizeof (struct tcphdr);
	while (hlen >= TCPOLEN_MAXSEG) {
		switch (*opt) {
		case TCPOPT_EOL:
		case TCPOPT_NOP:
			++opt;
			--hlen;
			break;
		case TCPOPT_MAXSEG:
			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
#if BYTE_ORDER != BIG_ENDIAN
			NTOHS(mss);
#endif
			/* FALLTHROUGH */
		default:
			optlen = opt[1];
			if (optlen < 2)
				optlen = 2;
			hlen -= optlen;
			opt += optlen;
			break;
		}
	}
	return (mss);
}

static u_int16_t
pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
{
	struct sockaddr_in	*dst;
	struct route		 ro;
	struct sockaddr_in6	*dst6;
	struct route_in6	 ro6;
	struct rtentry		*rt = NULL;
	int			 hlen;
	u_int16_t		 mss = tcp_mssdflt;

	switch (af) {
	case AF_INET:
		hlen = sizeof (struct ip);
		bzero(&ro, sizeof (ro));
		dst = (struct sockaddr_in *)&ro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof (*dst);
		dst->sin_addr = addr->v4;
		rtalloc(&ro);
		rt = ro.ro_rt;
		break;
	case AF_INET6:
		hlen = sizeof (struct ip6_hdr);
		bzero(&ro6, sizeof (ro6));
		dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
		dst6->sin6_family = AF_INET6;
		dst6->sin6_len = sizeof (*dst6);
		dst6->sin6_addr = addr->v6;
		rtalloc((struct route *)&ro6);
		rt = ro6.ro_rt;
		break;
	default:
		panic("pf_calc_mss: not AF_INET or AF_INET6!");
		return (0);
	}

	if (rt && rt->rt_ifp) {
		mss = rt->rt_ifp->if_mtu - hlen - sizeof (struct tcphdr);
		mss = max(tcp_mssdflt, mss);
		rtfree(rt);
	}
	mss = min(mss, offer);
	mss = max(mss, 64);		/* sanity - at least max opt space */
	return (mss);
}

static void
pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
{
	struct pf_rule *r = s->rule.ptr;

	s->rt_kif = NULL;
	if (!r->rt || r->rt == PF_FASTROUTE)
		return;
	switch (s->state_key->af) {
	case AF_INET:
		pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
		break;
	case AF_INET6:
		pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
		    &s->nat_src_node);
		s->rt_kif = r->rpool.cur->kif;
		break;
	}
}

static void
pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
{
	s->state_key = sk;
	sk->refcnt++;

	/* list is sorted, if-bound states before floating */
	if (tail)
		TAILQ_INSERT_TAIL(&sk->states, s, next);
	else
		TAILQ_INSERT_HEAD(&sk->states, s, next);
}

static void
pf_detach_state(struct pf_state *s, int flags)
{
	struct pf_state_key	*sk = s->state_key;

	if (sk == NULL)
		return;

	s->state_key = NULL;
	TAILQ_REMOVE(&sk->states, s, next);
	if (--sk->refcnt == 0) {
		if (!(flags & PF_DT_SKIP_EXTGWY))
			RB_REMOVE(pf_state_tree_ext_gwy,
			    &pf_statetbl_ext_gwy, sk);
		if (!(flags & PF_DT_SKIP_LANEXT))
			RB_REMOVE(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext, sk);
#ifndef NO_APPLE_EXTENSIONS
		if (sk->app_state)
			pool_put(&pf_app_state_pl, sk->app_state);
#endif
		pool_put(&pf_state_key_pl, sk);
	}
}

struct pf_state_key *
pf_alloc_state_key(struct pf_state *s)
{
	struct pf_state_key	*sk;

	if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL)
		return (NULL);
	bzero(sk, sizeof (*sk));
	TAILQ_INIT(&sk->states);
	pf_attach_state(sk, s, 0);

	return (sk);
}
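
/*
 * Explanatory note (added): pf_tcp_iss() generates initial sequence numbers
 * for the modulate-state / syn-proxy code: an MD5 over the connection
 * 4-tuple keyed with the boot-time random pf_tcp_secret, plus a random
 * increment and a 4096 step per call, so ISNs are unpredictable but spread
 * monotonically apart.
 */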
static u_int32_t
pf_tcp_iss(struct pf_pdesc *pd)
{
	MD5_CTX ctx;
	u_int32_t digest[4];

	if (pf_tcp_secret_init == 0) {
		read_random(pf_tcp_secret, sizeof (pf_tcp_secret));
		MD5Init(&pf_tcp_secret_ctx);
		MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
		    sizeof (pf_tcp_secret));
		pf_tcp_secret_init = 1;
	}
	ctx = pf_tcp_secret_ctx;

	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
	if (pd->af == AF_INET6) {
		MD5Update(&ctx, (char *)&pd->src->v6, sizeof (struct in6_addr));
		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof (struct in6_addr));
	} else {
		MD5Update(&ctx, (char *)&pd->src->v4, sizeof (struct in_addr));
		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof (struct in_addr));
	}
	MD5Final((u_char *)digest, &ctx);
	pf_tcp_iss_off += 4096;
	return (digest[0] + random() + pf_tcp_iss_off);
}
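
/*
 * Explanatory note (added): pf_test_rule() is the main pass/block rule walk
 * for a packet with no existing state: it first applies any matching
 * translation rule (rewriting addresses, ports and checksums in the
 * header), then evaluates the filter ruleset using the skip-step shortcuts,
 * optionally answers blocked packets with a TCP RST or ICMP unreachable,
 * and finally creates state (and source nodes) when the matching rule
 * keeps state.
 */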
4334 pf_test_rule(struct pf_rule
**rm
, struct pf_state
**sm
, int direction
,
4335 struct pfi_kif
*kif
, struct mbuf
*m
, int off
, void *h
,
4336 struct pf_pdesc
*pd
, struct pf_rule
**am
, struct pf_ruleset
**rsm
,
4337 struct ifqueue
*ifq
)
4340 struct pf_rule
*nr
= NULL
;
4341 struct pf_addr
*saddr
= pd
->src
, *daddr
= pd
->dst
;
4342 #ifdef NO_APPLE_EXTENSIONS
4343 u_int16_t bport
, nport
= 0;
4345 sa_family_t af
= pd
->af
;
4346 struct pf_rule
*r
, *a
= NULL
;
4347 struct pf_ruleset
*ruleset
= NULL
;
4348 struct pf_src_node
*nsn
= NULL
;
4349 struct tcphdr
*th
= pd
->hdr
.tcp
;
4351 int rewrite
= 0, hdrlen
= 0;
4353 unsigned int rtableid
= IFSCOPE_NONE
;
4357 u_int16_t mss
= tcp_mssdflt
;
4358 #ifdef NO_APPLE_EXTENSIONS
4359 u_int16_t sport
, dport
;
4361 u_int8_t icmptype
= 0, icmpcode
= 0;
4363 #ifndef NO_APPLE_EXTENSIONS
4364 struct pf_grev1_hdr
*grev1
= pd
->hdr
.grev1
;
4365 union pf_state_xport bxport
, nxport
, sxport
, dxport
;
4368 lck_mtx_assert(pf_lock
, LCK_MTX_ASSERT_OWNED
);
4370 if (direction
== PF_IN
&& pf_check_congestion(ifq
)) {
4371 REASON_SET(&reason
, PFRES_CONGEST
);
4375 #ifndef NO_APPLE_EXTENSIONS
4381 sport
= dport
= hdrlen
= 0;
4384 switch (pd
->proto
) {
4386 #ifndef NO_APPLE_EXTENSIONS
4387 sxport
.port
= th
->th_sport
;
4388 dxport
.port
= th
->th_dport
;
4390 sport
= th
->th_sport
;
4391 dport
= th
->th_dport
;
4393 hdrlen
= sizeof (*th
);
4396 #ifndef NO_APPLE_EXTENSIONS
4397 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4398 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4400 sport
= pd
->hdr
.udp
->uh_sport
;
4401 dport
= pd
->hdr
.udp
->uh_dport
;
4403 hdrlen
= sizeof (*pd
->hdr
.udp
);
4407 if (pd
->af
!= AF_INET
)
4409 #ifndef NO_APPLE_EXTENSIONS
4410 sxport
.port
= dxport
.port
= pd
->hdr
.icmp
->icmp_id
;
4411 hdrlen
= ICMP_MINLEN
;
4413 sport
= dport
= pd
->hdr
.icmp
->icmp_id
;
4415 icmptype
= pd
->hdr
.icmp
->icmp_type
;
4416 icmpcode
= pd
->hdr
.icmp
->icmp_code
;
4418 if (icmptype
== ICMP_UNREACH
||
4419 icmptype
== ICMP_SOURCEQUENCH
||
4420 icmptype
== ICMP_REDIRECT
||
4421 icmptype
== ICMP_TIMXCEED
||
4422 icmptype
== ICMP_PARAMPROB
)
4427 case IPPROTO_ICMPV6
:
4428 if (pd
->af
!= AF_INET6
)
4430 #ifndef NO_APPLE_EXTENSIONS
4431 sxport
.port
= dxport
.port
= pd
->hdr
.icmp6
->icmp6_id
;
4433 sport
= dport
= pd
->hdr
.icmp6
->icmp6_id
;
4435 hdrlen
= sizeof (*pd
->hdr
.icmp6
);
4436 icmptype
= pd
->hdr
.icmp6
->icmp6_type
;
4437 icmpcode
= pd
->hdr
.icmp6
->icmp6_code
;
4439 if (icmptype
== ICMP6_DST_UNREACH
||
4440 icmptype
== ICMP6_PACKET_TOO_BIG
||
4441 icmptype
== ICMP6_TIME_EXCEEDED
||
4442 icmptype
== ICMP6_PARAM_PROB
)
4446 #ifndef NO_APPLE_EXTENSIONS
4448 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
) {
4449 sxport
.call_id
= dxport
.call_id
=
4450 pd
->hdr
.grev1
->call_id
;
4451 hdrlen
= sizeof (*pd
->hdr
.grev1
);
4456 dxport
.spi
= pd
->hdr
.esp
->spi
;
4457 hdrlen
= sizeof (*pd
->hdr
.esp
);
4462 r
= TAILQ_FIRST(pf_main_ruleset
.rules
[PF_RULESET_FILTER
].active
.ptr
);
4464 if (direction
== PF_OUT
) {
4465 #ifndef NO_APPLE_EXTENSIONS
4466 bxport
= nxport
= sxport
;
4467 /* check outgoing packet for BINAT/NAT */
4468 if ((nr
= pf_get_translation_aux(pd
, m
, off
, PF_OUT
, kif
, &nsn
,
4469 saddr
, &sxport
, daddr
, &dxport
, &pd
->naddr
, &nxport
)) !=
4472 bport
= nport
= sport
;
4473 /* check outgoing packet for BINAT/NAT */
4474 if ((nr
= pf_get_translation(pd
, m
, off
, PF_OUT
, kif
, &nsn
,
4475 saddr
, sport
, daddr
, dport
, &pd
->naddr
, &nport
)) != NULL
) {
4477 PF_ACPY(&pd
->baddr
, saddr
, af
);
4478 switch (pd
->proto
) {
4480 #ifndef NO_APPLE_EXTENSIONS
4481 pf_change_ap(direction
, pd
->mp
, saddr
,
4482 &th
->th_sport
, pd
->ip_sum
, &th
->th_sum
,
4483 &pd
->naddr
, nxport
.port
, 0, af
);
4484 sxport
.port
= th
->th_sport
;
4486 pf_change_ap(saddr
, &th
->th_sport
, pd
->ip_sum
,
4487 &th
->th_sum
, &pd
->naddr
, nport
, 0, af
);
4488 sport
= th
->th_sport
;
4493 #ifndef NO_APPLE_EXTENSIONS
4494 pf_change_ap(direction
, pd
->mp
, saddr
,
4495 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4496 &pd
->hdr
.udp
->uh_sum
, &pd
->naddr
,
4497 nxport
.port
, 1, af
);
4498 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4500 pf_change_ap(saddr
, &pd
->hdr
.udp
->uh_sport
,
4501 pd
->ip_sum
, &pd
->hdr
.udp
->uh_sum
,
4502 &pd
->naddr
, nport
, 1, af
);
4503 sport
= pd
->hdr
.udp
->uh_sport
;
4509 pf_change_a(&saddr
->v4
.s_addr
, pd
->ip_sum
,
4510 pd
->naddr
.v4
.s_addr
, 0);
4511 #ifndef NO_APPLE_EXTENSIONS
4512 pd
->hdr
.icmp
->icmp_cksum
= pf_cksum_fixup(
4513 pd
->hdr
.icmp
->icmp_cksum
, sxport
.port
,
4515 pd
->hdr
.icmp
->icmp_id
= nxport
.port
;
4518 pd
->hdr
.icmp
->icmp_cksum
= pf_cksum_fixup(
4519 pd
->hdr
.icmp
->icmp_cksum
, sport
, nport
, 0);
4520 pd
->hdr
.icmp
->icmp_id
= nport
;
4521 m_copyback(m
, off
, ICMP_MINLEN
, pd
->hdr
.icmp
);
4526 case IPPROTO_ICMPV6
:
4527 pf_change_a6(saddr
, &pd
->hdr
.icmp6
->icmp6_cksum
,
4532 #ifndef NO_APPLE_EXTENSIONS
4537 pf_change_a(&saddr
->v4
.s_addr
,
4538 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4543 PF_ACPY(saddr
, &pd
->naddr
, AF_INET6
);
4554 pf_change_a(&saddr
->v4
.s_addr
,
4555 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4560 PF_ACPY(saddr
, &pd
->naddr
, AF_INET6
);
4570 pf_change_a(&saddr
->v4
.s_addr
,
4571 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4576 PF_ACPY(saddr
, &pd
->naddr
, af
);
4588 #ifndef NO_APPLE_EXTENSIONS
4589 bxport
.port
= nxport
.port
= dxport
.port
;
4590 /* check incoming packet for BINAT/RDR */
4591 if ((nr
= pf_get_translation_aux(pd
, m
, off
, PF_IN
, kif
, &nsn
,
4592 saddr
, &sxport
, daddr
, &dxport
, &pd
->naddr
, &nxport
)) !=
4595 bport
= nport
= dport
;
4596 /* check incoming packet for BINAT/RDR */
4597 if ((nr
= pf_get_translation(pd
, m
, off
, PF_IN
, kif
, &nsn
,
4598 saddr
, sport
, daddr
, dport
, &pd
->naddr
, &nport
)) != NULL
) {
4600 PF_ACPY(&pd
->baddr
, daddr
, af
);
4601 switch (pd
->proto
) {
4603 #ifndef NO_APPLE_EXTENSIONS
4604 pf_change_ap(direction
, pd
->mp
, daddr
,
4605 &th
->th_dport
, pd
->ip_sum
, &th
->th_sum
,
4606 &pd
->naddr
, nxport
.port
, 0, af
);
4607 dxport
.port
= th
->th_dport
;
4609 pf_change_ap(daddr
, &th
->th_dport
, pd
->ip_sum
,
4610 &th
->th_sum
, &pd
->naddr
, nport
, 0, af
);
4611 dport
= th
->th_dport
;
4616 #ifndef NO_APPLE_EXTENSIONS
4617 pf_change_ap(direction
, pd
->mp
, daddr
,
4618 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4619 &pd
->hdr
.udp
->uh_sum
, &pd
->naddr
,
4620 nxport
.port
, 1, af
);
4621 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4623 pf_change_ap(direction
, daddr
,
4624 &pd
->hdr
.udp
->uh_dport
,
4625 pd
->ip_sum
, &pd
->hdr
.udp
->uh_sum
,
4626 &pd
->naddr
, nport
, 1, af
);
4627 dport
= pd
->hdr
.udp
->uh_dport
;
4633 pf_change_a(&daddr
->v4
.s_addr
, pd
->ip_sum
,
4634 pd
->naddr
.v4
.s_addr
, 0);
4638 case IPPROTO_ICMPV6
:
4639 pf_change_a6(daddr
, &pd
->hdr
.icmp6
->icmp6_cksum
,
4644 #ifndef NO_APPLE_EXTENSIONS
4646 if (pd
->proto_variant
== PF_GRE_PPTP_VARIANT
)
4647 grev1
->call_id
= nxport
.call_id
;
4652 pf_change_a(&daddr
->v4
.s_addr
,
4653 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4658 PF_ACPY(daddr
, &pd
->naddr
, AF_INET6
);
4668 pf_change_a(&daddr
->v4
.s_addr
,
4669 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4674 PF_ACPY(daddr
, &pd
->naddr
, AF_INET6
);
4684 pf_change_a(&daddr
->v4
.s_addr
,
4685 pd
->ip_sum
, pd
->naddr
.v4
.s_addr
, 0);
4690 PF_ACPY(daddr
, &pd
->naddr
, af
);
4703 #ifndef NO_APPLE_EXTENSIONS
4704 if (nr
&& nr
->tag
> 0)
4710 if (pfi_kif_match(r
->kif
, kif
) == r
->ifnot
)
4711 r
= r
->skip
[PF_SKIP_IFP
].ptr
;
4712 else if (r
->direction
&& r
->direction
!= direction
)
4713 r
= r
->skip
[PF_SKIP_DIR
].ptr
;
4714 else if (r
->af
&& r
->af
!= af
)
4715 r
= r
->skip
[PF_SKIP_AF
].ptr
;
4716 else if (r
->proto
&& r
->proto
!= pd
->proto
)
4717 r
= r
->skip
[PF_SKIP_PROTO
].ptr
;
4718 else if (PF_MISMATCHAW(&r
->src
.addr
, saddr
, af
,
4720 r
= r
->skip
[PF_SKIP_SRC_ADDR
].ptr
;
4721 /* tcp/udp only. port_op always 0 in other cases */
4722 #ifndef NO_APPLE_EXTENSIONS
4723 else if (r
->proto
== pd
->proto
&&
4724 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
4725 r
->src
.xport
.range
.op
&&
4726 !pf_match_port(r
->src
.xport
.range
.op
,
4727 r
->src
.xport
.range
.port
[0], r
->src
.xport
.range
.port
[1],
4730 else if (r
->src
.port_op
&& !pf_match_port(r
->src
.port_op
,
4731 r
->src
.port
[0], r
->src
.port
[1], th
->th_sport
))
4733 r
= r
->skip
[PF_SKIP_SRC_PORT
].ptr
;
4734 else if (PF_MISMATCHAW(&r
->dst
.addr
, daddr
, af
,
4736 r
= r
->skip
[PF_SKIP_DST_ADDR
].ptr
;
4737 /* tcp/udp only. port_op always 0 in other cases */
4738 #ifndef NO_APPLE_EXTENSIONS
4739 else if (r
->proto
== pd
->proto
&&
4740 (r
->proto
== IPPROTO_TCP
|| r
->proto
== IPPROTO_UDP
) &&
4741 r
->dst
.xport
.range
.op
&&
4742 !pf_match_port(r
->dst
.xport
.range
.op
,
4743 r
->dst
.xport
.range
.port
[0], r
->dst
.xport
.range
.port
[1],
4746 else if (r
->dst
.port_op
&& !pf_match_port(r
->dst
.port_op
,
4747 r
->dst
.port
[0], r
->dst
.port
[1], th
->th_dport
))
4749 r
= r
->skip
[PF_SKIP_DST_PORT
].ptr
;
4750 /* icmp only. type always 0 in other cases */
4751 else if (r
->type
&& r
->type
!= icmptype
+ 1)
4752 r
= TAILQ_NEXT(r
, entries
);
4753 /* icmp only. type always 0 in other cases */
4754 else if (r
->code
&& r
->code
!= icmpcode
+ 1)
4755 r
= TAILQ_NEXT(r
, entries
);
4756 else if (r
->tos
&& !(r
->tos
== pd
->tos
))
4757 r
= TAILQ_NEXT(r
, entries
);
4758 else if (r
->rule_flag
& PFRULE_FRAGMENT
)
4759 r
= TAILQ_NEXT(r
, entries
);
4760 else if (pd
->proto
== IPPROTO_TCP
&&
4761 (r
->flagset
& th
->th_flags
) != r
->flags
)
4762 r
= TAILQ_NEXT(r
, entries
);
4763 /* tcp/udp only. uid.op always 0 in other cases */
4764 else if (r
->uid
.op
&& (pd
->lookup
.done
|| (pd
->lookup
.done
=
4765 pf_socket_lookup(direction
, pd
), 1)) &&
4766 !pf_match_uid(r
->uid
.op
, r
->uid
.uid
[0], r
->uid
.uid
[1],
4768 r
= TAILQ_NEXT(r
, entries
);
4769 /* tcp/udp only. gid.op always 0 in other cases */
4770 else if (r
->gid
.op
&& (pd
->lookup
.done
|| (pd
->lookup
.done
=
4771 pf_socket_lookup(direction
, pd
), 1)) &&
4772 !pf_match_gid(r
->gid
.op
, r
->gid
.gid
[0], r
->gid
.gid
[1],
4774 r
= TAILQ_NEXT(r
, entries
);
4775 else if (r
->prob
&& r
->prob
<= (random() % (UINT_MAX
- 1) + 1))
4776 r
= TAILQ_NEXT(r
, entries
);
4777 else if (r
->match_tag
&& !pf_match_tag(m
, r
, pd
->pf_mtag
, &tag
))
4778 r
= TAILQ_NEXT(r
, entries
);
4779 else if (r
->os_fingerprint
!= PF_OSFP_ANY
&&
4780 (pd
->proto
!= IPPROTO_TCP
|| !pf_osfp_match(
4781 pf_osfp_fingerprint(pd
, m
, off
, th
),
4782 r
->os_fingerprint
)))
4783 r
= TAILQ_NEXT(r
, entries
);
4787 if (PF_RTABLEID_IS_VALID(r
->rtableid
))
4788 rtableid
= r
->rtableid
;
4789 if (r
->anchor
== NULL
) {
4796 r
= TAILQ_NEXT(r
, entries
);
4798 pf_step_into_anchor(&asd
, &ruleset
,
4799 PF_RULESET_FILTER
, &r
, &a
, &match
);
4801 if (r
== NULL
&& pf_step_out_of_anchor(&asd
, &ruleset
,
4802 PF_RULESET_FILTER
, &r
, &a
, &match
))
4809 REASON_SET(&reason
, PFRES_MATCH
);
4811 if (r
->log
|| (nr
!= NULL
&& nr
->log
)) {
4812 #ifndef NO_APPLE_EXTENSIONS
4814 if (rewrite
< off
+ hdrlen
)
4815 rewrite
= off
+ hdrlen
;
4817 m
= pf_lazy_makewritable(pd
, m
, rewrite
);
4819 REASON_SET(&reason
, PFRES_MEMORY
);
4823 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
4827 m_copyback(m
, off
, hdrlen
, pd
->hdr
.any
);
4829 PFLOG_PACKET(kif
, h
, m
, af
, direction
, reason
, r
->log
? r
: nr
,
4833 if ((r
->action
== PF_DROP
) &&
4834 ((r
->rule_flag
& PFRULE_RETURNRST
) ||
4835 (r
->rule_flag
& PFRULE_RETURNICMP
) ||
4836 (r
->rule_flag
& PFRULE_RETURN
))) {
4837 /* undo NAT changes, if they have taken place */
4839 if (direction
== PF_OUT
) {
4840 switch (pd
->proto
) {
4842 #ifndef NO_APPLE_EXTENSIONS
4843 pf_change_ap(direction
, pd
->mp
, saddr
,
4844 &th
->th_sport
, pd
->ip_sum
,
4845 &th
->th_sum
, &pd
->baddr
,
4846 bxport
.port
, 0, af
);
4847 sxport
.port
= th
->th_sport
;
4849 pf_change_ap(saddr
, &th
->th_sport
,
4850 pd
->ip_sum
, &th
->th_sum
,
4851 &pd
->baddr
, bport
, 0, af
);
4852 sport
= th
->th_sport
;
4857 #ifndef NO_APPLE_EXTENSIONS
4858 pf_change_ap(direction
, pd
->mp
, saddr
,
4859 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4860 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4861 bxport
.port
, 1, af
);
4862 sxport
.port
= pd
->hdr
.udp
->uh_sport
;
4865 &pd
->hdr
.udp
->uh_sport
, pd
->ip_sum
,
4866 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4868 sport
= pd
->hdr
.udp
->uh_sport
;
4874 case IPPROTO_ICMPV6
:
4878 #ifndef NO_APPLE_EXTENSIONS
4880 PF_ACPY(&pd
->baddr
, saddr
, af
);
4885 pf_change_a(&saddr
->v4
.s_addr
,
4887 pd
->baddr
.v4
.s_addr
, 0);
4892 PF_ACPY(saddr
, &pd
->baddr
,
4899 PF_ACPY(&pd
->baddr
, saddr
, af
);
4903 pf_change_a(&saddr
->v4
.s_addr
,
4905 pd
->baddr
.v4
.s_addr
, 0);
4910 PF_ACPY(saddr
, &pd
->baddr
,
4920 pf_change_a(&saddr
->v4
.s_addr
,
4922 pd
->baddr
.v4
.s_addr
, 0);
4925 PF_ACPY(saddr
, &pd
->baddr
, af
);
4930 switch (pd
->proto
) {
4932 #ifndef NO_APPLE_EXTENSIONS
4933 pf_change_ap(direction
, pd
->mp
, daddr
,
4934 &th
->th_dport
, pd
->ip_sum
,
4935 &th
->th_sum
, &pd
->baddr
,
4936 bxport
.port
, 0, af
);
4937 dxport
.port
= th
->th_dport
;
4939 pf_change_ap(daddr
, &th
->th_dport
,
4940 pd
->ip_sum
, &th
->th_sum
,
4941 &pd
->baddr
, bport
, 0, af
);
4942 dport
= th
->th_dport
;
4947 #ifndef NO_APPLE_EXTENSIONS
4948 pf_change_ap(direction
, pd
->mp
, daddr
,
4949 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4950 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4951 bxport
.port
, 1, af
);
4952 dxport
.port
= pd
->hdr
.udp
->uh_dport
;
4955 &pd
->hdr
.udp
->uh_dport
, pd
->ip_sum
,
4956 &pd
->hdr
.udp
->uh_sum
, &pd
->baddr
,
4958 dport
= pd
->hdr
.udp
->uh_dport
;
4964 case IPPROTO_ICMPV6
:
4968 #ifndef NO_APPLE_EXTENSIONS
4970 if (pd
->proto_variant
==
4971 PF_GRE_PPTP_VARIANT
)
4972 grev1
->call_id
= bxport
.call_id
;
4977 pf_change_a(&daddr
->v4
.s_addr
,
4979 pd
->baddr
.v4
.s_addr
, 0);
4984 PF_ACPY(daddr
, &pd
->baddr
,
4994 pf_change_a(&daddr
->v4
.s_addr
,
4996 pd
->baddr
.v4
.s_addr
, 0);
5001 PF_ACPY(daddr
, &pd
->baddr
,
5011 pf_change_a(&daddr
->v4
.s_addr
,
5013 pd
->baddr
.v4
.s_addr
, 0);
5017 PF_ACPY(daddr
, &pd
->baddr
, af
);
5024 if (pd
->proto
== IPPROTO_TCP
&&
5025 ((r
->rule_flag
& PFRULE_RETURNRST
) ||
5026 (r
->rule_flag
& PFRULE_RETURN
)) &&
5027 !(th
->th_flags
& TH_RST
)) {
5028 u_int32_t ack
= ntohl(th
->th_seq
) + pd
->p_len
;
5037 h4
= mtod(m
, struct ip
*);
5038 len
= ntohs(h4
->ip_len
) - off
;
5042 h6
= mtod(m
, struct ip6_hdr
*);
5043 len
= ntohs(h6
->ip6_plen
) -
5044 (off
- sizeof (*h6
));
5049 if (pf_check_proto_cksum(m
, off
, len
, IPPROTO_TCP
, af
))
5050 REASON_SET(&reason
, PFRES_PROTCKSUM
);
5052 if (th
->th_flags
& TH_SYN
)
5054 if (th
->th_flags
& TH_FIN
)
5056 pf_send_tcp(r
, af
, pd
->dst
,
5057 pd
->src
, th
->th_dport
, th
->th_sport
,
5058 ntohl(th
->th_ack
), ack
, TH_RST
|TH_ACK
, 0, 0,
5059 r
->return_ttl
, 1, 0, pd
->eh
, kif
->pfik_ifp
);
} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
#ifndef NO_APPLE_EXTENSIONS
pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
pf_send_icmp(m, r->return_icmp >> 8,
r->return_icmp & 255, af, r);
else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
#ifndef NO_APPLE_EXTENSIONS
pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
pf_send_icmp(m, r->return_icmp6 >> 8,
r->return_icmp6 & 255, af, r);
if (r->action == PF_DROP)
if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid)) {
REASON_SET(&reason, PFRES_MEMORY);
if (!state_icmp && (r->keep_state || nr != NULL ||
(pd->flags & PFDESC_TCP_NORM))) {
/* create new state */
struct pf_state *s = NULL;
struct pf_state_key *sk = NULL;
struct pf_src_node *sn = NULL;
#ifndef NO_APPLE_EXTENSIONS
struct pf_ike_hdr ike;
if (pd->proto == IPPROTO_UDP) {
struct udphdr *uh = pd->hdr.udp;
size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
ntohs(uh->uh_dport) == PF_IKE_PORT &&
plen >= PF_IKE_PACKET_MINSIZE) {
if (plen > PF_IKE_PACKET_MINSIZE)
plen = PF_IKE_PACKET_MINSIZE;
m_copydata(m, off + sizeof (*uh), plen, &ike);
if (nr != NULL && pd->proto == IPPROTO_ESP &&
direction == PF_OUT) {
struct pf_state_key_cmp sk0;
struct pf_state *s0;
/*
 * This squelches state creation if the external
 * address matches an existing incomplete state with a
 * different internal address.  Only one 'blocking'
 * partial state is allowed for each external address.
 */
memset(&sk0, 0, sizeof (sk0));
sk0.proto = IPPROTO_ESP;
PF_ACPY(&sk0.gwy.addr, saddr, sk0.af);
PF_ACPY(&sk0.ext.addr, daddr, sk0.af);
s0 = pf_find_state(kif, &sk0, PF_IN);
if (s0 && PF_ANEQ(&s0->state_key->lan.addr,
/* check maximums */
if (r->max_states && (r->states >= r->max_states)) {
pf_status.lcounters[LCNT_STATES]++;
REASON_SET(&reason, PFRES_MAXSTATES);
/* src node for filter rule */
if ((r->rule_flag & PFRULE_SRCTRACK ||
r->rpool.opts & PF_POOL_STICKYADDR) &&
pf_insert_src_node(&sn, r, saddr, af) != 0) {
REASON_SET(&reason, PFRES_SRCLIMIT);
/* src node for translation rule */
if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
((direction == PF_OUT &&
#ifndef NO_APPLE_EXTENSIONS
nr->action != PF_RDR &&
pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
(pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
REASON_SET(&reason, PFRES_SRCLIMIT);
s = pool_get(&pf_state_pl, PR_WAITOK);
REASON_SET(&reason, PFRES_MEMORY);
if (sn != NULL && sn->states == 0 && sn->expire == 0) {
RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, sn);
if (nsn != sn && nsn != NULL && nsn->states == 0 &&
RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
pf_status.src_nodes--;
pool_put(&pf_src_tree_pl, nsn);
#ifndef NO_APPLE_EXTENSIONS
pool_put(&pf_app_state_pl,
pool_put(&pf_state_key_pl, sk);
bzero(s, sizeof (*s));
#ifndef NO_APPLE_EXTENSIONS
TAILQ_INIT(&s->unlink_hooks);
s->nat_rule.ptr = nr;
STATE_INC_COUNTERS(s);
s->allow_opts = r->allow_opts;
s->log = r->log & PF_LOG_ALL;
s->log |= nr->log & PF_LOG_ALL;
switch (pd->proto) {
s->src.seqlo = ntohl(th->th_seq);
s->src.seqhi = s->src.seqlo + pd->p_len + 1;
if ((th->th_flags & (TH_SYN|TH_ACK)) ==
TH_SYN && r->keep_state == PF_STATE_MODULATE) {
/* Generate sequence number modulator */
if ((s->src.seqdiff = pf_tcp_iss(pd) -
pf_change_a(&th->th_seq, &th->th_sum,
htonl(s->src.seqlo + s->src.seqdiff), 0);
rewrite = off + sizeof (*th);
if (th->th_flags & TH_SYN) {
s->src.wscale = pf_get_wscale(m, off,
s->src.max_win = MAX(ntohs(th->th_win), 1);
if (s->src.wscale & PF_WSCALE_MASK) {
/* Remove scale factor from initial window */
int win = s->src.max_win;
win += 1 << (s->src.wscale & PF_WSCALE_MASK);
s->src.max_win = (win - 1) >>
(s->src.wscale & PF_WSCALE_MASK);
if (th->th_flags & TH_FIN)
s->src.state = TCPS_SYN_SENT;
s->dst.state = TCPS_CLOSED;
s->timeout = PFTM_TCP_FIRST_PACKET;
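/*
 * Illustrative sketch, not part of the original pf.c: the "remove scale
 * factor from initial window" step above.  A SYN's window field is never
 * scaled, but pf stores max_win in unscaled units, so the advertised value
 * is divided by 2^wscale, rounding up so a small non-zero window does not
 * collapse to zero.  The helper name is hypothetical; PF_WSCALE_MASK is the
 * mask used by the surrounding code.
 */
#if 0
static u_int16_t
example_unscale_initial_window(u_int16_t th_win, u_int8_t wscale)
{
	u_int32_t win = (th_win > 0) ? th_win : 1;
	u_int32_t shift = wscale & PF_WSCALE_MASK;

	/* round up: (win + 2^shift - 1) >> shift, as the code above does */
	win += (u_int32_t)1 << shift;
	return ((u_int16_t)((win - 1) >> shift));
}
#endif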
s->src.state = PFUDPS_SINGLE;
s->dst.state = PFUDPS_NO_TRAFFIC;
s->timeout = PFTM_UDP_FIRST_PACKET;
case IPPROTO_ICMPV6:
s->timeout = PFTM_ICMP_FIRST_PACKET;
#ifndef NO_APPLE_EXTENSIONS
s->src.state = PFGRE1S_INITIATING;
s->dst.state = PFGRE1S_NO_TRAFFIC;
s->timeout = PFTM_GREv1_INITIATING;
s->src.state = PFESPS_INITIATING;
s->dst.state = PFESPS_NO_TRAFFIC;
s->timeout = PFTM_ESP_FIRST_PACKET;
s->src.state = PFOTHERS_SINGLE;
s->dst.state = PFOTHERS_NO_TRAFFIC;
s->timeout = PFTM_OTHER_FIRST_PACKET;
s->creation = pf_time_second();
s->expire = pf_time_second();
s->src_node->states++;
VERIFY(s->src_node->states != 0);
PF_ACPY(&nsn->raddr, &pd->naddr, af);
s->nat_src_node = nsn;
s->nat_src_node->states++;
VERIFY(s->nat_src_node->states != 0);
if (pd->proto == IPPROTO_TCP) {
if ((pd->flags & PFDESC_TCP_NORM) &&
pf_normalize_tcp_init(m, off, pd, th, &s->src,
REASON_SET(&reason, PFRES_MEMORY);
pf_src_tree_remove_state(s);
STATE_DEC_COUNTERS(s);
pool_put(&pf_state_pl, s);
if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
pf_normalize_tcp_stateful(m, off, pd, &reason,
th, s, &s->src, &s->dst, &rewrite)) {
/* This really shouldn't happen!!! */
DPFPRINTF(PF_DEBUG_URGENT,
("pf_normalize_tcp_stateful failed on "
pf_normalize_tcp_cleanup(s);
pf_src_tree_remove_state(s);
STATE_DEC_COUNTERS(s);
pool_put(&pf_state_pl, s);
if ((sk = pf_alloc_state_key(s)) == NULL) {
REASON_SET(&reason, PFRES_MEMORY);
sk->proto = pd->proto;
sk->direction = direction;
#ifndef NO_APPLE_EXTENSIONS
if (pd->proto == IPPROTO_UDP) {
if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
sk->proto_variant = PF_EXTFILTER_APD;
sk->proto_variant = nr ? nr->extfilter :
if (sk->proto_variant < PF_EXTFILTER_APD)
sk->proto_variant = PF_EXTFILTER_APD;
} else if (pd->proto == IPPROTO_GRE) {
sk->proto_variant = pd->proto_variant;
if (direction == PF_OUT) {
PF_ACPY(&sk->gwy.addr, saddr, af);
PF_ACPY(&sk->ext.addr, daddr, af);
switch (pd->proto) {
#ifndef NO_APPLE_EXTENSIONS
sk->gwy.xport = sxport;
sk->ext.xport = dxport;
sk->gwy.xport.spi = 0;
sk->ext.xport.spi = pd->hdr.esp->spi;
case IPPROTO_ICMPV6:
#ifndef NO_APPLE_EXTENSIONS
sk->gwy.xport.port = nxport.port;
sk->ext.xport.spi = 0;
sk->gwy.port = nport;
#ifndef NO_APPLE_EXTENSIONS
sk->gwy.xport = sxport;
sk->ext.xport = dxport;
sk->gwy.port = sport;
sk->ext.port = dport;
#ifndef NO_APPLE_EXTENSIONS
PF_ACPY(&sk->lan.addr, &pd->baddr, af);
sk->lan.xport = bxport;
PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
sk->lan.xport = sk->gwy.xport;
PF_ACPY(&sk->lan.addr, &pd->baddr, af);
sk->lan.port = bport;
PF_ACPY(&sk->lan.addr, &sk->gwy.addr, af);
sk->lan.port = sk->gwy.port;
PF_ACPY(&sk->lan.addr, daddr, af);
PF_ACPY(&sk->ext.addr, saddr, af);
switch (pd->proto) {
case IPPROTO_ICMPV6:
#ifndef NO_APPLE_EXTENSIONS
sk->lan.xport = nxport;
sk->ext.xport.spi = 0;
sk->lan.port = nport;
#ifndef NO_APPLE_EXTENSIONS
sk->ext.xport.spi = 0;
sk->lan.xport.spi = pd->hdr.esp->spi;
sk->lan.xport = dxport;
sk->ext.xport = sxport;
sk->lan.port = dport;
sk->ext.port = sport;
#ifndef NO_APPLE_EXTENSIONS
PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
sk->gwy.xport = bxport;
PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
sk->gwy.xport = sk->lan.xport;
PF_ACPY(&sk->gwy.addr, &pd->baddr, af);
sk->gwy.port = bport;
PF_ACPY(&sk->gwy.addr, &sk->lan.addr, af);
sk->gwy.port = sk->lan.port;
pf_set_rt_ifp(s, saddr); /* needs s->state_key set */
#ifndef NO_APPLE_EXTENSIONS
if (sk->app_state == 0) {
switch (pd->proto) {
u_int16_t dport = (direction == PF_OUT) ?
sk->ext.xport.port : sk->gwy.xport.port;
ntohs(dport) == PF_PPTP_PORT) {
struct pf_app_state *as;
as = pool_get(&pf_app_state_pl,
bzero(as, sizeof (*as));
as->handler = pf_pptp_handler;
as->compare_lan_ext = 0;
as->compare_ext_gwy = 0;
as->u.pptp.grev1_state = 0;
(void) hook_establish(&s->unlink_hooks,
0, (hook_fn_t) pf_pptp_unlink, s);
struct udphdr *uh = pd->hdr.udp;
ntohs(uh->uh_sport) == PF_IKE_PORT &&
ntohs(uh->uh_dport) == PF_IKE_PORT) {
struct pf_app_state *as;
as = pool_get(&pf_app_state_pl,
bzero(as, sizeof (*as));
as->compare_lan_ext = pf_ike_compare;
as->compare_ext_gwy = pf_ike_compare;
as->u.ike.cookie = ike.initiator_cookie;
if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
if (pd->proto == IPPROTO_TCP)
pf_normalize_tcp_cleanup(s);
REASON_SET(&reason, PFRES_STATEINS);
pf_src_tree_remove_state(s);
STATE_DEC_COUNTERS(s);
pool_put(&pf_state_pl, s);
if (pd->proto == IPPROTO_TCP &&
(th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
r->keep_state == PF_STATE_SYNPROXY) {
s->src.state = PF_TCPS_PROXY_SRC;
#ifndef NO_APPLE_EXTENSIONS
if (direction == PF_OUT) {
pf_change_ap(direction, pd->mp, saddr,
&th->th_sport, pd->ip_sum,
&th->th_sum, &pd->baddr,
bxport.port, 0, af);
sxport.port = th->th_sport;
pf_change_ap(direction, pd->mp, daddr,
&th->th_dport, pd->ip_sum,
&th->th_sum, &pd->baddr,
bxport.port, 0, af);
sxport.port = th->th_dport;
if (direction == PF_OUT) {
pf_change_ap(saddr, &th->th_sport,
pd->ip_sum, &th->th_sum, &pd->baddr,
sport = th->th_sport;
pf_change_ap(daddr, &th->th_dport,
pd->ip_sum, &th->th_sum, &pd->baddr,
sport = th->th_dport;
s->src.seqhi = htonl(random());
/* Find mss option */
mss = pf_get_mss(m, off, th->th_off, af);
mss = pf_calc_mss(saddr, af, mss);
mss = pf_calc_mss(daddr, af, mss);
pf_send_tcp(r, af, daddr, saddr, th->th_dport,
th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
REASON_SET(&reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
#ifndef NO_APPLE_EXTENSIONS
if (sk->app_state && sk->app_state->handler) {
switch (pd->proto) {
offx += th->th_off << 2;
offx += pd->hdr.udp->uh_ulen << 2;
/* ALG handlers only apply to TCP and UDP rules */
sk->app_state->handler(s, direction, offx,
REASON_SET(&reason, PFRES_MEMORY);
/* copy back packet headers if we performed NAT operations */
#ifndef NO_APPLE_EXTENSIONS
if (rewrite < off + hdrlen)
rewrite = off + hdrlen;
m = pf_lazy_makewritable(pd, pd->mp, rewrite);
REASON_SET(&reason, PFRES_MEMORY);
m_copyback(m, off, hdrlen, pd->hdr.any);
m_copyback(m, off, hdrlen, pd->hdr.any);
pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
struct pf_ruleset **rsm)
struct pf_rule *r, *a = NULL;
struct pf_ruleset *ruleset = NULL;
sa_family_t af = pd->af;
r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
if (pfi_kif_match(r->kif, kif) == r->ifnot)
r = r->skip[PF_SKIP_IFP].ptr;
else if (r->direction && r->direction != direction)
r = r->skip[PF_SKIP_DIR].ptr;
else if (r->af && r->af != af)
r = r->skip[PF_SKIP_AF].ptr;
else if (r->proto && r->proto != pd->proto)
r = r->skip[PF_SKIP_PROTO].ptr;
else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
r = r->skip[PF_SKIP_SRC_ADDR].ptr;
else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
r = r->skip[PF_SKIP_DST_ADDR].ptr;
else if (r->tos && !(r->tos == pd->tos))
r = TAILQ_NEXT(r, entries);
else if (r->os_fingerprint != PF_OSFP_ANY)
r = TAILQ_NEXT(r, entries);
#ifndef NO_APPLE_EXTENSIONS
else if (pd->proto == IPPROTO_UDP &&
(r->src.xport.range.op || r->dst.xport.range.op))
r = TAILQ_NEXT(r, entries);
else if (pd->proto == IPPROTO_TCP &&
(r->src.xport.range.op || r->dst.xport.range.op ||
r = TAILQ_NEXT(r, entries);
else if (pd->proto == IPPROTO_UDP &&
(r->src.port_op || r->dst.port_op))
r = TAILQ_NEXT(r, entries);
else if (pd->proto == IPPROTO_TCP &&
(r->src.port_op || r->dst.port_op || r->flagset))
r = TAILQ_NEXT(r, entries);
else if ((pd->proto == IPPROTO_ICMP ||
pd->proto == IPPROTO_ICMPV6) &&
(r->type || r->code))
r = TAILQ_NEXT(r, entries);
else if (r->prob && r->prob <= (random() % (UINT_MAX - 1) + 1))
r = TAILQ_NEXT(r, entries);
else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
r = TAILQ_NEXT(r, entries);
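/*
 * Illustrative sketch, not part of the original pf.c: the r->skip[] steps
 * used in the rule loop above.  pf precomputes, for each rule and each
 * PF_SKIP_* criterion, a pointer to the next rule whose value for that
 * criterion differs.  When a packet fails one criterion, the loop jumps
 * over every consecutive rule that would fail for the same reason instead
 * of testing them one by one.  The structures below are simplified
 * stand-ins, not pf's real types.
 */
#if 0
struct toy_rule {
	int		af;		/* 0 = any address family */
	struct toy_rule	*skip_af;	/* next rule with a different af */
	struct toy_rule	*next;
};

static struct toy_rule *
toy_eval(struct toy_rule *r, int pkt_af)
{
	while (r != NULL) {
		if (r->af && r->af != pkt_af) {
			/* every rule up to skip_af shares this af: skip them */
			r = r->skip_af;
			continue;
		}
		return (r);	/* candidate for the remaining checks */
	}
	return (NULL);
}
#endif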
if (r->anchor == NULL) {
r = TAILQ_NEXT(r, entries);
pf_step_into_anchor(&asd, &ruleset,
PF_RULESET_FILTER, &r, &a, &match);
if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
PF_RULESET_FILTER, &r, &a, &match))
REASON_SET(&reason, PFRES_MATCH);
PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
if (r->action != PF_PASS)
if (pf_tag_packet(m, pd->pf_mtag, tag, -1)) {
REASON_SET(&reason, PFRES_MEMORY);
#ifndef NO_APPLE_EXTENSIONS
pf_pptp_handler(struct pf_state *s, int direction, int off,
struct pf_pdesc *pd, struct pfi_kif *kif)
#pragma unused(direction)
struct pf_pptp_state *pptps;
struct pf_pptp_ctrl_msg cm;
struct pf_state *gs;
u_int16_t *pac_call_id;
u_int16_t *pns_call_id;
u_int16_t *spoof_call_id;
u_int8_t *pac_state;
u_int8_t *pns_state;
enum { PF_PPTP_PASS, PF_PPTP_INSERT_GRE, PF_PPTP_REMOVE_GRE } op;
struct pf_state_key *sk;
struct pf_state_key *gsk;
struct pf_app_state *gas;
pptps = &sk->app_state->u.pptp;
gs = pptps->grev1_state;
gs->expire = pf_time_second();
plen = min(sizeof (cm), m->m_pkthdr.len - off);
if (plen < PF_PPTP_CTRL_MSG_MINSIZE)
m_copydata(m, off, plen, &cm);
if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
if (ntohs(cm.hdr.type) != 1)
gs = pool_get(&pf_state_pl, PR_WAITOK);
memcpy(gs, s, sizeof (*gs));
memset(&gs->entry_id, 0, sizeof (gs->entry_id));
memset(&gs->entry_list, 0, sizeof (gs->entry_list));
TAILQ_INIT(&gs->unlink_hooks);
gs->pfsync_time = 0;
gs->packets[0] = gs->packets[1] = 0;
gs->bytes[0] = gs->bytes[1] = 0;
gs->timeout = PFTM_UNLINKED;
gs->id = gs->creatorid = 0;
gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
gs->src.scrub = gs->dst.scrub = 0;
gas = pool_get(&pf_app_state_pl, PR_NOWAIT);
pool_put(&pf_state_pl, gs);
gsk = pf_alloc_state_key(gs);
pool_put(&pf_app_state_pl, gas);
pool_put(&pf_state_pl, gs);
memcpy(&gsk->lan, &sk->lan, sizeof (gsk->lan));
memcpy(&gsk->gwy, &sk->gwy, sizeof (gsk->gwy));
memcpy(&gsk->ext, &sk->ext, sizeof (gsk->ext));
gsk->proto = IPPROTO_GRE;
gsk->proto_variant = PF_GRE_PPTP_VARIANT;
gsk->app_state = gas;
gsk->lan.xport.call_id = 0;
gsk->gwy.xport.call_id = 0;
gsk->ext.xport.call_id = 0;
memset(gas, 0, sizeof (*gas));
gas->u.grev1.pptp_state = s;
STATE_INC_COUNTERS(gs);
pptps->grev1_state = gs;
(void) hook_establish(&gs->unlink_hooks, 0,
(hook_fn_t) pf_grev1_unlink, gs);
gsk = gs->state_key;
switch (sk->direction) {
pns_call_id = &gsk->ext.xport.call_id;
pns_state = &gs->dst.state;
pac_call_id = &gsk->lan.xport.call_id;
pac_state = &gs->src.state;
pns_call_id = &gsk->lan.xport.call_id;
pns_state = &gs->src.state;
pac_call_id = &gsk->ext.xport.call_id;
pac_state = &gs->dst.state;
DPFPRINTF(PF_DEBUG_URGENT,
("pf_pptp_handler: bad directional!\n"));
ct = ntohs(cm.ctrl.type);
case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ:
*pns_call_id = cm.msg.call_out_req.call_id;
*pns_state = PFGRE1S_INITIATING;
if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
spoof_call_id = &cm.msg.call_out_req.call_id;
case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY:
*pac_call_id = cm.msg.call_out_rpy.call_id;
if (s->nat_rule.ptr)
(pac_call_id == &gsk->lan.xport.call_id) ?
&cm.msg.call_out_rpy.call_id :
&cm.msg.call_out_rpy.peer_call_id;
if (gs->timeout == PFTM_UNLINKED) {
*pac_state = PFGRE1S_INITIATING;
op = PF_PPTP_INSERT_GRE;
case PF_PPTP_CTRL_TYPE_CALL_IN_1ST:
*pns_call_id = cm.msg.call_in_1st.call_id;
*pns_state = PFGRE1S_INITIATING;
if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
spoof_call_id = &cm.msg.call_in_1st.call_id;
case PF_PPTP_CTRL_TYPE_CALL_IN_2ND:
*pac_call_id = cm.msg.call_in_2nd.call_id;
*pac_state = PFGRE1S_INITIATING;
if (s->nat_rule.ptr)
(pac_call_id == &gsk->lan.xport.call_id) ?
&cm.msg.call_in_2nd.call_id :
&cm.msg.call_in_2nd.peer_call_id;
case PF_PPTP_CTRL_TYPE_CALL_IN_3RD:
if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
spoof_call_id = &cm.msg.call_in_3rd.call_id;
if (cm.msg.call_in_3rd.call_id != *pns_call_id) {
if (gs->timeout == PFTM_UNLINKED)
op = PF_PPTP_INSERT_GRE;
case PF_PPTP_CTRL_TYPE_CALL_CLR:
if (cm.msg.call_clr.call_id != *pns_call_id)
op = PF_PPTP_REMOVE_GRE;
case PF_PPTP_CTRL_TYPE_CALL_DISC:
if (cm.msg.call_clr.call_id != *pac_call_id)
op = PF_PPTP_REMOVE_GRE;
case PF_PPTP_CTRL_TYPE_ERROR:
if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
spoof_call_id = &cm.msg.error.peer_call_id;
case PF_PPTP_CTRL_TYPE_SET_LINKINFO:
if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id)
spoof_call_id = &cm.msg.set_linkinfo.peer_call_id;
if (!gsk->gwy.xport.call_id && gsk->lan.xport.call_id) {
gsk->gwy.xport.call_id = gsk->lan.xport.call_id;
if (spoof_call_id) {
u_int16_t call_id = 0;
struct pf_state_key_cmp key;
key.proto = IPPROTO_GRE;
key.proto_variant = PF_GRE_PPTP_VARIANT;
PF_ACPY(&key.gwy.addr, &gsk->gwy.addr, key.af);
PF_ACPY(&key.ext.addr, &gsk->ext.addr, key.af);
key.gwy.xport.call_id = gsk->gwy.xport.call_id;
key.ext.xport.call_id = gsk->ext.xport.call_id;
call_id = htonl(random());
while (pf_find_state_all(&key, PF_IN, 0)) {
call_id = ntohs(call_id);
if (--call_id == 0) call_id = 0xffff;
call_id = htons(call_id);
key.gwy.xport.call_id = call_id;
DPFPRINTF(PF_DEBUG_URGENT,
("pf_pptp_handler: failed to spoof "
key.gwy.xport.call_id = 0;
gsk->gwy.xport.call_id = call_id;
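/*
 * Illustrative sketch, not part of the original pf.c: the spoofed call-ID
 * selection above.  The translator starts from a random 16-bit call ID and,
 * while the candidate collides with an existing GREv1 state for the same
 * address pair, decrements it (wrapping, and skipping zero) until a free
 * value is found or the search gives up.  The real code also converts
 * between host and network byte order around the lookup; `in_use` stands in
 * for the pf_find_state_all()-style collision check and is hypothetical.
 */
#if 0
static u_int16_t
example_pick_call_id(int (*in_use)(u_int16_t))
{
	u_int16_t call_id = (u_int16_t)random();
	u_int32_t tries;

	if (call_id == 0)			/* zero is reserved */
		call_id = 0xffff;
	for (tries = 0; tries < 0xffff; tries++) {
		if (!in_use(call_id))
			return (call_id);	/* free candidate found */
		if (--call_id == 0)		/* wrap around, skip zero */
			call_id = 0xffff;
	}
	return (0);				/* 0 signals failure */
}
#endif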
if (spoof_call_id && gsk->lan.xport.call_id != gsk->gwy.xport.call_id) {
if (*spoof_call_id == gsk->gwy.xport.call_id) {
*spoof_call_id = gsk->lan.xport.call_id;
th->th_sum = pf_cksum_fixup(th->th_sum,
gsk->gwy.xport.call_id, gsk->lan.xport.call_id, 0);
*spoof_call_id = gsk->gwy.xport.call_id;
th->th_sum = pf_cksum_fixup(th->th_sum,
gsk->lan.xport.call_id, gsk->gwy.xport.call_id, 0);
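/*
 * Illustrative sketch, not part of the original pf.c: the incremental
 * checksum adjustment that pf_cksum_fixup() performs above when one 16-bit
 * field (here the PPTP call ID) is rewritten in place.  This is the
 * standard RFC 1624 update of a ones'-complement checksum: fold in the
 * complement of the old value and the new value instead of recomputing the
 * sum over the whole segment.  The real helper also special-cases UDP's
 * optional zero checksum; this sketch does not.
 */
#if 0
static u_int16_t
example_cksum_adjust(u_int16_t cksum, u_int16_t old, u_int16_t new)
{
	u_int32_t sum;

	/* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m') */
	sum = (u_int16_t)~cksum + (u_int16_t)~old + new;
	sum = (sum >> 16) + (sum & 0xffff);	/* fold the carries */
	sum += (sum >> 16);
	return ((u_int16_t)~sum);
}
#endif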
m = pf_lazy_makewritable(pd, m, off + plen);
pptps->grev1_state = NULL;
STATE_DEC_COUNTERS(gs);
pool_put(&pf_state_pl, gs);
m_copyback(m, off, plen, &cm);
case PF_PPTP_REMOVE_GRE:
gs->timeout = PFTM_PURGE;
gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
gsk->lan.xport.call_id = 0;
gsk->gwy.xport.call_id = 0;
gsk->ext.xport.call_id = 0;
gs->id = gs->creatorid = 0;
case PF_PPTP_INSERT_GRE:
gs->creation = pf_time_second();
gs->expire = pf_time_second();
gs->timeout = PFTM_TCP_ESTABLISHED;
if (gs->src_node != NULL) {
++gs->src_node->states;
VERIFY(gs->src_node->states != 0);
if (gs->nat_src_node != NULL) {
++gs->nat_src_node->states;
VERIFY(gs->nat_src_node->states != 0);
pf_set_rt_ifp(gs, &sk->lan.addr);
if (pf_insert_state(BOUND_IFACE(s->rule.ptr, kif), gs)) {
/*
 * FIX ME: insertion can fail when multiple PNS
 * behind the same NAT open calls to the same PAC
 * simultaneously because spoofed call ID numbers
 * are chosen before states are inserted.  This is
 * hard to fix and happens infrequently enough that
 * users will normally try again and this ALG will
 * succeed.  Failures are expected to be rare enough
 * that fixing this is a low priority.
 */
pptps->grev1_state = NULL;
pd->lmw = -1;	/* Force PF_DROP on PFRES_MEMORY */
pf_src_tree_remove_state(gs);
STATE_DEC_COUNTERS(gs);
pool_put(&pf_state_pl, gs);
DPFPRINTF(PF_DEBUG_URGENT, ("pf_pptp_handler: error "
"inserting GREv1 state.\n"));
pf_pptp_unlink(struct pf_state *s)
struct pf_app_state *as = s->state_key->app_state;
struct pf_state *grev1s = as->u.pptp.grev1_state;
struct pf_app_state *gas = grev1s->state_key->app_state;
if (grev1s->timeout < PFTM_MAX)
grev1s->timeout = PFTM_PURGE;
gas->u.grev1.pptp_state = NULL;
as->u.pptp.grev1_state = NULL;
pf_grev1_unlink(struct pf_state *s)
struct pf_app_state *as = s->state_key->app_state;
struct pf_state *pptps = as->u.grev1.pptp_state;
struct pf_app_state *pas = pptps->state_key->app_state;
pas->u.pptp.grev1_state = NULL;
as->u.grev1.pptp_state = NULL;
pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b)
int64_t d = a->u.ike.cookie - b->u.ike.cookie;
return ((d > 0) ? 1 : ((d < 0) ? -1 : 0));
pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
struct pf_state_key_cmp key;
struct tcphdr *th = pd->hdr.tcp;
u_int16_t win = ntohs(th->th_win);
u_int32_t ack, end, seq, orig_seq;
struct pf_state_peer *src, *dst;
#ifndef NO_APPLE_EXTENSIONS
key.proto = IPPROTO_TCP;
if (direction == PF_IN) {
PF_ACPY(&key.ext.addr, pd->src, key.af);
PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
key.ext.xport.port = th->th_sport;
key.gwy.xport.port = th->th_dport;
key.ext.port = th->th_sport;
key.gwy.port = th->th_dport;
PF_ACPY(&key.lan.addr, pd->src, key.af);
PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
key.lan.xport.port = th->th_sport;
key.ext.xport.port = th->th_dport;
key.lan.port = th->th_sport;
key.ext.port = th->th_dport;
if (direction == (*state)->state_key->direction) {
src = &(*state)->src;
dst = &(*state)->dst;
src = &(*state)->dst;
dst = &(*state)->src;
if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
if (direction != (*state)->state_key->direction) {
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
if (th->th_flags & TH_SYN) {
if (ntohl(th->th_seq) != (*state)->src.seqlo) {
REASON_SET(reason, PFRES_SYNPROXY);
pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
pd->src, th->th_dport, th->th_sport,
(*state)->src.seqhi, ntohl(th->th_seq) + 1,
TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (!(th->th_flags & TH_ACK) ||
(ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
(ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
REASON_SET(reason, PFRES_SYNPROXY);
} else if ((*state)->src_node != NULL &&
pf_src_connlimit(state)) {
REASON_SET(reason, PFRES_SRCLIMIT);
(*state)->src.state = PF_TCPS_PROXY_DST;
if ((*state)->src.state == PF_TCPS_PROXY_DST) {
struct pf_state_host *psrc, *pdst;
if (direction == PF_OUT) {
psrc = &(*state)->state_key->gwy;
pdst = &(*state)->state_key->ext;
psrc = &(*state)->state_key->ext;
pdst = &(*state)->state_key->lan;
if (direction == (*state)->state_key->direction) {
if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
(ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
(ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
REASON_SET(reason, PFRES_SYNPROXY);
(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
if ((*state)->dst.seqhi == 1)
(*state)->dst.seqhi = htonl(random());
pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
#ifndef NO_APPLE_EXTENSIONS
&pdst->addr, psrc->xport.port, pdst->xport.port,
&pdst->addr, psrc->port, pdst->port,
(*state)->dst.seqhi, 0, TH_SYN, 0,
(*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
(ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
REASON_SET(reason, PFRES_SYNPROXY);
(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
(*state)->dst.seqlo = ntohl(th->th_seq);
pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
pd->src, th->th_dport, th->th_sport,
ntohl(th->th_ack), ntohl(th->th_seq) + 1,
TH_ACK, (*state)->src.max_win, 0, 0, 0,
(*state)->tag, NULL, NULL);
pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
#ifndef NO_APPLE_EXTENSIONS
&pdst->addr, psrc->xport.port, pdst->xport.port,
&pdst->addr, psrc->port, pdst->port,
(*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
TH_ACK, (*state)->dst.max_win, 0, 0, 1,
(*state)->src.seqdiff = (*state)->dst.seqhi -
(*state)->src.seqlo;
(*state)->dst.seqdiff = (*state)->src.seqhi -
(*state)->dst.seqlo;
(*state)->src.seqhi = (*state)->src.seqlo +
(*state)->dst.max_win;
(*state)->dst.seqhi = (*state)->dst.seqlo +
(*state)->src.max_win;
(*state)->src.wscale = (*state)->dst.wscale = 0;
(*state)->src.state = (*state)->dst.state =
REASON_SET(reason, PFRES_SYNPROXY);
return (PF_SYNPROXY_DROP);
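/*
 * Illustrative sketch, not part of the original pf.c: what the seqdiff
 * values computed above accomplish.  During SYN proxying pf completes two
 * independent handshakes with different initial sequence numbers, so once
 * both sides are connected every forwarded segment must have its sequence
 * and acknowledgement numbers shifted by the difference between the ISN pf
 * used on one side and the ISN the real host chose on the other.  The
 * helper below is hypothetical and ignores the checksum update that
 * pf_change_a() performs alongside the rewrite.
 */
#if 0
static void
example_splice_seq(struct tcphdr *th, u_int32_t src_seqdiff,
    u_int32_t dst_seqdiff)
{
	/* shift this peer's sequence space into the other side's space */
	th->th_seq = htonl(ntohl(th->th_seq) + src_seqdiff);
	/* and shift its acknowledgement back out of that space */
	th->th_ack = htonl(ntohl(th->th_ack) - dst_seqdiff);
}
#endif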
if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
dst->state >= TCPS_FIN_WAIT_2 &&
src->state >= TCPS_FIN_WAIT_2) {
if (pf_status.debug >= PF_DEBUG_MISC) {
printf("pf: state reuse ");
pf_print_state(*state);
pf_print_flags(th->th_flags);
/* XXX make sure it's the same direction ?? */
(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
pf_unlink_state(*state);
if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
sws = src->wscale & PF_WSCALE_MASK;
dws = dst->wscale & PF_WSCALE_MASK;
/*
 * Sequence tracking algorithm from Guido van Rooij's paper:
 * http://www.madison-gurkha.com/publications/tcp_filtering/
 */
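/*
 * Illustrative sketch, not part of the original pf.c: the core acceptance
 * test this tracker applies to each segment (the full check appears further
 * below with the MAXACKWINDOW fudge factor and the RST special cases).  A
 * segment is plausible when it ends inside the window the peer advertised,
 * does not start more than one window before data already seen, and its ACK
 * stays within roughly one window of the highest sequence number seen from
 * the other side.  SEQ_GEQ is the usual modulo-2^32 comparison; the helper
 * and its parameter names are hypothetical.
 */
#if 0
static int
example_segment_plausible(u_int32_t seq, u_int32_t end, u_int32_t ack,
    u_int32_t seqhi, u_int32_t seqlo, u_int32_t peer_seqlo, u_int32_t window)
{
	int32_t ackskew = (int32_t)(peer_seqlo - ack);

	return (SEQ_GEQ(seqhi, end) &&		/* ends inside peer's window    */
	    SEQ_GEQ(seq, seqlo - window) &&	/* no more than one window back */
	    ackskew >= -(int32_t)window &&	/* ACK not absurdly new         */
	    ackskew <= (int32_t)window);	/* ACK not absurdly old         */
}
#endif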
orig_seq = seq = ntohl(th->th_seq);
if (src->seqlo == 0) {
/* First packet from this end. Set its state */
if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
src->scrub == NULL) {
if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
REASON_SET(reason, PFRES_MEMORY);
/* Deferred generation of sequence number modulator */
if (dst->seqdiff && !src->seqdiff) {
/* use random iss for the TCP server */
while ((src->seqdiff = random() - seq) == 0)
ack = ntohl(th->th_ack) - dst->seqdiff;
pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
copyback = off + sizeof (*th);
ack = ntohl(th->th_ack);
end = seq + pd->p_len;
if (th->th_flags & TH_SYN) {
if (dst->wscale & PF_WSCALE_FLAG) {
src->wscale = pf_get_wscale(m, off, th->th_off,
if (src->wscale & PF_WSCALE_FLAG) {
/*
 * Remove scale factor from initial window
 */
sws = src->wscale & PF_WSCALE_MASK;
win = ((u_int32_t)win + (1 << sws) - 1)
dws = dst->wscale & PF_WSCALE_MASK;
#ifndef NO_APPLE_MODIFICATION
/*
 * Window scale negotiation has failed,
 * therefore we must restore the window
 * scale in the state record that we
 * optimistically removed in
 * pf_test_rule().  Care is required to
 * prevent arithmetic overflow from
 * zeroing the window when it's
 * truncated down to 16-bits. --jhw
 */
u_int32_t max_win = dst->max_win;
dst->wscale & PF_WSCALE_MASK;
dst->max_win = MIN(0xffff, max_win);
/* fixup other window */
dst->max_win <<= dst->wscale &
/* in case of a retrans SYN|ACK */
if (th->th_flags & TH_FIN)
if (src->state < TCPS_SYN_SENT)
src->state = TCPS_SYN_SENT;
/*
 * May need to slide the window (seqhi may have been set by
 * the crappy stack check or if we picked up the connection
 * after establishment)
 */
#ifndef NO_APPLE_MODIFICATIONS
if (src->seqhi == 1 ||
SEQ_GEQ(end + MAX(1, (u_int32_t)dst->max_win << dws),
src->seqhi = end + MAX(1, (u_int32_t)dst->max_win << dws);
if (src->seqhi == 1 ||
SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
src->seqhi = end + MAX(1, dst->max_win << dws);
if (win > src->max_win)
ack = ntohl(th->th_ack) - dst->seqdiff;
/* Modulate sequence numbers */
pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
copyback = off + sizeof (*th);
end = seq + pd->p_len;
if (th->th_flags & TH_SYN)
if (th->th_flags & TH_FIN)
if ((th->th_flags & TH_ACK) == 0) {
/* Let it pass through the ack skew check */
} else if ((ack == 0 &&
(th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
/* broken tcp stacks do not set ack */
(dst->state < TCPS_SYN_SENT)) {
/*
 * Many stacks (ours included) will set the ACK number in an
 * FIN|ACK if the SYN times out -- no sequence to ACK.
 */
/* Ease sequencing restrictions on no data packets */
ackskew = dst->seqlo - ack;
/*
 * Need to demodulate the sequence numbers in any TCP SACK options
 * (Selective ACK). We could optionally validate the SACK values
 * against the current ACK window, either forwards or backwards, but
 * I'm not confident that SACK has been implemented properly
 * everywhere. It wouldn't surprise me if several stacks accidently
 * SACK too far backwards of previously ACKed data. There really aren't
 * any security implications of bad SACKing unless the target stack
 * doesn't validate the option length correctly. Someone trying to
 * spoof into a TCP connection won't bother blindly sending SACK
 * options anyway.
 */
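/*
 * Illustrative sketch, not part of the original pf.c: what "demodulating"
 * SACK blocks means.  When sequence numbers are being modulated by seqdiff,
 * the 32-bit left/right edges inside any SACK option must be shifted by the
 * same amount as th_ack, otherwise the receiver would see SACKs for a
 * sequence space it never used.  The walk below shows only the option
 * parsing shape; pf_modulate_sack() additionally fixes the TCP checksum for
 * every edge it rewrites and operates on data pulled from the mbuf.
 */
#if 0
static void
example_demodulate_sack(u_int8_t *opts, int optlen, u_int32_t seqdiff)
{
	int i = 0;

	while (i < optlen) {
		u_int8_t kind = opts[i];

		if (kind == TCPOPT_EOL)
			break;
		if (kind == TCPOPT_NOP) {
			i++;
			continue;
		}
		if (i + 1 >= optlen || opts[i + 1] < 2)
			break;			/* malformed option list */
		if (kind == TCPOPT_SACK) {
			int j;

			/* each SACK block is a pair of 32-bit edges */
			for (j = i + 2; j + 8 <= i + opts[i + 1]; j += 8) {
				u_int32_t edge;

				memcpy(&edge, &opts[j], 4);
				edge = htonl(ntohl(edge) - seqdiff);
				memcpy(&opts[j], &edge, 4);

				memcpy(&edge, &opts[j + 4], 4);
				edge = htonl(ntohl(edge) - seqdiff);
				memcpy(&opts[j + 4], &edge, 4);
			}
		}
		i += opts[i + 1];
	}
}
#endif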
if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) {
#ifndef NO_APPLE_EXTENSIONS
copyback = pf_modulate_sack(m, off, pd, th, dst);
if (copyback == -1) {
REASON_SET(reason, PFRES_MEMORY);
if (pf_modulate_sack(m, off, pd, th, dst))
#define MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
if (SEQ_GEQ(src->seqhi, end) &&
/* Last octet inside other's window space */
#ifndef NO_APPLE_MODIFICATIONS
SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) &&
SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
/* Retrans: not more than one window back */
(ackskew >= -MAXACKWINDOW) &&
/* Acking not more than one reassembled fragment backwards */
(ackskew <= (MAXACKWINDOW << sws)) &&
/* Acking not more than one window forward */
((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
(orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
(pd->flags & PFDESC_IP_REAS) == 0)) {
/* Require an exact/+1 sequence match on resets when possible */
if (dst->scrub || src->scrub) {
if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
*state, src, dst, &copyback))
#ifndef NO_APPLE_EXTENSIONS
/* update max window */
if (src->max_win < win)
/* synchronize sequencing */
if (SEQ_GT(end, src->seqlo))
/* slide the window of what the other end can send */
#ifndef NO_APPLE_MODIFICATIONS
if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
dst->seqhi = ack + MAX((win << sws), 1);
if (th->th_flags & TH_SYN)
if (src->state < TCPS_SYN_SENT)
src->state = TCPS_SYN_SENT;
if (th->th_flags & TH_FIN)
if (src->state < TCPS_CLOSING)
src->state = TCPS_CLOSING;
if (th->th_flags & TH_ACK) {
if (dst->state == TCPS_SYN_SENT) {
dst->state = TCPS_ESTABLISHED;
if (src->state == TCPS_ESTABLISHED &&
(*state)->src_node != NULL &&
pf_src_connlimit(state)) {
REASON_SET(reason, PFRES_SRCLIMIT);
} else if (dst->state == TCPS_CLOSING)
dst->state = TCPS_FIN_WAIT_2;
if (th->th_flags & TH_RST)
src->state = dst->state = TCPS_TIME_WAIT;
/* update expire time */
(*state)->expire = pf_time_second();
if (src->state >= TCPS_FIN_WAIT_2 &&
dst->state >= TCPS_FIN_WAIT_2)
(*state)->timeout = PFTM_TCP_CLOSED;
else if (src->state >= TCPS_CLOSING &&
dst->state >= TCPS_CLOSING)
(*state)->timeout = PFTM_TCP_FIN_WAIT;
else if (src->state < TCPS_ESTABLISHED ||
dst->state < TCPS_ESTABLISHED)
(*state)->timeout = PFTM_TCP_OPENING;
else if (src->state >= TCPS_CLOSING ||
dst->state >= TCPS_CLOSING)
(*state)->timeout = PFTM_TCP_CLOSING;
(*state)->timeout = PFTM_TCP_ESTABLISHED;
/* Fall through to PASS packet */
} else if ((dst->state < TCPS_SYN_SENT ||
dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) &&
SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
/* Within a window forward of the originating packet */
SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
/* Within a window backward of the originating packet */
/*
 * This currently handles three situations:
 *  1) Stupid stacks will shotgun SYNs before their peer
 *  2) When PF catches an already established stream (the
 *     firewall rebooted, the state table was flushed, routes
 *  3) Packets get funky immediately after the connection
 *     closes (this should catch Solaris spurious ACK|FINs
 *     that web servers like to spew after a close)
 *
 * This must be a little more careful than the above code
 * since packet floods will also be caught here. We don't
 * update the TTL here to mitigate the damage of a packet
 * flood and so the same code can handle awkward establishment
 * and a loosened connection close.
 * In the establishment case, a correct peer response will
 * validate the connection, go through the normal state code
 * and keep updating the state TTL.
 */
if (pf_status.debug >= PF_DEBUG_MISC) {
printf("pf: loose state match: ");
pf_print_state(*state);
pf_print_flags(th->th_flags);
printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
"pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
pd->p_len, ackskew, (*state)->packets[0],
(*state)->packets[1],
direction == PF_IN ? "in" : "out",
direction == (*state)->state_key->direction ?
if (dst->scrub || src->scrub) {
if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
*state, src, dst, &copyback))
#ifndef NO_APPLE_EXTENSIONS
/* update max window */
if (src->max_win < win)
/* synchronize sequencing */
if (SEQ_GT(end, src->seqlo))
/* slide the window of what the other end can send */
#ifndef NO_APPLE_MODIFICATIONS
if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
dst->seqhi = ack + MAX((win << sws), 1);
/*
 * Cannot set dst->seqhi here since this could be a shotgunned
 * SYN and not an already established connection.
 */
if (th->th_flags & TH_FIN)
if (src->state < TCPS_CLOSING)
src->state = TCPS_CLOSING;
if (th->th_flags & TH_RST)
src->state = dst->state = TCPS_TIME_WAIT;
/* Fall through to PASS packet */
if ((*state)->dst.state == TCPS_SYN_SENT &&
(*state)->src.state == TCPS_SYN_SENT) {
/* Send RST for state mismatches during handshake */
if (!(th->th_flags & TH_RST))
pf_send_tcp((*state)->rule.ptr, pd->af,
pd->dst, pd->src, th->th_dport,
th->th_sport, ntohl(th->th_ack), 0,
(*state)->rule.ptr->return_ttl, 1, 0,
pd->eh, kif->pfik_ifp);
} else if (pf_status.debug >= PF_DEBUG_MISC) {
printf("pf: BAD state: ");
pf_print_state(*state);
pf_print_flags(th->th_flags);
printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
"pkts=%llu:%llu dir=%s,%s\n",
seq, orig_seq, ack, pd->p_len, ackskew,
(*state)->packets[0], (*state)->packets[1],
direction == PF_IN ? "in" : "out",
direction == (*state)->state_key->direction ?
printf("pf: State failure on: %c %c %c %c | %c %c\n",
SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
#ifndef NO_APPLE_MODIFICATIONS
src->seqlo - ((u_int32_t)dst->max_win << dws)) ?
SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
(ackskew >= -MAXACKWINDOW) ? ' ' : '3',
(ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ? ' ' : '5',
SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ? ' ' : '6');
REASON_SET(reason, PFRES_BADSTATE);
/* Any packets which have gotten here are to be passed */
#ifndef NO_APPLE_EXTENSIONS
if ((*state)->state_key->app_state &&
(*state)->state_key->app_state->handler) {
(*state)->state_key->app_state->handler(*state, direction,
off + (th->th_off << 2), pd, kif);
REASON_SET(reason, PFRES_MEMORY);
/* translate source/destination address, if necessary */
if (STATE_TRANSLATE((*state)->state_key)) {
if (direction == PF_OUT)
pf_change_ap(direction, pd->mp, pd->src, &th->th_sport,
pd->ip_sum, &th->th_sum,
&(*state)->state_key->gwy.addr,
(*state)->state_key->gwy.xport.port, 0, pd->af);
pf_change_ap(direction, pd->mp, pd->dst, &th->th_dport,
pd->ip_sum, &th->th_sum,
&(*state)->state_key->lan.addr,
(*state)->state_key->lan.xport.port, 0, pd->af);
copyback = off + sizeof (*th);
m = pf_lazy_makewritable(pd, m, copyback);
REASON_SET(reason, PFRES_MEMORY);
/* Copyback sequence modulation or stateful scrub changes */
m_copyback(m, off, sizeof (*th), th);
/* translate source/destination address, if necessary */
if (STATE_TRANSLATE((*state)->state_key)) {
if (direction == PF_OUT)
pf_change_ap(pd->src, pd->mp, &th->th_sport, pd->ip_sum,
&th->th_sum, &(*state)->state_key->gwy.addr,
(*state)->state_key->gwy.port, 0, pd->af);
pf_change_ap(pd->dst, pd->mp, &th->th_dport, pd->ip_sum,
&th->th_sum, &(*state)->state_key->lan.addr,
(*state)->state_key->lan.port, 0, pd->af);
m_copyback(m, off, sizeof (*th), th);
} else if (copyback) {
/* Copyback sequence modulation or stateful scrub changes */
m_copyback(m, off, sizeof (*th), th);
#ifndef NO_APPLE_EXTENSIONS
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
struct pf_state_peer *src, *dst;
struct pf_state_key_cmp key;
struct udphdr *uh = pd->hdr.udp;
#ifndef NO_APPLE_EXTENSIONS
struct pf_app_state as;
int dx, action, extfilter;
key.proto_variant = PF_EXTFILTER_APD;
key.proto = IPPROTO_UDP;
if (direction == PF_IN) {
PF_ACPY(&key.ext.addr, pd->src, key.af);
PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
key.ext.xport.port = uh->uh_sport;
key.gwy.xport.port = uh->uh_dport;
key.ext.port = uh->uh_sport;
key.gwy.port = uh->uh_dport;
PF_ACPY(&key.lan.addr, pd->src, key.af);
PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
key.lan.xport.port = uh->uh_sport;
key.ext.xport.port = uh->uh_dport;
key.lan.port = uh->uh_sport;
key.ext.port = uh->uh_dport;
#ifndef NO_APPLE_EXTENSIONS
if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
ntohs(uh->uh_dport) == PF_IKE_PORT) {
struct pf_ike_hdr ike;
size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
if (plen < PF_IKE_PACKET_MINSIZE) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: IKE message too small.\n"));
if (plen > sizeof (ike))
plen = sizeof (ike);
m_copydata(m, off + sizeof (*uh), plen, &ike);
if (ike.initiator_cookie) {
key.app_state = &as;
as.compare_lan_ext = pf_ike_compare;
as.compare_ext_gwy = pf_ike_compare;
as.u.ike.cookie = ike.initiator_cookie;
/*
 * <http://tools.ietf.org/html/\
 * draft-ietf-ipsec-nat-t-ike-01>
 * Support non-standard NAT-T implementations that
 * push the ESP packet over the top of the IKE packet.
 * Do not drop packet.
 */
DPFPRINTF(PF_DEBUG_MISC,
("pf: IKE initiator cookie = 0.\n"));
*state = pf_find_state(kif, &key, dx);
if (!key.app_state && *state == 0) {
key.proto_variant = PF_EXTFILTER_AD;
*state = pf_find_state(kif, &key, dx);
if (!key.app_state && *state == 0) {
key.proto_variant = PF_EXTFILTER_EI;
*state = pf_find_state(kif, &key, dx);
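/*
 * Illustrative sketch, not part of the original pf.c: the lookup cascade
 * above.  A UDP state created with a loose "extfilter" mode is keyed with a
 * wildcarded external port and/or address, so an inbound packet is matched
 * by retrying the lookup with progressively looser variants: address and
 * port dependent (APD), then address dependent (AD), then endpoint
 * independent (EI).  `find` stands in for pf_find_state() and is
 * hypothetical.
 */
#if 0
static void *
example_extfilter_lookup(void *(*find)(int /* variant */))
{
	static const int variants[] = {
		PF_EXTFILTER_APD,	/* exact external addr and port */
		PF_EXTFILTER_AD,	/* any external port */
		PF_EXTFILTER_EI		/* any external addr and port */
	};
	unsigned int i;
	void *s = NULL;

	for (i = 0; s == NULL &&
	    i < sizeof (variants) / sizeof (variants[0]); i++)
		s = find(variants[i]);
	return (s);
}
#endif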
if (pf_state_lookup_aux(state, kif, direction, &action))
if (direction == (*state)->state_key->direction) {
src = &(*state)->src;
dst = &(*state)->dst;
src = &(*state)->dst;
dst = &(*state)->src;
if (src->state < PFUDPS_SINGLE)
src->state = PFUDPS_SINGLE;
if (dst->state == PFUDPS_SINGLE)
dst->state = PFUDPS_MULTIPLE;
/* update expire time */
(*state)->expire = pf_time_second();
if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
(*state)->timeout = PFTM_UDP_MULTIPLE;
(*state)->timeout = PFTM_UDP_SINGLE;
#ifndef NO_APPLE_EXTENSIONS
extfilter = (*state)->state_key->proto_variant;
if (extfilter > PF_EXTFILTER_APD) {
(*state)->state_key->ext.xport.port = key.ext.xport.port;
if (extfilter > PF_EXTFILTER_AD)
PF_ACPY(&(*state)->state_key->ext.addr,
&key.ext.addr, key.af);
if ((*state)->state_key->app_state &&
(*state)->state_key->app_state->handler) {
(*state)->state_key->app_state->handler(*state, direction,
off + uh->uh_ulen, pd, kif);
REASON_SET(reason, PFRES_MEMORY);
/* translate source/destination address, if necessary */
if (STATE_TRANSLATE((*state)->state_key)) {
m = pf_lazy_makewritable(pd, m, off + sizeof (*uh));
REASON_SET(reason, PFRES_MEMORY);
if (direction == PF_OUT)
pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
pd->ip_sum, &uh->uh_sum,
&(*state)->state_key->gwy.addr,
(*state)->state_key->gwy.xport.port, 1, pd->af);
pf_change_ap(direction, pd->mp, pd->dst, &uh->uh_dport,
pd->ip_sum, &uh->uh_sum,
&(*state)->state_key->lan.addr,
(*state)->state_key->lan.xport.port, 1, pd->af);
m_copyback(m, off, sizeof (*uh), uh);
/* translate source/destination address, if necessary */
if (STATE_TRANSLATE((*state)->state_key)) {
if (direction == PF_OUT)
pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
&uh->uh_sum, &(*state)->state_key->gwy.addr,
(*state)->state_key->gwy.port, 1, pd->af);
pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
&uh->uh_sum, &(*state)->state_key->lan.addr,
(*state)->state_key->lan.port, 1, pd->af);
m_copyback(m, off, sizeof (*uh), uh);
pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
struct pf_addr *saddr = pd->src, *daddr = pd->dst;
u_int16_t icmpid = 0, *icmpsum;
struct pf_state_key_cmp key;
#ifndef NO_APPLE_EXTENSIONS
struct pf_app_state as;
switch (pd->proto) {
icmptype = pd->hdr.icmp->icmp_type;
icmpid = pd->hdr.icmp->icmp_id;
icmpsum = &pd->hdr.icmp->icmp_cksum;
if (icmptype == ICMP_UNREACH ||
icmptype == ICMP_SOURCEQUENCH ||
icmptype == ICMP_REDIRECT ||
icmptype == ICMP_TIMXCEED ||
icmptype == ICMP_PARAMPROB)
case IPPROTO_ICMPV6:
icmptype = pd->hdr.icmp6->icmp6_type;
icmpid = pd->hdr.icmp6->icmp6_id;
icmpsum = &pd->hdr.icmp6->icmp6_cksum;
if (icmptype == ICMP6_DST_UNREACH ||
icmptype == ICMP6_PACKET_TOO_BIG ||
icmptype == ICMP6_TIME_EXCEEDED ||
icmptype == ICMP6_PARAM_PROB)
/*
 * ICMP query/reply message not related to a TCP/UDP packet.
 * Search for an ICMP state.
 */
key.proto = pd->proto;
if (direction == PF_IN) {
PF_ACPY(&key.ext.addr, pd->src, key.af);
PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
key.ext.xport.port = 0;
key.gwy.xport.port = icmpid;
key.gwy.port = icmpid;
PF_ACPY(&key.lan.addr, pd->src, key.af);
PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
key.lan.xport.port = icmpid;
key.ext.xport.port = 0;
key.lan.port = icmpid;
(*state)->expire = pf_time_second();
(*state)->timeout = PFTM_ICMP_ERROR_REPLY;
/* translate source/destination address, if necessary */
if (STATE_TRANSLATE((*state)->state_key)) {
if (direction == PF_OUT) {
pf_change_a(&saddr->v4.s_addr,
(*state)->state_key->gwy.addr.v4.s_addr, 0);
#ifndef NO_APPLE_EXTENSIONS
pd->hdr.icmp->icmp_cksum =
pd->hdr.icmp->icmp_cksum, icmpid,
(*state)->state_key->gwy.xport.port, 0);
pd->hdr.icmp->icmp_id =
(*state)->state_key->gwy.xport.port;
m = pf_lazy_makewritable(pd, m,
pd->hdr.icmp->icmp_cksum =
pd->hdr.icmp->icmp_cksum, icmpid,
(*state)->state_key->gwy.port, 0);
pd->hdr.icmp->icmp_id =
(*state)->state_key->gwy.port;
m_copyback(m, off, ICMP_MINLEN,
&pd->hdr.icmp6->icmp6_cksum,
&(*state)->state_key->gwy.addr, 0);
#ifndef NO_APPLE_EXTENSIONS
m = pf_lazy_makewritable(pd, m,
off + sizeof (struct icmp6_hdr));
sizeof (struct icmp6_hdr),
pf_change_a(&daddr->v4.s_addr,
(*state)->state_key->lan.addr.v4.s_addr, 0);
#ifndef NO_APPLE_EXTENSIONS
pd->hdr.icmp->icmp_cksum =
pd->hdr.icmp->icmp_cksum, icmpid,
(*state)->state_key->lan.xport.port, 0);
pd->hdr.icmp->icmp_id =
(*state)->state_key->lan.xport.port;
m = pf_lazy_makewritable(pd, m,
pd->hdr.icmp->icmp_cksum =
pd->hdr.icmp->icmp_cksum, icmpid,
(*state)->state_key->lan.port, 0);
pd->hdr.icmp->icmp_id =
(*state)->state_key->lan.port;
m_copyback(m, off, ICMP_MINLEN,
&pd->hdr.icmp6->icmp6_cksum,
&(*state)->state_key->lan.addr, 0);
#ifndef NO_APPLE_EXTENSIONS
m = pf_lazy_makewritable(pd, m,
off + sizeof (struct icmp6_hdr));
sizeof (struct icmp6_hdr),
/*
 * ICMP error message in response to a TCP/UDP packet.
 * Extract the inner TCP/UDP header and search for that state.
 */
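/*
 * Illustrative sketch, not part of the original pf.c: where the embedded
 * headers referenced by the comment above live inside an ICMP error.  For
 * IPv4 the quoted IP header starts ICMP_MINLEN (8) bytes after the ICMP
 * header, and the quoted transport header follows at that offset plus the
 * quoted header's own length (ip_hl is in 32-bit words).  The IPv6 case in
 * the code below is analogous with a fixed-size ip6_hdr.  The helper name
 * is hypothetical.
 */
#if 0
static void
example_icmp4_quoted_offsets(int icmp_off, const struct ip *quoted_ip,
    int *ipoff2, int *off2)
{
	*ipoff2 = icmp_off + ICMP_MINLEN;		/* quoted IP header */
	*off2 = *ipoff2 + (quoted_ip->ip_hl << 2);	/* quoted TCP/UDP/ICMP */
}
#endif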
struct pf_pdesc pd2;
struct ip6_hdr h2_6;
memset(&pd2, 0, sizeof (pd2));
/* offset of h2 in mbuf chain */
ipoff2 = off + ICMP_MINLEN;
if (!pf_pull_hdr(m, ipoff2, &h2, sizeof (h2),
NULL, reason, pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: ICMP error message too short "
/*
 * ICMP error messages don't refer to non-first
 * fragments
 */
if (h2.ip_off & htons(IP_OFFMASK)) {
REASON_SET(reason, PFRES_FRAG);
/* offset of protocol header that follows h2 */
off2 = ipoff2 + (h2.ip_hl << 2);
pd2.proto = h2.ip_p;
pd2.src = (struct pf_addr *)&h2.ip_src;
pd2.dst = (struct pf_addr *)&h2.ip_dst;
pd2.ip_sum = &h2.ip_sum;
ipoff2 = off + sizeof (struct icmp6_hdr);
if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof (h2_6),
NULL, reason, pd2.af)) {
DPFPRINTF(PF_DEBUG_MISC,
("pf: ICMP error message too short "
pd2.proto = h2_6.ip6_nxt;
pd2.src = (struct pf_addr *)&h2_6.ip6_src;
pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
off2 = ipoff2 + sizeof (h2_6);
switch (pd2.proto) {
case IPPROTO_FRAGMENT:
/*
 * ICMPv6 error messages for
 * non-first fragments
 */
REASON_SET(reason, PFRES_FRAG);
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS: {
/* get next header and header length */
struct ip6_ext opt6;
if (!pf_pull_hdr(m, off2, &opt6,
sizeof (opt6), NULL, reason,
DPFPRINTF(PF_DEBUG_MISC,
("pf: ICMPv6 short opt\n"));
if (pd2.proto == IPPROTO_AH)
off2 += (opt6.ip6e_len + 2) * 4;
off2 += (opt6.ip6e_len + 1) * 8;
pd2.proto = opt6.ip6e_nxt;
/* goto the next header */
} while (!terminal);
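/*
 * Illustrative sketch, not part of the original pf.c: the extension-header
 * length arithmetic used in the walk above.  Generic IPv6 extension headers
 * encode their length in 8-byte units excluding the first 8 bytes, while AH
 * (RFC 4302) encodes it in 4-byte units excluding the first two words,
 * which is why the two cases advance the offset differently.  The helper
 * name is hypothetical.
 */
#if 0
static int
example_ip6_ext_len(u_int8_t proto, const struct ip6_ext *ext)
{
	if (proto == IPPROTO_AH)
		return ((ext->ip6e_len + 2) * 4);	/* AH: 32-bit units */
	return ((ext->ip6e_len + 1) * 8);		/* others: 64-bit units */
}
#endif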
7116 switch (pd2
.proto
) {
7120 struct pf_state_peer
*src
, *dst
;
7125 * Only the first 8 bytes of the TCP header can be
7126 * expected. Don't access any TCP header fields after
7127 * th_seq, an ackskew test is not possible.
7129 if (!pf_pull_hdr(m
, off2
, &th
, 8, NULL
, reason
,
7131 DPFPRINTF(PF_DEBUG_MISC
,
7132 ("pf: ICMP error message too short "
7138 key
.proto
= IPPROTO_TCP
;
7139 if (direction
== PF_IN
) {
7140 PF_ACPY(&key
.ext
.addr
, pd2
.dst
, key
.af
);
7141 PF_ACPY(&key
.gwy
.addr
, pd2
.src
, key
.af
);
7142 #ifndef NO_APPLE_EXTENSIONS
7143 key
.ext
.xport
.port
= th
.th_dport
;
7144 key
.gwy
.xport
.port
= th
.th_sport
;
7146 key
.ext
.port
= th
.th_dport
;
7147 key
.gwy
.port
= th
.th_sport
;
7150 PF_ACPY(&key
.lan
.addr
, pd2
.dst
, key
.af
);
7151 PF_ACPY(&key
.ext
.addr
, pd2
.src
, key
.af
);
7152 #ifndef NO_APPLE_EXTENSIONS
7153 key
.lan
.xport
.port
= th
.th_dport
;
7154 key
.ext
.xport
.port
= th
.th_sport
;
7156 key
.lan
.port
= th
.th_dport
;
7157 key
.ext
.port
= th
.th_sport
;
7163 if (direction
== (*state
)->state_key
->direction
) {
7164 src
= &(*state
)->dst
;
7165 dst
= &(*state
)->src
;
7167 src
= &(*state
)->src
;
7168 dst
= &(*state
)->dst
;
7171 if (src
->wscale
&& dst
->wscale
)
7172 dws
= dst
->wscale
& PF_WSCALE_MASK
;
7176 /* Demodulate sequence number */
7177 seq
= ntohl(th
.th_seq
) - src
->seqdiff
;
7179 pf_change_a(&th
.th_seq
, icmpsum
,
7184 if (!SEQ_GEQ(src
->seqhi
, seq
) ||
7185 #ifndef NO_APPLE_MODIFICATION
7187 src
->seqlo
- ((u_int32_t
)dst
->max_win
<< dws
))) {
7189 !SEQ_GEQ(seq
, src
->seqlo
- (dst
->max_win
<< dws
))) {
7191 if (pf_status
.debug
>= PF_DEBUG_MISC
) {
7192 printf("pf: BAD ICMP %d:%d ",
7193 icmptype
, pd
->hdr
.icmp
->icmp_code
);
7194 pf_print_host(pd
->src
, 0, pd
->af
);
7196 pf_print_host(pd
->dst
, 0, pd
->af
);
7198 pf_print_state(*state
);
7199 printf(" seq=%u\n", seq
);
7201 REASON_SET(reason
, PFRES_BADSTATE
);
7205 if (STATE_TRANSLATE((*state
)->state_key
)) {
7206 if (direction
== PF_IN
) {
7207 pf_change_icmp(pd2
.src
, &th
.th_sport
,
7208 daddr
, &(*state
)->state_key
->lan
.addr
,
7209 #ifndef NO_APPLE_EXTENSIONS
7210 (*state
)->state_key
->lan
.xport
.port
, NULL
,
7212 (*state
)->state_key
->lan
.port
, NULL
,
7214 pd2
.ip_sum
, icmpsum
,
7215 pd
->ip_sum
, 0, pd2
.af
);
7217 pf_change_icmp(pd2
.dst
, &th
.th_dport
,
7218 saddr
, &(*state
)->state_key
->gwy
.addr
,
7219 #ifndef NO_APPLE_EXTENSIONS
7220 (*state
)->state_key
->gwy
.xport
.port
, NULL
,
7222 (*state
)->state_key
->gwy
.port
, NULL
,
7224 pd2
.ip_sum
, icmpsum
,
7225 pd
->ip_sum
, 0, pd2
.af
);
7231 #ifndef NO_APPLE_EXTENSIONS
7232 m
= pf_lazy_makewritable(pd
, m
, off2
+ 8);
7239 m_copyback(m
, off
, ICMP_MINLEN
,
7241 m_copyback(m
, ipoff2
, sizeof (h2
),
7248 sizeof (struct icmp6_hdr
),
7250 m_copyback(m
, ipoff2
, sizeof (h2_6
),
7255 m_copyback(m
, off2
, 8, &th
);
7263 #ifndef NO_APPLE_EXTENSIONS
7266 if (!pf_pull_hdr(m
, off2
, &uh
, sizeof (uh
),
7267 NULL
, reason
, pd2
.af
)) {
7268 DPFPRINTF(PF_DEBUG_MISC
,
7269 ("pf: ICMP error message too short "
7275 key
.proto
= IPPROTO_UDP
;
7276 if (direction
== PF_IN
) {
7277 PF_ACPY(&key
.ext
.addr
, pd2
.dst
, key
.af
);
7278 PF_ACPY(&key
.gwy
.addr
, pd2
.src
, key
.af
);
7279 #ifndef NO_APPLE_EXTENSIONS
7280 key
.ext
.xport
.port
= uh
.uh_dport
;
7281 key
.gwy
.xport
.port
= uh
.uh_sport
;
7284 key
.ext
.port
= uh
.uh_dport
;
7285 key
.gwy
.port
= uh
.uh_sport
;
7288 PF_ACPY(&key
.lan
.addr
, pd2
.dst
, key
.af
);
7289 PF_ACPY(&key
.ext
.addr
, pd2
.src
, key
.af
);
7290 #ifndef NO_APPLE_EXTENSIONS
7291 key
.lan
.xport
.port
= uh
.uh_dport
;
7292 key
.ext
.xport
.port
= uh
.uh_sport
;
7295 key
.lan
.port
= uh
.uh_dport
;
7296 key
.ext
.port
= uh
.uh_sport
;
7300 #ifndef NO_APPLE_EXTENSIONS
7301 key
.proto_variant
= PF_EXTFILTER_APD
;
7303 if (ntohs(uh
.uh_sport
) == PF_IKE_PORT
&&
7304 ntohs(uh
.uh_dport
) == PF_IKE_PORT
) {
7305 struct pf_ike_hdr ike
;
7307 m
->m_pkthdr
.len
- off2
- sizeof (uh
);
7308 if (direction
== PF_IN
&&
7309 plen
< 8 /* PF_IKE_PACKET_MINSIZE */) {
7310 DPFPRINTF(PF_DEBUG_MISC
, ("pf: "
7311 "ICMP error, embedded IKE message "
7316 if (plen
> sizeof (ike
))
7317 plen
= sizeof (ike
);
7318 m_copydata(m
, off
+ sizeof (uh
), plen
, &ike
);
7320 key
.app_state
= &as
;
7321 as
.compare_lan_ext
= pf_ike_compare
;
7322 as
.compare_ext_gwy
= pf_ike_compare
;
7323 as
.u
.ike
.cookie
= ike
.initiator_cookie
;
7326 *state
= pf_find_state(kif
, &key
, dx
);
7328 if (key
.app_state
&& *state
== 0) {
7330 *state
= pf_find_state(kif
, &key
, dx
);
7334 key
.proto_variant
= PF_EXTFILTER_AD
;
7335 *state
= pf_find_state(kif
, &key
, dx
);
7339 key
.proto_variant
= PF_EXTFILTER_EI
;
7340 *state
= pf_find_state(kif
, &key
, dx
);
7343 if (pf_state_lookup_aux(state
, kif
, direction
, &action
))
7349 if (STATE_TRANSLATE((*state
)->state_key
)) {
7350 if (direction
== PF_IN
) {
7351 pf_change_icmp(pd2
.src
, &uh
.uh_sport
,
7352 daddr
, &(*state
)->state_key
->lan
.addr
,
7353 #ifndef NO_APPLE_EXTENSIONS
7354 (*state
)->state_key
->lan
.xport
.port
, &uh
.uh_sum
,
7356 (*state
)->state_key
->lan
.port
, &uh
.uh_sum
,
7358 pd2
.ip_sum
, icmpsum
,
7359 pd
->ip_sum
, 1, pd2
.af
);
7361 pf_change_icmp(pd2
.dst
, &uh
.uh_dport
,
7362 saddr
, &(*state
)->state_key
->gwy
.addr
,
7363 #ifndef NO_APPLE_EXTENSIONS
7364 (*state
)->state_key
->gwy
.xport
.port
, &uh
.uh_sum
,
7366 (*state
)->state_key
->gwy
.port
, &uh
.uh_sum
,
7368 pd2
.ip_sum
, icmpsum
,
7369 pd
->ip_sum
, 1, pd2
.af
);
7371 #ifndef NO_APPLE_EXTENSIONS
7372 m
= pf_lazy_makewritable(pd
, m
,
7373 off2
+ sizeof (uh
));
7380 m_copyback(m
, off
, ICMP_MINLEN
,
7382 m_copyback(m
, ipoff2
, sizeof (h2
), &h2
);
7388 sizeof (struct icmp6_hdr
),
7390 m_copyback(m
, ipoff2
, sizeof (h2_6
),
7395 m_copyback(m
, off2
, sizeof (uh
), &uh
);
    case IPPROTO_ICMP: {
        struct icmp iih;

        if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
            NULL, reason, pd2.af)) {
            DPFPRINTF(PF_DEBUG_MISC,
                ("pf: ICMP error message too short i"
                "(icmp)\n"));
            return (PF_DROP);
        }

        key.af = pd2.af;
        key.proto = IPPROTO_ICMP;
        if (direction == PF_IN) {
            PF_ACPY(&key.ext.addr, pd2.dst, key.af);
            PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.ext.xport.port = 0;
            key.gwy.xport.port = iih.icmp_id;
#else
            key.ext.port = 0;
            key.gwy.port = iih.icmp_id;
#endif
        } else {
            PF_ACPY(&key.lan.addr, pd2.dst, key.af);
            PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.lan.xport.port = iih.icmp_id;
            key.ext.xport.port = 0;
#else
            key.lan.port = iih.icmp_id;
            key.ext.port = 0;
#endif
        }

        STATE_LOOKUP();

        if (STATE_TRANSLATE((*state)->state_key)) {
            if (direction == PF_IN) {
                pf_change_icmp(pd2.src, &iih.icmp_id,
                    daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                    (*state)->state_key->lan.xport.port, NULL,
#else
                    (*state)->state_key->lan.port, NULL,
#endif
                    pd2.ip_sum, icmpsum,
                    pd->ip_sum, 0, AF_INET);
            } else {
                pf_change_icmp(pd2.dst, &iih.icmp_id,
                    saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                    (*state)->state_key->gwy.xport.port, NULL,
#else
                    (*state)->state_key->gwy.port, NULL,
#endif
                    pd2.ip_sum, icmpsum,
                    pd->ip_sum, 0, AF_INET);
            }
#ifndef NO_APPLE_EXTENSIONS
            m = pf_lazy_makewritable(pd, m, off2 + ICMP_MINLEN);
            if (!m)
                return (PF_DROP);
#endif
            m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
            m_copyback(m, ipoff2, sizeof (h2), &h2);
            m_copyback(m, off2, ICMP_MINLEN, &iih);
        }

        break;
    }
    case IPPROTO_ICMPV6: {
        struct icmp6_hdr iih;

        if (!pf_pull_hdr(m, off2, &iih,
            sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) {
            DPFPRINTF(PF_DEBUG_MISC,
                ("pf: ICMP error message too short "
                "(icmp6)\n"));
            return (PF_DROP);
        }

        key.af = pd2.af;
        key.proto = IPPROTO_ICMPV6;
        if (direction == PF_IN) {
            PF_ACPY(&key.ext.addr, pd2.dst, key.af);
            PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.ext.xport.port = 0;
            key.gwy.xport.port = iih.icmp6_id;
#else
            key.ext.port = 0;
            key.gwy.port = iih.icmp6_id;
#endif
        } else {
            PF_ACPY(&key.lan.addr, pd2.dst, key.af);
            PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.lan.xport.port = iih.icmp6_id;
            key.ext.xport.port = 0;
#else
            key.lan.port = iih.icmp6_id;
            key.ext.port = 0;
#endif
        }

        STATE_LOOKUP();

        if (STATE_TRANSLATE((*state)->state_key)) {
            if (direction == PF_IN) {
                pf_change_icmp(pd2.src, &iih.icmp6_id,
                    daddr, &(*state)->state_key->lan.addr,
#ifndef NO_APPLE_EXTENSIONS
                    (*state)->state_key->lan.xport.port, NULL,
#else
                    (*state)->state_key->lan.port, NULL,
#endif
                    pd2.ip_sum, icmpsum,
                    pd->ip_sum, 0, AF_INET6);
            } else {
                pf_change_icmp(pd2.dst, &iih.icmp6_id,
                    saddr, &(*state)->state_key->gwy.addr,
#ifndef NO_APPLE_EXTENSIONS
                    (*state)->state_key->gwy.xport.port, NULL,
#else
                    (*state)->state_key->gwy.port, NULL,
#endif
                    pd2.ip_sum, icmpsum,
                    pd->ip_sum, 0, AF_INET6);
            }
#ifndef NO_APPLE_EXTENSIONS
            m = pf_lazy_makewritable(pd, m, off2 +
                sizeof (struct icmp6_hdr));
            if (!m)
                return (PF_DROP);
#endif
            m_copyback(m, off, sizeof (struct icmp6_hdr),
                pd->hdr.icmp6);
            m_copyback(m, ipoff2, sizeof (h2_6), &h2_6);
            m_copyback(m, off2, sizeof (struct icmp6_hdr),
                &iih);
        }

        break;
    }
    default: {
        key.af = pd2.af;
        key.proto = pd2.proto;
        if (direction == PF_IN) {
            PF_ACPY(&key.ext.addr, pd2.dst, key.af);
            PF_ACPY(&key.gwy.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.ext.xport.port = 0;
            key.gwy.xport.port = 0;
#else
            key.ext.port = 0;
            key.gwy.port = 0;
#endif
        } else {
            PF_ACPY(&key.lan.addr, pd2.dst, key.af);
            PF_ACPY(&key.ext.addr, pd2.src, key.af);
#ifndef NO_APPLE_EXTENSIONS
            key.lan.xport.port = 0;
            key.ext.xport.port = 0;
#else
            key.lan.port = 0;
            key.ext.port = 0;
#endif
        }

        STATE_LOOKUP();

        if (STATE_TRANSLATE((*state)->state_key)) {
            if (direction == PF_IN) {
                pf_change_icmp(pd2.src, NULL,
                    daddr, &(*state)->state_key->lan.addr,
                    0, NULL,
                    pd2.ip_sum, icmpsum,
                    pd->ip_sum, 0, pd2.af);
            } else {
                pf_change_icmp(pd2.dst, NULL,
                    saddr, &(*state)->state_key->gwy.addr,
                    0, NULL,
                    pd2.ip_sum, icmpsum,
                    pd->ip_sum, 0, pd2.af);
            }

            switch (pd2.af) {
#if INET
            case AF_INET:
#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m,
                    ipoff2 + sizeof (h2));
                if (!m)
                    return (PF_DROP);
#endif
                m_copyback(m, off, ICMP_MINLEN,
                    pd->hdr.icmp);
                m_copyback(m, ipoff2, sizeof (h2), &h2);
                break;
#endif /* INET */
#if INET6
            case AF_INET6:
#ifndef NO_APPLE_EXTENSIONS
                m = pf_lazy_makewritable(pd, m,
                    ipoff2 + sizeof (h2_6));
                if (!m)
                    return (PF_DROP);
#endif
                m_copyback(m, off,
                    sizeof (struct icmp6_hdr),
                    pd->hdr.icmp6);
                m_copyback(m, ipoff2, sizeof (h2_6),
                    &h2_6);
                break;
#endif /* INET6 */
            }
        }

        break;
    }
    }
}
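/*
 * GREv1 (PPTP) state tracking.  The state key is built from the peer
 * addresses and the PPTP call ID carried in the GRE header; the peers
 * advance from INITIATING to ESTABLISHED, after which the state uses the
 * GREv1 established timeout.  When the state is translated, the addresses
 * (and, on the inbound path, the call ID) in the packet are rewritten
 * below before the header is copied back into the mbuf.
 */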
#ifndef NO_APPLE_EXTENSIONS
static int
pf_test_state_grev1(struct pf_state **state, int direction,
    struct pfi_kif *kif, int off, struct pf_pdesc *pd)
{
    struct pf_state_peer *src;
    struct pf_state_peer *dst;
    struct pf_state_key_cmp key;
    struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
    struct mbuf *m;

    key.af = pd->af;
    key.proto = IPPROTO_GRE;
    key.proto_variant = PF_GRE_PPTP_VARIANT;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
        key.gwy.xport.call_id = grev1->call_id;
    } else {
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
        key.ext.xport.call_id = grev1->call_id;
    }

    STATE_LOOKUP();

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    /* update states */
    if (src->state < PFGRE1S_INITIATING)
        src->state = PFGRE1S_INITIATING;

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state >= PFGRE1S_INITIATING &&
        dst->state >= PFGRE1S_INITIATING) {
        if ((*state)->timeout != PFTM_TCP_ESTABLISHED)
            (*state)->timeout = PFTM_GREv1_ESTABLISHED;
        src->state = PFGRE1S_ESTABLISHED;
        dst->state = PFGRE1S_ESTABLISHED;
    } else {
        (*state)->timeout = PFTM_GREv1_INITIATING;
    }

    if ((*state)->state_key->app_state)
        (*state)->state_key->app_state->u.grev1.pptp_state->expire =
            pf_time_second();

    /* translate source/destination address, if necessary */
    if (STATE_GRE_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT) {
            switch (pd->af) {
#if INET
            case AF_INET:
                pf_change_a(&pd->src->v4.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->gwy.addr.v4.s_addr, 0);
                break;
#endif /* INET */
#if INET6
            case AF_INET6:
                PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
                    pd->af);
                break;
#endif /* INET6 */
            }
        } else {
            grev1->call_id = (*state)->state_key->lan.xport.call_id;

            switch (pd->af) {
#if INET
            case AF_INET:
                pf_change_a(&pd->dst->v4.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->lan.addr.v4.s_addr, 0);
                break;
#endif /* INET */
#if INET6
            case AF_INET6:
                PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
                    pd->af);
                break;
#endif /* INET6 */
            }
        }

        m = pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1));
        if (!m)
            return (PF_DROP);
        m_copyback(m, off, sizeof (*grev1), grev1);
    }

    return (PF_PASS);
}
#endif
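/*
 * ESP state tracking.  The state key is the peer addresses plus the SPI.
 * If no state matches, the code below looks for a "blocking" state that
 * was created with a zero SPI and, if one is found, re-keys it to the SPI
 * seen in this packet before continuing with the normal peer-state and
 * address-translation handling.
 */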
static int
pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
    int off, struct pf_pdesc *pd)
{
#pragma unused(off)
    struct pf_state_peer *src;
    struct pf_state_peer *dst;
    struct pf_state_key_cmp key;
    struct pf_esp_hdr *esp = pd->hdr.esp;
    int action;

    memset(&key, 0, sizeof (key));
    key.af = pd->af;
    key.proto = IPPROTO_ESP;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
        key.gwy.xport.spi = esp->spi;
    } else {
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
        key.ext.xport.spi = esp->spi;
    }

    *state = pf_find_state(kif, &key, direction);

    if (*state == 0) {
        struct pf_state *s;

        /*
         * No matching state.  Look for a blocking state.  If we find
         * one, then use that state and move it so that it's keyed to
         * the SPI in the current packet.
         */
        if (direction == PF_IN) {
            key.gwy.xport.spi = 0;

            s = pf_find_state(kif, &key, direction);
            if (s) {
                struct pf_state_key *sk = s->state_key;

                RB_REMOVE(pf_state_tree_ext_gwy,
                    &pf_statetbl_ext_gwy, sk);
                sk->lan.xport.spi = sk->gwy.xport.spi =
                    esp->spi;

                if (RB_INSERT(pf_state_tree_ext_gwy,
                    &pf_statetbl_ext_gwy, sk))
                    pf_detach_state(s, PF_DT_SKIP_EXTGWY);
                else
                    *state = s;
            }
        } else {
            key.ext.xport.spi = 0;

            s = pf_find_state(kif, &key, direction);
            if (s) {
                struct pf_state_key *sk = s->state_key;

                RB_REMOVE(pf_state_tree_lan_ext,
                    &pf_statetbl_lan_ext, sk);
                sk->ext.xport.spi = esp->spi;

                if (RB_INSERT(pf_state_tree_lan_ext,
                    &pf_statetbl_lan_ext, sk))
                    pf_detach_state(s, PF_DT_SKIP_LANEXT);
                else
                    *state = s;
            }
        }

        if (s != NULL && *state == 0) {
            /* the re-keyed state could not be linked; tear it down */
#if NPFSYNC
            if (s->creatorid == pf_status.hostid)
                pfsync_delete_state(s);
#endif /* NPFSYNC */
            s->timeout = PFTM_UNLINKED;
            hook_runloop(&s->unlink_hooks,
                HOOK_REMOVE|HOOK_FREE);
            pf_src_tree_remove_state(s);
            pf_free_state(s);
        }
    }

    if (pf_state_lookup_aux(state, kif, direction, &action))
        return (action);

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    /* update states */
    if (src->state < PFESPS_INITIATING)
        src->state = PFESPS_INITIATING;

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state >= PFESPS_INITIATING &&
        dst->state >= PFESPS_INITIATING) {
        (*state)->timeout = PFTM_ESP_ESTABLISHED;
        src->state = PFESPS_ESTABLISHED;
        dst->state = PFESPS_ESTABLISHED;
    } else {
        (*state)->timeout = PFTM_ESP_INITIATING;
    }

    /* translate source/destination address, if necessary */
    if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
        if (direction == PF_OUT) {
            switch (pd->af) {
#if INET
            case AF_INET:
                pf_change_a(&pd->src->v4.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->gwy.addr.v4.s_addr, 0);
                break;
#endif /* INET */
#if INET6
            case AF_INET6:
                PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
                    pd->af);
                break;
#endif /* INET6 */
            }
        } else {
            switch (pd->af) {
#if INET
            case AF_INET:
                pf_change_a(&pd->dst->v4.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->lan.addr.v4.s_addr, 0);
                break;
#endif /* INET */
#if INET6
            case AF_INET6:
                PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
                    pd->af);
                break;
#endif /* INET6 */
            }
        }
    }

    return (PF_PASS);
}
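/*
 * Catch-all state tracking for protocols that have no dedicated handler.
 * Ports are not meaningful here, so the key is address/protocol only;
 * peers move from SINGLE to MULTIPLE once traffic has been seen in both
 * directions, and only addresses are rewritten when the state is
 * translated.
 */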
static int
pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
    struct pf_pdesc *pd)
{
    struct pf_state_peer *src, *dst;
    struct pf_state_key_cmp key;

#ifndef NO_APPLE_EXTENSIONS
    key.app_state = 0;
#endif
    key.af = pd->af;
    key.proto = pd->proto;
    if (direction == PF_IN) {
        PF_ACPY(&key.ext.addr, pd->src, key.af);
        PF_ACPY(&key.gwy.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.ext.xport.port = 0;
        key.gwy.xport.port = 0;
#else
        key.ext.port = 0;
        key.gwy.port = 0;
#endif
    } else {
        PF_ACPY(&key.lan.addr, pd->src, key.af);
        PF_ACPY(&key.ext.addr, pd->dst, key.af);
#ifndef NO_APPLE_EXTENSIONS
        key.lan.xport.port = 0;
        key.ext.xport.port = 0;
#else
        key.lan.port = 0;
        key.ext.port = 0;
#endif
    }

    STATE_LOOKUP();

    if (direction == (*state)->state_key->direction) {
        src = &(*state)->src;
        dst = &(*state)->dst;
    } else {
        src = &(*state)->dst;
        dst = &(*state)->src;
    }

    /* update states */
    if (src->state < PFOTHERS_SINGLE)
        src->state = PFOTHERS_SINGLE;
    if (dst->state == PFOTHERS_SINGLE)
        dst->state = PFOTHERS_MULTIPLE;

    /* update expire time */
    (*state)->expire = pf_time_second();
    if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
        (*state)->timeout = PFTM_OTHER_MULTIPLE;
    else
        (*state)->timeout = PFTM_OTHER_SINGLE;

    /* translate source/destination address, if necessary */
#ifndef NO_APPLE_EXTENSIONS
    if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
#else
    if (STATE_TRANSLATE((*state)->state_key)) {
#endif
        if (direction == PF_OUT) {
            switch (pd->af) {
#if INET
            case AF_INET:
                pf_change_a(&pd->src->v4.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->gwy.addr.v4.s_addr,
                    0);
                break;
#endif /* INET */
#if INET6
            case AF_INET6:
                PF_ACPY(pd->src,
                    &(*state)->state_key->gwy.addr, pd->af);
                break;
#endif /* INET6 */
            }
        } else {
            switch (pd->af) {
#if INET
            case AF_INET:
                pf_change_a(&pd->dst->v4.s_addr,
                    pd->ip_sum,
                    (*state)->state_key->lan.addr.v4.s_addr,
                    0);
                break;
#endif /* INET */
#if INET6
            case AF_INET6:
                PF_ACPY(pd->dst,
                    &(*state)->state_key->lan.addr, pd->af);
                break;
#endif /* INET6 */
            }
        }
    }

    return (PF_PASS);
}
/*
 * ipoff and off are measured from the start of the mbuf chain.
 * h must be at "ipoff" on the mbuf chain.
 */
void *
pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
    u_short *actionp, u_short *reasonp, sa_family_t af)
{
    switch (af) {
#if INET
    case AF_INET: {
        struct ip *h = mtod(m, struct ip *);
        u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

        if (fragoff) {
            if (fragoff >= len) {
                ACTION_SET(actionp, PF_PASS);
            } else {
                ACTION_SET(actionp, PF_DROP);
                REASON_SET(reasonp, PFRES_FRAG);
            }
            return (NULL);
        }
        if (m->m_pkthdr.len < off + len ||
            ntohs(h->ip_len) < off + len) {
            ACTION_SET(actionp, PF_DROP);
            REASON_SET(reasonp, PFRES_SHORT);
            return (NULL);
        }
        break;
    }
#endif /* INET */
#if INET6
    case AF_INET6: {
        struct ip6_hdr *h = mtod(m, struct ip6_hdr *);

        if (m->m_pkthdr.len < off + len ||
            (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) <
            (unsigned)(off + len)) {
            ACTION_SET(actionp, PF_DROP);
            REASON_SET(reasonp, PFRES_SHORT);
            return (NULL);
        }
        break;
    }
#endif /* INET6 */
    }
    m_copydata(m, off, len, p);
    return (p);
}
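/*
 * Return 1 if a route to addr exists (or if the interface is an ipsec
 * "enc" interface, which is exempt from the check), 0 otherwise.
 */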
int
pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
{
    struct sockaddr_in *dst;
#if INET6
    struct sockaddr_in6 *dst6;
    struct route_in6 ro;
#else
    struct route ro;
#endif
    int ret = 0;

    bzero(&ro, sizeof (ro));
    switch (af) {
    case AF_INET:
        dst = satosin(&ro.ro_dst);
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof (*dst);
        dst->sin_addr = addr->v4;
        break;
#if INET6
    case AF_INET6:
        dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
        dst6->sin6_family = AF_INET6;
        dst6->sin6_len = sizeof (*dst6);
        dst6->sin6_addr = addr->v6;
        break;
#endif /* INET6 */
    default:
        return (0);
    }

    /* XXX: IFT_ENC is not currently used by anything*/
    /* Skip checks for ipsec interfaces */
    if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
        return (1);

    rtalloc((struct route *)&ro);

    if (ro.ro_rt != NULL) {
        ret = 1;
        rtfree(ro.ro_rt);
    }

    return (ret);
}
int
pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
{
#pragma unused(aw)
    struct sockaddr_in *dst;
#if INET6
    struct sockaddr_in6 *dst6;
    struct route_in6 ro;
#else
    struct route ro;
#endif
    int ret = 0;

    bzero(&ro, sizeof (ro));
    switch (af) {
    case AF_INET:
        dst = satosin(&ro.ro_dst);
        dst->sin_family = AF_INET;
        dst->sin_len = sizeof (*dst);
        dst->sin_addr = addr->v4;
        break;
#if INET6
    case AF_INET6:
        dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
        dst6->sin6_family = AF_INET6;
        dst6->sin6_len = sizeof (*dst6);
        dst6->sin6_addr = addr->v6;
        break;
#endif /* INET6 */
    default:
        return (0);
    }

    rtalloc((struct route *)&ro);

    if (ro.ro_rt != NULL) {
        rtfree(ro.ro_rt);
    }

    return (ret);
}
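/*
 * Forward (or duplicate, for dup-to) an IPv4 packet according to the
 * route-to/reply-to/fastroute rule that matched it.  This largely mirrors
 * ip_output(): checksums the chosen interface cannot offload are completed
 * in software, and packets larger than the interface MTU are either
 * fragmented or answered with an ICMP "needfrag" error when IP_DF is set.
 */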
static void
pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
    struct mbuf *m0, *m1;
    struct route iproute;
    struct route *ro = NULL;
    struct sockaddr_in *dst;
    struct ip *ip;
    struct ifnet *ifp = NULL;
    struct pf_addr naddr;
    struct pf_src_node *sn = NULL;
    int error = 0;
    int sw_csum = 0;

    if (m == NULL || *m == NULL || r == NULL ||
        (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
        panic("pf_route: invalid parameters");

    if (pd->pf_mtag->routed++ > 3) {
        m0 = *m;
        *m = NULL;
        goto bad;
    }

    if (r->rt == PF_DUPTO) {
        if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
            return;
    } else {
        if ((r->rt == PF_REPLYTO) == (r->direction == dir))
            return;
        m0 = *m;
    }

    if (m0->m_len < (int)sizeof (struct ip)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route: m0->m_len < sizeof (struct ip)\n"));
        goto bad;
    }

    ip = mtod(m0, struct ip *);

    ro = &iproute;
    bzero((caddr_t)ro, sizeof (*ro));
    dst = satosin(&ro->ro_dst);
    dst->sin_family = AF_INET;
    dst->sin_len = sizeof (*dst);
    dst->sin_addr = ip->ip_dst;

    if (r->rt == PF_FASTROUTE) {
        rtalloc(ro);
        if (ro->ro_rt == 0) {
            ipstat.ips_noroute++;
            goto bad;
        }

        ifp = ro->ro_rt->rt_ifp;
        RT_LOCK(ro->ro_rt);
        ro->ro_rt->rt_use++;

        if (ro->ro_rt->rt_flags & RTF_GATEWAY)
            dst = satosin(ro->ro_rt->rt_gateway);
        RT_UNLOCK(ro->ro_rt);
    } else {
        if (TAILQ_EMPTY(&r->rpool.list)) {
            DPFPRINTF(PF_DEBUG_URGENT,
                ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
            goto bad;
        }
        if (s == NULL) {
            pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
                &naddr, NULL, &sn);
            if (!PF_AZERO(&naddr, AF_INET))
                dst->sin_addr.s_addr = naddr.v4.s_addr;
            ifp = r->rpool.cur->kif ?
                r->rpool.cur->kif->pfik_ifp : NULL;
        } else {
            if (!PF_AZERO(&s->rt_addr, AF_INET))
                dst->sin_addr.s_addr =
                    s->rt_addr.v4.s_addr;
            ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
        }
    }
    if (ifp == NULL)
        goto bad;

    if (oifp != ifp) {
        if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
            goto bad;
        else if (m0 == NULL)
            goto done;
        if (m0->m_len < (int)sizeof (struct ip)) {
            DPFPRINTF(PF_DEBUG_URGENT,
                ("pf_route: m0->m_len < sizeof (struct ip)\n"));
            goto bad;
        }
        ip = mtod(m0, struct ip *);
    }

    /* Copied from ip_output. */

    /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
    m0->m_pkthdr.csum_flags |= CSUM_IP;
    sw_csum = m0->m_pkthdr.csum_flags &
        ~IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);

    if (ifp->if_hwassist & CSUM_TCP_SUM16) {
        /*
         * Special case code for GMACE
         * frames that can be checksumed by GMACE SUM16 HW:
         * frame >64, no fragments, no UDP
         */
        if (apple_hwcksum_tx && (m0->m_pkthdr.csum_flags & CSUM_TCP) &&
            (ntohs(ip->ip_len) > 50) &&
            (ntohs(ip->ip_len) <= ifp->if_mtu)) {
            /*
             * Apple GMAC HW, expects:
             *    STUFF_OFFSET << 16 | START_OFFSET
             */
            /* IP+Enet header length */
            u_short offset = ((ip->ip_hl) << 2) + 14;
            u_short csumprev = m0->m_pkthdr.csum_data & 0xffff;
            m0->m_pkthdr.csum_flags = CSUM_DATA_VALID |
                CSUM_TCP_SUM16; /* for GMAC */
            m0->m_pkthdr.csum_data = (csumprev + offset) << 16;
            m0->m_pkthdr.csum_data += offset;
            /* do IP hdr chksum in software */
            sw_csum = CSUM_DELAY_IP;
        } else {
            /* let the software handle any UDP or TCP checksums */
            sw_csum |= (CSUM_DELAY_DATA & m0->m_pkthdr.csum_flags);
        }
    } else if (apple_hwcksum_tx == 0) {
        sw_csum |= (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
            m0->m_pkthdr.csum_flags;
    }

    if (sw_csum & CSUM_DELAY_DATA) {
        in_delayed_cksum(m0);
        sw_csum &= ~CSUM_DELAY_DATA;
        m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
    }

    if (apple_hwcksum_tx != 0) {
        m0->m_pkthdr.csum_flags &=
            IF_HWASSIST_CSUM_FLAGS(ifp->if_hwassist);
    } else {
        m0->m_pkthdr.csum_flags = 0;
    }

    if (ntohs(ip->ip_len) <= ifp->if_mtu ||
        (ifp->if_hwassist & CSUM_FRAGMENT)) {
        ip->ip_sum = 0;
        if (sw_csum & CSUM_DELAY_IP)
            ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
        error = ifnet_output(ifp, PF_INET, m0, ro, sintosa(dst));
        goto done;
    }

    /*
     * Too large for interface; fragment if possible.
     * Must be able to put at least 8 bytes per fragment.
     */
    if (ip->ip_off & htons(IP_DF)) {
        ipstat.ips_cantfrag++;
        if (r->rt != PF_DUPTO) {
            icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
                ifp->if_mtu);
            goto done;
        } else
            goto bad;
    }

    m1 = m0;

    /* PR-8933605: send ip_len,ip_off to ip_fragment in host byte order */
#if BYTE_ORDER != BIG_ENDIAN
    NTOHS(ip->ip_off);
    NTOHS(ip->ip_len);
#endif
    error = ip_fragment(m0, ifp, ifp->if_mtu, sw_csum);

    if (error) {
        m0 = NULL;
        goto bad;
    }

    for (m0 = m1; m0; m0 = m1) {
        m1 = m0->m_nextpkt;
        m0->m_nextpkt = 0;
        if (error == 0)
            error = ifnet_output(ifp, PF_INET, m0, ro,
                sintosa(dst));
        else
            m_freem(m0);
    }

    if (error == 0)
        ipstat.ips_fragmented++;

done:
    if (r->rt != PF_DUPTO)
        *m = NULL;
    if (ro == &iproute && ro->ro_rt)
        RTFREE(ro->ro_rt);
    return;

bad:
    m_freem(m0);
    goto done;
}
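/*
 * IPv6 counterpart of pf_route().  There is no fragmentation path here;
 * when the packet exceeds the outgoing interface MTU an ICMP6 "packet too
 * big" error is generated instead.
 */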
static void
pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
    struct mbuf *m0;
    struct route_in6 ip6route;
    struct route_in6 *ro;
    struct sockaddr_in6 *dst;
    struct ip6_hdr *ip6;
    struct ifnet *ifp = NULL;
    struct pf_addr naddr;
    struct pf_src_node *sn = NULL;
    int error = 0;

    if (m == NULL || *m == NULL || r == NULL ||
        (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
        panic("pf_route6: invalid parameters");

    if (pd->pf_mtag->routed++ > 3) {
        m0 = *m;
        *m = NULL;
        goto bad;
    }

    if (r->rt == PF_DUPTO) {
        if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
            return;
    } else {
        if ((r->rt == PF_REPLYTO) == (r->direction == dir))
            return;
        m0 = *m;
    }

    if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
        goto bad;
    }
    ip6 = mtod(m0, struct ip6_hdr *);

    ro = &ip6route;
    bzero((caddr_t)ro, sizeof (*ro));
    dst = (struct sockaddr_in6 *)&ro->ro_dst;
    dst->sin6_family = AF_INET6;
    dst->sin6_len = sizeof (*dst);
    dst->sin6_addr = ip6->ip6_dst;

    /* Cheat. XXX why only in the v6 case??? */
    if (r->rt == PF_FASTROUTE) {
        struct pf_mtag *pf_mtag;

        if ((pf_mtag = pf_get_mtag(m0)) == NULL)
            goto bad;
        pf_mtag->flags |= PF_TAG_GENERATED;
        ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
        return;
    }

    if (TAILQ_EMPTY(&r->rpool.list)) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
        goto bad;
    }
    if (s == NULL) {
        pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
            &naddr, NULL, &sn);
        if (!PF_AZERO(&naddr, AF_INET6))
            PF_ACPY((struct pf_addr *)&dst->sin6_addr,
                &naddr, AF_INET6);
        ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
    } else {
        if (!PF_AZERO(&s->rt_addr, AF_INET6))
            PF_ACPY((struct pf_addr *)&dst->sin6_addr,
                &s->rt_addr, AF_INET6);
        ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
    }
    if (ifp == NULL)
        goto bad;

    if (oifp != ifp) {
        if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
            goto bad;
        else if (m0 == NULL)
            goto done;
        if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
            DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
                "< sizeof (struct ip6_hdr)\n"));
            goto bad;
        }
        ip6 = mtod(m0, struct ip6_hdr *);
    }

    /*
     * If the packet is too large for the outgoing interface,
     * send back an icmp6 error.
     */
    if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
        dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
    if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
        error = nd6_output(ifp, ifp, m0, dst, NULL);
    } else {
        in6_ifstat_inc(ifp, ifs6_in_toobig);
        if (r->rt != PF_DUPTO)
            icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
        else
            goto bad;
    }

done:
    if (r->rt != PF_DUPTO)
        *m = NULL;
    return;

bad:
    m_freem(m0);
    goto done;
}
/*
 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
 *   off is the offset where the protocol header starts
 *   len is the total length of protocol header plus payload
 * returns 0 when the checksum is valid, otherwise returns 1.
 */
static int
pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
    sa_family_t af)
{
    u_int16_t sum;

    /*
     * Optimize for the common case; if the hardware calculated
     * value doesn't include pseudo-header checksum, or if it
     * is partially-computed (only 16-bit summation), do it in
     * software below.
     */
    if (apple_hwcksum_rx && (m->m_pkthdr.csum_flags &
        (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
        (m->m_pkthdr.csum_data ^ 0xffff) == 0) {
        return (0);
    }

    switch (p) {
    case IPPROTO_TCP:
    case IPPROTO_UDP:
    case IPPROTO_ICMP:
    case IPPROTO_ICMPV6:
        break;
    default:
        return (1);
    }
    if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr))
        return (1);
    if (m->m_pkthdr.len < off + len)
        return (1);
    switch (af) {
#if INET
    case AF_INET:
        if (p == IPPROTO_ICMP) {
            if (m->m_len < off)
                return (1);
            m->m_data += off;
            m->m_len -= off;
            sum = in_cksum(m, len);
            m->m_data -= off;
            m->m_len += off;
        } else {
            if (m->m_len < (int)sizeof (struct ip))
                return (1);
            sum = inet_cksum(m, p, off, len);
        }
        break;
#endif /* INET */
#if INET6
    case AF_INET6:
        if (m->m_len < (int)sizeof (struct ip6_hdr))
            return (1);
        sum = inet6_cksum(m, p, off, len);
        break;
#endif /* INET6 */
    default:
        return (1);
    }
    if (sum) {
        switch (p) {
        case IPPROTO_TCP:
            tcpstat.tcps_rcvbadsum++;
            break;
        case IPPROTO_UDP:
            udpstat.udps_badsum++;
            break;
        case IPPROTO_ICMP:
            icmpstat.icps_checksum++;
            break;
#if INET6
        case IPPROTO_ICMPV6:
            icmp6stat.icp6s_checksum++;
            break;
#endif /* INET6 */
        }
        return (1);
    }
    return (0);
}
#ifndef NO_APPLE_EXTENSIONS
#define PF_APPLE_UPDATE_PDESC_IPv4() \
	do { \
		if (m && pd.mp && m != pd.mp) { \
			m = pd.mp; \
			h = mtod(m, struct ip *); \
		} \
	} while (0)
#endif /* !NO_APPLE_EXTENSIONS */
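/*
 * pf_test() is the IPv4 entry point called for every packet on a
 * pf-enabled interface.  It normalizes/reassembles the packet, dispatches
 * to the per-protocol state handlers (creating state via pf_test_rule()
 * when none exists), enforces IP-option policy, updates interface, rule
 * and state counters, and finally hands route-to traffic to pf_route().
 */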
int
pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh)
{
    struct pfi_kif *kif;
    u_short action, reason = 0, log = 0;
    struct mbuf *m = *m0;
    struct ip *h = NULL;
    struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
    struct pf_state *s = NULL;
    struct pf_state_key *sk = NULL;
    struct pf_ruleset *ruleset = NULL;
    struct pf_pdesc pd;
    int off, dirndx, pqid = 0;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!pf_status.running)
        return (PF_PASS);

    memset(&pd, 0, sizeof (pd));

    if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test: pf_get_mtag returned NULL\n"));
        return (PF_DROP);
    }

    if (pd.pf_mtag->flags & PF_TAG_GENERATED)
        return (PF_PASS);

    kif = (struct pfi_kif *)ifp->if_pf_kif;

    if (kif == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
        return (PF_DROP);
    }
    if (kif->pfik_flags & PFI_IFLAG_SKIP)
        return (PF_PASS);

#if DIAGNOSTIC
    if ((m->m_flags & M_PKTHDR) == 0)
        panic("non-M_PKTHDR is passed to pf_test");
#endif /* DIAGNOSTIC */

    if (m->m_pkthdr.len < (int)sizeof (*h)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_SHORT);
        log = 1;
        goto done;
    }

    /* We do IP header normalization and packet reassembly here */
    if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
        action = PF_DROP;
        goto done;
    }
    m = *m0;	/* pf_normalize messes with m0 */
    h = mtod(m, struct ip *);

    off = h->ip_hl << 2;
    if (off < (int)sizeof (*h)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_SHORT);
        log = 1;
        goto done;
    }

    pd.src = (struct pf_addr *)&h->ip_src;
    pd.dst = (struct pf_addr *)&h->ip_dst;
    PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
    pd.ip_sum = &h->ip_sum;
    pd.proto = h->ip_p;
#ifndef NO_APPLE_EXTENSIONS
    pd.proto_variant = 0;
    pd.mp = m;
    pd.lmw = 0;
#endif /* !NO_APPLE_EXTENSIONS */
    pd.af = AF_INET;
    pd.tos = h->ip_tos;
    pd.tot_len = ntohs(h->ip_len);
    pd.eh = eh;

    /* handle fragments that didn't get reassembled by normalization */
    if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
        action = pf_test_fragment(&r, dir, kif, m, h,
            &pd, &a, &ruleset);
        goto done;
    }

    switch (h->ip_p) {

    case IPPROTO_TCP: {
        struct tcphdr th;

        pd.hdr.tcp = &th;
        if (!pf_pull_hdr(m, off, &th, sizeof (th),
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
        pd.p_len = pd.tot_len - off - (th.th_off << 2);
        if ((th.th_flags & TH_ACK) && pd.p_len == 0)
            pqid = 1;
        action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv4();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_DROP)
            goto done;
        action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
            &reason);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv4();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);
        break;
    }

    case IPPROTO_UDP: {
        struct udphdr uh;

        pd.hdr.udp = &uh;
        if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
        if (uh.uh_dport == 0 ||
            ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
            ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
            action = PF_DROP;
            REASON_SET(&reason, PFRES_SHORT);
            goto done;
        }
#ifndef NO_APPLE_EXTENSIONS
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
            &reason);
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv4();
#else
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);
        break;
    }

    case IPPROTO_ICMP: {
        struct icmp ih;

        pd.hdr.icmp = &ih;
        if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
            &action, &reason, AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
        action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
            &reason);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv4();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);
        break;
    }

#ifndef NO_APPLE_EXTENSIONS
    case IPPROTO_ESP: {
        struct pf_esp_hdr esp;

        pd.hdr.esp = &esp;
        if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
            AF_INET)) {
            log = action != PF_PASS;
            goto done;
        }
        action = pf_test_state_esp(&s, dir, kif, off, &pd);
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv4();
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ipintrq);
        break;
    }

    case IPPROTO_GRE: {
        struct pf_grev1_hdr grev1;

        pd.hdr.grev1 = &grev1;
        if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
            &reason, AF_INET)) {
            log = (action != PF_PASS);
            goto done;
        }
        if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
            ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
            if (ntohs(grev1.payload_length) >
                m->m_pkthdr.len - off) {
                action = PF_DROP;
                REASON_SET(&reason, PFRES_SHORT);
                goto done;
            }
            pd.proto_variant = PF_GRE_PPTP_VARIANT;
            action = pf_test_state_grev1(&s, dir, kif, off, &pd);
            if (pd.lmw < 0) goto done;
            PF_APPLE_UPDATE_PDESC_IPv4();
            if (action == PF_PASS) {
#if NPFSYNC
                pfsync_update_state(s);
#endif /* NPFSYNC */
                r = s->rule.ptr;
                a = s->anchor.ptr;
                log = s->log;
                break;
            } else if (s == NULL) {
                action = pf_test_rule(&r, &s, dir, kif, m, off,
                    h, &pd, &a, &ruleset, &ipintrq);
                if (action == PF_PASS)
                    break;
            }
        }

        /* not GREv1/PPTP, so treat as ordinary GRE... */
    }
#endif /* !NO_APPLE_EXTENSIONS */

    default:
        action = pf_test_state_other(&s, dir, kif, &pd);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv4();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, h,
                &pd, &a, &ruleset, &ipintrq);
        break;
    }

done:
#ifndef NO_APPLE_EXTENSIONS
    PF_APPLE_UPDATE_PDESC_IPv4();
#endif /* !NO_APPLE_EXTENSIONS */

    if (action == PF_PASS && h->ip_hl > 5 &&
        !((s && s->allow_opts) || r->allow_opts)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_IPOPTIONS);
        log = 1;
        DPFPRINTF(PF_DEBUG_MISC,
            ("pf: dropping packet with ip options [hlen=%u]\n",
            (unsigned int) h->ip_hl));
    }

    if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
        (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
            r->rtableid);

    if (action == PF_PASS && r->qid) {
        if (pqid || (pd.tos & IPTOS_LOWDELAY))
            pd.pf_mtag->qid = r->pqid;
        else
            pd.pf_mtag->qid = r->qid;
        /* add hints for ecn */
        pd.pf_mtag->hdr = h;
    }

    /*
     * connections redirected to loopback should not match sockets
     * bound specifically to loopback due to security implications,
     * see tcp_input() and in_pcblookup_listen().
     */
    if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
        pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
        (s->nat_rule.ptr->action == PF_RDR ||
        s->nat_rule.ptr->action == PF_BINAT) &&
        (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
        pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;

    if (log) {
        struct pf_rule *lr;

        if (s != NULL && s->nat_rule.ptr != NULL &&
            s->nat_rule.ptr->log & PF_LOG_ALL)
            lr = s->nat_rule.ptr;
        else
            lr = r;
        PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
            &pd);
    }

    kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
    kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

    if (action == PF_PASS || r->action == PF_DROP) {
        dirndx = (dir == PF_OUT);
        r->packets[dirndx]++;
        r->bytes[dirndx] += pd.tot_len;
        if (a != NULL) {
            a->packets[dirndx]++;
            a->bytes[dirndx] += pd.tot_len;
        }
        if (s != NULL) {
            sk = s->state_key;
            if (s->nat_rule.ptr != NULL) {
                s->nat_rule.ptr->packets[dirndx]++;
                s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
            }
            if (s->src_node != NULL) {
                s->src_node->packets[dirndx]++;
                s->src_node->bytes[dirndx] += pd.tot_len;
            }
            if (s->nat_src_node != NULL) {
                s->nat_src_node->packets[dirndx]++;
                s->nat_src_node->bytes[dirndx] += pd.tot_len;
            }
            dirndx = (dir == sk->direction) ? 0 : 1;
            s->packets[dirndx]++;
            s->bytes[dirndx] += pd.tot_len;
        }
        tr = r;
        nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
        if (nr != NULL) {
            struct pf_addr *x;
            /*
             * XXX: we need to make sure that the addresses
             * passed to pfr_update_stats() are the same than
             * the addresses used during matching (pfr_match)
             */
            if (r == &pf_default_rule) {
                tr = nr;
                x = (sk == NULL || sk->direction == dir) ?
                    &pd.baddr : &pd.naddr;
            } else
                x = (sk == NULL || sk->direction == dir) ?
                    &pd.naddr : &pd.baddr;
            if (x == &pd.baddr || s == NULL) {
                /* we need to change the address */
                if (dir == PF_OUT)
                    pd.src = x;
                else
                    pd.dst = x;
            }
        }
        if (tr->src.addr.type == PF_ADDR_TABLE)
            pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
                sk->direction == dir) ?
                pd.src : pd.dst, pd.af,
                pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
                tr->src.neg);
        if (tr->dst.addr.type == PF_ADDR_TABLE)
            pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
                sk->direction == dir) ? pd.dst : pd.src, pd.af,
                pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
                tr->dst.neg);
    }

#ifndef NO_APPLE_EXTENSIONS
    VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);

    if (*m0) {
        if (pd.lmw < 0) {
            REASON_SET(&reason, PFRES_MEMORY);
            action = PF_DROP;
        }

        if (action == PF_DROP) {
            m_freem(*m0);
            *m0 = NULL;
            return (PF_DROP);
        }

        *m0 = m;
    }

    if (action == PF_SYNPROXY_DROP) {
        m_freem(*m0);
        *m0 = NULL;
        action = PF_PASS;
    } else if (r->rt)
        /* pf_route can free the mbuf causing *m0 to become NULL */
        pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
#endif /* !NO_APPLE_EXTENSIONS */

    return (action);
}
#ifndef NO_APPLE_EXTENSIONS
#define PF_APPLE_UPDATE_PDESC_IPv6() \
	do { \
		if (m && pd.mp && m != pd.mp) { \
			if (n == m) \
				n = pd.mp; \
			m = pd.mp; \
			h = mtod(m, struct ip6_hdr *); \
		} \
	} while (0)
#endif /* !NO_APPLE_EXTENSIONS */
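/*
 * pf_test6() is the IPv6 entry point.  It first walks the extension-header
 * chain (counting routing headers so "dangerous" ones can be rejected
 * later), then mirrors the IPv4 logic above using AF_INET6 addresses and
 * pf_route6() for route-to rules.
 */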
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
    struct ether_header *eh)
{
    struct pfi_kif *kif;
    u_short action, reason = 0, log = 0;
    struct mbuf *m = *m0, *n = NULL;
    struct ip6_hdr *h;
    struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
    struct pf_state *s = NULL;
    struct pf_state_key *sk = NULL;
    struct pf_ruleset *ruleset = NULL;
    struct pf_pdesc pd;
    int off, terminal = 0, dirndx, rh_cnt = 0;

    lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

    if (!pf_status.running)
        return (PF_PASS);

    memset(&pd, 0, sizeof (pd));

    if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test6: pf_get_mtag returned NULL\n"));
        return (PF_DROP);
    }

    if (pd.pf_mtag->flags & PF_TAG_GENERATED)
        return (PF_PASS);

    kif = (struct pfi_kif *)ifp->if_pf_kif;

    if (kif == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name));
        return (PF_DROP);
    }
    if (kif->pfik_flags & PFI_IFLAG_SKIP)
        return (PF_PASS);

#if DIAGNOSTIC
    if ((m->m_flags & M_PKTHDR) == 0)
        panic("non-M_PKTHDR is passed to pf_test6");
#endif /* DIAGNOSTIC */

    h = mtod(m, struct ip6_hdr *);

    if (m->m_pkthdr.len < (int)sizeof (*h)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_SHORT);
        log = 1;
        goto done;
    }

    /* We do IP header normalization and packet reassembly here */
    if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
        action = PF_DROP;
        goto done;
    }
    m = *m0;	/* pf_normalize messes with m0 */
    h = mtod(m, struct ip6_hdr *);

    /*
     * we do not support jumbogram yet.  if we keep going, zero ip6_plen
     * will do something bad, so drop the packet for now.
     */
    if (htons(h->ip6_plen) == 0) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_NORM);	/*XXX*/
        goto done;
    }

    pd.src = (struct pf_addr *)&h->ip6_src;
    pd.dst = (struct pf_addr *)&h->ip6_dst;
    PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
    pd.ip_sum = NULL;
    pd.af = AF_INET6;
    pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr);
    pd.eh = eh;

    off = ((caddr_t)h - m->m_data) + sizeof (struct ip6_hdr);
    pd.proto = h->ip6_nxt;
#ifndef NO_APPLE_EXTENSIONS
    pd.proto_variant = 0;
    pd.mp = m;
    pd.lmw = 0;
#endif /* !NO_APPLE_EXTENSIONS */

    do {
        switch (pd.proto) {
        case IPPROTO_FRAGMENT:
            action = pf_test_fragment(&r, dir, kif, m, h,
                &pd, &a, &ruleset);
            if (action == PF_DROP)
                REASON_SET(&reason, PFRES_FRAG);
            goto done;
        case IPPROTO_ROUTING: {
            struct ip6_rthdr rthdr;

            if (rh_cnt++) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 more than one rthdr\n"));
                action = PF_DROP;
                REASON_SET(&reason, PFRES_IPOPTIONS);
                log = 1;
                goto done;
            }
            if (!pf_pull_hdr(m, off, &rthdr, sizeof (rthdr), NULL,
                &reason, pd.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 short rthdr\n"));
                action = PF_DROP;
                REASON_SET(&reason, PFRES_SHORT);
                log = 1;
                goto done;
            }
            if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 rthdr0\n"));
                action = PF_DROP;
                REASON_SET(&reason, PFRES_IPOPTIONS);
                log = 1;
                goto done;
            }
            /* FALLTHROUGH */
        }
        case IPPROTO_AH:
        case IPPROTO_HOPOPTS:
        case IPPROTO_DSTOPTS: {
            /* get next header and header length */
            struct ip6_ext opt6;

            if (!pf_pull_hdr(m, off, &opt6, sizeof (opt6),
                NULL, &reason, pd.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 short opt\n"));
                action = PF_DROP;
                log = 1;
                goto done;
            }
            if (pd.proto == IPPROTO_AH)
                off += (opt6.ip6e_len + 2) * 4;
            else
                off += (opt6.ip6e_len + 1) * 8;
            pd.proto = opt6.ip6e_nxt;
            /* goto the next header */
            break;
        }
        default:
            terminal++;
            break;
        }
    } while (!terminal);

    /* if there's no routing header, use unmodified mbuf for checksumming */
    if (!n)
        n = m;

    switch (pd.proto) {

    case IPPROTO_TCP: {
        struct tcphdr th;

        pd.hdr.tcp = &th;
        if (!pf_pull_hdr(m, off, &th, sizeof (th),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;
            goto done;
        }
        pd.p_len = pd.tot_len - off - (th.th_off << 2);
        action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv6();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_DROP)
            goto done;
        action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
            &reason);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv6();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);
        break;
    }

    case IPPROTO_UDP: {
        struct udphdr uh;

        pd.hdr.udp = &uh;
        if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;
            goto done;
        }
        if (uh.uh_dport == 0 ||
            ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
            ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
            action = PF_DROP;
            REASON_SET(&reason, PFRES_SHORT);
            goto done;
        }
#ifndef NO_APPLE_EXTENSIONS
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
            &reason);
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv6();
#else
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);
        break;
    }

    case IPPROTO_ICMPV6: {
        struct icmp6_hdr ih;

        pd.hdr.icmp6 = &ih;
        if (!pf_pull_hdr(m, off, &ih, sizeof (ih),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;
            goto done;
        }
        action = pf_test_state_icmp(&s, dir, kif,
            m, off, h, &pd, &reason);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv6();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);
        break;
    }

#ifndef NO_APPLE_EXTENSIONS
    case IPPROTO_ESP: {
        struct pf_esp_hdr esp;

        pd.hdr.esp = &esp;
        if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
            AF_INET6)) {
            log = action != PF_PASS;
            goto done;
        }
        action = pf_test_state_esp(&s, dir, kif, off, &pd);
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv6();
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif,
                m, off, h, &pd, &a, &ruleset, &ip6intrq);
        break;
    }

    case IPPROTO_GRE: {
        struct pf_grev1_hdr grev1;

        pd.hdr.grev1 = &grev1;
        if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
            &reason, AF_INET6)) {
            log = (action != PF_PASS);
            goto done;
        }
        if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
            ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
            if (ntohs(grev1.payload_length) >
                m->m_pkthdr.len - off) {
                action = PF_DROP;
                REASON_SET(&reason, PFRES_SHORT);
                goto done;
            }
            action = pf_test_state_grev1(&s, dir, kif, off, &pd);
            if (pd.lmw < 0)
                goto done;
            PF_APPLE_UPDATE_PDESC_IPv6();
            if (action == PF_PASS) {
#if NPFSYNC
                pfsync_update_state(s);
#endif /* NPFSYNC */
                r = s->rule.ptr;
                a = s->anchor.ptr;
                log = s->log;
                break;
            } else if (s == NULL) {
                action = pf_test_rule(&r, &s, dir, kif, m, off,
                    h, &pd, &a, &ruleset, &ip6intrq);
                if (action == PF_PASS)
                    break;
            }
        }

        /* not GREv1/PPTP, so treat as ordinary GRE... */
    }
#endif /* !NO_APPLE_EXTENSIONS */

    default:
        action = pf_test_state_other(&s, dir, kif, &pd);
#ifndef NO_APPLE_EXTENSIONS
        if (pd.lmw < 0)
            goto done;
        PF_APPLE_UPDATE_PDESC_IPv6();
#endif /* !NO_APPLE_EXTENSIONS */
        if (action == PF_PASS) {
#if NPFSYNC
            pfsync_update_state(s);
#endif /* NPFSYNC */
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, h,
                &pd, &a, &ruleset, &ip6intrq);
        break;
    }

done:
#ifndef NO_APPLE_EXTENSIONS
    PF_APPLE_UPDATE_PDESC_IPv6();
#endif /* !NO_APPLE_EXTENSIONS */

    /* handle dangerous IPv6 extension headers. */
    if (action == PF_PASS && rh_cnt &&
        !((s && s->allow_opts) || r->allow_opts)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_IPOPTIONS);
        log = 1;
        DPFPRINTF(PF_DEBUG_MISC,
            ("pf: dropping packet with dangerous v6 headers\n"));
    }

    if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid))
        (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
            r->rtableid);

    if (action == PF_PASS && r->qid) {
        if (pd.tos & IPTOS_LOWDELAY)
            pd.pf_mtag->qid = r->pqid;
        else
            pd.pf_mtag->qid = r->qid;
        /* add hints for ecn */
        pd.pf_mtag->hdr = h;
    }

    if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
        pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
        (s->nat_rule.ptr->action == PF_RDR ||
        s->nat_rule.ptr->action == PF_BINAT) &&
        IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
        pd.pf_mtag->flags |= PF_TAG_TRANSLATE_LOCALHOST;

    if (log) {
        struct pf_rule *lr;

        if (s != NULL && s->nat_rule.ptr != NULL &&
            s->nat_rule.ptr->log & PF_LOG_ALL)
            lr = s->nat_rule.ptr;
        else
            lr = r;
        PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
            &pd);
    }

    kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
    kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

    if (action == PF_PASS || r->action == PF_DROP) {
        dirndx = (dir == PF_OUT);
        r->packets[dirndx]++;
        r->bytes[dirndx] += pd.tot_len;
        if (a != NULL) {
            a->packets[dirndx]++;
            a->bytes[dirndx] += pd.tot_len;
        }
        if (s != NULL) {
            sk = s->state_key;
            if (s->nat_rule.ptr != NULL) {
                s->nat_rule.ptr->packets[dirndx]++;
                s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
            }
            if (s->src_node != NULL) {
                s->src_node->packets[dirndx]++;
                s->src_node->bytes[dirndx] += pd.tot_len;
            }
            if (s->nat_src_node != NULL) {
                s->nat_src_node->packets[dirndx]++;
                s->nat_src_node->bytes[dirndx] += pd.tot_len;
            }
            dirndx = (dir == sk->direction) ? 0 : 1;
            s->packets[dirndx]++;
            s->bytes[dirndx] += pd.tot_len;
        }
        tr = r;
        nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
        if (nr != NULL) {
            struct pf_addr *x;
            /*
             * XXX: we need to make sure that the addresses
             * passed to pfr_update_stats() are the same than
             * the addresses used during matching (pfr_match)
             */
            if (r == &pf_default_rule) {
                tr = nr;
                x = (s == NULL || sk->direction == dir) ?
                    &pd.baddr : &pd.naddr;
            } else {
                x = (s == NULL || sk->direction == dir) ?
                    &pd.naddr : &pd.baddr;
            }
            if (x == &pd.baddr || s == NULL) {
                if (dir == PF_OUT)
                    pd.src = x;
                else
                    pd.dst = x;
            }
        }
        if (tr->src.addr.type == PF_ADDR_TABLE)
            pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
                sk->direction == dir) ? pd.src : pd.dst, pd.af,
                pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
                tr->src.neg);
        if (tr->dst.addr.type == PF_ADDR_TABLE)
            pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
                sk->direction == dir) ? pd.dst : pd.src, pd.af,
                pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
                tr->dst.neg);
    }

#if 0
    if (action == PF_SYNPROXY_DROP) {
        m_freem(*m0);
        *m0 = NULL;
        action = PF_PASS;
    } else if (r->rt)
        /* pf_route6 can free the mbuf causing *m0 to become NULL */
        pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
#endif

#ifndef NO_APPLE_EXTENSIONS
    VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);

    if (*m0) {
        if (pd.lmw < 0) {
            REASON_SET(&reason, PFRES_MEMORY);
            action = PF_DROP;
        }

        if (action == PF_DROP) {
            m_freem(*m0);
            *m0 = NULL;
            return (PF_DROP);
        }

        *m0 = m;
    }

    if (action == PF_SYNPROXY_DROP) {
        m_freem(*m0);
        *m0 = NULL;
        action = PF_PASS;
    } else if (r->rt) {
        if (action == PF_PASS) {
            m = *m0;
            h = mtod(m, struct ip6_hdr *);
        }

        /* pf_route6 can free the mbuf causing *m0 to become NULL */
        pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
    }
#else
    if (action != PF_SYNPROXY_DROP && r->rt)
        /* pf_route6 can free the mbuf causing *m0 to become NULL */
        pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);

    if (action == PF_PASS) {
        m = *m0;
        h = mtod(m, struct ip6_hdr *);
    }

    if (action == PF_SYNPROXY_DROP) {
        m_freem(*m0);
        *m0 = NULL;
        action = PF_PASS;
    }
#endif /* !NO_APPLE_EXTENSIONS */

    return (action);
}
static int
pf_check_congestion(struct ifqueue *ifq)
{
#pragma unused(ifq)
	return (0);
}
void
pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
    int flags, const char *wchan, void *palloc)
{
#pragma unused(align, ioff, flags, palloc)
	bzero(pp, sizeof (*pp));
	pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
	if (pp->pool_zone != NULL) {
		zone_change(pp->pool_zone, Z_EXPAND, TRUE);
		zone_change(pp->pool_zone, Z_CALLERACCT, FALSE);
		pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
		pp->pool_name = wchan;
	}
}

/* Zones cannot be currently destroyed */
void
pool_destroy(struct pool *pp)
{
#pragma unused(pp)
}

void
pool_sethiwat(struct pool *pp, int n)
{
	pp->pool_hiwat = n;	/* Currently unused */
}

void
pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
{
#pragma unused(warnmess, ratecap)
	pp->pool_limit = n;
}
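/*
 * Minimal pool(9)-style wrappers backed by xnu zones: pool_get()/pool_put()
 * allocate and free from the zone created in pool_init(), keeping a count
 * so the (otherwise advisory) hard limit can be enforced.
 */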
void *
pool_get(struct pool *pp, int flags)
{
	void *buf;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (pp->pool_count > pp->pool_limit) {
		DPFPRINTF(PF_DEBUG_NOISY,
		    ("pf: pool %s hard limit reached (%d)\n",
		    pp->pool_name != NULL ? pp->pool_name : "unknown",
		    pp->pool_limit));
		return (NULL);
	}

	buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
	if (buf != NULL) {
		pp->pool_count++;
		VERIFY(pp->pool_count != 0);
	}
	return (buf);
}

void
pool_put(struct pool *pp, void *v)
{
	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	zfree(pp->pool_zone, v);
	VERIFY(pp->pool_count != 0);
	pp->pool_count--;
}
struct pf_mtag *
pf_find_mtag(struct mbuf *m)
{
#if !PF_PKTHDR
	struct m_tag *mtag;

	if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_PF, NULL)) == NULL)
		return (NULL);

	return ((struct pf_mtag *)(mtag + 1));
#else
	if (!(m->m_flags & M_PKTHDR))
		return (NULL);

	return (&m->m_pkthdr.pf_mtag);
#endif /* PF_PKTHDR */
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
#if !PF_PKTHDR
	struct m_tag *mtag;

	if ((mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
	    NULL)) == NULL) {
		mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_PF,
		    sizeof (struct pf_mtag), M_NOWAIT, m);
		if (mtag == NULL)
			return (NULL);
		bzero(mtag + 1, sizeof (struct pf_mtag));
		m_tag_prepend(m, mtag);
	}

	return ((struct pf_mtag *)(mtag + 1));
#else
	return (pf_find_mtag(m));
#endif /* PF_PKTHDR */
}
u_int64_t
pf_time_second(void)
{
	struct timeval t;

	microuptime(&t);
	return (t.tv_sec);
}

u_int64_t
pf_calendar_time_second(void)
{
	struct timeval t;

	microtime(&t);
	return (t.tv_sec);
}
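/*
 * Simple hook lists: hook_establish() records a callback on a TAILQ and
 * hook_runloop() invokes the callbacks, optionally removing and freeing
 * the entries (HOOK_REMOVE/HOOK_FREE) or skipping the calls (HOOK_ABORT).
 */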
void *
hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
{
	struct hook_desc *hd;

	hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
	if (hd == NULL)
		return (NULL);

	hd->hd_fn = fn;
	hd->hd_arg = arg;
	if (tail)
		TAILQ_INSERT_TAIL(head, hd, hd_list);
	else
		TAILQ_INSERT_HEAD(head, hd, hd_list);

	return (hd);
}

void
hook_runloop(struct hook_desc_head *head, int flags)
{
	struct hook_desc *hd;

	if (!(flags & HOOK_REMOVE)) {
		if (!(flags & HOOK_ABORT))
			TAILQ_FOREACH(hd, head, hd_list)
				hd->hd_fn(hd->hd_arg);
	} else {
		while (!!(hd = TAILQ_FIRST(head))) {
			TAILQ_REMOVE(head, hd, hd_list);
			if (!(flags & HOOK_ABORT))
				hd->hd_fn(hd->hd_arg);
			if (flags & HOOK_FREE)
				_FREE(hd, M_DEVBUF);
		}
	}
}