1/*
2 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
 29/* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
30/* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
31
32/*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002,2003 Henning Brauer
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * - Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * - Redistributions in binary form must reproduce the above
44 * copyright notice, this list of conditions and the following
45 * disclaimer in the documentation and/or other materials provided
46 * with the distribution.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
51 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
52 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
54 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
55 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
56 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
58 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
59 * POSSIBILITY OF SUCH DAMAGE.
60 *
61 * Effort sponsored in part by the Defense Advanced Research Projects
62 * Agency (DARPA) and Air Force Research Laboratory, Air Force
63 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
64 *
65 */
66
67#include <machine/endian.h>
68#include <sys/param.h>
69#include <sys/systm.h>
70#include <sys/mbuf.h>
71#include <sys/filio.h>
72#include <sys/socket.h>
73#include <sys/socketvar.h>
74#include <sys/kernel.h>
75#include <sys/time.h>
76#include <sys/proc.h>
77#include <sys/random.h>
78#include <sys/mcache.h>
 79#include <sys/protosw.h>
80
81#include <libkern/crypto/md5.h>
82#include <libkern/libkern.h>
83
84#include <mach/thread_act.h>
85
86#include <net/if.h>
87#include <net/if_types.h>
88#include <net/bpf.h>
89#include <net/route.h>
 90#include <net/dlil.h>
91
92#include <netinet/in.h>
93#include <netinet/in_var.h>
94#include <netinet/in_systm.h>
95#include <netinet/ip.h>
96#include <netinet/ip_var.h>
97#include <netinet/tcp.h>
98#include <netinet/tcp_seq.h>
99#include <netinet/udp.h>
100#include <netinet/ip_icmp.h>
101#include <netinet/in_pcb.h>
102#include <netinet/tcp_timer.h>
103#include <netinet/tcp_var.h>
104#include <netinet/tcp_fsm.h>
105#include <netinet/udp_var.h>
106#include <netinet/icmp_var.h>
107#include <net/if_ether.h>
108#include <net/ethernet.h>
 109#include <net/flowhash.h>
110#include <net/pfvar.h>
111#include <net/if_pflog.h>
112
113#if NPFSYNC
114#include <net/if_pfsync.h>
115#endif /* NPFSYNC */
116
117#if INET6
118#include <netinet/ip6.h>
119#include <netinet6/in6_pcb.h>
120#include <netinet6/ip6_var.h>
121#include <netinet/icmp6.h>
122#include <netinet6/nd6.h>
123#endif /* INET6 */
124
125#if DUMMYNET
126#include <netinet/ip_dummynet.h>
127#endif /* DUMMYNET */
 128
129/*
130 * For RandomULong(), to get a 32 bits random value
131 * Note that random() returns a 31 bits value, see rdar://11159750
132 */
133#include <dev/random/randomdev.h>
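/*
 * Editorial sketch (not part of the original source): illustrates the
 * distinction the comment above draws.  random() yields at most 31 bits
 * of entropy, so callers in this file that need a full 32-bit value use
 * RandomULong() instead.  Guarded out so it cannot affect the build;
 * pf_example_random32 is a hypothetical name used only for illustration.
 */
#if 0
static u_int32_t
pf_example_random32(void)
{
	u_int32_t r31 = (u_int32_t)random();	/* 0 .. 0x7fffffff only */
	u_int32_t r32 = RandomULong();		/* full 0 .. 0xffffffff range */
	return (r31 ^ r32);
}
#endif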
134
 135#define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0))
136
137/*
138 * On Mac OS X, the rtableid value is treated as the interface scope
139 * value that is equivalent to the interface index used for scoped
140 * routing. A valid scope value is anything but IFSCOPE_NONE (0),
141 * as per definition of ifindex which is a positive, non-zero number.
142 * The other BSDs treat a negative rtableid value as invalid, hence
143 * the test against INT_MAX to handle userland apps which initialize
144 * the field with a negative number.
145 */
146#define PF_RTABLEID_IS_VALID(r) \
147 ((r) > IFSCOPE_NONE && (r) <= INT_MAX)
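/*
 * Editorial sketch (added for illustration, not in the original source):
 * how PF_RTABLEID_IS_VALID() classifies typical rtableid values under the
 * rules described in the comment above.  Guarded out of the build;
 * pf_example_rtableid is a hypothetical name.
 */
#if 0
static void
pf_example_rtableid(void)
{
	VERIFY(!PF_RTABLEID_IS_VALID(IFSCOPE_NONE));	/* 0: no scope, invalid */
	VERIFY(PF_RTABLEID_IS_VALID(1));		/* any valid interface index */
	/* a field initialized to -1 in userland arrives as 0xffffffffu */
	VERIFY(!PF_RTABLEID_IS_VALID(0xffffffffu));	/* > INT_MAX: invalid */
}
#endif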
148
149/*
150 * Global variables
151 */
152decl_lck_mtx_data(,pf_lock_data);
153decl_lck_rw_data(,pf_perim_lock_data);
154lck_mtx_t *pf_lock = &pf_lock_data;
155lck_rw_t *pf_perim_lock = &pf_perim_lock_data;
156
157/* state tables */
158struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
159struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;
160
161struct pf_palist pf_pabuf;
162struct pf_status pf_status;
163
 164#if PF_ALTQ
165struct pf_altqqueue pf_altqs[2];
166struct pf_altqqueue *pf_altqs_active;
167struct pf_altqqueue *pf_altqs_inactive;
168u_int32_t ticket_altqs_active;
169u_int32_t ticket_altqs_inactive;
170int altqs_inactive_open;
 171#endif /* PF_ALTQ */
172u_int32_t ticket_pabuf;
173
174static MD5_CTX pf_tcp_secret_ctx;
175static u_char pf_tcp_secret[16];
176static int pf_tcp_secret_init;
177static int pf_tcp_iss_off;
178
179static struct pf_anchor_stackframe {
180 struct pf_ruleset *rs;
181 struct pf_rule *r;
182 struct pf_anchor_node *parent;
183 struct pf_anchor *child;
184} pf_anchor_stack[64];
185
186struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
187struct pool pf_state_pl, pf_state_key_pl;
 188#if PF_ALTQ
 189struct pool pf_altq_pl;
 190#endif /* PF_ALTQ */
 191
192typedef void (*hook_fn_t)(void *);
193
194struct hook_desc {
195 TAILQ_ENTRY(hook_desc) hd_list;
196 hook_fn_t hd_fn;
197 void *hd_arg;
198};
199
200#define HOOK_REMOVE 0x01
201#define HOOK_FREE 0x02
202#define HOOK_ABORT 0x04
203
204static void *hook_establish(struct hook_desc_head *, int,
205 hook_fn_t, void *);
206static void hook_runloop(struct hook_desc_head *, int flags);
207
208struct pool pf_app_state_pl;
209static void pf_print_addr(struct pf_addr *addr, sa_family_t af);
210static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int,
211 u_int8_t);
212
213static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
214
215static void pf_init_threshold(struct pf_threshold *, u_int32_t,
216 u_int32_t);
217static void pf_add_threshold(struct pf_threshold *);
218static int pf_check_threshold(struct pf_threshold *);
219
220static void pf_change_ap(int, struct mbuf *, struct pf_addr *,
221 u_int16_t *, u_int16_t *, u_int16_t *,
222 struct pf_addr *, u_int16_t, u_int8_t, sa_family_t);
223static int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
224 struct tcphdr *, struct pf_state_peer *);
225#if INET6
226static void pf_change_a6(struct pf_addr *, u_int16_t *,
227 struct pf_addr *, u_int8_t);
228#endif /* INET6 */
229static void pf_change_icmp(struct pf_addr *, u_int16_t *,
230 struct pf_addr *, struct pf_addr *, u_int16_t,
231 u_int16_t *, u_int16_t *, u_int16_t *,
232 u_int16_t *, u_int8_t, sa_family_t);
233static void pf_send_tcp(const struct pf_rule *, sa_family_t,
234 const struct pf_addr *, const struct pf_addr *,
235 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
236 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
237 u_int16_t, struct ether_header *, struct ifnet *);
238static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
239 sa_family_t, struct pf_rule *);
240static struct pf_rule *pf_match_translation(struct pf_pdesc *, struct mbuf *,
241 int, int, struct pfi_kif *, struct pf_addr *,
242 union pf_state_xport *, struct pf_addr *,
243 union pf_state_xport *, int);
244static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *,
245 struct mbuf *, int, int, struct pfi_kif *,
246 struct pf_src_node **, struct pf_addr *,
247 union pf_state_xport *, struct pf_addr *,
248 union pf_state_xport *, struct pf_addr *,
249 union pf_state_xport *);
250static void pf_attach_state(struct pf_state_key *,
251 struct pf_state *, int);
252static void pf_detach_state(struct pf_state *, int);
253static u_int32_t pf_tcp_iss(struct pf_pdesc *);
254static int pf_test_rule(struct pf_rule **, struct pf_state **,
255 int, struct pfi_kif *, struct mbuf *, int,
256 void *, struct pf_pdesc *, struct pf_rule **,
257 struct pf_ruleset **, struct ifqueue *);
258#if DUMMYNET
259static int pf_test_dummynet(struct pf_rule **, int,
260 struct pfi_kif *, struct mbuf **,
261 struct pf_pdesc *, struct ip_fw_args *);
262#endif /* DUMMYNET */
263static int pf_test_fragment(struct pf_rule **, int,
264 struct pfi_kif *, struct mbuf *, void *,
265 struct pf_pdesc *, struct pf_rule **,
266 struct pf_ruleset **);
267static int pf_test_state_tcp(struct pf_state **, int,
268 struct pfi_kif *, struct mbuf *, int,
269 void *, struct pf_pdesc *, u_short *);
270static int pf_test_state_udp(struct pf_state **, int,
271 struct pfi_kif *, struct mbuf *, int,
 272 void *, struct pf_pdesc *, u_short *);
273static int pf_test_state_icmp(struct pf_state **, int,
274 struct pfi_kif *, struct mbuf *, int,
275 void *, struct pf_pdesc *, u_short *);
276static int pf_test_state_other(struct pf_state **, int,
277 struct pfi_kif *, struct pf_pdesc *);
278static int pf_match_tag(struct mbuf *, struct pf_rule *,
279 struct pf_mtag *, int *);
280static void pf_hash(struct pf_addr *, struct pf_addr *,
281 struct pf_poolhashkey *, sa_family_t);
282static int pf_map_addr(u_int8_t, struct pf_rule *,
283 struct pf_addr *, struct pf_addr *,
284 struct pf_addr *, struct pf_src_node **);
285static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *,
286 struct pf_rule *, struct pf_addr *,
287 union pf_state_xport *, struct pf_addr *,
288 union pf_state_xport *, struct pf_addr *,
289 union pf_state_xport *, struct pf_src_node **);
290static void pf_route(struct mbuf **, struct pf_rule *, int,
291 struct ifnet *, struct pf_state *,
292 struct pf_pdesc *);
293#if INET6
294static void pf_route6(struct mbuf **, struct pf_rule *, int,
295 struct ifnet *, struct pf_state *,
296 struct pf_pdesc *);
297#endif /* INET6 */
298static u_int8_t pf_get_wscale(struct mbuf *, int, u_int16_t,
299 sa_family_t);
300static u_int16_t pf_get_mss(struct mbuf *, int, u_int16_t,
301 sa_family_t);
302static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
303 u_int16_t);
304static void pf_set_rt_ifp(struct pf_state *,
305 struct pf_addr *);
306static int pf_check_proto_cksum(struct mbuf *, int, int,
307 u_int8_t, sa_family_t);
308static int pf_addr_wrap_neq(struct pf_addr_wrap *,
309 struct pf_addr_wrap *);
310static struct pf_state *pf_find_state(struct pfi_kif *,
311 struct pf_state_key_cmp *, u_int);
312static int pf_src_connlimit(struct pf_state **);
313static void pf_stateins_err(const char *, struct pf_state *,
314 struct pfi_kif *);
315static int pf_check_congestion(struct ifqueue *);
316
317#if 0
318static const char *pf_pptp_ctrl_type_name(u_int16_t code);
319#endif
320static void pf_pptp_handler(struct pf_state *, int, int,
321 struct pf_pdesc *, struct pfi_kif *);
322static void pf_pptp_unlink(struct pf_state *);
 323static void pf_grev1_unlink(struct pf_state *);
324static int pf_test_state_grev1(struct pf_state **, int,
325 struct pfi_kif *, int, struct pf_pdesc *);
326static int pf_ike_compare(struct pf_app_state *,
327 struct pf_app_state *);
328static int pf_test_state_esp(struct pf_state **, int,
329 struct pfi_kif *, int, struct pf_pdesc *);
330
331extern struct pool pfr_ktable_pl;
332extern struct pool pfr_kentry_pl;
333extern int path_mtu_discovery;
334
335struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
336 { &pf_state_pl, PFSTATE_HIWAT },
337 { &pf_app_state_pl, PFAPPSTATE_HIWAT },
338 { &pf_src_tree_pl, PFSNODE_HIWAT },
339 { &pf_frent_pl, PFFRAG_FRENT_HIWAT },
340 { &pfr_ktable_pl, PFR_KTABLE_HIWAT },
 341 { &pfr_kentry_pl, PFR_KENTRY_HIWAT },
342};
343
344struct mbuf *
345pf_lazy_makewritable(struct pf_pdesc *pd, struct mbuf *m, int len)
346{
347 if (pd->lmw < 0)
348 return (0);
349
350 VERIFY(m == pd->mp);
351
352 if (len > pd->lmw) {
353 if (m_makewritable(&m, 0, len, M_DONTWAIT))
354 len = -1;
355 pd->lmw = len;
356 if (len >= 0 && m != pd->mp) {
357 pd->mp = m;
 358 pd->pf_mtag = pf_find_mtag(m);
359
360 switch (pd->af) {
361 case AF_INET: {
362 struct ip *h = mtod(m, struct ip *);
363 pd->src = (struct pf_addr *)&h->ip_src;
364 pd->dst = (struct pf_addr *)&h->ip_dst;
365 pd->ip_sum = &h->ip_sum;
366 break;
367 }
368#if INET6
369 case AF_INET6: {
370 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
371 pd->src = (struct pf_addr *)&h->ip6_src;
372 pd->dst = (struct pf_addr *)&h->ip6_dst;
373 break;
374 }
375#endif /* INET6 */
376 }
377 }
378 }
379
380 return (len < 0 ? 0 : m);
381}
382
383static const int *
384pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
385 int direction, int *action)
386{
387 if (*state == NULL || (*state)->timeout == PFTM_PURGE) {
388 *action = PF_DROP;
389 return (action);
390 }
391
392 if (direction == PF_OUT &&
393 (((*state)->rule.ptr->rt == PF_ROUTETO &&
394 (*state)->rule.ptr->direction == PF_OUT) ||
395 ((*state)->rule.ptr->rt == PF_REPLYTO &&
396 (*state)->rule.ptr->direction == PF_IN)) &&
397 (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
398 *action = PF_PASS;
399 return (action);
400 }
401
402 return (0);
403}
404
405#define STATE_LOOKUP() \
406 do { \
407 int action; \
408 *state = pf_find_state(kif, &key, direction); \
409 if (*state != NULL && pd != NULL && \
410 !(pd->pktflags & PKTF_FLOW_ID)) { \
411 pd->flowsrc = (*state)->state_key->flowsrc; \
 412 pd->flowhash = (*state)->state_key->flowhash; \
413 if (pd->flowhash != 0) { \
414 pd->pktflags |= PKTF_FLOW_ID; \
415 pd->pktflags &= ~PKTF_FLOW_ADV; \
416 } \
 417 } \
418 if (pf_state_lookup_aux(state, kif, direction, &action)) \
419 return (action); \
420 } while (0)
421
422#define STATE_ADDR_TRANSLATE(sk) \
423 (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \
424 ((sk)->af == AF_INET6 && \
425 ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \
426 (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \
427 (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3]))
428
429#define STATE_TRANSLATE(sk) \
430 (STATE_ADDR_TRANSLATE(sk) || \
431 (sk)->lan.xport.port != (sk)->gwy.xport.port)
432
433#define STATE_GRE_TRANSLATE(sk) \
434 (STATE_ADDR_TRANSLATE(sk) || \
435 (sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)
436
437#define BOUND_IFACE(r, k) \
438 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
439
440#define STATE_INC_COUNTERS(s) \
441 do { \
442 s->rule.ptr->states++; \
443 VERIFY(s->rule.ptr->states != 0); \
444 if (s->anchor.ptr != NULL) { \
445 s->anchor.ptr->states++; \
446 VERIFY(s->anchor.ptr->states != 0); \
447 } \
448 if (s->nat_rule.ptr != NULL) { \
449 s->nat_rule.ptr->states++; \
450 VERIFY(s->nat_rule.ptr->states != 0); \
451 } \
452 } while (0)
453
454#define STATE_DEC_COUNTERS(s) \
455 do { \
456 if (s->nat_rule.ptr != NULL) { \
457 VERIFY(s->nat_rule.ptr->states > 0); \
458 s->nat_rule.ptr->states--; \
459 } \
460 if (s->anchor.ptr != NULL) { \
461 VERIFY(s->anchor.ptr->states > 0); \
462 s->anchor.ptr->states--; \
463 } \
464 VERIFY(s->rule.ptr->states > 0); \
465 s->rule.ptr->states--; \
466 } while (0)
467
468static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
469static __inline int pf_state_compare_lan_ext(struct pf_state_key *,
470 struct pf_state_key *);
471static __inline int pf_state_compare_ext_gwy(struct pf_state_key *,
472 struct pf_state_key *);
473static __inline int pf_state_compare_id(struct pf_state *,
474 struct pf_state *);
475
476struct pf_src_tree tree_src_tracking;
477
478struct pf_state_tree_id tree_id;
479struct pf_state_queue state_list;
480
481RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
482RB_GENERATE(pf_state_tree_lan_ext, pf_state_key,
483 entry_lan_ext, pf_state_compare_lan_ext);
484RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key,
485 entry_ext_gwy, pf_state_compare_ext_gwy);
486RB_GENERATE(pf_state_tree_id, pf_state,
487 entry_id, pf_state_compare_id);
488
489#define PF_DT_SKIP_LANEXT 0x01
490#define PF_DT_SKIP_EXTGWY 0x02
491
492static const u_int16_t PF_PPTP_PORT = 1723;
493static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
494
495struct pf_pptp_hdr {
496 u_int16_t length;
497 u_int16_t type;
498 u_int32_t magic;
499};
500
501struct pf_pptp_ctrl_hdr {
502 u_int16_t type;
503 u_int16_t reserved_0;
504};
505
506struct pf_pptp_ctrl_generic {
507 u_int16_t data[0];
508};
509
510#define PF_PPTP_CTRL_TYPE_START_REQ 1
511struct pf_pptp_ctrl_start_req {
512 u_int16_t protocol_version;
513 u_int16_t reserved_1;
514 u_int32_t framing_capabilities;
515 u_int32_t bearer_capabilities;
516 u_int16_t maximum_channels;
517 u_int16_t firmware_revision;
518 u_int8_t host_name[64];
519 u_int8_t vendor_string[64];
520};
521
522#define PF_PPTP_CTRL_TYPE_START_RPY 2
523struct pf_pptp_ctrl_start_rpy {
524 u_int16_t protocol_version;
525 u_int8_t result_code;
526 u_int8_t error_code;
527 u_int32_t framing_capabilities;
528 u_int32_t bearer_capabilities;
529 u_int16_t maximum_channels;
530 u_int16_t firmware_revision;
531 u_int8_t host_name[64];
532 u_int8_t vendor_string[64];
533};
534
535#define PF_PPTP_CTRL_TYPE_STOP_REQ 3
536struct pf_pptp_ctrl_stop_req {
537 u_int8_t reason;
538 u_int8_t reserved_1;
539 u_int16_t reserved_2;
540};
541
542#define PF_PPTP_CTRL_TYPE_STOP_RPY 4
543struct pf_pptp_ctrl_stop_rpy {
544 u_int8_t reason;
545 u_int8_t error_code;
546 u_int16_t reserved_1;
547};
548
549#define PF_PPTP_CTRL_TYPE_ECHO_REQ 5
550struct pf_pptp_ctrl_echo_req {
551 u_int32_t identifier;
552};
553
554#define PF_PPTP_CTRL_TYPE_ECHO_RPY 6
555struct pf_pptp_ctrl_echo_rpy {
556 u_int32_t identifier;
557 u_int8_t result_code;
558 u_int8_t error_code;
559 u_int16_t reserved_1;
560};
561
562#define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7
563struct pf_pptp_ctrl_call_out_req {
564 u_int16_t call_id;
565 u_int16_t call_sernum;
566 u_int32_t min_bps;
567 u_int32_t bearer_type;
568 u_int32_t framing_type;
569 u_int16_t rxwindow_size;
570 u_int16_t proc_delay;
571 u_int8_t phone_num[64];
572 u_int8_t sub_addr[64];
573};
574
575#define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8
576struct pf_pptp_ctrl_call_out_rpy {
577 u_int16_t call_id;
578 u_int16_t peer_call_id;
579 u_int8_t result_code;
580 u_int8_t error_code;
581 u_int16_t cause_code;
582 u_int32_t connect_speed;
583 u_int16_t rxwindow_size;
584 u_int16_t proc_delay;
585 u_int32_t phy_channel_id;
586};
587
588#define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9
589struct pf_pptp_ctrl_call_in_1st {
590 u_int16_t call_id;
591 u_int16_t call_sernum;
592 u_int32_t bearer_type;
593 u_int32_t phy_channel_id;
594 u_int16_t dialed_number_len;
595 u_int16_t dialing_number_len;
596 u_int8_t dialed_num[64];
597 u_int8_t dialing_num[64];
598 u_int8_t sub_addr[64];
599};
600
601#define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10
602struct pf_pptp_ctrl_call_in_2nd {
603 u_int16_t call_id;
604 u_int16_t peer_call_id;
605 u_int8_t result_code;
606 u_int8_t error_code;
607 u_int16_t rxwindow_size;
608 u_int16_t txdelay;
609 u_int16_t reserved_1;
610};
611
612#define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11
613struct pf_pptp_ctrl_call_in_3rd {
614 u_int16_t call_id;
615 u_int16_t reserved_1;
616 u_int32_t connect_speed;
617 u_int16_t rxwindow_size;
618 u_int16_t txdelay;
619 u_int32_t framing_type;
620};
621
622#define PF_PPTP_CTRL_TYPE_CALL_CLR 12
623struct pf_pptp_ctrl_call_clr {
624 u_int16_t call_id;
625 u_int16_t reserved_1;
626};
627
628#define PF_PPTP_CTRL_TYPE_CALL_DISC 13
629struct pf_pptp_ctrl_call_disc {
630 u_int16_t call_id;
631 u_int8_t result_code;
632 u_int8_t error_code;
633 u_int16_t cause_code;
634 u_int16_t reserved_1;
635 u_int8_t statistics[128];
636};
637
638#define PF_PPTP_CTRL_TYPE_ERROR 14
639struct pf_pptp_ctrl_error {
640 u_int16_t peer_call_id;
641 u_int16_t reserved_1;
642 u_int32_t crc_errors;
643 u_int32_t fr_errors;
644 u_int32_t hw_errors;
645 u_int32_t buf_errors;
646 u_int32_t tim_errors;
647 u_int32_t align_errors;
648};
649
650#define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15
651struct pf_pptp_ctrl_set_linkinfo {
652 u_int16_t peer_call_id;
653 u_int16_t reserved_1;
654 u_int32_t tx_accm;
655 u_int32_t rx_accm;
656};
657
658#if 0
659static const char *pf_pptp_ctrl_type_name(u_int16_t code)
660{
661 code = ntohs(code);
662
663 if (code < PF_PPTP_CTRL_TYPE_START_REQ ||
664 code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) {
665 static char reserved[] = "reserved-00";
666
667 sprintf(&reserved[9], "%02x", code);
668 return (reserved);
669 } else {
670 static const char *name[] = {
671 "start_req", "start_rpy", "stop_req", "stop_rpy",
672 "echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
673 "call_in_1st", "call_in_2nd", "call_in_3rd",
674 "call_clr", "call_disc", "error", "set_linkinfo"
675 };
676
677 return (name[code - 1]);
678 }
679};
680#endif
681
682static const size_t PF_PPTP_CTRL_MSG_MINSIZE =
683 sizeof (struct pf_pptp_hdr) +
684 sizeof (struct pf_pptp_ctrl_hdr) +
685 MIN(sizeof (struct pf_pptp_ctrl_start_req),
686 MIN(sizeof (struct pf_pptp_ctrl_start_rpy),
687 MIN(sizeof (struct pf_pptp_ctrl_stop_req),
688 MIN(sizeof (struct pf_pptp_ctrl_stop_rpy),
689 MIN(sizeof (struct pf_pptp_ctrl_echo_req),
690 MIN(sizeof (struct pf_pptp_ctrl_echo_rpy),
691 MIN(sizeof (struct pf_pptp_ctrl_call_out_req),
692 MIN(sizeof (struct pf_pptp_ctrl_call_out_rpy),
693 MIN(sizeof (struct pf_pptp_ctrl_call_in_1st),
694 MIN(sizeof (struct pf_pptp_ctrl_call_in_2nd),
695 MIN(sizeof (struct pf_pptp_ctrl_call_in_3rd),
696 MIN(sizeof (struct pf_pptp_ctrl_call_clr),
697 MIN(sizeof (struct pf_pptp_ctrl_call_disc),
698 MIN(sizeof (struct pf_pptp_ctrl_error),
699 sizeof (struct pf_pptp_ctrl_set_linkinfo)
700 ))))))))))))));
701
702union pf_pptp_ctrl_msg_union {
703 struct pf_pptp_ctrl_start_req start_req;
704 struct pf_pptp_ctrl_start_rpy start_rpy;
705 struct pf_pptp_ctrl_stop_req stop_req;
706 struct pf_pptp_ctrl_stop_rpy stop_rpy;
707 struct pf_pptp_ctrl_echo_req echo_req;
708 struct pf_pptp_ctrl_echo_rpy echo_rpy;
709 struct pf_pptp_ctrl_call_out_req call_out_req;
710 struct pf_pptp_ctrl_call_out_rpy call_out_rpy;
711 struct pf_pptp_ctrl_call_in_1st call_in_1st;
712 struct pf_pptp_ctrl_call_in_2nd call_in_2nd;
713 struct pf_pptp_ctrl_call_in_3rd call_in_3rd;
714 struct pf_pptp_ctrl_call_clr call_clr;
715 struct pf_pptp_ctrl_call_disc call_disc;
716 struct pf_pptp_ctrl_error error;
717 struct pf_pptp_ctrl_set_linkinfo set_linkinfo;
718 u_int8_t data[0];
719};
720
721struct pf_pptp_ctrl_msg {
722 struct pf_pptp_hdr hdr;
723 struct pf_pptp_ctrl_hdr ctrl;
724 union pf_pptp_ctrl_msg_union msg;
725};
726
727#define PF_GRE_FLAG_CHECKSUM_PRESENT 0x8000
728#define PF_GRE_FLAG_VERSION_MASK 0x0007
729#define PF_GRE_PPP_ETHERTYPE 0x880B
730
731struct pf_grev1_hdr {
732 u_int16_t flags;
733 u_int16_t protocol_type;
734 u_int16_t payload_length;
735 u_int16_t call_id;
736 /*
737 u_int32_t seqno;
738 u_int32_t ackno;
739 */
740};
741
 742static const u_int16_t PF_IKE_PORT = 500;
743
744struct pf_ike_hdr {
745 u_int64_t initiator_cookie, responder_cookie;
746 u_int8_t next_payload, version, exchange_type, flags;
747 u_int32_t message_id, length;
748};
749
750#define PF_IKE_PACKET_MINSIZE (sizeof (struct pf_ike_hdr))
751
752#define PF_IKEv1_EXCHTYPE_BASE 1
753#define PF_IKEv1_EXCHTYPE_ID_PROTECT 2
754#define PF_IKEv1_EXCHTYPE_AUTH_ONLY 3
755#define PF_IKEv1_EXCHTYPE_AGGRESSIVE 4
756#define PF_IKEv1_EXCHTYPE_INFORMATIONAL 5
757#define PF_IKEv2_EXCHTYPE_SA_INIT 34
758#define PF_IKEv2_EXCHTYPE_AUTH 35
759#define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA 36
760#define PF_IKEv2_EXCHTYPE_INFORMATIONAL 37
761
762#define PF_IKEv1_FLAG_E 0x01
763#define PF_IKEv1_FLAG_C 0x02
764#define PF_IKEv1_FLAG_A 0x04
765#define PF_IKEv2_FLAG_I 0x08
766#define PF_IKEv2_FLAG_V 0x10
767#define PF_IKEv2_FLAG_R 0x20
768
769struct pf_esp_hdr {
770 u_int32_t spi;
771 u_int32_t seqno;
772 u_int8_t payload[];
773};
774
775static __inline int
776pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
777{
778 int diff;
779
780 if (a->rule.ptr > b->rule.ptr)
781 return (1);
782 if (a->rule.ptr < b->rule.ptr)
783 return (-1);
784 if ((diff = a->af - b->af) != 0)
785 return (diff);
786 switch (a->af) {
787#if INET
788 case AF_INET:
789 if (a->addr.addr32[0] > b->addr.addr32[0])
790 return (1);
791 if (a->addr.addr32[0] < b->addr.addr32[0])
792 return (-1);
793 break;
794#endif /* INET */
795#if INET6
796 case AF_INET6:
797 if (a->addr.addr32[3] > b->addr.addr32[3])
798 return (1);
799 if (a->addr.addr32[3] < b->addr.addr32[3])
800 return (-1);
801 if (a->addr.addr32[2] > b->addr.addr32[2])
802 return (1);
803 if (a->addr.addr32[2] < b->addr.addr32[2])
804 return (-1);
805 if (a->addr.addr32[1] > b->addr.addr32[1])
806 return (1);
807 if (a->addr.addr32[1] < b->addr.addr32[1])
808 return (-1);
809 if (a->addr.addr32[0] > b->addr.addr32[0])
810 return (1);
811 if (a->addr.addr32[0] < b->addr.addr32[0])
812 return (-1);
813 break;
814#endif /* INET6 */
815 }
816 return (0);
817}
818
819static __inline int
820pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b)
821{
822 int diff;
 823 int extfilter;
824
825 if ((diff = a->proto - b->proto) != 0)
826 return (diff);
827 if ((diff = a->af - b->af) != 0)
828 return (diff);
829
830 extfilter = PF_EXTFILTER_APD;
831
832 switch (a->proto) {
833 case IPPROTO_ICMP:
834 case IPPROTO_ICMPV6:
835 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
836 return (diff);
837 break;
838
839 case IPPROTO_TCP:
840 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
841 return (diff);
842 if ((diff = a->ext.xport.port - b->ext.xport.port) != 0)
843 return (diff);
844 break;
845
846 case IPPROTO_UDP:
847 if ((diff = a->proto_variant - b->proto_variant))
848 return (diff);
849 extfilter = a->proto_variant;
850 if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
851 return (diff);
852 if ((extfilter < PF_EXTFILTER_AD) &&
853 (diff = a->ext.xport.port - b->ext.xport.port) != 0)
854 return (diff);
855 break;
856
857 case IPPROTO_GRE:
858 if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
859 a->proto_variant == b->proto_variant) {
860 if (!!(diff = a->ext.xport.call_id -
861 b->ext.xport.call_id))
862 return (diff);
863 }
864 break;
865
866 case IPPROTO_ESP:
867 if (!!(diff = a->ext.xport.spi - b->ext.xport.spi))
868 return (diff);
869 break;
870
871 default:
872 break;
873 }
874
875 switch (a->af) {
876#if INET
877 case AF_INET:
878 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
879 return (1);
880 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
881 return (-1);
882 if (extfilter < PF_EXTFILTER_EI) {
883 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
884 return (1);
885 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
886 return (-1);
887 }
888 break;
889#endif /* INET */
890#if INET6
891 case AF_INET6:
892 if (a->lan.addr.addr32[3] > b->lan.addr.addr32[3])
893 return (1);
894 if (a->lan.addr.addr32[3] < b->lan.addr.addr32[3])
895 return (-1);
896 if (a->lan.addr.addr32[2] > b->lan.addr.addr32[2])
897 return (1);
898 if (a->lan.addr.addr32[2] < b->lan.addr.addr32[2])
899 return (-1);
900 if (a->lan.addr.addr32[1] > b->lan.addr.addr32[1])
901 return (1);
902 if (a->lan.addr.addr32[1] < b->lan.addr.addr32[1])
903 return (-1);
904 if (a->lan.addr.addr32[0] > b->lan.addr.addr32[0])
905 return (1);
906 if (a->lan.addr.addr32[0] < b->lan.addr.addr32[0])
907 return (-1);
908 if (extfilter < PF_EXTFILTER_EI ||
909 !PF_AZERO(&b->ext.addr, AF_INET6)) {
910 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
911 return (1);
912 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
913 return (-1);
914 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
915 return (1);
916 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
917 return (-1);
918 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
919 return (1);
920 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
921 return (-1);
922 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
923 return (1);
924 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
925 return (-1);
926 }
927 break;
928#endif /* INET6 */
929 }
930
931 if (a->app_state && b->app_state) {
932 if (a->app_state->compare_lan_ext &&
933 b->app_state->compare_lan_ext) {
934 diff = (const char *)b->app_state->compare_lan_ext -
935 (const char *)a->app_state->compare_lan_ext;
936 if (diff != 0)
937 return (diff);
938 diff = a->app_state->compare_lan_ext(a->app_state,
939 b->app_state);
940 if (diff != 0)
941 return (diff);
942 }
943 }
944
945 return (0);
946}
947
948static __inline int
949pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b)
950{
951 int diff;
 952 int extfilter;
953
954 if ((diff = a->proto - b->proto) != 0)
955 return (diff);
956
957 if ((diff = a->af - b->af) != 0)
958 return (diff);
959
960 extfilter = PF_EXTFILTER_APD;
961
962 switch (a->proto) {
963 case IPPROTO_ICMP:
964 case IPPROTO_ICMPV6:
965 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
966 return (diff);
967 break;
968
969 case IPPROTO_TCP:
970 if ((diff = a->ext.xport.port - b->ext.xport.port) != 0)
971 return (diff);
972 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
973 return (diff);
974 break;
975
976 case IPPROTO_UDP:
977 if ((diff = a->proto_variant - b->proto_variant))
978 return (diff);
979 extfilter = a->proto_variant;
980 if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
981 return (diff);
982 if ((extfilter < PF_EXTFILTER_AD) &&
983 (diff = a->ext.xport.port - b->ext.xport.port) != 0)
984 return (diff);
985 break;
986
987 case IPPROTO_GRE:
988 if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
989 a->proto_variant == b->proto_variant) {
990 if (!!(diff = a->gwy.xport.call_id -
991 b->gwy.xport.call_id))
992 return (diff);
993 }
994 break;
995
996 case IPPROTO_ESP:
997 if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi))
998 return (diff);
999 break;
1000
1001 default:
1002 break;
1003 }
1004
1005 switch (a->af) {
1006#if INET
1007 case AF_INET:
1008 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1009 return (1);
1010 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1011 return (-1);
1012 if (extfilter < PF_EXTFILTER_EI) {
1013 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1014 return (1);
1015 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1016 return (-1);
1017 }
1018 break;
1019#endif /* INET */
1020#if INET6
1021 case AF_INET6:
1022 if (a->gwy.addr.addr32[3] > b->gwy.addr.addr32[3])
1023 return (1);
1024 if (a->gwy.addr.addr32[3] < b->gwy.addr.addr32[3])
1025 return (-1);
1026 if (a->gwy.addr.addr32[2] > b->gwy.addr.addr32[2])
1027 return (1);
1028 if (a->gwy.addr.addr32[2] < b->gwy.addr.addr32[2])
1029 return (-1);
1030 if (a->gwy.addr.addr32[1] > b->gwy.addr.addr32[1])
1031 return (1);
1032 if (a->gwy.addr.addr32[1] < b->gwy.addr.addr32[1])
1033 return (-1);
1034 if (a->gwy.addr.addr32[0] > b->gwy.addr.addr32[0])
1035 return (1);
1036 if (a->gwy.addr.addr32[0] < b->gwy.addr.addr32[0])
1037 return (-1);
1038 if (extfilter < PF_EXTFILTER_EI ||
1039 !PF_AZERO(&b->ext.addr, AF_INET6)) {
1040 if (a->ext.addr.addr32[3] > b->ext.addr.addr32[3])
1041 return (1);
1042 if (a->ext.addr.addr32[3] < b->ext.addr.addr32[3])
1043 return (-1);
1044 if (a->ext.addr.addr32[2] > b->ext.addr.addr32[2])
1045 return (1);
1046 if (a->ext.addr.addr32[2] < b->ext.addr.addr32[2])
1047 return (-1);
1048 if (a->ext.addr.addr32[1] > b->ext.addr.addr32[1])
1049 return (1);
1050 if (a->ext.addr.addr32[1] < b->ext.addr.addr32[1])
1051 return (-1);
1052 if (a->ext.addr.addr32[0] > b->ext.addr.addr32[0])
1053 return (1);
1054 if (a->ext.addr.addr32[0] < b->ext.addr.addr32[0])
1055 return (-1);
1056 }
1057 break;
1058#endif /* INET6 */
1059 }
1060
1061 if (a->app_state && b->app_state) {
1062 if (a->app_state->compare_ext_gwy &&
1063 b->app_state->compare_ext_gwy) {
1064 diff = (const char *)b->app_state->compare_ext_gwy -
1065 (const char *)a->app_state->compare_ext_gwy;
1066 if (diff != 0)
1067 return (diff);
1068 diff = a->app_state->compare_ext_gwy(a->app_state,
1069 b->app_state);
1070 if (diff != 0)
1071 return (diff);
1072 }
1073 }
1074
1075 return (0);
1076}
1077
1078static __inline int
1079pf_state_compare_id(struct pf_state *a, struct pf_state *b)
1080{
1081 if (a->id > b->id)
1082 return (1);
1083 if (a->id < b->id)
1084 return (-1);
1085 if (a->creatorid > b->creatorid)
1086 return (1);
1087 if (a->creatorid < b->creatorid)
1088 return (-1);
1089
1090 return (0);
1091}
1092
1093#if INET6
1094void
1095pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
1096{
1097 switch (af) {
1098#if INET
1099 case AF_INET:
1100 dst->addr32[0] = src->addr32[0];
1101 break;
1102#endif /* INET */
1103 case AF_INET6:
1104 dst->addr32[0] = src->addr32[0];
1105 dst->addr32[1] = src->addr32[1];
1106 dst->addr32[2] = src->addr32[2];
1107 dst->addr32[3] = src->addr32[3];
1108 break;
1109 }
1110}
1111#endif /* INET6 */
1112
1113struct pf_state *
1114pf_find_state_byid(struct pf_state_cmp *key)
1115{
1116 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1117
1118 return (RB_FIND(pf_state_tree_id, &tree_id,
1119 (struct pf_state *)(void *)key));
1120}
1121
1122static struct pf_state *
1123pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
1124{
1125 struct pf_state_key *sk = NULL;
1126 struct pf_state *s;
1127
1128 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1129
1130 switch (dir) {
1131 case PF_OUT:
1132 sk = RB_FIND(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
1133 (struct pf_state_key *)key);
1134 break;
1135 case PF_IN:
1136 sk = RB_FIND(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy,
1137 (struct pf_state_key *)key);
1138 break;
1139 default:
1140 panic("pf_find_state");
1141 }
1142
1143 /* list is sorted, if-bound states before floating ones */
1144 if (sk != NULL)
1145 TAILQ_FOREACH(s, &sk->states, next)
1146 if (s->kif == pfi_all || s->kif == kif)
1147 return (s);
1148
1149 return (NULL);
1150}
1151
1152struct pf_state *
1153pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1154{
1155 struct pf_state_key *sk = NULL;
1156 struct pf_state *s, *ret = NULL;
1157
1158 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1159
1160 switch (dir) {
1161 case PF_OUT:
1162 sk = RB_FIND(pf_state_tree_lan_ext,
1163 &pf_statetbl_lan_ext, (struct pf_state_key *)key);
1164 break;
1165 case PF_IN:
1166 sk = RB_FIND(pf_state_tree_ext_gwy,
1167 &pf_statetbl_ext_gwy, (struct pf_state_key *)key);
1168 break;
1169 default:
1170 panic("pf_find_state_all");
1171 }
1172
1173 if (sk != NULL) {
1174 ret = TAILQ_FIRST(&sk->states);
1175 if (more == NULL)
1176 return (ret);
1177
1178 TAILQ_FOREACH(s, &sk->states, next)
1179 (*more)++;
1180 }
1181
1182 return (ret);
1183}
1184
1185static void
1186pf_init_threshold(struct pf_threshold *threshold,
1187 u_int32_t limit, u_int32_t seconds)
1188{
1189 threshold->limit = limit * PF_THRESHOLD_MULT;
1190 threshold->seconds = seconds;
1191 threshold->count = 0;
1192 threshold->last = pf_time_second();
1193}
1194
1195static void
1196pf_add_threshold(struct pf_threshold *threshold)
1197{
1198 u_int32_t t = pf_time_second(), diff = t - threshold->last;
1199
1200 if (diff >= threshold->seconds)
1201 threshold->count = 0;
1202 else
1203 threshold->count -= threshold->count * diff /
1204 threshold->seconds;
1205 threshold->count += PF_THRESHOLD_MULT;
1206 threshold->last = t;
1207}
1208
1209static int
1210pf_check_threshold(struct pf_threshold *threshold)
1211{
1212 return (threshold->count > threshold->limit);
1213}
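/*
 * Worked example (editorial note, not part of the original source): for a
 * rule with "max-src-conn-rate 10/5", pf_init_threshold() stores
 * limit = 10 * PF_THRESHOLD_MULT and seconds = 5.  Each call to
 * pf_add_threshold() first ages the count linearly over the window --
 * after 2 idle seconds a count of 50 becomes 50 - (50 * 2) / 5 = 30 --
 * and then adds PF_THRESHOLD_MULT for the new connection.
 * pf_check_threshold() reports a violation once count exceeds limit,
 * i.e. once more than 10 connections have accumulated within the sliding
 * 5-second window.
 */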
1214
1215static int
1216pf_src_connlimit(struct pf_state **state)
1217{
1218 int bad = 0;
1219
1220 (*state)->src_node->conn++;
 1221 VERIFY((*state)->src_node->conn != 0);
1222 (*state)->src.tcp_est = 1;
1223 pf_add_threshold(&(*state)->src_node->conn_rate);
1224
1225 if ((*state)->rule.ptr->max_src_conn &&
1226 (*state)->rule.ptr->max_src_conn <
1227 (*state)->src_node->conn) {
1228 pf_status.lcounters[LCNT_SRCCONN]++;
1229 bad++;
1230 }
1231
1232 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
1233 pf_check_threshold(&(*state)->src_node->conn_rate)) {
1234 pf_status.lcounters[LCNT_SRCCONNRATE]++;
1235 bad++;
1236 }
1237
1238 if (!bad)
1239 return (0);
1240
1241 if ((*state)->rule.ptr->overload_tbl) {
1242 struct pfr_addr p;
1243 u_int32_t killed = 0;
1244
1245 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
1246 if (pf_status.debug >= PF_DEBUG_MISC) {
1247 printf("pf_src_connlimit: blocking address ");
1248 pf_print_host(&(*state)->src_node->addr, 0,
1249 (*state)->state_key->af);
1250 }
1251
1252 bzero(&p, sizeof (p));
1253 p.pfra_af = (*state)->state_key->af;
1254 switch ((*state)->state_key->af) {
1255#if INET
1256 case AF_INET:
1257 p.pfra_net = 32;
1258 p.pfra_ip4addr = (*state)->src_node->addr.v4;
1259 break;
1260#endif /* INET */
1261#if INET6
1262 case AF_INET6:
1263 p.pfra_net = 128;
1264 p.pfra_ip6addr = (*state)->src_node->addr.v6;
1265 break;
1266#endif /* INET6 */
1267 }
1268
1269 pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
 1270 &p, pf_calendar_time_second());
1271
1272 /* kill existing states if that's required. */
1273 if ((*state)->rule.ptr->flush) {
1274 struct pf_state_key *sk;
1275 struct pf_state *st;
1276
1277 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
1278 RB_FOREACH(st, pf_state_tree_id, &tree_id) {
1279 sk = st->state_key;
1280 /*
1281 * Kill states from this source. (Only those
1282 * from the same rule if PF_FLUSH_GLOBAL is not
1283 * set)
1284 */
1285 if (sk->af ==
1286 (*state)->state_key->af &&
1287 (((*state)->state_key->direction ==
1288 PF_OUT &&
1289 PF_AEQ(&(*state)->src_node->addr,
1290 &sk->lan.addr, sk->af)) ||
1291 ((*state)->state_key->direction == PF_IN &&
1292 PF_AEQ(&(*state)->src_node->addr,
1293 &sk->ext.addr, sk->af))) &&
1294 ((*state)->rule.ptr->flush &
1295 PF_FLUSH_GLOBAL ||
1296 (*state)->rule.ptr == st->rule.ptr)) {
1297 st->timeout = PFTM_PURGE;
1298 st->src.state = st->dst.state =
1299 TCPS_CLOSED;
1300 killed++;
1301 }
1302 }
1303 if (pf_status.debug >= PF_DEBUG_MISC)
1304 printf(", %u states killed", killed);
1305 }
1306 if (pf_status.debug >= PF_DEBUG_MISC)
1307 printf("\n");
1308 }
1309
1310 /* kill this state */
1311 (*state)->timeout = PFTM_PURGE;
1312 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
1313 return (1);
1314}
1315
1316int
1317pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
1318 struct pf_addr *src, sa_family_t af)
1319{
1320 struct pf_src_node k;
1321
1322 if (*sn == NULL) {
1323 k.af = af;
1324 PF_ACPY(&k.addr, src, af);
1325 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
1326 rule->rpool.opts & PF_POOL_STICKYADDR)
1327 k.rule.ptr = rule;
1328 else
1329 k.rule.ptr = NULL;
1330 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
1331 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
1332 }
1333 if (*sn == NULL) {
1334 if (!rule->max_src_nodes ||
1335 rule->src_nodes < rule->max_src_nodes)
1336 (*sn) = pool_get(&pf_src_tree_pl, PR_WAITOK);
1337 else
1338 pf_status.lcounters[LCNT_SRCNODES]++;
1339 if ((*sn) == NULL)
1340 return (-1);
1341 bzero(*sn, sizeof (struct pf_src_node));
1342
1343 pf_init_threshold(&(*sn)->conn_rate,
1344 rule->max_src_conn_rate.limit,
1345 rule->max_src_conn_rate.seconds);
1346
1347 (*sn)->af = af;
1348 if (rule->rule_flag & PFRULE_RULESRCTRACK ||
1349 rule->rpool.opts & PF_POOL_STICKYADDR)
1350 (*sn)->rule.ptr = rule;
1351 else
1352 (*sn)->rule.ptr = NULL;
1353 PF_ACPY(&(*sn)->addr, src, af);
1354 if (RB_INSERT(pf_src_tree,
1355 &tree_src_tracking, *sn) != NULL) {
1356 if (pf_status.debug >= PF_DEBUG_MISC) {
1357 printf("pf: src_tree insert failed: ");
1358 pf_print_host(&(*sn)->addr, 0, af);
1359 printf("\n");
1360 }
1361 pool_put(&pf_src_tree_pl, *sn);
1362 return (-1);
1363 }
1364 (*sn)->creation = pf_time_second();
1365 (*sn)->ruletype = rule->action;
1366 if ((*sn)->rule.ptr != NULL)
1367 (*sn)->rule.ptr->src_nodes++;
1368 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
1369 pf_status.src_nodes++;
1370 } else {
1371 if (rule->max_src_states &&
1372 (*sn)->states >= rule->max_src_states) {
1373 pf_status.lcounters[LCNT_SRCSTATES]++;
1374 return (-1);
1375 }
1376 }
1377 return (0);
1378}
1379
1380static void
1381pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif)
1382{
1383 struct pf_state_key *sk = s->state_key;
1384
1385 if (pf_status.debug >= PF_DEBUG_MISC) {
1386 printf("pf: state insert failed: %s %s ", tree, kif->pfik_name);
1387 switch (sk->proto) {
1388 case IPPROTO_TCP:
1389 printf("TCP");
1390 break;
1391 case IPPROTO_UDP:
1392 printf("UDP");
1393 break;
1394 case IPPROTO_ICMP:
1395 printf("ICMP4");
1396 break;
1397 case IPPROTO_ICMPV6:
1398 printf("ICMP6");
1399 break;
1400 default:
1401 printf("PROTO=%u", sk->proto);
1402 break;
1403 }
1404 printf(" lan: ");
1405 pf_print_sk_host(&sk->lan, sk->af, sk->proto,
1406 sk->proto_variant);
1407 printf(" gwy: ");
1408 pf_print_sk_host(&sk->gwy, sk->af, sk->proto,
1409 sk->proto_variant);
1410 printf(" ext: ");
1411 pf_print_sk_host(&sk->ext, sk->af, sk->proto,
1412 sk->proto_variant);
1413 if (s->sync_flags & PFSTATE_FROMSYNC)
1414 printf(" (from sync)");
1415 printf("\n");
1416 }
1417}
1418
1419int
1420pf_insert_state(struct pfi_kif *kif, struct pf_state *s)
1421{
1422 struct pf_state_key *cur;
1423 struct pf_state *sp;
1424
1425 VERIFY(s->state_key != NULL);
1426 s->kif = kif;
1427
1428 if ((cur = RB_INSERT(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
1429 s->state_key)) != NULL) {
1430 /* key exists. check for same kif, if none, add to key */
1431 TAILQ_FOREACH(sp, &cur->states, next)
1432 if (sp->kif == kif) { /* collision! */
1433 pf_stateins_err("tree_lan_ext", s, kif);
1434 pf_detach_state(s,
1435 PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
1436 return (-1);
1437 }
1438 pf_detach_state(s, PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
1439 pf_attach_state(cur, s, kif == pfi_all ? 1 : 0);
1440 }
1441
1442 /* if cur != NULL, we already found a state key and attached to it */
1443 if (cur == NULL && (cur = RB_INSERT(pf_state_tree_ext_gwy,
1444 &pf_statetbl_ext_gwy, s->state_key)) != NULL) {
1445 /* must not happen. we must have found the sk above! */
1446 pf_stateins_err("tree_ext_gwy", s, kif);
1447 pf_detach_state(s, PF_DT_SKIP_EXTGWY);
1448 return (-1);
1449 }
1450
1451 if (s->id == 0 && s->creatorid == 0) {
1452 s->id = htobe64(pf_status.stateid++);
1453 s->creatorid = pf_status.hostid;
1454 }
1455 if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
1456 if (pf_status.debug >= PF_DEBUG_MISC) {
1457 printf("pf: state insert failed: "
1458 "id: %016llx creatorid: %08x",
1459 be64toh(s->id), ntohl(s->creatorid));
1460 if (s->sync_flags & PFSTATE_FROMSYNC)
1461 printf(" (from sync)");
1462 printf("\n");
1463 }
1464 pf_detach_state(s, 0);
1465 return (-1);
1466 }
1467 TAILQ_INSERT_TAIL(&state_list, s, entry_list);
1468 pf_status.fcounters[FCNT_STATE_INSERT]++;
1469 pf_status.states++;
 1470 VERIFY(pf_status.states != 0);
1471 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
1472#if NPFSYNC
1473 pfsync_insert_state(s);
1474#endif
1475 return (0);
1476}
1477
1478static int
1479pf_purge_thread_cont(int err)
 1480{
1481#pragma unused(err)
1482 static u_int32_t nloops = 0;
1483 int t = 1; /* 1 second */
1484
1485 /*
1486 * Update coarse-grained networking timestamp (in sec.); the idea
1487 * is to piggy-back on the periodic timeout callout to update
1488 * the counter returnable via net_uptime().
1489 */
1490 net_update_uptime();
1491
1492 lck_rw_lock_shared(pf_perim_lock);
1493 lck_mtx_lock(pf_lock);
1494
1495 /* purge everything if not running */
1496 if (!pf_status.running) {
1497 pf_purge_expired_states(pf_status.states);
1498 pf_purge_expired_fragments();
1499 pf_purge_expired_src_nodes();
1500
1501 /* terminate thread (we don't currently do this) */
1502 if (pf_purge_thread == NULL) {
1503 lck_mtx_unlock(pf_lock);
1504 lck_rw_done(pf_perim_lock);
1505
1506 thread_deallocate(current_thread());
1507 thread_terminate(current_thread());
1508 /* NOTREACHED */
1509 return (0);
1510 } else {
1511 /* if there's nothing left, sleep w/o timeout */
1512 if (pf_status.states == 0 &&
1513 pf_normalize_isempty() &&
1514 RB_EMPTY(&tree_src_tracking)) {
1515 nloops = 0;
1516 t = 0;
 1517 }
 1518 goto done;
 1519 }
 1520 }
 1521
1522 /* process a fraction of the state table every second */
1523 pf_purge_expired_states(1 + (pf_status.states
1524 / pf_default_rule.timeout[PFTM_INTERVAL]));
 1525
1526 /* purge other expired types every PFTM_INTERVAL seconds */
1527 if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
1528 pf_purge_expired_fragments();
1529 pf_purge_expired_src_nodes();
1530 nloops = 0;
 1531 }
1532done:
1533 lck_mtx_unlock(pf_lock);
1534 lck_rw_done(pf_perim_lock);
1535
1536 (void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge_cont",
1537 t * hz, pf_purge_thread_cont);
1538 /* NOTREACHED */
1539 VERIFY(0);
1540
1541 return (0);
1542}
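/*
 * Editorial note (not part of the original source): a worked example of
 * the purge pacing above.  With the default PFTM_INTERVAL of 10 seconds
 * and, say, 50000 tracked states, each one-second pass checks
 * 1 + 50000 / 10 = 5001 states, so the whole table is swept roughly once
 * per interval, while expired fragments and source nodes are only purged
 * on every 10th pass (when nloops reaches the interval).
 */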
1543
1544void
1545pf_purge_thread_fn(void *v, wait_result_t w)
1546{
1547#pragma unused(v, w)
1548 (void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge", 0,
1549 pf_purge_thread_cont);
1550 /*
1551 * tsleep0() shouldn't have returned as PCATCH was not set;
1552 * therefore assert in this case.
1553 */
1554 VERIFY(0);
1555}
1556
1557u_int64_t
1558pf_state_expires(const struct pf_state *state)
1559{
1560 u_int32_t t;
1561 u_int32_t start;
1562 u_int32_t end;
1563 u_int32_t states;
1564
1565 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1566
1567 /* handle all PFTM_* > PFTM_MAX here */
1568 if (state->timeout == PFTM_PURGE)
1569 return (pf_time_second());
1570 if (state->timeout == PFTM_UNTIL_PACKET)
1571 return (0);
1572 VERIFY(state->timeout != PFTM_UNLINKED);
1573 VERIFY(state->timeout < PFTM_MAX);
1574 t = state->rule.ptr->timeout[state->timeout];
1575 if (!t)
1576 t = pf_default_rule.timeout[state->timeout];
1577 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1578 if (start) {
1579 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1580 states = state->rule.ptr->states;
1581 } else {
1582 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1583 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1584 states = pf_status.states;
1585 }
1586 if (end && states > start && start < end) {
1587 if (states < end)
1588 return (state->expire + t * (end - states) /
1589 (end - start));
1590 else
1591 return (pf_time_second());
1592 }
1593 return (state->expire + t);
1594}
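/*
 * Worked example (editorial note, not part of the original source) of the
 * adaptive scaling above: with a base timeout t = 86400s
 * (tcp.established), adaptive.start = 6000, adaptive.end = 12000 and
 * 9000 tracked states, the state expires at
 * expire + 86400 * (12000 - 9000) / (12000 - 6000) = expire + 43200;
 * the idle timeout shrinks linearly as the state table fills and
 * collapses to "expire now" once states >= end.
 */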
1595
1596void
1597pf_purge_expired_src_nodes(void)
1598{
1599 struct pf_src_node *cur, *next;
1600
1601 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1602
1603 for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
1604 next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
1605
1606 if (cur->states <= 0 && cur->expire <= pf_time_second()) {
1607 if (cur->rule.ptr != NULL) {
1608 cur->rule.ptr->src_nodes--;
1609 if (cur->rule.ptr->states <= 0 &&
1610 cur->rule.ptr->max_src_nodes <= 0)
1611 pf_rm_rule(NULL, cur->rule.ptr);
1612 }
1613 RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
1614 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
1615 pf_status.src_nodes--;
1616 pool_put(&pf_src_tree_pl, cur);
1617 }
1618 }
1619}
1620
1621void
1622pf_src_tree_remove_state(struct pf_state *s)
1623{
1624 u_int32_t t;
1625
1626 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1627
1628 if (s->src_node != NULL) {
1629 if (s->src.tcp_est) {
1630 VERIFY(s->src_node->conn > 0);
 1631 --s->src_node->conn;
1632 }
1633 VERIFY(s->src_node->states > 0);
1634 if (--s->src_node->states <= 0) {
1635 t = s->rule.ptr->timeout[PFTM_SRC_NODE];
1636 if (!t)
1637 t = pf_default_rule.timeout[PFTM_SRC_NODE];
1638 s->src_node->expire = pf_time_second() + t;
1639 }
1640 }
1641 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
 1642 VERIFY(s->nat_src_node->states > 0);
1643 if (--s->nat_src_node->states <= 0) {
1644 t = s->rule.ptr->timeout[PFTM_SRC_NODE];
1645 if (!t)
1646 t = pf_default_rule.timeout[PFTM_SRC_NODE];
1647 s->nat_src_node->expire = pf_time_second() + t;
1648 }
1649 }
1650 s->src_node = s->nat_src_node = NULL;
1651}
1652
1653void
1654pf_unlink_state(struct pf_state *cur)
1655{
1656 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1657
1658 if (cur->src.state == PF_TCPS_PROXY_DST) {
1659 pf_send_tcp(cur->rule.ptr, cur->state_key->af,
1660 &cur->state_key->ext.addr, &cur->state_key->lan.addr,
1661 cur->state_key->ext.xport.port,
1662 cur->state_key->lan.xport.port,
1663 cur->src.seqhi, cur->src.seqlo + 1,
1664 TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
1665 }
1666
1667 hook_runloop(&cur->unlink_hooks, HOOK_REMOVE|HOOK_FREE);
1668 RB_REMOVE(pf_state_tree_id, &tree_id, cur);
1669#if NPFSYNC
1670 if (cur->creatorid == pf_status.hostid)
1671 pfsync_delete_state(cur);
1672#endif
1673 cur->timeout = PFTM_UNLINKED;
1674 pf_src_tree_remove_state(cur);
1675 pf_detach_state(cur, 0);
1676}
1677
1678/* callers should be at splpf and hold the
1679 * write_lock on pf_consistency_lock */
1680void
1681pf_free_state(struct pf_state *cur)
1682{
1683 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1684#if NPFSYNC
1685 if (pfsyncif != NULL &&
1686 (pfsyncif->sc_bulk_send_next == cur ||
1687 pfsyncif->sc_bulk_terminator == cur))
1688 return;
1689#endif
1690 VERIFY(cur->timeout == PFTM_UNLINKED);
 1691 VERIFY(cur->rule.ptr->states > 0);
1692 if (--cur->rule.ptr->states <= 0 &&
1693 cur->rule.ptr->src_nodes <= 0)
1694 pf_rm_rule(NULL, cur->rule.ptr);
1695 if (cur->nat_rule.ptr != NULL) {
1696 VERIFY(cur->nat_rule.ptr->states > 0);
1697 if (--cur->nat_rule.ptr->states <= 0 &&
1698 cur->nat_rule.ptr->src_nodes <= 0)
1699 pf_rm_rule(NULL, cur->nat_rule.ptr);
1700 }
1701 if (cur->anchor.ptr != NULL) {
1702 VERIFY(cur->anchor.ptr->states > 0);
1703 if (--cur->anchor.ptr->states <= 0)
1704 pf_rm_rule(NULL, cur->anchor.ptr);
 1705 }
1706 pf_normalize_tcp_cleanup(cur);
1707 pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
1708 TAILQ_REMOVE(&state_list, cur, entry_list);
1709 if (cur->tag)
1710 pf_tag_unref(cur->tag);
1711 pool_put(&pf_state_pl, cur);
1712 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
 1713 VERIFY(pf_status.states > 0);
1714 pf_status.states--;
1715}
1716
1717void
1718pf_purge_expired_states(u_int32_t maxcheck)
1719{
1720 static struct pf_state *cur = NULL;
1721 struct pf_state *next;
1722
1723 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1724
1725 while (maxcheck--) {
1726 /* wrap to start of list when we hit the end */
1727 if (cur == NULL) {
1728 cur = TAILQ_FIRST(&state_list);
1729 if (cur == NULL)
1730 break; /* list empty */
1731 }
1732
1733 /* get next state, as cur may get deleted */
1734 next = TAILQ_NEXT(cur, entry_list);
1735
1736 if (cur->timeout == PFTM_UNLINKED) {
1737 pf_free_state(cur);
1738 } else if (pf_state_expires(cur) <= pf_time_second()) {
1739 /* unlink and free expired state */
1740 pf_unlink_state(cur);
1741 pf_free_state(cur);
1742 }
1743 cur = next;
1744 }
1745}
1746
1747int
1748pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1749{
1750 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1751
1752 if (aw->type != PF_ADDR_TABLE)
1753 return (0);
1754 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
1755 return (1);
1756 return (0);
1757}
1758
1759void
1760pf_tbladdr_remove(struct pf_addr_wrap *aw)
1761{
1762 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1763
1764 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1765 return;
1766 pfr_detach_table(aw->p.tbl);
1767 aw->p.tbl = NULL;
1768}
1769
1770void
1771pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1772{
1773 struct pfr_ktable *kt = aw->p.tbl;
1774
1775 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
1776
1777 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1778 return;
1779 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1780 kt = kt->pfrkt_root;
1781 aw->p.tbl = NULL;
1782 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1783 kt->pfrkt_cnt : -1;
1784}
1785
1786static void
1787pf_print_addr(struct pf_addr *addr, sa_family_t af)
1788{
1789 switch (af) {
1790#if INET
1791 case AF_INET: {
1792 u_int32_t a = ntohl(addr->addr32[0]);
1793 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
1794 (a>>8)&255, a&255);
1795 break;
1796 }
1797#endif /* INET */
1798#if INET6
1799 case AF_INET6: {
1800 u_int16_t b;
1801 u_int8_t i, curstart = 255, curend = 0,
1802 maxstart = 0, maxend = 0;
1803 for (i = 0; i < 8; i++) {
1804 if (!addr->addr16[i]) {
1805 if (curstart == 255)
1806 curstart = i;
1807 else
1808 curend = i;
1809 } else {
1810 if (curstart) {
1811 if ((curend - curstart) >
1812 (maxend - maxstart)) {
1813 maxstart = curstart;
1814 maxend = curend;
1815 curstart = 255;
1816 }
1817 }
1818 }
1819 }
1820 for (i = 0; i < 8; i++) {
1821 if (i >= maxstart && i <= maxend) {
1822 if (maxend != 7) {
1823 if (i == maxstart)
1824 printf(":");
1825 } else {
1826 if (i == maxend)
1827 printf(":");
1828 }
1829 } else {
1830 b = ntohs(addr->addr16[i]);
1831 printf("%x", b);
1832 if (i < 7)
1833 printf(":");
1834 }
1835 }
1836 break;
1837 }
1838#endif /* INET6 */
1839 }
1840}
1841
1842static void
1843pf_print_sk_host(struct pf_state_host *sh, sa_family_t af, int proto,
1844 u_int8_t proto_variant)
1845{
1846 pf_print_addr(&sh->addr, af);
1847
1848 switch (proto) {
1849 case IPPROTO_ESP:
1850 if (sh->xport.spi)
1851 printf("[%08x]", ntohl(sh->xport.spi));
1852 break;
1853
1854 case IPPROTO_GRE:
1855 if (proto_variant == PF_GRE_PPTP_VARIANT)
1856 printf("[%u]", ntohs(sh->xport.call_id));
1857 break;
1858
1859 case IPPROTO_TCP:
1860 case IPPROTO_UDP:
1861 printf("[%u]", ntohs(sh->xport.port));
1862 break;
1863
1864 default:
1865 break;
1866 }
1867}
1868
1869static void
1870pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1871{
1872 pf_print_addr(addr, af);
1873 if (p)
1874 printf("[%u]", ntohs(p));
1875}
1876
1877void
1878pf_print_state(struct pf_state *s)
1879{
1880 struct pf_state_key *sk = s->state_key;
1881 switch (sk->proto) {
1882 case IPPROTO_ESP:
1883 printf("ESP ");
1884 break;
1885 case IPPROTO_GRE:
1886 printf("GRE%u ", sk->proto_variant);
1887 break;
1888 case IPPROTO_TCP:
1889 printf("TCP ");
1890 break;
1891 case IPPROTO_UDP:
1892 printf("UDP ");
1893 break;
1894 case IPPROTO_ICMP:
1895 printf("ICMP ");
1896 break;
1897 case IPPROTO_ICMPV6:
1898 printf("ICMPV6 ");
1899 break;
1900 default:
1901 printf("%u ", sk->proto);
1902 break;
1903 }
1904 pf_print_sk_host(&sk->lan, sk->af, sk->proto, sk->proto_variant);
1905 printf(" ");
1906 pf_print_sk_host(&sk->gwy, sk->af, sk->proto, sk->proto_variant);
1907 printf(" ");
1908 pf_print_sk_host(&sk->ext, sk->af, sk->proto, sk->proto_variant);
b0d623f7
A
1909 printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
1910 s->src.seqhi, s->src.max_win, s->src.seqdiff);
1911 if (s->src.wscale && s->dst.wscale)
1912 printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
1913 printf("]");
1914 printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
1915 s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
1916 if (s->src.wscale && s->dst.wscale)
1917 printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
1918 printf("]");
1919 printf(" %u:%u", s->src.state, s->dst.state);
1920}
1921
1922void
1923pf_print_flags(u_int8_t f)
1924{
1925 if (f)
1926 printf(" ");
1927 if (f & TH_FIN)
1928 printf("F");
1929 if (f & TH_SYN)
1930 printf("S");
1931 if (f & TH_RST)
1932 printf("R");
1933 if (f & TH_PUSH)
1934 printf("P");
1935 if (f & TH_ACK)
1936 printf("A");
1937 if (f & TH_URG)
1938 printf("U");
1939 if (f & TH_ECE)
1940 printf("E");
1941 if (f & TH_CWR)
1942 printf("W");
1943}
1944
1945#define PF_SET_SKIP_STEPS(i) \
1946 do { \
1947 while (head[i] != cur) { \
1948 head[i]->skip[i].ptr = cur; \
1949 head[i] = TAILQ_NEXT(head[i], entries); \
1950 } \
1951 } while (0)
1952
1953void
1954pf_calc_skip_steps(struct pf_rulequeue *rules)
1955{
1956 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
1957 int i;
1958
1959 cur = TAILQ_FIRST(rules);
1960 prev = cur;
1961 for (i = 0; i < PF_SKIP_COUNT; ++i)
1962 head[i] = cur;
1963 while (cur != NULL) {
1964
1965 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
1966 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
1967 if (cur->direction != prev->direction)
1968 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
1969 if (cur->af != prev->af)
1970 PF_SET_SKIP_STEPS(PF_SKIP_AF);
1971 if (cur->proto != prev->proto)
1972 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
1973 if (cur->src.neg != prev->src.neg ||
1974 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
1975 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
b0d623f7
A
1976 {
1977 union pf_rule_xport *cx = &cur->src.xport;
1978 union pf_rule_xport *px = &prev->src.xport;
1979
1980 switch (cur->proto) {
1981 case IPPROTO_GRE:
1982 case IPPROTO_ESP:
1983 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1984 break;
1985 default:
1986 if (prev->proto == IPPROTO_GRE ||
1987 prev->proto == IPPROTO_ESP ||
1988 cx->range.op != px->range.op ||
1989 cx->range.port[0] != px->range.port[0] ||
1990 cx->range.port[1] != px->range.port[1])
1991 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
1992 break;
1993 }
1994 }
b0d623f7
A
1995 if (cur->dst.neg != prev->dst.neg ||
1996 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
1997 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
b0d623f7
A
1998 {
1999 union pf_rule_xport *cx = &cur->dst.xport;
2000 union pf_rule_xport *px = &prev->dst.xport;
2001
2002 switch (cur->proto) {
2003 case IPPROTO_GRE:
2004 if (cur->proto != prev->proto ||
2005 cx->call_id != px->call_id)
2006 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2007 break;
2008 case IPPROTO_ESP:
2009 if (cur->proto != prev->proto ||
2010 cx->spi != px->spi)
2011 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2012 break;
2013 default:
2014 if (prev->proto == IPPROTO_GRE ||
2015 prev->proto == IPPROTO_ESP ||
2016 cx->range.op != px->range.op ||
2017 cx->range.port[0] != px->range.port[0] ||
2018 cx->range.port[1] != px->range.port[1])
2019 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2020 break;
2021 }
2022 }
b0d623f7
A
2023
2024 prev = cur;
2025 cur = TAILQ_NEXT(cur, entries);
2026 }
2027 for (i = 0; i < PF_SKIP_COUNT; ++i)
2028 PF_SET_SKIP_STEPS(i);
2029}
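
The skip steps computed above let rule evaluation jump over runs of consecutive rules that agree on a field. Below is a minimal standalone sketch of that idea for a single field; the toy_* names and the one-field rule structure are illustrative assumptions, not pf's own data structures.

	/* Illustrative only: minimal skip-step computation over one field. */
	#include <stddef.h>

	struct toy_rule {
		int		proto;		/* the field this skip step covers */
		struct toy_rule	*skip_proto;	/* first later rule with a different proto */
		struct toy_rule	*next;
	};

	static void
	toy_calc_skip(struct toy_rule *head)
	{
		struct toy_rule *run = head, *cur;

		for (cur = head; cur != NULL; cur = cur->next) {
			if (cur->proto != run->proto) {
				struct toy_rule *r;
				for (r = run; r != cur; r = r->next)
					r->skip_proto = cur;	/* jump past the whole run */
				run = cur;
			}
		}
		for (; run != NULL; run = run->next)
			run->skip_proto = NULL;			/* tail of the list */
	}
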
2030
316670eb
A
2031u_int32_t
2032pf_calc_state_key_flowhash(struct pf_state_key *sk)
2033{
2034 struct pf_flowhash_key fh __attribute__((aligned(8)));
39236c6e 2035 uint32_t flowhash = 0;
316670eb
A
2036
2037 bzero(&fh, sizeof (fh));
2038 if (PF_ALEQ(&sk->lan.addr, &sk->ext.addr, sk->af)) {
2039 bcopy(&sk->lan.addr, &fh.ap1.addr, sizeof (fh.ap1.addr));
2040 bcopy(&sk->ext.addr, &fh.ap2.addr, sizeof (fh.ap2.addr));
2041 } else {
2042 bcopy(&sk->ext.addr, &fh.ap1.addr, sizeof (fh.ap1.addr));
2043 bcopy(&sk->lan.addr, &fh.ap2.addr, sizeof (fh.ap2.addr));
2044 }
2045 if (sk->lan.xport.spi <= sk->ext.xport.spi) {
2046 fh.ap1.xport.spi = sk->lan.xport.spi;
2047 fh.ap2.xport.spi = sk->ext.xport.spi;
2048 } else {
2049 fh.ap1.xport.spi = sk->ext.xport.spi;
2050 fh.ap2.xport.spi = sk->lan.xport.spi;
2051 }
2052 fh.af = sk->af;
2053 fh.proto = sk->proto;
2054
39236c6e
A
2055try_again:
2056 flowhash = net_flowhash(&fh, sizeof (fh), pf_hash_seed);
2057 if (flowhash == 0) {
2058 /* try to get a non-zero flowhash */
2059 pf_hash_seed = RandomULong();
2060 goto try_again;
2061 }
2062
2063 return (flowhash);
316670eb
A
2064}
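
pf_calc_state_key_flowhash() orders the endpoints (and SPI/port values) before hashing so both directions of a connection map to the same flow hash. The sketch below shows that ordering trick with a stand-in hash; the toy_* names and the FNV-1a placeholder for net_flowhash() are assumptions for illustration, and unlike pf it orders the (address, port) pair lexicographically.

	/* Illustrative only: direction-independent flow hashing. */
	#include <stdint.h>
	#include <string.h>

	struct toy_flow_key {
		uint32_t addr1, addr2;		/* canonically ordered endpoints */
		uint16_t port1, port2;
		uint8_t  proto;
	};

	static uint32_t
	toy_hash32(const void *buf, size_t len, uint32_t seed)
	{
		/* FNV-1a, used here only as a placeholder for net_flowhash(). */
		const uint8_t *p = buf;
		uint32_t h = 2166136261u ^ seed;

		while (len-- > 0) {
			h ^= *p++;
			h *= 16777619u;
		}
		return (h);
	}

	static uint32_t
	toy_flowhash(uint32_t sa, uint16_t sp, uint32_t da, uint16_t dp, uint8_t proto)
	{
		struct toy_flow_key k;

		memset(&k, 0, sizeof (k));
		/* order the tuple so (sa,sp,da,dp) and (da,dp,sa,sp) hash alike */
		if (sa < da || (sa == da && sp <= dp)) {
			k.addr1 = sa; k.port1 = sp;
			k.addr2 = da; k.port2 = dp;
		} else {
			k.addr1 = da; k.port1 = dp;
			k.addr2 = sa; k.port2 = sp;
		}
		k.proto = proto;
		return (toy_hash32(&k, sizeof (k), 0));
	}
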
2065
b0d623f7
A
2066static int
2067pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2068{
2069 if (aw1->type != aw2->type)
2070 return (1);
2071 switch (aw1->type) {
2072 case PF_ADDR_ADDRMASK:
2073 case PF_ADDR_RANGE:
2074 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
2075 return (1);
2076 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
2077 return (1);
2078 return (0);
2079 case PF_ADDR_DYNIFTL:
39236c6e
A
2080 return (aw1->p.dyn == NULL || aw2->p.dyn == NULL ||
2081 aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
b0d623f7
A
2082 case PF_ADDR_NOROUTE:
2083 case PF_ADDR_URPFFAILED:
2084 return (0);
2085 case PF_ADDR_TABLE:
2086 return (aw1->p.tbl != aw2->p.tbl);
2087 case PF_ADDR_RTLABEL:
2088 return (aw1->v.rtlabel != aw2->v.rtlabel);
2089 default:
2090 printf("invalid address type: %d\n", aw1->type);
2091 return (1);
2092 }
2093}
2094
2095u_int16_t
2096pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2097{
2098 u_int32_t l;
2099
2100 if (udp && !cksum)
2101 return (0);
2102 l = cksum + old - new;
2103 l = (l >> 16) + (l & 0xffff);
2104 l = l & 0xffff;
2105 if (udp && !l)
2106 return (0xffff);
2107 return (l);
2108}
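
pf_cksum_fixup() applies the incremental one's-complement update: fold the old 16-bit word out of the checksum, fold the new one in, and propagate the carry. A minimal userland sketch of the same arithmetic, assuming host-order values and omitting the UDP zero-checksum special case:

	#include <stdint.h>

	/* Illustrative only; mirrors the arithmetic of pf_cksum_fixup() above. */
	static uint16_t
	toy_cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new)
	{
		uint32_t l = cksum + old - new;		/* fold old out, new in */

		l = (l >> 16) + (l & 0xffff);		/* end-around carry */
		return ((uint16_t)(l & 0xffff));
	}

	/*
	 * e.g. rewriting a port from 80 (0x0050) to 8080 (0x1f90) adjusts the
	 * checksum without re-summing the whole packet.
	 */
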
2109
2110static void
2111pf_change_ap(int dir, struct mbuf *m, struct pf_addr *a, u_int16_t *p,
2112 u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn,
2113 u_int8_t u, sa_family_t af)
2114{
2115 struct pf_addr ao;
2116 u_int16_t po = *p;
2117
2118 PF_ACPY(&ao, a, af);
2119 PF_ACPY(a, an, af);
2120
2121 *p = pn;
2122
2123 switch (af) {
2124#if INET
2125 case AF_INET:
2126 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2127 ao.addr16[0], an->addr16[0], 0),
2128 ao.addr16[1], an->addr16[1], 0);
2129 *p = pn;
2130		/*
2131		 * If the packet originates from an ALG on the NAT gateway
2132		 * (source address is loopback or local), the TCP/UDP checksum
2133		 * field contains the pseudo-header checksum, which has not
2134		 * yet been complemented.
2135		 */
2136 if (dir == PF_OUT && m != NULL &&
2137 (m->m_flags & M_PKTHDR) &&
2138 (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) {
2139 /* Pseudo-header checksum does not include ports */
2140 *pc = ~pf_cksum_fixup(pf_cksum_fixup(~*pc,
2141 ao.addr16[0], an->addr16[0], u),
2142 ao.addr16[1], an->addr16[1], u);
2143 } else {
2144 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2145 ao.addr16[0], an->addr16[0], u),
2146 ao.addr16[1], an->addr16[1], u),
2147 po, pn, u);
2148 }
2149 break;
2150#endif /* INET */
2151#if INET6
2152 case AF_INET6:
6d2010ae
A
2153		/*
2154		 * If the packet originates from an ALG on the NAT gateway
2155		 * (source address is loopback or local), the TCP/UDP checksum
2156		 * field contains the pseudo-header checksum, which has not
2157		 * yet been complemented.
2158		 */
2159 if (dir == PF_OUT && m != NULL &&
2160 (m->m_flags & M_PKTHDR) &&
2161 (m->m_pkthdr.csum_flags & (CSUM_TCPIPV6 | CSUM_UDPIPV6))) {
2162 /* Pseudo-header checksum does not include ports */
2163 *pc = ~pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2164 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2165 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(~*pc,
2166 ao.addr16[0], an->addr16[0], u),
2167 ao.addr16[1], an->addr16[1], u),
2168 ao.addr16[2], an->addr16[2], u),
2169 ao.addr16[3], an->addr16[3], u),
2170 ao.addr16[4], an->addr16[4], u),
2171 ao.addr16[5], an->addr16[5], u),
2172 ao.addr16[6], an->addr16[6], u),
2173 ao.addr16[7], an->addr16[7], u),
2174 po, pn, u);
2175 } else {
2176 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2177 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2178 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
2179 ao.addr16[0], an->addr16[0], u),
2180 ao.addr16[1], an->addr16[1], u),
2181 ao.addr16[2], an->addr16[2], u),
2182 ao.addr16[3], an->addr16[3], u),
2183 ao.addr16[4], an->addr16[4], u),
2184 ao.addr16[5], an->addr16[5], u),
2185 ao.addr16[6], an->addr16[6], u),
2186 ao.addr16[7], an->addr16[7], u),
2187 po, pn, u);
2188 }
b0d623f7
A
2189 break;
2190#endif /* INET6 */
2191 }
2192}
2193
2194
2195/* Changes a u_int32_t. Uses a void * so there are no alignment restrictions. */
2196void
2197pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2198{
2199 u_int32_t ao;
2200
2201 memcpy(&ao, a, sizeof (ao));
2202 memcpy(a, &an, sizeof (u_int32_t));
2203 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2204 ao % 65536, an % 65536, u);
2205}
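
The division and modulo by 65536 above simply split the 32-bit word into its high and low 16-bit halves, so the 32-bit change is fed to the checksum fixup as two 16-bit changes. A tiny illustrative check with an assumed value (not pf code):

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative only: how pf_change_a() splits a 32-bit word. */
	static void
	toy_split_example(void)
	{
		uint32_t v = 0x0A000001u;	/* e.g. 10.0.0.1 in host order */

		assert(v / 65536 == 0x0A00);	/* high 16 bits */
		assert(v % 65536 == 0x0001);	/* low 16 bits */
	}
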
2206
2207#if INET6
2208static void
2209pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2210{
2211 struct pf_addr ao;
2212
2213 PF_ACPY(&ao, a, AF_INET6);
2214 PF_ACPY(a, an, AF_INET6);
2215
2216 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2217 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2218 pf_cksum_fixup(pf_cksum_fixup(*c,
2219 ao.addr16[0], an->addr16[0], u),
2220 ao.addr16[1], an->addr16[1], u),
2221 ao.addr16[2], an->addr16[2], u),
2222 ao.addr16[3], an->addr16[3], u),
2223 ao.addr16[4], an->addr16[4], u),
2224 ao.addr16[5], an->addr16[5], u),
2225 ao.addr16[6], an->addr16[6], u),
2226 ao.addr16[7], an->addr16[7], u);
2227}
2228#endif /* INET6 */
2229
2230static void
2231pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2232 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2233 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2234{
2235 struct pf_addr oia, ooa;
2236
2237 PF_ACPY(&oia, ia, af);
2238 PF_ACPY(&ooa, oa, af);
2239
2240 /* Change inner protocol port, fix inner protocol checksum. */
2241 if (ip != NULL) {
2242 u_int16_t oip = *ip;
2243 u_int32_t opc = 0;
2244
2245 if (pc != NULL)
2246 opc = *pc;
2247 *ip = np;
2248 if (pc != NULL)
2249 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2250 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2251 if (pc != NULL)
2252 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2253 }
2254 /* Change inner ip address, fix inner ip and icmp checksums. */
2255 PF_ACPY(ia, na, af);
2256 switch (af) {
2257#if INET
2258 case AF_INET: {
2259 u_int32_t oh2c = *h2c;
2260
2261 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2262 oia.addr16[0], ia->addr16[0], 0),
2263 oia.addr16[1], ia->addr16[1], 0);
2264 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2265 oia.addr16[0], ia->addr16[0], 0),
2266 oia.addr16[1], ia->addr16[1], 0);
2267 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2268 break;
2269 }
2270#endif /* INET */
2271#if INET6
2272 case AF_INET6:
2273 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2274 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2275 pf_cksum_fixup(pf_cksum_fixup(*ic,
2276 oia.addr16[0], ia->addr16[0], u),
2277 oia.addr16[1], ia->addr16[1], u),
2278 oia.addr16[2], ia->addr16[2], u),
2279 oia.addr16[3], ia->addr16[3], u),
2280 oia.addr16[4], ia->addr16[4], u),
2281 oia.addr16[5], ia->addr16[5], u),
2282 oia.addr16[6], ia->addr16[6], u),
2283 oia.addr16[7], ia->addr16[7], u);
2284 break;
2285#endif /* INET6 */
2286 }
2287 /* Change outer ip address, fix outer ip or icmpv6 checksum. */
2288 PF_ACPY(oa, na, af);
2289 switch (af) {
2290#if INET
2291 case AF_INET:
2292 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2293 ooa.addr16[0], oa->addr16[0], 0),
2294 ooa.addr16[1], oa->addr16[1], 0);
2295 break;
2296#endif /* INET */
2297#if INET6
2298 case AF_INET6:
2299 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2300 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2301 pf_cksum_fixup(pf_cksum_fixup(*ic,
2302 ooa.addr16[0], oa->addr16[0], u),
2303 ooa.addr16[1], oa->addr16[1], u),
2304 ooa.addr16[2], oa->addr16[2], u),
2305 ooa.addr16[3], oa->addr16[3], u),
2306 ooa.addr16[4], oa->addr16[4], u),
2307 ooa.addr16[5], oa->addr16[5], u),
2308 ooa.addr16[6], oa->addr16[6], u),
2309 ooa.addr16[7], oa->addr16[7], u);
2310 break;
2311#endif /* INET6 */
2312 }
2313}
2314
2315
2316/*
2317 * Need to modulate the sequence numbers in the TCP SACK option
2318 * (credits to Krzysztof Pfaff for report and patch)
2319 */
2320static int
2321pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2322 struct tcphdr *th, struct pf_state_peer *dst)
2323{
2324 int hlen = (th->th_off << 2) - sizeof (*th), thoptlen = hlen;
2325 u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
2326 int copyback = 0, i, olen;
2327 struct sackblk sack;
2328
2329#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2330 if (hlen < TCPOLEN_SACKLEN ||
2331 !pf_pull_hdr(m, off + sizeof (*th), opts, hlen, NULL, NULL, pd->af))
2332 return (0);
2333
2334 while (hlen >= TCPOLEN_SACKLEN) {
2335 olen = opt[1];
2336 switch (*opt) {
2337 case TCPOPT_EOL: /* FALLTHROUGH */
2338 case TCPOPT_NOP:
2339 opt++;
2340 hlen--;
2341 break;
2342 case TCPOPT_SACK:
2343 if (olen > hlen)
2344 olen = hlen;
2345 if (olen >= TCPOLEN_SACKLEN) {
2346 for (i = 2; i + TCPOLEN_SACK <= olen;
2347 i += TCPOLEN_SACK) {
2348 memcpy(&sack, &opt[i], sizeof (sack));
2349 pf_change_a(&sack.start, &th->th_sum,
2350 htonl(ntohl(sack.start) -
2351 dst->seqdiff), 0);
2352 pf_change_a(&sack.end, &th->th_sum,
2353 htonl(ntohl(sack.end) -
2354 dst->seqdiff), 0);
2355 memcpy(&opt[i], &sack, sizeof (sack));
2356 }
b0d623f7 2357 copyback = off + sizeof (*th) + thoptlen;
b0d623f7
A
2358 }
2359 /* FALLTHROUGH */
2360 default:
2361 if (olen < 2)
2362 olen = 2;
2363 hlen -= olen;
2364 opt += olen;
2365 }
2366 }
2367
b0d623f7
A
2368 if (copyback) {
2369 m = pf_lazy_makewritable(pd, m, copyback);
2370 if (!m)
2371 return (-1);
2372 m_copyback(m, off + sizeof (*th), thoptlen, opts);
2373 }
b0d623f7
A
2374 return (copyback);
2375}
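
Each SACK block edge is shifted by the peer's seqdiff so the selective acknowledgments stay consistent with the modulated sequence numbers. A worked example with assumed numbers:

	/*
	 * Illustrative only (assumed values): with dst->seqdiff = 1000, a SACK
	 * block of [5000, 6000] seen on the wire is rewritten to [4000, 5000]
	 * before being passed on, so it refers to the un-modulated sequence
	 * space the other endpoint actually used.
	 */
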
2376
2377static void
2378pf_send_tcp(const struct pf_rule *r, sa_family_t af,
2379 const struct pf_addr *saddr, const struct pf_addr *daddr,
2380 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2381 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2382 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
2383{
2384#pragma unused(eh, ifp)
2385 struct mbuf *m;
2386 int len, tlen;
2387#if INET
2388 struct ip *h = NULL;
2389#endif /* INET */
2390#if INET6
2391 struct ip6_hdr *h6 = NULL;
2392#endif /* INET6 */
2393 struct tcphdr *th = NULL;
2394 char *opt;
2395 struct pf_mtag *pf_mtag;
2396
2397 /* maximum segment size tcp option */
2398 tlen = sizeof (struct tcphdr);
2399 if (mss)
2400 tlen += 4;
2401
2402 switch (af) {
2403#if INET
2404 case AF_INET:
2405 len = sizeof (struct ip) + tlen;
2406 break;
2407#endif /* INET */
2408#if INET6
2409 case AF_INET6:
2410 len = sizeof (struct ip6_hdr) + tlen;
2411 break;
2412#endif /* INET6 */
2413 default:
2414 panic("pf_send_tcp: not AF_INET or AF_INET6!");
2415 return;
2416 }
2417
2418 /* create outgoing mbuf */
2419 m = m_gethdr(M_DONTWAIT, MT_HEADER);
2420 if (m == NULL)
2421 return;
2422
2423 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2424 m_free(m);
2425 return;
2426 }
2427
2428 if (tag)
316670eb
A
2429 pf_mtag->pftag_flags |= PF_TAG_GENERATED;
2430 pf_mtag->pftag_tag = rtag;
b0d623f7
A
2431
2432 if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid))
316670eb 2433 pf_mtag->pftag_rtableid = r->rtableid;
b0d623f7 2434
316670eb
A
2435#if PF_ALTQ
2436 if (altq_allowed && r != NULL && r->qid)
2437 pf_mtag->pftag_qid = r->qid;
2438#endif /* PF_ALTQ */
2439
39236c6e 2440#if PF_ECN
316670eb
A
2441 /* add hints for ecn */
2442 pf_mtag->pftag_hdr = mtod(m, struct ip *);
2443 /* record address family */
2444 pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
2445 switch (af) {
2446#if INET
2447 case AF_INET:
2448 pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
2449 break;
2450#endif /* INET */
2451#if INET6
2452 case AF_INET6:
2453 pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
2454 break;
2455#endif /* INET6 */
b0d623f7 2456 }
39236c6e
A
2457#endif /* PF_ECN */
2458
316670eb 2459 /* indicate this is TCP */
39236c6e 2460 m->m_pkthdr.pkt_proto = IPPROTO_TCP;
316670eb
A
2461
2462 /* Make sure headers are 32-bit aligned */
b0d623f7
A
2463 m->m_data += max_linkhdr;
2464 m->m_pkthdr.len = m->m_len = len;
2465 m->m_pkthdr.rcvif = NULL;
2466 bzero(m->m_data, len);
2467 switch (af) {
2468#if INET
2469 case AF_INET:
2470 h = mtod(m, struct ip *);
2471
2472 /* IP header fields included in the TCP checksum */
2473 h->ip_p = IPPROTO_TCP;
2474 h->ip_len = htons(tlen);
2475 h->ip_src.s_addr = saddr->v4.s_addr;
2476 h->ip_dst.s_addr = daddr->v4.s_addr;
2477
316670eb 2478 th = (struct tcphdr *)(void *)((caddr_t)h + sizeof (struct ip));
b0d623f7
A
2479 break;
2480#endif /* INET */
2481#if INET6
2482 case AF_INET6:
2483 h6 = mtod(m, struct ip6_hdr *);
2484
2485 /* IP header fields included in the TCP checksum */
2486 h6->ip6_nxt = IPPROTO_TCP;
2487 h6->ip6_plen = htons(tlen);
2488 memcpy(&h6->ip6_src, &saddr->v6, sizeof (struct in6_addr));
2489 memcpy(&h6->ip6_dst, &daddr->v6, sizeof (struct in6_addr));
2490
316670eb
A
2491 th = (struct tcphdr *)(void *)
2492 ((caddr_t)h6 + sizeof (struct ip6_hdr));
b0d623f7
A
2493 break;
2494#endif /* INET6 */
2495 }
2496
2497 /* TCP header */
2498 th->th_sport = sport;
2499 th->th_dport = dport;
2500 th->th_seq = htonl(seq);
2501 th->th_ack = htonl(ack);
2502 th->th_off = tlen >> 2;
2503 th->th_flags = flags;
2504 th->th_win = htons(win);
2505
2506 if (mss) {
2507 opt = (char *)(th + 1);
2508 opt[0] = TCPOPT_MAXSEG;
2509 opt[1] = 4;
2510#if BYTE_ORDER != BIG_ENDIAN
2511 HTONS(mss);
2512#endif
2513 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2514 }
2515
2516 switch (af) {
2517#if INET
2518 case AF_INET: {
2519 struct route ro;
2520
2521 /* TCP checksum */
2522 th->th_sum = in_cksum(m, len);
2523
2524 /* Finish the IP header */
2525 h->ip_v = 4;
2526 h->ip_hl = sizeof (*h) >> 2;
2527 h->ip_tos = IPTOS_LOWDELAY;
2528 /*
2529 * ip_output() expects ip_len and ip_off to be in host order.
2530 */
2531 h->ip_len = len;
2532 h->ip_off = (path_mtu_discovery ? IP_DF : 0);
2533 h->ip_ttl = ttl ? ttl : ip_defttl;
2534 h->ip_sum = 0;
2535
2536 bzero(&ro, sizeof (ro));
2537 ip_output(m, NULL, &ro, 0, NULL, NULL);
39236c6e 2538 ROUTE_RELEASE(&ro);
b0d623f7
A
2539 break;
2540 }
2541#endif /* INET */
2542#if INET6
2543 case AF_INET6: {
2544 struct route_in6 ro6;
2545
2546 /* TCP checksum */
2547 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2548 sizeof (struct ip6_hdr), tlen);
2549
2550 h6->ip6_vfc |= IPV6_VERSION;
2551 h6->ip6_hlim = IPV6_DEFHLIM;
2552
2553 bzero(&ro6, sizeof (ro6));
6d2010ae 2554 ip6_output(m, NULL, &ro6, 0, NULL, NULL, NULL);
39236c6e 2555 ROUTE_RELEASE(&ro6);
b0d623f7
A
2556 break;
2557 }
2558#endif /* INET6 */
2559 }
2560}
2561
2562static void
2563pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2564 struct pf_rule *r)
2565{
2566 struct mbuf *m0;
2567 struct pf_mtag *pf_mtag;
2568
2569 m0 = m_copy(m, 0, M_COPYALL);
2570 if (m0 == NULL)
2571 return;
2572
2573 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
2574 return;
2575
316670eb 2576 pf_mtag->pftag_flags |= PF_TAG_GENERATED;
b0d623f7
A
2577
2578 if (PF_RTABLEID_IS_VALID(r->rtableid))
316670eb
A
2579 pf_mtag->pftag_rtableid = r->rtableid;
2580
2581#if PF_ALTQ
2582 if (altq_allowed && r->qid)
2583 pf_mtag->pftag_qid = r->qid;
2584#endif /* PF_ALTQ */
2585
39236c6e 2586#if PF_ECN
316670eb
A
2587 /* add hints for ecn */
2588 pf_mtag->pftag_hdr = mtod(m0, struct ip *);
2589 /* record address family */
39236c6e 2590 pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
316670eb
A
2591 switch (af) {
2592#if INET
2593 case AF_INET:
2594 pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
39236c6e 2595 m0->m_pkthdr.pkt_proto = IPPROTO_ICMP;
316670eb
A
2596 break;
2597#endif /* INET */
2598#if INET6
2599 case AF_INET6:
2600 pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
39236c6e 2601 m0->m_pkthdr.pkt_proto = IPPROTO_ICMPV6;
316670eb
A
2602 break;
2603#endif /* INET6 */
b0d623f7 2604 }
39236c6e 2605#endif /* PF_ECN */
316670eb 2606
b0d623f7
A
2607 switch (af) {
2608#if INET
2609 case AF_INET:
2610 icmp_error(m0, type, code, 0, 0);
2611 break;
2612#endif /* INET */
2613#if INET6
2614 case AF_INET6:
2615 icmp6_error(m0, type, code, 0);
2616 break;
2617#endif /* INET6 */
2618 }
2619}
2620
2621/*
2622 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2623 * If n is 0, they match if they are equal. If n is != 0, they match if they
2624 * are different.
2625 */
2626int
2627pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2628 struct pf_addr *b, sa_family_t af)
2629{
2630 int match = 0;
2631
2632 switch (af) {
2633#if INET
2634 case AF_INET:
2635 if ((a->addr32[0] & m->addr32[0]) ==
2636 (b->addr32[0] & m->addr32[0]))
2637 match++;
2638 break;
2639#endif /* INET */
2640#if INET6
2641 case AF_INET6:
2642 if (((a->addr32[0] & m->addr32[0]) ==
2643 (b->addr32[0] & m->addr32[0])) &&
2644 ((a->addr32[1] & m->addr32[1]) ==
2645 (b->addr32[1] & m->addr32[1])) &&
2646 ((a->addr32[2] & m->addr32[2]) ==
2647 (b->addr32[2] & m->addr32[2])) &&
2648 ((a->addr32[3] & m->addr32[3]) ==
2649 (b->addr32[3] & m->addr32[3])))
2650 match++;
2651 break;
2652#endif /* INET6 */
2653 }
2654 if (match) {
2655 if (n)
2656 return (0);
2657 else
2658 return (1);
2659 } else {
2660 if (n)
2661 return (1);
2662 else
2663 return (0);
2664 }
2665}
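
For quick reference, the negation handling above can be summarized as a small truth table (illustrative comment only):

	/*
	 * Summary of pf_match_addr():
	 *
	 *	masked compare		n == 0 (plain)	n != 0 (negated)
	 *	equal			returns 1	returns 0
	 *	different		returns 0	returns 1
	 */
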
2666
2667/*
2668 * Return 1 if b <= a <= e, otherwise return 0.
2669 */
2670int
2671pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2672 struct pf_addr *a, sa_family_t af)
2673{
2674 switch (af) {
2675#if INET
2676 case AF_INET:
2677 if ((a->addr32[0] < b->addr32[0]) ||
2678 (a->addr32[0] > e->addr32[0]))
2679 return (0);
2680 break;
2681#endif /* INET */
2682#if INET6
2683 case AF_INET6: {
2684 int i;
2685
2686 /* check a >= b */
2687 for (i = 0; i < 4; ++i)
2688 if (a->addr32[i] > b->addr32[i])
2689 break;
2690 else if (a->addr32[i] < b->addr32[i])
2691 return (0);
2692 /* check a <= e */
2693 for (i = 0; i < 4; ++i)
2694 if (a->addr32[i] < e->addr32[i])
2695 break;
2696 else if (a->addr32[i] > e->addr32[i])
2697 return (0);
2698 break;
2699 }
2700#endif /* INET6 */
2701 }
2702 return (1);
2703}
2704
2705int
2706pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2707{
2708 switch (op) {
2709 case PF_OP_IRG:
2710 return ((p > a1) && (p < a2));
2711 case PF_OP_XRG:
2712 return ((p < a1) || (p > a2));
2713 case PF_OP_RRG:
2714 return ((p >= a1) && (p <= a2));
2715 case PF_OP_EQ:
2716 return (p == a1);
2717 case PF_OP_NE:
2718 return (p != a1);
2719 case PF_OP_LT:
2720 return (p < a1);
2721 case PF_OP_LE:
2722 return (p <= a1);
2723 case PF_OP_GT:
2724 return (p > a1);
2725 case PF_OP_GE:
2726 return (p >= a1);
2727 }
2728 return (0); /* never reached */
2729}
2730
2731int
2732pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2733{
2734#if BYTE_ORDER != BIG_ENDIAN
2735 NTOHS(a1);
2736 NTOHS(a2);
2737 NTOHS(p);
2738#endif
2739 return (pf_match(op, a1, a2, p));
2740}
2741
b0d623f7
A
2742int
2743pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
2744 union pf_state_xport *sx)
2745{
2746 int d = !0;
2747
2748 if (sx) {
2749 switch (proto) {
2750 case IPPROTO_GRE:
2751 if (proto_variant == PF_GRE_PPTP_VARIANT)
2752 d = (rx->call_id == sx->call_id);
2753 break;
2754
2755 case IPPROTO_ESP:
2756 d = (rx->spi == sx->spi);
2757 break;
2758
2759 case IPPROTO_TCP:
2760 case IPPROTO_UDP:
2761 case IPPROTO_ICMP:
2762 case IPPROTO_ICMPV6:
2763 if (rx->range.op)
2764 d = pf_match_port(rx->range.op,
2765 rx->range.port[0], rx->range.port[1],
2766 sx->port);
2767 break;
2768
2769 default:
2770 break;
2771 }
2772 }
2773
2774 return (d);
2775}
b0d623f7
A
2776
2777int
2778pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2779{
2780 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2781 return (0);
2782 return (pf_match(op, a1, a2, u));
2783}
2784
2785int
2786pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2787{
2788 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2789 return (0);
2790 return (pf_match(op, a1, a2, g));
2791}
2792
2793static int
2794pf_match_tag(struct mbuf *m, struct pf_rule *r, struct pf_mtag *pf_mtag,
2795 int *tag)
2796{
2797#pragma unused(m)
2798 if (*tag == -1)
316670eb 2799 *tag = pf_mtag->pftag_tag;
b0d623f7
A
2800
2801 return ((!r->match_tag_not && r->match_tag == *tag) ||
2802 (r->match_tag_not && r->match_tag != *tag));
2803}
2804
2805int
2806pf_tag_packet(struct mbuf *m, struct pf_mtag *pf_mtag, int tag,
316670eb 2807 unsigned int rtableid, struct pf_pdesc *pd)
b0d623f7 2808{
39236c6e
A
2809 if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid) &&
2810 (pd == NULL || !(pd->pktflags & PKTF_FLOW_ID)))
b0d623f7
A
2811 return (0);
2812
2813 if (pf_mtag == NULL && (pf_mtag = pf_get_mtag(m)) == NULL)
2814 return (1);
2815
2816 if (tag > 0)
316670eb 2817 pf_mtag->pftag_tag = tag;
b0d623f7 2818 if (PF_RTABLEID_IS_VALID(rtableid))
316670eb 2819 pf_mtag->pftag_rtableid = rtableid;
39236c6e
A
2820 if (pd != NULL && (pd->pktflags & PKTF_FLOW_ID)) {
2821 m->m_pkthdr.pkt_flowsrc = pd->flowsrc;
2822 m->m_pkthdr.pkt_flowid = pd->flowhash;
2823 m->m_pkthdr.pkt_flags |= pd->pktflags;
2824 m->m_pkthdr.pkt_proto = pd->proto;
316670eb 2825 }
b0d623f7
A
2826
2827 return (0);
2828}
2829
13f56ec4 2830void
b0d623f7
A
2831pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
2832 struct pf_rule **r, struct pf_rule **a, int *match)
2833{
2834 struct pf_anchor_stackframe *f;
2835
2836 (*r)->anchor->match = 0;
2837 if (match)
2838 *match = 0;
2839 if (*depth >= (int)sizeof (pf_anchor_stack) /
2840 (int)sizeof (pf_anchor_stack[0])) {
2841 printf("pf_step_into_anchor: stack overflow\n");
2842 *r = TAILQ_NEXT(*r, entries);
2843 return;
2844 } else if (*depth == 0 && a != NULL)
2845 *a = *r;
2846 f = pf_anchor_stack + (*depth)++;
2847 f->rs = *rs;
2848 f->r = *r;
2849 if ((*r)->anchor_wildcard) {
2850 f->parent = &(*r)->anchor->children;
2851 if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
2852 NULL) {
2853 *r = NULL;
2854 return;
2855 }
2856 *rs = &f->child->ruleset;
2857 } else {
2858 f->parent = NULL;
2859 f->child = NULL;
2860 *rs = &(*r)->anchor->ruleset;
2861 }
2862 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2863}
2864
13f56ec4 2865int
b0d623f7
A
2866pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
2867 struct pf_rule **r, struct pf_rule **a, int *match)
2868{
2869 struct pf_anchor_stackframe *f;
2870 int quick = 0;
2871
2872 do {
2873 if (*depth <= 0)
2874 break;
2875 f = pf_anchor_stack + *depth - 1;
2876 if (f->parent != NULL && f->child != NULL) {
2877 if (f->child->match ||
2878 (match != NULL && *match)) {
2879 f->r->anchor->match = 1;
2880 *match = 0;
2881 }
2882 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2883 if (f->child != NULL) {
2884 *rs = &f->child->ruleset;
2885 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2886 if (*r == NULL)
2887 continue;
2888 else
2889 break;
2890 }
2891 }
2892 (*depth)--;
2893 if (*depth == 0 && a != NULL)
2894 *a = NULL;
2895 *rs = f->rs;
2896 if (f->r->anchor->match || (match != NULL && *match))
2897 quick = f->r->quick;
2898 *r = TAILQ_NEXT(f->r, entries);
2899 } while (*r == NULL);
2900
2901 return (quick);
2902}
2903
2904#if INET6
2905void
2906pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2907 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2908{
2909 switch (af) {
2910#if INET
2911 case AF_INET:
2912 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2913 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
2914 break;
2915#endif /* INET */
2916 case AF_INET6:
2917 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2918 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
2919 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2920 ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
2921 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2922 ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
2923 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2924 ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
2925 break;
2926 }
2927}
2928
2929void
2930pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2931{
2932 switch (af) {
2933#if INET
2934 case AF_INET:
2935 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2936 break;
2937#endif /* INET */
2938 case AF_INET6:
2939 if (addr->addr32[3] == 0xffffffff) {
2940 addr->addr32[3] = 0;
2941 if (addr->addr32[2] == 0xffffffff) {
2942 addr->addr32[2] = 0;
2943 if (addr->addr32[1] == 0xffffffff) {
2944 addr->addr32[1] = 0;
2945 addr->addr32[0] =
2946 htonl(ntohl(addr->addr32[0]) + 1);
2947 } else
2948 addr->addr32[1] =
2949 htonl(ntohl(addr->addr32[1]) + 1);
2950 } else
2951 addr->addr32[2] =
2952 htonl(ntohl(addr->addr32[2]) + 1);
2953 } else
2954 addr->addr32[3] =
2955 htonl(ntohl(addr->addr32[3]) + 1);
2956 break;
2957 }
2958}
2959#endif /* INET6 */
2960
2961#define mix(a, b, c) \
2962 do { \
2963 a -= b; a -= c; a ^= (c >> 13); \
2964 b -= c; b -= a; b ^= (a << 8); \
2965 c -= a; c -= b; c ^= (b >> 13); \
2966 a -= b; a -= c; a ^= (c >> 12); \
2967 b -= c; b -= a; b ^= (a << 16); \
2968 c -= a; c -= b; c ^= (b >> 5); \
2969 a -= b; a -= c; a ^= (c >> 3); \
2970 b -= c; b -= a; b ^= (a << 10); \
2971 c -= a; c -= b; c ^= (b >> 15); \
2972 } while (0)
2973
2974/*
2975 * hash function based on bridge_hash in if_bridge.c
2976 */
2977static void
2978pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
2979 struct pf_poolhashkey *key, sa_family_t af)
2980{
2981 u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];
2982
2983 switch (af) {
2984#if INET
2985 case AF_INET:
2986 a += inaddr->addr32[0];
2987 b += key->key32[1];
2988 mix(a, b, c);
2989 hash->addr32[0] = c + key->key32[2];
2990 break;
2991#endif /* INET */
2992#if INET6
2993 case AF_INET6:
2994 a += inaddr->addr32[0];
2995 b += inaddr->addr32[2];
2996 mix(a, b, c);
2997 hash->addr32[0] = c;
2998 a += inaddr->addr32[1];
2999 b += inaddr->addr32[3];
3000 c += key->key32[1];
3001 mix(a, b, c);
3002 hash->addr32[1] = c;
3003 a += inaddr->addr32[2];
3004 b += inaddr->addr32[1];
3005 c += key->key32[2];
3006 mix(a, b, c);
3007 hash->addr32[2] = c;
3008 a += inaddr->addr32[3];
3009 b += inaddr->addr32[0];
3010 c += key->key32[3];
3011 mix(a, b, c);
3012 hash->addr32[3] = c;
3013 break;
3014#endif /* INET6 */
3015 }
3016}
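
pf_hash() feeds the PF_POOL_SRCHASH case of pf_map_addr() below, where the hashed source address is masked onto the pool network, so a given source keeps the same translated address across connections. Sketch of that call sequence (taken from the PF_POOL_SRCHASH branch below):

	/*
	 *	pf_hash(saddr, (struct pf_addr *)(void *)&hash, &rpool->key, af);
	 *	PF_POOLMASK(naddr, raddr, rmask,
	 *	    (struct pf_addr *)(void *)&hash, af);
	 *
	 * The keyed hash makes the mapping stable per source address but hard
	 * to predict without the pool key.
	 */
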
3017
3018static int
3019pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
3020 struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
3021{
3022 unsigned char hash[16];
3023 struct pf_pool *rpool = &r->rpool;
3024 struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
3025 struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
3026 struct pf_pooladdr *acur = rpool->cur;
3027 struct pf_src_node k;
3028
3029 if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
3030 (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
3031 k.af = af;
3032 PF_ACPY(&k.addr, saddr, af);
3033 if (r->rule_flag & PFRULE_RULESRCTRACK ||
3034 r->rpool.opts & PF_POOL_STICKYADDR)
3035 k.rule.ptr = r;
3036 else
3037 k.rule.ptr = NULL;
3038 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
3039 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
3040 if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, af)) {
3041 PF_ACPY(naddr, &(*sn)->raddr, af);
3042 if (pf_status.debug >= PF_DEBUG_MISC) {
3043 printf("pf_map_addr: src tracking maps ");
3044 pf_print_host(&k.addr, 0, af);
3045 printf(" to ");
3046 pf_print_host(naddr, 0, af);
3047 printf("\n");
3048 }
3049 return (0);
3050 }
3051 }
3052
3053 if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
3054 return (1);
3055 if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
39236c6e
A
3056 if (rpool->cur->addr.p.dyn == NULL)
3057 return (1);
b0d623f7
A
3058 switch (af) {
3059#if INET
3060 case AF_INET:
3061 if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
3062 (rpool->opts & PF_POOL_TYPEMASK) !=
3063 PF_POOL_ROUNDROBIN)
3064 return (1);
3065 raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
3066 rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
3067 break;
3068#endif /* INET */
3069#if INET6
3070 case AF_INET6:
3071 if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
3072 (rpool->opts & PF_POOL_TYPEMASK) !=
3073 PF_POOL_ROUNDROBIN)
3074 return (1);
3075 raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
3076 rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
3077 break;
3078#endif /* INET6 */
3079 }
3080 } else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3081 if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
3082 return (1); /* unsupported */
3083 } else {
3084 raddr = &rpool->cur->addr.v.a.addr;
3085 rmask = &rpool->cur->addr.v.a.mask;
3086 }
3087
3088 switch (rpool->opts & PF_POOL_TYPEMASK) {
3089 case PF_POOL_NONE:
3090 PF_ACPY(naddr, raddr, af);
3091 break;
3092 case PF_POOL_BITMASK:
3093 PF_POOLMASK(naddr, raddr, rmask, saddr, af);
3094 break;
3095 case PF_POOL_RANDOM:
3096 if (init_addr != NULL && PF_AZERO(init_addr, af)) {
3097 switch (af) {
3098#if INET
3099 case AF_INET:
3100 rpool->counter.addr32[0] = htonl(random());
3101 break;
3102#endif /* INET */
3103#if INET6
3104 case AF_INET6:
3105 if (rmask->addr32[3] != 0xffffffff)
3106 rpool->counter.addr32[3] =
39236c6e 3107 RandomULong();
b0d623f7
A
3108 else
3109 break;
3110 if (rmask->addr32[2] != 0xffffffff)
3111 rpool->counter.addr32[2] =
39236c6e 3112 RandomULong();
b0d623f7
A
3113 else
3114 break;
3115 if (rmask->addr32[1] != 0xffffffff)
3116 rpool->counter.addr32[1] =
39236c6e 3117 RandomULong();
b0d623f7
A
3118 else
3119 break;
3120 if (rmask->addr32[0] != 0xffffffff)
3121 rpool->counter.addr32[0] =
39236c6e 3122 RandomULong();
b0d623f7
A
3123 break;
3124#endif /* INET6 */
3125 }
3126 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
3127 PF_ACPY(init_addr, naddr, af);
3128
3129 } else {
3130 PF_AINC(&rpool->counter, af);
3131 PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, af);
3132 }
3133 break;
3134 case PF_POOL_SRCHASH:
316670eb
A
3135 pf_hash(saddr, (struct pf_addr *)(void *)&hash,
3136 &rpool->key, af);
3137 PF_POOLMASK(naddr, raddr, rmask,
3138 (struct pf_addr *)(void *)&hash, af);
b0d623f7
A
3139 break;
3140 case PF_POOL_ROUNDROBIN:
3141 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3142 if (!pfr_pool_get(rpool->cur->addr.p.tbl,
3143 &rpool->tblidx, &rpool->counter,
3144 &raddr, &rmask, af))
3145 goto get_addr;
3146 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
39236c6e
A
3147 if (rpool->cur->addr.p.dyn != NULL &&
3148 !pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
b0d623f7
A
3149 &rpool->tblidx, &rpool->counter,
3150 &raddr, &rmask, af))
3151 goto get_addr;
3152 } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, af))
3153 goto get_addr;
3154
3155 try_next:
3156 if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
3157 rpool->cur = TAILQ_FIRST(&rpool->list);
3158 if (rpool->cur->addr.type == PF_ADDR_TABLE) {
3159 rpool->tblidx = -1;
3160 if (pfr_pool_get(rpool->cur->addr.p.tbl,
3161 &rpool->tblidx, &rpool->counter,
3162 &raddr, &rmask, af)) {
3163 /* table contains no address of type 'af' */
3164 if (rpool->cur != acur)
3165 goto try_next;
3166 return (1);
3167 }
3168 } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
3169 rpool->tblidx = -1;
39236c6e
A
3170 if (rpool->cur->addr.p.dyn == NULL)
3171 return (1);
b0d623f7
A
3172 if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
3173 &rpool->tblidx, &rpool->counter,
3174 &raddr, &rmask, af)) {
3175 /* table contains no address of type 'af' */
3176 if (rpool->cur != acur)
3177 goto try_next;
3178 return (1);
3179 }
3180 } else {
3181 raddr = &rpool->cur->addr.v.a.addr;
3182 rmask = &rpool->cur->addr.v.a.mask;
3183 PF_ACPY(&rpool->counter, raddr, af);
3184 }
3185
3186 get_addr:
3187 PF_ACPY(naddr, &rpool->counter, af);
3188 if (init_addr != NULL && PF_AZERO(init_addr, af))
3189 PF_ACPY(init_addr, naddr, af);
3190 PF_AINC(&rpool->counter, af);
3191 break;
3192 }
3193 if (*sn != NULL)
3194 PF_ACPY(&(*sn)->raddr, naddr, af);
3195
3196 if (pf_status.debug >= PF_DEBUG_MISC &&
3197 (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
3198 printf("pf_map_addr: selected address ");
3199 pf_print_host(naddr, 0, af);
3200 printf("\n");
3201 }
3202
3203 return (0);
3204}
3205
b0d623f7
A
3206static int
3207pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r,
3208 struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
3209 union pf_state_xport *dxport, struct pf_addr *naddr,
3210 union pf_state_xport *nxport, struct pf_src_node **sn)
b0d623f7
A
3211{
3212#pragma unused(kif)
3213 struct pf_state_key_cmp key;
3214 struct pf_addr init_addr;
b0d623f7
A
3215 unsigned int cut;
3216 sa_family_t af = pd->af;
3217 u_int8_t proto = pd->proto;
b7266188
A
3218 unsigned int low = r->rpool.proxy_port[0];
3219 unsigned int high = r->rpool.proxy_port[1];
b0d623f7
A
3220
3221 bzero(&init_addr, sizeof (init_addr));
3222 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3223 return (1);
3224
3225 if (proto == IPPROTO_ICMP) {
3226 low = 1;
3227 high = 65535;
3228 }
3229
b0d623f7
A
3230 if (!nxport)
3231 return (0); /* No output necessary. */
3232
3233 /*--- Special mapping rules for UDP ---*/
3234 if (proto == IPPROTO_UDP) {
3235
3236 /*--- Never float IKE source port ---*/
b7266188 3237 if (ntohs(sxport->port) == PF_IKE_PORT) {
b0d623f7
A
3238 nxport->port = sxport->port;
3239 return (0);
3240 }
3241
3242 /*--- Apply exterior mapping options ---*/
3243 if (r->extmap > PF_EXTMAP_APD) {
3244 struct pf_state *s;
3245
3246 TAILQ_FOREACH(s, &state_list, entry_list) {
3247 struct pf_state_key *sk = s->state_key;
3248 if (!sk)
3249 continue;
3250 if (s->nat_rule.ptr != r)
3251 continue;
3252 if (sk->proto != IPPROTO_UDP || sk->af != af)
3253 continue;
3254 if (sk->lan.xport.port != sxport->port)
3255 continue;
3256 if (PF_ANEQ(&sk->lan.addr, saddr, af))
3257 continue;
3258 if (r->extmap < PF_EXTMAP_EI &&
3259 PF_ANEQ(&sk->ext.addr, daddr, af))
3260 continue;
3261
3262 nxport->port = sk->gwy.xport.port;
3263 return (0);
3264 }
3265 }
b7266188
A
3266 } else if (proto == IPPROTO_TCP) {
3267 struct pf_state* s;
3268 /*
3269 * APPLE MODIFICATION: <rdar://problem/6546358>
3270		 * Fix allows NAT to use a single binding for TCP sessions
3271		 * with the same source IP and source port.
3272 */
3273 TAILQ_FOREACH(s, &state_list, entry_list) {
3274 struct pf_state_key* sk = s->state_key;
3275 if (!sk)
3276 continue;
3277 if (s->nat_rule.ptr != r)
3278 continue;
3279 if (sk->proto != IPPROTO_TCP || sk->af != af)
3280 continue;
3281 if (sk->lan.xport.port != sxport->port)
3282 continue;
3283 if (!(PF_AEQ(&sk->lan.addr, saddr, af)))
3284 continue;
3285 nxport->port = sk->gwy.xport.port;
3286 return (0);
3287 }
b0d623f7 3288 }
b0d623f7
A
3289 do {
3290 key.af = af;
3291 key.proto = proto;
3292 PF_ACPY(&key.ext.addr, daddr, key.af);
3293 PF_ACPY(&key.gwy.addr, naddr, key.af);
b0d623f7
A
3294 switch (proto) {
3295 case IPPROTO_UDP:
3296 key.proto_variant = r->extfilter;
3297 break;
3298 default:
3299 key.proto_variant = 0;
3300 break;
3301 }
3302 if (dxport)
3303 key.ext.xport = *dxport;
3304 else
3305 memset(&key.ext.xport, 0, sizeof (key.ext.xport));
b0d623f7
A
3306		/*
3307		 * Port search: start at a random port and step through the
3308		 * range, similar to the port loop in in_pcbbind().
3309		 */
3310 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
3311 proto == IPPROTO_ICMP)) {
b0d623f7
A
3312 if (dxport)
3313 key.gwy.xport = *dxport;
3314 else
3315 memset(&key.gwy.xport, 0,
3316 sizeof (key.ext.xport));
b0d623f7
A
3317 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
3318 return (0);
3319 } else if (low == 0 && high == 0) {
b0d623f7 3320 key.gwy.xport = *nxport;
b0d623f7
A
3321 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
3322 return (0);
3323 } else if (low == high) {
b0d623f7
A
3324 key.gwy.xport.port = htons(low);
3325 if (pf_find_state_all(&key, PF_IN, NULL) == NULL) {
3326 nxport->port = htons(low);
3327 return (0);
3328 }
b0d623f7 3329 } else {
b0d623f7 3330 unsigned int tmp;
b0d623f7
A
3331 if (low > high) {
3332 tmp = low;
3333 low = high;
3334 high = tmp;
3335 }
3336 /* low < high */
3337 cut = htonl(random()) % (1 + high - low) + low;
3338 /* low <= cut <= high */
3339 for (tmp = cut; tmp <= high; ++(tmp)) {
b0d623f7
A
3340 key.gwy.xport.port = htons(tmp);
3341 if (pf_find_state_all(&key, PF_IN, NULL) ==
3342 NULL) {
3343 nxport->port = htons(tmp);
3344 return (0);
3345 }
b0d623f7
A
3346 }
3347 for (tmp = cut - 1; tmp >= low; --(tmp)) {
b0d623f7
A
3348 key.gwy.xport.port = htons(tmp);
3349 if (pf_find_state_all(&key, PF_IN, NULL) ==
3350 NULL) {
3351 nxport->port = htons(tmp);
3352 return (0);
3353 }
b0d623f7
A
3354 }
3355 }
3356
3357 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
3358 case PF_POOL_RANDOM:
3359 case PF_POOL_ROUNDROBIN:
3360 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3361 return (1);
3362 break;
3363 case PF_POOL_NONE:
3364 case PF_POOL_SRCHASH:
3365 case PF_POOL_BITMASK:
3366 default:
3367 return (1);
3368 }
3369 } while (!PF_AEQ(&init_addr, naddr, af));
3370
3371 return (1); /* none available */
3372}
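
The proxy-port search above picks a random cut in [low, high], scans upward to high, then downward from cut - 1 to low. A standalone sketch of that probe, where is_free() is an assumed stand-in for the pf_find_state_all() collision check:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Illustrative only: randomized port probe in [low, high]. */
	static int
	toy_pick_port(unsigned int low, unsigned int high,
	    bool (*is_free)(uint16_t))
	{
		unsigned int cut, tmp;

		if (low > high) {
			tmp = low;
			low = high;
			high = tmp;
		}
		cut = (unsigned int)random() % (1 + high - low) + low;
		for (tmp = cut; tmp <= high; ++tmp)		/* scan up */
			if (is_free((uint16_t)tmp))
				return ((int)tmp);
		for (tmp = cut - 1; tmp >= low && tmp <= high; --tmp)
			if (is_free((uint16_t)tmp))		/* scan down */
				return ((int)tmp);
		return (-1);					/* none available */
	}

The extra "tmp <= high" bound on the downward scan guards the unsigned wrap when low is 0; pf itself relies on low being non-zero in that branch.
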
3373
b0d623f7
A
3374static struct pf_rule *
3375pf_match_translation(struct pf_pdesc *pd, struct mbuf *m, int off,
3376 int direction, struct pfi_kif *kif, struct pf_addr *saddr,
3377 union pf_state_xport *sxport, struct pf_addr *daddr,
3378 union pf_state_xport *dxport, int rs_num)
b0d623f7
A
3379{
3380 struct pf_rule *r, *rm = NULL;
3381 struct pf_ruleset *ruleset = NULL;
3382 int tag = -1;
3383 unsigned int rtableid = IFSCOPE_NONE;
3384 int asd = 0;
3385
3386 r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
3387 while (r && rm == NULL) {
3388 struct pf_rule_addr *src = NULL, *dst = NULL;
3389 struct pf_addr_wrap *xdst = NULL;
b0d623f7 3390 struct pf_addr_wrap *xsrc = NULL;
d1ecb069 3391 union pf_rule_xport rdrxport;
b0d623f7
A
3392
3393 if (r->action == PF_BINAT && direction == PF_IN) {
3394 src = &r->dst;
3395 if (r->rpool.cur != NULL)
3396 xdst = &r->rpool.cur->addr;
b0d623f7
A
3397 } else if (r->action == PF_RDR && direction == PF_OUT) {
3398 dst = &r->src;
3399 src = &r->dst;
d1ecb069
A
3400 if (r->rpool.cur != NULL) {
3401 rdrxport.range.op = PF_OP_EQ;
3402 rdrxport.range.port[0] =
3403 htons(r->rpool.proxy_port[0]);
b0d623f7 3404 xsrc = &r->rpool.cur->addr;
d1ecb069 3405 }
b0d623f7
A
3406 } else {
3407 src = &r->src;
3408 dst = &r->dst;
3409 }
3410
3411 r->evaluations++;
3412 if (pfi_kif_match(r->kif, kif) == r->ifnot)
3413 r = r->skip[PF_SKIP_IFP].ptr;
3414 else if (r->direction && r->direction != direction)
3415 r = r->skip[PF_SKIP_DIR].ptr;
3416 else if (r->af && r->af != pd->af)
3417 r = r->skip[PF_SKIP_AF].ptr;
3418 else if (r->proto && r->proto != pd->proto)
3419 r = r->skip[PF_SKIP_PROTO].ptr;
b0d623f7
A
3420 else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL))
3421 r = TAILQ_NEXT(r, entries);
3422 else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
3423 src->neg, kif))
d1ecb069
A
3424 r = TAILQ_NEXT(r, entries);
3425 else if (xsrc && (!rdrxport.range.port[0] ||
3426 !pf_match_xport(r->proto, r->proto_variant, &rdrxport,
3427 sxport)))
3428 r = TAILQ_NEXT(r, entries);
3429 else if (!xsrc && !pf_match_xport(r->proto,
b7266188 3430 r->proto_variant, &src->xport, sxport))
b0d623f7
A
3431 r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
3432 PF_SKIP_DST_PORT].ptr;
3433 else if (dst != NULL &&
3434 PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
3435 r = r->skip[PF_SKIP_DST_ADDR].ptr;
3436 else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
3437 0, NULL))
3438 r = TAILQ_NEXT(r, entries);
b0d623f7
A
3439 else if (dst && !pf_match_xport(r->proto, r->proto_variant,
3440 &dst->xport, dxport))
b0d623f7
A
3441 r = r->skip[PF_SKIP_DST_PORT].ptr;
3442 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
3443 r = TAILQ_NEXT(r, entries);
3444 else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
3445 IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, m,
3446 off, pd->hdr.tcp), r->os_fingerprint)))
3447 r = TAILQ_NEXT(r, entries);
3448 else {
3449 if (r->tag)
3450 tag = r->tag;
3451 if (PF_RTABLEID_IS_VALID(r->rtableid))
3452 rtableid = r->rtableid;
3453 if (r->anchor == NULL) {
3454 rm = r;
3455 } else
3456 pf_step_into_anchor(&asd, &ruleset, rs_num,
3457 &r, NULL, NULL);
3458 }
3459 if (r == NULL)
3460 pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
3461 NULL, NULL);
3462 }
316670eb 3463 if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid, NULL))
b0d623f7
A
3464 return (NULL);
3465 if (rm != NULL && (rm->action == PF_NONAT ||
3466 rm->action == PF_NORDR || rm->action == PF_NOBINAT))
3467 return (NULL);
3468 return (rm);
3469}
3470
b0d623f7
A
3471static struct pf_rule *
3472pf_get_translation_aux(struct pf_pdesc *pd, struct mbuf *m, int off,
3473 int direction, struct pfi_kif *kif, struct pf_src_node **sn,
3474 struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
3475 union pf_state_xport *dxport, struct pf_addr *naddr,
3476 union pf_state_xport *nxport)
b0d623f7
A
3477{
3478 struct pf_rule *r = NULL;
3479
b0d623f7
A
3480 if (direction == PF_OUT) {
3481 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3482 sxport, daddr, dxport, PF_RULESET_BINAT);
3483 if (r == NULL)
3484 r = pf_match_translation(pd, m, off, direction, kif,
3485 saddr, sxport, daddr, dxport, PF_RULESET_RDR);
3486 if (r == NULL)
3487 r = pf_match_translation(pd, m, off, direction, kif,
3488 saddr, sxport, daddr, dxport, PF_RULESET_NAT);
3489 } else {
3490 r = pf_match_translation(pd, m, off, direction, kif, saddr,
3491 sxport, daddr, dxport, PF_RULESET_RDR);
3492 if (r == NULL)
3493 r = pf_match_translation(pd, m, off, direction, kif,
3494 saddr, sxport, daddr, dxport, PF_RULESET_BINAT);
3495 }
b0d623f7
A
3496
3497 if (r != NULL) {
3498 switch (r->action) {
3499 case PF_NONAT:
3500 case PF_NOBINAT:
3501 case PF_NORDR:
3502 return (NULL);
3503 case PF_NAT:
b0d623f7
A
3504 if (pf_get_sport(pd, kif, r, saddr, sxport, daddr,
3505 dxport, naddr, nxport, sn)) {
b0d623f7
A
3506 DPFPRINTF(PF_DEBUG_MISC,
3507 ("pf: NAT proxy port allocation "
3508 "(%u-%u) failed\n",
3509 r->rpool.proxy_port[0],
3510 r->rpool.proxy_port[1]));
3511 return (NULL);
3512 }
3513 break;
3514 case PF_BINAT:
3515 switch (direction) {
3516 case PF_OUT:
3517 if (r->rpool.cur->addr.type ==
3518 PF_ADDR_DYNIFTL) {
39236c6e
A
3519 if (r->rpool.cur->addr.p.dyn == NULL)
3520 return (NULL);
b0d623f7
A
3521 switch (pd->af) {
3522#if INET
3523 case AF_INET:
3524 if (r->rpool.cur->addr.p.dyn->
3525 pfid_acnt4 < 1)
3526 return (NULL);
3527 PF_POOLMASK(naddr,
3528 &r->rpool.cur->addr.p.dyn->
3529 pfid_addr4,
3530 &r->rpool.cur->addr.p.dyn->
3531 pfid_mask4,
3532 saddr, AF_INET);
3533 break;
3534#endif /* INET */
3535#if INET6
3536 case AF_INET6:
3537 if (r->rpool.cur->addr.p.dyn->
3538 pfid_acnt6 < 1)
3539 return (NULL);
3540 PF_POOLMASK(naddr,
3541 &r->rpool.cur->addr.p.dyn->
3542 pfid_addr6,
3543 &r->rpool.cur->addr.p.dyn->
3544 pfid_mask6,
3545 saddr, AF_INET6);
3546 break;
3547#endif /* INET6 */
3548 }
3549 } else {
3550 PF_POOLMASK(naddr,
3551 &r->rpool.cur->addr.v.a.addr,
3552 &r->rpool.cur->addr.v.a.mask,
3553 saddr, pd->af);
3554 }
3555 break;
3556 case PF_IN:
3557 if (r->src.addr.type == PF_ADDR_DYNIFTL) {
39236c6e
A
3558 if (r->src.addr.p.dyn == NULL)
3559 return (NULL);
b0d623f7
A
3560 switch (pd->af) {
3561#if INET
3562 case AF_INET:
3563 if (r->src.addr.p.dyn->
3564 pfid_acnt4 < 1)
3565 return (NULL);
3566 PF_POOLMASK(naddr,
3567 &r->src.addr.p.dyn->
3568 pfid_addr4,
3569 &r->src.addr.p.dyn->
3570 pfid_mask4,
3571 daddr, AF_INET);
3572 break;
3573#endif /* INET */
3574#if INET6
3575 case AF_INET6:
3576 if (r->src.addr.p.dyn->
3577 pfid_acnt6 < 1)
3578 return (NULL);
3579 PF_POOLMASK(naddr,
3580 &r->src.addr.p.dyn->
3581 pfid_addr6,
3582 &r->src.addr.p.dyn->
3583 pfid_mask6,
3584 daddr, AF_INET6);
3585 break;
3586#endif /* INET6 */
3587 }
3588 } else
3589 PF_POOLMASK(naddr,
3590 &r->src.addr.v.a.addr,
3591 &r->src.addr.v.a.mask, daddr,
3592 pd->af);
3593 break;
3594 }
3595 break;
3596 case PF_RDR: {
b0d623f7
A
3597 switch (direction) {
3598 case PF_OUT:
3599 if (r->dst.addr.type == PF_ADDR_DYNIFTL) {
39236c6e
A
3600 if (r->dst.addr.p.dyn == NULL)
3601 return (NULL);
b0d623f7
A
3602 switch (pd->af) {
3603#if INET
3604 case AF_INET:
3605 if (r->dst.addr.p.dyn->
3606 pfid_acnt4 < 1)
3607 return (NULL);
3608 PF_POOLMASK(naddr,
3609 &r->dst.addr.p.dyn->
3610 pfid_addr4,
3611 &r->dst.addr.p.dyn->
3612 pfid_mask4,
3613 daddr, AF_INET);
3614 break;
3615#endif /* INET */
3616#if INET6
3617 case AF_INET6:
3618 if (r->dst.addr.p.dyn->
3619 pfid_acnt6 < 1)
3620 return (NULL);
3621 PF_POOLMASK(naddr,
3622 &r->dst.addr.p.dyn->
3623 pfid_addr6,
3624 &r->dst.addr.p.dyn->
3625 pfid_mask6,
3626 daddr, AF_INET6);
3627 break;
3628#endif /* INET6 */
3629 }
3630 } else {
3631 PF_POOLMASK(naddr,
3632 &r->dst.addr.v.a.addr,
3633 &r->dst.addr.v.a.mask,
3634 daddr, pd->af);
3635 }
d1ecb069
A
3636 if (nxport && r->dst.xport.range.port[0])
3637 nxport->port =
3638 r->dst.xport.range.port[0];
b0d623f7
A
3639 break;
3640 case PF_IN:
3641 if (pf_map_addr(pd->af, r, saddr,
3642 naddr, NULL, sn))
3643 return (NULL);
3644 if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
3645 PF_POOL_BITMASK)
3646 PF_POOLMASK(naddr, naddr,
3647 &r->rpool.cur->addr.v.a.mask, daddr,
3648 pd->af);
3649
3650 if (nxport && dxport) {
3651 if (r->rpool.proxy_port[1]) {
3652 u_int32_t tmp_nport;
3653
3654 tmp_nport =
3655 ((ntohs(dxport->port) -
3656 ntohs(r->dst.xport.range.
3657 port[0])) %
3658 (r->rpool.proxy_port[1] -
3659 r->rpool.proxy_port[0] +
3660 1)) + r->rpool.proxy_port[0];
3661
3662 /* wrap around if necessary */
3663 if (tmp_nport > 65535)
3664 tmp_nport -= 65535;
3665 nxport->port =
3666 htons((u_int16_t)tmp_nport);
3667 } else if (r->rpool.proxy_port[0]) {
3668 nxport->port = htons(r->rpool.
3669 proxy_port[0]);
3670 }
3671 }
3672 break;
3673 }
b0d623f7
A
3674 break;
3675 }
3676 default:
3677 return (NULL);
3678 }
3679 }
3680
3681 return (r);
3682}
3683
3684int
3685pf_socket_lookup(int direction, struct pf_pdesc *pd)
3686{
3687 struct pf_addr *saddr, *daddr;
3688 u_int16_t sport, dport;
6d2010ae
A
3689 struct inpcbinfo *pi;
3690 int inp = 0;
b0d623f7
A
3691
3692 if (pd == NULL)
3693 return (-1);
3694 pd->lookup.uid = UID_MAX;
3695 pd->lookup.gid = GID_MAX;
3696 pd->lookup.pid = NO_PID;
3697
3698 switch (pd->proto) {
3699 case IPPROTO_TCP:
3700 if (pd->hdr.tcp == NULL)
3701 return (-1);
3702 sport = pd->hdr.tcp->th_sport;
3703 dport = pd->hdr.tcp->th_dport;
3704 pi = &tcbinfo;
3705 break;
3706 case IPPROTO_UDP:
3707 if (pd->hdr.udp == NULL)
3708 return (-1);
3709 sport = pd->hdr.udp->uh_sport;
3710 dport = pd->hdr.udp->uh_dport;
3711 pi = &udbinfo;
3712 break;
3713 default:
3714 return (-1);
3715 }
3716 if (direction == PF_IN) {
3717 saddr = pd->src;
3718 daddr = pd->dst;
3719 } else {
3720 u_int16_t p;
3721
3722 p = sport;
3723 sport = dport;
3724 dport = p;
3725 saddr = pd->dst;
3726 daddr = pd->src;
3727 }
3728 switch (pd->af) {
3729#if INET
3730 case AF_INET:
6d2010ae
A
3731 inp = in_pcblookup_hash_exists(pi, saddr->v4, sport, daddr->v4, dport,
3732 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
b7266188 3733#if INET6
6d2010ae 3734 if (inp == 0) {
b7266188
A
3735 struct in6_addr s6, d6;
3736
3737 memset(&s6, 0, sizeof (s6));
3738 s6.s6_addr16[5] = htons(0xffff);
3739 memcpy(&s6.s6_addr32[3], &saddr->v4,
3740 sizeof (saddr->v4));
3741
3742 memset(&d6, 0, sizeof (d6));
3743 d6.s6_addr16[5] = htons(0xffff);
3744 memcpy(&d6.s6_addr32[3], &daddr->v4,
3745 sizeof (daddr->v4));
3746
6d2010ae
A
3747 inp = in6_pcblookup_hash_exists(pi, &s6, sport,
3748 &d6, dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
3749 if (inp == 0) {
3750 inp = in_pcblookup_hash_exists(pi, saddr->v4, sport,
3751 daddr->v4, dport, INPLOOKUP_WILDCARD, &pd->lookup.uid, &pd->lookup.gid, NULL);
3752 if (inp == 0) {
3753 inp = in6_pcblookup_hash_exists(pi, &s6, sport,
b7266188 3754 &d6, dport, INPLOOKUP_WILDCARD,
6d2010ae
A
3755 &pd->lookup.uid, &pd->lookup.gid, NULL);
3756 if (inp == 0)
b7266188
A
3757 return (-1);
3758 }
3759 }
3760 }
3761#else
6d2010ae
A
3762 if (inp == 0) {
3763 inp = in_pcblookup_hash_exists(pi, saddr->v4, sport,
3764 daddr->v4, dport, INPLOOKUP_WILDCARD,
3765 &pd->lookup.uid, &pd->lookup.gid, NULL);
3766 if (inp == 0)
b0d623f7
A
3767 return (-1);
3768 }
b7266188 3769#endif /* !INET6 */
b0d623f7
A
3770 break;
3771#endif /* INET */
3772#if INET6
3773 case AF_INET6:
6d2010ae
A
3774 inp = in6_pcblookup_hash_exists(pi, &saddr->v6, sport, &daddr->v6,
3775 dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
3776 if (inp == 0) {
3777 inp = in6_pcblookup_hash_exists(pi, &saddr->v6, sport,
3778 &daddr->v6, dport, INPLOOKUP_WILDCARD,
3779 &pd->lookup.uid, &pd->lookup.gid, NULL);
3780 if (inp == 0)
b0d623f7
A
3781 return (-1);
3782 }
3783 break;
3784#endif /* INET6 */
6d2010ae 3785
b0d623f7
A
3786 default:
3787 return (-1);
3788 }
3789
b0d623f7
A
3790 return (1);
3791}
3792
3793static u_int8_t
3794pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3795{
3796 int hlen;
3797 u_int8_t hdr[60];
3798 u_int8_t *opt, optlen;
3799 u_int8_t wscale = 0;
3800
3801 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
3802 if (hlen <= (int)sizeof (struct tcphdr))
3803 return (0);
3804 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3805 return (0);
3806 opt = hdr + sizeof (struct tcphdr);
3807 hlen -= sizeof (struct tcphdr);
3808 while (hlen >= 3) {
3809 switch (*opt) {
3810 case TCPOPT_EOL:
3811 case TCPOPT_NOP:
3812 ++opt;
3813 --hlen;
3814 break;
3815 case TCPOPT_WINDOW:
3816 wscale = opt[2];
3817 if (wscale > TCP_MAX_WINSHIFT)
3818 wscale = TCP_MAX_WINSHIFT;
3819 wscale |= PF_WSCALE_FLAG;
3820 /* FALLTHROUGH */
3821 default:
3822 optlen = opt[1];
3823 if (optlen < 2)
3824 optlen = 2;
3825 hlen -= optlen;
3826 opt += optlen;
3827 break;
3828 }
3829 }
3830 return (wscale);
3831}
3832
3833static u_int16_t
3834pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3835{
3836 int hlen;
3837 u_int8_t hdr[60];
3838 u_int8_t *opt, optlen;
3839 u_int16_t mss = tcp_mssdflt;
3840
3841 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
3842 if (hlen <= (int)sizeof (struct tcphdr))
3843 return (0);
3844 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3845 return (0);
3846 opt = hdr + sizeof (struct tcphdr);
3847 hlen -= sizeof (struct tcphdr);
3848 while (hlen >= TCPOLEN_MAXSEG) {
3849 switch (*opt) {
3850 case TCPOPT_EOL:
3851 case TCPOPT_NOP:
3852 ++opt;
3853 --hlen;
3854 break;
3855 case TCPOPT_MAXSEG:
3856 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3857#if BYTE_ORDER != BIG_ENDIAN
3858 NTOHS(mss);
3859#endif
3860 /* FALLTHROUGH */
3861 default:
3862 optlen = opt[1];
3863 if (optlen < 2)
3864 optlen = 2;
3865 hlen -= optlen;
3866 opt += optlen;
3867 break;
3868 }
3869 }
3870 return (mss);
3871}
3872
3873static u_int16_t
3874pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
3875{
3876#if INET
3877 struct sockaddr_in *dst;
3878 struct route ro;
3879#endif /* INET */
3880#if INET6
3881 struct sockaddr_in6 *dst6;
3882 struct route_in6 ro6;
3883#endif /* INET6 */
3884 struct rtentry *rt = NULL;
3885 int hlen;
3886 u_int16_t mss = tcp_mssdflt;
3887
3888 switch (af) {
3889#if INET
3890 case AF_INET:
3891 hlen = sizeof (struct ip);
3892 bzero(&ro, sizeof (ro));
316670eb 3893 dst = (struct sockaddr_in *)(void *)&ro.ro_dst;
b0d623f7
A
3894 dst->sin_family = AF_INET;
3895 dst->sin_len = sizeof (*dst);
3896 dst->sin_addr = addr->v4;
3897 rtalloc(&ro);
3898 rt = ro.ro_rt;
3899 break;
3900#endif /* INET */
3901#if INET6
3902 case AF_INET6:
3903 hlen = sizeof (struct ip6_hdr);
3904 bzero(&ro6, sizeof (ro6));
316670eb 3905 dst6 = (struct sockaddr_in6 *)(void *)&ro6.ro_dst;
b0d623f7
A
3906 dst6->sin6_family = AF_INET6;
3907 dst6->sin6_len = sizeof (*dst6);
3908 dst6->sin6_addr = addr->v6;
3909 rtalloc((struct route *)&ro);
3910 rt = ro6.ro_rt;
3911 break;
3912#endif /* INET6 */
3913 default:
3914 panic("pf_calc_mss: not AF_INET or AF_INET6!");
3915 return (0);
3916 }
3917
3918 if (rt && rt->rt_ifp) {
3919 mss = rt->rt_ifp->if_mtu - hlen - sizeof (struct tcphdr);
3920 mss = max(tcp_mssdflt, mss);
39236c6e 3921 rtfree(rt);
b0d623f7
A
3922 }
3923 mss = min(mss, offer);
3924 mss = max(mss, 64); /* sanity - at least max opt space */
3925 return (mss);
3926}
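
With assumed values, the arithmetic above works out as follows:

	/*
	 * Illustrative only: if_mtu 1500 on an AF_INET route gives
	 *	mss = 1500 - sizeof (struct ip) - sizeof (struct tcphdr)
	 *	    = 1500 - 20 - 20 = 1460
	 * which is then raised to at least tcp_mssdflt, capped at the peer's
	 * offer, and floored at 64 bytes.
	 */
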
3927
3928static void
3929pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
3930{
3931 struct pf_rule *r = s->rule.ptr;
3932
3933 s->rt_kif = NULL;
3934 if (!r->rt || r->rt == PF_FASTROUTE)
3935 return;
3936 switch (s->state_key->af) {
3937#if INET
3938 case AF_INET:
3939 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL,
3940 &s->nat_src_node);
3941 s->rt_kif = r->rpool.cur->kif;
3942 break;
3943#endif /* INET */
3944#if INET6
3945 case AF_INET6:
3946 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL,
3947 &s->nat_src_node);
3948 s->rt_kif = r->rpool.cur->kif;
3949 break;
3950#endif /* INET6 */
3951 }
3952}
3953
3954static void
3955pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
3956{
3957 s->state_key = sk;
3958 sk->refcnt++;
3959
3960 /* list is sorted, if-bound states before floating */
3961 if (tail)
3962 TAILQ_INSERT_TAIL(&sk->states, s, next);
3963 else
3964 TAILQ_INSERT_HEAD(&sk->states, s, next);
3965}
3966
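/*
 * Unlink a state from its state key.  When the key's reference count drops
 * to zero, remove the key from the lookup trees (unless a PF_DT_SKIP_* flag
 * says otherwise) and free it along with any attached app state.
 */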
3967static void
3968pf_detach_state(struct pf_state *s, int flags)
3969{
3970 struct pf_state_key *sk = s->state_key;
3971
3972 if (sk == NULL)
3973 return;
3974
3975 s->state_key = NULL;
3976 TAILQ_REMOVE(&sk->states, s, next);
3977 if (--sk->refcnt == 0) {
3978 if (!(flags & PF_DT_SKIP_EXTGWY))
3979 RB_REMOVE(pf_state_tree_ext_gwy,
3980 &pf_statetbl_ext_gwy, sk);
3981 if (!(flags & PF_DT_SKIP_LANEXT))
3982 RB_REMOVE(pf_state_tree_lan_ext,
3983 &pf_statetbl_lan_ext, sk);
b0d623f7
A
3984 if (sk->app_state)
3985 pool_put(&pf_app_state_pl, sk->app_state);
b0d623f7
A
3986 pool_put(&pf_state_key_pl, sk);
3987 }
3988}
3989
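/*
 * Allocate a state key and attach it to "s".  When a template "psk" is
 * supplied, copy its addresses, ports, protocol and flow information;
 * tree entries, the state list and the refcount are not taken from it.
 */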
3990struct pf_state_key *
316670eb 3991pf_alloc_state_key(struct pf_state *s, struct pf_state_key *psk)
b0d623f7
A
3992{
3993 struct pf_state_key *sk;
3994
3995 if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL)
3996 return (NULL);
3997 bzero(sk, sizeof (*sk));
3998 TAILQ_INIT(&sk->states);
3999 pf_attach_state(sk, s, 0);
4000
316670eb
A
4001 /* initialize state key from psk, if provided */
4002 if (psk != NULL) {
4003 bcopy(&psk->lan, &sk->lan, sizeof (sk->lan));
4004 bcopy(&psk->gwy, &sk->gwy, sizeof (sk->gwy));
4005 bcopy(&psk->ext, &sk->ext, sizeof (sk->ext));
4006 sk->af = psk->af;
4007 sk->proto = psk->proto;
4008 sk->direction = psk->direction;
4009 sk->proto_variant = psk->proto_variant;
4010 VERIFY(psk->app_state == NULL);
39236c6e 4011 sk->flowsrc = psk->flowsrc;
316670eb
A
4012 sk->flowhash = psk->flowhash;
4013 /* don't touch tree entries, states and refcnt on sk */
4014 }
4015
b0d623f7
A
4016 return (sk);
4017}
4018
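/*
 * Generate an initial sequence number for modulated or synproxied TCP
 * connections: MD5 over a boot-time random secret and the connection
 * addresses/ports, plus a random value and a per-call increasing offset.
 */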
4019static u_int32_t
4020pf_tcp_iss(struct pf_pdesc *pd)
4021{
4022 MD5_CTX ctx;
4023 u_int32_t digest[4];
4024
4025 if (pf_tcp_secret_init == 0) {
4026 read_random(pf_tcp_secret, sizeof (pf_tcp_secret));
4027 MD5Init(&pf_tcp_secret_ctx);
4028 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
4029 sizeof (pf_tcp_secret));
4030 pf_tcp_secret_init = 1;
4031 }
4032 ctx = pf_tcp_secret_ctx;
4033
4034 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
4035 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
4036 if (pd->af == AF_INET6) {
4037 MD5Update(&ctx, (char *)&pd->src->v6, sizeof (struct in6_addr));
4038 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof (struct in6_addr));
4039 } else {
4040 MD5Update(&ctx, (char *)&pd->src->v4, sizeof (struct in_addr));
4041 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof (struct in_addr));
4042 }
4043 MD5Final((u_char *)digest, &ctx);
4044 pf_tcp_iss_off += 4096;
4045 return (digest[0] + random() + pf_tcp_iss_off);
4046}
4047
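/*
 * Evaluate the filter ruleset for the first packet of a connection:
 * apply any matching BINAT/NAT/RDR translation, walk the active filter
 * rules, optionally answer a drop with TCP RST or ICMP, and create state
 * (including PPTP/IKE app state) when the matching rule keeps state.
 */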
4048static int
4049pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
4050 struct pfi_kif *kif, struct mbuf *m, int off, void *h,
4051 struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
4052 struct ifqueue *ifq)
4053{
4054#pragma unused(h)
4055 struct pf_rule *nr = NULL;
4056 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
b0d623f7
A
4057 sa_family_t af = pd->af;
4058 struct pf_rule *r, *a = NULL;
4059 struct pf_ruleset *ruleset = NULL;
4060 struct pf_src_node *nsn = NULL;
4061 struct tcphdr *th = pd->hdr.tcp;
4062 u_short reason;
4063 int rewrite = 0, hdrlen = 0;
4064 int tag = -1;
4065 unsigned int rtableid = IFSCOPE_NONE;
4066 int asd = 0;
4067 int match = 0;
4068 int state_icmp = 0;
4069 u_int16_t mss = tcp_mssdflt;
b0d623f7
A
4070 u_int8_t icmptype = 0, icmpcode = 0;
4071
b0d623f7
A
4072 struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
4073 union pf_state_xport bxport, nxport, sxport, dxport;
316670eb 4074 struct pf_state_key psk;
b0d623f7
A
4075
4076 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
4077
4078 if (direction == PF_IN && pf_check_congestion(ifq)) {
4079 REASON_SET(&reason, PFRES_CONGEST);
4080 return (PF_DROP);
4081 }
4082
b0d623f7
A
4083 hdrlen = 0;
4084 sxport.spi = 0;
4085 dxport.spi = 0;
4086 nxport.spi = 0;
b0d623f7
A
4087
4088 switch (pd->proto) {
4089 case IPPROTO_TCP:
b0d623f7
A
4090 sxport.port = th->th_sport;
4091 dxport.port = th->th_dport;
b0d623f7
A
4092 hdrlen = sizeof (*th);
4093 break;
4094 case IPPROTO_UDP:
b0d623f7
A
4095 sxport.port = pd->hdr.udp->uh_sport;
4096 dxport.port = pd->hdr.udp->uh_dport;
b0d623f7
A
4097 hdrlen = sizeof (*pd->hdr.udp);
4098 break;
4099#if INET
4100 case IPPROTO_ICMP:
4101 if (pd->af != AF_INET)
4102 break;
b0d623f7
A
4103 sxport.port = dxport.port = pd->hdr.icmp->icmp_id;
4104 hdrlen = ICMP_MINLEN;
b0d623f7
A
4105 icmptype = pd->hdr.icmp->icmp_type;
4106 icmpcode = pd->hdr.icmp->icmp_code;
4107
4108 if (icmptype == ICMP_UNREACH ||
4109 icmptype == ICMP_SOURCEQUENCH ||
4110 icmptype == ICMP_REDIRECT ||
4111 icmptype == ICMP_TIMXCEED ||
4112 icmptype == ICMP_PARAMPROB)
4113 state_icmp++;
4114 break;
4115#endif /* INET */
4116#if INET6
4117 case IPPROTO_ICMPV6:
4118 if (pd->af != AF_INET6)
4119 break;
b0d623f7 4120 sxport.port = dxport.port = pd->hdr.icmp6->icmp6_id;
b0d623f7
A
4121 hdrlen = sizeof (*pd->hdr.icmp6);
4122 icmptype = pd->hdr.icmp6->icmp6_type;
4123 icmpcode = pd->hdr.icmp6->icmp6_code;
4124
4125 if (icmptype == ICMP6_DST_UNREACH ||
4126 icmptype == ICMP6_PACKET_TOO_BIG ||
4127 icmptype == ICMP6_TIME_EXCEEDED ||
4128 icmptype == ICMP6_PARAM_PROB)
4129 state_icmp++;
4130 break;
4131#endif /* INET6 */
b0d623f7
A
4132 case IPPROTO_GRE:
4133 if (pd->proto_variant == PF_GRE_PPTP_VARIANT) {
4134 sxport.call_id = dxport.call_id =
4135 pd->hdr.grev1->call_id;
4136 hdrlen = sizeof (*pd->hdr.grev1);
4137 }
4138 break;
4139 case IPPROTO_ESP:
4140 sxport.spi = 0;
4141 dxport.spi = pd->hdr.esp->spi;
4142 hdrlen = sizeof (*pd->hdr.esp);
4143 break;
b0d623f7
A
4144 }
4145
4146 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4147
4148 if (direction == PF_OUT) {
b0d623f7
A
4149 bxport = nxport = sxport;
4150 /* check outgoing packet for BINAT/NAT */
4151 if ((nr = pf_get_translation_aux(pd, m, off, PF_OUT, kif, &nsn,
4152 saddr, &sxport, daddr, &dxport, &pd->naddr, &nxport)) !=
4153 NULL) {
b0d623f7
A
4154 PF_ACPY(&pd->baddr, saddr, af);
4155 switch (pd->proto) {
4156 case IPPROTO_TCP:
b0d623f7
A
4157 pf_change_ap(direction, pd->mp, saddr,
4158 &th->th_sport, pd->ip_sum, &th->th_sum,
4159 &pd->naddr, nxport.port, 0, af);
4160 sxport.port = th->th_sport;
b0d623f7
A
4161 rewrite++;
4162 break;
4163 case IPPROTO_UDP:
b0d623f7
A
4164 pf_change_ap(direction, pd->mp, saddr,
4165 &pd->hdr.udp->uh_sport, pd->ip_sum,
4166 &pd->hdr.udp->uh_sum, &pd->naddr,
4167 nxport.port, 1, af);
4168 sxport.port = pd->hdr.udp->uh_sport;
b0d623f7
A
4169 rewrite++;
4170 break;
4171#if INET
4172 case IPPROTO_ICMP:
316670eb
A
4173 if (pd->af == AF_INET) {
4174 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
4175 pd->naddr.v4.s_addr, 0);
4176 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
4177 pd->hdr.icmp->icmp_cksum, sxport.port,
4178 nxport.port, 0);
4179 pd->hdr.icmp->icmp_id = nxport.port;
4180 ++rewrite;
4181 }
b0d623f7
A
4182 break;
4183#endif /* INET */
4184#if INET6
4185 case IPPROTO_ICMPV6:
316670eb
A
4186 if (pd->af == AF_INET6) {
4187 pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
4188 &pd->naddr, 0);
4189 rewrite++;
4190 }
b0d623f7
A
4191 break;
4192#endif /* INET6 */
b0d623f7
A
4193 case IPPROTO_GRE:
4194 switch (af) {
4195#if INET
4196 case AF_INET:
4197 pf_change_a(&saddr->v4.s_addr,
4198 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4199 break;
4200#endif /* INET */
4201#if INET6
4202 case AF_INET6:
4203 PF_ACPY(saddr, &pd->naddr, AF_INET6);
4204 break;
4205#endif /* INET6 */
4206 }
4207 ++rewrite;
4208 break;
4209 case IPPROTO_ESP:
4210 bxport.spi = 0;
4211 switch (af) {
4212#if INET
4213 case AF_INET:
4214 pf_change_a(&saddr->v4.s_addr,
4215 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4216 break;
4217#endif /* INET */
4218#if INET6
4219 case AF_INET6:
4220 PF_ACPY(saddr, &pd->naddr, AF_INET6);
4221 break;
4222#endif /* INET6 */
4223 }
4224 break;
b0d623f7
A
4225 default:
4226 switch (af) {
4227#if INET
4228 case AF_INET:
4229 pf_change_a(&saddr->v4.s_addr,
4230 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4231 break;
4232#endif /* INET */
4233#if INET6
4234 case AF_INET6:
4235 PF_ACPY(saddr, &pd->naddr, af);
4236 break;
4237#endif /* INET6 */
4238 }
4239 break;
4240 }
4241
4242 if (nr->natpass)
4243 r = NULL;
4244 pd->nat_rule = nr;
4245 }
4246 } else {
b0d623f7
A
4247 bxport.port = nxport.port = dxport.port;
4248 /* check incoming packet for BINAT/RDR */
4249 if ((nr = pf_get_translation_aux(pd, m, off, PF_IN, kif, &nsn,
4250 saddr, &sxport, daddr, &dxport, &pd->naddr, &nxport)) !=
4251 NULL) {
b0d623f7
A
4252 PF_ACPY(&pd->baddr, daddr, af);
4253 switch (pd->proto) {
4254 case IPPROTO_TCP:
b0d623f7
A
4255 pf_change_ap(direction, pd->mp, daddr,
4256 &th->th_dport, pd->ip_sum, &th->th_sum,
4257 &pd->naddr, nxport.port, 0, af);
4258 dxport.port = th->th_dport;
b0d623f7
A
4259 rewrite++;
4260 break;
4261 case IPPROTO_UDP:
b0d623f7
A
4262 pf_change_ap(direction, pd->mp, daddr,
4263 &pd->hdr.udp->uh_dport, pd->ip_sum,
4264 &pd->hdr.udp->uh_sum, &pd->naddr,
4265 nxport.port, 1, af);
4266 dxport.port = pd->hdr.udp->uh_dport;
b0d623f7
A
4267 rewrite++;
4268 break;
4269#if INET
4270 case IPPROTO_ICMP:
316670eb
A
4271 if (pd->af == AF_INET) {
4272 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
4273 pd->naddr.v4.s_addr, 0);
4274 }
b0d623f7
A
4275 break;
4276#endif /* INET */
4277#if INET6
4278 case IPPROTO_ICMPV6:
316670eb
A
4279 if (pd->af == AF_INET6) {
4280 pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
4281 &pd->naddr, 0);
4282 rewrite++;
4283 }
b0d623f7
A
4284 break;
4285#endif /* INET6 */
b0d623f7
A
4286 case IPPROTO_GRE:
4287 if (pd->proto_variant == PF_GRE_PPTP_VARIANT)
4288 grev1->call_id = nxport.call_id;
4289
4290 switch (af) {
4291#if INET
4292 case AF_INET:
4293 pf_change_a(&daddr->v4.s_addr,
4294 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4295 break;
4296#endif /* INET */
4297#if INET6
4298 case AF_INET6:
4299 PF_ACPY(daddr, &pd->naddr, AF_INET6);
4300 break;
4301#endif /* INET6 */
4302 }
4303 ++rewrite;
4304 break;
4305 case IPPROTO_ESP:
4306 switch (af) {
4307#if INET
4308 case AF_INET:
4309 pf_change_a(&daddr->v4.s_addr,
4310 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4311 break;
4312#endif /* INET */
4313#if INET6
4314 case AF_INET6:
4315 PF_ACPY(daddr, &pd->naddr, AF_INET6);
4316 break;
4317#endif /* INET6 */
4318 }
4319 break;
b0d623f7
A
4320 default:
4321 switch (af) {
4322#if INET
4323 case AF_INET:
4324 pf_change_a(&daddr->v4.s_addr,
4325 pd->ip_sum, pd->naddr.v4.s_addr, 0);
4326 break;
4327#endif /* INET */
4328#if INET6
4329 case AF_INET6:
4330 PF_ACPY(daddr, &pd->naddr, af);
4331 break;
4332#endif /* INET6 */
4333 }
4334 break;
4335 }
4336
4337 if (nr->natpass)
4338 r = NULL;
4339 pd->nat_rule = nr;
4340 }
4341 }
4342
b0d623f7
A
4343 if (nr && nr->tag > 0)
4344 tag = nr->tag;
b0d623f7
A
4345
4346 while (r != NULL) {
4347 r->evaluations++;
4348 if (pfi_kif_match(r->kif, kif) == r->ifnot)
4349 r = r->skip[PF_SKIP_IFP].ptr;
4350 else if (r->direction && r->direction != direction)
4351 r = r->skip[PF_SKIP_DIR].ptr;
4352 else if (r->af && r->af != af)
4353 r = r->skip[PF_SKIP_AF].ptr;
4354 else if (r->proto && r->proto != pd->proto)
4355 r = r->skip[PF_SKIP_PROTO].ptr;
4356 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
4357 r->src.neg, kif))
4358 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4359 /* tcp/udp only. port_op always 0 in other cases */
b0d623f7
A
4360 else if (r->proto == pd->proto &&
4361 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
4362 r->src.xport.range.op &&
4363 !pf_match_port(r->src.xport.range.op,
4364 r->src.xport.range.port[0], r->src.xport.range.port[1],
4365 th->th_sport))
b0d623f7
A
4366 r = r->skip[PF_SKIP_SRC_PORT].ptr;
4367 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
4368 r->dst.neg, NULL))
4369 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4370 /* tcp/udp only. port_op always 0 in other cases */
b0d623f7
A
4371 else if (r->proto == pd->proto &&
4372 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
4373 r->dst.xport.range.op &&
4374 !pf_match_port(r->dst.xport.range.op,
4375 r->dst.xport.range.port[0], r->dst.xport.range.port[1],
4376 th->th_dport))
b0d623f7
A
4377 r = r->skip[PF_SKIP_DST_PORT].ptr;
4378 /* icmp only. type always 0 in other cases */
4379 else if (r->type && r->type != icmptype + 1)
4380 r = TAILQ_NEXT(r, entries);
4381 /* icmp only. code always 0 in other cases */
4382 else if (r->code && r->code != icmpcode + 1)
4383 r = TAILQ_NEXT(r, entries);
316670eb
A
4384 else if ((r->rule_flag & PFRULE_TOS) && r->tos &&
4385 !(r->tos & pd->tos))
4386 r = TAILQ_NEXT(r, entries);
4387 else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
4388 !(r->tos & (pd->tos & DSCP_MASK)))
4389 r = TAILQ_NEXT(r, entries);
4390 else if ((r->rule_flag & PFRULE_SC) && r->tos &&
4391 ((r->tos & SCIDX_MASK) != pd->sc))
b0d623f7
A
4392 r = TAILQ_NEXT(r, entries);
4393 else if (r->rule_flag & PFRULE_FRAGMENT)
4394 r = TAILQ_NEXT(r, entries);
4395 else if (pd->proto == IPPROTO_TCP &&
4396 (r->flagset & th->th_flags) != r->flags)
4397 r = TAILQ_NEXT(r, entries);
4398 /* tcp/udp only. uid.op always 0 in other cases */
4399 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
4400 pf_socket_lookup(direction, pd), 1)) &&
4401 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
4402 pd->lookup.uid))
4403 r = TAILQ_NEXT(r, entries);
4404 /* tcp/udp only. gid.op always 0 in other cases */
4405 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
4406 pf_socket_lookup(direction, pd), 1)) &&
4407 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
4408 pd->lookup.gid))
4409 r = TAILQ_NEXT(r, entries);
39236c6e 4410 else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1))
b0d623f7
A
4411 r = TAILQ_NEXT(r, entries);
4412 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
4413 r = TAILQ_NEXT(r, entries);
4414 else if (r->os_fingerprint != PF_OSFP_ANY &&
4415 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
4416 pf_osfp_fingerprint(pd, m, off, th),
4417 r->os_fingerprint)))
4418 r = TAILQ_NEXT(r, entries);
4419 else {
4420 if (r->tag)
4421 tag = r->tag;
4422 if (PF_RTABLEID_IS_VALID(r->rtableid))
4423 rtableid = r->rtableid;
4424 if (r->anchor == NULL) {
4425 match = 1;
4426 *rm = r;
4427 *am = a;
4428 *rsm = ruleset;
4429 if ((*rm)->quick)
4430 break;
4431 r = TAILQ_NEXT(r, entries);
4432 } else
4433 pf_step_into_anchor(&asd, &ruleset,
4434 PF_RULESET_FILTER, &r, &a, &match);
4435 }
4436 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
4437 PF_RULESET_FILTER, &r, &a, &match))
4438 break;
4439 }
4440 r = *rm;
4441 a = *am;
4442 ruleset = *rsm;
4443
4444 REASON_SET(&reason, PFRES_MATCH);
4445
4446 if (r->log || (nr != NULL && nr->log)) {
b0d623f7
A
4447 if (rewrite > 0) {
4448 if (rewrite < off + hdrlen)
4449 rewrite = off + hdrlen;
4450
4451 m = pf_lazy_makewritable(pd, m, rewrite);
4452 if (!m) {
4453 REASON_SET(&reason, PFRES_MEMORY);
4454 return (PF_DROP);
4455 }
4456
4457 m_copyback(m, off, hdrlen, pd->hdr.any);
4458 }
b0d623f7
A
4459 PFLOG_PACKET(kif, h, m, af, direction, reason, r->log ? r : nr,
4460 a, ruleset, pd);
4461 }
4462
4463 if ((r->action == PF_DROP) &&
4464 ((r->rule_flag & PFRULE_RETURNRST) ||
4465 (r->rule_flag & PFRULE_RETURNICMP) ||
4466 (r->rule_flag & PFRULE_RETURN))) {
4467 /* undo NAT changes, if they have taken place */
4468 if (nr != NULL) {
4469 if (direction == PF_OUT) {
4470 switch (pd->proto) {
4471 case IPPROTO_TCP:
b0d623f7
A
4472 pf_change_ap(direction, pd->mp, saddr,
4473 &th->th_sport, pd->ip_sum,
4474 &th->th_sum, &pd->baddr,
4475 bxport.port, 0, af);
4476 sxport.port = th->th_sport;
b0d623f7
A
4477 rewrite++;
4478 break;
4479 case IPPROTO_UDP:
b0d623f7
A
4480 pf_change_ap(direction, pd->mp, saddr,
4481 &pd->hdr.udp->uh_sport, pd->ip_sum,
4482 &pd->hdr.udp->uh_sum, &pd->baddr,
4483 bxport.port, 1, af);
4484 sxport.port = pd->hdr.udp->uh_sport;
b0d623f7
A
4485 rewrite++;
4486 break;
4487 case IPPROTO_ICMP:
4488#if INET6
4489 case IPPROTO_ICMPV6:
4490#endif
4491 /* nothing! */
4492 break;
b0d623f7
A
4493 case IPPROTO_GRE:
4494 PF_ACPY(&pd->baddr, saddr, af);
4495 ++rewrite;
4496 switch (af) {
4497#if INET
4498 case AF_INET:
4499 pf_change_a(&saddr->v4.s_addr,
4500 pd->ip_sum,
4501 pd->baddr.v4.s_addr, 0);
4502 break;
4503#endif /* INET */
4504#if INET6
4505 case AF_INET6:
4506 PF_ACPY(saddr, &pd->baddr,
4507 AF_INET6);
4508 break;
4509#endif /* INET6 */
4510 }
4511 break;
4512 case IPPROTO_ESP:
4513 PF_ACPY(&pd->baddr, saddr, af);
4514 switch (af) {
4515#if INET
4516 case AF_INET:
4517 pf_change_a(&saddr->v4.s_addr,
4518 pd->ip_sum,
4519 pd->baddr.v4.s_addr, 0);
4520 break;
4521#endif /* INET */
4522#if INET6
4523 case AF_INET6:
4524 PF_ACPY(saddr, &pd->baddr,
4525 AF_INET6);
4526 break;
4527#endif /* INET6 */
4528 }
4529 break;
b0d623f7
A
4530 default:
4531 switch (af) {
4532 case AF_INET:
4533 pf_change_a(&saddr->v4.s_addr,
4534 pd->ip_sum,
4535 pd->baddr.v4.s_addr, 0);
4536 break;
4537 case AF_INET6:
4538 PF_ACPY(saddr, &pd->baddr, af);
4539 break;
4540 }
4541 }
4542 } else {
4543 switch (pd->proto) {
4544 case IPPROTO_TCP:
b0d623f7
A
4545 pf_change_ap(direction, pd->mp, daddr,
4546 &th->th_dport, pd->ip_sum,
4547 &th->th_sum, &pd->baddr,
4548 bxport.port, 0, af);
4549 dxport.port = th->th_dport;
b0d623f7
A
4550 rewrite++;
4551 break;
4552 case IPPROTO_UDP:
b0d623f7
A
4553 pf_change_ap(direction, pd->mp, daddr,
4554 &pd->hdr.udp->uh_dport, pd->ip_sum,
4555 &pd->hdr.udp->uh_sum, &pd->baddr,
4556 bxport.port, 1, af);
4557 dxport.port = pd->hdr.udp->uh_dport;
b0d623f7
A
4558 rewrite++;
4559 break;
4560 case IPPROTO_ICMP:
4561#if INET6
4562 case IPPROTO_ICMPV6:
4563#endif
4564 /* nothing! */
4565 break;
b0d623f7
A
4566 case IPPROTO_GRE:
4567 if (pd->proto_variant ==
4568 PF_GRE_PPTP_VARIANT)
4569 grev1->call_id = bxport.call_id;
4570 ++rewrite;
4571 switch (af) {
4572#if INET
4573 case AF_INET:
4574 pf_change_a(&daddr->v4.s_addr,
4575 pd->ip_sum,
4576 pd->baddr.v4.s_addr, 0);
4577 break;
4578#endif /* INET */
4579#if INET6
4580 case AF_INET6:
4581 PF_ACPY(daddr, &pd->baddr,
4582 AF_INET6);
4583 break;
4584#endif /* INET6 */
4585 }
4586 break;
4587 case IPPROTO_ESP:
4588 switch (af) {
4589#if INET
4590 case AF_INET:
4591 pf_change_a(&daddr->v4.s_addr,
4592 pd->ip_sum,
4593 pd->baddr.v4.s_addr, 0);
4594 break;
4595#endif /* INET */
4596#if INET6
4597 case AF_INET6:
4598 PF_ACPY(daddr, &pd->baddr,
4599 AF_INET6);
4600 break;
4601#endif /* INET6 */
4602 }
4603 break;
b0d623f7
A
4604 default:
4605 switch (af) {
4606 case AF_INET:
4607 pf_change_a(&daddr->v4.s_addr,
4608 pd->ip_sum,
4609 pd->baddr.v4.s_addr, 0);
4610 break;
4611#if INET6
4612 case AF_INET6:
4613 PF_ACPY(daddr, &pd->baddr, af);
4614 break;
4615#endif /* INET6 */
4616 }
4617 }
4618 }
4619 }
4620 if (pd->proto == IPPROTO_TCP &&
4621 ((r->rule_flag & PFRULE_RETURNRST) ||
4622 (r->rule_flag & PFRULE_RETURN)) &&
4623 !(th->th_flags & TH_RST)) {
4624 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
4625 int len = 0;
4626 struct ip *h4;
4627#if INET6
4628 struct ip6_hdr *h6;
4629#endif /* INET6 */
4630
4631 switch (af) {
4632 case AF_INET:
4633 h4 = mtod(m, struct ip *);
4634 len = ntohs(h4->ip_len) - off;
4635 break;
4636#if INET6
4637 case AF_INET6:
4638 h6 = mtod(m, struct ip6_hdr *);
4639 len = ntohs(h6->ip6_plen) -
4640 (off - sizeof (*h6));
4641 break;
4642#endif /* INET6 */
4643 }
4644
4645 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
4646 REASON_SET(&reason, PFRES_PROTCKSUM);
4647 else {
4648 if (th->th_flags & TH_SYN)
4649 ack++;
4650 if (th->th_flags & TH_FIN)
4651 ack++;
4652 pf_send_tcp(r, af, pd->dst,
4653 pd->src, th->th_dport, th->th_sport,
4654 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
4655 r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
4656 }
4657 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
b0d623f7 4658 pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
b0d623f7
A
4659 r->return_icmp)
4660 pf_send_icmp(m, r->return_icmp >> 8,
4661 r->return_icmp & 255, af, r);
4662 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
b0d623f7 4663 pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
b0d623f7
A
4664 r->return_icmp6)
4665 pf_send_icmp(m, r->return_icmp6 >> 8,
4666 r->return_icmp6 & 255, af, r);
4667 }
4668
4669 if (r->action == PF_DROP)
4670 return (PF_DROP);
4671
316670eb
A
4672 /* prepare state key, for flowhash and/or the state (if created) */
4673 bzero(&psk, sizeof (psk));
4674 psk.proto = pd->proto;
4675 psk.direction = direction;
4676 psk.af = af;
4677 if (pd->proto == IPPROTO_UDP) {
4678 if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
4679 ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
4680 psk.proto_variant = PF_EXTFILTER_APD;
4681 } else {
4682 psk.proto_variant = nr ? nr->extfilter : r->extfilter;
4683 if (psk.proto_variant < PF_EXTFILTER_APD)
4684 psk.proto_variant = PF_EXTFILTER_APD;
4685 }
4686 } else if (pd->proto == IPPROTO_GRE) {
4687 psk.proto_variant = pd->proto_variant;
4688 }
4689 if (direction == PF_OUT) {
4690 PF_ACPY(&psk.gwy.addr, saddr, af);
4691 PF_ACPY(&psk.ext.addr, daddr, af);
4692 switch (pd->proto) {
4693 case IPPROTO_UDP:
4694 psk.gwy.xport = sxport;
4695 psk.ext.xport = dxport;
4696 break;
4697 case IPPROTO_ESP:
4698 psk.gwy.xport.spi = 0;
4699 psk.ext.xport.spi = pd->hdr.esp->spi;
4700 break;
4701 case IPPROTO_ICMP:
4702#if INET6
4703 case IPPROTO_ICMPV6:
4704#endif
4705 psk.gwy.xport.port = nxport.port;
4706 psk.ext.xport.spi = 0;
4707 break;
4708 default:
4709 psk.gwy.xport = sxport;
4710 psk.ext.xport = dxport;
4711 break;
4712 }
4713 if (nr != NULL) {
4714 PF_ACPY(&psk.lan.addr, &pd->baddr, af);
4715 psk.lan.xport = bxport;
4716 } else {
4717 PF_ACPY(&psk.lan.addr, &psk.gwy.addr, af);
4718 psk.lan.xport = psk.gwy.xport;
4719 }
4720 } else {
4721 PF_ACPY(&psk.lan.addr, daddr, af);
4722 PF_ACPY(&psk.ext.addr, saddr, af);
4723 switch (pd->proto) {
4724 case IPPROTO_ICMP:
4725#if INET6
4726 case IPPROTO_ICMPV6:
4727#endif
4728 psk.lan.xport = nxport;
4729 psk.ext.xport.spi = 0;
4730 break;
4731 case IPPROTO_ESP:
4732 psk.ext.xport.spi = 0;
4733 psk.lan.xport.spi = pd->hdr.esp->spi;
4734 break;
4735 default:
4736 psk.lan.xport = dxport;
4737 psk.ext.xport = sxport;
4738 break;
4739 }
4740 if (nr != NULL) {
4741 PF_ACPY(&psk.gwy.addr, &pd->baddr, af);
4742 psk.gwy.xport = bxport;
4743 } else {
4744 PF_ACPY(&psk.gwy.addr, &psk.lan.addr, af);
4745 psk.gwy.xport = psk.lan.xport;
4746 }
4747 }
39236c6e
A
4748 if (pd->pktflags & PKTF_FLOW_ID) {
4749 /* flow hash was already computed outside of PF */
4750 psk.flowsrc = pd->flowsrc;
316670eb
A
4751 psk.flowhash = pd->flowhash;
4752 } else {
39236c6e
A
4753 /* compute flow hash and store it in state key */
4754 psk.flowsrc = FLOWSRC_PF;
316670eb 4755 psk.flowhash = pf_calc_state_key_flowhash(&psk);
39236c6e 4756 pd->flowsrc = psk.flowsrc;
316670eb 4757 pd->flowhash = psk.flowhash;
39236c6e
A
4758 pd->pktflags |= PKTF_FLOW_ID;
4759 pd->pktflags &= ~PKTF_FLOW_ADV;
316670eb
A
4760 }
4761
4762 if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid, pd)) {
b0d623f7
A
4763 REASON_SET(&reason, PFRES_MEMORY);
4764 return (PF_DROP);
4765 }
4766
4767 if (!state_icmp && (r->keep_state || nr != NULL ||
4768 (pd->flags & PFDESC_TCP_NORM))) {
4769 /* create new state */
4770 struct pf_state *s = NULL;
4771 struct pf_state_key *sk = NULL;
4772 struct pf_src_node *sn = NULL;
b0d623f7
A
4773 struct pf_ike_hdr ike;
4774
4775 if (pd->proto == IPPROTO_UDP) {
4776 struct udphdr *uh = pd->hdr.udp;
4777 size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
4778
b7266188
A
4779 if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
4780 ntohs(uh->uh_dport) == PF_IKE_PORT &&
b0d623f7
A
4781 plen >= PF_IKE_PACKET_MINSIZE) {
4782 if (plen > PF_IKE_PACKET_MINSIZE)
4783 plen = PF_IKE_PACKET_MINSIZE;
4784 m_copydata(m, off + sizeof (*uh), plen, &ike);
4785 }
4786 }
4787
4788 if (nr != NULL && pd->proto == IPPROTO_ESP &&
4789 direction == PF_OUT) {
4790 struct pf_state_key_cmp sk0;
4791 struct pf_state *s0;
4792
4793 /*
4794 * <jhw@apple.com>
4795 * This squelches state creation if the external
4796 * address matches an existing incomplete state with a
4797 * different internal address. Only one 'blocking'
4798 * partial state is allowed for each external address.
4799 */
4800 memset(&sk0, 0, sizeof (sk0));
4801 sk0.af = pd->af;
4802 sk0.proto = IPPROTO_ESP;
4803 PF_ACPY(&sk0.gwy.addr, saddr, sk0.af);
4804 PF_ACPY(&sk0.ext.addr, daddr, sk0.af);
4805 s0 = pf_find_state(kif, &sk0, PF_IN);
4806
4807 if (s0 && PF_ANEQ(&s0->state_key->lan.addr,
4808 pd->src, pd->af)) {
4809 nsn = 0;
4810 goto cleanup;
4811 }
4812 }
b0d623f7
A
4813
4814 /* check maximums */
4815 if (r->max_states && (r->states >= r->max_states)) {
4816 pf_status.lcounters[LCNT_STATES]++;
4817 REASON_SET(&reason, PFRES_MAXSTATES);
4818 goto cleanup;
4819 }
4820 /* src node for filter rule */
4821 if ((r->rule_flag & PFRULE_SRCTRACK ||
4822 r->rpool.opts & PF_POOL_STICKYADDR) &&
4823 pf_insert_src_node(&sn, r, saddr, af) != 0) {
4824 REASON_SET(&reason, PFRES_SRCLIMIT);
4825 goto cleanup;
4826 }
4827 /* src node for translation rule */
4828 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
4829 ((direction == PF_OUT &&
b0d623f7 4830 nr->action != PF_RDR &&
b0d623f7
A
4831 pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
4832 (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
4833 REASON_SET(&reason, PFRES_SRCLIMIT);
4834 goto cleanup;
4835 }
4836 s = pool_get(&pf_state_pl, PR_WAITOK);
4837 if (s == NULL) {
4838 REASON_SET(&reason, PFRES_MEMORY);
4839cleanup:
4840 if (sn != NULL && sn->states == 0 && sn->expire == 0) {
4841 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
4842 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4843 pf_status.src_nodes--;
4844 pool_put(&pf_src_tree_pl, sn);
4845 }
4846 if (nsn != sn && nsn != NULL && nsn->states == 0 &&
4847 nsn->expire == 0) {
4848 RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
4849 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
4850 pf_status.src_nodes--;
4851 pool_put(&pf_src_tree_pl, nsn);
4852 }
4853 if (sk != NULL) {
b0d623f7
A
4854 if (sk->app_state)
4855 pool_put(&pf_app_state_pl,
4856 sk->app_state);
b0d623f7
A
4857 pool_put(&pf_state_key_pl, sk);
4858 }
4859 return (PF_DROP);
4860 }
4861 bzero(s, sizeof (*s));
b0d623f7 4862 TAILQ_INIT(&s->unlink_hooks);
b0d623f7
A
4863 s->rule.ptr = r;
4864 s->nat_rule.ptr = nr;
d1ecb069 4865 s->anchor.ptr = a;
b0d623f7
A
4866 STATE_INC_COUNTERS(s);
4867 s->allow_opts = r->allow_opts;
4868 s->log = r->log & PF_LOG_ALL;
4869 if (nr != NULL)
4870 s->log |= nr->log & PF_LOG_ALL;
4871 switch (pd->proto) {
4872 case IPPROTO_TCP:
4873 s->src.seqlo = ntohl(th->th_seq);
4874 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
4875 if ((th->th_flags & (TH_SYN|TH_ACK)) ==
4876 TH_SYN && r->keep_state == PF_STATE_MODULATE) {
4877 /* Generate sequence number modulator */
4878 if ((s->src.seqdiff = pf_tcp_iss(pd) -
4879 s->src.seqlo) == 0)
4880 s->src.seqdiff = 1;
4881 pf_change_a(&th->th_seq, &th->th_sum,
4882 htonl(s->src.seqlo + s->src.seqdiff), 0);
4883 rewrite = off + sizeof (*th);
4884 } else
4885 s->src.seqdiff = 0;
4886 if (th->th_flags & TH_SYN) {
4887 s->src.seqhi++;
4888 s->src.wscale = pf_get_wscale(m, off,
4889 th->th_off, af);
4890 }
4891 s->src.max_win = MAX(ntohs(th->th_win), 1);
4892 if (s->src.wscale & PF_WSCALE_MASK) {
4893 /* Remove scale factor from initial window */
4894 int win = s->src.max_win;
4895 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
4896 s->src.max_win = (win - 1) >>
4897 (s->src.wscale & PF_WSCALE_MASK);
4898 }
4899 if (th->th_flags & TH_FIN)
4900 s->src.seqhi++;
4901 s->dst.seqhi = 1;
4902 s->dst.max_win = 1;
4903 s->src.state = TCPS_SYN_SENT;
4904 s->dst.state = TCPS_CLOSED;
4905 s->timeout = PFTM_TCP_FIRST_PACKET;
4906 break;
4907 case IPPROTO_UDP:
4908 s->src.state = PFUDPS_SINGLE;
4909 s->dst.state = PFUDPS_NO_TRAFFIC;
4910 s->timeout = PFTM_UDP_FIRST_PACKET;
4911 break;
4912 case IPPROTO_ICMP:
4913#if INET6
4914 case IPPROTO_ICMPV6:
4915#endif
4916 s->timeout = PFTM_ICMP_FIRST_PACKET;
4917 break;
b0d623f7
A
4918 case IPPROTO_GRE:
4919 s->src.state = PFGRE1S_INITIATING;
4920 s->dst.state = PFGRE1S_NO_TRAFFIC;
4921 s->timeout = PFTM_GREv1_INITIATING;
4922 break;
4923 case IPPROTO_ESP:
4924 s->src.state = PFESPS_INITIATING;
4925 s->dst.state = PFESPS_NO_TRAFFIC;
4926 s->timeout = PFTM_ESP_FIRST_PACKET;
4927 break;
b0d623f7
A
4928 default:
4929 s->src.state = PFOTHERS_SINGLE;
4930 s->dst.state = PFOTHERS_NO_TRAFFIC;
4931 s->timeout = PFTM_OTHER_FIRST_PACKET;
4932 }
4933
4934 s->creation = pf_time_second();
4935 s->expire = pf_time_second();
4936
4937 if (sn != NULL) {
4938 s->src_node = sn;
4939 s->src_node->states++;
b7266188 4940 VERIFY(s->src_node->states != 0);
b0d623f7
A
4941 }
4942 if (nsn != NULL) {
4943 PF_ACPY(&nsn->raddr, &pd->naddr, af);
4944 s->nat_src_node = nsn;
4945 s->nat_src_node->states++;
b7266188 4946 VERIFY(s->nat_src_node->states != 0);
b0d623f7
A
4947 }
4948 if (pd->proto == IPPROTO_TCP) {
4949 if ((pd->flags & PFDESC_TCP_NORM) &&
4950 pf_normalize_tcp_init(m, off, pd, th, &s->src,
4951 &s->dst)) {
4952 REASON_SET(&reason, PFRES_MEMORY);
4953 pf_src_tree_remove_state(s);
4954 STATE_DEC_COUNTERS(s);
4955 pool_put(&pf_state_pl, s);
4956 return (PF_DROP);
4957 }
4958 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
4959 pf_normalize_tcp_stateful(m, off, pd, &reason,
4960 th, s, &s->src, &s->dst, &rewrite)) {
4961 /* This really shouldn't happen!!! */
4962 DPFPRINTF(PF_DEBUG_URGENT,
4963 ("pf_normalize_tcp_stateful failed on "
4964 "first pkt"));
4965 pf_normalize_tcp_cleanup(s);
4966 pf_src_tree_remove_state(s);
4967 STATE_DEC_COUNTERS(s);
4968 pool_put(&pf_state_pl, s);
4969 return (PF_DROP);
4970 }
4971 }
4972
316670eb
A
4973 /* allocate state key and import values from psk */
4974 if ((sk = pf_alloc_state_key(s, &psk)) == NULL) {
b0d623f7
A
4975 REASON_SET(&reason, PFRES_MEMORY);
4976 goto cleanup;
4977 }
4978
b0d623f7
A
4979 pf_set_rt_ifp(s, saddr); /* needs s->state_key set */
4980
b0d623f7
A
4981 m = pd->mp;
4982
4983 if (sk->app_state == 0) {
4984 switch (pd->proto) {
4985 case IPPROTO_TCP: {
4986 u_int16_t dport = (direction == PF_OUT) ?
4987 sk->ext.xport.port : sk->gwy.xport.port;
4988
b7266188
A
4989 if (nr != NULL &&
4990 ntohs(dport) == PF_PPTP_PORT) {
b0d623f7
A
4991 struct pf_app_state *as;
4992
4993 as = pool_get(&pf_app_state_pl,
4994 PR_WAITOK);
4995 if (!as) {
4996 REASON_SET(&reason,
4997 PFRES_MEMORY);
4998 goto cleanup;
4999 }
5000
5001 bzero(as, sizeof (*as));
5002 as->handler = pf_pptp_handler;
5003 as->compare_lan_ext = 0;
5004 as->compare_ext_gwy = 0;
5005 as->u.pptp.grev1_state = 0;
5006 sk->app_state = as;
5007 (void) hook_establish(&s->unlink_hooks,
5008 0, (hook_fn_t) pf_pptp_unlink, s);
5009 }
5010 break;
5011 }
5012
5013 case IPPROTO_UDP: {
5014 struct udphdr *uh = pd->hdr.udp;
5015
b7266188
A
5016 if (nr != NULL &&
5017 ntohs(uh->uh_sport) == PF_IKE_PORT &&
5018 ntohs(uh->uh_dport) == PF_IKE_PORT) {
b0d623f7
A
5019 struct pf_app_state *as;
5020
5021 as = pool_get(&pf_app_state_pl,
5022 PR_WAITOK);
5023 if (!as) {
5024 REASON_SET(&reason,
5025 PFRES_MEMORY);
5026 goto cleanup;
5027 }
5028
5029 bzero(as, sizeof (*as));
5030 as->compare_lan_ext = pf_ike_compare;
5031 as->compare_ext_gwy = pf_ike_compare;
5032 as->u.ike.cookie = ike.initiator_cookie;
5033 sk->app_state = as;
5034 }
5035 break;
5036 }
5037
5038 default:
5039 break;
5040 }
5041 }
b0d623f7
A
5042
5043 if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
5044 if (pd->proto == IPPROTO_TCP)
5045 pf_normalize_tcp_cleanup(s);
5046 REASON_SET(&reason, PFRES_STATEINS);
5047 pf_src_tree_remove_state(s);
5048 STATE_DEC_COUNTERS(s);
5049 pool_put(&pf_state_pl, s);
5050 return (PF_DROP);
5051 } else
5052 *sm = s;
5053 if (tag > 0) {
5054 pf_tag_ref(tag);
5055 s->tag = tag;
5056 }
5057 if (pd->proto == IPPROTO_TCP &&
5058 (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
5059 r->keep_state == PF_STATE_SYNPROXY) {
5060 s->src.state = PF_TCPS_PROXY_SRC;
5061 if (nr != NULL) {
b0d623f7
A
5062 if (direction == PF_OUT) {
5063 pf_change_ap(direction, pd->mp, saddr,
5064 &th->th_sport, pd->ip_sum,
5065 &th->th_sum, &pd->baddr,
5066 bxport.port, 0, af);
5067 sxport.port = th->th_sport;
5068 } else {
5069 pf_change_ap(direction, pd->mp, daddr,
5070 &th->th_dport, pd->ip_sum,
5071 &th->th_sum, &pd->baddr,
5072 bxport.port, 0, af);
5073 sxport.port = th->th_dport;
5074 }
b0d623f7
A
5075 }
5076 s->src.seqhi = htonl(random());
5077 /* Find mss option */
5078 mss = pf_get_mss(m, off, th->th_off, af);
5079 mss = pf_calc_mss(saddr, af, mss);
5080 mss = pf_calc_mss(daddr, af, mss);
5081 s->src.mss = mss;
5082 pf_send_tcp(r, af, daddr, saddr, th->th_dport,
5083 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
5084 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
5085 REASON_SET(&reason, PFRES_SYNPROXY);
5086 return (PF_SYNPROXY_DROP);
5087 }
5088
b0d623f7
A
5089 if (sk->app_state && sk->app_state->handler) {
5090 int offx = off;
5091
5092 switch (pd->proto) {
5093 case IPPROTO_TCP:
5094 offx += th->th_off << 2;
5095 break;
5096 case IPPROTO_UDP:
5097 offx += pd->hdr.udp->uh_ulen << 2;
5098 break;
5099 default:
5100 /* ALG handlers only apply to TCP and UDP rules */
5101 break;
5102 }
5103
5104 if (offx > off) {
5105 sk->app_state->handler(s, direction, offx,
5106 pd, kif);
5107 if (pd->lmw < 0) {
5108 REASON_SET(&reason, PFRES_MEMORY);
5109 return (PF_DROP);
5110 }
5111 m = pd->mp;
5112 }
5113 }
b0d623f7
A
5114 }
5115
5116 /* copy back packet headers if we performed NAT operations */
b0d623f7
A
5117 if (rewrite) {
5118 if (rewrite < off + hdrlen)
5119 rewrite = off + hdrlen;
5120
5121 m = pf_lazy_makewritable(pd, pd->mp, rewrite);
5122 if (!m) {
5123 REASON_SET(&reason, PFRES_MEMORY);
5124 return (PF_DROP);
5125 }
5126
5127 m_copyback(m, off, hdrlen, pd->hdr.any);
5128 }
b0d623f7
A
5129
5130 return (PF_PASS);
5131}
5132
316670eb
A
5133#if DUMMYNET
5134/*
5135 * When pf_test_dummynet() returns PF_PASS and the rule matching parameter
5136 * "rm" remains unchanged, the packet did not match a dummynet rule.
5137 * When the packet does match a dummynet rule, pf_test_dummynet() still
5138 * returns PF_PASS but zeroes out the caller's mbuf pointer, since the
5139 * packet is effectively siphoned off by dummynet.
5140 */
5141static int
5142pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif,
5143 struct mbuf **m0, struct pf_pdesc *pd, struct ip_fw_args *fwa)
5144{
5145 struct mbuf *m = *m0;
5146 struct pf_rule *am = NULL;
5147 struct pf_ruleset *rsm = NULL;
5148 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
5149 sa_family_t af = pd->af;
5150 struct pf_rule *r, *a = NULL;
5151 struct pf_ruleset *ruleset = NULL;
5152 struct tcphdr *th = pd->hdr.tcp;
5153 u_short reason;
5154 int hdrlen = 0;
5155 int tag = -1;
5156 unsigned int rtableid = IFSCOPE_NONE;
5157 int asd = 0;
5158 int match = 0;
5159 u_int8_t icmptype = 0, icmpcode = 0;
316670eb
A
5160 struct ip_fw_args dnflow;
5161 struct pf_rule *prev_matching_rule = fwa ? fwa->fwa_pf_rule : NULL;
5162 int found_prev_rule = (prev_matching_rule) ? 0 : 1;
5163
5164 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
5165
5166 if (!DUMMYNET_LOADED)
5167 return (PF_PASS);
5168
4b17d6b6 5169 if (TAILQ_EMPTY(pf_main_ruleset.rules[PF_RULESET_DUMMYNET].active.ptr))
316670eb 5170 return (PF_PASS);
4b17d6b6 5171
316670eb
A
5172 bzero(&dnflow, sizeof(dnflow));
5173
5174 hdrlen = 0;
316670eb
A
5175
5176 /* Fragments don't have protocol headers */
5177 if (!(pd->flags & PFDESC_IP_FRAG))
5178 switch (pd->proto) {
5179 case IPPROTO_TCP:
5180 dnflow.fwa_id.flags = pd->hdr.tcp->th_flags;
4b17d6b6
A
5181 dnflow.fwa_id.dst_port = ntohs(pd->hdr.tcp->th_dport);
5182 dnflow.fwa_id.src_port = ntohs(pd->hdr.tcp->th_sport);
316670eb
A
5183 hdrlen = sizeof (*th);
5184 break;
5185 case IPPROTO_UDP:
4b17d6b6
A
5186 dnflow.fwa_id.dst_port = ntohs(pd->hdr.udp->uh_dport);
5187 dnflow.fwa_id.src_port = ntohs(pd->hdr.udp->uh_sport);
316670eb
A
5188 hdrlen = sizeof (*pd->hdr.udp);
5189 break;
5190#if INET
5191 case IPPROTO_ICMP:
4b17d6b6 5192 if (af != AF_INET)
316670eb 5193 break;
316670eb
A
5194 hdrlen = ICMP_MINLEN;
5195 icmptype = pd->hdr.icmp->icmp_type;
5196 icmpcode = pd->hdr.icmp->icmp_code;
5197 break;
5198#endif /* INET */
5199#if INET6
5200 case IPPROTO_ICMPV6:
4b17d6b6 5201 if (af != AF_INET6)
316670eb 5202 break;
316670eb
A
5203 hdrlen = sizeof (*pd->hdr.icmp6);
5204 icmptype = pd->hdr.icmp6->icmp6_type;
5205 icmpcode = pd->hdr.icmp6->icmp6_code;
5206 break;
5207#endif /* INET6 */
5208 case IPPROTO_GRE:
4b17d6b6 5209 if (pd->proto_variant == PF_GRE_PPTP_VARIANT)
316670eb 5210 hdrlen = sizeof (*pd->hdr.grev1);
316670eb
A
5211 break;
5212 case IPPROTO_ESP:
316670eb
A
5213 hdrlen = sizeof (*pd->hdr.esp);
5214 break;
5215 }
5216
5217 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_DUMMYNET].active.ptr);
5218
5219 while (r != NULL) {
5220 r->evaluations++;
5221 if (pfi_kif_match(r->kif, kif) == r->ifnot)
5222 r = r->skip[PF_SKIP_IFP].ptr;
5223 else if (r->direction && r->direction != direction)
5224 r = r->skip[PF_SKIP_DIR].ptr;
5225 else if (r->af && r->af != af)
5226 r = r->skip[PF_SKIP_AF].ptr;
5227 else if (r->proto && r->proto != pd->proto)
5228 r = r->skip[PF_SKIP_PROTO].ptr;
5229 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
5230 r->src.neg, kif))
5231 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
5232 /* tcp/udp only. port_op always 0 in other cases */
5233 else if (r->proto == pd->proto &&
5234 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
5235 ((pd->flags & PFDESC_IP_FRAG) ||
5236 ((r->src.xport.range.op &&
5237 !pf_match_port(r->src.xport.range.op,
5238 r->src.xport.range.port[0], r->src.xport.range.port[1],
5239 th->th_sport)))))
5240 r = r->skip[PF_SKIP_SRC_PORT].ptr;
5241 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
5242 r->dst.neg, NULL))
5243 r = r->skip[PF_SKIP_DST_ADDR].ptr;
5244 /* tcp/udp only. port_op always 0 in other cases */
5245 else if (r->proto == pd->proto &&
5246 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
5247 r->dst.xport.range.op &&
5248 ((pd->flags & PFDESC_IP_FRAG) ||
5249 !pf_match_port(r->dst.xport.range.op,
5250 r->dst.xport.range.port[0], r->dst.xport.range.port[1],
5251 th->th_dport)))
5252 r = r->skip[PF_SKIP_DST_PORT].ptr;
5253 /* icmp only. type always 0 in other cases */
5254 else if (r->type &&
5255 ((pd->flags & PFDESC_IP_FRAG) ||
5256 r->type != icmptype + 1))
5257 r = TAILQ_NEXT(r, entries);
5258 /* icmp only. code always 0 in other cases */
5259 else if (r->code &&
5260 ((pd->flags & PFDESC_IP_FRAG) ||
5261 r->code != icmpcode + 1))
5262 r = TAILQ_NEXT(r, entries);
5263 else if (r->tos && r->tos != pd->tos)
5264 r = TAILQ_NEXT(r, entries);
5265 else if (r->rule_flag & PFRULE_FRAGMENT)
5266 r = TAILQ_NEXT(r, entries);
5267 else if (pd->proto == IPPROTO_TCP &&
5268 ((pd->flags & PFDESC_IP_FRAG) ||
5269 (r->flagset & th->th_flags) != r->flags))
5270 r = TAILQ_NEXT(r, entries);
39236c6e
A
5271 else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1))
5272 r = TAILQ_NEXT(r, entries);
316670eb
A
5273 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
5274 r = TAILQ_NEXT(r, entries);
5275 else {
5276 /*
5277 * Need to go past the previous dummynet matching rule
5278 */
5279 if (r->anchor == NULL) {
5280 if (found_prev_rule) {
5281 if (r->tag)
5282 tag = r->tag;
5283 if (PF_RTABLEID_IS_VALID(r->rtableid))
5284 rtableid = r->rtableid;
5285 match = 1;
5286 *rm = r;
5287 am = a;
5288 rsm = ruleset;
5289 if ((*rm)->quick)
5290 break;
5291 } else if (r == prev_matching_rule) {
5292 found_prev_rule = 1;
5293 }
5294 r = TAILQ_NEXT(r, entries);
5295 } else {
5296 pf_step_into_anchor(&asd, &ruleset,
5297 PF_RULESET_DUMMYNET, &r, &a, &match);
5298 }
5299 }
5300 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
5301 PF_RULESET_DUMMYNET, &r, &a, &match))
5302 break;
5303 }
5304 r = *rm;
5305 a = am;
5306 ruleset = rsm;
5307
5308 if (!match)
5309 return (PF_PASS);
5310
5311 REASON_SET(&reason, PFRES_DUMMYNET);
5312
5313 if (r->log) {
5314 PFLOG_PACKET(kif, h, m, af, direction, reason, r,
5315 a, ruleset, pd);
5316 }
5317
5318 if (r->action == PF_NODUMMYNET) {
5319 int dirndx = (direction == PF_OUT);
5320
5321 r->packets[dirndx]++;
5322 r->bytes[dirndx] += pd->tot_len;
5323
5324 return (PF_PASS);
5325 }
5326 if (pf_tag_packet(m, pd->pf_mtag, tag, rtableid, pd)) {
5327 REASON_SET(&reason, PFRES_MEMORY);
5328
5329 return (PF_DROP);
5330 }
5331
5332 if (r->dnpipe && ip_dn_io_ptr != NULL) {
5333 int dirndx = (direction == PF_OUT);
5334
5335 r->packets[dirndx]++;
5336 r->bytes[dirndx] += pd->tot_len;
5337
5338 dnflow.fwa_cookie = r->dnpipe;
5339 dnflow.fwa_pf_rule = r;
316670eb
A
5340 dnflow.fwa_id.proto = pd->proto;
5341 dnflow.fwa_flags = r->dntype;
4b17d6b6
A
5342 switch (af) {
5343 case AF_INET:
5344 dnflow.fwa_id.addr_type = 4;
5345 dnflow.fwa_id.src_ip = ntohl(saddr->v4.s_addr);
5346 dnflow.fwa_id.dst_ip = ntohl(daddr->v4.s_addr);
5347 break;
5348 case AF_INET6:
5349 dnflow.fwa_id.addr_type = 6;
5350 dnflow.fwa_id.src_ip6 = saddr->v6;
5351 dnflow.fwa_id.dst_ip6 = daddr->v6;
5352 break;
5353 }
5354
316670eb
A
5355 if (fwa != NULL) {
5356 dnflow.fwa_oif = fwa->fwa_oif;
5357 dnflow.fwa_oflags = fwa->fwa_oflags;
5358 /*
5359 * Note that fwa_ro, fwa_dst and fwa_ipoa are
5360 * actually in a union so the following does work
5361 * for both IPv4 and IPv6
5362 */
5363 dnflow.fwa_ro = fwa->fwa_ro;
5364 dnflow.fwa_dst = fwa->fwa_dst;
5365 dnflow.fwa_ipoa = fwa->fwa_ipoa;
5366 dnflow.fwa_ro6_pmtu = fwa->fwa_ro6_pmtu;
5367 dnflow.fwa_origifp = fwa->fwa_origifp;
5368 dnflow.fwa_mtu = fwa->fwa_mtu;
5369 dnflow.fwa_alwaysfrag = fwa->fwa_alwaysfrag;
5370 dnflow.fwa_unfragpartlen = fwa->fwa_unfragpartlen;
5371 dnflow.fwa_exthdrs = fwa->fwa_exthdrs;
5372 }
5373
5374 if (af == AF_INET) {
5375 struct ip *iphdr = mtod(m, struct ip *);
5376 NTOHS(iphdr->ip_len);
5377 NTOHS(iphdr->ip_off);
5378 }
5379 /*
5380 * Don't need to unlock pf_lock as NET_THREAD_HELD_PF
5381 * allows for recursive behavior
5382 */
5383 ip_dn_io_ptr(m,
5384 dnflow.fwa_cookie,
5385 af == AF_INET ?
5386 direction == PF_IN ? DN_TO_IP_IN : DN_TO_IP_OUT :
5387 direction == PF_IN ? DN_TO_IP6_IN : DN_TO_IP6_OUT,
5388 &dnflow, DN_CLIENT_PF);
5389
5390 /*
5391 * The packet is siphoned out by dummynet so return a NULL
5392 * mbuf so the caller can still return success.
5393 */
5394 *m0 = NULL;
5395
5396 return (PF_PASS);
5397 }
5398
5399 return (PF_PASS);
5400}
5401#endif /* DUMMYNET */
5402
b0d623f7
A
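/*
 * Rule evaluation for fragments that carry no protocol header: port,
 * TCP flag, ICMP type/code and OS fingerprint criteria cannot be
 * evaluated, so rules that use them are skipped, and no state is created.
 */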
5403static int
5404pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
5405 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
5406 struct pf_ruleset **rsm)
5407{
5408#pragma unused(h)
5409 struct pf_rule *r, *a = NULL;
5410 struct pf_ruleset *ruleset = NULL;
5411 sa_family_t af = pd->af;
5412 u_short reason;
5413 int tag = -1;
5414 int asd = 0;
5415 int match = 0;
5416
5417 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
5418 while (r != NULL) {
5419 r->evaluations++;
5420 if (pfi_kif_match(r->kif, kif) == r->ifnot)
5421 r = r->skip[PF_SKIP_IFP].ptr;
5422 else if (r->direction && r->direction != direction)
5423 r = r->skip[PF_SKIP_DIR].ptr;
5424 else if (r->af && r->af != af)
5425 r = r->skip[PF_SKIP_AF].ptr;
5426 else if (r->proto && r->proto != pd->proto)
5427 r = r->skip[PF_SKIP_PROTO].ptr;
5428 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
5429 r->src.neg, kif))
5430 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
5431 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
5432 r->dst.neg, NULL))
5433 r = r->skip[PF_SKIP_DST_ADDR].ptr;
316670eb
A
5434 else if ((r->rule_flag & PFRULE_TOS) && r->tos &&
5435 !(r->tos & pd->tos))
5436 r = TAILQ_NEXT(r, entries);
5437 else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
5438 !(r->tos & (pd->tos & DSCP_MASK)))
5439 r = TAILQ_NEXT(r, entries);
5440 else if ((r->rule_flag & PFRULE_SC) && r->tos &&
5441 ((r->tos & SCIDX_MASK) != pd->sc))
b0d623f7
A
5442 r = TAILQ_NEXT(r, entries);
5443 else if (r->os_fingerprint != PF_OSFP_ANY)
5444 r = TAILQ_NEXT(r, entries);
b0d623f7
A
5445 else if (pd->proto == IPPROTO_UDP &&
5446 (r->src.xport.range.op || r->dst.xport.range.op))
5447 r = TAILQ_NEXT(r, entries);
5448 else if (pd->proto == IPPROTO_TCP &&
5449 (r->src.xport.range.op || r->dst.xport.range.op ||
5450 r->flagset))
5451 r = TAILQ_NEXT(r, entries);
b0d623f7
A
5452 else if ((pd->proto == IPPROTO_ICMP ||
5453 pd->proto == IPPROTO_ICMPV6) &&
5454 (r->type || r->code))
5455 r = TAILQ_NEXT(r, entries);
39236c6e 5456 else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1))
b0d623f7
A
5457 r = TAILQ_NEXT(r, entries);
5458 else if (r->match_tag && !pf_match_tag(m, r, pd->pf_mtag, &tag))
5459 r = TAILQ_NEXT(r, entries);
5460 else {
5461 if (r->anchor == NULL) {
5462 match = 1;
5463 *rm = r;
5464 *am = a;
5465 *rsm = ruleset;
5466 if ((*rm)->quick)
5467 break;
5468 r = TAILQ_NEXT(r, entries);
5469 } else
5470 pf_step_into_anchor(&asd, &ruleset,
5471 PF_RULESET_FILTER, &r, &a, &match);
5472 }
5473 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
5474 PF_RULESET_FILTER, &r, &a, &match))
5475 break;
5476 }
5477 r = *rm;
5478 a = *am;
5479 ruleset = *rsm;
5480
5481 REASON_SET(&reason, PFRES_MATCH);
5482
5483 if (r->log)
5484 PFLOG_PACKET(kif, h, m, af, direction, reason, r, a, ruleset,
5485 pd);
5486
5487 if (r->action != PF_PASS)
5488 return (PF_DROP);
5489
316670eb 5490 if (pf_tag_packet(m, pd->pf_mtag, tag, -1, NULL)) {
b0d623f7
A
5491 REASON_SET(&reason, PFRES_MEMORY);
5492 return (PF_DROP);
5493 }
5494
5495 return (PF_PASS);
5496}
5497
b0d623f7
A
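/*
 * PPTP ALG: parse control-channel messages on the TCP connection, learn
 * the PNS/PAC call IDs, spoof call IDs when the control connection is
 * NATed, and insert or tear down the companion GREv1 state accordingly.
 */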
5498static void
5499pf_pptp_handler(struct pf_state *s, int direction, int off,
5500 struct pf_pdesc *pd, struct pfi_kif *kif)
5501{
5502#pragma unused(direction)
5503 struct tcphdr *th;
d1ecb069 5504 struct pf_pptp_state *pptps;
b0d623f7
A
5505 struct pf_pptp_ctrl_msg cm;
5506 size_t plen;
5507 struct pf_state *gs;
5508 u_int16_t ct;
5509 u_int16_t *pac_call_id;
5510 u_int16_t *pns_call_id;
5511 u_int16_t *spoof_call_id;
5512 u_int8_t *pac_state;
5513 u_int8_t *pns_state;
5514 enum { PF_PPTP_PASS, PF_PPTP_INSERT_GRE, PF_PPTP_REMOVE_GRE } op;
5515 struct mbuf *m;
5516 struct pf_state_key *sk;
5517 struct pf_state_key *gsk;
d1ecb069
A
5518 struct pf_app_state *gas;
5519
5520 sk = s->state_key;
5521 pptps = &sk->app_state->u.pptp;
5522 gs = pptps->grev1_state;
5523
5524 if (gs)
5525 gs->expire = pf_time_second();
b0d623f7
A
5526
5527 m = pd->mp;
5528 plen = min(sizeof (cm), m->m_pkthdr.len - off);
5529 if (plen < PF_PPTP_CTRL_MSG_MINSIZE)
5530 return;
5531
b0d623f7
A
5532 m_copydata(m, off, plen, &cm);
5533
b7266188 5534 if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
b0d623f7 5535 return;
b7266188 5536 if (ntohs(cm.hdr.type) != 1)
b0d623f7
A
5537 return;
5538
b0d623f7
A
5539 if (!gs) {
5540 gs = pool_get(&pf_state_pl, PR_WAITOK);
5541 if (!gs)
5542 return;
5543
5544 memcpy(gs, s, sizeof (*gs));
5545
5546 memset(&gs->entry_id, 0, sizeof (gs->entry_id));
5547 memset(&gs->entry_list, 0, sizeof (gs->entry_list));
5548
5549 TAILQ_INIT(&gs->unlink_hooks);
5550 gs->rt_kif = NULL;
5551 gs->creation = 0;
5552 gs->pfsync_time = 0;
5553 gs->packets[0] = gs->packets[1] = 0;
5554 gs->bytes[0] = gs->bytes[1] = 0;
5555 gs->timeout = PFTM_UNLINKED;
5556 gs->id = gs->creatorid = 0;
5557 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
5558 gs->src.scrub = gs->dst.scrub = 0;
5559
d1ecb069
A
5560 gas = pool_get(&pf_app_state_pl, PR_NOWAIT);
5561 if (!gas) {
5562 pool_put(&pf_state_pl, gs);
5563 return;
5564 }
5565
316670eb 5566 gsk = pf_alloc_state_key(gs, NULL);
b0d623f7 5567 if (!gsk) {
d1ecb069 5568 pool_put(&pf_app_state_pl, gas);
b0d623f7
A
5569 pool_put(&pf_state_pl, gs);
5570 return;
5571 }
5572
5573 memcpy(&gsk->lan, &sk->lan, sizeof (gsk->lan));
5574 memcpy(&gsk->gwy, &sk->gwy, sizeof (gsk->gwy));
5575 memcpy(&gsk->ext, &sk->ext, sizeof (gsk->ext));
5576 gsk->af = sk->af;
5577 gsk->proto = IPPROTO_GRE;
5578 gsk->proto_variant = PF_GRE_PPTP_VARIANT;
d1ecb069 5579 gsk->app_state = gas;
b0d623f7
A
5580 gsk->lan.xport.call_id = 0;
5581 gsk->gwy.xport.call_id = 0;
5582 gsk->ext.xport.call_id = 0;
39236c6e 5583 gsk->flowsrc = FLOWSRC_PF;
316670eb 5584 gsk->flowhash = pf_calc_state_key_flowhash(gsk);
d1ecb069
A
5585 memset(gas, 0, sizeof (*gas));
5586 gas->u.grev1.pptp_state = s;
b7266188 5587 STATE_INC_COUNTERS(gs);
d1ecb069
A
5588 pptps->grev1_state = gs;
5589 (void) hook_establish(&gs->unlink_hooks, 0,
5590 (hook_fn_t) pf_grev1_unlink, gs);
b0d623f7
A
5591 } else {
5592 gsk = gs->state_key;
5593 }
5594
5595 switch (sk->direction) {
5596 case PF_IN:
5597 pns_call_id = &gsk->ext.xport.call_id;
5598 pns_state = &gs->dst.state;
5599 pac_call_id = &gsk->lan.xport.call_id;
5600 pac_state = &gs->src.state;
5601 break;
5602
5603 case PF_OUT:
5604 pns_call_id = &gsk->lan.xport.call_id;
5605 pns_state = &gs->src.state;
5606 pac_call_id = &gsk->ext.xport.call_id;
5607 pac_state = &gs->dst.state;
5608 break;
5609
5610 default:
5611 DPFPRINTF(PF_DEBUG_URGENT,
5612 ("pf_pptp_handler: bad directional!\n"));
5613 return;
5614 }
5615
5616 spoof_call_id = 0;
5617 op = PF_PPTP_PASS;
5618
5619 ct = ntohs(cm.ctrl.type);
5620
5621 switch (ct) {
5622 case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ:
5623 *pns_call_id = cm.msg.call_out_req.call_id;
5624 *pns_state = PFGRE1S_INITIATING;
5625 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5626 spoof_call_id = &cm.msg.call_out_req.call_id;
5627 break;
5628
5629 case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY:
5630 *pac_call_id = cm.msg.call_out_rpy.call_id;
5631 if (s->nat_rule.ptr)
5632 spoof_call_id =
5633 (pac_call_id == &gsk->lan.xport.call_id) ?
5634 &cm.msg.call_out_rpy.call_id :
5635 &cm.msg.call_out_rpy.peer_call_id;
5636 if (gs->timeout == PFTM_UNLINKED) {
5637 *pac_state = PFGRE1S_INITIATING;
5638 op = PF_PPTP_INSERT_GRE;
5639 }
5640 break;
5641
5642 case PF_PPTP_CTRL_TYPE_CALL_IN_1ST:
5643 *pns_call_id = cm.msg.call_in_1st.call_id;
5644 *pns_state = PFGRE1S_INITIATING;
5645 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5646 spoof_call_id = &cm.msg.call_in_1st.call_id;
5647 break;
5648
5649 case PF_PPTP_CTRL_TYPE_CALL_IN_2ND:
5650 *pac_call_id = cm.msg.call_in_2nd.call_id;
5651 *pac_state = PFGRE1S_INITIATING;
5652 if (s->nat_rule.ptr)
5653 spoof_call_id =
5654 (pac_call_id == &gsk->lan.xport.call_id) ?
5655 &cm.msg.call_in_2nd.call_id :
5656 &cm.msg.call_in_2nd.peer_call_id;
5657 break;
5658
5659 case PF_PPTP_CTRL_TYPE_CALL_IN_3RD:
5660 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5661 spoof_call_id = &cm.msg.call_in_3rd.call_id;
5662 if (cm.msg.call_in_3rd.call_id != *pns_call_id) {
5663 break;
5664 }
5665 if (gs->timeout == PFTM_UNLINKED)
5666 op = PF_PPTP_INSERT_GRE;
5667 break;
5668
5669 case PF_PPTP_CTRL_TYPE_CALL_CLR:
5670 if (cm.msg.call_clr.call_id != *pns_call_id)
5671 op = PF_PPTP_REMOVE_GRE;
5672 break;
5673
5674 case PF_PPTP_CTRL_TYPE_CALL_DISC:
5675 if (cm.msg.call_clr.call_id != *pac_call_id)
5676 op = PF_PPTP_REMOVE_GRE;
5677 break;
5678
5679 case PF_PPTP_CTRL_TYPE_ERROR:
5680 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
5681 spoof_call_id = &cm.msg.error.peer_call_id;
5682 break;
5683
5684 case PF_PPTP_CTRL_TYPE_SET_LINKINFO:
5685 if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id)
5686 spoof_call_id = &cm.msg.set_linkinfo.peer_call_id;
5687 break;
5688
5689 default:
5690 op = PF_PPTP_PASS;
5691 break;
5692 }
5693
5694 if (!gsk->gwy.xport.call_id && gsk->lan.xport.call_id) {
5695 gsk->gwy.xport.call_id = gsk->lan.xport.call_id;
5696 if (spoof_call_id) {
5697 u_int16_t call_id = 0;
5698 int n = 0;
5699 struct pf_state_key_cmp key;
5700
5701 key.af = gsk->af;
5702 key.proto = IPPROTO_GRE;
5703 key.proto_variant = PF_GRE_PPTP_VARIANT;
5704 PF_ACPY(&key.gwy.addr, &gsk->gwy.addr, key.af);
5705 PF_ACPY(&key.ext.addr, &gsk->ext.addr, key.af);
5706 key.gwy.xport.call_id = gsk->gwy.xport.call_id;
5707 key.ext.xport.call_id = gsk->ext.xport.call_id;
5708 do {
5709 call_id = htonl(random());
5710 } while (!call_id);
5711
5712 while (pf_find_state_all(&key, PF_IN, 0)) {
5713 call_id = ntohs(call_id);
5714 --call_id;
5715 if (--call_id == 0) call_id = 0xffff;
5716 call_id = htons(call_id);
5717
5718 key.gwy.xport.call_id = call_id;
5719
5720 if (++n > 65535) {
5721 DPFPRINTF(PF_DEBUG_URGENT,
5722 ("pf_pptp_handler: failed to spoof "
5723 "call id\n"));
5724 key.gwy.xport.call_id = 0;
5725 break;
5726 }
5727 }
5728
5729 gsk->gwy.xport.call_id = call_id;
5730 }
5731 }
5732
5733 th = pd->hdr.tcp;
5734
5735 if (spoof_call_id && gsk->lan.xport.call_id != gsk->gwy.xport.call_id) {
5736 if (*spoof_call_id == gsk->gwy.xport.call_id) {
5737 *spoof_call_id = gsk->lan.xport.call_id;
5738 th->th_sum = pf_cksum_fixup(th->th_sum,
5739 gsk->gwy.xport.call_id, gsk->lan.xport.call_id, 0);
5740 } else {
5741 *spoof_call_id = gsk->gwy.xport.call_id;
5742 th->th_sum = pf_cksum_fixup(th->th_sum,
5743 gsk->lan.xport.call_id, gsk->gwy.xport.call_id, 0);
5744 }
5745
5746 m = pf_lazy_makewritable(pd, m, off + plen);
b7266188 5747 if (!m) {
d1ecb069 5748 pptps->grev1_state = NULL;
b7266188
A
5749 STATE_DEC_COUNTERS(gs);
5750 pool_put(&pf_state_pl, gs);
b0d623f7 5751 return;
b7266188 5752 }
b0d623f7
A
5753 m_copyback(m, off, plen, &cm);
5754 }
5755
5756 switch (op) {
5757 case PF_PPTP_REMOVE_GRE:
5758 gs->timeout = PFTM_PURGE;
5759 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
5760 gsk->lan.xport.call_id = 0;
5761 gsk->gwy.xport.call_id = 0;
5762 gsk->ext.xport.call_id = 0;
5763 gs->id = gs->creatorid = 0;
5764 break;
5765
5766 case PF_PPTP_INSERT_GRE:
5767 gs->creation = pf_time_second();
5768 gs->expire = pf_time_second();
d1ecb069 5769 gs->timeout = PFTM_TCP_ESTABLISHED;
b7266188
A
5770 if (gs->src_node != NULL) {
5771 ++gs->src_node->states;
5772 VERIFY(gs->src_node->states != 0);
5773 }
5774 if (gs->nat_src_node != NULL) {
5775 ++gs->nat_src_node->states;
5776 VERIFY(gs->nat_src_node->states != 0);
5777 }
b0d623f7
A
5778 pf_set_rt_ifp(gs, &sk->lan.addr);
5779 if (pf_insert_state(BOUND_IFACE(s->rule.ptr, kif), gs)) {
5780
5781 /*
5782 * <jhw@apple.com>
5783 * FIX ME: insertion can fail when multiple PNS
5784 * behind the same NAT open calls to the same PAC
5785 * simultaneously because spoofed call ID numbers
5786 * are chosen before states are inserted. This is
5787 * hard to fix and happens infrequently enough that
5788 * users will normally try again and this ALG will
5789 * succeed. Failures are expected to be rare enough
5790 * that fixing this is a low priority.
5791 */
d1ecb069
A
5792 pptps->grev1_state = NULL;
5793 pd->lmw = -1; /* Force PF_DROP on PFRES_MEMORY */
b0d623f7
A
5794 pf_src_tree_remove_state(gs);
5795 STATE_DEC_COUNTERS(gs);
5796 pool_put(&pf_state_pl, gs);
5797 DPFPRINTF(PF_DEBUG_URGENT, ("pf_pptp_handler: error "
5798 "inserting GREv1 state.\n"));
5799 }
5800 break;
5801
5802 default:
5803 break;
5804 }
5805}
5806
5807static void
5808pf_pptp_unlink(struct pf_state *s)
5809{
5810 struct pf_app_state *as = s->state_key->app_state;
d1ecb069
A
5811 struct pf_state *grev1s = as->u.pptp.grev1_state;
5812
5813 if (grev1s) {
5814 struct pf_app_state *gas = grev1s->state_key->app_state;
b0d623f7 5815
d1ecb069
A
5816 if (grev1s->timeout < PFTM_MAX)
5817 grev1s->timeout = PFTM_PURGE;
5818 gas->u.grev1.pptp_state = NULL;
5819 as->u.pptp.grev1_state = NULL;
5820 }
5821}
5822
5823static void
5824pf_grev1_unlink(struct pf_state *s)
5825{
5826 struct pf_app_state *as = s->state_key->app_state;
5827 struct pf_state *pptps = as->u.grev1.pptp_state;
5828
5829 if (pptps) {
5830 struct pf_app_state *pas = pptps->state_key->app_state;
5831
5832 pas->u.pptp.grev1_state = NULL;
5833 as->u.grev1.pptp_state = NULL;
b0d623f7
A
5834 }
5835}
5836
5837static int
5838pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b)
5839{
5840 int64_t d = a->u.ike.cookie - b->u.ike.cookie;
5841 return ((d > 0) ? 1 : ((d < 0) ? -1 : 0));
5842}
b0d623f7
A
5843
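/*
 * Stateful test for TCP segments: look up the state for this connection,
 * drive the synproxy handshake when the rule requested it, and validate
 * the segment against the tracked sequence numbers and window state.
 */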
5844static int
5845pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
5846 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
5847 u_short *reason)
5848{
5849#pragma unused(h)
5850 struct pf_state_key_cmp key;
5851 struct tcphdr *th = pd->hdr.tcp;
5852 u_int16_t win = ntohs(th->th_win);
5853 u_int32_t ack, end, seq, orig_seq;
5854 u_int8_t sws, dws;
5855 int ackskew;
5856 int copyback = 0;
5857 struct pf_state_peer *src, *dst;
5858
b0d623f7 5859 key.app_state = 0;
b0d623f7
A
5860 key.af = pd->af;
5861 key.proto = IPPROTO_TCP;
5862 if (direction == PF_IN) {
5863 PF_ACPY(&key.ext.addr, pd->src, key.af);
5864 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
b0d623f7
A
5865 key.ext.xport.port = th->th_sport;
5866 key.gwy.xport.port = th->th_dport;
b0d623f7
A
5867 } else {
5868 PF_ACPY(&key.lan.addr, pd->src, key.af);
5869 PF_ACPY(&key.ext.addr, pd->dst, key.af);
b0d623f7
A
5870 key.lan.xport.port = th->th_sport;
5871 key.ext.xport.port = th->th_dport;
b0d623f7
A
5872 }
5873
5874 STATE_LOOKUP();
5875
5876 if (direction == (*state)->state_key->direction) {
5877 src = &(*state)->src;
5878 dst = &(*state)->dst;
5879 } else {
5880 src = &(*state)->dst;
5881 dst = &(*state)->src;
5882 }
5883
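	/*
	 * synproxy: pf first completes the three-way handshake with the
	 * client using its own ISN (src.seqhi), then opens the connection
	 * to the server and splices the two together, recording the ISN
	 * offsets in src/dst seqdiff so later segments can be rewritten.
	 */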
5884 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
5885 if (direction != (*state)->state_key->direction) {
5886 REASON_SET(reason, PFRES_SYNPROXY);
5887 return (PF_SYNPROXY_DROP);
5888 }
5889 if (th->th_flags & TH_SYN) {
5890 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
5891 REASON_SET(reason, PFRES_SYNPROXY);
5892 return (PF_DROP);
5893 }
5894 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
5895 pd->src, th->th_dport, th->th_sport,
5896 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
5897 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1,
5898 0, NULL, NULL);
5899 REASON_SET(reason, PFRES_SYNPROXY);
5900 return (PF_SYNPROXY_DROP);
5901 } else if (!(th->th_flags & TH_ACK) ||
5902 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
5903 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
5904 REASON_SET(reason, PFRES_SYNPROXY);
5905 return (PF_DROP);
5906 } else if ((*state)->src_node != NULL &&
5907 pf_src_connlimit(state)) {
5908 REASON_SET(reason, PFRES_SRCLIMIT);
5909 return (PF_DROP);
5910 } else
5911 (*state)->src.state = PF_TCPS_PROXY_DST;
5912 }
5913 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
5914 struct pf_state_host *psrc, *pdst;
5915
5916 if (direction == PF_OUT) {
5917 psrc = &(*state)->state_key->gwy;
5918 pdst = &(*state)->state_key->ext;
5919 } else {
5920 psrc = &(*state)->state_key->ext;
5921 pdst = &(*state)->state_key->lan;
5922 }
5923 if (direction == (*state)->state_key->direction) {
5924 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
5925 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
5926 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
5927 REASON_SET(reason, PFRES_SYNPROXY);
5928 return (PF_DROP);
5929 }
5930 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
5931 if ((*state)->dst.seqhi == 1)
5932 (*state)->dst.seqhi = htonl(random());
5933 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
b0d623f7 5934 &pdst->addr, psrc->xport.port, pdst->xport.port,
5935 (*state)->dst.seqhi, 0, TH_SYN, 0,
5936 (*state)->src.mss, 0, 0, (*state)->tag, NULL, NULL);
5937 REASON_SET(reason, PFRES_SYNPROXY);
5938 return (PF_SYNPROXY_DROP);
5939 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
5940 (TH_SYN|TH_ACK)) ||
5941 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
5942 REASON_SET(reason, PFRES_SYNPROXY);
5943 return (PF_DROP);
5944 } else {
5945 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
5946 (*state)->dst.seqlo = ntohl(th->th_seq);
5947 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
5948 pd->src, th->th_dport, th->th_sport,
5949 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
5950 TH_ACK, (*state)->src.max_win, 0, 0, 0,
5951 (*state)->tag, NULL, NULL);
5952 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
b0d623f7 5953 &pdst->addr, psrc->xport.port, pdst->xport.port,
5954 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
5955 TH_ACK, (*state)->dst.max_win, 0, 0, 1,
5956 0, NULL, NULL);
5957 (*state)->src.seqdiff = (*state)->dst.seqhi -
5958 (*state)->src.seqlo;
5959 (*state)->dst.seqdiff = (*state)->src.seqhi -
5960 (*state)->dst.seqlo;
5961 (*state)->src.seqhi = (*state)->src.seqlo +
5962 (*state)->dst.max_win;
5963 (*state)->dst.seqhi = (*state)->dst.seqlo +
5964 (*state)->src.max_win;
5965 (*state)->src.wscale = (*state)->dst.wscale = 0;
5966 (*state)->src.state = (*state)->dst.state =
5967 TCPS_ESTABLISHED;
5968 REASON_SET(reason, PFRES_SYNPROXY);
5969 return (PF_SYNPROXY_DROP);
5970 }
5971 }
5972
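	/*
	 * A fresh SYN while both directions are already past FIN_WAIT_2
	 * means the endpoints are reusing the connection; drop the old
	 * state so a retransmitted SYN can set up a new one.
	 */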
5973 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
5974 dst->state >= TCPS_FIN_WAIT_2 &&
5975 src->state >= TCPS_FIN_WAIT_2) {
5976 if (pf_status.debug >= PF_DEBUG_MISC) {
5977 printf("pf: state reuse ");
5978 pf_print_state(*state);
5979 pf_print_flags(th->th_flags);
5980 printf("\n");
5981 }
5982 /* XXX make sure it's the same direction ?? */
5983 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
5984 pf_unlink_state(*state);
5985 *state = NULL;
5986 return (PF_DROP);
5987 }
5988
39236c6e
A
5989 if ((th->th_flags & TH_SYN) == 0) {
5990 sws = (src->wscale & PF_WSCALE_FLAG) ?
5991 (src->wscale & PF_WSCALE_MASK) : TCP_MAX_WINSHIFT;
5992 dws = (dst->wscale & PF_WSCALE_FLAG) ?
5993 (dst->wscale & PF_WSCALE_MASK) : TCP_MAX_WINSHIFT;
5994 }
5995 else
5996 sws = dws = 0;
5997
5998 /*
5999 * Sequence tracking algorithm from Guido van Rooij's paper:
6000 * http://www.madison-gurkha.com/publications/tcp_filtering/
6001 * tcp_filtering.ps
6002 */
6003
6004 orig_seq = seq = ntohl(th->th_seq);
6005 if (src->seqlo == 0) {
6006 /* First packet from this end. Set its state */
6007
6008 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
6009 src->scrub == NULL) {
6010 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
6011 REASON_SET(reason, PFRES_MEMORY);
6012 return (PF_DROP);
6013 }
6014 }
6015
6016 /* Deferred generation of sequence number modulator */
6017 if (dst->seqdiff && !src->seqdiff) {
6018 /* use random iss for the TCP server */
6019 while ((src->seqdiff = random() - seq) == 0)
6020 ;
6021 ack = ntohl(th->th_ack) - dst->seqdiff;
6022 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6023 src->seqdiff), 0);
6024 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
6025 copyback = off + sizeof (*th);
6026 } else {
6027 ack = ntohl(th->th_ack);
6028 }
6029
6030 end = seq + pd->p_len;
6031 if (th->th_flags & TH_SYN) {
6032 end++;
6033 if (dst->wscale & PF_WSCALE_FLAG) {
6034 src->wscale = pf_get_wscale(m, off, th->th_off,
6035 pd->af);
6036 if (src->wscale & PF_WSCALE_FLAG) {
6037 /*
6038 * Remove scale factor from initial
6039 * window
6040 */
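				/*
				 * e.g. a raw SYN window of 65535 with a
				 * 6-bit scale factor is rounded up to
				 * (65535 + 63) >> 6 == 1024 scaled units.
				 */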
6041 sws = src->wscale & PF_WSCALE_MASK;
6042 win = ((u_int32_t)win + (1 << sws) - 1)
6043 >> sws;
6044 dws = dst->wscale & PF_WSCALE_MASK;
6045 } else {
b7266188 6046 /*
6047 * Window scale negotiation has failed,
6048 * therefore we must restore the window
6049 * scale in the state record that we
6050 * optimistically removed in
6051 * pf_test_rule(). Care is required to
6052 * prevent arithmetic overflow from
6053 * zeroing the window when it's
316670eb 6054 * truncated down to 16-bits.
b7266188 6055 */
d1ecb069
A
6056 u_int32_t max_win = dst->max_win;
6057 max_win <<=
6058 dst->wscale & PF_WSCALE_MASK;
6059 dst->max_win = MIN(0xffff, max_win);
b0d623f7
A
6060 /* in case of a retrans SYN|ACK */
6061 dst->wscale = 0;
6062 }
6063 }
6064 }
6065 if (th->th_flags & TH_FIN)
6066 end++;
6067
6068 src->seqlo = seq;
6069 if (src->state < TCPS_SYN_SENT)
6070 src->state = TCPS_SYN_SENT;
6071
6072 /*
6073 * May need to slide the window (seqhi may have been set by
6074 * the crappy stack check or if we picked up the connection
6075 * after establishment)
6076 */
b7266188
A
6077 if (src->seqhi == 1 ||
6078 SEQ_GEQ(end + MAX(1, (u_int32_t)dst->max_win << dws),
6079 src->seqhi))
6080 src->seqhi = end + MAX(1, (u_int32_t)dst->max_win << dws);
b0d623f7
A
6081 if (win > src->max_win)
6082 src->max_win = win;
6083
6084 } else {
6085 ack = ntohl(th->th_ack) - dst->seqdiff;
6086 if (src->seqdiff) {
6087 /* Modulate sequence numbers */
6088 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6089 src->seqdiff), 0);
6090 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
 6091			copyback = off + sizeof (*th);
6092 }
6093 end = seq + pd->p_len;
6094 if (th->th_flags & TH_SYN)
6095 end++;
6096 if (th->th_flags & TH_FIN)
6097 end++;
6098 }
6099
6100 if ((th->th_flags & TH_ACK) == 0) {
6101 /* Let it pass through the ack skew check */
6102 ack = dst->seqlo;
6103 } else if ((ack == 0 &&
6104 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
6105 /* broken tcp stacks do not set ack */
6106 (dst->state < TCPS_SYN_SENT)) {
6107 /*
6108 * Many stacks (ours included) will set the ACK number in an
6109 * FIN|ACK if the SYN times out -- no sequence to ACK.
6110 */
6111 ack = dst->seqlo;
6112 }
6113
6114 if (seq == end) {
6115 /* Ease sequencing restrictions on no data packets */
6116 seq = src->seqlo;
6117 end = seq;
6118 }
6119
6120 ackskew = dst->seqlo - ack;
6121
6122
6123 /*
6124 * Need to demodulate the sequence numbers in any TCP SACK options
6125 * (Selective ACK). We could optionally validate the SACK values
6126 * against the current ACK window, either forwards or backwards, but
6127 * I'm not confident that SACK has been implemented properly
 6128	 * everywhere. It wouldn't surprise me if several stacks accidentally
6129 * SACK too far backwards of previously ACKed data. There really aren't
6130 * any security implications of bad SACKing unless the target stack
6131 * doesn't validate the option length correctly. Someone trying to
6132 * spoof into a TCP connection won't bother blindly sending SACK
6133 * options anyway.
6134 */
6135 if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) {
b0d623f7
A
6136 copyback = pf_modulate_sack(m, off, pd, th, dst);
6137 if (copyback == -1) {
6138 REASON_SET(reason, PFRES_MEMORY);
6139 return (PF_DROP);
6140 }
6141
6142 m = pd->mp;
b0d623f7
A
6143 }
6144
6145
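	/*
	 * Acceptance test used below: the segment must end at or below
	 * src->seqhi, start no more than one scaled window behind
	 * src->seqlo, and carry an ACK within MAXACKWINDOW of dst->seqlo.
	 * With max_win == 65535 and a window shift of 7, "one scaled
	 * window" is 65535 << 7, roughly 8 MB of tolerated retransmission.
	 */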
6146#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
6147 if (SEQ_GEQ(src->seqhi, end) &&
6148 /* Last octet inside other's window space */
b7266188 6149 SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) &&
6150 /* Retrans: not more than one window back */
6151 (ackskew >= -MAXACKWINDOW) &&
6152 /* Acking not more than one reassembled fragment backwards */
6153 (ackskew <= (MAXACKWINDOW << sws)) &&
6154 /* Acking not more than one window forward */
6155 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
6156 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
6157 (pd->flags & PFDESC_IP_REAS) == 0)) {
6158 /* Require an exact/+1 sequence match on resets when possible */
6159
6160 if (dst->scrub || src->scrub) {
6161 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
6162 *state, src, dst, &copyback))
6163 return (PF_DROP);
6164
b0d623f7 6165 m = pd->mp;
b0d623f7
A
6166 }
6167
6168 /* update max window */
6169 if (src->max_win < win)
6170 src->max_win = win;
6171 /* synchronize sequencing */
6172 if (SEQ_GT(end, src->seqlo))
6173 src->seqlo = end;
6174 /* slide the window of what the other end can send */
b7266188
A
6175 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
6176 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
b0d623f7
A
6177
6178 /* update states */
6179 if (th->th_flags & TH_SYN)
6180 if (src->state < TCPS_SYN_SENT)
6181 src->state = TCPS_SYN_SENT;
6182 if (th->th_flags & TH_FIN)
6183 if (src->state < TCPS_CLOSING)
6184 src->state = TCPS_CLOSING;
6185 if (th->th_flags & TH_ACK) {
6186 if (dst->state == TCPS_SYN_SENT) {
6187 dst->state = TCPS_ESTABLISHED;
6188 if (src->state == TCPS_ESTABLISHED &&
6189 (*state)->src_node != NULL &&
6190 pf_src_connlimit(state)) {
6191 REASON_SET(reason, PFRES_SRCLIMIT);
6192 return (PF_DROP);
6193 }
6194 } else if (dst->state == TCPS_CLOSING)
6195 dst->state = TCPS_FIN_WAIT_2;
6196 }
6197 if (th->th_flags & TH_RST)
6198 src->state = dst->state = TCPS_TIME_WAIT;
6199
6200 /* update expire time */
6201 (*state)->expire = pf_time_second();
6202 if (src->state >= TCPS_FIN_WAIT_2 &&
6203 dst->state >= TCPS_FIN_WAIT_2)
6204 (*state)->timeout = PFTM_TCP_CLOSED;
6205 else if (src->state >= TCPS_CLOSING &&
6206 dst->state >= TCPS_CLOSING)
6207 (*state)->timeout = PFTM_TCP_FIN_WAIT;
6208 else if (src->state < TCPS_ESTABLISHED ||
6209 dst->state < TCPS_ESTABLISHED)
6210 (*state)->timeout = PFTM_TCP_OPENING;
6211 else if (src->state >= TCPS_CLOSING ||
6212 dst->state >= TCPS_CLOSING)
6213 (*state)->timeout = PFTM_TCP_CLOSING;
6214 else
6215 (*state)->timeout = PFTM_TCP_ESTABLISHED;
6216
6217 /* Fall through to PASS packet */
6218
6219 } else if ((dst->state < TCPS_SYN_SENT ||
6220 dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) &&
6221 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
6222 /* Within a window forward of the originating packet */
6223 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
6224 /* Within a window backward of the originating packet */
6225
6226 /*
6227 * This currently handles three situations:
6228 * 1) Stupid stacks will shotgun SYNs before their peer
6229 * replies.
6230 * 2) When PF catches an already established stream (the
6231 * firewall rebooted, the state table was flushed, routes
6232 * changed...)
6233 * 3) Packets get funky immediately after the connection
6234 * closes (this should catch Solaris spurious ACK|FINs
6235 * that web servers like to spew after a close)
6236 *
6237 * This must be a little more careful than the above code
6238 * since packet floods will also be caught here. We don't
6239 * update the TTL here to mitigate the damage of a packet
6240 * flood and so the same code can handle awkward establishment
6241 * and a loosened connection close.
6242 * In the establishment case, a correct peer response will
6243 * validate the connection, go through the normal state code
6244 * and keep updating the state TTL.
6245 */
6246
6247 if (pf_status.debug >= PF_DEBUG_MISC) {
6248 printf("pf: loose state match: ");
6249 pf_print_state(*state);
6250 pf_print_flags(th->th_flags);
6251 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6252 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
6253 pd->p_len, ackskew, (*state)->packets[0],
6254 (*state)->packets[1],
6255 direction == PF_IN ? "in" : "out",
6256 direction == (*state)->state_key->direction ?
6257 "fwd" : "rev");
6258 }
6259
6260 if (dst->scrub || src->scrub) {
6261 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
6262 *state, src, dst, &copyback))
6263 return (PF_DROP);
b0d623f7 6264 m = pd->mp;
b0d623f7
A
6265 }
6266
6267 /* update max window */
6268 if (src->max_win < win)
6269 src->max_win = win;
6270 /* synchronize sequencing */
6271 if (SEQ_GT(end, src->seqlo))
6272 src->seqlo = end;
6273 /* slide the window of what the other end can send */
b7266188
A
6274 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
6275 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
b0d623f7
A
6276
6277 /*
6278 * Cannot set dst->seqhi here since this could be a shotgunned
6279 * SYN and not an already established connection.
6280 */
6281
6282 if (th->th_flags & TH_FIN)
6283 if (src->state < TCPS_CLOSING)
6284 src->state = TCPS_CLOSING;
6285 if (th->th_flags & TH_RST)
6286 src->state = dst->state = TCPS_TIME_WAIT;
6287
6288 /* Fall through to PASS packet */
6289
6290 } else {
6291 if ((*state)->dst.state == TCPS_SYN_SENT &&
6292 (*state)->src.state == TCPS_SYN_SENT) {
6293 /* Send RST for state mismatches during handshake */
6294 if (!(th->th_flags & TH_RST))
6295 pf_send_tcp((*state)->rule.ptr, pd->af,
6296 pd->dst, pd->src, th->th_dport,
6297 th->th_sport, ntohl(th->th_ack), 0,
6298 TH_RST, 0, 0,
6299 (*state)->rule.ptr->return_ttl, 1, 0,
6300 pd->eh, kif->pfik_ifp);
6301 src->seqlo = 0;
6302 src->seqhi = 1;
6303 src->max_win = 1;
6304 } else if (pf_status.debug >= PF_DEBUG_MISC) {
6305 printf("pf: BAD state: ");
6306 pf_print_state(*state);
6307 pf_print_flags(th->th_flags);
39236c6e
A
6308 printf("\n seq=%u (%u) ack=%u len=%u ackskew=%d "
6309 "sws=%u dws=%u pkts=%llu:%llu dir=%s,%s\n",
b0d623f7 6310 seq, orig_seq, ack, pd->p_len, ackskew,
39236c6e 6311 (unsigned int)sws, (unsigned int)dws,
6312 (*state)->packets[0], (*state)->packets[1],
6313 direction == PF_IN ? "in" : "out",
6314 direction == (*state)->state_key->direction ?
6315 "fwd" : "rev");
6316 printf("pf: State failure on: %c %c %c %c | %c %c\n",
6317 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
6318 SEQ_GEQ(seq,
6319 src->seqlo - ((u_int32_t)dst->max_win << dws)) ?
6320 ' ': '2',
6321 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
6322 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
6323 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
6324 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
6325 }
6326 REASON_SET(reason, PFRES_BADSTATE);
6327 return (PF_DROP);
6328 }
6329
6330 /* Any packets which have gotten here are to be passed */
6331
b0d623f7
A
6332 if ((*state)->state_key->app_state &&
6333 (*state)->state_key->app_state->handler) {
6334 (*state)->state_key->app_state->handler(*state, direction,
6335 off + (th->th_off << 2), pd, kif);
6336 if (pd->lmw < 0) {
6337 REASON_SET(reason, PFRES_MEMORY);
6338 return (PF_DROP);
6339 }
6340 m = pd->mp;
6341 }
6342
6343 /* translate source/destination address, if necessary */
6344 if (STATE_TRANSLATE((*state)->state_key)) {
6345 if (direction == PF_OUT)
6346 pf_change_ap(direction, pd->mp, pd->src, &th->th_sport,
6347 pd->ip_sum, &th->th_sum,
6348 &(*state)->state_key->gwy.addr,
6349 (*state)->state_key->gwy.xport.port, 0, pd->af);
6350 else
6351 pf_change_ap(direction, pd->mp, pd->dst, &th->th_dport,
6352 pd->ip_sum, &th->th_sum,
6353 &(*state)->state_key->lan.addr,
6354 (*state)->state_key->lan.xport.port, 0, pd->af);
6355 copyback = off + sizeof (*th);
6356 }
6357
6358 if (copyback) {
6359 m = pf_lazy_makewritable(pd, m, copyback);
6360 if (!m) {
6361 REASON_SET(reason, PFRES_MEMORY);
6362 return (PF_DROP);
6363 }
6364
6365 /* Copyback sequence modulation or stateful scrub changes */
6366 m_copyback(m, off, sizeof (*th), th);
6367 }
b0d623f7
A
6368
6369 return (PF_PASS);
6370}
6371
6372static int
6373pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
b7266188 6374 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
b0d623f7
A
6375{
6376#pragma unused(h)
6377 struct pf_state_peer *src, *dst;
6378 struct pf_state_key_cmp key;
6379 struct udphdr *uh = pd->hdr.udp;
b0d623f7
A
6380 struct pf_app_state as;
6381 int dx, action, extfilter;
6382 key.app_state = 0;
6383 key.proto_variant = PF_EXTFILTER_APD;
b0d623f7
A
6384
6385 key.af = pd->af;
6386 key.proto = IPPROTO_UDP;
6387 if (direction == PF_IN) {
6388 PF_ACPY(&key.ext.addr, pd->src, key.af);
6389 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
b0d623f7
A
6390 key.ext.xport.port = uh->uh_sport;
6391 key.gwy.xport.port = uh->uh_dport;
6392 dx = PF_IN;
b0d623f7
A
6393 } else {
6394 PF_ACPY(&key.lan.addr, pd->src, key.af);
6395 PF_ACPY(&key.ext.addr, pd->dst, key.af);
b0d623f7
A
6396 key.lan.xport.port = uh->uh_sport;
6397 key.ext.xport.port = uh->uh_dport;
6398 dx = PF_OUT;
b0d623f7
A
6399 }
6400
b7266188
A
6401 if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
6402 ntohs(uh->uh_dport) == PF_IKE_PORT) {
b0d623f7
A
6403 struct pf_ike_hdr ike;
6404 size_t plen = m->m_pkthdr.len - off - sizeof (*uh);
6405 if (plen < PF_IKE_PACKET_MINSIZE) {
6406 DPFPRINTF(PF_DEBUG_MISC,
6407 ("pf: IKE message too small.\n"));
6408 return (PF_DROP);
6409 }
6410
6411 if (plen > sizeof (ike))
6412 plen = sizeof (ike);
6413 m_copydata(m, off + sizeof (*uh), plen, &ike);
6414
6415 if (ike.initiator_cookie) {
6416 key.app_state = &as;
6417 as.compare_lan_ext = pf_ike_compare;
6418 as.compare_ext_gwy = pf_ike_compare;
6419 as.u.ike.cookie = ike.initiator_cookie;
6420 } else {
6421 /*
6422 * <http://tools.ietf.org/html/\
6423 * draft-ietf-ipsec-nat-t-ike-01>
6424 * Support non-standard NAT-T implementations that
6425 * push the ESP packet over the top of the IKE packet.
6426 * Do not drop packet.
6427 */
6428 DPFPRINTF(PF_DEBUG_MISC,
6429 ("pf: IKE initiator cookie = 0.\n"));
6430 }
6431 }
6432
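	/*
	 * Try the strictest state key first, then progressively relax the
	 * external-endpoint filter: PF_EXTFILTER_APD matches on the remote
	 * address and port, PF_EXTFILTER_AD on the address only, and
	 * PF_EXTFILTER_EI is endpoint independent.
	 */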
6433 *state = pf_find_state(kif, &key, dx);
6434
6435 if (!key.app_state && *state == 0) {
6436 key.proto_variant = PF_EXTFILTER_AD;
6437 *state = pf_find_state(kif, &key, dx);
6438 }
6439
6440 if (!key.app_state && *state == 0) {
6441 key.proto_variant = PF_EXTFILTER_EI;
6442 *state = pf_find_state(kif, &key, dx);
6443 }
6444
39236c6e
A
6445 /* similar to STATE_LOOKUP() */
6446 if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
6447 pd->flowsrc = (*state)->state_key->flowsrc;
316670eb 6448 pd->flowhash = (*state)->state_key->flowhash;
39236c6e
A
6449 if (pd->flowhash != 0) {
6450 pd->pktflags |= PKTF_FLOW_ID;
6451 pd->pktflags &= ~PKTF_FLOW_ADV;
6452 }
6453 }
316670eb 6454
b0d623f7
A
6455 if (pf_state_lookup_aux(state, kif, direction, &action))
6456 return (action);
b0d623f7
A
6457
6458 if (direction == (*state)->state_key->direction) {
6459 src = &(*state)->src;
6460 dst = &(*state)->dst;
6461 } else {
6462 src = &(*state)->dst;
6463 dst = &(*state)->src;
6464 }
6465
6466 /* update states */
6467 if (src->state < PFUDPS_SINGLE)
6468 src->state = PFUDPS_SINGLE;
6469 if (dst->state == PFUDPS_SINGLE)
6470 dst->state = PFUDPS_MULTIPLE;
6471
6472 /* update expire time */
6473 (*state)->expire = pf_time_second();
6474 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
6475 (*state)->timeout = PFTM_UDP_MULTIPLE;
6476 else
6477 (*state)->timeout = PFTM_UDP_SINGLE;
6478
b0d623f7
A
6479 extfilter = (*state)->state_key->proto_variant;
6480 if (extfilter > PF_EXTFILTER_APD) {
6481 (*state)->state_key->ext.xport.port = key.ext.xport.port;
6482 if (extfilter > PF_EXTFILTER_AD)
6483 PF_ACPY(&(*state)->state_key->ext.addr,
6484 &key.ext.addr, key.af);
6485 }
6486
6487 if ((*state)->state_key->app_state &&
6488 (*state)->state_key->app_state->handler) {
6489 (*state)->state_key->app_state->handler(*state, direction,
6490 off + uh->uh_ulen, pd, kif);
b7266188
A
6491 if (pd->lmw < 0) {
6492 REASON_SET(reason, PFRES_MEMORY);
6493 return (PF_DROP);
6494 }
b0d623f7
A
6495 m = pd->mp;
6496 }
b0d623f7
A
6497
6498 /* translate source/destination address, if necessary */
b0d623f7
A
6499 if (STATE_TRANSLATE((*state)->state_key)) {
6500 m = pf_lazy_makewritable(pd, m, off + sizeof (*uh));
d1ecb069
A
6501 if (!m) {
6502 REASON_SET(reason, PFRES_MEMORY);
b0d623f7 6503 return (PF_DROP);
d1ecb069 6504 }
b0d623f7
A
6505
6506 if (direction == PF_OUT)
6507 pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
6508 pd->ip_sum, &uh->uh_sum,
6509 &(*state)->state_key->gwy.addr,
6510 (*state)->state_key->gwy.xport.port, 1, pd->af);
6511 else
6512 pf_change_ap(direction, pd->mp, pd->dst, &uh->uh_dport,
6513 pd->ip_sum, &uh->uh_sum,
6514 &(*state)->state_key->lan.addr,
6515 (*state)->state_key->lan.xport.port, 1, pd->af);
6516 m_copyback(m, off, sizeof (*uh), uh);
6517 }
b0d623f7
A
6518
6519 return (PF_PASS);
6520}
6521
6522static int
6523pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
6524 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
6525{
6526#pragma unused(h)
6527 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
6528 u_int16_t icmpid = 0, *icmpsum;
6529 u_int8_t icmptype;
6530 int state_icmp = 0;
6531 struct pf_state_key_cmp key;
6532
b0d623f7
A
6533 struct pf_app_state as;
6534 key.app_state = 0;
b0d623f7
A
6535
6536 switch (pd->proto) {
6537#if INET
6538 case IPPROTO_ICMP:
6539 icmptype = pd->hdr.icmp->icmp_type;
6540 icmpid = pd->hdr.icmp->icmp_id;
6541 icmpsum = &pd->hdr.icmp->icmp_cksum;
6542
6543 if (icmptype == ICMP_UNREACH ||
6544 icmptype == ICMP_SOURCEQUENCH ||
6545 icmptype == ICMP_REDIRECT ||
6546 icmptype == ICMP_TIMXCEED ||
6547 icmptype == ICMP_PARAMPROB)
6548 state_icmp++;
6549 break;
6550#endif /* INET */
6551#if INET6
6552 case IPPROTO_ICMPV6:
6553 icmptype = pd->hdr.icmp6->icmp6_type;
6554 icmpid = pd->hdr.icmp6->icmp6_id;
6555 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
6556
6557 if (icmptype == ICMP6_DST_UNREACH ||
6558 icmptype == ICMP6_PACKET_TOO_BIG ||
6559 icmptype == ICMP6_TIME_EXCEEDED ||
6560 icmptype == ICMP6_PARAM_PROB)
6561 state_icmp++;
6562 break;
6563#endif /* INET6 */
6564 }
6565
6566 if (!state_icmp) {
6567
6568 /*
6569 * ICMP query/reply message not related to a TCP/UDP packet.
6570 * Search for an ICMP state.
6571 */
6572 key.af = pd->af;
6573 key.proto = pd->proto;
6574 if (direction == PF_IN) {
6575 PF_ACPY(&key.ext.addr, pd->src, key.af);
6576 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
b0d623f7
A
6577 key.ext.xport.port = 0;
6578 key.gwy.xport.port = icmpid;
b0d623f7
A
6579 } else {
6580 PF_ACPY(&key.lan.addr, pd->src, key.af);
6581 PF_ACPY(&key.ext.addr, pd->dst, key.af);
b0d623f7
A
6582 key.lan.xport.port = icmpid;
6583 key.ext.xport.port = 0;
b0d623f7
A
6584 }
6585
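		/*
		 * For ICMP queries the id field stands in for a port: it is
		 * part of the state key and, under NAT, is rewritten (with a
		 * checksum fixup) just like a TCP/UDP port below.
		 */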
6586 STATE_LOOKUP();
6587
6588 (*state)->expire = pf_time_second();
6589 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
6590
6591 /* translate source/destination address, if necessary */
6592 if (STATE_TRANSLATE((*state)->state_key)) {
6593 if (direction == PF_OUT) {
6594 switch (pd->af) {
6595#if INET
6596 case AF_INET:
6597 pf_change_a(&saddr->v4.s_addr,
6598 pd->ip_sum,
6599 (*state)->state_key->gwy.addr.v4.s_addr, 0);
b0d623f7
A
6600 pd->hdr.icmp->icmp_cksum =
6601 pf_cksum_fixup(
6602 pd->hdr.icmp->icmp_cksum, icmpid,
6603 (*state)->state_key->gwy.xport.port, 0);
6604 pd->hdr.icmp->icmp_id =
6605 (*state)->state_key->gwy.xport.port;
6606 m = pf_lazy_makewritable(pd, m,
6607 off + ICMP_MINLEN);
6608 if (!m)
6609 return (PF_DROP);
b0d623f7
A
6610 m_copyback(m, off, ICMP_MINLEN,
6611 pd->hdr.icmp);
6612 break;
6613#endif /* INET */
6614#if INET6
6615 case AF_INET6:
6616 pf_change_a6(saddr,
6617 &pd->hdr.icmp6->icmp6_cksum,
6618 &(*state)->state_key->gwy.addr, 0);
b0d623f7
A
6619 m = pf_lazy_makewritable(pd, m,
6620 off + sizeof (struct icmp6_hdr));
6621 if (!m)
6622 return (PF_DROP);
b0d623f7
A
6623 m_copyback(m, off,
6624 sizeof (struct icmp6_hdr),
6625 pd->hdr.icmp6);
6626 break;
6627#endif /* INET6 */
6628 }
6629 } else {
6630 switch (pd->af) {
6631#if INET
6632 case AF_INET:
6633 pf_change_a(&daddr->v4.s_addr,
6634 pd->ip_sum,
6635 (*state)->state_key->lan.addr.v4.s_addr, 0);
b0d623f7
A
6636 pd->hdr.icmp->icmp_cksum =
6637 pf_cksum_fixup(
6638 pd->hdr.icmp->icmp_cksum, icmpid,
6639 (*state)->state_key->lan.xport.port, 0);
6640 pd->hdr.icmp->icmp_id =
6641 (*state)->state_key->lan.xport.port;
6642 m = pf_lazy_makewritable(pd, m,
6643 off + ICMP_MINLEN);
6644 if (!m)
6645 return (PF_DROP);
b0d623f7
A
6646 m_copyback(m, off, ICMP_MINLEN,
6647 pd->hdr.icmp);
6648 break;
6649#endif /* INET */
6650#if INET6
6651 case AF_INET6:
6652 pf_change_a6(daddr,
6653 &pd->hdr.icmp6->icmp6_cksum,
6654 &(*state)->state_key->lan.addr, 0);
b0d623f7
A
6655 m = pf_lazy_makewritable(pd, m,
6656 off + sizeof (struct icmp6_hdr));
6657 if (!m)
6658 return (PF_DROP);
b0d623f7
A
6659 m_copyback(m, off,
6660 sizeof (struct icmp6_hdr),
6661 pd->hdr.icmp6);
6662 break;
6663#endif /* INET6 */
6664 }
6665 }
6666 }
6667
6668 return (PF_PASS);
6669
6670 } else {
6671 /*
6672 * ICMP error message in response to a TCP/UDP packet.
6673 * Extract the inner TCP/UDP header and search for that state.
6674 */
6675
6676 struct pf_pdesc pd2;
6677#if INET
6678 struct ip h2;
6679#endif /* INET */
6680#if INET6
6681 struct ip6_hdr h2_6;
6682 int terminal = 0;
6683#endif /* INET6 */
6684 int ipoff2 = 0;
6685 int off2 = 0;
6686
6687 memset(&pd2, 0, sizeof (pd2));
6688
6689 pd2.af = pd->af;
6690 switch (pd->af) {
6691#if INET
6692 case AF_INET:
6693 /* offset of h2 in mbuf chain */
6694 ipoff2 = off + ICMP_MINLEN;
6695
6696 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof (h2),
6697 NULL, reason, pd2.af)) {
6698 DPFPRINTF(PF_DEBUG_MISC,
6699 ("pf: ICMP error message too short "
6700 "(ip)\n"));
6701 return (PF_DROP);
6702 }
6703 /*
6704 * ICMP error messages don't refer to non-first
6705 * fragments
6706 */
6707 if (h2.ip_off & htons(IP_OFFMASK)) {
6708 REASON_SET(reason, PFRES_FRAG);
6709 return (PF_DROP);
6710 }
6711
6712 /* offset of protocol header that follows h2 */
6713 off2 = ipoff2 + (h2.ip_hl << 2);
6714
6715 pd2.proto = h2.ip_p;
6716 pd2.src = (struct pf_addr *)&h2.ip_src;
6717 pd2.dst = (struct pf_addr *)&h2.ip_dst;
6718 pd2.ip_sum = &h2.ip_sum;
6719 break;
6720#endif /* INET */
6721#if INET6
6722 case AF_INET6:
6723 ipoff2 = off + sizeof (struct icmp6_hdr);
6724
6725 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof (h2_6),
6726 NULL, reason, pd2.af)) {
6727 DPFPRINTF(PF_DEBUG_MISC,
6728 ("pf: ICMP error message too short "
6729 "(ip6)\n"));
6730 return (PF_DROP);
6731 }
6732 pd2.proto = h2_6.ip6_nxt;
6733 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
6734 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
6735 pd2.ip_sum = NULL;
6736 off2 = ipoff2 + sizeof (h2_6);
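			/*
			 * Walk the IPv6 extension header chain to find the
			 * embedded transport header.  Note the length units
			 * differ: AH's length field counts 32-bit words
			 * minus two, the other options count 8-octet units
			 * minus one.
			 */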
6737 do {
6738 switch (pd2.proto) {
6739 case IPPROTO_FRAGMENT:
6740 /*
 6741				 * ICMPv6 error messages don't refer to
 6742				 * non-first fragments
6743 */
6744 REASON_SET(reason, PFRES_FRAG);
6745 return (PF_DROP);
6746 case IPPROTO_AH:
6747 case IPPROTO_HOPOPTS:
6748 case IPPROTO_ROUTING:
6749 case IPPROTO_DSTOPTS: {
6750 /* get next header and header length */
6751 struct ip6_ext opt6;
6752
6753 if (!pf_pull_hdr(m, off2, &opt6,
6754 sizeof (opt6), NULL, reason,
6755 pd2.af)) {
6756 DPFPRINTF(PF_DEBUG_MISC,
6757 ("pf: ICMPv6 short opt\n"));
6758 return (PF_DROP);
6759 }
6760 if (pd2.proto == IPPROTO_AH)
6761 off2 += (opt6.ip6e_len + 2) * 4;
6762 else
6763 off2 += (opt6.ip6e_len + 1) * 8;
6764 pd2.proto = opt6.ip6e_nxt;
 6765				/* go to the next header */
6766 break;
6767 }
6768 default:
6769 terminal++;
6770 break;
6771 }
6772 } while (!terminal);
6773 break;
6774#endif /* INET6 */
6775 }
6776
6777 switch (pd2.proto) {
6778 case IPPROTO_TCP: {
6779 struct tcphdr th;
6780 u_int32_t seq;
6781 struct pf_state_peer *src, *dst;
6782 u_int8_t dws;
6783 int copyback = 0;
6784
6785 /*
6786 * Only the first 8 bytes of the TCP header can be
6787 * expected. Don't access any TCP header fields after
6788 * th_seq, an ackskew test is not possible.
6789 */
6790 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
6791 pd2.af)) {
6792 DPFPRINTF(PF_DEBUG_MISC,
6793 ("pf: ICMP error message too short "
6794 "(tcp)\n"));
6795 return (PF_DROP);
6796 }
6797
6798 key.af = pd2.af;
6799 key.proto = IPPROTO_TCP;
6800 if (direction == PF_IN) {
6801 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
6802 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
b0d623f7
A
6803 key.ext.xport.port = th.th_dport;
6804 key.gwy.xport.port = th.th_sport;
b0d623f7
A
6805 } else {
6806 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
6807 PF_ACPY(&key.ext.addr, pd2.src, key.af);
b0d623f7
A
6808 key.lan.xport.port = th.th_dport;
6809 key.ext.xport.port = th.th_sport;
b0d623f7
A
6810 }
6811
6812 STATE_LOOKUP();
6813
6814 if (direction == (*state)->state_key->direction) {
6815 src = &(*state)->dst;
6816 dst = &(*state)->src;
6817 } else {
6818 src = &(*state)->src;
6819 dst = &(*state)->dst;
6820 }
6821
39236c6e 6822 if (src->wscale && (dst->wscale & PF_WSCALE_FLAG))
6823 dws = dst->wscale & PF_WSCALE_MASK;
6824 else
39236c6e 6825 dws = TCP_MAX_WINSHIFT;
b0d623f7
A
6826
6827 /* Demodulate sequence number */
6828 seq = ntohl(th.th_seq) - src->seqdiff;
6829 if (src->seqdiff) {
6830 pf_change_a(&th.th_seq, icmpsum,
6831 htonl(seq), 0);
6832 copyback = 1;
6833 }
6834
6835 if (!SEQ_GEQ(src->seqhi, seq) ||
6836 !SEQ_GEQ(seq,
6837 src->seqlo - ((u_int32_t)dst->max_win << dws))) {
b0d623f7
A
6838 if (pf_status.debug >= PF_DEBUG_MISC) {
6839 printf("pf: BAD ICMP %d:%d ",
6840 icmptype, pd->hdr.icmp->icmp_code);
6841 pf_print_host(pd->src, 0, pd->af);
6842 printf(" -> ");
6843 pf_print_host(pd->dst, 0, pd->af);
6844 printf(" state: ");
6845 pf_print_state(*state);
6846 printf(" seq=%u\n", seq);
6847 }
6848 REASON_SET(reason, PFRES_BADSTATE);
6849 return (PF_DROP);
6850 }
6851
6852 if (STATE_TRANSLATE((*state)->state_key)) {
6853 if (direction == PF_IN) {
6854 pf_change_icmp(pd2.src, &th.th_sport,
6855 daddr, &(*state)->state_key->lan.addr,
b0d623f7 6856 (*state)->state_key->lan.xport.port, NULL,
6857 pd2.ip_sum, icmpsum,
6858 pd->ip_sum, 0, pd2.af);
6859 } else {
6860 pf_change_icmp(pd2.dst, &th.th_dport,
6861 saddr, &(*state)->state_key->gwy.addr,
b0d623f7 6862 (*state)->state_key->gwy.xport.port, NULL,
6863 pd2.ip_sum, icmpsum,
6864 pd->ip_sum, 0, pd2.af);
6865 }
6866 copyback = 1;
6867 }
6868
6869 if (copyback) {
b0d623f7
A
6870 m = pf_lazy_makewritable(pd, m, off2 + 8);
6871 if (!m)
6872 return (PF_DROP);
b0d623f7
A
6873 switch (pd2.af) {
6874#if INET
6875 case AF_INET:
6876 m_copyback(m, off, ICMP_MINLEN,
6877 pd->hdr.icmp);
6878 m_copyback(m, ipoff2, sizeof (h2),
6879 &h2);
6880 break;
6881#endif /* INET */
6882#if INET6
6883 case AF_INET6:
6884 m_copyback(m, off,
6885 sizeof (struct icmp6_hdr),
6886 pd->hdr.icmp6);
6887 m_copyback(m, ipoff2, sizeof (h2_6),
6888 &h2_6);
6889 break;
6890#endif /* INET6 */
6891 }
6892 m_copyback(m, off2, 8, &th);
6893 }
6894
6895 return (PF_PASS);
6896 break;
6897 }
6898 case IPPROTO_UDP: {
6899 struct udphdr uh;
b0d623f7 6900 int dx, action;
b0d623f7
A
6901 if (!pf_pull_hdr(m, off2, &uh, sizeof (uh),
6902 NULL, reason, pd2.af)) {
6903 DPFPRINTF(PF_DEBUG_MISC,
6904 ("pf: ICMP error message too short "
6905 "(udp)\n"));
6906 return (PF_DROP);
6907 }
6908
6909 key.af = pd2.af;
6910 key.proto = IPPROTO_UDP;
6911 if (direction == PF_IN) {
6912 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
6913 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
b0d623f7
A
6914 key.ext.xport.port = uh.uh_dport;
6915 key.gwy.xport.port = uh.uh_sport;
6916 dx = PF_IN;
b0d623f7
A
6917 } else {
6918 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
6919 PF_ACPY(&key.ext.addr, pd2.src, key.af);
b0d623f7
A
6920 key.lan.xport.port = uh.uh_dport;
6921 key.ext.xport.port = uh.uh_sport;
6922 dx = PF_OUT;
b0d623f7
A
6923 }
6924
b0d623f7
A
6925 key.proto_variant = PF_EXTFILTER_APD;
6926
b7266188
A
6927 if (ntohs(uh.uh_sport) == PF_IKE_PORT &&
6928 ntohs(uh.uh_dport) == PF_IKE_PORT) {
b0d623f7
A
6929 struct pf_ike_hdr ike;
6930 size_t plen =
6931 m->m_pkthdr.len - off2 - sizeof (uh);
6932 if (direction == PF_IN &&
6933 plen < 8 /* PF_IKE_PACKET_MINSIZE */) {
6934 DPFPRINTF(PF_DEBUG_MISC, ("pf: "
6935 "ICMP error, embedded IKE message "
6936 "too small.\n"));
6937 return (PF_DROP);
6938 }
6939
6940 if (plen > sizeof (ike))
6941 plen = sizeof (ike);
6942 m_copydata(m, off + sizeof (uh), plen, &ike);
6943
6944 key.app_state = &as;
6945 as.compare_lan_ext = pf_ike_compare;
6946 as.compare_ext_gwy = pf_ike_compare;
6947 as.u.ike.cookie = ike.initiator_cookie;
6948 }
6949
6950 *state = pf_find_state(kif, &key, dx);
6951
6952 if (key.app_state && *state == 0) {
6953 key.app_state = 0;
6954 *state = pf_find_state(kif, &key, dx);
6955 }
6956
6957 if (*state == 0) {
6958 key.proto_variant = PF_EXTFILTER_AD;
6959 *state = pf_find_state(kif, &key, dx);
6960 }
6961
6962 if (*state == 0) {
6963 key.proto_variant = PF_EXTFILTER_EI;
6964 *state = pf_find_state(kif, &key, dx);
6965 }
6966
39236c6e 6967 /* similar to STATE_LOOKUP() */
316670eb 6968 if (*state != NULL && pd != NULL &&
6969 !(pd->pktflags & PKTF_FLOW_ID)) {
6970 pd->flowsrc = (*state)->state_key->flowsrc;
316670eb 6971 pd->flowhash = (*state)->state_key->flowhash;
39236c6e
A
6972 if (pd->flowhash != 0) {
6973 pd->pktflags |= PKTF_FLOW_ID;
6974 pd->pktflags &= ~PKTF_FLOW_ADV;
6975 }
6976 }
316670eb 6977
b0d623f7
A
6978 if (pf_state_lookup_aux(state, kif, direction, &action))
6979 return (action);
b0d623f7
A
6980
6981 if (STATE_TRANSLATE((*state)->state_key)) {
6982 if (direction == PF_IN) {
6983 pf_change_icmp(pd2.src, &uh.uh_sport,
6984 daddr, &(*state)->state_key->lan.addr,
b0d623f7 6985 (*state)->state_key->lan.xport.port, &uh.uh_sum,
6986 pd2.ip_sum, icmpsum,
6987 pd->ip_sum, 1, pd2.af);
6988 } else {
6989 pf_change_icmp(pd2.dst, &uh.uh_dport,
6990 saddr, &(*state)->state_key->gwy.addr,
b0d623f7 6991 (*state)->state_key->gwy.xport.port, &uh.uh_sum,
6992 pd2.ip_sum, icmpsum,
6993 pd->ip_sum, 1, pd2.af);
6994 }
b0d623f7
A
6995 m = pf_lazy_makewritable(pd, m,
6996 off2 + sizeof (uh));
6997 if (!m)
6998 return (PF_DROP);
b0d623f7
A
6999 switch (pd2.af) {
7000#if INET
7001 case AF_INET:
7002 m_copyback(m, off, ICMP_MINLEN,
7003 pd->hdr.icmp);
7004 m_copyback(m, ipoff2, sizeof (h2), &h2);
7005 break;
7006#endif /* INET */
7007#if INET6
7008 case AF_INET6:
7009 m_copyback(m, off,
7010 sizeof (struct icmp6_hdr),
7011 pd->hdr.icmp6);
7012 m_copyback(m, ipoff2, sizeof (h2_6),
7013 &h2_6);
7014 break;
7015#endif /* INET6 */
7016 }
7017 m_copyback(m, off2, sizeof (uh), &uh);
7018 }
7019
7020 return (PF_PASS);
7021 break;
7022 }
7023#if INET
7024 case IPPROTO_ICMP: {
7025 struct icmp iih;
7026
7027 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
7028 NULL, reason, pd2.af)) {
7029 DPFPRINTF(PF_DEBUG_MISC,
 7030			    ("pf: ICMP error message too short "
7031 "(icmp)\n"));
7032 return (PF_DROP);
7033 }
7034
7035 key.af = pd2.af;
7036 key.proto = IPPROTO_ICMP;
7037 if (direction == PF_IN) {
7038 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7039 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
b0d623f7
A
7040 key.ext.xport.port = 0;
7041 key.gwy.xport.port = iih.icmp_id;
b0d623f7
A
7042 } else {
7043 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7044 PF_ACPY(&key.ext.addr, pd2.src, key.af);
b0d623f7
A
7045 key.lan.xport.port = iih.icmp_id;
7046 key.ext.xport.port = 0;
b0d623f7
A
7047 }
7048
7049 STATE_LOOKUP();
7050
7051 if (STATE_TRANSLATE((*state)->state_key)) {
7052 if (direction == PF_IN) {
7053 pf_change_icmp(pd2.src, &iih.icmp_id,
7054 daddr, &(*state)->state_key->lan.addr,
b0d623f7 7055 (*state)->state_key->lan.xport.port, NULL,
7056 pd2.ip_sum, icmpsum,
7057 pd->ip_sum, 0, AF_INET);
7058 } else {
7059 pf_change_icmp(pd2.dst, &iih.icmp_id,
7060 saddr, &(*state)->state_key->gwy.addr,
b0d623f7 7061 (*state)->state_key->gwy.xport.port, NULL,
7062 pd2.ip_sum, icmpsum,
7063 pd->ip_sum, 0, AF_INET);
7064 }
b0d623f7
A
7065 m = pf_lazy_makewritable(pd, m, off2 + ICMP_MINLEN);
7066 if (!m)
7067 return (PF_DROP);
b0d623f7
A
7068 m_copyback(m, off, ICMP_MINLEN, pd->hdr.icmp);
7069 m_copyback(m, ipoff2, sizeof (h2), &h2);
7070 m_copyback(m, off2, ICMP_MINLEN, &iih);
7071 }
7072
7073 return (PF_PASS);
7074 break;
7075 }
7076#endif /* INET */
7077#if INET6
7078 case IPPROTO_ICMPV6: {
7079 struct icmp6_hdr iih;
7080
7081 if (!pf_pull_hdr(m, off2, &iih,
7082 sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) {
7083 DPFPRINTF(PF_DEBUG_MISC,
7084 ("pf: ICMP error message too short "
7085 "(icmp6)\n"));
7086 return (PF_DROP);
7087 }
7088
7089 key.af = pd2.af;
7090 key.proto = IPPROTO_ICMPV6;
7091 if (direction == PF_IN) {
7092 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7093 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
b0d623f7
A
7094 key.ext.xport.port = 0;
7095 key.gwy.xport.port = iih.icmp6_id;
b0d623f7
A
7096 } else {
7097 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7098 PF_ACPY(&key.ext.addr, pd2.src, key.af);
b0d623f7
A
7099 key.lan.xport.port = iih.icmp6_id;
7100 key.ext.xport.port = 0;
b0d623f7
A
7101 }
7102
7103 STATE_LOOKUP();
7104
7105 if (STATE_TRANSLATE((*state)->state_key)) {
7106 if (direction == PF_IN) {
7107 pf_change_icmp(pd2.src, &iih.icmp6_id,
7108 daddr, &(*state)->state_key->lan.addr,
b0d623f7 7109 (*state)->state_key->lan.xport.port, NULL,
7110 pd2.ip_sum, icmpsum,
7111 pd->ip_sum, 0, AF_INET6);
7112 } else {
7113 pf_change_icmp(pd2.dst, &iih.icmp6_id,
7114 saddr, &(*state)->state_key->gwy.addr,
b0d623f7 7115 (*state)->state_key->gwy.xport.port, NULL,
7116 pd2.ip_sum, icmpsum,
7117 pd->ip_sum, 0, AF_INET6);
7118 }
b0d623f7
A
7119 m = pf_lazy_makewritable(pd, m, off2 +
7120 sizeof (struct icmp6_hdr));
7121 if (!m)
7122 return (PF_DROP);
b0d623f7
A
7123 m_copyback(m, off, sizeof (struct icmp6_hdr),
7124 pd->hdr.icmp6);
7125 m_copyback(m, ipoff2, sizeof (h2_6), &h2_6);
7126 m_copyback(m, off2, sizeof (struct icmp6_hdr),
7127 &iih);
7128 }
7129
7130 return (PF_PASS);
7131 break;
7132 }
7133#endif /* INET6 */
7134 default: {
7135 key.af = pd2.af;
7136 key.proto = pd2.proto;
7137 if (direction == PF_IN) {
7138 PF_ACPY(&key.ext.addr, pd2.dst, key.af);
7139 PF_ACPY(&key.gwy.addr, pd2.src, key.af);
b0d623f7
A
7140 key.ext.xport.port = 0;
7141 key.gwy.xport.port = 0;
b0d623f7
A
7142 } else {
7143 PF_ACPY(&key.lan.addr, pd2.dst, key.af);
7144 PF_ACPY(&key.ext.addr, pd2.src, key.af);
b0d623f7
A
7145 key.lan.xport.port = 0;
7146 key.ext.xport.port = 0;
b0d623f7
A
7147 }
7148
7149 STATE_LOOKUP();
7150
7151 if (STATE_TRANSLATE((*state)->state_key)) {
7152 if (direction == PF_IN) {
7153 pf_change_icmp(pd2.src, NULL,
7154 daddr, &(*state)->state_key->lan.addr,
7155 0, NULL,
7156 pd2.ip_sum, icmpsum,
7157 pd->ip_sum, 0, pd2.af);
7158 } else {
7159 pf_change_icmp(pd2.dst, NULL,
7160 saddr, &(*state)->state_key->gwy.addr,
7161 0, NULL,
7162 pd2.ip_sum, icmpsum,
7163 pd->ip_sum, 0, pd2.af);
7164 }
7165 switch (pd2.af) {
7166#if INET
7167 case AF_INET:
b0d623f7
A
7168 m = pf_lazy_makewritable(pd, m,
7169 ipoff2 + sizeof (h2));
7170 if (!m)
7171 return (PF_DROP);
b0d623f7
A
7172#endif /* INET */
7173#if INET6
7174 case AF_INET6:
b0d623f7
A
7175 m = pf_lazy_makewritable(pd, m,
7176 ipoff2 + sizeof (h2_6));
7177 if (!m)
7178 return (PF_DROP);
b0d623f7
A
7179 m_copyback(m, off,
7180 sizeof (struct icmp6_hdr),
7181 pd->hdr.icmp6);
7182 m_copyback(m, ipoff2, sizeof (h2_6),
7183 &h2_6);
7184 break;
7185#endif /* INET6 */
7186 }
7187 }
7188
7189 return (PF_PASS);
7190 break;
7191 }
7192 }
7193 }
7194}
7195
b0d623f7
A
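/*
 * GREv1 (PPTP data channel) tracking: states are keyed on the GRE call ID
 * negotiated over the PPTP control connection, and inbound packets have
 * their call ID rewritten back to the lan side's value when the state is
 * translated.
 */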
7196static int
7197pf_test_state_grev1(struct pf_state **state, int direction,
7198 struct pfi_kif *kif, int off, struct pf_pdesc *pd)
7199{
7200 struct pf_state_peer *src;
7201 struct pf_state_peer *dst;
7202 struct pf_state_key_cmp key;
7203 struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
7204 struct mbuf *m;
7205
b0d623f7 7206 key.app_state = 0;
b0d623f7
A
7207 key.af = pd->af;
7208 key.proto = IPPROTO_GRE;
7209 key.proto_variant = PF_GRE_PPTP_VARIANT;
7210 if (direction == PF_IN) {
7211 PF_ACPY(&key.ext.addr, pd->src, key.af);
7212 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7213 key.gwy.xport.call_id = grev1->call_id;
7214 } else {
7215 PF_ACPY(&key.lan.addr, pd->src, key.af);
7216 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7217 key.ext.xport.call_id = grev1->call_id;
7218 }
7219
7220 STATE_LOOKUP();
7221
7222 if (direction == (*state)->state_key->direction) {
7223 src = &(*state)->src;
7224 dst = &(*state)->dst;
7225 } else {
7226 src = &(*state)->dst;
7227 dst = &(*state)->src;
7228 }
7229
7230 /* update states */
7231 if (src->state < PFGRE1S_INITIATING)
7232 src->state = PFGRE1S_INITIATING;
7233
7234 /* update expire time */
7235 (*state)->expire = pf_time_second();
7236 if (src->state >= PFGRE1S_INITIATING &&
7237 dst->state >= PFGRE1S_INITIATING) {
d1ecb069
A
7238 if ((*state)->timeout != PFTM_TCP_ESTABLISHED)
7239 (*state)->timeout = PFTM_GREv1_ESTABLISHED;
b0d623f7
A
7240 src->state = PFGRE1S_ESTABLISHED;
7241 dst->state = PFGRE1S_ESTABLISHED;
7242 } else {
7243 (*state)->timeout = PFTM_GREv1_INITIATING;
7244 }
d1ecb069
A
7245
7246 if ((*state)->state_key->app_state)
7247 (*state)->state_key->app_state->u.grev1.pptp_state->expire =
7248 pf_time_second();
7249
b0d623f7
A
7250 /* translate source/destination address, if necessary */
7251 if (STATE_GRE_TRANSLATE((*state)->state_key)) {
7252 if (direction == PF_OUT) {
7253 switch (pd->af) {
7254#if INET
7255 case AF_INET:
7256 pf_change_a(&pd->src->v4.s_addr,
7257 pd->ip_sum,
7258 (*state)->state_key->gwy.addr.v4.s_addr, 0);
7259 break;
7260#endif /* INET */
7261#if INET6
7262 case AF_INET6:
7263 PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
7264 pd->af);
7265 break;
7266#endif /* INET6 */
7267 }
7268 } else {
7269 grev1->call_id = (*state)->state_key->lan.xport.call_id;
7270
7271 switch (pd->af) {
7272#if INET
7273 case AF_INET:
7274 pf_change_a(&pd->dst->v4.s_addr,
7275 pd->ip_sum,
7276 (*state)->state_key->lan.addr.v4.s_addr, 0);
7277 break;
7278#endif /* INET */
7279#if INET6
7280 case AF_INET6:
7281 PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
7282 pd->af);
7283 break;
7284#endif /* INET6 */
7285 }
7286 }
7287
7288 m = pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1));
7289 if (!m)
7290 return (PF_DROP);
7291 m_copyback(m, off, sizeof (*grev1), grev1);
7292 }
7293
7294 return (PF_PASS);
7295}
7296
316670eb 7297static int
b0d623f7
A
7298pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
7299 int off, struct pf_pdesc *pd)
7300{
7301#pragma unused(off)
7302 struct pf_state_peer *src;
7303 struct pf_state_peer *dst;
7304 struct pf_state_key_cmp key;
7305 struct pf_esp_hdr *esp = pd->hdr.esp;
7306 int action;
7307
7308 memset(&key, 0, sizeof (key));
7309 key.af = pd->af;
7310 key.proto = IPPROTO_ESP;
7311 if (direction == PF_IN) {
7312 PF_ACPY(&key.ext.addr, pd->src, key.af);
7313 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
7314 key.gwy.xport.spi = esp->spi;
7315 } else {
7316 PF_ACPY(&key.lan.addr, pd->src, key.af);
7317 PF_ACPY(&key.ext.addr, pd->dst, key.af);
7318 key.ext.xport.spi = esp->spi;
7319 }
7320
7321 *state = pf_find_state(kif, &key, direction);
7322
7323 if (*state == 0) {
7324 struct pf_state *s;
7325
7326 /*
7327 * <jhw@apple.com>
7328 * No matching state. Look for a blocking state. If we find
7329 * one, then use that state and move it so that it's keyed to
7330 * the SPI in the current packet.
7331 */
7332 if (direction == PF_IN) {
7333 key.gwy.xport.spi = 0;
7334
7335 s = pf_find_state(kif, &key, direction);
7336 if (s) {
7337 struct pf_state_key *sk = s->state_key;
7338
7339 RB_REMOVE(pf_state_tree_ext_gwy,
7340 &pf_statetbl_ext_gwy, sk);
7341 sk->lan.xport.spi = sk->gwy.xport.spi =
7342 esp->spi;
7343
7344 if (RB_INSERT(pf_state_tree_ext_gwy,
7345 &pf_statetbl_ext_gwy, sk))
7346 pf_detach_state(s, PF_DT_SKIP_EXTGWY);
7347 else
7348 *state = s;
7349 }
7350 } else {
7351 key.ext.xport.spi = 0;
7352
7353 s = pf_find_state(kif, &key, direction);
7354 if (s) {
7355 struct pf_state_key *sk = s->state_key;
7356
7357 RB_REMOVE(pf_state_tree_lan_ext,
7358 &pf_statetbl_lan_ext, sk);
7359 sk->ext.xport.spi = esp->spi;
7360
7361 if (RB_INSERT(pf_state_tree_lan_ext,
7362 &pf_statetbl_lan_ext, sk))
7363 pf_detach_state(s, PF_DT_SKIP_LANEXT);
7364 else
7365 *state = s;
7366 }
7367 }
7368
7369 if (s) {
7370 if (*state == 0) {
7371#if NPFSYNC
7372 if (s->creatorid == pf_status.hostid)
7373 pfsync_delete_state(s);
7374#endif
7375 s->timeout = PFTM_UNLINKED;
7376 hook_runloop(&s->unlink_hooks,
7377 HOOK_REMOVE|HOOK_FREE);
7378 pf_src_tree_remove_state(s);
7379 pf_free_state(s);
7380 return (PF_DROP);
7381 }
7382 }
7383 }
7384
39236c6e
A
7385 /* similar to STATE_LOOKUP() */
7386 if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
7387 pd->flowsrc = (*state)->state_key->flowsrc;
316670eb 7388 pd->flowhash = (*state)->state_key->flowhash;
39236c6e
A
7389 if (pd->flowhash != 0) {
7390 pd->pktflags |= PKTF_FLOW_ID;
7391 pd->pktflags &= ~PKTF_FLOW_ADV;
7392 }
316670eb
A
7393 }
7394
b0d623f7
A
7395 if (pf_state_lookup_aux(state, kif, direction, &action))
7396 return (action);
7397
7398 if (direction == (*state)->state_key->direction) {
7399 src = &(*state)->src;
7400 dst = &(*state)->dst;
7401 } else {
7402 src = &(*state)->dst;
7403 dst = &(*state)->src;
7404 }
7405
7406 /* update states */
7407 if (src->state < PFESPS_INITIATING)
7408 src->state = PFESPS_INITIATING;
7409
7410 /* update expire time */
7411 (*state)->expire = pf_time_second();
7412 if (src->state >= PFESPS_INITIATING &&
7413 dst->state >= PFESPS_INITIATING) {
7414 (*state)->timeout = PFTM_ESP_ESTABLISHED;
7415 src->state = PFESPS_ESTABLISHED;
7416 dst->state = PFESPS_ESTABLISHED;
7417 } else {
7418 (*state)->timeout = PFTM_ESP_INITIATING;
7419 }
7420 /* translate source/destination address, if necessary */
7421 if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
7422 if (direction == PF_OUT) {
7423 switch (pd->af) {
7424#if INET
7425 case AF_INET:
7426 pf_change_a(&pd->src->v4.s_addr,
7427 pd->ip_sum,
7428 (*state)->state_key->gwy.addr.v4.s_addr, 0);
7429 break;
7430#endif /* INET */
7431#if INET6
7432 case AF_INET6:
7433 PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
7434 pd->af);
7435 break;
7436#endif /* INET6 */
7437 }
7438 } else {
7439 switch (pd->af) {
7440#if INET
7441 case AF_INET:
7442 pf_change_a(&pd->dst->v4.s_addr,
7443 pd->ip_sum,
7444 (*state)->state_key->lan.addr.v4.s_addr, 0);
7445 break;
7446#endif /* INET */
7447#if INET6
7448 case AF_INET6:
7449 PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
7450 pd->af);
7451 break;
7452#endif /* INET6 */
7453 }
7454 }
7455 }
7456
7457 return (PF_PASS);
7458}
b0d623f7
A
7459
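/*
 * Catch-all tracker for protocols without a dedicated handler: uses the
 * same SINGLE/MULTIPLE progression as UDP and, since there are no ports,
 * only the addresses are rewritten when the state is translated.
 */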
7460static int
7461pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
7462 struct pf_pdesc *pd)
7463{
7464 struct pf_state_peer *src, *dst;
7465 struct pf_state_key_cmp key;
7466
b0d623f7 7467 key.app_state = 0;
b0d623f7
A
7468 key.af = pd->af;
7469 key.proto = pd->proto;
7470 if (direction == PF_IN) {
7471 PF_ACPY(&key.ext.addr, pd->src, key.af);
7472 PF_ACPY(&key.gwy.addr, pd->dst, key.af);
b0d623f7
A
7473 key.ext.xport.port = 0;
7474 key.gwy.xport.port = 0;
b0d623f7
A
7475 } else {
7476 PF_ACPY(&key.lan.addr, pd->src, key.af);
7477 PF_ACPY(&key.ext.addr, pd->dst, key.af);
b0d623f7
A
7478 key.lan.xport.port = 0;
7479 key.ext.xport.port = 0;
b0d623f7
A
7480 }
7481
7482 STATE_LOOKUP();
7483
7484 if (direction == (*state)->state_key->direction) {
7485 src = &(*state)->src;
7486 dst = &(*state)->dst;
7487 } else {
7488 src = &(*state)->dst;
7489 dst = &(*state)->src;
7490 }
7491
7492 /* update states */
7493 if (src->state < PFOTHERS_SINGLE)
7494 src->state = PFOTHERS_SINGLE;
7495 if (dst->state == PFOTHERS_SINGLE)
7496 dst->state = PFOTHERS_MULTIPLE;
7497
7498 /* update expire time */
7499 (*state)->expire = pf_time_second();
7500 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
7501 (*state)->timeout = PFTM_OTHER_MULTIPLE;
7502 else
7503 (*state)->timeout = PFTM_OTHER_SINGLE;
7504
7505 /* translate source/destination address, if necessary */
b0d623f7 7506 if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
b0d623f7
A
7507 if (direction == PF_OUT) {
7508 switch (pd->af) {
7509#if INET
7510 case AF_INET:
7511 pf_change_a(&pd->src->v4.s_addr,
7512 pd->ip_sum,
7513 (*state)->state_key->gwy.addr.v4.s_addr,
7514 0);
7515 break;
7516#endif /* INET */
7517#if INET6
7518 case AF_INET6:
7519 PF_ACPY(pd->src,
7520 &(*state)->state_key->gwy.addr, pd->af);
7521 break;
7522#endif /* INET6 */
7523 }
7524 } else {
7525 switch (pd->af) {
7526#if INET
7527 case AF_INET:
7528 pf_change_a(&pd->dst->v4.s_addr,
7529 pd->ip_sum,
7530 (*state)->state_key->lan.addr.v4.s_addr,
7531 0);
7532 break;
7533#endif /* INET */
7534#if INET6
7535 case AF_INET6:
7536 PF_ACPY(pd->dst,
7537 &(*state)->state_key->lan.addr, pd->af);
7538 break;
7539#endif /* INET6 */
7540 }
7541 }
7542 }
7543
7544 return (PF_PASS);
7545}
7546
7547/*
7548 * ipoff and off are measured from the start of the mbuf chain.
7549 * h must be at "ipoff" on the mbuf chain.
7550 */
7551void *
7552pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
7553 u_short *actionp, u_short *reasonp, sa_family_t af)
7554{
7555 switch (af) {
7556#if INET
7557 case AF_INET: {
7558 struct ip *h = mtod(m, struct ip *);
7559 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
7560
7561 if (fragoff) {
7562 if (fragoff >= len) {
7563 ACTION_SET(actionp, PF_PASS);
7564 } else {
7565 ACTION_SET(actionp, PF_DROP);
7566 REASON_SET(reasonp, PFRES_FRAG);
7567 }
7568 return (NULL);
7569 }
7570 if (m->m_pkthdr.len < off + len ||
7571 ntohs(h->ip_len) < off + len) {
7572 ACTION_SET(actionp, PF_DROP);
7573 REASON_SET(reasonp, PFRES_SHORT);
7574 return (NULL);
7575 }
7576 break;
7577 }
7578#endif /* INET */
7579#if INET6
7580 case AF_INET6: {
7581 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
7582
7583 if (m->m_pkthdr.len < off + len ||
7584 (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) <
7585 (unsigned)(off + len)) {
7586 ACTION_SET(actionp, PF_DROP);
7587 REASON_SET(reasonp, PFRES_SHORT);
7588 return (NULL);
7589 }
7590 break;
7591 }
7592#endif /* INET6 */
7593 }
7594 m_copydata(m, off, len, p);
7595 return (p);
7596}
7597
7598int
7599pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
7600{
7601#pragma unused(kif)
7602 struct sockaddr_in *dst;
7603 int ret = 1;
7604#if INET6
7605 struct sockaddr_in6 *dst6;
7606 struct route_in6 ro;
7607#else
7608 struct route ro;
7609#endif
7610
7611 bzero(&ro, sizeof (ro));
7612 switch (af) {
7613 case AF_INET:
7614 dst = satosin(&ro.ro_dst);
7615 dst->sin_family = AF_INET;
7616 dst->sin_len = sizeof (*dst);
7617 dst->sin_addr = addr->v4;
7618 break;
7619#if INET6
7620 case AF_INET6:
7621 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
7622 dst6->sin6_family = AF_INET6;
7623 dst6->sin6_len = sizeof (*dst6);
7624 dst6->sin6_addr = addr->v6;
7625 break;
7626#endif /* INET6 */
7627 default:
7628 return (0);
7629 }
7630
 7631	/* XXX: IFT_ENC is not currently used by anything */
7632 /* Skip checks for ipsec interfaces */
7633 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
7634 goto out;
7635
39236c6e 7636 /* XXX: what is the point of this? */
b0d623f7
A
7637 rtalloc((struct route *)&ro);
7638
7639out:
39236c6e 7640 ROUTE_RELEASE(&ro);
b0d623f7
A
7641 return (ret);
7642}
7643
7644int
7645pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
7646{
7647#pragma unused(aw)
7648 struct sockaddr_in *dst;
7649#if INET6
7650 struct sockaddr_in6 *dst6;
7651 struct route_in6 ro;
7652#else
7653 struct route ro;
7654#endif
7655 int ret = 0;
7656
7657 bzero(&ro, sizeof (ro));
7658 switch (af) {
7659 case AF_INET:
7660 dst = satosin(&ro.ro_dst);
7661 dst->sin_family = AF_INET;
7662 dst->sin_len = sizeof (*dst);
7663 dst->sin_addr = addr->v4;
7664 break;
7665#if INET6
7666 case AF_INET6:
7667 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
7668 dst6->sin6_family = AF_INET6;
7669 dst6->sin6_len = sizeof (*dst6);
7670 dst6->sin6_addr = addr->v6;
7671 break;
7672#endif /* INET6 */
7673 default:
7674 return (0);
7675 }
7676
39236c6e 7677 /* XXX: what is the point of this? */
b0d623f7
A
7678 rtalloc((struct route *)&ro);
7679
39236c6e 7680 ROUTE_RELEASE(&ro);
b0d623f7
A
7681
7682 return (ret);
7683}
7684
7685#if INET
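/*
 * route-to / reply-to / dup-to handling for IPv4: pick the outgoing
 * interface from the rule's address pool (or the state's cached route),
 * re-run pf_test() if the packet is leaving a different interface than it
 * arrived on, and fragment when it exceeds the MTU.
 */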
7686static void
7687pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
7688 struct pf_state *s, struct pf_pdesc *pd)
7689{
7690#pragma unused(pd)
7691 struct mbuf *m0, *m1;
7692 struct route iproute;
39236c6e 7693 struct route *ro = &iproute;
b0d623f7
A
7694 struct sockaddr_in *dst;
7695 struct ip *ip;
7696 struct ifnet *ifp = NULL;
7697 struct pf_addr naddr;
7698 struct pf_src_node *sn = NULL;
7699 int error = 0;
39236c6e
A
7700 uint32_t sw_csum;
7701
7702 bzero(&iproute, sizeof (iproute));
b0d623f7
A
7703
7704 if (m == NULL || *m == NULL || r == NULL ||
7705 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
7706 panic("pf_route: invalid parameters");
7707
316670eb 7708 if (pd->pf_mtag->pftag_routed++ > 3) {
b0d623f7
A
7709 m0 = *m;
7710 *m = NULL;
7711 goto bad;
7712 }
7713
7714 if (r->rt == PF_DUPTO) {
7715 if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
7716 return;
7717 } else {
7718 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
7719 return;
7720 m0 = *m;
7721 }
7722
7723 if (m0->m_len < (int)sizeof (struct ip)) {
7724 DPFPRINTF(PF_DEBUG_URGENT,
7725 ("pf_route: m0->m_len < sizeof (struct ip)\n"));
7726 goto bad;
7727 }
7728
7729 ip = mtod(m0, struct ip *);
7730
316670eb 7731 dst = satosin((void *)&ro->ro_dst);
b0d623f7
A
7732 dst->sin_family = AF_INET;
7733 dst->sin_len = sizeof (*dst);
7734 dst->sin_addr = ip->ip_dst;
7735
7736 if (r->rt == PF_FASTROUTE) {
7737 rtalloc(ro);
39236c6e 7738 if (ro->ro_rt == NULL) {
b0d623f7
A
7739 ipstat.ips_noroute++;
7740 goto bad;
7741 }
7742
7743 ifp = ro->ro_rt->rt_ifp;
6d2010ae 7744 RT_LOCK(ro->ro_rt);
b0d623f7
A
7745 ro->ro_rt->rt_use++;
7746
7747 if (ro->ro_rt->rt_flags & RTF_GATEWAY)
316670eb 7748 dst = satosin((void *)ro->ro_rt->rt_gateway);
6d2010ae 7749 RT_UNLOCK(ro->ro_rt);
b0d623f7
A
7750 } else {
7751 if (TAILQ_EMPTY(&r->rpool.list)) {
7752 DPFPRINTF(PF_DEBUG_URGENT,
7753 ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
7754 goto bad;
7755 }
7756 if (s == NULL) {
7757 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
7758 &naddr, NULL, &sn);
7759 if (!PF_AZERO(&naddr, AF_INET))
7760 dst->sin_addr.s_addr = naddr.v4.s_addr;
7761 ifp = r->rpool.cur->kif ?
7762 r->rpool.cur->kif->pfik_ifp : NULL;
7763 } else {
7764 if (!PF_AZERO(&s->rt_addr, AF_INET))
7765 dst->sin_addr.s_addr =
7766 s->rt_addr.v4.s_addr;
7767 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
7768 }
7769 }
7770 if (ifp == NULL)
7771 goto bad;
7772
7773 if (oifp != ifp) {
316670eb 7774 if (pf_test(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS)
b0d623f7
A
7775 goto bad;
7776 else if (m0 == NULL)
7777 goto done;
7778 if (m0->m_len < (int)sizeof (struct ip)) {
7779 DPFPRINTF(PF_DEBUG_URGENT,
7780 ("pf_route: m0->m_len < sizeof (struct ip)\n"));
7781 goto bad;
7782 }
7783 ip = mtod(m0, struct ip *);
7784 }
7785
b0d623f7 7786 /* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
39236c6e
A
7787 ip_output_checksum(ifp, m0, ((ip->ip_hl) << 2), ntohs(ip->ip_len),
7788 &sw_csum);
b0d623f7 7789
39236c6e
A
7790 if (ntohs(ip->ip_len) <= ifp->if_mtu || TSO_IPV4_OK(ifp, m0) ||
7791 (!(ip->ip_off & htons(IP_DF)) &&
7792 (ifp->if_hwassist & CSUM_FRAGMENT))) {
b0d623f7 7793 ip->ip_sum = 0;
39236c6e 7794 if (sw_csum & CSUM_DELAY_IP) {
b0d623f7 7795 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
39236c6e
A
7796 sw_csum &= ~CSUM_DELAY_IP;
7797 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
7798 }
316670eb 7799 error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt, sintosa(dst));
b0d623f7
A
7800 goto done;
7801 }
7802
7803 /*
7804 * Too large for interface; fragment if possible.
7805 * Must be able to put at least 8 bytes per fragment.
39236c6e 7806	 * Balk when the DF bit is set or the interface doesn't support TSO.
b0d623f7 7807 */
39236c6e
A
7808 if ((ip->ip_off & htons(IP_DF)) ||
7809 (m0->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) {
b0d623f7
A
7810 ipstat.ips_cantfrag++;
7811 if (r->rt != PF_DUPTO) {
7812 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
7813 ifp->if_mtu);
7814 goto done;
7815 } else
7816 goto bad;
7817 }
7818
7819 m1 = m0;
6d2010ae
A
7820
7821 /* PR-8933605: send ip_len,ip_off to ip_fragment in host byte order */
316670eb 7822#if BYTE_ORDER != BIG_ENDIAN
6d2010ae
A
7823 NTOHS(ip->ip_off);
7824 NTOHS(ip->ip_len);
7825#endif
b0d623f7 7826 error = ip_fragment(m0, ifp, ifp->if_mtu, sw_csum);
316670eb 7827
b0d623f7
A
7828 if (error) {
7829 m0 = NULL;
7830 goto bad;
7831 }
7832
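	/*
	 * ip_fragment() leaves the fragments chained through m_nextpkt;
	 * unlink and transmit them one by one, and free the remainder of
	 * the chain as soon as an error is encountered.
	 */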
7833 for (m0 = m1; m0; m0 = m1) {
7834 m1 = m0->m_nextpkt;
7835 m0->m_nextpkt = 0;
7836 if (error == 0)
316670eb 7837 error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt,
b0d623f7
A
7838 sintosa(dst));
7839 else
7840 m_freem(m0);
7841 }
7842
7843 if (error == 0)
7844 ipstat.ips_fragmented++;
7845
7846done:
7847 if (r->rt != PF_DUPTO)
7848 *m = NULL;
39236c6e
A
7849
7850 ROUTE_RELEASE(&iproute);
b0d623f7
A
7851 return;
7852
7853bad:
7854 m_freem(m0);
7855 goto done;
7856}
7857#endif /* INET */
7858
7859#if INET6
7860static void
7861pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
7862 struct pf_state *s, struct pf_pdesc *pd)
7863{
7864#pragma unused(pd)
7865 struct mbuf *m0;
7866 struct route_in6 ip6route;
7867 struct route_in6 *ro;
7868 struct sockaddr_in6 *dst;
7869 struct ip6_hdr *ip6;
7870 struct ifnet *ifp = NULL;
7871 struct pf_addr naddr;
7872 struct pf_src_node *sn = NULL;
7873 int error = 0;
7874
7875 if (m == NULL || *m == NULL || r == NULL ||
7876 (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
7877 panic("pf_route6: invalid parameters");
7878
316670eb 7879 if (pd->pf_mtag->pftag_routed++ > 3) {
b0d623f7
A
7880 m0 = *m;
7881 *m = NULL;
7882 goto bad;
7883 }
7884
7885 if (r->rt == PF_DUPTO) {
7886 if ((m0 = m_copym(*m, 0, M_COPYALL, M_NOWAIT)) == NULL)
7887 return;
7888 } else {
7889 if ((r->rt == PF_REPLYTO) == (r->direction == dir))
7890 return;
7891 m0 = *m;
7892 }
7893
7894 if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
7895 DPFPRINTF(PF_DEBUG_URGENT,
7896 ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
7897 goto bad;
7898 }
7899 ip6 = mtod(m0, struct ip6_hdr *);
7900
7901 ro = &ip6route;
7902 bzero((caddr_t)ro, sizeof (*ro));
7903 dst = (struct sockaddr_in6 *)&ro->ro_dst;
7904 dst->sin6_family = AF_INET6;
7905 dst->sin6_len = sizeof (*dst);
7906 dst->sin6_addr = ip6->ip6_dst;
7907
7908 /* Cheat. XXX why only in the v6 case??? */
7909 if (r->rt == PF_FASTROUTE) {
7910 struct pf_mtag *pf_mtag;
7911
7912 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
7913 goto bad;
316670eb 7914 pf_mtag->pftag_flags |= PF_TAG_GENERATED;
6d2010ae 7915 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
b0d623f7
A
7916 return;
7917 }
7918
7919 if (TAILQ_EMPTY(&r->rpool.list)) {
7920 DPFPRINTF(PF_DEBUG_URGENT,
7921 ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
7922 goto bad;
7923 }
7924 if (s == NULL) {
7925 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
7926 &naddr, NULL, &sn);
7927 if (!PF_AZERO(&naddr, AF_INET6))
7928 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
7929 &naddr, AF_INET6);
7930 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
7931 } else {
7932 if (!PF_AZERO(&s->rt_addr, AF_INET6))
7933 PF_ACPY((struct pf_addr *)&dst->sin6_addr,
7934 &s->rt_addr, AF_INET6);
7935 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
7936 }
7937 if (ifp == NULL)
7938 goto bad;
7939
7940 if (oifp != ifp) {
316670eb 7941 if (pf_test6(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS)
b0d623f7
A
7942 goto bad;
7943 else if (m0 == NULL)
7944 goto done;
7945 if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
7946 DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
7947 "< sizeof (struct ip6_hdr)\n"));
7948 goto bad;
7949 }
7950 ip6 = mtod(m0, struct ip6_hdr *);
7951 }
7952
7953 /*
7954 * If the packet is too large for the outgoing interface,
7955 * send back an icmp6 error.
7956 */
7957 if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
7958 dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
7959 if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
316670eb 7960 error = nd6_output(ifp, ifp, m0, dst, NULL, NULL);
b0d623f7
A
7961 } else {
7962 in6_ifstat_inc(ifp, ifs6_in_toobig);
7963 if (r->rt != PF_DUPTO)
7964 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
7965 else
7966 goto bad;
7967 }
7968
7969done:
7970 if (r->rt != PF_DUPTO)
7971 *m = NULL;
7972 return;
7973
7974bad:
7975 m_freem(m0);
7976 goto done;
7977}
7978#endif /* INET6 */
7979
7980
7981/*
7982 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
7983 * off is the offset where the protocol header starts
7984 * len is the total length of protocol header plus payload
7985 * returns 0 when the checksum is valid, otherwise returns 1.
7986 */
7987static int
7988pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p,
7989 sa_family_t af)
7990{
7991 u_int16_t sum;
7992
7993 switch (p) {
7994 case IPPROTO_TCP:
7995 case IPPROTO_UDP:
7996 /*
7997 * Optimize for the common case; if the hardware calculated
7998 * value doesn't include pseudo-header checksum, or if it
7999 * is partially-computed (only 16-bit summation), do it in
8000 * software below.
8001 */
39236c6e
A
8002 if ((m->m_pkthdr.csum_flags &
8003 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
8004 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR) &&
b0d623f7
A
8005 (m->m_pkthdr.csum_data ^ 0xffff) == 0) {
8006 return (0);
8007 }
8008 break;
8009 case IPPROTO_ICMP:
8010#if INET6
8011 case IPPROTO_ICMPV6:
8012#endif /* INET6 */
8013 break;
8014 default:
8015 return (1);
8016 }
8017 if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr))
8018 return (1);
8019 if (m->m_pkthdr.len < off + len)
8020 return (1);
8021 switch (af) {
8022#if INET
8023 case AF_INET:
8024 if (p == IPPROTO_ICMP) {
8025 if (m->m_len < off)
8026 return (1);
8027 m->m_data += off;
8028 m->m_len -= off;
8029 sum = in_cksum(m, len);
8030 m->m_data -= off;
8031 m->m_len += off;
8032 } else {
8033 if (m->m_len < (int)sizeof (struct ip))
8034 return (1);
8035 sum = inet_cksum(m, p, off, len);
8036 }
8037 break;
8038#endif /* INET */
8039#if INET6
8040 case AF_INET6:
8041 if (m->m_len < (int)sizeof (struct ip6_hdr))
8042 return (1);
8043 sum = inet6_cksum(m, p, off, len);
8044 break;
8045#endif /* INET6 */
8046 default:
8047 return (1);
8048 }
8049 if (sum) {
8050 switch (p) {
8051 case IPPROTO_TCP:
8052 tcpstat.tcps_rcvbadsum++;
8053 break;
8054 case IPPROTO_UDP:
8055 udpstat.udps_badsum++;
8056 break;
8057 case IPPROTO_ICMP:
8058 icmpstat.icps_checksum++;
8059 break;
8060#if INET6
8061 case IPPROTO_ICMPV6:
8062 icmp6stat.icp6s_checksum++;
8063 break;
8064#endif /* INET6 */
8065 }
8066 return (1);
8067 }
8068 return (0);
8069}
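/*
 * Illustrative sketch of a typical call (assumes the "h" and "off"
 * variables computed in pf_test() below, and PFRES_PROTCKSUM, pf's
 * bad-protocol-checksum reason code):
 *
 *	if (pf_check_proto_cksum(m, off, ntohs(h->ip_len) - off,
 *	    IPPROTO_TCP, AF_INET))
 *		REASON_SET(&reason, PFRES_PROTCKSUM);
 */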
8070
8071#if INET
b0d623f7
A
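/*
 * Header pull-ups in the Apple port may replace the working mbuf; if
 * pd.mp no longer matches m, resynchronize the local mbuf pointer, the
 * cached IPv4 header pointer and the pf mtag.
 */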
8072#define PF_APPLE_UPDATE_PDESC_IPv4() \
8073 do { \
8074 if (m && pd.mp && m != pd.mp) { \
8075 m = pd.mp; \
8076 h = mtod(m, struct ip *); \
316670eb 8077 pd.pf_mtag = pf_get_mtag(m); \
b0d623f7
A
8078 } \
8079 } while (0)
b0d623f7
A
8080
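/*
 * IPv4 entry point.  dir is PF_IN or PF_OUT relative to ifp; *m0 may be
 * replaced (e.g. after reassembly) or freed and set to NULL.  The return
 * value is a PF_* verdict; on PF_PASS the possibly modified packet left
 * in *m0 continues through the stack.
 */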
8081int
8082pf_test(int dir, struct ifnet *ifp, struct mbuf **m0,
316670eb 8083 struct ether_header *eh, struct ip_fw_args *fwa)
b0d623f7 8084{
316670eb
A
8085#if !DUMMYNET
8086#pragma unused(fwa)
8087#endif
b0d623f7 8088 struct pfi_kif *kif;
316670eb 8089 u_short action = PF_PASS, reason = 0, log = 0;
b0d623f7
A
8090 struct mbuf *m = *m0;
8091 struct ip *h = 0;
8092 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
8093 struct pf_state *s = NULL;
8094 struct pf_state_key *sk = NULL;
8095 struct pf_ruleset *ruleset = NULL;
8096 struct pf_pdesc pd;
8097 int off, dirndx, pqid = 0;
8098
8099 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
8100
8101 if (!pf_status.running)
8102 return (PF_PASS);
8103
8104 memset(&pd, 0, sizeof (pd));
8105
8106 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
8107 DPFPRINTF(PF_DEBUG_URGENT,
8108 ("pf_test: pf_get_mtag returned NULL\n"));
8109 return (PF_DROP);
8110 }
8111
316670eb 8112 if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED)
b0d623f7
A
8113 return (PF_PASS);
8114
8115 kif = (struct pfi_kif *)ifp->if_pf_kif;
8116
8117 if (kif == NULL) {
8118 DPFPRINTF(PF_DEBUG_URGENT,
8119 ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
8120 return (PF_DROP);
8121 }
8122 if (kif->pfik_flags & PFI_IFLAG_SKIP)
8123 return (PF_PASS);
8124
39236c6e 8125 VERIFY(m->m_flags & M_PKTHDR);
b0d623f7 8126
316670eb
A
8127 /* initialize enough of pd for the done label */
8128 h = mtod(m, struct ip *);
8129 pd.mp = m;
8130 pd.lmw = 0;
8131 pd.pf_mtag = pf_get_mtag(m);
8132 pd.src = (struct pf_addr *)&h->ip_src;
8133 pd.dst = (struct pf_addr *)&h->ip_dst;
8134 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
8135 pd.ip_sum = &h->ip_sum;
8136 pd.proto = h->ip_p;
8137 pd.proto_variant = 0;
8138 pd.af = AF_INET;
8139 pd.tos = h->ip_tos;
8140 pd.tot_len = ntohs(h->ip_len);
8141 pd.eh = eh;
8142
b0d623f7
A
8143 if (m->m_pkthdr.len < (int)sizeof (*h)) {
8144 action = PF_DROP;
8145 REASON_SET(&reason, PFRES_SHORT);
8146 log = 1;
8147 goto done;
8148 }
8149
316670eb
A
8150#if DUMMYNET
8151 if (fwa != NULL && fwa->fwa_pf_rule != NULL)
8152 goto nonormalize;
8153#endif /* DUMMYNET */
8154
b0d623f7 8155 /* We do IP header normalization and packet reassembly here */
316670eb
A
8156 action = pf_normalize_ip(m0, dir, kif, &reason, &pd);
8157 pd.mp = m = *m0;
8158 if (action != PF_PASS || pd.lmw < 0) {
b0d623f7
A
8159 action = PF_DROP;
8160 goto done;
8161 }
316670eb
A
8162
8163#if DUMMYNET
8164nonormalize:
8165#endif /* DUMMYNET */
b0d623f7
A
8166 m = *m0; /* pf_normalize messes with m0 */
8167 h = mtod(m, struct ip *);
8168
8169 off = h->ip_hl << 2;
8170 if (off < (int)sizeof (*h)) {
8171 action = PF_DROP;
8172 REASON_SET(&reason, PFRES_SHORT);
8173 log = 1;
8174 goto done;
8175 }
8176
8177 pd.src = (struct pf_addr *)&h->ip_src;
8178 pd.dst = (struct pf_addr *)&h->ip_dst;
8179 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET);
8180 pd.ip_sum = &h->ip_sum;
8181 pd.proto = h->ip_p;
b0d623f7
A
8182 pd.proto_variant = 0;
8183 pd.mp = m;
8184 pd.lmw = 0;
316670eb 8185 pd.pf_mtag = pf_get_mtag(m);
b0d623f7
A
8186 pd.af = AF_INET;
8187 pd.tos = h->ip_tos;
316670eb 8188 pd.sc = MBUF_SCIDX(mbuf_get_service_class(m));
b0d623f7
A
8189 pd.tot_len = ntohs(h->ip_len);
8190 pd.eh = eh;
39236c6e
A
8191
8192 if (m->m_pkthdr.pkt_flags & PKTF_FLOW_ID) {
8193 pd.flowsrc = m->m_pkthdr.pkt_flowsrc;
8194 pd.flowhash = m->m_pkthdr.pkt_flowid;
8195 pd.pktflags = (m->m_pkthdr.pkt_flags & PKTF_FLOW_MASK);
316670eb 8196 }
b0d623f7
A
8197
8198 /* handle fragments that didn't get reassembled by normalization */
8199 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
316670eb
A
8200 pd.flags |= PFDESC_IP_FRAG;
8201#if DUMMYNET
8202 /* Traffic goes through dummynet first */
8203 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8204 if (action == PF_DROP || m == NULL) {
8205 *m0 = NULL;
8206 return (action);
8207 }
8208#endif /* DUMMYNET */
b0d623f7
A
8209 action = pf_test_fragment(&r, dir, kif, m, h,
8210 &pd, &a, &ruleset);
8211 goto done;
8212 }
8213
8214 switch (h->ip_p) {
8215
8216 case IPPROTO_TCP: {
8217 struct tcphdr th;
8218 pd.hdr.tcp = &th;
8219 if (!pf_pull_hdr(m, off, &th, sizeof (th),
8220 &action, &reason, AF_INET)) {
8221 log = action != PF_PASS;
8222 goto done;
8223 }
8224 pd.p_len = pd.tot_len - off - (th.th_off << 2);
8225 if ((th.th_flags & TH_ACK) && pd.p_len == 0)
8226 pqid = 1;
316670eb
A
8227#if DUMMYNET
8228 /* Traffic goes through dummynet first */
8229 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8230 if (action == PF_DROP || m == NULL) {
8231 *m0 = NULL;
8232 return (action);
8233 }
8234#endif /* DUMMYNET */
b0d623f7 8235 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
b7266188 8236 if (pd.lmw < 0)
b0d623f7
A
8237 goto done;
8238 PF_APPLE_UPDATE_PDESC_IPv4();
b7266188
A
8239 if (action == PF_DROP)
8240 goto done;
b0d623f7
A
8241 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
8242 &reason);
b0d623f7
A
8243 if (pd.lmw < 0)
8244 goto done;
8245 PF_APPLE_UPDATE_PDESC_IPv4();
b0d623f7
A
8246 if (action == PF_PASS) {
8247#if NPFSYNC
8248 pfsync_update_state(s);
8249#endif /* NPFSYNC */
8250 r = s->rule.ptr;
8251 a = s->anchor.ptr;
8252 log = s->log;
8253 } else if (s == NULL)
8254 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8255 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8256 break;
8257 }
8258
8259 case IPPROTO_UDP: {
8260 struct udphdr uh;
8261
8262 pd.hdr.udp = &uh;
8263 if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
8264 &action, &reason, AF_INET)) {
8265 log = action != PF_PASS;
8266 goto done;
8267 }
8268 if (uh.uh_dport == 0 ||
8269 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
8270 ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
8271 action = PF_DROP;
8272 REASON_SET(&reason, PFRES_SHORT);
8273 goto done;
8274 }
316670eb
A
8275#if DUMMYNET
8276 /* Traffic goes through dummynet first */
8277 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8278 if (action == PF_DROP || m == NULL) {
8279 *m0 = NULL;
8280 return (action);
8281 }
8282#endif /* DUMMYNET */
b7266188
A
8283 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
8284 &reason);
b0d623f7
A
8285 if (pd.lmw < 0)
8286 goto done;
8287 PF_APPLE_UPDATE_PDESC_IPv4();
b0d623f7
A
8288 if (action == PF_PASS) {
8289#if NPFSYNC
8290 pfsync_update_state(s);
8291#endif /* NPFSYNC */
8292 r = s->rule.ptr;
8293 a = s->anchor.ptr;
8294 log = s->log;
8295 } else if (s == NULL)
8296 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8297 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8298 break;
8299 }
8300
8301 case IPPROTO_ICMP: {
8302 struct icmp ih;
8303
8304 pd.hdr.icmp = &ih;
8305 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
8306 &action, &reason, AF_INET)) {
8307 log = action != PF_PASS;
8308 goto done;
8309 }
316670eb
A
8310#if DUMMYNET
8311 /* Traffic goes through dummynet first */
8312 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8313 if (action == PF_DROP || m == NULL) {
8314 *m0 = NULL;
8315 return (action);
8316 }
8317#endif /* DUMMYNET */
b0d623f7
A
8318 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
8319 &reason);
b0d623f7
A
8320 if (pd.lmw < 0)
8321 goto done;
8322 PF_APPLE_UPDATE_PDESC_IPv4();
b0d623f7
A
8323 if (action == PF_PASS) {
8324#if NPFSYNC
8325 pfsync_update_state(s);
8326#endif /* NPFSYNC */
8327 r = s->rule.ptr;
8328 a = s->anchor.ptr;
8329 log = s->log;
8330 } else if (s == NULL)
8331 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8332 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8333 break;
8334 }
8335
b0d623f7
A
8336 case IPPROTO_ESP: {
8337 struct pf_esp_hdr esp;
8338
8339 pd.hdr.esp = &esp;
8340 if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
8341 AF_INET)) {
8342 log = action != PF_PASS;
8343 goto done;
8344 }
316670eb
A
8345#if DUMMYNET
8346 /* Traffic goes through dummynet first */
8347 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8348 if (action == PF_DROP || m == NULL) {
8349 *m0 = NULL;
8350 return (action);
8351 }
8352#endif /* DUMMYNET */
b0d623f7
A
8353 action = pf_test_state_esp(&s, dir, kif, off, &pd);
8354 if (pd.lmw < 0)
8355 goto done;
8356 PF_APPLE_UPDATE_PDESC_IPv4();
8357 if (action == PF_PASS) {
8358#if NPFSYNC
8359 pfsync_update_state(s);
8360#endif /* NPFSYNC */
8361 r = s->rule.ptr;
8362 a = s->anchor.ptr;
8363 log = s->log;
8364 } else if (s == NULL)
8365 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8366 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8367 break;
8368 }
8369
8370 case IPPROTO_GRE: {
8371 struct pf_grev1_hdr grev1;
8372 pd.hdr.grev1 = &grev1;
8373 if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
8374 &reason, AF_INET)) {
8375 log = (action != PF_PASS);
8376 goto done;
8377 }
316670eb
A
8378#if DUMMYNET
8379 /* Traffic goes through dummynet first */
8380 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8381 if (action == PF_DROP || m == NULL) {
8382 *m0 = NULL;
8383 return (action);
8384 }
8385#endif /* DUMMYNET */
b0d623f7
A
8386 if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
8387 ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
8388 if (ntohs(grev1.payload_length) >
8389 m->m_pkthdr.len - off) {
8390 action = PF_DROP;
8391 REASON_SET(&reason, PFRES_SHORT);
8392 goto done;
8393 }
8394 pd.proto_variant = PF_GRE_PPTP_VARIANT;
8395 action = pf_test_state_grev1(&s, dir, kif, off, &pd);
8396 if (pd.lmw < 0) goto done;
8397 PF_APPLE_UPDATE_PDESC_IPv4();
8398 if (action == PF_PASS) {
8399#if NPFSYNC
8400 pfsync_update_state(s);
8401#endif /* NPFSYNC */
8402 r = s->rule.ptr;
8403 a = s->anchor.ptr;
8404 log = s->log;
8405 break;
8406 } else if (s == NULL) {
8407 action = pf_test_rule(&r, &s, dir, kif, m, off,
39236c6e 8408 h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8409 if (action == PF_PASS)
8410 break;
8411 }
8412 }
8413
8414 /* not GREv1/PPTP, so treat as ordinary GRE... */
8415 }
b0d623f7
A
8416
8417 default:
316670eb
A
8418#if DUMMYNET
8419 /* Traffic goes through dummynet first */
8420 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8421 if (action == PF_DROP || m == NULL) {
8422 *m0 = NULL;
8423 return (action);
8424 }
8425#endif /* DUMMYNET */
b0d623f7 8426 action = pf_test_state_other(&s, dir, kif, &pd);
b0d623f7
A
8427 if (pd.lmw < 0)
8428 goto done;
8429 PF_APPLE_UPDATE_PDESC_IPv4();
b0d623f7
A
8430 if (action == PF_PASS) {
8431#if NPFSYNC
8432 pfsync_update_state(s);
8433#endif /* NPFSYNC */
8434 r = s->rule.ptr;
8435 a = s->anchor.ptr;
8436 log = s->log;
8437 } else if (s == NULL)
8438 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
39236c6e 8439 &pd, &a, &ruleset, NULL);
b0d623f7
A
8440 break;
8441 }
8442
8443done:
b7266188 8444 *m0 = pd.mp;
b0d623f7
A
8445 PF_APPLE_UPDATE_PDESC_IPv4();
8446
8447 if (action == PF_PASS && h->ip_hl > 5 &&
8448 !((s && s->allow_opts) || r->allow_opts)) {
8449 action = PF_DROP;
8450 REASON_SET(&reason, PFRES_IPOPTIONS);
8451 log = 1;
8452 DPFPRINTF(PF_DEBUG_MISC,
8453 ("pf: dropping packet with ip options [hlen=%u]\n",
8454 (unsigned int) h->ip_hl));
8455 }
8456
316670eb 8457 if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
39236c6e 8458 (pd.pktflags & PKTF_FLOW_ID))
b0d623f7 8459 (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
316670eb 8460 r->rtableid, &pd);
b0d623f7 8461
316670eb
A
8462 if (action == PF_PASS) {
8463#if PF_ALTQ
8464 if (altq_allowed && r->qid) {
8465 if (pqid || (pd.tos & IPTOS_LOWDELAY))
8466 pd.pf_mtag->pftag_qid = r->pqid;
8467 else
8468 pd.pf_mtag->pftag_qid = r->qid;
8469 }
8470#endif /* PF_ALTQ */
39236c6e 8471#if PF_ECN
b0d623f7 8472 /* add hints for ecn */
316670eb
A
8473 pd.pf_mtag->pftag_hdr = h;
8474 /* record address family */
8475 pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET6;
8476 pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
39236c6e
A
8477#endif /* PF_ECN */
8478 /* record protocol */
8479 m->m_pkthdr.pkt_proto = pd.proto;
b0d623f7 8480 }
b0d623f7
A
8481
8482 /*
8483 * connections redirected to loopback should not match sockets
8484 * bound specifically to loopback due to security implications,
8485 * see tcp_input() and in_pcblookup_listen().
8486 */
8487 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
8488 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
8489 (s->nat_rule.ptr->action == PF_RDR ||
8490 s->nat_rule.ptr->action == PF_BINAT) &&
8491 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
316670eb 8492 pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST;
b0d623f7
A
8493
8494 if (log) {
8495 struct pf_rule *lr;
8496
8497 if (s != NULL && s->nat_rule.ptr != NULL &&
8498 s->nat_rule.ptr->log & PF_LOG_ALL)
8499 lr = s->nat_rule.ptr;
8500 else
8501 lr = r;
8502 PFLOG_PACKET(kif, h, m, AF_INET, dir, reason, lr, a, ruleset,
8503 &pd);
8504 }
8505
8506 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
8507 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
8508
8509 if (action == PF_PASS || r->action == PF_DROP) {
8510 dirndx = (dir == PF_OUT);
8511 r->packets[dirndx]++;
8512 r->bytes[dirndx] += pd.tot_len;
8513 if (a != NULL) {
8514 a->packets[dirndx]++;
8515 a->bytes[dirndx] += pd.tot_len;
8516 }
8517 if (s != NULL) {
8518 sk = s->state_key;
8519 if (s->nat_rule.ptr != NULL) {
8520 s->nat_rule.ptr->packets[dirndx]++;
8521 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
8522 }
8523 if (s->src_node != NULL) {
8524 s->src_node->packets[dirndx]++;
8525 s->src_node->bytes[dirndx] += pd.tot_len;
8526 }
8527 if (s->nat_src_node != NULL) {
8528 s->nat_src_node->packets[dirndx]++;
8529 s->nat_src_node->bytes[dirndx] += pd.tot_len;
8530 }
8531 dirndx = (dir == sk->direction) ? 0 : 1;
8532 s->packets[dirndx]++;
8533 s->bytes[dirndx] += pd.tot_len;
8534 }
8535 tr = r;
8536 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
8537 if (nr != NULL) {
8538 struct pf_addr *x;
8539 /*
8540 * XXX: we need to make sure that the addresses
 8541			 * passed to pfr_update_stats() are the same as
8542 * the addresses used during matching (pfr_match)
8543 */
8544 if (r == &pf_default_rule) {
8545 tr = nr;
8546 x = (sk == NULL || sk->direction == dir) ?
8547 &pd.baddr : &pd.naddr;
8548 } else
8549 x = (sk == NULL || sk->direction == dir) ?
8550 &pd.naddr : &pd.baddr;
8551 if (x == &pd.baddr || s == NULL) {
8552 /* we need to change the address */
8553 if (dir == PF_OUT)
8554 pd.src = x;
8555 else
8556 pd.dst = x;
8557 }
8558 }
8559 if (tr->src.addr.type == PF_ADDR_TABLE)
8560 pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
8561 sk->direction == dir) ?
8562 pd.src : pd.dst, pd.af,
8563 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
8564 tr->src.neg);
8565 if (tr->dst.addr.type == PF_ADDR_TABLE)
8566 pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
8567 sk->direction == dir) ? pd.dst : pd.src, pd.af,
8568 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
8569 tr->dst.neg);
8570 }
8571
b7266188
A
8572 VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
8573
b0d623f7
A
8574 if (*m0) {
8575 if (pd.lmw < 0) {
b7266188
A
8576 REASON_SET(&reason, PFRES_MEMORY);
8577 action = PF_DROP;
8578 }
8579
8580 if (action == PF_DROP) {
b0d623f7
A
8581 m_freem(*m0);
8582 *m0 = NULL;
8583 return (PF_DROP);
8584 }
8585
8586 *m0 = m;
8587 }
b0d623f7
A
8588
8589 if (action == PF_SYNPROXY_DROP) {
8590 m_freem(*m0);
8591 *m0 = NULL;
8592 action = PF_PASS;
8593 } else if (r->rt)
8594 /* pf_route can free the mbuf causing *m0 to become NULL */
8595 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
8596
8597 return (action);
8598}
8599#endif /* INET */
8600
8601#if INET6
b0d623f7
A
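/*
 * IPv6 counterpart of PF_APPLE_UPDATE_PDESC_IPv4(); additionally keeps
 * "n", the pointer to the unmodified mbuf used for checksumming, in sync
 * when it aliased m.
 */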
8602#define PF_APPLE_UPDATE_PDESC_IPv6() \
8603 do { \
8604 if (m && pd.mp && m != pd.mp) { \
8605 if (n == m) \
8606 n = pd.mp; \
8607 m = pd.mp; \
8608 h = mtod(m, struct ip6_hdr *); \
8609 } \
8610 } while (0)
b0d623f7
A
8611
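/*
 * IPv6 entry point; mirrors pf_test() above, but additionally walks the
 * extension header chain to find the upper-layer protocol and keeps an
 * unmodified mbuf ("n") around for checksumming.
 */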
8612int
8613pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0,
316670eb 8614 struct ether_header *eh, struct ip_fw_args *fwa)
b0d623f7 8615{
316670eb
A
8616#if !DUMMYNET
8617#pragma unused(fwa)
8618#endif
b0d623f7 8619 struct pfi_kif *kif;
316670eb 8620 u_short action = PF_PASS, reason = 0, log = 0;
b0d623f7
A
8621 struct mbuf *m = *m0, *n = NULL;
8622 struct ip6_hdr *h;
8623 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
8624 struct pf_state *s = NULL;
8625 struct pf_state_key *sk = NULL;
8626 struct pf_ruleset *ruleset = NULL;
8627 struct pf_pdesc pd;
8628 int off, terminal = 0, dirndx, rh_cnt = 0;
316670eb 8629 u_int8_t nxt;
b0d623f7
A
8630
8631 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
8632
8633 if (!pf_status.running)
8634 return (PF_PASS);
8635
8636 memset(&pd, 0, sizeof (pd));
8637
8638 if ((pd.pf_mtag = pf_get_mtag(m)) == NULL) {
8639 DPFPRINTF(PF_DEBUG_URGENT,
8640 ("pf_test6: pf_get_mtag returned NULL\n"));
8641 return (PF_DROP);
8642 }
8643
316670eb 8644 if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED)
b0d623f7
A
8645 return (PF_PASS);
8646
8647 kif = (struct pfi_kif *)ifp->if_pf_kif;
8648
8649 if (kif == NULL) {
8650 DPFPRINTF(PF_DEBUG_URGENT,
8651 ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name));
8652 return (PF_DROP);
8653 }
8654 if (kif->pfik_flags & PFI_IFLAG_SKIP)
8655 return (PF_PASS);
8656
39236c6e 8657 VERIFY(m->m_flags & M_PKTHDR);
b0d623f7
A
8658
8659 h = mtod(m, struct ip6_hdr *);
8660
316670eb
A
8661 nxt = h->ip6_nxt;
8662 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
8663 pd.mp = m;
8664 pd.lmw = 0;
8665 pd.pf_mtag = pf_get_mtag(m);
8666 pd.src = (struct pf_addr *)&h->ip6_src;
8667 pd.dst = (struct pf_addr *)&h->ip6_dst;
8668 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
8669 pd.ip_sum = NULL;
8670 pd.af = AF_INET6;
8671 pd.proto = nxt;
8672 pd.proto_variant = 0;
8673 pd.tos = 0;
8674 pd.sc = MBUF_SCIDX(mbuf_get_service_class(m));
8675 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
8676 pd.eh = eh;
8677
39236c6e
A
8678 if (m->m_pkthdr.pkt_flags & PKTF_FLOW_ID) {
8679 pd.flowsrc = m->m_pkthdr.pkt_flowsrc;
8680 pd.flowhash = m->m_pkthdr.pkt_flowid;
8681 pd.pktflags = (m->m_pkthdr.pkt_flags & PKTF_FLOW_MASK);
316670eb
A
8682 }
8683
b0d623f7
A
8684 if (m->m_pkthdr.len < (int)sizeof (*h)) {
8685 action = PF_DROP;
8686 REASON_SET(&reason, PFRES_SHORT);
8687 log = 1;
8688 goto done;
8689 }
8690
316670eb
A
8691#if DUMMYNET
8692 if (fwa != NULL && fwa->fwa_pf_rule != NULL)
8693 goto nonormalize;
8694#endif /* DUMMYNET */
8695
b0d623f7 8696 /* We do IP header normalization and packet reassembly here */
316670eb
A
8697 action = pf_normalize_ip6(m0, dir, kif, &reason, &pd);
8698 pd.mp = m = *m0;
8699 if (action != PF_PASS || pd.lmw < 0) {
b0d623f7
A
8700 action = PF_DROP;
8701 goto done;
8702 }
316670eb
A
8703
8704#if DUMMYNET
8705nonormalize:
8706#endif /* DUMMYNET */
b0d623f7
A
8707 h = mtod(m, struct ip6_hdr *);
8708
8709#if 1
8710 /*
 8711	 * We do not support jumbograms yet.  If we keep going, a zero ip6_plen
8712 * will do something bad, so drop the packet for now.
8713 */
8714 if (htons(h->ip6_plen) == 0) {
8715 action = PF_DROP;
8716 REASON_SET(&reason, PFRES_NORM); /*XXX*/
8717 goto done;
8718 }
8719#endif
8720
8721 pd.src = (struct pf_addr *)&h->ip6_src;
8722 pd.dst = (struct pf_addr *)&h->ip6_dst;
8723 PF_ACPY(&pd.baddr, dir == PF_OUT ? pd.src : pd.dst, AF_INET6);
8724 pd.ip_sum = NULL;
8725 pd.af = AF_INET6;
8726 pd.tos = 0;
8727 pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr);
8728 pd.eh = eh;
8729
8730 off = ((caddr_t)h - m->m_data) + sizeof (struct ip6_hdr);
8731 pd.proto = h->ip6_nxt;
b0d623f7
A
8732 pd.proto_variant = 0;
8733 pd.mp = m;
8734 pd.lmw = 0;
316670eb 8735 pd.pf_mtag = pf_get_mtag(m);
b0d623f7 8736
316670eb
A
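	/*
	 * Walk the IPv6 extension header chain until a terminal
	 * (upper-layer) header is reached, counting routing headers so
	 * packets carrying them can be dropped later unless options are
	 * explicitly allowed.  AH lengths are in 32-bit units, the other
	 * extension header lengths in 64-bit units.
	 */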
8737 do {
8738 switch (nxt) {
8739 case IPPROTO_FRAGMENT: {
8740 struct ip6_frag ip6f;
39236c6e 8741
316670eb
A
8742 pd.flags |= PFDESC_IP_FRAG;
8743 if (!pf_pull_hdr(m, off, &ip6f, sizeof ip6f, NULL,
b0d623f7
A
8744 &reason, pd.af)) {
8745 DPFPRINTF(PF_DEBUG_MISC,
316670eb 8746 ("pf: IPv6 short fragment header\n"));
b0d623f7
A
8747 action = PF_DROP;
8748 REASON_SET(&reason, PFRES_SHORT);
8749 log = 1;
8750 goto done;
8751 }
316670eb
A
8752 pd.proto = nxt = ip6f.ip6f_nxt;
8753#if DUMMYNET
8754 /* Traffic goes through dummynet first */
8755 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8756 if (action == PF_DROP || m == NULL) {
8757 *m0 = NULL;
8758 return (action);
8759 }
8760#endif /* DUMMYNET */
8761 action = pf_test_fragment(&r, dir, kif, m, h, &pd, &a,
8762 &ruleset);
8763 if (action == PF_DROP) {
8764 REASON_SET(&reason, PFRES_FRAG);
b0d623f7 8765 log = 1;
b0d623f7 8766 }
316670eb 8767 goto done;
b0d623f7 8768 }
316670eb
A
8769 case IPPROTO_ROUTING:
8770 ++rh_cnt;
8771 /* FALL THROUGH */
8772
b0d623f7
A
8773 case IPPROTO_AH:
8774 case IPPROTO_HOPOPTS:
8775 case IPPROTO_DSTOPTS: {
8776 /* get next header and header length */
8777 struct ip6_ext opt6;
8778
316670eb 8779 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
b0d623f7
A
8780 NULL, &reason, pd.af)) {
8781 DPFPRINTF(PF_DEBUG_MISC,
8782 ("pf: IPv6 short opt\n"));
8783 action = PF_DROP;
8784 log = 1;
8785 goto done;
8786 }
8787 if (pd.proto == IPPROTO_AH)
8788 off += (opt6.ip6e_len + 2) * 4;
8789 else
8790 off += (opt6.ip6e_len + 1) * 8;
316670eb 8791 nxt = opt6.ip6e_nxt;
b0d623f7
A
 8792			/* go to the next header */
8793 break;
8794 }
8795 default:
8796 terminal++;
8797 break;
8798 }
8799 } while (!terminal);
8800
8801 /* if there's no routing header, use unmodified mbuf for checksumming */
8802 if (!n)
8803 n = m;
8804
8805 switch (pd.proto) {
8806
8807 case IPPROTO_TCP: {
8808 struct tcphdr th;
8809
8810 pd.hdr.tcp = &th;
8811 if (!pf_pull_hdr(m, off, &th, sizeof (th),
8812 &action, &reason, AF_INET6)) {
8813 log = action != PF_PASS;
8814 goto done;
8815 }
8816 pd.p_len = pd.tot_len - off - (th.th_off << 2);
316670eb
A
8817#if DUMMYNET
8818 /* Traffic goes through dummynet first */
8819 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8820 if (action == PF_DROP || m == NULL) {
8821 *m0 = NULL;
8822 return (action);
8823 }
8824#endif /* DUMMYNET */
b0d623f7 8825 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
b7266188 8826 if (pd.lmw < 0)
b0d623f7
A
8827 goto done;
8828 PF_APPLE_UPDATE_PDESC_IPv6();
b7266188
A
8829 if (action == PF_DROP)
8830 goto done;
b0d623f7
A
8831 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
8832 &reason);
b0d623f7
A
8833 if (pd.lmw < 0)
8834 goto done;
8835 PF_APPLE_UPDATE_PDESC_IPv6();
b0d623f7
A
8836 if (action == PF_PASS) {
8837#if NPFSYNC
8838 pfsync_update_state(s);
8839#endif /* NPFSYNC */
8840 r = s->rule.ptr;
8841 a = s->anchor.ptr;
8842 log = s->log;
8843 } else if (s == NULL)
8844 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8845 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8846 break;
8847 }
8848
8849 case IPPROTO_UDP: {
8850 struct udphdr uh;
8851
8852 pd.hdr.udp = &uh;
8853 if (!pf_pull_hdr(m, off, &uh, sizeof (uh),
8854 &action, &reason, AF_INET6)) {
8855 log = action != PF_PASS;
8856 goto done;
8857 }
8858 if (uh.uh_dport == 0 ||
8859 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
8860 ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
8861 action = PF_DROP;
8862 REASON_SET(&reason, PFRES_SHORT);
8863 goto done;
8864 }
316670eb
A
8865#if DUMMYNET
8866 /* Traffic goes through dummynet first */
8867 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8868 if (action == PF_DROP || m == NULL) {
8869 *m0 = NULL;
8870 return (action);
8871 }
8872#endif /* DUMMYNET */
b7266188
A
8873 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd,
8874 &reason);
b0d623f7
A
8875 if (pd.lmw < 0)
8876 goto done;
8877 PF_APPLE_UPDATE_PDESC_IPv6();
b0d623f7
A
8878 if (action == PF_PASS) {
8879#if NPFSYNC
8880 pfsync_update_state(s);
8881#endif /* NPFSYNC */
8882 r = s->rule.ptr;
8883 a = s->anchor.ptr;
8884 log = s->log;
8885 } else if (s == NULL)
8886 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8887 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8888 break;
8889 }
8890
8891 case IPPROTO_ICMPV6: {
8892 struct icmp6_hdr ih;
8893
8894 pd.hdr.icmp6 = &ih;
8895 if (!pf_pull_hdr(m, off, &ih, sizeof (ih),
8896 &action, &reason, AF_INET6)) {
8897 log = action != PF_PASS;
8898 goto done;
8899 }
316670eb
A
8900#if DUMMYNET
8901 /* Traffic goes through dummynet first */
8902 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8903 if (action == PF_DROP || m == NULL) {
8904 *m0 = NULL;
8905 return (action);
8906 }
8907#endif /* DUMMYNET */
b0d623f7
A
8908 action = pf_test_state_icmp(&s, dir, kif,
8909 m, off, h, &pd, &reason);
b0d623f7
A
8910 if (pd.lmw < 0)
8911 goto done;
8912 PF_APPLE_UPDATE_PDESC_IPv6();
b0d623f7
A
8913 if (action == PF_PASS) {
8914#if NPFSYNC
8915 pfsync_update_state(s);
8916#endif /* NPFSYNC */
8917 r = s->rule.ptr;
8918 a = s->anchor.ptr;
8919 log = s->log;
8920 } else if (s == NULL)
8921 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8922 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8923 break;
8924 }
8925
b0d623f7
A
8926 case IPPROTO_ESP: {
8927 struct pf_esp_hdr esp;
8928
8929 pd.hdr.esp = &esp;
8930 if (!pf_pull_hdr(m, off, &esp, sizeof (esp), &action, &reason,
8931 AF_INET6)) {
8932 log = action != PF_PASS;
8933 goto done;
8934 }
316670eb
A
8935#if DUMMYNET
8936 /* Traffic goes through dummynet first */
8937 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8938 if (action == PF_DROP || m == NULL) {
8939 *m0 = NULL;
8940 return (action);
8941 }
8942#endif /* DUMMYNET */
b0d623f7
A
8943 action = pf_test_state_esp(&s, dir, kif, off, &pd);
8944 if (pd.lmw < 0)
8945 goto done;
8946 PF_APPLE_UPDATE_PDESC_IPv6();
8947 if (action == PF_PASS) {
8948#if NPFSYNC
8949 pfsync_update_state(s);
8950#endif /* NPFSYNC */
8951 r = s->rule.ptr;
8952 a = s->anchor.ptr;
8953 log = s->log;
8954 } else if (s == NULL)
8955 action = pf_test_rule(&r, &s, dir, kif,
39236c6e 8956 m, off, h, &pd, &a, &ruleset, NULL);
b0d623f7
A
8957 break;
8958 }
8959
8960 case IPPROTO_GRE: {
8961 struct pf_grev1_hdr grev1;
8962
8963 pd.hdr.grev1 = &grev1;
8964 if (!pf_pull_hdr(m, off, &grev1, sizeof (grev1), &action,
8965 &reason, AF_INET6)) {
8966 log = (action != PF_PASS);
8967 goto done;
8968 }
316670eb
A
8969#if DUMMYNET
8970 /* Traffic goes through dummynet first */
8971 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
8972 if (action == PF_DROP || m == NULL) {
8973 *m0 = NULL;
8974 return (action);
8975 }
8976#endif /* DUMMYNET */
b0d623f7
A
8977 if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
8978 ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
8979 if (ntohs(grev1.payload_length) >
8980 m->m_pkthdr.len - off) {
8981 action = PF_DROP;
8982 REASON_SET(&reason, PFRES_SHORT);
8983 goto done;
8984 }
8985 action = pf_test_state_grev1(&s, dir, kif, off, &pd);
8986 if (pd.lmw < 0)
8987 goto done;
8988 PF_APPLE_UPDATE_PDESC_IPv6();
8989 if (action == PF_PASS) {
8990#if NPFSYNC
8991 pfsync_update_state(s);
8992#endif /* NPFSYNC */
8993 r = s->rule.ptr;
8994 a = s->anchor.ptr;
8995 log = s->log;
8996 break;
8997 } else if (s == NULL) {
8998 action = pf_test_rule(&r, &s, dir, kif, m, off,
39236c6e 8999 h, &pd, &a, &ruleset, NULL);
b0d623f7
A
9000 if (action == PF_PASS)
9001 break;
9002 }
9003 }
9004
9005 /* not GREv1/PPTP, so treat as ordinary GRE... */
9006 }
b0d623f7
A
9007
9008 default:
316670eb
A
9009#if DUMMYNET
9010 /* Traffic goes through dummynet first */
9011 action = pf_test_dummynet(&r, dir, kif, &m, &pd, fwa);
9012 if (action == PF_DROP || m == NULL) {
9013 *m0 = NULL;
9014 return (action);
9015 }
9016#endif /* DUMMYNET */
b0d623f7 9017 action = pf_test_state_other(&s, dir, kif, &pd);
b0d623f7
A
9018 if (pd.lmw < 0)
9019 goto done;
9020 PF_APPLE_UPDATE_PDESC_IPv6();
b0d623f7
A
9021 if (action == PF_PASS) {
9022#if NPFSYNC
9023 pfsync_update_state(s);
9024#endif /* NPFSYNC */
9025 r = s->rule.ptr;
9026 a = s->anchor.ptr;
9027 log = s->log;
9028 } else if (s == NULL)
9029 action = pf_test_rule(&r, &s, dir, kif, m, off, h,
39236c6e 9030 &pd, &a, &ruleset, NULL);
b0d623f7
A
9031 break;
9032 }
9033
9034done:
b7266188 9035 *m0 = pd.mp;
b0d623f7
A
9036 PF_APPLE_UPDATE_PDESC_IPv6();
9037
9038 if (n != m) {
9039 m_freem(n);
9040 n = NULL;
9041 }
9042
9043 /* handle dangerous IPv6 extension headers. */
9044 if (action == PF_PASS && rh_cnt &&
9045 !((s && s->allow_opts) || r->allow_opts)) {
9046 action = PF_DROP;
9047 REASON_SET(&reason, PFRES_IPOPTIONS);
9048 log = 1;
9049 DPFPRINTF(PF_DEBUG_MISC,
9050 ("pf: dropping packet with dangerous v6 headers\n"));
9051 }
9052
39236c6e
A
9053 if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
9054 (pd.pktflags & PKTF_FLOW_ID))
b0d623f7 9055 (void) pf_tag_packet(m, pd.pf_mtag, s ? s->tag : 0,
316670eb 9056 r->rtableid, &pd);
b0d623f7 9057
316670eb
A
9058 if (action == PF_PASS) {
9059#if PF_ALTQ
9060 if (altq_allowed && r->qid) {
9061 if (pd.tos & IPTOS_LOWDELAY)
9062 pd.pf_mtag->pftag_qid = r->pqid;
9063 else
9064 pd.pf_mtag->pftag_qid = r->qid;
9065 }
9066#endif /* PF_ALTQ */
39236c6e 9067#if PF_ECN
b0d623f7 9068 /* add hints for ecn */
316670eb
A
9069 pd.pf_mtag->pftag_hdr = h;
9070 /* record address family */
9071 pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET;
9072 pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
39236c6e
A
9073#endif /* PF_ECN */
9074 /* record protocol */
9075 m->m_pkthdr.pkt_proto = pd.proto;
b0d623f7 9076 }
b0d623f7
A
9077
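	/*
	 * As in the IPv4 path: connections redirected to loopback should
	 * not match sockets bound specifically to loopback, so mark them
	 * for tcp_input() and in_pcblookup_listen().
	 */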
9078 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
9079 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
9080 (s->nat_rule.ptr->action == PF_RDR ||
9081 s->nat_rule.ptr->action == PF_BINAT) &&
9082 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
316670eb 9083 pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST;
b0d623f7
A
9084
9085 if (log) {
9086 struct pf_rule *lr;
9087
9088 if (s != NULL && s->nat_rule.ptr != NULL &&
9089 s->nat_rule.ptr->log & PF_LOG_ALL)
9090 lr = s->nat_rule.ptr;
9091 else
9092 lr = r;
9093 PFLOG_PACKET(kif, h, m, AF_INET6, dir, reason, lr, a, ruleset,
9094 &pd);
9095 }
9096
9097 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
9098 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
9099
9100 if (action == PF_PASS || r->action == PF_DROP) {
9101 dirndx = (dir == PF_OUT);
9102 r->packets[dirndx]++;
9103 r->bytes[dirndx] += pd.tot_len;
9104 if (a != NULL) {
9105 a->packets[dirndx]++;
9106 a->bytes[dirndx] += pd.tot_len;
9107 }
9108 if (s != NULL) {
9109 sk = s->state_key;
9110 if (s->nat_rule.ptr != NULL) {
9111 s->nat_rule.ptr->packets[dirndx]++;
9112 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
9113 }
9114 if (s->src_node != NULL) {
9115 s->src_node->packets[dirndx]++;
9116 s->src_node->bytes[dirndx] += pd.tot_len;
9117 }
9118 if (s->nat_src_node != NULL) {
9119 s->nat_src_node->packets[dirndx]++;
9120 s->nat_src_node->bytes[dirndx] += pd.tot_len;
9121 }
9122 dirndx = (dir == sk->direction) ? 0 : 1;
9123 s->packets[dirndx]++;
9124 s->bytes[dirndx] += pd.tot_len;
9125 }
9126 tr = r;
9127 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
9128 if (nr != NULL) {
9129 struct pf_addr *x;
9130 /*
9131 * XXX: we need to make sure that the addresses
 9132			 * passed to pfr_update_stats() are the same as
9133 * the addresses used during matching (pfr_match)
9134 */
9135 if (r == &pf_default_rule) {
9136 tr = nr;
9137 x = (s == NULL || sk->direction == dir) ?
9138 &pd.baddr : &pd.naddr;
9139 } else {
9140 x = (s == NULL || sk->direction == dir) ?
9141 &pd.naddr : &pd.baddr;
9142 }
9143 if (x == &pd.baddr || s == NULL) {
9144 if (dir == PF_OUT)
9145 pd.src = x;
9146 else
9147 pd.dst = x;
9148 }
9149 }
9150 if (tr->src.addr.type == PF_ADDR_TABLE)
9151 pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
9152 sk->direction == dir) ? pd.src : pd.dst, pd.af,
9153 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
9154 tr->src.neg);
9155 if (tr->dst.addr.type == PF_ADDR_TABLE)
9156 pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
9157 sk->direction == dir) ? pd.dst : pd.src, pd.af,
9158 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
9159 tr->dst.neg);
9160 }
9161
9162#if 0
9163 if (action == PF_SYNPROXY_DROP) {
9164 m_freem(*m0);
9165 *m0 = NULL;
9166 action = PF_PASS;
9167 } else if (r->rt)
9168 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9169 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9170#else
b7266188
A
9171 VERIFY(m == NULL || pd.mp == NULL || pd.mp == m);
9172
b0d623f7
A
9173 if (*m0) {
9174 if (pd.lmw < 0) {
b7266188
A
9175 REASON_SET(&reason, PFRES_MEMORY);
9176 action = PF_DROP;
9177 }
9178
9179 if (action == PF_DROP) {
b0d623f7
A
9180 m_freem(*m0);
9181 *m0 = NULL;
9182 return (PF_DROP);
9183 }
9184
9185 *m0 = m;
9186 }
9187
9188 if (action == PF_SYNPROXY_DROP) {
9189 m_freem(*m0);
9190 *m0 = NULL;
9191 action = PF_PASS;
9192 } else if (r->rt) {
9193 if (action == PF_PASS) {
9194 m = *m0;
9195 h = mtod(m, struct ip6_hdr *);
9196 }
9197
9198 /* pf_route6 can free the mbuf causing *m0 to become NULL */
9199 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
9200 }
316670eb 9201#endif /* 0 */
b0d623f7
A
9202
9203 return (action);
9204}
9205#endif /* INET6 */
9206
9207static int
9208pf_check_congestion(struct ifqueue *ifq)
9209{
9210#pragma unused(ifq)
9211 return (0);
9212}
9213
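/*
 * The pool_*() routines below map the subset of the OpenBSD pool(9)
 * allocator API used by pf onto Mach zones (zinit/zalloc/zfree).
 * Illustrative usage, following the pattern used for pf's own pools
 * (names shown are examples):
 *
 *	pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0,
 *	    "pfstatepl", NULL);
 *	s = pool_get(&pf_state_pl, PR_WAITOK);
 *	...
 *	pool_put(&pf_state_pl, s);
 */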
9214void
9215pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
9216 int flags, const char *wchan, void *palloc)
9217{
9218#pragma unused(align, ioff, flags, palloc)
9219 bzero(pp, sizeof (*pp));
9220 pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
9221 if (pp->pool_zone != NULL) {
9222 zone_change(pp->pool_zone, Z_EXPAND, TRUE);
6d2010ae 9223 zone_change(pp->pool_zone, Z_CALLERACCT, FALSE);
b0d623f7
A
9224 pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
9225 pp->pool_name = wchan;
9226 }
9227}
9228
 9229/* Zones currently cannot be destroyed */
9230void
9231pool_destroy(struct pool *pp)
9232{
9233#pragma unused(pp)
9234}
9235
9236void
9237pool_sethiwat(struct pool *pp, int n)
9238{
9239 pp->pool_hiwat = n; /* Currently unused */
9240}
9241
9242void
9243pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
9244{
9245#pragma unused(warnmess, ratecap)
9246 pp->pool_limit = n;
9247}
9248
9249void *
9250pool_get(struct pool *pp, int flags)
9251{
9252 void *buf;
9253
9254 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
9255
9256 if (pp->pool_count > pp->pool_limit) {
9257 DPFPRINTF(PF_DEBUG_NOISY,
9258 ("pf: pool %s hard limit reached (%d)\n",
9259 pp->pool_name != NULL ? pp->pool_name : "unknown",
9260 pp->pool_limit));
9261 pp->pool_fails++;
9262 return (NULL);
9263 }
9264
9265 buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
9266 if (buf != NULL) {
9267 pp->pool_count++;
9268 VERIFY(pp->pool_count != 0);
9269 }
9270 return (buf);
9271}
9272
9273void
9274pool_put(struct pool *pp, void *v)
9275{
9276 lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);
9277
9278 zfree(pp->pool_zone, v);
9279 VERIFY(pp->pool_count != 0);
9280 pp->pool_count--;
9281}
9282
9283struct pf_mtag *
9284pf_find_mtag(struct mbuf *m)
9285{
b0d623f7
A
9286 if (!(m->m_flags & M_PKTHDR))
9287 return (NULL);
9288
316670eb 9289 return (m_pftag(m));
b0d623f7
A
9290}
9291
9292struct pf_mtag *
9293pf_get_mtag(struct mbuf *m)
9294{
b0d623f7 9295 return (pf_find_mtag(m));
b0d623f7
A
9296}
9297
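/*
 * pf_time_second() returns seconds of system uptime (used for relative
 * timeouts such as state expiry), while pf_calendar_time_second() returns
 * wall-clock seconds (used where pf reports absolute creation times).
 */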
9298uint64_t
9299pf_time_second(void)
9300{
9301 struct timeval t;
9302
b7266188
A
9303 microuptime(&t);
9304 return (t.tv_sec);
9305}
9306
9307uint64_t
9308pf_calendar_time_second(void)
9309{
9310 struct timeval t;
9311
39236c6e 9312 getmicrotime(&t);
b0d623f7
A
9313 return (t.tv_sec);
9314}
9315
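/*
 * Minimal hook-list helpers: hook_establish() registers a callback at the
 * head or tail of a hook list, and hook_runloop() walks the list, invoking
 * each hook (unless HOOK_ABORT is set) and optionally unlinking and
 * freeing the entries as directed by the HOOK_REMOVE and HOOK_FREE flags.
 */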
9316static void *
9317hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
9318{
9319 struct hook_desc *hd;
9320
9321 hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
9322 if (hd == NULL)
9323 return (NULL);
9324
9325 hd->hd_fn = fn;
9326 hd->hd_arg = arg;
9327 if (tail)
9328 TAILQ_INSERT_TAIL(head, hd, hd_list);
9329 else
9330 TAILQ_INSERT_HEAD(head, hd, hd_list);
9331
9332 return (hd);
9333}
9334
9335static void
9336hook_runloop(struct hook_desc_head *head, int flags)
9337{
9338 struct hook_desc *hd;
9339
9340 if (!(flags & HOOK_REMOVE)) {
9341 if (!(flags & HOOK_ABORT))
9342 TAILQ_FOREACH(hd, head, hd_list)
9343 hd->hd_fn(hd->hd_arg);
9344 } else {
9345 while (!!(hd = TAILQ_FIRST(head))) {
9346 TAILQ_REMOVE(head, hd, hd_list);
9347 if (!(flags & HOOK_ABORT))
9348 hd->hd_fn(hd->hd_arg);
9349 if (flags & HOOK_FREE)
9350 _FREE(hd, M_DEVBUF);
9351 }
9352 }
9353}