]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/pf.c
xnu-4570.41.2.tar.gz
[apple/xnu.git] / bsd / net / pf.c
1 /*
2 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $apfw: git commit 6602420f2f101b74305cd78f7cd9e0c8fdedae97 $ */
30 /* $OpenBSD: pf.c,v 1.567 2008/02/20 23:40:13 henning Exp $ */
31
32 /*
33 * Copyright (c) 2001 Daniel Hartmeier
34 * Copyright (c) 2002 - 2013 Henning Brauer
35 * NAT64 - Copyright (c) 2010 Viagenie Inc. (http://www.viagenie.ca)
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * - Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * - Redistributions in binary form must reproduce the above
45 * copyright notice, this list of conditions and the following
46 * disclaimer in the documentation and/or other materials provided
47 * with the distribution.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
52 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
53 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
54 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
55 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
56 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
57 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
59 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
60 * POSSIBILITY OF SUCH DAMAGE.
61 *
62 * Effort sponsored in part by the Defense Advanced Research Projects
63 * Agency (DARPA) and Air Force Research Laboratory, Air Force
64 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
65 *
66 */
67
68 #include <machine/endian.h>
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/filio.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 #include <sys/kernel.h>
75 #include <sys/time.h>
76 #include <sys/proc.h>
77 #include <sys/random.h>
78 #include <sys/mcache.h>
79 #include <sys/protosw.h>
80
81 #include <libkern/crypto/md5.h>
82 #include <libkern/libkern.h>
83
84 #include <mach/thread_act.h>
85 #include <mach/branch_predicates.h>
86
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/bpf.h>
90 #include <net/route.h>
91 #include <net/dlil.h>
92
93 #include <netinet/in.h>
94 #include <netinet/in_var.h>
95 #include <netinet/in_systm.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip_var.h>
98 #include <netinet/tcp.h>
99 #include <netinet/tcp_seq.h>
100 #include <netinet/udp.h>
101 #include <netinet/ip_icmp.h>
102 #include <netinet/in_pcb.h>
103 #include <netinet/tcp_timer.h>
104 #include <netinet/tcp_var.h>
105 #include <netinet/tcp_fsm.h>
106 #include <netinet/udp_var.h>
107 #include <netinet/icmp_var.h>
108 #include <net/if_ether.h>
109 #include <net/ethernet.h>
110 #include <net/flowhash.h>
111 #include <net/pfvar.h>
112 #include <net/if_pflog.h>
113
114 #if NPFSYNC
115 #include <net/if_pfsync.h>
116 #endif /* NPFSYNC */
117
118 #if INET6
119 #include <netinet/ip6.h>
120 #include <netinet6/in6_pcb.h>
121 #include <netinet6/ip6_var.h>
122 #include <netinet/icmp6.h>
123 #include <netinet6/nd6.h>
124 #endif /* INET6 */
125
126 #if DUMMYNET
127 #include <netinet/ip_dummynet.h>
128 #endif /* DUMMYNET */
129
130 /*
131 * For RandomULong(), to get a 32 bits random value
132 * Note that random() returns a 31 bits value, see rdar://11159750
133 */
134 #include <dev/random/randomdev.h>
135
136 #define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0))
137
138 /*
139 * On Mac OS X, the rtableid value is treated as the interface scope
140 * value that is equivalent to the interface index used for scoped
141 * routing. A valid scope value is anything but IFSCOPE_NONE (0),
142 * as per definition of ifindex which is a positive, non-zero number.
143 * The other BSDs treat a negative rtableid value as invalid, hence
144 * the test against INT_MAX to handle userland apps which initialize
145 * the field with a negative number.
146 */
147 #define PF_RTABLEID_IS_VALID(r) \
148 ((r) > IFSCOPE_NONE && (r) <= INT_MAX)
149
150 /*
151 * Global variables
152 */
153 decl_lck_mtx_data(,pf_lock_data);
154 decl_lck_rw_data(,pf_perim_lock_data);
155 lck_mtx_t *pf_lock = &pf_lock_data;
156 lck_rw_t *pf_perim_lock = &pf_perim_lock_data;
157
158 /* state tables */
159 struct pf_state_tree_lan_ext pf_statetbl_lan_ext;
160 struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy;
161
162 struct pf_palist pf_pabuf;
163 struct pf_status pf_status;
164
165 u_int32_t ticket_pabuf;
166
167 static MD5_CTX pf_tcp_secret_ctx;
168 static u_char pf_tcp_secret[16];
169 static int pf_tcp_secret_init;
170 static int pf_tcp_iss_off;
171
172 static struct pf_anchor_stackframe {
173 struct pf_ruleset *rs;
174 struct pf_rule *r;
175 struct pf_anchor_node *parent;
176 struct pf_anchor *child;
177 } pf_anchor_stack[64];
178
179 struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl;
180 struct pool pf_state_pl, pf_state_key_pl;
181
182 typedef void (*hook_fn_t)(void *);
183
184 struct hook_desc {
185 TAILQ_ENTRY(hook_desc) hd_list;
186 hook_fn_t hd_fn;
187 void *hd_arg;
188 };
189
190 #define HOOK_REMOVE 0x01
191 #define HOOK_FREE 0x02
192 #define HOOK_ABORT 0x04
193
194 static void *hook_establish(struct hook_desc_head *, int,
195 hook_fn_t, void *);
196 static void hook_runloop(struct hook_desc_head *, int flags);
197
198 struct pool pf_app_state_pl;
199 static void pf_print_addr(struct pf_addr *addr, sa_family_t af);
200 static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int,
201 u_int8_t);
202
203 static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t);
204
205 static void pf_init_threshold(struct pf_threshold *, u_int32_t,
206 u_int32_t);
207 static void pf_add_threshold(struct pf_threshold *);
208 static int pf_check_threshold(struct pf_threshold *);
209
210 static void pf_change_ap(int, pbuf_t *, struct pf_addr *,
211 u_int16_t *, u_int16_t *, u_int16_t *,
212 struct pf_addr *, u_int16_t, u_int8_t, sa_family_t,
213 sa_family_t, int);
214 static int pf_modulate_sack(pbuf_t *, int, struct pf_pdesc *,
215 struct tcphdr *, struct pf_state_peer *);
216 #if INET6
217 static void pf_change_a6(struct pf_addr *, u_int16_t *,
218 struct pf_addr *, u_int8_t);
219 void pf_change_addr(struct pf_addr *a, u_int16_t *c,
220 struct pf_addr *an, u_int8_t u,
221 sa_family_t af, sa_family_t afn);
222 #endif /* INET6 */
223 static void pf_change_icmp(struct pf_addr *, u_int16_t *,
224 struct pf_addr *, struct pf_addr *, u_int16_t,
225 u_int16_t *, u_int16_t *, u_int16_t *,
226 u_int16_t *, u_int8_t, sa_family_t);
227 static void pf_send_tcp(const struct pf_rule *, sa_family_t,
228 const struct pf_addr *, const struct pf_addr *,
229 u_int16_t, u_int16_t, u_int32_t, u_int32_t,
230 u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
231 u_int16_t, struct ether_header *, struct ifnet *);
232 static void pf_send_icmp(pbuf_t *, u_int8_t, u_int8_t,
233 sa_family_t, struct pf_rule *);
234 static struct pf_rule *pf_match_translation(struct pf_pdesc *, pbuf_t *,
235 int, int, struct pfi_kif *, struct pf_addr *,
236 union pf_state_xport *, struct pf_addr *,
237 union pf_state_xport *, int);
238 static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *,
239 pbuf_t *, int, int, struct pfi_kif *,
240 struct pf_src_node **, struct pf_addr *,
241 union pf_state_xport *, struct pf_addr *,
242 union pf_state_xport *, union pf_state_xport *
243 );
244 static void pf_attach_state(struct pf_state_key *,
245 struct pf_state *, int);
246 static void pf_detach_state(struct pf_state *, int);
247 static u_int32_t pf_tcp_iss(struct pf_pdesc *);
248 static int pf_test_rule(struct pf_rule **, struct pf_state **,
249 int, struct pfi_kif *, pbuf_t *, int,
250 void *, struct pf_pdesc *, struct pf_rule **,
251 struct pf_ruleset **, struct ifqueue *);
252 #if DUMMYNET
253 static int pf_test_dummynet(struct pf_rule **, int,
254 struct pfi_kif *, pbuf_t **,
255 struct pf_pdesc *, struct ip_fw_args *);
256 #endif /* DUMMYNET */
257 static int pf_test_fragment(struct pf_rule **, int,
258 struct pfi_kif *, pbuf_t *, void *,
259 struct pf_pdesc *, struct pf_rule **,
260 struct pf_ruleset **);
261 static int pf_test_state_tcp(struct pf_state **, int,
262 struct pfi_kif *, pbuf_t *, int,
263 void *, struct pf_pdesc *, u_short *);
264 static int pf_test_state_udp(struct pf_state **, int,
265 struct pfi_kif *, pbuf_t *, int,
266 void *, struct pf_pdesc *, u_short *);
267 static int pf_test_state_icmp(struct pf_state **, int,
268 struct pfi_kif *, pbuf_t *, int,
269 void *, struct pf_pdesc *, u_short *);
270 static int pf_test_state_other(struct pf_state **, int,
271 struct pfi_kif *, struct pf_pdesc *);
272 static int pf_match_tag(struct pf_rule *,
273 struct pf_mtag *, int *);
274 static void pf_hash(struct pf_addr *, struct pf_addr *,
275 struct pf_poolhashkey *, sa_family_t);
276 static int pf_map_addr(u_int8_t, struct pf_rule *,
277 struct pf_addr *, struct pf_addr *,
278 struct pf_addr *, struct pf_src_node **);
279 static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *,
280 struct pf_rule *, struct pf_addr *,
281 union pf_state_xport *, struct pf_addr *,
282 union pf_state_xport *, struct pf_addr *,
283 union pf_state_xport *, struct pf_src_node **
284 );
285 static void pf_route(pbuf_t **, struct pf_rule *, int,
286 struct ifnet *, struct pf_state *,
287 struct pf_pdesc *);
288 #if INET6
289 static void pf_route6(pbuf_t **, struct pf_rule *, int,
290 struct ifnet *, struct pf_state *,
291 struct pf_pdesc *);
292 #endif /* INET6 */
293 static u_int8_t pf_get_wscale(pbuf_t *, int, u_int16_t,
294 sa_family_t);
295 static u_int16_t pf_get_mss(pbuf_t *, int, u_int16_t,
296 sa_family_t);
297 static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
298 u_int16_t);
299 static void pf_set_rt_ifp(struct pf_state *,
300 struct pf_addr *, sa_family_t af);
301 static int pf_check_proto_cksum(pbuf_t *, int, int,
302 u_int8_t, sa_family_t);
303 static int pf_addr_wrap_neq(struct pf_addr_wrap *,
304 struct pf_addr_wrap *);
305 static struct pf_state *pf_find_state(struct pfi_kif *,
306 struct pf_state_key_cmp *, u_int);
307 static int pf_src_connlimit(struct pf_state **);
308 static void pf_stateins_err(const char *, struct pf_state *,
309 struct pfi_kif *);
310 static int pf_check_congestion(struct ifqueue *);
311
312 #if 0
313 static const char *pf_pptp_ctrl_type_name(u_int16_t code);
314 #endif
315 static void pf_pptp_handler(struct pf_state *, int, int,
316 struct pf_pdesc *, struct pfi_kif *);
317 static void pf_pptp_unlink(struct pf_state *);
318 static void pf_grev1_unlink(struct pf_state *);
319 static int pf_test_state_grev1(struct pf_state **, int,
320 struct pfi_kif *, int, struct pf_pdesc *);
321 static int pf_ike_compare(struct pf_app_state *,
322 struct pf_app_state *);
323 static int pf_test_state_esp(struct pf_state **, int,
324 struct pfi_kif *, int, struct pf_pdesc *);
325
326 extern struct pool pfr_ktable_pl;
327 extern struct pool pfr_kentry_pl;
328 extern int path_mtu_discovery;
329
/*
 * Per-pool allocation limits, indexed by the PF_LIMIT_* constants.
 * Each entry pairs a memory pool with its high-water mark.
 */
struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
	{ &pf_state_pl, PFSTATE_HIWAT },
	{ &pf_app_state_pl, PFAPPSTATE_HIWAT },
	{ &pf_src_tree_pl, PFSNODE_HIWAT },
	{ &pf_frent_pl, PFFRAG_FRENT_HIWAT },
	{ &pfr_ktable_pl, PFR_KTABLE_HIWAT },
	{ &pfr_kentry_pl, PFR_KENTRY_HIWAT },
};
338
339 void *
340 pf_lazy_makewritable(struct pf_pdesc *pd, pbuf_t *pbuf, int len)
341 {
342 void *p;
343
344 if (pd->lmw < 0)
345 return (NULL);
346
347 VERIFY(pbuf == pd->mp);
348
349 p = pbuf->pb_data;
350 if (len > pd->lmw) {
351 if ((p = pbuf_ensure_writable(pbuf, len)) == NULL)
352 len = -1;
353 pd->lmw = len;
354 if (len >= 0) {
355 pd->pf_mtag = pf_find_mtag_pbuf(pbuf);
356
357 switch (pd->af) {
358 case AF_INET: {
359 struct ip *h = p;
360 pd->src = (struct pf_addr *)(uintptr_t)&h->ip_src;
361 pd->dst = (struct pf_addr *)(uintptr_t)&h->ip_dst;
362 pd->ip_sum = &h->ip_sum;
363 break;
364 }
365 #if INET6
366 case AF_INET6: {
367 struct ip6_hdr *h = p;
368 pd->src = (struct pf_addr *)(uintptr_t)&h->ip6_src;
369 pd->dst = (struct pf_addr *)(uintptr_t)&h->ip6_dst;
370 break;
371 }
372 #endif /* INET6 */
373 }
374 }
375 }
376
377 return (len < 0 ? NULL : p);
378 }
379
380 static const int *
381 pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif,
382 int direction, int *action)
383 {
384 if (*state == NULL || (*state)->timeout == PFTM_PURGE) {
385 *action = PF_DROP;
386 return (action);
387 }
388
389 if (direction == PF_OUT &&
390 (((*state)->rule.ptr->rt == PF_ROUTETO &&
391 (*state)->rule.ptr->direction == PF_OUT) ||
392 ((*state)->rule.ptr->rt == PF_REPLYTO &&
393 (*state)->rule.ptr->direction == PF_IN)) &&
394 (*state)->rt_kif != NULL && (*state)->rt_kif != kif) {
395 *action = PF_PASS;
396 return (action);
397 }
398
399 return (0);
400 }
401
/*
 * Look up the state for the current packet/direction.  On a hit,
 * propagate the state's flow source/hash into the packet descriptor
 * (unless the packet already carries a flow ID).  Expands where
 * "kif", "key", "direction", "pd" and "state" are in scope; returns
 * from the enclosing function with the verdict from
 * pf_state_lookup_aux() when the state cannot be used as-is.
 */
#define STATE_LOOKUP()							 \
	do {								 \
		int action;						 \
		*state = pf_find_state(kif, &key, direction);		 \
		if (*state != NULL && pd != NULL &&			 \
		    !(pd->pktflags & PKTF_FLOW_ID)) {			 \
			pd->flowsrc = (*state)->state_key->flowsrc;	 \
			pd->flowhash = (*state)->state_key->flowhash;	 \
			if (pd->flowhash != 0) {			 \
				pd->pktflags |= PKTF_FLOW_ID;		 \
				pd->pktflags &= ~PKTF_FLOW_ADV;		 \
			}						 \
		}							 \
		if (pf_state_lookup_aux(state, kif, direction, &action)) \
			return (action);				 \
	} while (0)

/*
 * True when the state's lan and gwy addresses differ: first 32-bit
 * word for any family, remaining three words as well for IPv6.
 */
#define STATE_ADDR_TRANSLATE(sk)					\
	(sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] ||		\
	((sk)->af_lan == AF_INET6 &&					\
	((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] ||	\
	(sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] ||		\
	(sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3]))

/*
 * True when the state requires any rewriting on the TCP/UDP path:
 * address-family translation (NAT64), address change, or port change.
 */
#define STATE_TRANSLATE(sk)						\
	((sk)->af_lan != (sk)->af_gwy ||				\
	STATE_ADDR_TRANSLATE(sk) ||					\
	(sk)->lan.xport.port != (sk)->gwy.xport.port)

/* Same test for GRE states, which are keyed by PPTP call id. */
#define STATE_GRE_TRANSLATE(sk)						\
	(STATE_ADDR_TRANSLATE(sk) ||					\
	(sk)->lan.xport.call_id != (sk)->gwy.xport.call_id)

/*
 * Interface to bind a new state to: the matching kif when the rule is
 * interface-bound, otherwise the floating pseudo-interface pfi_all.
 */
#define BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all

/*
 * Bump the per-rule state counters for every rule a new state hangs
 * off (main rule, anchor rule, NAT rule); VERIFY guards overflow.
 */
#define STATE_INC_COUNTERS(s)					\
	do {							\
		s->rule.ptr->states++;				\
		VERIFY(s->rule.ptr->states != 0);		\
		if (s->anchor.ptr != NULL) {			\
			s->anchor.ptr->states++;		\
			VERIFY(s->anchor.ptr->states != 0);	\
		}						\
		if (s->nat_rule.ptr != NULL) {			\
			s->nat_rule.ptr->states++;		\
			VERIFY(s->nat_rule.ptr->states != 0);	\
		}						\
	} while (0)

/* Reverse of STATE_INC_COUNTERS; VERIFY guards underflow. */
#define STATE_DEC_COUNTERS(s)					\
	do {							\
		if (s->nat_rule.ptr != NULL) {			\
			VERIFY(s->nat_rule.ptr->states > 0);	\
			s->nat_rule.ptr->states--;		\
		}						\
		if (s->anchor.ptr != NULL) {			\
			VERIFY(s->anchor.ptr->states > 0);	\
			s->anchor.ptr->states--;		\
		}						\
		VERIFY(s->rule.ptr->states > 0);		\
		s->rule.ptr->states--;				\
	} while (0)
465
466 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
467 static __inline int pf_state_compare_lan_ext(struct pf_state_key *,
468 struct pf_state_key *);
469 static __inline int pf_state_compare_ext_gwy(struct pf_state_key *,
470 struct pf_state_key *);
471 static __inline int pf_state_compare_id(struct pf_state *,
472 struct pf_state *);
473
474 struct pf_src_tree tree_src_tracking;
475
476 struct pf_state_tree_id tree_id;
477 struct pf_state_queue state_list;
478
479 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
480 RB_GENERATE(pf_state_tree_lan_ext, pf_state_key,
481 entry_lan_ext, pf_state_compare_lan_ext);
482 RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key,
483 entry_ext_gwy, pf_state_compare_ext_gwy);
484 RB_GENERATE(pf_state_tree_id, pf_state,
485 entry_id, pf_state_compare_id);
486
487 #define PF_DT_SKIP_LANEXT 0x01
488 #define PF_DT_SKIP_EXTGWY 0x02
489
490 static const u_int16_t PF_PPTP_PORT = 1723;
491 static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D;
492
493 struct pf_pptp_hdr {
494 u_int16_t length;
495 u_int16_t type;
496 u_int32_t magic;
497 };
498
499 struct pf_pptp_ctrl_hdr {
500 u_int16_t type;
501 u_int16_t reserved_0;
502 };
503
504 struct pf_pptp_ctrl_generic {
505 u_int16_t data[0];
506 };
507
508 #define PF_PPTP_CTRL_TYPE_START_REQ 1
509 struct pf_pptp_ctrl_start_req {
510 u_int16_t protocol_version;
511 u_int16_t reserved_1;
512 u_int32_t framing_capabilities;
513 u_int32_t bearer_capabilities;
514 u_int16_t maximum_channels;
515 u_int16_t firmware_revision;
516 u_int8_t host_name[64];
517 u_int8_t vendor_string[64];
518 };
519
520 #define PF_PPTP_CTRL_TYPE_START_RPY 2
521 struct pf_pptp_ctrl_start_rpy {
522 u_int16_t protocol_version;
523 u_int8_t result_code;
524 u_int8_t error_code;
525 u_int32_t framing_capabilities;
526 u_int32_t bearer_capabilities;
527 u_int16_t maximum_channels;
528 u_int16_t firmware_revision;
529 u_int8_t host_name[64];
530 u_int8_t vendor_string[64];
531 };
532
533 #define PF_PPTP_CTRL_TYPE_STOP_REQ 3
534 struct pf_pptp_ctrl_stop_req {
535 u_int8_t reason;
536 u_int8_t reserved_1;
537 u_int16_t reserved_2;
538 };
539
540 #define PF_PPTP_CTRL_TYPE_STOP_RPY 4
541 struct pf_pptp_ctrl_stop_rpy {
542 u_int8_t reason;
543 u_int8_t error_code;
544 u_int16_t reserved_1;
545 };
546
547 #define PF_PPTP_CTRL_TYPE_ECHO_REQ 5
548 struct pf_pptp_ctrl_echo_req {
549 u_int32_t identifier;
550 };
551
552 #define PF_PPTP_CTRL_TYPE_ECHO_RPY 6
553 struct pf_pptp_ctrl_echo_rpy {
554 u_int32_t identifier;
555 u_int8_t result_code;
556 u_int8_t error_code;
557 u_int16_t reserved_1;
558 };
559
560 #define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7
561 struct pf_pptp_ctrl_call_out_req {
562 u_int16_t call_id;
563 u_int16_t call_sernum;
564 u_int32_t min_bps;
565 u_int32_t bearer_type;
566 u_int32_t framing_type;
567 u_int16_t rxwindow_size;
568 u_int16_t proc_delay;
569 u_int8_t phone_num[64];
570 u_int8_t sub_addr[64];
571 };
572
573 #define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8
574 struct pf_pptp_ctrl_call_out_rpy {
575 u_int16_t call_id;
576 u_int16_t peer_call_id;
577 u_int8_t result_code;
578 u_int8_t error_code;
579 u_int16_t cause_code;
580 u_int32_t connect_speed;
581 u_int16_t rxwindow_size;
582 u_int16_t proc_delay;
583 u_int32_t phy_channel_id;
584 };
585
586 #define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9
587 struct pf_pptp_ctrl_call_in_1st {
588 u_int16_t call_id;
589 u_int16_t call_sernum;
590 u_int32_t bearer_type;
591 u_int32_t phy_channel_id;
592 u_int16_t dialed_number_len;
593 u_int16_t dialing_number_len;
594 u_int8_t dialed_num[64];
595 u_int8_t dialing_num[64];
596 u_int8_t sub_addr[64];
597 };
598
599 #define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10
600 struct pf_pptp_ctrl_call_in_2nd {
601 u_int16_t call_id;
602 u_int16_t peer_call_id;
603 u_int8_t result_code;
604 u_int8_t error_code;
605 u_int16_t rxwindow_size;
606 u_int16_t txdelay;
607 u_int16_t reserved_1;
608 };
609
610 #define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11
611 struct pf_pptp_ctrl_call_in_3rd {
612 u_int16_t call_id;
613 u_int16_t reserved_1;
614 u_int32_t connect_speed;
615 u_int16_t rxwindow_size;
616 u_int16_t txdelay;
617 u_int32_t framing_type;
618 };
619
620 #define PF_PPTP_CTRL_TYPE_CALL_CLR 12
621 struct pf_pptp_ctrl_call_clr {
622 u_int16_t call_id;
623 u_int16_t reserved_1;
624 };
625
626 #define PF_PPTP_CTRL_TYPE_CALL_DISC 13
627 struct pf_pptp_ctrl_call_disc {
628 u_int16_t call_id;
629 u_int8_t result_code;
630 u_int8_t error_code;
631 u_int16_t cause_code;
632 u_int16_t reserved_1;
633 u_int8_t statistics[128];
634 };
635
636 #define PF_PPTP_CTRL_TYPE_ERROR 14
637 struct pf_pptp_ctrl_error {
638 u_int16_t peer_call_id;
639 u_int16_t reserved_1;
640 u_int32_t crc_errors;
641 u_int32_t fr_errors;
642 u_int32_t hw_errors;
643 u_int32_t buf_errors;
644 u_int32_t tim_errors;
645 u_int32_t align_errors;
646 };
647
648 #define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15
649 struct pf_pptp_ctrl_set_linkinfo {
650 u_int16_t peer_call_id;
651 u_int16_t reserved_1;
652 u_int32_t tx_accm;
653 u_int32_t rx_accm;
654 };
655
#if 0
/*
 * Debug helper: map a PPTP control-message type (network byte order)
 * to a human-readable name.  Currently compiled out.
 * NOTE(review): for reserved codes > 0xff, "%02x" emits up to four
 * hex digits, writing past the 12-byte static "reserved-00" buffer --
 * bound the format (or the code) before ever enabling this block.
 */
static const char *pf_pptp_ctrl_type_name(u_int16_t code)
{
	code = ntohs(code);

	if (code < PF_PPTP_CTRL_TYPE_START_REQ ||
	    code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) {
		static char reserved[] = "reserved-00";

		sprintf(&reserved[9], "%02x", code);
		return (reserved);
	} else {
		static const char *name[] = {
			"start_req", "start_rpy", "stop_req", "stop_rpy",
			"echo_req", "echo_rpy", "call_out_req", "call_out_rpy",
			"call_in_1st", "call_in_2nd", "call_in_3rd",
			"call_clr", "call_disc", "error", "set_linkinfo"
		};

		return (name[code - 1]);
	}
};
#endif
679
680 static const size_t PF_PPTP_CTRL_MSG_MINSIZE =
681 sizeof (struct pf_pptp_hdr) + sizeof (struct pf_pptp_ctrl_hdr);
682
683 union pf_pptp_ctrl_msg_union {
684 struct pf_pptp_ctrl_start_req start_req;
685 struct pf_pptp_ctrl_start_rpy start_rpy;
686 struct pf_pptp_ctrl_stop_req stop_req;
687 struct pf_pptp_ctrl_stop_rpy stop_rpy;
688 struct pf_pptp_ctrl_echo_req echo_req;
689 struct pf_pptp_ctrl_echo_rpy echo_rpy;
690 struct pf_pptp_ctrl_call_out_req call_out_req;
691 struct pf_pptp_ctrl_call_out_rpy call_out_rpy;
692 struct pf_pptp_ctrl_call_in_1st call_in_1st;
693 struct pf_pptp_ctrl_call_in_2nd call_in_2nd;
694 struct pf_pptp_ctrl_call_in_3rd call_in_3rd;
695 struct pf_pptp_ctrl_call_clr call_clr;
696 struct pf_pptp_ctrl_call_disc call_disc;
697 struct pf_pptp_ctrl_error error;
698 struct pf_pptp_ctrl_set_linkinfo set_linkinfo;
699 u_int8_t data[0];
700 };
701
702 struct pf_pptp_ctrl_msg {
703 struct pf_pptp_hdr hdr;
704 struct pf_pptp_ctrl_hdr ctrl;
705 union pf_pptp_ctrl_msg_union msg;
706 };
707
/* Enhanced GRE (v1, as used by PPTP) flag/constant definitions. */
#define PF_GRE_FLAG_CHECKSUM_PRESENT	0x8000
#define PF_GRE_FLAG_VERSION_MASK	0x0007
#define PF_GRE_PPP_ETHERTYPE		0x880B

/*
 * Fixed portion of an enhanced-GRE (v1) header.  The optional
 * seqno/ackno words are deliberately left out of the struct (they are
 * only present when the corresponding flag bits are set).
 */
struct pf_grev1_hdr {
	u_int16_t flags;
	u_int16_t protocol_type;
	u_int16_t payload_length;
	u_int16_t call_id;
/*
	u_int32_t seqno;
	u_int32_t ackno;
*/
};

/* UDP port used by ISAKMP/IKE. */
static const u_int16_t PF_IKE_PORT = 500;

/* ISAKMP/IKE header layout, shared by IKEv1 and IKEv2. */
struct pf_ike_hdr {
	u_int64_t initiator_cookie, responder_cookie;
	u_int8_t next_payload, version, exchange_type, flags;
	u_int32_t message_id, length;
};

#define PF_IKE_PACKET_MINSIZE	(sizeof (struct pf_ike_hdr))

/* IKE exchange types: v1 uses 1-5, v2 uses 34-37. */
#define PF_IKEv1_EXCHTYPE_BASE		 1
#define PF_IKEv1_EXCHTYPE_ID_PROTECT	 2
#define PF_IKEv1_EXCHTYPE_AUTH_ONLY	 3
#define PF_IKEv1_EXCHTYPE_AGGRESSIVE	 4
#define PF_IKEv1_EXCHTYPE_INFORMATIONAL	 5
#define PF_IKEv2_EXCHTYPE_SA_INIT	34
#define PF_IKEv2_EXCHTYPE_AUTH		35
#define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA	36
#define PF_IKEv2_EXCHTYPE_INFORMATIONAL	37

/* IKE header flag bits. */
#define PF_IKEv1_FLAG_E		0x01
#define PF_IKEv1_FLAG_C		0x02
#define PF_IKEv1_FLAG_A		0x04
#define PF_IKEv2_FLAG_I		0x08
#define PF_IKEv2_FLAG_V		0x10
#define PF_IKEv2_FLAG_R		0x20

/* ESP header: SPI and sequence number, followed by opaque payload. */
struct pf_esp_hdr {
	u_int32_t spi;
	u_int32_t seqno;
	u_int8_t payload[];
};
755
756 static __inline int
757 pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
758 {
759 switch (af) {
760 #ifdef INET
761 case AF_INET:
762 if (a->addr32[0] > b->addr32[0])
763 return (1);
764 if (a->addr32[0] < b->addr32[0])
765 return (-1);
766 break;
767 #endif /* INET */
768 #ifdef INET6
769 case AF_INET6:
770 if (a->addr32[3] > b->addr32[3])
771 return (1);
772 if (a->addr32[3] < b->addr32[3])
773 return (-1);
774 if (a->addr32[2] > b->addr32[2])
775 return (1);
776 if (a->addr32[2] < b->addr32[2])
777 return (-1);
778 if (a->addr32[1] > b->addr32[1])
779 return (1);
780 if (a->addr32[1] < b->addr32[1])
781 return (-1);
782 if (a->addr32[0] > b->addr32[0])
783 return (1);
784 if (a->addr32[0] < b->addr32[0])
785 return (-1);
786 break;
787 #endif /* INET6 */
788 }
789 return (0);
790 }
791
792 static __inline int
793 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
794 {
795 int diff;
796
797 if (a->rule.ptr > b->rule.ptr)
798 return (1);
799 if (a->rule.ptr < b->rule.ptr)
800 return (-1);
801 if ((diff = a->af - b->af) != 0)
802 return (diff);
803 if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0)
804 return (diff);
805 return (0);
806 }
807
/*
 * RB-tree comparator for the lan/ext state table (used for outbound
 * lookups).  Keys are ordered by protocol, lan-side address family,
 * protocol-specific ports/ids, then addresses.  For UDP, the
 * proto_variant selects how loose the "extfilter" matching is: looser
 * variants skip the remote port and/or address comparisons so one
 * state can match many peers.  Returns <0/0/>0, memcmp-style.
 */
static __inline int
pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;
	int	extfilter;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);
	if ((diff = a->af_lan - b->af_lan) != 0)
		return (diff);

	/* strictest filter by default: address+port on both sides */
	extfilter = PF_EXTFILTER_APD;

	switch (a->proto) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		/* ICMP keys carry the id/type in the lan port slot */
		if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
			return (diff);
		break;

	case IPPROTO_TCP:
		if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
			return (diff);
		if ((diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0)
			return (diff);
		break;

	case IPPROTO_UDP:
		if ((diff = a->proto_variant - b->proto_variant))
			return (diff);
		/* variant determines how much of the remote side matters */
		extfilter = a->proto_variant;
		if ((diff = a->lan.xport.port - b->lan.xport.port) != 0)
			return (diff);
		if ((extfilter < PF_EXTFILTER_AD) &&
		    (diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0)
			return (diff);
		break;

	case IPPROTO_GRE:
		/* PPTP-style GRE is keyed by call id */
		if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
		    a->proto_variant == b->proto_variant) {
			if (!!(diff = a->ext_lan.xport.call_id -
			    b->ext_lan.xport.call_id))
				return (diff);
		}
		break;

	case IPPROTO_ESP:
		/* ESP is keyed by SPI */
		if (!!(diff = a->ext_lan.xport.spi - b->ext_lan.xport.spi))
			return (diff);
		break;

	default:
		break;
	}

	switch (a->af_lan) {
#if INET
	case AF_INET:
		if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr,
		    a->af_lan)) != 0)
			return (diff);

		/* loose filters ignore the remote address entirely */
		if (extfilter < PF_EXTFILTER_EI) {
			if ((diff = pf_addr_compare(&a->ext_lan.addr,
			    &b->ext_lan.addr,
			    a->af_lan)) != 0)
				return (diff);
		}
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr,
		    a->af_lan)) != 0)
			return (diff);

		/* a zero search address matches the wildcard ext side */
		if (extfilter < PF_EXTFILTER_EI ||
		    !PF_AZERO(&b->ext_lan.addr, AF_INET6)) {
			if ((diff = pf_addr_compare(&a->ext_lan.addr,
			    &b->ext_lan.addr,
			    a->af_lan)) != 0)
				return (diff);
		}
		break;
#endif /* INET6 */
	}

	/*
	 * App-layer states (PPTP, IKE) carry an extra comparator; order
	 * first by comparator identity, then by its verdict.
	 */
	if (a->app_state && b->app_state) {
		if (a->app_state->compare_lan_ext &&
		    b->app_state->compare_lan_ext) {
			diff = (const char *)b->app_state->compare_lan_ext -
			    (const char *)a->app_state->compare_lan_ext;
			if (diff != 0)
				return (diff);
			diff = a->app_state->compare_lan_ext(a->app_state,
			    b->app_state);
			if (diff != 0)
				return (diff);
		}
	}

	return (0);
}
912
/*
 * RB-tree comparator for the ext/gwy state table (used for inbound
 * lookups).  Mirror image of pf_state_compare_lan_ext(): ordered by
 * protocol, gwy-side address family, protocol-specific ports/ids,
 * then addresses, with the same UDP extfilter loosening and optional
 * app-state comparator.  Returns <0/0/>0, memcmp-style.
 */
static __inline int
pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b)
{
	int	diff;
	int	extfilter;

	if ((diff = a->proto - b->proto) != 0)
		return (diff);

	if ((diff = a->af_gwy - b->af_gwy) != 0)
		return (diff);

	/* strictest filter by default: address+port on both sides */
	extfilter = PF_EXTFILTER_APD;

	switch (a->proto) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		/* ICMP keys carry the id/type in the gwy port slot */
		if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
			return (diff);
		break;

	case IPPROTO_TCP:
		/* note: remote port first here, unlike the lan_ext tree */
		if ((diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0)
			return (diff);
		if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
			return (diff);
		break;

	case IPPROTO_UDP:
		if ((diff = a->proto_variant - b->proto_variant))
			return (diff);
		/* variant determines how much of the remote side matters */
		extfilter = a->proto_variant;
		if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0)
			return (diff);
		if ((extfilter < PF_EXTFILTER_AD) &&
		    (diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0)
			return (diff);
		break;

	case IPPROTO_GRE:
		/* PPTP-style GRE is keyed by call id */
		if (a->proto_variant == PF_GRE_PPTP_VARIANT &&
		    a->proto_variant == b->proto_variant) {
			if (!!(diff = a->gwy.xport.call_id -
			    b->gwy.xport.call_id))
				return (diff);
		}
		break;

	case IPPROTO_ESP:
		/* ESP is keyed by SPI */
		if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi))
			return (diff);
		break;

	default:
		break;
	}

	switch (a->af_gwy) {
#if INET
	case AF_INET:
		if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr,
		    a->af_gwy)) != 0)
			return (diff);

		/* loose filters ignore the remote address entirely */
		if (extfilter < PF_EXTFILTER_EI) {
			if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr,
			    a->af_gwy)) != 0)
				return (diff);
		}
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr,
		    a->af_gwy)) != 0)
			return (diff);

		/* a zero search address matches the wildcard ext side */
		if (extfilter < PF_EXTFILTER_EI ||
		    !PF_AZERO(&b->ext_gwy.addr, AF_INET6)) {
			if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr,
			    a->af_gwy)) != 0)
				return (diff);
		}
		break;
#endif /* INET6 */
	}

	/*
	 * App-layer states (PPTP, IKE) carry an extra comparator; order
	 * first by comparator identity, then by its verdict.
	 */
	if (a->app_state && b->app_state) {
		if (a->app_state->compare_ext_gwy &&
		    b->app_state->compare_ext_gwy) {
			diff = (const char *)b->app_state->compare_ext_gwy -
			    (const char *)a->app_state->compare_ext_gwy;
			if (diff != 0)
				return (diff);
			diff = a->app_state->compare_ext_gwy(a->app_state,
			    b->app_state);
			if (diff != 0)
				return (diff);
		}
	}

	return (0);
}
1016
1017 static __inline int
1018 pf_state_compare_id(struct pf_state *a, struct pf_state *b)
1019 {
1020 if (a->id > b->id)
1021 return (1);
1022 if (a->id < b->id)
1023 return (-1);
1024 if (a->creatorid > b->creatorid)
1025 return (1);
1026 if (a->creatorid < b->creatorid)
1027 return (-1);
1028
1029 return (0);
1030 }
1031
#if INET6
/*
 * Copy a pf address.  Only the words meaningful for the given address
 * family are copied: one 32-bit word for IPv4, all four for IPv6.
 */
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6: {
		int w;

		for (w = 0; w < 4; w++)
			dst->addr32[w] = src->addr32[w];
		break;
	}
	}
}
#endif /* INET6 */
1051
1052 struct pf_state *
1053 pf_find_state_byid(struct pf_state_cmp *key)
1054 {
1055 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1056
1057 return (RB_FIND(pf_state_tree_id, &tree_id,
1058 (struct pf_state *)(void *)key));
1059 }
1060
/*
 * Look up the state entry matching "key" for the given direction.
 * PF_OUT searches the lan/ext tree; PF_IN searches the ext/gwy tree
 * first and falls back to the lan/ext tree to cover NAT64, where the
 * rewrite happens only on input -- a fallback hit is kept only when
 * the two sides use different address families (a real translated
 * state).  Returns the first state on the key bound to this interface
 * (or floating, pfi_all), or NULL.
 * NOTE(review): unlike pf_find_state_all(), the NAT64 fallback here
 * is not gated on pf_nat64_configured -- confirm this is intended.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_state_key	*sk = NULL;
	struct pf_state		*s;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	switch (dir) {
	case PF_OUT:
		sk = RB_FIND(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
		    (struct pf_state_key *)key);
		break;
	case PF_IN:
		sk = RB_FIND(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy,
		    (struct pf_state_key *)key);
		/*
		 * NAT64 is done only on input, for packets coming in from
		 * from the LAN side, need to lookup the lan_ext tree.
		 */
		if (sk == NULL) {
			sk = RB_FIND(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext,
			    (struct pf_state_key *)key);
			/* same-af hit means no translation: not ours */
			if (sk && sk->af_lan == sk->af_gwy)
				sk = NULL;
		}
		break;
	default:
		panic("pf_find_state");
	}

	/* list is sorted, if-bound states before floating ones */
	if (sk != NULL)
		TAILQ_FOREACH(s, &sk->states, next)
			if (s->kif == pfi_all || s->kif == kif)
				return (s);

	return (NULL);
}
1101
/*
 * Look up a state by key regardless of interface binding.  Returns the
 * first state attached to the matching state key; when 'more' is
 * non-NULL it is additionally incremented once per attached state.
 */
struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_state_key *sk = NULL;
	struct pf_state *s, *ret = NULL;

	pf_status.fcounters[FCNT_STATE_SEARCH]++;

	/* direction selects the tree, as in pf_find_state() */
	switch (dir) {
	case PF_OUT:
		sk = RB_FIND(pf_state_tree_lan_ext,
		    &pf_statetbl_lan_ext, (struct pf_state_key *)key);
		break;
	case PF_IN:
		sk = RB_FIND(pf_state_tree_ext_gwy,
		    &pf_statetbl_ext_gwy, (struct pf_state_key *)key);
		/*
		 * NAT64 is done only on input; for packets coming in
		 * from the LAN side, need to lookup the lan_ext tree.
		 */
		if ((sk == NULL) && pf_nat64_configured) {
			sk = RB_FIND(pf_state_tree_lan_ext,
			    &pf_statetbl_lan_ext,
			    (struct pf_state_key *)key);
			/* equal families on both sides: not a NAT64 state */
			if (sk && sk->af_lan == sk->af_gwy)
				sk = NULL;
		}
		break;
	default:
		panic("pf_find_state_all");
	}

	if (sk != NULL) {
		ret = TAILQ_FIRST(&sk->states);
		if (more == NULL)
			return (ret);

		TAILQ_FOREACH(s, &sk->states, next)
			(*more)++;
	}

	return (ret);
}
1145
1146 static void
1147 pf_init_threshold(struct pf_threshold *threshold,
1148 u_int32_t limit, u_int32_t seconds)
1149 {
1150 threshold->limit = limit * PF_THRESHOLD_MULT;
1151 threshold->seconds = seconds;
1152 threshold->count = 0;
1153 threshold->last = pf_time_second();
1154 }
1155
1156 static void
1157 pf_add_threshold(struct pf_threshold *threshold)
1158 {
1159 u_int32_t t = pf_time_second(), diff = t - threshold->last;
1160
1161 if (diff >= threshold->seconds)
1162 threshold->count = 0;
1163 else
1164 threshold->count -= threshold->count * diff /
1165 threshold->seconds;
1166 threshold->count += PF_THRESHOLD_MULT;
1167 threshold->last = t;
1168 }
1169
1170 static int
1171 pf_check_threshold(struct pf_threshold *threshold)
1172 {
1173 return (threshold->count > threshold->limit);
1174 }
1175
/*
 * Enforce per-source connection limits (max_src_conn and
 * max_src_conn_rate) for a freshly established state.  Returns 0 when
 * within limits; returns 1 after marking this state for purge and, when
 * an overload table is configured, adding the offending source to it
 * and optionally flushing that source's other states.
 */
static int
pf_src_connlimit(struct pf_state **state)
{
	int bad = 0;
	/* count this connection against the node and its rate tracker */
	(*state)->src_node->conn++;
	VERIFY((*state)->src_node->conn != 0);
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	/* absolute cap on simultaneous connections from this source */
	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	/* rate cap (connections per window) from this source */
	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	if ((*state)->rule.ptr->overload_tbl) {
		struct pfr_addr p;
		u_int32_t killed = 0;

		pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf_src_connlimit: blocking address ");
			pf_print_host(&(*state)->src_node->addr, 0,
			    (*state)->state_key->af_lan);
		}

		/* insert the source address into the overload table */
		bzero(&p, sizeof (p));
		p.pfra_af = (*state)->state_key->af_lan;
		switch ((*state)->state_key->af_lan) {
#if INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = (*state)->src_node->addr.v4addr;
			break;
#endif /* INET */
#if INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = (*state)->src_node->addr.v6addr;
			break;
#endif /* INET6 */
		}

		pfr_insert_kentry((*state)->rule.ptr->overload_tbl,
		    &p, pf_calendar_time_second());

		/* kill existing states if that's required. */
		if ((*state)->rule.ptr->flush) {
			struct pf_state_key *sk;
			struct pf_state *st;

			pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
			RB_FOREACH(st, pf_state_tree_id, &tree_id) {
				sk = st->state_key;
				/*
				 * Kill states from this source. (Only those
				 * from the same rule if PF_FLUSH_GLOBAL is not
				 * set)
				 */
				if (sk->af_lan ==
				    (*state)->state_key->af_lan &&
				    (((*state)->state_key->direction ==
				    PF_OUT &&
				    PF_AEQ(&(*state)->src_node->addr,
				    &sk->lan.addr, sk->af_lan)) ||
				    ((*state)->state_key->direction == PF_IN &&
				    PF_AEQ(&(*state)->src_node->addr,
				    &sk->ext_lan.addr, sk->af_lan))) &&
				    ((*state)->rule.ptr->flush &
				    PF_FLUSH_GLOBAL ||
				    (*state)->rule.ptr == st->rule.ptr)) {
					/* mark for the purge thread */
					st->timeout = PFTM_PURGE;
					st->src.state = st->dst.state =
					    TCPS_CLOSED;
					killed++;
				}
			}
			if (pf_status.debug >= PF_DEBUG_MISC)
				printf(", %u states killed", killed);
		}
		if (pf_status.debug >= PF_DEBUG_MISC)
			printf("\n");
	}

	/* kill this state */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
	return (1);
}
1275
/*
 * Find or create the source-tracking node for 'src'.  Returns 0 with
 * *sn set on success; -1 when the rule's max_src_nodes or
 * max_src_states limit is hit, or on allocation/insert failure.
 */
int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{
	struct pf_src_node k;

	if (*sn == NULL) {
		/* look for an existing node first */
		k.af = af;
		PF_ACPY(&k.addr, src, af);
		/* rule-bound tracking keys on the rule as well */
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = rule;
		else
			k.rule.ptr = NULL;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
	}
	if (*sn == NULL) {
		/* allocate a fresh node, respecting max_src_nodes */
		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = pool_get(&pf_src_tree_pl, PR_WAITOK);
		else
			pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL)
			return (-1);
		bzero(*sn, sizeof (struct pf_src_node));

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		if (rule->rule_flag & PFRULE_RULESRCTRACK ||
		    rule->rpool.opts & PF_POOL_STICKYADDR)
			(*sn)->rule.ptr = rule;
		else
			(*sn)->rule.ptr = NULL;
		PF_ACPY(&(*sn)->addr, src, af);
		if (RB_INSERT(pf_src_tree,
		    &tree_src_tracking, *sn) != NULL) {
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf: src_tree insert failed: ");
				pf_print_host(&(*sn)->addr, 0, af);
				printf("\n");
			}
			pool_put(&pf_src_tree_pl, *sn);
			return (-1);
		}
		(*sn)->creation = pf_time_second();
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			(*sn)->rule.ptr->src_nodes++;
		pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		pf_status.src_nodes++;
	} else {
		/* existing node: enforce the per-rule state cap */
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}
1339
1340 static void
1341 pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif)
1342 {
1343 struct pf_state_key *sk = s->state_key;
1344
1345 if (pf_status.debug >= PF_DEBUG_MISC) {
1346 printf("pf: state insert failed: %s %s ", tree, kif->pfik_name);
1347 switch (sk->proto) {
1348 case IPPROTO_TCP:
1349 printf("TCP");
1350 break;
1351 case IPPROTO_UDP:
1352 printf("UDP");
1353 break;
1354 case IPPROTO_ICMP:
1355 printf("ICMP4");
1356 break;
1357 case IPPROTO_ICMPV6:
1358 printf("ICMP6");
1359 break;
1360 default:
1361 printf("PROTO=%u", sk->proto);
1362 break;
1363 }
1364 printf(" lan: ");
1365 pf_print_sk_host(&sk->lan, sk->af_lan, sk->proto,
1366 sk->proto_variant);
1367 printf(" gwy: ");
1368 pf_print_sk_host(&sk->gwy, sk->af_gwy, sk->proto,
1369 sk->proto_variant);
1370 printf(" ext_lan: ");
1371 pf_print_sk_host(&sk->ext_lan, sk->af_lan, sk->proto,
1372 sk->proto_variant);
1373 printf(" ext_gwy: ");
1374 pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto,
1375 sk->proto_variant);
1376 if (s->sync_flags & PFSTATE_FROMSYNC)
1377 printf(" (from sync)");
1378 printf("\n");
1379 }
1380 }
1381
/*
 * Link a fully-built state into the three lookup structures (lan_ext
 * tree, ext_gwy tree, id tree) and the global state list.  Returns 0 on
 * success, -1 on a collision (the state is detached but not freed).
 */
int
pf_insert_state(struct pfi_kif *kif, struct pf_state *s)
{
	struct pf_state_key *cur;
	struct pf_state *sp;

	VERIFY(s->state_key != NULL);
	s->kif = kif;

	if ((cur = RB_INSERT(pf_state_tree_lan_ext, &pf_statetbl_lan_ext,
	    s->state_key)) != NULL) {
		/* key exists. check for same kif, if none, add to key */
		TAILQ_FOREACH(sp, &cur->states, next)
			if (sp->kif == kif) {	/* collision! */
				pf_stateins_err("tree_lan_ext", s, kif);
				pf_detach_state(s,
				    PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
				return (-1);
			}
		/* share the existing key: drop ours, attach to theirs */
		pf_detach_state(s, PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY);
		pf_attach_state(cur, s, kif == pfi_all ? 1 : 0);
	}

	/* if cur != NULL, we already found a state key and attached to it */
	if (cur == NULL && (cur = RB_INSERT(pf_state_tree_ext_gwy,
	    &pf_statetbl_ext_gwy, s->state_key)) != NULL) {
		/* must not happen. we must have found the sk above! */
		pf_stateins_err("tree_ext_gwy", s, kif);
		pf_detach_state(s, PF_DT_SKIP_EXTGWY);
		return (-1);
	}

	/* assign a fresh (id, creatorid) pair when none was provided */
	if (s->id == 0 && s->creatorid == 0) {
		s->id = htobe64(pf_status.stateid++);
		s->creatorid = pf_status.hostid;
	}
	if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
		if (pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state insert failed: "
			    "id: %016llx creatorid: %08x",
			    be64toh(s->id), ntohl(s->creatorid));
			if (s->sync_flags & PFSTATE_FROMSYNC)
				printf(" (from sync)");
			printf("\n");
		}
		pf_detach_state(s, 0);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&state_list, s, entry_list);
	pf_status.fcounters[FCNT_STATE_INSERT]++;
	pf_status.states++;
	VERIFY(pf_status.states != 0);
	pfi_kif_ref(kif, PFI_KIF_REF_STATE);
#if NPFSYNC
	pfsync_insert_state(s);
#endif
	return (0);
}
1440
/*
 * Continuation body of the purge thread.  Runs roughly once per second:
 * expires a slice of the state table each pass, purges fragments and
 * source nodes every PFTM_INTERVAL passes, then re-arms itself via
 * tsleep0() with this function as the continuation.  Never returns.
 */
static int
pf_purge_thread_cont(int err)
{
#pragma unused(err)
	static u_int32_t nloops = 0;
	int t = 1;	/* 1 second */

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the periodic timeout callout to update
	 * the counter returnable via net_uptime().
	 */
	net_update_uptime();

	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);

	/* purge everything if not running */
	if (!pf_status.running) {
		pf_purge_expired_states(pf_status.states);
		pf_purge_expired_fragments();
		pf_purge_expired_src_nodes();

		/* terminate thread (we don't currently do this) */
		if (pf_purge_thread == NULL) {
			lck_mtx_unlock(pf_lock);
			lck_rw_done(pf_perim_lock);

			thread_deallocate(current_thread());
			thread_terminate(current_thread());
			/* NOTREACHED */
			return (0);
		} else {
			/* if there's nothing left, sleep w/o timeout */
			if (pf_status.states == 0 &&
			    pf_normalize_isempty() &&
			    RB_EMPTY(&tree_src_tracking)) {
				nloops = 0;
				t = 0;
			}
			goto done;
		}
	}

	/* process a fraction of the state table every second */
	pf_purge_expired_states(1 + (pf_status.states
	    / pf_default_rule.timeout[PFTM_INTERVAL]));

	/* purge other expired types every PFTM_INTERVAL seconds */
	if (++nloops >= pf_default_rule.timeout[PFTM_INTERVAL]) {
		pf_purge_expired_fragments();
		pf_purge_expired_src_nodes();
		nloops = 0;
	}
done:
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

	/* t == 0 sleeps without timeout until explicitly woken */
	(void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge_cont",
	    t * hz, pf_purge_thread_cont);
	/* NOTREACHED */
	VERIFY(0);

	return (0);
}
1506
/*
 * Entry point of the purge thread: park in tsleep0() and let
 * pf_purge_thread_cont() do all the periodic work as the wakeup
 * continuation.
 */
void
pf_purge_thread_fn(void *v, wait_result_t w)
{
#pragma unused(v, w)
	(void) tsleep0(pf_purge_thread_fn, PWAIT, "pf_purge", 0,
	    pf_purge_thread_cont);
	/*
	 * tsleep0() shouldn't have returned as PCATCH was not set;
	 * therefore assert in this case.
	 */
	VERIFY(0);
}
1519
/*
 * Compute the absolute expiry time (seconds) of 'state'.  Applies the
 * rule's timeout for the state's current stage, falling back to the
 * global default, and scales it down linearly under adaptive timeouts
 * when the state count is between the ADAPTIVE_START and ADAPTIVE_END
 * thresholds (immediate expiry at or beyond END).
 */
u_int64_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t t;
	u_int32_t start;
	u_int32_t end;
	u_int32_t states;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (pf_time_second());

	VERIFY(state->timeout != PFTM_UNLINKED);
	VERIFY(state->timeout < PFTM_MAX);
	/* per-rule timeout, else the global default for this stage */
	t = state->rule.ptr->timeout[state->timeout];
	if (!t)
		t = pf_default_rule.timeout[state->timeout];
	/* adaptive window: per-rule if configured, otherwise global */
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states;
	} else {
		start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			/* scale t by the remaining headroom */
			return (state->expire + t * (end - states) /
			    (end - start));
		else
			return (pf_time_second());
	}
	return (state->expire + t);
}
1557
/*
 * Walk the source-tracking tree and free every node with no attached
 * states whose expiry time has passed; also reaps a rule that was kept
 * alive only for source tracking once its last reference drops.
 */
void
pf_purge_expired_src_nodes(void)
{
	struct pf_src_node *cur, *next;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
		/* fetch the successor first: cur may be removed below */
		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);

		if (cur->states <= 0 && cur->expire <= pf_time_second()) {
			if (cur->rule.ptr != NULL) {
				cur->rule.ptr->src_nodes--;
				if (cur->rule.ptr->states <= 0 &&
				    cur->rule.ptr->max_src_nodes <= 0)
					pf_rm_rule(NULL, cur->rule.ptr);
			}
			RB_REMOVE(pf_src_tree, &tree_src_tracking, cur);
			pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			pf_status.src_nodes--;
			pool_put(&pf_src_tree_pl, cur);
		}
	}
}
1582
/*
 * Detach state 's' from its source node(s): decrement the connection
 * and state counters and, when a node's last state goes away, arm the
 * node's expiry timer (PFTM_SRC_NODE) so the purge thread can reap it.
 */
void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t t;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (s->src_node != NULL) {
		if (s->src.tcp_est) {
			VERIFY(s->src_node->conn > 0);
			--s->src_node->conn;
		}
		VERIFY(s->src_node->states > 0);
		if (--s->src_node->states <= 0) {
			t = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!t)
				t = pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = pf_time_second() + t;
		}
	}
	/* the NAT source node may be distinct from src_node */
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		VERIFY(s->nat_src_node->states > 0);
		if (--s->nat_src_node->states <= 0) {
			t = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!t)
				t = pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = pf_time_second() + t;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}
1614
/*
 * Remove a state from the id tree and its source nodes and mark it
 * PFTM_UNLINKED; pf_free_state() performs the final teardown.  A state
 * still in the synproxy destination phase first gets a RST|ACK sent so
 * the peer does not hang.
 */
void
pf_unlink_state(struct pf_state *cur)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (cur->src.state == PF_TCPS_PROXY_DST) {
		pf_send_tcp(cur->rule.ptr, cur->state_key->af_lan,
		    &cur->state_key->ext_lan.addr, &cur->state_key->lan.addr,
		    cur->state_key->ext_lan.xport.port,
		    cur->state_key->lan.xport.port,
		    cur->src.seqhi, cur->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL);
	}

	/* run (and free) any unlink hooks registered on this state */
	hook_runloop(&cur->unlink_hooks, HOOK_REMOVE|HOOK_FREE);
	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
#if NPFSYNC
	if (cur->creatorid == pf_status.hostid)
		pfsync_delete_state(cur);
#endif
	cur->timeout = PFTM_UNLINKED;
	pf_src_tree_remove_state(cur);
	pf_detach_state(cur, 0);
}
1639
/* callers should be at splpf and hold the
 * write_lock on pf_consistency_lock */
/*
 * Final teardown of an unlinked state: drop its rule/anchor references
 * (reaping rules that were kept alive only by states), release the
 * interface reference, and return the state to its pool.
 */
void
pf_free_state(struct pf_state *cur)
{
	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
#if NPFSYNC
	/* defer freeing while pfsync's bulk-send cursor points at us */
	if (pfsyncif != NULL &&
	    (pfsyncif->sc_bulk_send_next == cur ||
	    pfsyncif->sc_bulk_terminator == cur))
		return;
#endif
	VERIFY(cur->timeout == PFTM_UNLINKED);
	VERIFY(cur->rule.ptr->states > 0);
	if (--cur->rule.ptr->states <= 0 &&
	    cur->rule.ptr->src_nodes <= 0)
		pf_rm_rule(NULL, cur->rule.ptr);
	if (cur->nat_rule.ptr != NULL) {
		VERIFY(cur->nat_rule.ptr->states > 0);
		if (--cur->nat_rule.ptr->states <= 0 &&
		    cur->nat_rule.ptr->src_nodes <= 0)
			pf_rm_rule(NULL, cur->nat_rule.ptr);
	}
	if (cur->anchor.ptr != NULL) {
		VERIFY(cur->anchor.ptr->states > 0);
		if (--cur->anchor.ptr->states <= 0)
			pf_rm_rule(NULL, cur->anchor.ptr);
	}
	pf_normalize_tcp_cleanup(cur);
	pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE);
	TAILQ_REMOVE(&state_list, cur, entry_list);
	if (cur->tag)
		pf_tag_unref(cur->tag);
	pool_put(&pf_state_pl, cur);
	pf_status.fcounters[FCNT_STATE_REMOVALS]++;
	VERIFY(pf_status.states > 0);
	pf_status.states--;
}
1678
/*
 * Examine up to 'maxcheck' states for expiry, resuming from where the
 * previous call stopped (static cursor 'cur') and wrapping to the head
 * of state_list when the end is reached.  Already-unlinked states are
 * freed; expired ones are unlinked and freed.
 */
void
pf_purge_expired_states(u_int32_t maxcheck)
{
	static struct pf_state *cur = NULL;
	struct pf_state *next;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	while (maxcheck--) {
		/* wrap to start of list when we hit the end */
		if (cur == NULL) {
			cur = TAILQ_FIRST(&state_list);
			if (cur == NULL)
				break;	/* list empty */
		}

		/* get next state, as cur may get deleted */
		next = TAILQ_NEXT(cur, entry_list);

		if (cur->timeout == PFTM_UNLINKED) {
			pf_free_state(cur);
		} else if (pf_state_expires(cur) <= pf_time_second()) {
			/* unlink and free expired state */
			pf_unlink_state(cur);
			pf_free_state(cur);
		}
		cur = next;
	}
}
1708
1709 int
1710 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw)
1711 {
1712 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1713
1714 if (aw->type != PF_ADDR_TABLE)
1715 return (0);
1716 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL)
1717 return (1);
1718 return (0);
1719 }
1720
1721 void
1722 pf_tbladdr_remove(struct pf_addr_wrap *aw)
1723 {
1724 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1725
1726 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
1727 return;
1728 pfr_detach_table(aw->p.tbl);
1729 aw->p.tbl = NULL;
1730 }
1731
1732 void
1733 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
1734 {
1735 struct pfr_ktable *kt = aw->p.tbl;
1736
1737 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
1738
1739 if (aw->type != PF_ADDR_TABLE || kt == NULL)
1740 return;
1741 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
1742 kt = kt->pfrkt_root;
1743 aw->p.tbl = NULL;
1744 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
1745 kt->pfrkt_cnt : -1;
1746 }
1747
/*
 * Print an address in human-readable form: dotted quad for AF_INET,
 * hex groups with "::"-style compression of the longest zero run for
 * AF_INET6.
 */
static void
pf_print_addr(struct pf_addr *addr, sa_family_t af)
{
	switch (af) {
#if INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart = 255, curend = 0,
		    maxstart = 0, maxend = 0;
		/*
		 * First pass: find the longest run of zero 16-bit groups.
		 * curstart == 255 means "no run in progress".
		 * NOTE(review): the `if (curstart)` test below skips a
		 * run beginning at group 0, so such a run is never
		 * promoted to maxstart/maxend -- quirk inherited from
		 * upstream pf; confirm before relying on exact output.
		 */
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				else
					curend = i;
			} else {
				if (curstart) {
					if ((curend - curstart) >
					    (maxend - maxstart)) {
						maxstart = curstart;
						maxend = curend;
						curstart = 255;
					}
				}
			}
		}
		/* second pass: print, compressing [maxstart, maxend] */
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (maxend != 7) {
					if (i == maxstart)
						printf(":");
				} else {
					if (i == maxend)
						printf(":");
				}
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		break;
	}
#endif /* INET6 */
	}
}
1803
1804 static void
1805 pf_print_sk_host(struct pf_state_host *sh, sa_family_t af, int proto,
1806 u_int8_t proto_variant)
1807 {
1808 pf_print_addr(&sh->addr, af);
1809
1810 switch (proto) {
1811 case IPPROTO_ESP:
1812 if (sh->xport.spi)
1813 printf("[%08x]", ntohl(sh->xport.spi));
1814 break;
1815
1816 case IPPROTO_GRE:
1817 if (proto_variant == PF_GRE_PPTP_VARIANT)
1818 printf("[%u]", ntohs(sh->xport.call_id));
1819 break;
1820
1821 case IPPROTO_TCP:
1822 case IPPROTO_UDP:
1823 printf("[%u]", ntohs(sh->xport.port));
1824 break;
1825
1826 default:
1827 break;
1828 }
1829 }
1830
1831 static void
1832 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
1833 {
1834 pf_print_addr(addr, af);
1835 if (p)
1836 printf("[%u]", ntohs(p));
1837 }
1838
/*
 * Dump one state entry: protocol, the four state-key endpoints (lan,
 * gwy, ext_lan, ext_gwy), both sequence windows, and the src/dst
 * connection-state numbers.
 */
void
pf_print_state(struct pf_state *s)
{
	struct pf_state_key *sk = s->state_key;
	switch (sk->proto) {
	case IPPROTO_ESP:
		printf("ESP ");
		break;
	case IPPROTO_GRE:
		printf("GRE%u ", sk->proto_variant);
		break;
	case IPPROTO_TCP:
		printf("TCP ");
		break;
	case IPPROTO_UDP:
		printf("UDP ");
		break;
	case IPPROTO_ICMP:
		printf("ICMP ");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPV6 ");
		break;
	default:
		printf("%u ", sk->proto);
		break;
	}
	pf_print_sk_host(&sk->lan, sk->af_lan, sk->proto, sk->proto_variant);
	printf(" ");
	pf_print_sk_host(&sk->gwy, sk->af_gwy, sk->proto, sk->proto_variant);
	printf(" ");
	pf_print_sk_host(&sk->ext_lan, sk->af_lan, sk->proto,
	    sk->proto_variant);
	printf(" ");
	pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto,
	    sk->proto_variant);
	/* source-side window; wscale only meaningful once both sides set */
	printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo,
	    s->src.seqhi, s->src.max_win, s->src.seqdiff);
	if (s->src.wscale && s->dst.wscale)
		printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK);
	printf("]");
	printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo,
	    s->dst.seqhi, s->dst.max_win, s->dst.seqdiff);
	if (s->src.wscale && s->dst.wscale)
		printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK);
	printf("]");
	printf(" %u:%u", s->src.state, s->dst.state);
}
1887
1888 void
1889 pf_print_flags(u_int8_t f)
1890 {
1891 if (f)
1892 printf(" ");
1893 if (f & TH_FIN)
1894 printf("F");
1895 if (f & TH_SYN)
1896 printf("S");
1897 if (f & TH_RST)
1898 printf("R");
1899 if (f & TH_PUSH)
1900 printf("P");
1901 if (f & TH_ACK)
1902 printf("A");
1903 if (f & TH_URG)
1904 printf("U");
1905 if (f & TH_ECE)
1906 printf("E");
1907 if (f & TH_CWR)
1908 printf("W");
1909 }
1910
/*
 * Advance every queued rule's skip[i] pointer up to (but not including)
 * 'cur'.  Relies on the locals 'head' and 'cur' of the expanding
 * function (pf_calc_skip_steps).
 */
#define PF_SET_SKIP_STEPS(i) \
	do { \
		while (head[i] != cur) { \
			head[i]->skip[i].ptr = cur; \
			head[i] = TAILQ_NEXT(head[i], entries); \
		} \
	} while (0)
1918
/*
 * Precompute the skip-step pointers for a rule queue: for each of the
 * PF_SKIP_* criteria, a rule's skip[i] points at the next rule that
 * differs in that criterion, letting the match loop jump over whole
 * runs of rules that would fail the same test.
 */
void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		{
			/*
			 * GRE/ESP don't use port ranges, so any rule
			 * using them always ends a source-port run.
			 */
			union pf_rule_xport *cx = &cur->src.xport;
			union pf_rule_xport *px = &prev->src.xport;

			switch (cur->proto) {
			case IPPROTO_GRE:
			case IPPROTO_ESP:
				PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
				break;
			default:
				if (prev->proto == IPPROTO_GRE ||
				    prev->proto == IPPROTO_ESP ||
				    cx->range.op != px->range.op ||
				    cx->range.port[0] != px->range.port[0] ||
				    cx->range.port[1] != px->range.port[1])
					PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
				break;
			}
		}
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		{
			/* dst side compares GRE call-id / ESP SPI instead */
			union pf_rule_xport *cx = &cur->dst.xport;
			union pf_rule_xport *px = &prev->dst.xport;

			switch (cur->proto) {
			case IPPROTO_GRE:
				if (cur->proto != prev->proto ||
				    cx->call_id != px->call_id)
					PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
				break;
			case IPPROTO_ESP:
				if (cur->proto != prev->proto ||
				    cx->spi != px->spi)
					PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
				break;
			default:
				if (prev->proto == IPPROTO_GRE ||
				    prev->proto == IPPROTO_ESP ||
				    cx->range.op != px->range.op ||
				    cx->range.port[0] != px->range.port[0] ||
				    cx->range.port[1] != px->range.port[1])
					PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
				break;
			}
		}

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	/* flush: point the remaining heads past the last rule (NULL) */
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
1996
1997 u_int32_t
1998 pf_calc_state_key_flowhash(struct pf_state_key *sk)
1999 {
2000 struct pf_flowhash_key fh __attribute__((aligned(8)));
2001 uint32_t flowhash = 0;
2002
2003 bzero(&fh, sizeof (fh));
2004 if (PF_ALEQ(&sk->lan.addr, &sk->ext_lan.addr, sk->af_lan)) {
2005 bcopy(&sk->lan.addr, &fh.ap1.addr, sizeof (fh.ap1.addr));
2006 bcopy(&sk->ext_lan.addr, &fh.ap2.addr, sizeof (fh.ap2.addr));
2007 } else {
2008 bcopy(&sk->ext_lan.addr, &fh.ap1.addr, sizeof (fh.ap1.addr));
2009 bcopy(&sk->lan.addr, &fh.ap2.addr, sizeof (fh.ap2.addr));
2010 }
2011 if (sk->lan.xport.spi <= sk->ext_lan.xport.spi) {
2012 fh.ap1.xport.spi = sk->lan.xport.spi;
2013 fh.ap2.xport.spi = sk->ext_lan.xport.spi;
2014 } else {
2015 fh.ap1.xport.spi = sk->ext_lan.xport.spi;
2016 fh.ap2.xport.spi = sk->lan.xport.spi;
2017 }
2018 fh.af = sk->af_lan;
2019 fh.proto = sk->proto;
2020
2021 try_again:
2022 flowhash = net_flowhash(&fh, sizeof (fh), pf_hash_seed);
2023 if (flowhash == 0) {
2024 /* try to get a non-zero flowhash */
2025 pf_hash_seed = RandomULong();
2026 goto try_again;
2027 }
2028
2029 return (flowhash);
2030 }
2031
/*
 * Return 1 when two address wraps differ for rule-matching purposes
 * (used when computing skip steps), 0 when equivalent.
 */
static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		/* dynamic interface wraps compare by backing ktable */
		return (aw1->p.dyn == NULL || aw2->p.dyn == NULL ||
		    aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	case PF_ADDR_RTLABEL:
		return (aw1->v.rtlabel != aw2->v.rtlabel);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}
2060
u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	/*
	 * Incrementally patch a 16-bit one's-complement checksum after a
	 * 16-bit word changes from 'old' to 'new' (RFC 1624 style).  For
	 * UDP (udp != 0) a zero checksum means "no checksum": an input
	 * of 0 passes through, and a computed 0 is returned as 0xffff.
	 */
	u_int32_t sum;

	if (udp && cksum == 0)
		return (0);
	sum = (u_int32_t)cksum + old - new;
	sum = (sum >> 16) + (sum & 0xffff);
	sum &= 0xffff;
	if (udp && sum == 0)
		return (0xffff);
	return ((u_int16_t)sum);
}
2075
2076 /*
2077 * change ip address & port
2078 * dir : packet direction
2079 * a : address to be changed
2080 * p : port to be changed
2081 * ic : ip header checksum
2082 * pc : protocol checksum
2083 * an : new ip address
2084 * pn : new port
2085 * u : should be 1 if UDP packet else 0
2086 * af : address family of the packet
2087 * afn : address family of the new address
2088 * ua : should be 1 if ip address needs to be updated in the packet else
2089 * only the checksum is recalculated & updated.
2090 */
static void
pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p,
    u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn,
    u_int8_t u, sa_family_t af, sa_family_t afn, int ua)
{
	/* See the parameter description in the comment above. */
	struct pf_addr ao;
	u_int16_t po = *p;

	PF_ACPY(&ao, a, af);
	if (ua)
		PF_ACPY(a, an, afn);

	*p = pn;

	/* outer switch: current family; inner: family after translation */
	switch (af) {
#if INET
	case AF_INET:
		switch (afn) {
		case AF_INET:
			/* IP header checksum covers the two v4 words */
			*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
			    ao.addr16[0], an->addr16[0], 0),
			    ao.addr16[1], an->addr16[1], 0);
			*p = pn;
			/*
			 * If the packet is originated from an ALG on the NAT gateway
			 * (source address is loopback or local), in which case the
			 * TCP/UDP checksum field contains the pseudo header checksum
			 * that's not yet complemented. A packet generated locally
			 * will have UDP/TCP CSUM flag set (gets set in protocol
			 * output).
			 */
			if (dir == PF_OUT && pbuf != NULL &&
			    (*pbuf->pb_csum_flags & (CSUM_TCP | CSUM_UDP))) {
				/* Pseudo-header checksum does not include ports */
				*pc = ~pf_cksum_fixup(pf_cksum_fixup(~*pc,
				    ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u);
			} else {
				*pc =
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    *pc, ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u),
				    po, pn, u);
			}
			break;
/* NOTE(review): `#ifdef INET6` here vs `#if INET6` elsewhere in this
 * file -- behaves the same while INET6 is defined non-zero; flagging
 * the inconsistency only. */
#ifdef INET6
		case AF_INET6:
			/* NAT46: fold the six new v6 words in from zero */
			*p = pn;
			*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(

			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    0, an->addr16[2], u),
			    0, an->addr16[3], u),
			    0, an->addr16[4], u),
			    0, an->addr16[5], u),
			    0, an->addr16[6], u),
			    0, an->addr16[7], u),
			    po, pn, u);
			break;
#endif /* INET6 */
		}
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		switch (afn) {
		case AF_INET6:
			/*
			 * If the packet is originated from an ALG on the NAT gateway
			 * (source address is loopback or local), in which case the
			 * TCP/UDP checksum field contains the pseudo header checksum
			 * that's not yet complemented.
			 * A packet generated locally
			 * will have UDP/TCP CSUM flag set (gets set in protocol
			 * output).
			 */
			if (dir == PF_OUT && pbuf != NULL &&
			    (*pbuf->pb_csum_flags & (CSUM_TCPIPV6 |
			    CSUM_UDPIPV6))) {
				/* Pseudo-header checksum does not include ports */
				*pc =
				    ~pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    ~*pc,
				    ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u),
				    ao.addr16[2], an->addr16[2], u),
				    ao.addr16[3], an->addr16[3], u),
				    ao.addr16[4], an->addr16[4], u),
				    ao.addr16[5], an->addr16[5], u),
				    ao.addr16[6], an->addr16[6], u),
				    ao.addr16[7], an->addr16[7], u);
			} else {
				*pc =
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
				    *pc,
				    ao.addr16[0], an->addr16[0], u),
				    ao.addr16[1], an->addr16[1], u),
				    ao.addr16[2], an->addr16[2], u),
				    ao.addr16[3], an->addr16[3], u),
				    ao.addr16[4], an->addr16[4], u),
				    ao.addr16[5], an->addr16[5], u),
				    ao.addr16[6], an->addr16[6], u),
				    ao.addr16[7], an->addr16[7], u),
				    po, pn, u);
			}
			break;
#ifdef INET
		case AF_INET:
			/* NAT64: fold the six removed v6 words out to zero */
			*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    ao.addr16[2], 0, u),
			    ao.addr16[3], 0, u),
			    ao.addr16[4], 0, u),
			    ao.addr16[5], 0, u),
			    ao.addr16[6], 0, u),
			    ao.addr16[7], 0, u),
			    po, pn, u);
			break;
#endif /* INET */
		}
		break;
#endif /* INET6 */
	}
}
2225
2226
/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t old;

	/* Go through memcpy so potentially unaligned callers are safe. */
	memcpy(&old, a, sizeof (old));
	memcpy(a, &an, sizeof (an));

	/* Fold the high and low 16-bit halves into the checksum. */
	*c = pf_cksum_fixup(*c, old >> 16, an >> 16, u);
	*c = pf_cksum_fixup(*c, old & 0xffff, an & 0xffff, u);
}
2238
2239 #if INET6
/*
 * Replace IPv6 address *a with *an and incrementally patch the 16-bit
 * Internet checksum *c for all eight changed 16-bit address words.
 * 'u' is forwarded to pf_cksum_fixup() unchanged (presumably the
 * UDP zero-checksum handling flag -- confirm against pf_cksum_fixup()).
 */
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr ao;

	/* Remember the old address, then install the new one. */
	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	/* One incremental fixup per 16-bit word of the 128-bit address. */
	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
2260
/*
 * Replace address *a (family 'af') with *an (family 'afn'), patching
 * the 16-bit checksum *c.  Handles same-family rewrites as well as
 * cross-family (NAT64-style) translation: when going 4->6 the six new
 * high words enter the sum against 0, and when going 6->4 the six old
 * words leave the sum against 0 (only words [0] and [1] carry the
 * 32-bit IPv4 address in both layouts).
 */
void
pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u,
    sa_family_t af, sa_family_t afn)
{
	struct pf_addr ao;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, afn);

	switch (af) {
	case AF_INET:
		switch (afn) {
		case AF_INET:
			/* 4 -> 4: plain 32-bit rewrite */
			pf_change_a(a, c, an->v4addr.s_addr, u);
			break;
		case AF_INET6:
			/* 4 -> 6: words [2..7] did not exist before */
			*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*c,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    0, an->addr16[2], u),
			    0, an->addr16[3], u),
			    0, an->addr16[4], u),
			    0, an->addr16[5], u),
			    0, an->addr16[6], u),
			    0, an->addr16[7], u);
			break;
		}
		break;
	case AF_INET6:
		switch (afn) {
		case AF_INET:
			/* 6 -> 4: words [2..7] disappear from the sum */
			*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
			    pf_cksum_fixup(pf_cksum_fixup(*c,
			    ao.addr16[0], an->addr16[0], u),
			    ao.addr16[1], an->addr16[1], u),
			    ao.addr16[2], 0, u),
			    ao.addr16[3], 0, u),
			    ao.addr16[4], 0, u),
			    ao.addr16[5], 0, u),
			    ao.addr16[6], 0, u),
			    ao.addr16[7], 0, u);
			break;
		case AF_INET6:
			/* 6 -> 6: full 128-bit rewrite */
			pf_change_a6(a, c, an, u);
			break;
		}
		break;
	}
}
2313
2314 #endif /* INET6 */
2315
/*
 * Rewrite the addresses/port quoted inside an ICMP error packet and
 * fix up every affected checksum:
 *   ia/ip - inner (quoted) address and transport port, rewritten to
 *           na/np; 'pc' is the inner transport checksum (may be NULL).
 *   h2c   - quoted IPv4 header checksum (IPv4 only).
 *   ic    - the ICMP/ICMPv6 checksum itself.
 *   oa/hc - outer address and the outer IPv4 header checksum.
 * Because the quoted header is part of the ICMP payload, every inner
 * change (port, checksum, address, quoted-header checksum) must also
 * be folded into *ic.
 */
static void
pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
    struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
    u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
{
	struct pf_addr oia, ooa;

	/* Keep copies of the old inner and outer addresses. */
	PF_ACPY(&oia, ia, af);
	PF_ACPY(&ooa, oa, af);

	/* Change inner protocol port, fix inner protocol checksum. */
	if (ip != NULL) {
		u_int16_t oip = *ip;
		u_int32_t opc = 0;

		if (pc != NULL)
			opc = *pc;
		*ip = np;
		if (pc != NULL)
			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
		/* The quoted port is ICMP payload: patch *ic too. */
		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
		if (pc != NULL)
			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
	}
	/* Change inner ip address, fix inner ip and icmp checksums. */
	PF_ACPY(ia, na, af);
	switch (af) {
#if INET
	case AF_INET: {
		u_int32_t oh2c = *h2c;

		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], 0),
		    oia.addr16[1], ia->addr16[1], 0);
		/* Quoted header checksum changed; fold that into *ic too. */
		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
		break;
	}
#endif /* INET */
#if INET6
	case AF_INET6:
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    oia.addr16[0], ia->addr16[0], u),
		    oia.addr16[1], ia->addr16[1], u),
		    oia.addr16[2], ia->addr16[2], u),
		    oia.addr16[3], ia->addr16[3], u),
		    oia.addr16[4], ia->addr16[4], u),
		    oia.addr16[5], ia->addr16[5], u),
		    oia.addr16[6], ia->addr16[6], u),
		    oia.addr16[7], ia->addr16[7], u);
		break;
#endif /* INET6 */
	}
	/* Change outer ip address, fix outer ip or icmpv6 checksum. */
	PF_ACPY(oa, na, af);
	switch (af) {
#if INET
	case AF_INET:
		*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
		    ooa.addr16[0], oa->addr16[0], 0),
		    ooa.addr16[1], oa->addr16[1], 0);
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		/* ICMPv6 checksum covers the outer pseudo-header. */
		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ooa.addr16[0], oa->addr16[0], u),
		    ooa.addr16[1], oa->addr16[1], u),
		    ooa.addr16[2], oa->addr16[2], u),
		    ooa.addr16[3], oa->addr16[3], u),
		    ooa.addr16[4], oa->addr16[4], u),
		    ooa.addr16[5], oa->addr16[5], u),
		    ooa.addr16[6], oa->addr16[6], u),
		    ooa.addr16[7], oa->addr16[7], u);
		break;
#endif /* INET6 */
	}
}
2400
2401
/*
 * Need to modulate the sequence numbers in the TCP SACK option
 * (credits to Krzysztof Pfaff for report and patch)
 *
 * Walks the TCP options of the segment at offset 'off', shifting the
 * start/end sequence number of every SACK block by dst->seqdiff and
 * patching th_sum incrementally via pf_change_a().  Returns 0 when
 * nothing needed changing, -1 when the packet could not be made
 * writable, otherwise the (non-zero) copy-back length.
 */
static int
pf_modulate_sack(pbuf_t *pbuf, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *dst)
{
	int hlen = (th->th_off << 2) - sizeof (*th), thoptlen = hlen;
	u_int8_t opts[MAX_TCPOPTLEN], *opt = opts;
	int copyback = 0, i, olen;
	struct sackblk sack;

#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
	/* No room for even one SACK block, or options not pullable: done. */
	if (hlen < TCPOLEN_SACKLEN ||
	    !pf_pull_hdr(pbuf, off + sizeof (*th), opts, hlen, NULL, NULL, pd->af))
		return (0);

	while (hlen >= TCPOLEN_SACKLEN) {
		olen = opt[1];
		switch (*opt) {
		case TCPOPT_EOL:	/* FALLTHROUGH */
		case TCPOPT_NOP:
			/* Single-byte options carry no length field. */
			opt++;
			hlen--;
			break;
		case TCPOPT_SACK:
			/* Clamp a bogus option length to what remains. */
			if (olen > hlen)
				olen = hlen;
			if (olen >= TCPOLEN_SACKLEN) {
				for (i = 2; i + TCPOLEN_SACK <= olen;
				    i += TCPOLEN_SACK) {
					/* memcpy: opt[i] may be unaligned. */
					memcpy(&sack, &opt[i], sizeof (sack));
					pf_change_a(&sack.start, &th->th_sum,
					    htonl(ntohl(sack.start) -
					    dst->seqdiff), 0);
					pf_change_a(&sack.end, &th->th_sum,
					    htonl(ntohl(sack.end) -
					    dst->seqdiff), 0);
					memcpy(&opt[i], &sack, sizeof (sack));
				}
				copyback = off + sizeof (*th) + thoptlen;
			}
			/* FALLTHROUGH */
		default:
			if (olen < 2)
				olen = 2;
			hlen -= olen;
			opt += olen;
		}
	}

	if (copyback) {
		if (pf_lazy_makewritable(pd, pbuf, copyback) == NULL)
			return (-1);
		/* Write the (possibly) modified options back in place. */
		pbuf_copy_back(pbuf, off + sizeof (*th), thoptlen, opts);
	}
	return (copyback);
}
2461
2462 /*
2463 * XXX
2464 *
2465 * The following functions (pf_send_tcp and pf_send_icmp) are somewhat
2466 * special in that they originate "spurious" packets rather than
2467 * filter/NAT existing packets. As such, they're not a great fit for
2468 * the 'pbuf' shim, which assumes the underlying packet buffers are
2469 * allocated elsewhere.
2470 *
2471 * Since these functions are rarely used, we'll carry on allocating mbufs
2472 * and passing them to the IP stack for eventual routing.
2473 */
2474 static void
2475 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
2476 const struct pf_addr *saddr, const struct pf_addr *daddr,
2477 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2478 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2479 u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp)
2480 {
2481 #pragma unused(eh, ifp)
2482 struct mbuf *m;
2483 int len, tlen;
2484 #if INET
2485 struct ip *h = NULL;
2486 #endif /* INET */
2487 #if INET6
2488 struct ip6_hdr *h6 = NULL;
2489 #endif /* INET6 */
2490 struct tcphdr *th = NULL;
2491 char *opt;
2492 struct pf_mtag *pf_mtag;
2493
2494 /* maximum segment size tcp option */
2495 tlen = sizeof (struct tcphdr);
2496 if (mss)
2497 tlen += 4;
2498
2499 switch (af) {
2500 #if INET
2501 case AF_INET:
2502 len = sizeof (struct ip) + tlen;
2503 break;
2504 #endif /* INET */
2505 #if INET6
2506 case AF_INET6:
2507 len = sizeof (struct ip6_hdr) + tlen;
2508 break;
2509 #endif /* INET6 */
2510 default:
2511 panic("pf_send_tcp: not AF_INET or AF_INET6!");
2512 return;
2513 }
2514
2515 /* create outgoing mbuf */
2516 m = m_gethdr(M_DONTWAIT, MT_HEADER);
2517 if (m == NULL)
2518 return;
2519
2520 if ((pf_mtag = pf_get_mtag(m)) == NULL)
2521 return;
2522
2523 if (tag)
2524 pf_mtag->pftag_flags |= PF_TAG_GENERATED;
2525 pf_mtag->pftag_tag = rtag;
2526
2527 if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid))
2528 pf_mtag->pftag_rtableid = r->rtableid;
2529
2530 #if PF_ECN
2531 /* add hints for ecn */
2532 pf_mtag->pftag_hdr = mtod(m, struct ip *);
2533 /* record address family */
2534 pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
2535 switch (af) {
2536 #if INET
2537 case AF_INET:
2538 pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
2539 break;
2540 #endif /* INET */
2541 #if INET6
2542 case AF_INET6:
2543 pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
2544 break;
2545 #endif /* INET6 */
2546 }
2547 #endif /* PF_ECN */
2548
2549 /* indicate this is TCP */
2550 m->m_pkthdr.pkt_proto = IPPROTO_TCP;
2551
2552 /* Make sure headers are 32-bit aligned */
2553 m->m_data += max_linkhdr;
2554 m->m_pkthdr.len = m->m_len = len;
2555 m->m_pkthdr.rcvif = NULL;
2556 bzero(m->m_data, len);
2557 switch (af) {
2558 #if INET
2559 case AF_INET:
2560 h = mtod(m, struct ip *);
2561
2562 /* IP header fields included in the TCP checksum */
2563 h->ip_p = IPPROTO_TCP;
2564 h->ip_len = htons(tlen);
2565 h->ip_src.s_addr = saddr->v4addr.s_addr;
2566 h->ip_dst.s_addr = daddr->v4addr.s_addr;
2567
2568 th = (struct tcphdr *)(void *)((caddr_t)h + sizeof (struct ip));
2569 break;
2570 #endif /* INET */
2571 #if INET6
2572 case AF_INET6:
2573 h6 = mtod(m, struct ip6_hdr *);
2574
2575 /* IP header fields included in the TCP checksum */
2576 h6->ip6_nxt = IPPROTO_TCP;
2577 h6->ip6_plen = htons(tlen);
2578 memcpy(&h6->ip6_src, &saddr->v6addr, sizeof (struct in6_addr));
2579 memcpy(&h6->ip6_dst, &daddr->v6addr, sizeof (struct in6_addr));
2580
2581 th = (struct tcphdr *)(void *)
2582 ((caddr_t)h6 + sizeof (struct ip6_hdr));
2583 break;
2584 #endif /* INET6 */
2585 }
2586
2587 /* TCP header */
2588 th->th_sport = sport;
2589 th->th_dport = dport;
2590 th->th_seq = htonl(seq);
2591 th->th_ack = htonl(ack);
2592 th->th_off = tlen >> 2;
2593 th->th_flags = flags;
2594 th->th_win = htons(win);
2595
2596 if (mss) {
2597 opt = (char *)(th + 1);
2598 opt[0] = TCPOPT_MAXSEG;
2599 opt[1] = 4;
2600 #if BYTE_ORDER != BIG_ENDIAN
2601 HTONS(mss);
2602 #endif
2603 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2604 }
2605
2606 switch (af) {
2607 #if INET
2608 case AF_INET: {
2609 struct route ro;
2610
2611 /* TCP checksum */
2612 th->th_sum = in_cksum(m, len);
2613
2614 /* Finish the IP header */
2615 h->ip_v = 4;
2616 h->ip_hl = sizeof (*h) >> 2;
2617 h->ip_tos = IPTOS_LOWDELAY;
2618 /*
2619 * ip_output() expects ip_len and ip_off to be in host order.
2620 */
2621 h->ip_len = len;
2622 h->ip_off = (path_mtu_discovery ? IP_DF : 0);
2623 h->ip_ttl = ttl ? ttl : ip_defttl;
2624 h->ip_sum = 0;
2625
2626 bzero(&ro, sizeof (ro));
2627 ip_output(m, NULL, &ro, 0, NULL, NULL);
2628 ROUTE_RELEASE(&ro);
2629 break;
2630 }
2631 #endif /* INET */
2632 #if INET6
2633 case AF_INET6: {
2634 struct route_in6 ro6;
2635
2636 /* TCP checksum */
2637 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2638 sizeof (struct ip6_hdr), tlen);
2639
2640 h6->ip6_vfc |= IPV6_VERSION;
2641 h6->ip6_hlim = IPV6_DEFHLIM;
2642
2643 bzero(&ro6, sizeof (ro6));
2644 ip6_output(m, NULL, &ro6, 0, NULL, NULL, NULL);
2645 ROUTE_RELEASE(&ro6);
2646 break;
2647 }
2648 #endif /* INET6 */
2649 }
2650 }
2651
2652 static void
2653 pf_send_icmp(pbuf_t *pbuf, u_int8_t type, u_int8_t code, sa_family_t af,
2654 struct pf_rule *r)
2655 {
2656 struct mbuf *m0;
2657 struct pf_mtag *pf_mtag;
2658
2659 m0 = pbuf_clone_to_mbuf(pbuf);
2660 if (m0 == NULL)
2661 return;
2662
2663 if ((pf_mtag = pf_get_mtag(m0)) == NULL)
2664 return;
2665
2666 pf_mtag->pftag_flags |= PF_TAG_GENERATED;
2667
2668 if (PF_RTABLEID_IS_VALID(r->rtableid))
2669 pf_mtag->pftag_rtableid = r->rtableid;
2670
2671 #if PF_ECN
2672 /* add hints for ecn */
2673 pf_mtag->pftag_hdr = mtod(m0, struct ip *);
2674 /* record address family */
2675 pf_mtag->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
2676 switch (af) {
2677 #if INET
2678 case AF_INET:
2679 pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
2680 m0->m_pkthdr.pkt_proto = IPPROTO_ICMP;
2681 break;
2682 #endif /* INET */
2683 #if INET6
2684 case AF_INET6:
2685 pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
2686 m0->m_pkthdr.pkt_proto = IPPROTO_ICMPV6;
2687 break;
2688 #endif /* INET6 */
2689 }
2690 #endif /* PF_ECN */
2691
2692 switch (af) {
2693 #if INET
2694 case AF_INET:
2695 icmp_error(m0, type, code, 0, 0);
2696 break;
2697 #endif /* INET */
2698 #if INET6
2699 case AF_INET6:
2700 icmp6_error(m0, type, code, 0);
2701 break;
2702 #endif /* INET6 */
2703 }
2704 }
2705
2706 /*
2707 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
2708 * If n is 0, they match if they are equal. If n is != 0, they match if they
2709 * are different.
2710 */
2711 int
2712 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2713 struct pf_addr *b, sa_family_t af)
2714 {
2715 int match = 0;
2716
2717 switch (af) {
2718 #if INET
2719 case AF_INET:
2720 if ((a->addr32[0] & m->addr32[0]) ==
2721 (b->addr32[0] & m->addr32[0]))
2722 match++;
2723 break;
2724 #endif /* INET */
2725 #if INET6
2726 case AF_INET6:
2727 if (((a->addr32[0] & m->addr32[0]) ==
2728 (b->addr32[0] & m->addr32[0])) &&
2729 ((a->addr32[1] & m->addr32[1]) ==
2730 (b->addr32[1] & m->addr32[1])) &&
2731 ((a->addr32[2] & m->addr32[2]) ==
2732 (b->addr32[2] & m->addr32[2])) &&
2733 ((a->addr32[3] & m->addr32[3]) ==
2734 (b->addr32[3] & m->addr32[3])))
2735 match++;
2736 break;
2737 #endif /* INET6 */
2738 }
2739 if (match) {
2740 if (n)
2741 return (0);
2742 else
2743 return (1);
2744 } else {
2745 if (n)
2746 return (1);
2747 else
2748 return (0);
2749 }
2750 }
2751
2752 /*
2753 * Return 1 if b <= a <= e, otherwise return 0.
2754 */
2755 int
2756 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2757 struct pf_addr *a, sa_family_t af)
2758 {
2759 switch (af) {
2760 #if INET
2761 case AF_INET:
2762 if ((a->addr32[0] < b->addr32[0]) ||
2763 (a->addr32[0] > e->addr32[0]))
2764 return (0);
2765 break;
2766 #endif /* INET */
2767 #if INET6
2768 case AF_INET6: {
2769 int i;
2770
2771 /* check a >= b */
2772 for (i = 0; i < 4; ++i)
2773 if (a->addr32[i] > b->addr32[i])
2774 break;
2775 else if (a->addr32[i] < b->addr32[i])
2776 return (0);
2777 /* check a <= e */
2778 for (i = 0; i < 4; ++i)
2779 if (a->addr32[i] < e->addr32[i])
2780 break;
2781 else if (a->addr32[i] > e->addr32[i])
2782 return (0);
2783 break;
2784 }
2785 #endif /* INET6 */
2786 }
2787 return (1);
2788 }
2789
2790 int
2791 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2792 {
2793 switch (op) {
2794 case PF_OP_IRG:
2795 return ((p > a1) && (p < a2));
2796 case PF_OP_XRG:
2797 return ((p < a1) || (p > a2));
2798 case PF_OP_RRG:
2799 return ((p >= a1) && (p <= a2));
2800 case PF_OP_EQ:
2801 return (p == a1);
2802 case PF_OP_NE:
2803 return (p != a1);
2804 case PF_OP_LT:
2805 return (p < a1);
2806 case PF_OP_LE:
2807 return (p <= a1);
2808 case PF_OP_GT:
2809 return (p > a1);
2810 case PF_OP_GE:
2811 return (p >= a1);
2812 }
2813 return (0); /* never reached */
2814 }
2815
/*
 * Port comparison wrapper around pf_match(): converts the
 * network-byte-order operands to host order first.
 */
int
pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
{
	u_int16_t ha1 = a1, ha2 = a2, hp = p;

#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ha1);
	NTOHS(ha2);
	NTOHS(hp);
#endif
	return (pf_match(op, ha1, ha2, hp));
}
2826
2827 int
2828 pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx,
2829 union pf_state_xport *sx)
2830 {
2831 int d = !0;
2832
2833 if (sx) {
2834 switch (proto) {
2835 case IPPROTO_GRE:
2836 if (proto_variant == PF_GRE_PPTP_VARIANT)
2837 d = (rx->call_id == sx->call_id);
2838 break;
2839
2840 case IPPROTO_ESP:
2841 d = (rx->spi == sx->spi);
2842 break;
2843
2844 case IPPROTO_TCP:
2845 case IPPROTO_UDP:
2846 case IPPROTO_ICMP:
2847 case IPPROTO_ICMPV6:
2848 if (rx->range.op)
2849 d = pf_match_port(rx->range.op,
2850 rx->range.port[0], rx->range.port[1],
2851 sx->port);
2852 break;
2853
2854 default:
2855 break;
2856 }
2857 }
2858
2859 return (d);
2860 }
2861
2862 int
2863 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2864 {
2865 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2866 return (0);
2867 return (pf_match(op, a1, a2, u));
2868 }
2869
2870 int
2871 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2872 {
2873 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2874 return (0);
2875 return (pf_match(op, a1, a2, g));
2876 }
2877
2878 static int
2879 pf_match_tag(struct pf_rule *r, struct pf_mtag *pf_mtag,
2880 int *tag)
2881 {
2882 if (*tag == -1)
2883 *tag = pf_mtag->pftag_tag;
2884
2885 return ((!r->match_tag_not && r->match_tag == *tag) ||
2886 (r->match_tag_not && r->match_tag != *tag));
2887 }
2888
/*
 * Attach PF metadata to the packet: a rule tag (when 'tag' > 0), a
 * routing table id (when valid), and -- if 'pd' carries PKTF_FLOW_ID
 * -- the flow bookkeeping fields of the packet buffer.  Returns 0 on
 * success (or when there was nothing to record), 1 when no pf_mtag
 * could be attached.
 */
int
pf_tag_packet(pbuf_t *pbuf, struct pf_mtag *pf_mtag, int tag,
    unsigned int rtableid, struct pf_pdesc *pd)
{
	/* Fast path: nothing to record for this packet. */
	if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid) &&
	    (pd == NULL || !(pd->pktflags & PKTF_FLOW_ID)))
		return (0);

	/* Lazily attach the PF metadata tag if the caller had none. */
	if (pf_mtag == NULL && (pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL)
		return (1);

	if (tag > 0)
		pf_mtag->pftag_tag = tag;
	if (PF_RTABLEID_IS_VALID(rtableid))
		pf_mtag->pftag_rtableid = rtableid;
	if (pd != NULL && (pd->pktflags & PKTF_FLOW_ID)) {
		/* Propagate flow identification into the packet buffer. */
		*pbuf->pb_flowsrc = pd->flowsrc;
		*pbuf->pb_flowid = pd->flowhash;
		*pbuf->pb_flags |= pd->pktflags;
		*pbuf->pb_proto = pd->proto;
	}

	return (0);
}
2913
/*
 * Descend into the anchor attached to rule *r during ruleset
 * evaluation, pushing a frame onto the global pf_anchor_stack.
 * On return, *rs and *r point at the anchor's ruleset and its first
 * rule (or, for wildcard anchors, at the first child ruleset).  On
 * stack overflow the anchor is skipped and evaluation continues with
 * the next rule.  'n' selects the active rule queue; 'a' (optional)
 * receives the outermost anchor rule; '*match' is reset for the
 * sub-ruleset.
 */
void
pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n,
    struct pf_rule **r, struct pf_rule **a, int *match)
{
	struct pf_anchor_stackframe *f;

	(*r)->anchor->match = 0;
	if (match)
		*match = 0;
	if (*depth >= (int)sizeof (pf_anchor_stack) /
	    (int)sizeof (pf_anchor_stack[0])) {
		printf("pf_step_into_anchor: stack overflow\n");
		*r = TAILQ_NEXT(*r, entries);
		return;
	} else if (*depth == 0 && a != NULL)
		*a = *r;
	/* Push a frame recording where to resume on the way out. */
	f = pf_anchor_stack + (*depth)++;
	f->rs = *rs;
	f->r = *r;
	if ((*r)->anchor_wildcard) {
		/* Wildcard anchor: iterate over all child anchors. */
		f->parent = &(*r)->anchor->children;
		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) ==
		    NULL) {
			*r = NULL;
			return;
		}
		*rs = &f->child->ruleset;
	} else {
		f->parent = NULL;
		f->child = NULL;
		*rs = &(*r)->anchor->ruleset;
	}
	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
}
2948
2949 int
2950 pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n,
2951 struct pf_rule **r, struct pf_rule **a, int *match)
2952 {
2953 struct pf_anchor_stackframe *f;
2954 int quick = 0;
2955
2956 do {
2957 if (*depth <= 0)
2958 break;
2959 f = pf_anchor_stack + *depth - 1;
2960 if (f->parent != NULL && f->child != NULL) {
2961 if (f->child->match ||
2962 (match != NULL && *match)) {
2963 f->r->anchor->match = 1;
2964 *match = 0;
2965 }
2966 f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
2967 if (f->child != NULL) {
2968 *rs = &f->child->ruleset;
2969 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2970 if (*r == NULL)
2971 continue;
2972 else
2973 break;
2974 }
2975 }
2976 (*depth)--;
2977 if (*depth == 0 && a != NULL)
2978 *a = NULL;
2979 *rs = f->rs;
2980 if (f->r->anchor->match || (match != NULL && *match))
2981 quick = f->r->quick;
2982 *r = TAILQ_NEXT(f->r, entries);
2983 } while (*r == NULL);
2984
2985 return (quick);
2986 }
2987
2988 #if INET6
2989 void
2990 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2991 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2992 {
2993 switch (af) {
2994 #if INET
2995 case AF_INET:
2996 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2997 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
2998 break;
2999 #endif /* INET */
3000 case AF_INET6:
3001 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3002 ((rmask->addr32[0] ^ 0xffffffff) & saddr->addr32[0]);
3003 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3004 ((rmask->addr32[1] ^ 0xffffffff) & saddr->addr32[1]);
3005 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3006 ((rmask->addr32[2] ^ 0xffffffff) & saddr->addr32[2]);
3007 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3008 ((rmask->addr32[3] ^ 0xffffffff) & saddr->addr32[3]);
3009 break;
3010 }
3011 }
3012
3013 void
3014 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3015 {
3016 switch (af) {
3017 #if INET
3018 case AF_INET:
3019 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3020 break;
3021 #endif /* INET */
3022 case AF_INET6:
3023 if (addr->addr32[3] == 0xffffffff) {
3024 addr->addr32[3] = 0;
3025 if (addr->addr32[2] == 0xffffffff) {
3026 addr->addr32[2] = 0;
3027 if (addr->addr32[1] == 0xffffffff) {
3028 addr->addr32[1] = 0;
3029 addr->addr32[0] =
3030 htonl(ntohl(addr->addr32[0]) + 1);
3031 } else
3032 addr->addr32[1] =
3033 htonl(ntohl(addr->addr32[1]) + 1);
3034 } else
3035 addr->addr32[2] =
3036 htonl(ntohl(addr->addr32[2]) + 1);
3037 } else
3038 addr->addr32[3] =
3039 htonl(ntohl(addr->addr32[3]) + 1);
3040 break;
3041 }
3042 }
3043 #endif /* INET6 */
3044
/*
 * One round of a 3x32-bit mixing function (used by pf_hash() below).
 * NOTE: each argument is evaluated many times -- only pass
 * side-effect-free lvalues.
 */
#define mix(a, b, c) \
	do { \
		a -= b; a -= c; a ^= (c >> 13); \
		b -= c; b -= a; b ^= (a << 8); \
		c -= a; c -= b; c ^= (b >> 13); \
		a -= b; a -= c; a ^= (c >> 12); \
		b -= c; b -= a; b ^= (a << 16); \
		c -= a; c -= b; c ^= (b >> 5); \
		a -= b; a -= c; a ^= (c >> 3); \
		b -= c; b -= a; b ^= (a << 10); \
		c -= a; c -= b; c ^= (b >> 15); \
	} while (0)
3057
/*
 * hash function based on bridge_hash in if_bridge.c
 *
 * Hashes 'inaddr' with the pool key into 'hash' (32 bits of output
 * for IPv4, 128 for IPv6).  Deterministic for a given key, used by
 * the PF_POOL_SRCHASH address selection in pf_map_addr().
 */
static void
pf_hash(struct pf_addr *inaddr, struct pf_addr *hash,
    struct pf_poolhashkey *key, sa_family_t af)
{
	u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0];

	switch (af) {
#if INET
	case AF_INET:
		a += inaddr->addr32[0];
		b += key->key32[1];
		mix(a, b, c);
		hash->addr32[0] = c + key->key32[2];
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		/* Four mix rounds, one per 32 bits of output. */
		a += inaddr->addr32[0];
		b += inaddr->addr32[2];
		mix(a, b, c);
		hash->addr32[0] = c;
		a += inaddr->addr32[1];
		b += inaddr->addr32[3];
		c += key->key32[1];
		mix(a, b, c);
		hash->addr32[1] = c;
		a += inaddr->addr32[2];
		b += inaddr->addr32[1];
		c += key->key32[2];
		mix(a, b, c);
		hash->addr32[2] = c;
		a += inaddr->addr32[3];
		b += inaddr->addr32[0];
		c += key->key32[3];
		mix(a, b, c);
		hash->addr32[3] = c;
		break;
#endif /* INET6 */
	}
}
3101
/*
 * Select the translation/route address 'naddr' for source 'saddr'
 * according to rule 'r's pool options (none, bitmask, random,
 * source-hash or round-robin), honouring sticky-address source
 * tracking via *sn.  'init_addr' records the first address tried so
 * callers (e.g. pf_get_sport()) can detect pool wrap-around.
 * Returns 0 on success, 1 when no usable address of the pool's
 * family could be found.  Note the pool family 'rpool->af' may
 * differ from the packet family 'af' (NAT64).
 */
static int
pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn)
{
	unsigned char hash[16];
	struct pf_pool *rpool = &r->rpool;
	struct pf_addr *raddr = &rpool->cur->addr.v.a.addr;
	struct pf_addr *rmask = &rpool->cur->addr.v.a.mask;
	struct pf_pooladdr *acur = rpool->cur;
	struct pf_src_node k;

	/*
	 * Sticky address: reuse the address previously mapped for this
	 * source, if a tracking node with a non-zero raddr exists.
	 */
	if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR &&
	    (r->rpool.opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		k.af = af;
		PF_ACPY(&k.addr, saddr, af);
		if (r->rule_flag & PFRULE_RULESRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR)
			k.rule.ptr = r;
		else
			k.rule.ptr = NULL;
		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
		if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, rpool->af)) {
			PF_ACPY(naddr, &(*sn)->raddr, rpool->af);
			if (pf_status.debug >= PF_DEBUG_MISC) {
				printf("pf_map_addr: src tracking maps ");
				pf_print_host(&k.addr, 0, af);
				printf(" to ");
				pf_print_host(naddr, 0, rpool->af);
				printf("\n");
			}
			return (0);
		}
	}

	/* Resolve where raddr/rmask come from for this pool entry. */
	if (rpool->cur->addr.type == PF_ADDR_NOROUTE)
		return (1);
	if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
		if (rpool->cur->addr.p.dyn == NULL)
			return (1);
		switch (rpool->af) {
#if INET
		case AF_INET:
			if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr4;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask4;
			break;
#endif /* INET */
#if INET6
		case AF_INET6:
			if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 &&
			    (rpool->opts & PF_POOL_TYPEMASK) !=
			    PF_POOL_ROUNDROBIN)
				return (1);
			raddr = &rpool->cur->addr.p.dyn->pfid_addr6;
			rmask = &rpool->cur->addr.p.dyn->pfid_mask6;
			break;
#endif /* INET6 */
		}
	} else if (rpool->cur->addr.type == PF_ADDR_TABLE) {
		if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN)
			return (1);	/* unsupported */
	} else {
		raddr = &rpool->cur->addr.v.a.addr;
		rmask = &rpool->cur->addr.v.a.mask;
	}

	switch (rpool->opts & PF_POOL_TYPEMASK) {
	case PF_POOL_NONE:
		PF_ACPY(naddr, raddr, rpool->af);
		break;
	case PF_POOL_BITMASK:
		/* Network bits from the pool, host bits from the source. */
		ASSERT(af == rpool->af);
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		break;
	case PF_POOL_RANDOM:
		if (init_addr != NULL && PF_AZERO(init_addr, rpool->af)) {
			/* First try: seed the per-pool counter randomly. */
			switch (af) {
#if INET
			case AF_INET:
				rpool->counter.addr32[0] = htonl(random());
				break;
#endif /* INET */
#if INET6
			case AF_INET6:
				/* Only randomize unmasked (host) words. */
				if (rmask->addr32[3] != 0xffffffff)
					rpool->counter.addr32[3] =
					    RandomULong();
				else
					break;
				if (rmask->addr32[2] != 0xffffffff)
					rpool->counter.addr32[2] =
					    RandomULong();
				else
					break;
				if (rmask->addr32[1] != 0xffffffff)
					rpool->counter.addr32[1] =
					    RandomULong();
				else
					break;
				if (rmask->addr32[0] != 0xffffffff)
					rpool->counter.addr32[0] =
					    RandomULong();
				break;
#endif /* INET6 */
			}
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter,
			    rpool->af);
			PF_ACPY(init_addr, naddr, rpool->af);

		} else {
			/* Retry: step sequentially from the last attempt. */
			PF_AINC(&rpool->counter, rpool->af);
			PF_POOLMASK(naddr, raddr, rmask, &rpool->counter,
			    rpool->af);
		}
		break;
	case PF_POOL_SRCHASH:
		ASSERT(af == rpool->af);
		PF_POOLMASK(naddr, raddr, rmask, saddr, af);
		pf_hash(saddr, (struct pf_addr *)(void *)&hash,
		    &rpool->key, af);
		PF_POOLMASK(naddr, raddr, rmask,
		    (struct pf_addr *)(void *)&hash, af);
		break;
	case PF_POOL_ROUNDROBIN:
		/* Continue from rpool->counter if it is still valid. */
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			if (!pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af))
				goto get_addr;
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			if (rpool->cur->addr.p.dyn != NULL &&
			    !pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, af))
				goto get_addr;
		} else if (pf_match_addr(0, raddr, rmask, &rpool->counter,
		    rpool->af))
			goto get_addr;

	try_next:
		/* Advance to the next pool entry (wrapping around). */
		if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL)
			rpool->cur = TAILQ_FIRST(&rpool->list);
		if (rpool->cur->addr.type == PF_ADDR_TABLE) {
			rpool->tblidx = -1;
			if (pfr_pool_get(rpool->cur->addr.p.tbl,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				/* table contains no address of type
				 * 'rpool->af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) {
			rpool->tblidx = -1;
			if (rpool->cur->addr.p.dyn == NULL)
				return (1);
			if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt,
			    &rpool->tblidx, &rpool->counter,
			    &raddr, &rmask, rpool->af)) {
				/* table contains no address of type
				 * 'rpool->af' */
				if (rpool->cur != acur)
					goto try_next;
				return (1);
			}
		} else {
			raddr = &rpool->cur->addr.v.a.addr;
			rmask = &rpool->cur->addr.v.a.mask;
			PF_ACPY(&rpool->counter, raddr, rpool->af);
		}

	get_addr:
		PF_ACPY(naddr, &rpool->counter, rpool->af);
		if (init_addr != NULL && PF_AZERO(init_addr, rpool->af))
			PF_ACPY(init_addr, naddr, rpool->af);
		PF_AINC(&rpool->counter, rpool->af);
		break;
	}
	/* Remember the mapping for sticky-address tracking. */
	if (*sn != NULL)
		PF_ACPY(&(*sn)->raddr, naddr, rpool->af);

	if (pf_status.debug >= PF_DEBUG_MISC &&
	    (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) {
		printf("pf_map_addr: selected address ");
		pf_print_host(naddr, 0, rpool->af);
		printf("\n");
	}

	return (0);
}
3297
3298 static int
3299 pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r,
3300 struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
3301 union pf_state_xport *dxport, struct pf_addr *naddr,
3302 union pf_state_xport *nxport, struct pf_src_node **sn
3303 )
3304 {
3305 #pragma unused(kif)
3306 struct pf_state_key_cmp key;
3307 struct pf_addr init_addr;
3308 unsigned int cut;
3309 sa_family_t af = pd->af;
3310 u_int8_t proto = pd->proto;
3311 unsigned int low = r->rpool.proxy_port[0];
3312 unsigned int high = r->rpool.proxy_port[1];
3313
3314 bzero(&init_addr, sizeof (init_addr));
3315 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3316 return (1);
3317
3318 if (proto == IPPROTO_ICMP) {
3319 low = 1;
3320 high = 65535;
3321 }
3322
3323 if (!nxport)
3324 return (0); /* No output necessary. */
3325
3326 /*--- Special mapping rules for UDP ---*/
3327 if (proto == IPPROTO_UDP) {
3328
3329 /*--- Never float IKE source port ---*/
3330 if (ntohs(sxport->port) == PF_IKE_PORT) {
3331 nxport->port = sxport->port;
3332 return (0);
3333 }
3334
3335 /*--- Apply exterior mapping options ---*/
3336 if (r->extmap > PF_EXTMAP_APD) {
3337 struct pf_state *s;
3338
3339 TAILQ_FOREACH(s, &state_list, entry_list) {
3340 struct pf_state_key *sk = s->state_key;
3341 if (!sk)
3342 continue;
3343 if (s->nat_rule.ptr != r)
3344 continue;
3345 if (sk->proto != IPPROTO_UDP ||
3346 sk->af_lan != af)
3347 continue;
3348 if (sk->lan.xport.port != sxport->port)
3349 continue;
3350 if (PF_ANEQ(&sk->lan.addr, saddr, af))
3351 continue;
3352 if (r->extmap < PF_EXTMAP_EI &&
3353 PF_ANEQ(&sk->ext_lan.addr, daddr, af))
3354 continue;
3355
3356 nxport->port = sk->gwy.xport.port;
3357 return (0);
3358 }
3359 }
3360 } else if (proto == IPPROTO_TCP) {
3361 struct pf_state* s;
3362 /*
3363 * APPLE MODIFICATION: <rdar://problem/6546358>
3364 * Fix allows....NAT to use a single binding for TCP session
3365 * with same source IP and source port
3366 */
3367 TAILQ_FOREACH(s, &state_list, entry_list) {
3368 struct pf_state_key* sk = s->state_key;
3369 if (!sk)
3370 continue;
3371 if (s->nat_rule.ptr != r)
3372 continue;
3373 if (sk->proto != IPPROTO_TCP || sk->af_lan != af)
3374 continue;
3375 if (sk->lan.xport.port != sxport->port)
3376 continue;
3377 if (!(PF_AEQ(&sk->lan.addr, saddr, af)))
3378 continue;
3379 nxport->port = sk->gwy.xport.port;
3380 return (0);
3381 }
3382 }
3383 do {
3384 key.af_gwy = af;
3385 key.proto = proto;
3386 PF_ACPY(&key.ext_gwy.addr, daddr, key.af_gwy);
3387 PF_ACPY(&key.gwy.addr, naddr, key.af_gwy);
3388 switch (proto) {
3389 case IPPROTO_UDP:
3390 key.proto_variant = r->extfilter;
3391 break;
3392 default:
3393 key.proto_variant = 0;
3394 break;
3395 }
3396 if (dxport)
3397 key.ext_gwy.xport = *dxport;
3398 else
3399 memset(&key.ext_gwy.xport, 0,
3400 sizeof (key.ext_gwy.xport));
3401 /*
3402 * port search; start random, step;
3403 * similar 2 portloop in in_pcbbind
3404 */
3405 if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
3406 proto == IPPROTO_ICMP)) {
3407 if (dxport)
3408 key.gwy.xport = *dxport;
3409 else
3410 memset(&key.gwy.xport, 0,
3411 sizeof (key.gwy.xport));
3412 if (pf_find_state_all(&key, PF_IN, NULL) == NULL)
3413 return (0);
3414 } else if (low == 0 && high == 0) {
3415 key.gwy.xport = *nxport;
3416 if (pf_find_state_all(&key, PF_IN, NULL) == NULL
3417 ) {
3418 return (0);
3419 }
3420 } else if (low == high) {
3421 key.gwy.xport.port = htons(low);
3422 if (pf_find_state_all(&key, PF_IN, NULL) == NULL
3423 ) {
3424 nxport->port = htons(low);
3425 return (0);
3426 }
3427 } else {
3428 unsigned int tmp;
3429 if (low > high) {
3430 tmp = low;
3431 low = high;
3432 high = tmp;
3433 }
3434 /* low < high */
3435 cut = htonl(random()) % (1 + high - low) + low;
3436 /* low <= cut <= high */
3437 for (tmp = cut; tmp <= high; ++(tmp)) {
3438 key.gwy.xport.port = htons(tmp);
3439 if (pf_find_state_all(&key, PF_IN, NULL) == NULL
3440 ) {
3441 nxport->port = htons(tmp);
3442 return (0);
3443 }
3444 }
3445 for (tmp = cut - 1; tmp >= low; --(tmp)) {
3446 key.gwy.xport.port = htons(tmp);
3447 if (pf_find_state_all(&key, PF_IN, NULL) == NULL
3448 ) {
3449 nxport->port = htons(tmp);
3450 return (0);
3451 }
3452 }
3453 }
3454
3455 switch (r->rpool.opts & PF_POOL_TYPEMASK) {
3456 case PF_POOL_RANDOM:
3457 case PF_POOL_ROUNDROBIN:
3458 if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn))
3459 return (1);
3460 break;
3461 case PF_POOL_NONE:
3462 case PF_POOL_SRCHASH:
3463 case PF_POOL_BITMASK:
3464 default:
3465 return (1);
3466 }
3467 } while (!PF_AEQ(&init_addr, naddr, af));
3468
3469 return (1); /* none available */
3470 }
3471
/*
 * Walk one active translation ruleset (NAT/BINAT/RDR, selected by rs_num)
 * and return the first matching rule, or NULL if none matches or the
 * packet could not be tagged.  A matching "no nat/rdr/binat" rule also
 * yields NULL so the caller skips translation entirely.
 */
static struct pf_rule *
pf_match_translation(struct pf_pdesc *pd, pbuf_t *pbuf, int off,
    int direction, struct pfi_kif *kif, struct pf_addr *saddr,
    union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, int rs_num)
{
	struct pf_rule *r, *rm = NULL;
	struct pf_ruleset *ruleset = NULL;
	int tag = -1;
	unsigned int rtableid = IFSCOPE_NONE;
	int asd = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr);
	while (r && rm == NULL) {
		struct pf_rule_addr *src = NULL, *dst = NULL;
		struct pf_addr_wrap *xdst = NULL;
		struct pf_addr_wrap *xsrc = NULL;
		union pf_rule_xport rdrxport;

		/*
		 * Select which rule addresses to compare against the packet.
		 * BINAT inbound and RDR outbound match against the rule's
		 * translated side (xdst/xsrc taken from the pool head).
		 */
		if (r->action == PF_BINAT && direction == PF_IN) {
			src = &r->dst;
			if (r->rpool.cur != NULL)
				xdst = &r->rpool.cur->addr;
		} else if (r->action == PF_RDR && direction == PF_OUT) {
			dst = &r->src;
			src = &r->dst;
			if (r->rpool.cur != NULL) {
				/*
				 * NOTE(review): only range.op and
				 * range.port[0] of rdrxport are initialized
				 * here; pf_match_xport below with PF_OP_EQ
				 * presumably reads only those fields --
				 * confirm against pf_match_xport.
				 */
				rdrxport.range.op = PF_OP_EQ;
				rdrxport.range.port[0] =
				    htons(r->rpool.proxy_port[0]);
				xsrc = &r->rpool.cur->addr;
			}
		} else {
			src = &r->src;
			dst = &r->dst;
		}

		r->evaluations++;
		/*
		 * Mismatches advance via precomputed skip pointers where
		 * possible, otherwise to the next rule in the list.
		 */
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != pd->af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL))
			r = TAILQ_NEXT(r, entries);
		else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af,
		    src->neg, kif))
			r = TAILQ_NEXT(r, entries);
		else if (xsrc && (!rdrxport.range.port[0] ||
		    !pf_match_xport(r->proto, r->proto_variant, &rdrxport,
		    sxport)))
			r = TAILQ_NEXT(r, entries);
		else if (!xsrc && !pf_match_xport(r->proto,
		    r->proto_variant, &src->xport, sxport))
			r = r->skip[src == &r->src ? PF_SKIP_SRC_PORT :
			    PF_SKIP_DST_PORT].ptr;
		else if (dst != NULL &&
		    PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af,
		    0, NULL))
			r = TAILQ_NEXT(r, entries);
		else if (dst && !pf_match_xport(r->proto, r->proto_variant,
		    &dst->xport, dxport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto !=
		    IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, pbuf,
		    off, pd->hdr.tcp), r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			/* Full match: record tag/rtable, then either stop
			 * here or descend into the rule's anchor. */
			if (r->tag)
				tag = r->tag;
			if (PF_RTABLEID_IS_VALID(r->rtableid))
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				rm = r;
			} else
				pf_step_into_anchor(&asd, &ruleset, rs_num,
				    &r, NULL, NULL);
		}
		if (r == NULL)
			pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r,
			    NULL, NULL);
	}
	/* Tag the packet even when no rule matched; failure aborts. */
	if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, NULL))
		return (NULL);
	/* "no nat/rdr/binat/nat64" rules suppress translation. */
	if (rm != NULL && (rm->action == PF_NONAT ||
	    rm->action == PF_NORDR || rm->action == PF_NOBINAT ||
	    rm->action == PF_NONAT64))
		return (NULL);
	return (rm);
}
3569
3570 /*
3571 * Get address translation information for NAT/BINAT/RDR
3572 * pd : pf packet descriptor
3573 * pbuf : pbuf holding the packet
3574 * off : offset to protocol header
3575 * direction : direction of packet
3576 * kif : pf interface info obtained from the packet's recv interface
3577 * sn : source node pointer (output)
3578 * saddr : packet source address
3579 * sxport : packet source port
3580 * daddr : packet destination address
3581 * dxport : packet destination port
3582 * nsxport : translated source port (output)
3583 *
3584 * Translated source & destination address are updated in pd->nsaddr &
3585 * pd->ndaddr
3586 */
static struct pf_rule *
pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off,
    int direction, struct pfi_kif *kif, struct pf_src_node **sn,
    struct pf_addr *saddr, union pf_state_xport *sxport, struct pf_addr *daddr,
    union pf_state_xport *dxport, union pf_state_xport *nsxport
    )
{
	struct pf_rule *r = NULL;
	/* Default: no address-family translation (naf may become AF_INET
	 * below for NAT64). */
	pd->naf = pd->af;

	/*
	 * Ruleset search order depends on direction:
	 * outbound tries BINAT, then RDR, then NAT;
	 * inbound tries RDR, then BINAT.
	 */
	if (direction == PF_OUT) {
		r = pf_match_translation(pd, pbuf, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_BINAT);
		if (r == NULL)
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_RDR);
		if (r == NULL)
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_NAT);
	} else {
		r = pf_match_translation(pd, pbuf, off, direction, kif, saddr,
		    sxport, daddr, dxport, PF_RULESET_RDR);
		if (r == NULL)
			r = pf_match_translation(pd, pbuf, off, direction, kif,
			    saddr, sxport, daddr, dxport, PF_RULESET_BINAT);
	}

	if (r != NULL) {
		struct pf_addr *nsaddr = &pd->naddr;
		struct pf_addr *ndaddr = &pd->ndaddr;

		/* Start from untranslated addresses; the cases below
		 * overwrite only the side they translate. */
		*nsaddr = *saddr;
		*ndaddr = *daddr;

		switch (r->action) {
		case PF_NONAT:
		case PF_NONAT64:
		case PF_NOBINAT:
		case PF_NORDR:
			return (NULL);
		case PF_NAT:
		case PF_NAT64:
			/*
			 * we do NAT64 on incoming path and we call ip_input
			 * which asserts receive interface to be not NULL.
			 * The below check is to prevent NAT64 action on any
			 * packet generated by local entity using synthesized
			 * IPv6 address.
			 */
			if ((r->action == PF_NAT64) && (direction == PF_OUT))
				return (NULL);

			/* Allocate the translated source address/port. */
			if (pf_get_sport(pd, kif, r, saddr, sxport, daddr,
			    dxport, nsaddr, nsxport, sn
			    ))
			{
				DPFPRINTF(PF_DEBUG_MISC,
				    ("pf: NAT proxy port allocation "
				    "(%u-%u) failed\n",
				    r->rpool.proxy_port[0],
				    r->rpool.proxy_port[1]));
				return (NULL);
			}
			/*
			 * For NAT64 the destination IPv4 address is derived
			 * from the last 32 bits of synthesized IPv6 address
			 */
			if (r->action == PF_NAT64) {
				ndaddr->v4addr.s_addr = daddr->addr32[3];
				pd->naf = AF_INET;
			}
			break;
		case PF_BINAT:
			/* Bidirectional 1:1 mapping: rewrite source going
			 * out, destination coming in. */
			switch (direction) {
			case PF_OUT:
				if (r->rpool.cur->addr.type ==
				    PF_ADDR_DYNIFTL) {
					if (r->rpool.cur->addr.p.dyn == NULL)
						return (NULL);
					switch (pd->af) {
#if INET
					case AF_INET:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt4 < 1)
							return (NULL);
						PF_POOLMASK(nsaddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr4,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask4,
						    saddr, AF_INET);
						break;
#endif /* INET */
#if INET6
					case AF_INET6:
						if (r->rpool.cur->addr.p.dyn->
						    pfid_acnt6 < 1)
							return (NULL);
						PF_POOLMASK(nsaddr,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_addr6,
						    &r->rpool.cur->addr.p.dyn->
						    pfid_mask6,
						    saddr, AF_INET6);
						break;
#endif /* INET6 */
					}
				} else {
					PF_POOLMASK(nsaddr,
					    &r->rpool.cur->addr.v.a.addr,
					    &r->rpool.cur->addr.v.a.mask,
					    saddr, pd->af);
				}
				break;
			case PF_IN:
				if (r->src.addr.type == PF_ADDR_DYNIFTL) {
					if (r->src.addr.p.dyn == NULL)
						return (NULL);
					switch (pd->af) {
#if INET
					case AF_INET:
						if (r->src.addr.p.dyn->
						    pfid_acnt4 < 1)
							return (NULL);
						PF_POOLMASK(ndaddr,
						    &r->src.addr.p.dyn->
						    pfid_addr4,
						    &r->src.addr.p.dyn->
						    pfid_mask4,
						    daddr, AF_INET);
						break;
#endif /* INET */
#if INET6
					case AF_INET6:
						if (r->src.addr.p.dyn->
						    pfid_acnt6 < 1)
							return (NULL);
						PF_POOLMASK(ndaddr,
						    &r->src.addr.p.dyn->
						    pfid_addr6,
						    &r->src.addr.p.dyn->
						    pfid_mask6,
						    daddr, AF_INET6);
						break;
#endif /* INET6 */
					}
				} else
					PF_POOLMASK(ndaddr,
					    &r->src.addr.v.a.addr,
					    &r->src.addr.v.a.mask, daddr,
					    pd->af);
				break;
			}
			break;
		case PF_RDR: {
			switch (direction) {
			case PF_OUT:
				/* Outbound reply to a redirect: restore the
				 * original (pre-RDR) source address/port. */
				if (r->dst.addr.type == PF_ADDR_DYNIFTL) {
					if (r->dst.addr.p.dyn == NULL)
						return (NULL);
					switch (pd->af) {
#if INET
					case AF_INET:
						if (r->dst.addr.p.dyn->
						    pfid_acnt4 < 1)
							return (NULL);
						PF_POOLMASK(nsaddr,
						    &r->dst.addr.p.dyn->
						    pfid_addr4,
						    &r->dst.addr.p.dyn->
						    pfid_mask4,
						    daddr, AF_INET);
						break;
#endif /* INET */
#if INET6
					case AF_INET6:
						if (r->dst.addr.p.dyn->
						    pfid_acnt6 < 1)
							return (NULL);
						PF_POOLMASK(nsaddr,
						    &r->dst.addr.p.dyn->
						    pfid_addr6,
						    &r->dst.addr.p.dyn->
						    pfid_mask6,
						    daddr, AF_INET6);
						break;
#endif /* INET6 */
					}
				} else {
					PF_POOLMASK(nsaddr,
					    &r->dst.addr.v.a.addr,
					    &r->dst.addr.v.a.mask,
					    daddr, pd->af);
				}
				if (nsxport && r->dst.xport.range.port[0])
					nsxport->port =
					    r->dst.xport.range.port[0];
				break;
			case PF_IN:
				/* Inbound redirect: map destination to a
				 * pool address and remap the port. */
				if (pf_map_addr(pd->af, r, saddr,
				    ndaddr, NULL, sn))
					return (NULL);
				if ((r->rpool.opts & PF_POOL_TYPEMASK) ==
				    PF_POOL_BITMASK)
					PF_POOLMASK(ndaddr, ndaddr,
					    &r->rpool.cur->addr.v.a.mask, daddr,
					    pd->af);

				if (nsxport && dxport) {
					if (r->rpool.proxy_port[1]) {
						/* Port range: spread ports
						 * across the proxy range by
						 * modular offset. */
						u_int32_t tmp_nport;

						tmp_nport =
						    ((ntohs(dxport->port) -
						    ntohs(r->dst.xport.range.
						    port[0])) %
						    (r->rpool.proxy_port[1] -
						    r->rpool.proxy_port[0] +
						    1)) + r->rpool.proxy_port[0];

						/* wrap around if necessary */
						if (tmp_nport > 65535)
							tmp_nport -= 65535;
						nsxport->port =
						    htons((u_int16_t)tmp_nport);
					} else if (r->rpool.proxy_port[0]) {
						/* Single proxy port. */
						nsxport->port = htons(r->rpool.
						    proxy_port[0]);
					}
				}
				break;
			}
			break;
		}
		default:
			return (NULL);
		}
	}

	return (r);
}
3828
/*
 * Find the local socket (if any) owning this TCP/UDP flow and fill in
 * pd->lookup.uid/gid for user/group rule matching.
 * Returns 1 when a matching PCB was found, -1 otherwise (including
 * non-TCP/UDP protocols and missing headers).
 */
int
pf_socket_lookup(int direction, struct pf_pdesc *pd)
{
	struct pf_addr *saddr, *daddr;
	u_int16_t sport, dport;
	struct inpcbinfo *pi;
	int inp = 0;

	if (pd == NULL)
		return (-1);
	/* Initialize lookup results to "unknown". */
	pd->lookup.uid = UID_MAX;
	pd->lookup.gid = GID_MAX;
	pd->lookup.pid = NO_PID;

	/* Only TCP and UDP have a PCB hash to consult. */
	switch (pd->proto) {
	case IPPROTO_TCP:
		if (pd->hdr.tcp == NULL)
			return (-1);
		sport = pd->hdr.tcp->th_sport;
		dport = pd->hdr.tcp->th_dport;
		pi = &tcbinfo;
		break;
	case IPPROTO_UDP:
		if (pd->hdr.udp == NULL)
			return (-1);
		sport = pd->hdr.udp->uh_sport;
		dport = pd->hdr.udp->uh_dport;
		pi = &udbinfo;
		break;
	default:
		return (-1);
	}
	/*
	 * PCBs are keyed from the local host's perspective; for outbound
	 * packets swap src/dst so "saddr" is the foreign side.
	 */
	if (direction == PF_IN) {
		saddr = pd->src;
		daddr = pd->dst;
	} else {
		u_int16_t p;

		p = sport;
		sport = dport;
		dport = p;
		saddr = pd->dst;
		daddr = pd->src;
	}
	switch (pd->af) {
#if INET
	case AF_INET:
		/* Exact 4-tuple lookup first. */
		inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport, daddr->v4addr, dport,
		    0, &pd->lookup.uid, &pd->lookup.gid, NULL);
#if INET6
		if (inp == 0) {
			struct in6_addr s6, d6;

			/* Build IPv4-mapped IPv6 addresses (::ffff:a.b.c.d)
			 * to catch dual-stack (AF_INET6) sockets. */
			memset(&s6, 0, sizeof (s6));
			s6.s6_addr16[5] = htons(0xffff);
			memcpy(&s6.s6_addr32[3], &saddr->v4addr,
			    sizeof (saddr->v4addr));

			memset(&d6, 0, sizeof (d6));
			d6.s6_addr16[5] = htons(0xffff);
			memcpy(&d6.s6_addr32[3], &daddr->v4addr,
			    sizeof (daddr->v4addr));

			/* Fallback ladder: mapped exact, v4 wildcard,
			 * mapped wildcard. */
			inp = in6_pcblookup_hash_exists(pi, &s6, sport,
			    &d6, dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0) {
				inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport,
				    daddr->v4addr, dport, INPLOOKUP_WILDCARD, &pd->lookup.uid, &pd->lookup.gid, NULL);
				if (inp == 0) {
					inp = in6_pcblookup_hash_exists(pi, &s6, sport,
					    &d6, dport, INPLOOKUP_WILDCARD,
					    &pd->lookup.uid, &pd->lookup.gid, NULL);
					if (inp == 0)
						return (-1);
				}
			}
		}
#else
		if (inp == 0) {
			inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport,
			    daddr->v4addr, dport, INPLOOKUP_WILDCARD,
			    &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0)
				return (-1);
		}
#endif /* !INET6 */
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		/* Exact lookup, then listening-socket wildcard. */
		inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr, sport, &daddr->v6addr,
		    dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL);
		if (inp == 0) {
			inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr, sport,
			    &daddr->v6addr, dport, INPLOOKUP_WILDCARD,
			    &pd->lookup.uid, &pd->lookup.gid, NULL);
			if (inp == 0)
				return (-1);
		}
		break;
#endif /* INET6 */

	default:
		return (-1);
	}

	return (1);
}
3937
3938 static u_int8_t
3939 pf_get_wscale(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af)
3940 {
3941 int hlen;
3942 u_int8_t hdr[60];
3943 u_int8_t *opt, optlen;
3944 u_int8_t wscale = 0;
3945
3946 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
3947 if (hlen <= (int)sizeof (struct tcphdr))
3948 return (0);
3949 if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af))
3950 return (0);
3951 opt = hdr + sizeof (struct tcphdr);
3952 hlen -= sizeof (struct tcphdr);
3953 while (hlen >= 3) {
3954 switch (*opt) {
3955 case TCPOPT_EOL:
3956 case TCPOPT_NOP:
3957 ++opt;
3958 --hlen;
3959 break;
3960 case TCPOPT_WINDOW:
3961 wscale = opt[2];
3962 if (wscale > TCP_MAX_WINSHIFT)
3963 wscale = TCP_MAX_WINSHIFT;
3964 wscale |= PF_WSCALE_FLAG;
3965 /* FALLTHROUGH */
3966 default:
3967 optlen = opt[1];
3968 if (optlen < 2)
3969 optlen = 2;
3970 hlen -= optlen;
3971 opt += optlen;
3972 break;
3973 }
3974 }
3975 return (wscale);
3976 }
3977
3978 static u_int16_t
3979 pf_get_mss(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af)
3980 {
3981 int hlen;
3982 u_int8_t hdr[60];
3983 u_int8_t *opt, optlen;
3984 u_int16_t mss = tcp_mssdflt;
3985
3986 hlen = th_off << 2; /* hlen <= sizeof (hdr) */
3987 if (hlen <= (int)sizeof (struct tcphdr))
3988 return (0);
3989 if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af))
3990 return (0);
3991 opt = hdr + sizeof (struct tcphdr);
3992 hlen -= sizeof (struct tcphdr);
3993 while (hlen >= TCPOLEN_MAXSEG) {
3994 switch (*opt) {
3995 case TCPOPT_EOL:
3996 case TCPOPT_NOP:
3997 ++opt;
3998 --hlen;
3999 break;
4000 case TCPOPT_MAXSEG:
4001 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
4002 #if BYTE_ORDER != BIG_ENDIAN
4003 NTOHS(mss);
4004 #endif
4005 /* FALLTHROUGH */
4006 default:
4007 optlen = opt[1];
4008 if (optlen < 2)
4009 optlen = 2;
4010 hlen -= optlen;
4011 opt += optlen;
4012 break;
4013 }
4014 }
4015 return (mss);
4016 }
4017
4018 static u_int16_t
4019 pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer)
4020 {
4021 #if INET
4022 struct sockaddr_in *dst;
4023 struct route ro;
4024 #endif /* INET */
4025 #if INET6
4026 struct sockaddr_in6 *dst6;
4027 struct route_in6 ro6;
4028 #endif /* INET6 */
4029 struct rtentry *rt = NULL;
4030 int hlen;
4031 u_int16_t mss = tcp_mssdflt;
4032
4033 switch (af) {
4034 #if INET
4035 case AF_INET:
4036 hlen = sizeof (struct ip);
4037 bzero(&ro, sizeof (ro));
4038 dst = (struct sockaddr_in *)(void *)&ro.ro_dst;
4039 dst->sin_family = AF_INET;
4040 dst->sin_len = sizeof (*dst);
4041 dst->sin_addr = addr->v4addr;
4042 rtalloc(&ro);
4043 rt = ro.ro_rt;
4044 break;
4045 #endif /* INET */
4046 #if INET6
4047 case AF_INET6:
4048 hlen = sizeof (struct ip6_hdr);
4049 bzero(&ro6, sizeof (ro6));
4050 dst6 = (struct sockaddr_in6 *)(void *)&ro6.ro_dst;
4051 dst6->sin6_family = AF_INET6;
4052 dst6->sin6_len = sizeof (*dst6);
4053 dst6->sin6_addr = addr->v6addr;
4054 rtalloc((struct route *)&ro);
4055 rt = ro6.ro_rt;
4056 break;
4057 #endif /* INET6 */
4058 default:
4059 panic("pf_calc_mss: not AF_INET or AF_INET6!");
4060 return (0);
4061 }
4062
4063 if (rt && rt->rt_ifp) {
4064 mss = rt->rt_ifp->if_mtu - hlen - sizeof (struct tcphdr);
4065 mss = max(tcp_mssdflt, mss);
4066 rtfree(rt);
4067 }
4068 mss = min(mss, offer);
4069 mss = max(mss, 64); /* sanity - at least max opt space */
4070 return (mss);
4071 }
4072
4073 static void
4074 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr, sa_family_t af)
4075 {
4076 struct pf_rule *r = s->rule.ptr;
4077
4078 s->rt_kif = NULL;
4079
4080 if (!r->rt || r->rt == PF_FASTROUTE)
4081 return;
4082 if ((af == AF_INET) || (af == AF_INET6)) {
4083 pf_map_addr(af, r, saddr, &s->rt_addr, NULL,
4084 &s->nat_src_node);
4085 s->rt_kif = r->rpool.cur->kif;
4086 }
4087
4088 return;
4089 }
4090
4091 static void
4092 pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail)
4093 {
4094 s->state_key = sk;
4095 sk->refcnt++;
4096
4097 /* list is sorted, if-bound states before floating */
4098 if (tail)
4099 TAILQ_INSERT_TAIL(&sk->states, s, next);
4100 else
4101 TAILQ_INSERT_HEAD(&sk->states, s, next);
4102 }
4103
4104 static void
4105 pf_detach_state(struct pf_state *s, int flags)
4106 {
4107 struct pf_state_key *sk = s->state_key;
4108
4109 if (sk == NULL)
4110 return;
4111
4112 s->state_key = NULL;
4113 TAILQ_REMOVE(&sk->states, s, next);
4114 if (--sk->refcnt == 0) {
4115 if (!(flags & PF_DT_SKIP_EXTGWY))
4116 RB_REMOVE(pf_state_tree_ext_gwy,
4117 &pf_statetbl_ext_gwy, sk);
4118 if (!(flags & PF_DT_SKIP_LANEXT))
4119 RB_REMOVE(pf_state_tree_lan_ext,
4120 &pf_statetbl_lan_ext, sk);
4121 if (sk->app_state)
4122 pool_put(&pf_app_state_pl, sk->app_state);
4123 pool_put(&pf_state_key_pl, sk);
4124 }
4125 }
4126
4127 struct pf_state_key *
4128 pf_alloc_state_key(struct pf_state *s, struct pf_state_key *psk)
4129 {
4130 struct pf_state_key *sk;
4131
4132 if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL)
4133 return (NULL);
4134 bzero(sk, sizeof (*sk));
4135 TAILQ_INIT(&sk->states);
4136 pf_attach_state(sk, s, 0);
4137
4138 /* initialize state key from psk, if provided */
4139 if (psk != NULL) {
4140 bcopy(&psk->lan, &sk->lan, sizeof (sk->lan));
4141 bcopy(&psk->gwy, &sk->gwy, sizeof (sk->gwy));
4142 bcopy(&psk->ext_lan, &sk->ext_lan, sizeof (sk->ext_lan));
4143 bcopy(&psk->ext_gwy, &sk->ext_gwy, sizeof (sk->ext_gwy));
4144 sk->af_lan = psk->af_lan;
4145 sk->af_gwy = psk->af_gwy;
4146 sk->proto = psk->proto;
4147 sk->direction = psk->direction;
4148 sk->proto_variant = psk->proto_variant;
4149 VERIFY(psk->app_state == NULL);
4150 sk->flowsrc = psk->flowsrc;
4151 sk->flowhash = psk->flowhash;
4152 /* don't touch tree entries, states and refcnt on sk */
4153 }
4154
4155 return (sk);
4156 }
4157
4158 static u_int32_t
4159 pf_tcp_iss(struct pf_pdesc *pd)
4160 {
4161 MD5_CTX ctx;
4162 u_int32_t digest[4];
4163
4164 if (pf_tcp_secret_init == 0) {
4165 read_frandom(pf_tcp_secret, sizeof (pf_tcp_secret));
4166 MD5Init(&pf_tcp_secret_ctx);
4167 MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret,
4168 sizeof (pf_tcp_secret));
4169 pf_tcp_secret_init = 1;
4170 }
4171 ctx = pf_tcp_secret_ctx;
4172
4173 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short));
4174 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short));
4175 if (pd->af == AF_INET6) {
4176 MD5Update(&ctx, (char *)&pd->src->v6addr, sizeof (struct in6_addr));
4177 MD5Update(&ctx, (char *)&pd->dst->v6addr, sizeof (struct in6_addr));
4178 } else {
4179 MD5Update(&ctx, (char *)&pd->src->v4addr, sizeof (struct in_addr));
4180 MD5Update(&ctx, (char *)&pd->dst->v4addr, sizeof (struct in_addr));
4181 }
4182 MD5Final((u_char *)digest, &ctx);
4183 pf_tcp_iss_off += 4096;
4184 return (digest[0] + random() + pf_tcp_iss_off);
4185 }
4186
4187 /*
4188 * This routine is called to perform address family translation on the
4189 * inner IP header (that may come as payload) of an ICMP(v4addr/6) error
4190 * response.
4191 */
static int
pf_change_icmp_af(pbuf_t *pbuf, int off,
    struct pf_pdesc *pd, struct pf_pdesc *pd2, struct pf_addr *src,
    struct pf_addr *dst, sa_family_t af, sa_family_t naf)
{
	struct ip *ip4 = NULL;
	struct ip6_hdr *ip6 = NULL;
	void *hdr;
	int hlen, olen;

	/* Only genuine v4<->v6 translations make sense here. */
	if (af == naf || (af != AF_INET && af != AF_INET6) ||
	    (naf != AF_INET && naf != AF_INET6))
		return (-1);

	/* old header */
	olen = pd2->off - off;
	/* new header */
	hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);

	/* Modify the pbuf to accommodate the new header */
	hdr = pbuf_resize_segment(pbuf, off, olen, hlen);
	if (hdr == NULL)
		return (-1);

	/* translate inner ip/ip6 header */
	switch (naf) {
	case AF_INET:
		/* Build a fresh IPv4 header in place of the IPv6 one. */
		ip4 = hdr;
		bzero(ip4, sizeof(*ip4));
		ip4->ip_v = IPVERSION;
		ip4->ip_hl = sizeof(*ip4) >> 2;
		ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - olen);
		/* Atomic datagram: no IP ID needed under RFC 6864. */
		ip4->ip_id = rfc6864 ? 0 : htons(ip_randomid());
		ip4->ip_off = htons(IP_DF);
		ip4->ip_ttl = pd2->ttl;
		if (pd2->proto == IPPROTO_ICMPV6)
			ip4->ip_p = IPPROTO_ICMP;
		else
			ip4->ip_p = pd2->proto;
		ip4->ip_src = src->v4addr;
		ip4->ip_dst = dst->v4addr;
		ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2);
		break;
	case AF_INET6:
		/* Build a fresh IPv6 header in place of the IPv4 one. */
		ip6 = hdr;
		bzero(ip6, sizeof(*ip6));
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(pd2->tot_len - olen);
		if (pd2->proto == IPPROTO_ICMP)
			ip6->ip6_nxt = IPPROTO_ICMPV6;
		else
			ip6->ip6_nxt = pd2->proto;
		/* Clamp hop limit to the IPv6 default. */
		if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM)
			ip6->ip6_hlim = IPV6_DEFHLIM;
		else
			ip6->ip6_hlim = pd2->ttl;
		ip6->ip6_src = src->v6addr;
		ip6->ip6_dst = dst->v6addr;
		break;
	}

	/* adjust payload offset and total packet length */
	pd2->off += hlen - olen;
	pd->tot_len += hlen - olen;

	return (0);
}
4259
4260 #define PTR_IP(field) ((int32_t)offsetof(struct ip, field))
4261 #define PTR_IP6(field) ((int32_t)offsetof(struct ip6_hdr, field))
4262
/*
 * Rewrite an ICMP <-> ICMPv6 header in place for NAT64.  'af' is the
 * TARGET family: AF_INET means 'arg' holds an ICMPv6 header being
 * converted to ICMPv4, AF_INET6 the reverse.  Returns 0 on success,
 * -1 for message types/codes with no defined mapping.
 */
static int
pf_translate_icmp_af(int af, void *arg)
{
	struct icmp *icmp4;
	struct icmp6_hdr *icmp6;
	u_int32_t mtu;
	int32_t ptr = -1;
	u_int8_t type;
	u_int8_t code;

	switch (af) {
	case AF_INET:
		/* ICMPv6 -> ICMPv4. */
		icmp6 = arg;
		type = icmp6->icmp6_type;
		code = icmp6->icmp6_code;
		mtu = ntohl(icmp6->icmp6_mtu);

		switch (type) {
		case ICMP6_ECHO_REQUEST:
			type = ICMP_ECHO;
			break;
		case ICMP6_ECHO_REPLY:
			type = ICMP_ECHOREPLY;
			break;
		case ICMP6_DST_UNREACH:
			type = ICMP_UNREACH;
			switch (code) {
			case ICMP6_DST_UNREACH_NOROUTE:
			case ICMP6_DST_UNREACH_BEYONDSCOPE:
			case ICMP6_DST_UNREACH_ADDR:
				code = ICMP_UNREACH_HOST;
				break;
			case ICMP6_DST_UNREACH_ADMIN:
				code = ICMP_UNREACH_HOST_PROHIB;
				break;
			case ICMP6_DST_UNREACH_NOPORT:
				code = ICMP_UNREACH_PORT;
				break;
			default:
				return (-1);
			}
			break;
		case ICMP6_PACKET_TOO_BIG:
			type = ICMP_UNREACH;
			code = ICMP_UNREACH_NEEDFRAG;
			/* v6 MTU minus the 20-byte v4/v6 header delta. */
			mtu -= 20;
			break;
		case ICMP6_TIME_EXCEEDED:
			type = ICMP_TIMXCEED;
			break;
		case ICMP6_PARAM_PROB:
			switch (code) {
			case ICMP6_PARAMPROB_HEADER:
				type = ICMP_PARAMPROB;
				code = ICMP_PARAMPROB_ERRATPTR;
				/* Remap the problem pointer from ip6_hdr
				 * field offsets to struct ip offsets. */
				ptr = ntohl(icmp6->icmp6_pptr);

				if (ptr == PTR_IP6(ip6_vfc))
					; /* preserve */
				else if (ptr == PTR_IP6(ip6_vfc) + 1)
					ptr = PTR_IP(ip_tos);
				else if (ptr == PTR_IP6(ip6_plen) ||
				    ptr == PTR_IP6(ip6_plen) + 1)
					ptr = PTR_IP(ip_len);
				else if (ptr == PTR_IP6(ip6_nxt))
					ptr = PTR_IP(ip_p);
				else if (ptr == PTR_IP6(ip6_hlim))
					ptr = PTR_IP(ip_ttl);
				else if (ptr >= PTR_IP6(ip6_src) &&
				    ptr < PTR_IP6(ip6_dst))
					ptr = PTR_IP(ip_src);
				else if (ptr >= PTR_IP6(ip6_dst) &&
				    ptr < (int32_t)sizeof(struct ip6_hdr))
					ptr = PTR_IP(ip_dst);
				else {
					return (-1);
				}
				break;
			case ICMP6_PARAMPROB_NEXTHEADER:
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_PROTOCOL;
				break;
			default:
				return (-1);
			}
			break;
		default:
			return (-1);
		}
		/* Write results back into the (shared) header. */
		icmp6->icmp6_type = type;
		icmp6->icmp6_code = code;
		/* aligns well with a icmpv4 nextmtu */
		icmp6->icmp6_mtu = htonl(mtu);
		/* icmpv4 pptr is a one most significant byte */
		if (ptr >= 0)
			icmp6->icmp6_pptr = htonl(ptr << 24);
		break;

	case AF_INET6:
		/* ICMPv4 -> ICMPv6. */
		icmp4 = arg;
		type = icmp4->icmp_type;
		code = icmp4->icmp_code;
		mtu = ntohs(icmp4->icmp_nextmtu);

		switch (type) {
		case ICMP_ECHO:
			type = ICMP6_ECHO_REQUEST;
			break;
		case ICMP_ECHOREPLY:
			type = ICMP6_ECHO_REPLY;
			break;
		case ICMP_UNREACH:
			type = ICMP6_DST_UNREACH;
			switch (code) {
			case ICMP_UNREACH_NET:
			case ICMP_UNREACH_HOST:
			case ICMP_UNREACH_NET_UNKNOWN:
			case ICMP_UNREACH_HOST_UNKNOWN:
			case ICMP_UNREACH_ISOLATED:
			case ICMP_UNREACH_TOSNET:
			case ICMP_UNREACH_TOSHOST:
				code = ICMP6_DST_UNREACH_NOROUTE;
				break;
			case ICMP_UNREACH_PORT:
				code = ICMP6_DST_UNREACH_NOPORT;
				break;
			case ICMP_UNREACH_NET_PROHIB:
			case ICMP_UNREACH_HOST_PROHIB:
			case ICMP_UNREACH_FILTER_PROHIB:
			case ICMP_UNREACH_PRECEDENCE_CUTOFF:
				code = ICMP6_DST_UNREACH_ADMIN;
				break;
			case ICMP_UNREACH_PROTOCOL:
				type = ICMP6_PARAM_PROB;
				code = ICMP6_PARAMPROB_NEXTHEADER;
				ptr = offsetof(struct ip6_hdr, ip6_nxt);
				break;
			case ICMP_UNREACH_NEEDFRAG:
				type = ICMP6_PACKET_TOO_BIG;
				code = 0;
				/* v4 MTU plus the 20-byte header delta. */
				mtu += 20;
				break;
			default:
				return (-1);
			}
			break;
		case ICMP_TIMXCEED:
			type = ICMP6_TIME_EXCEEDED;
			break;
		case ICMP_PARAMPROB:
			type = ICMP6_PARAM_PROB;
			switch (code) {
			case ICMP_PARAMPROB_ERRATPTR:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			case ICMP_PARAMPROB_LENGTH:
				code = ICMP6_PARAMPROB_HEADER;
				break;
			default:
				return (-1);
			}

			/* Remap the problem pointer from struct ip field
			 * offsets to ip6_hdr offsets. */
			ptr = icmp4->icmp_pptr;
			if (ptr == 0 || ptr == PTR_IP(ip_tos))
				; /* preserve */
			else if (ptr == PTR_IP(ip_len) ||
			    ptr == PTR_IP(ip_len) + 1)
				ptr = PTR_IP6(ip6_plen);
			else if (ptr == PTR_IP(ip_ttl))
				ptr = PTR_IP6(ip6_hlim);
			else if (ptr == PTR_IP(ip_p))
				ptr = PTR_IP6(ip6_nxt);
			else if (ptr >= PTR_IP(ip_src) &&
			    ptr < PTR_IP(ip_dst))
				ptr = PTR_IP6(ip6_src);
			else if (ptr >= PTR_IP(ip_dst) &&
			    ptr < (int32_t)sizeof(struct ip))
				ptr = PTR_IP6(ip6_dst);
			else {
				return (-1);
			}
			break;
		default:
			return (-1);
		}
		/* Write results back into the (shared) header. */
		icmp4->icmp_type = type;
		icmp4->icmp_code = code;
		icmp4->icmp_nextmtu = htons(mtu);
		if (ptr >= 0)
			icmp4->icmp_void = htonl(ptr);
		break;
	}

	return (0);
}
4458
/* Note: frees pbuf if PF_NAT64 is returned */
/*
 * NAT64 inbound: replace the IPv6 header (first 'off' bytes) with a
 * synthesized IPv4 header built from pd->naddr/pd->ndaddr, fix up the
 * ICMP checksum if needed, and re-inject the packet via ip_input().
 * Returns PF_NAT64 on (attempted) re-injection, PF_DROP on failure.
 */
static int
pf_nat64_ipv6(pbuf_t *pbuf, int off, struct pf_pdesc *pd)
{
	struct ip *ip4;
	struct mbuf *m;

	/*
	 * ip_input asserts for rcvif to be not NULL
	 * That may not be true for two corner cases
	 * 1. If for some reason a local app sends DNS
	 * AAAA query to local host
	 * 2. If IPv6 stack in kernel internally generates a
	 * message destined for a synthesized IPv6 end-point.
	 */
	if (pbuf->pb_ifp == NULL)
		return (PF_DROP);

	/* Swap the v6 header (plus extension headers up to 'off') for a
	 * bare 20-byte v4 header. */
	ip4 = (struct ip *)pbuf_resize_segment(pbuf, 0, off, sizeof(*ip4));
	if (ip4 == NULL)
		return (PF_DROP);

	ip4->ip_v = 4;
	ip4->ip_hl = 5;
	/*
	 * NOTE(review): masking the 8-bit pd->tos with
	 * htonl(0x0ff00000) looks byte-order dependent -- on
	 * little-endian this keeps only the low TOS nibble.  Confirm
	 * intended traffic-class mapping.
	 */
	ip4->ip_tos = pd->tos & htonl(0x0ff00000);
	ip4->ip_len = htons(sizeof(*ip4) + (pd->tot_len - off));
	ip4->ip_id = 0;
	ip4->ip_off = htons(IP_DF);
	ip4->ip_ttl = pd->ttl;
	ip4->ip_p = pd->proto;
	ip4->ip_sum = 0;
	ip4->ip_src = pd->naddr.v4addr;
	ip4->ip_dst = pd->ndaddr.v4addr;
	ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2);

	/* recalculate icmp checksums */
	if (pd->proto == IPPROTO_ICMP) {
		struct icmp *icmp;
		int hlen = sizeof(*ip4);

		icmp = (struct icmp *)pbuf_contig_segment(pbuf, hlen,
		    ICMP_MINLEN);
		if (icmp == NULL)
			return (PF_NAT64);

		icmp->icmp_cksum = 0;
		icmp->icmp_cksum = pbuf_inet_cksum(pbuf, 0, hlen,
		    ntohs(ip4->ip_len) - hlen);
	}

	/* Hand the translated packet to the IPv4 input path; the pbuf
	 * is consumed either way. */
	if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL)
		ip_input(m);

	return (PF_NAT64);
}
4514
/*
 * NAT64 reverse direction: replace the IPv4 header (first 'off' bytes)
 * with a synthesized IPv6 header from pd->naddr/pd->ndaddr, repair the
 * ICMPv6/UDP checksums, and re-inject via ip6_input().
 * Returns PF_NAT64 on (attempted) re-injection, PF_DROP on failure.
 */
static int
pf_nat64_ipv4(pbuf_t *pbuf, int off, struct pf_pdesc *pd)
{
	struct ip6_hdr *ip6;
	struct mbuf *m;

	/* ip6_input requires a receive interface; see pf_nat64_ipv6. */
	if (pbuf->pb_ifp == NULL)
		return (PF_DROP);

	/* Swap the v4 header (plus options up to 'off') for a 40-byte
	 * v6 header. */
	ip6 = (struct ip6_hdr *)pbuf_resize_segment(pbuf, 0, off, sizeof(*ip6));
	if (ip6 == NULL)
		return (PF_DROP);

	/* Version 6 with the original TOS carried as traffic class. */
	ip6->ip6_vfc = htonl((6 << 28) | (pd->tos << 20));
	ip6->ip6_plen = htons(pd->tot_len - off);
	ip6->ip6_nxt = pd->proto;
	ip6->ip6_hlim = pd->ttl;
	ip6->ip6_src = pd->naddr.v6addr;
	ip6->ip6_dst = pd->ndaddr.v6addr;

	/* recalculate icmp6 checksums */
	if (pd->proto == IPPROTO_ICMPV6) {
		struct icmp6_hdr *icmp6;
		int hlen = sizeof(*ip6);

		icmp6 = (struct icmp6_hdr *)pbuf_contig_segment(pbuf, hlen,
		    sizeof(*icmp6));
		if (icmp6 == NULL)
			return (PF_DROP);

		icmp6->icmp6_cksum = 0;
		icmp6->icmp6_cksum = pbuf_inet6_cksum(pbuf,
		    IPPROTO_ICMPV6, hlen,
		    ntohs(ip6->ip6_plen));
	} else if (pd->proto == IPPROTO_UDP) {
		struct udphdr *uh;
		int hlen = sizeof(*ip6);

		uh = (struct udphdr *)pbuf_contig_segment(pbuf, hlen,
		    sizeof(*uh));
		if (uh == NULL)
			return (PF_DROP);

		/* UDP checksum is mandatory over IPv6; compute one if
		 * the IPv4 datagram carried none (zero). */
		if (uh->uh_sum == 0)
			uh->uh_sum = pbuf_inet6_cksum(pbuf, IPPROTO_UDP,
			    hlen, ntohs(ip6->ip6_plen));
	}

	/* Hand the translated packet to the IPv6 input path. */
	if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL)
		ip6_input(m);

	return (PF_NAT64);
}
4568
/*
 * pf_test_rule: run a packet that matched no existing state through the
 * active filter ruleset.  The function:
 *   1. extracts transport identifiers (ports / ICMP id / GRE call-id /
 *      ESP SPI) into sxport/dxport,
 *   2. applies any matching BINAT/NAT/RDR translation in place
 *      (pf_get_translation_aux), remembering the pre-NAT values in
 *      bxport/bdxport/pd->baddr/pd->bdaddr,
 *   3. walks the PF_RULESET_FILTER rules (including anchors) to find the
 *      last matching rule (or the first "quick" one),
 *   4. on a blocking rule may undo the NAT edits and emit a TCP RST or
 *      ICMP unreachable reply,
 *   5. optionally creates a state entry plus source nodes and per-app
 *      (PPTP/IKE) state,
 *   6. writes rewritten headers back to the packet and, for NAT64,
 *      re-injects the packet into the other address family's input path.
 * Returns PF_PASS, PF_DROP, PF_SYNPROXY_DROP or PF_NAT64.
 * Caller must hold pf_lock (asserted below).
 */
static int
pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
    struct pfi_kif *kif, pbuf_t *pbuf, int off, void *h,
    struct pf_pdesc *pd, struct pf_rule **am, struct pf_ruleset **rsm,
    struct ifqueue *ifq)
{
#pragma unused(h)
	struct pf_rule *nr = NULL;
	struct pf_addr *saddr = pd->src, *daddr = pd->dst;
	sa_family_t af = pd->af;
	struct pf_rule *r, *a = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_src_node *nsn = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	struct udphdr *uh = pd->hdr.udp;
	u_short reason;
	int rewrite = 0, hdrlen = 0;
	int tag = -1;
	unsigned int rtableid = IFSCOPE_NONE;
	int asd = 0;
	int match = 0;
	int state_icmp = 0;
	u_int16_t mss = tcp_mssdflt;
	u_int8_t icmptype = 0, icmpcode = 0;

	struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
	/* b* = pre-NAT ("before") values, n* = post-NAT ("new") values */
	union pf_state_xport bxport, bdxport, nxport, sxport, dxport;
	struct pf_state_key psk;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (direction == PF_IN && pf_check_congestion(ifq)) {
		REASON_SET(&reason, PFRES_CONGEST);
		return (PF_DROP);
	}

	hdrlen = 0;
	sxport.spi = 0;
	dxport.spi = 0;
	nxport.spi = 0;

	/*
	 * Pull the per-protocol transport identifiers out of the parsed
	 * headers; for ICMP/ICMPv6 also decide whether this is an error
	 * message (state_icmp) that must not create state of its own.
	 */
	switch (pd->proto) {
	case IPPROTO_TCP:
		sxport.port = th->th_sport;
		dxport.port = th->th_dport;
		hdrlen = sizeof (*th);
		break;
	case IPPROTO_UDP:
		sxport.port = uh->uh_sport;
		dxport.port = uh->uh_dport;
		hdrlen = sizeof (*uh);
		break;
#if INET
	case IPPROTO_ICMP:
		if (pd->af != AF_INET)
			break;
		/* ICMP queries carry an id; use it as the "port" pair. */
		sxport.port = dxport.port = pd->hdr.icmp->icmp_id;
		hdrlen = ICMP_MINLEN;
		icmptype = pd->hdr.icmp->icmp_type;
		icmpcode = pd->hdr.icmp->icmp_code;

		if (icmptype == ICMP_UNREACH ||
		    icmptype == ICMP_SOURCEQUENCH ||
		    icmptype == ICMP_REDIRECT ||
		    icmptype == ICMP_TIMXCEED ||
		    icmptype == ICMP_PARAMPROB)
			state_icmp++;
		break;
#endif /* INET */
#if INET6
	case IPPROTO_ICMPV6:
		if (pd->af != AF_INET6)
			break;
		sxport.port = dxport.port = pd->hdr.icmp6->icmp6_id;
		hdrlen = sizeof (*pd->hdr.icmp6);
		icmptype = pd->hdr.icmp6->icmp6_type;
		icmpcode = pd->hdr.icmp6->icmp6_code;

		if (icmptype == ICMP6_DST_UNREACH ||
		    icmptype == ICMP6_PACKET_TOO_BIG ||
		    icmptype == ICMP6_TIME_EXCEEDED ||
		    icmptype == ICMP6_PARAM_PROB)
			state_icmp++;
		break;
#endif /* INET6 */
	case IPPROTO_GRE:
		/* Only GREv1/PPTP carries a call-id worth tracking. */
		if (pd->proto_variant == PF_GRE_PPTP_VARIANT) {
			sxport.call_id = dxport.call_id =
			    pd->hdr.grev1->call_id;
			hdrlen = sizeof (*pd->hdr.grev1);
		}
		break;
	case IPPROTO_ESP:
		sxport.spi = 0;
		dxport.spi = pd->hdr.esp->spi;
		hdrlen = sizeof (*pd->hdr.esp);
		break;
	}

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);

	/* Remember the pre-translation identifiers for state/undo. */
	bxport = sxport;
	bdxport = dxport;

	/* Outbound NAT rewrites the source; inbound RDR the destination. */
	if (direction == PF_OUT)
		nxport = sxport;
	else
		nxport = dxport;

	/* check packet for BINAT/NAT/RDR */
	if ((nr = pf_get_translation_aux(pd, pbuf, off, direction, kif, &nsn,
	    saddr, &sxport, daddr, &dxport, &nxport
	    )) != NULL) {
		int ua;
		u_int16_t dport;

		/*
		 * NOTE(review): 'ua' is forwarded as the last argument of
		 * pf_change_ap(); it is 0 only when translating between
		 * address families (NAT64) — presumably "update address
		 * in place"; confirm against pf_change_ap().
		 */
		if (pd->af != pd->naf)
			ua = 0;
		else
			ua = 1;

		/* Save pre-NAT addresses for state keys / undo on drop. */
		PF_ACPY(&pd->baddr, saddr, af);
		PF_ACPY(&pd->bdaddr, daddr, af);

		switch (pd->proto) {
		case IPPROTO_TCP:
			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_ap(direction, pd->mp, saddr,
				    &th->th_sport, pd->ip_sum, &th->th_sum,
				    &pd->naddr, nxport.port, 0, af,
				    pd->naf, ua);
				sxport.port = th->th_sport;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af) ||
			    (nr && (nr->action == PF_RDR) &&
			    (th->th_dport != nxport.port))) {
				if (nr && nr->action == PF_RDR)
					dport = nxport.port;
				else
					dport = th->th_dport;
				pf_change_ap(direction, pd->mp, daddr,
				    &th->th_dport, pd->ip_sum,
				    &th->th_sum, &pd->ndaddr,
				    dport, 0, af, pd->naf, ua);
				dxport.port = th->th_dport;
			}
			rewrite++;
			break;

		case IPPROTO_UDP:
			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_ap(direction, pd->mp, saddr,
				    &uh->uh_sport, pd->ip_sum,
				    &uh->uh_sum, &pd->naddr,
				    nxport.port, 1, af, pd->naf, ua);
				sxport.port = uh->uh_sport;
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af) ||
			    (nr && (nr->action == PF_RDR) &&
			    (uh->uh_dport != nxport.port))) {
				if (nr && nr->action == PF_RDR)
					dport = nxport.port;
				else
					dport = uh->uh_dport;
				/*
				 * NOTE(review): the source-port call above
				 * passes '1' (UDP flag?) where this one
				 * passes '0' — asymmetry looks suspicious;
				 * verify against pf_change_ap().
				 */
				pf_change_ap(direction, pd->mp, daddr,
				    &uh->uh_dport, pd->ip_sum,
				    &uh->uh_sum, &pd->ndaddr,
				    dport, 0, af, pd->naf, ua);
				dxport.port = uh->uh_dport;
			}
			rewrite++;
			break;
#if INET
		case IPPROTO_ICMP:
			if (pd->af != AF_INET)
				break;
			/*
			 * TODO:
			 * pd->af != pd->naf not handled yet here and would be
			 * needed for NAT46 needed to support XLAT.
			 * Will cross the bridge when it comes.
			 */
			if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_a(&saddr->v4addr.s_addr, pd->ip_sum,
				    pd->naddr.v4addr.s_addr, 0);
				/* ICMP id is NATed like a port; fix cksum. */
				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
				    pd->hdr.icmp->icmp_cksum, sxport.port,
				    nxport.port, 0);
				pd->hdr.icmp->icmp_id = nxport.port;
			}

			if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
				pf_change_a(&daddr->v4addr.s_addr, pd->ip_sum,
				    pd->ndaddr.v4addr.s_addr, 0);
			}
			++rewrite;
			break;
#endif /* INET */
#if INET6
		case IPPROTO_ICMPV6:
			if (pd->af != AF_INET6)
				break;

			if (pd->af != pd->naf ||
			    PF_ANEQ(saddr, &pd->naddr, pd->af)) {
				pf_change_addr(saddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &pd->naddr, 0, pd->af, pd->naf);
			}

			if (pd->af != pd->naf ||
			    PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
				pf_change_addr(daddr,
				    &pd->hdr.icmp6->icmp6_cksum,
				    &pd->ndaddr, 0, pd->af, pd->naf);
			}

			/* NAT64: rewrite the ICMPv6 header into ICMPv4. */
			if (pd->af != pd->naf) {
				if (pf_translate_icmp_af(AF_INET,
				    pd->hdr.icmp6))
					return (PF_DROP);
				pd->proto = IPPROTO_ICMP;
			}
			rewrite++;
			break;
#endif /* INET6 */
		case IPPROTO_GRE:
			if ((direction == PF_IN) &&
			    (pd->proto_variant == PF_GRE_PPTP_VARIANT))
				grev1->call_id = nxport.call_id;

			switch (pd->af) {
#if INET
			case AF_INET:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					pf_change_a(&saddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->naddr.v4addr.s_addr, 0);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					pf_change_a(&daddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->ndaddr.v4addr.s_addr, 0);
				}
				break;
#endif /* INET */
#if INET6
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af))
					PF_ACPY(saddr, &pd->naddr, AF_INET6);
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af))
					PF_ACPY(daddr, &pd->ndaddr, AF_INET6);
				break;
#endif /* INET6 */
			}
			++rewrite;
			break;
		case IPPROTO_ESP:
			/* Outbound ESP: the pre-NAT source SPI is unknown. */
			if (direction == PF_OUT)
				bxport.spi = 0;

			switch (pd->af) {
#if INET
			case AF_INET:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af)) {
					pf_change_a(&saddr->v4addr.s_addr,
					    pd->ip_sum, pd->naddr.v4addr.s_addr, 0);
				}
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) {
					pf_change_a(&daddr->v4addr.s_addr,
					    pd->ip_sum,
					    pd->ndaddr.v4addr.s_addr, 0);
				}
				break;
#endif /* INET */
#if INET6
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af))
					PF_ACPY(saddr, &pd->naddr, AF_INET6);
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af))
					PF_ACPY(daddr, &pd->ndaddr, AF_INET6);
				break;
#endif /* INET6 */
			}
			break;
		default:
			/* Protocols without ports: rewrite addresses only. */
			switch (pd->af) {
#if INET
			case AF_INET:
				if ((pd->naf != AF_INET) ||
				    (PF_ANEQ(saddr, &pd->naddr, pd->af))) {
					pf_change_addr(saddr, pd->ip_sum,
					    &pd->naddr, 0, af, pd->naf);
				}

				if ((pd->naf != AF_INET) ||
				    (PF_ANEQ(daddr, &pd->ndaddr, pd->af))) {
					pf_change_addr(daddr, pd->ip_sum,
					    &pd->ndaddr, 0, af, pd->naf);
				}
				break;
#endif /* INET */
#if INET6
			case AF_INET6:
				if (PF_ANEQ(saddr, &pd->naddr, pd->af))
					PF_ACPY(saddr, &pd->naddr, af);
				if (PF_ANEQ(daddr, &pd->ndaddr, pd->af))
					PF_ACPY(daddr, &pd->ndaddr, af);
				break;
#endif /* INET6 */
			}
			break;
		}

		/* "nat ... pass" short-circuits filter evaluation. */
		if (nr->natpass)
			r = NULL;
		pd->nat_rule = nr;
		pd->af = pd->naf;
	} else {
	}

	if (nr && nr->tag > 0)
		tag = nr->tag;

	/*
	 * Rule evaluation loop: "skip steps" (r->skip[...]) jump over runs
	 * of rules known not to match on a given criterion; everything
	 * else falls through rule by rule.  Last match wins unless a rule
	 * is marked "quick".
	 */
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != pd->af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, saddr, pd->af,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->proto == pd->proto &&
		    (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
		    r->src.xport.range.op &&
		    !pf_match_port(r->src.xport.range.op,
		    r->src.xport.range.port[0], r->src.xport.range.port[1],
		    th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, daddr, pd->af,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		/* tcp/udp only. port_op always 0 in other cases */
		else if (r->proto == pd->proto &&
		    (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
		    r->dst.xport.range.op &&
		    !pf_match_port(r->dst.xport.range.op,
		    r->dst.xport.range.port[0], r->dst.xport.range.port[1],
		    th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		/* icmp only. type always 0 in other cases */
		else if (r->type && r->type != icmptype + 1)
			r = TAILQ_NEXT(r, entries);
		/* icmp only. type always 0 in other cases */
		else if (r->code && r->code != icmpcode + 1)
			r = TAILQ_NEXT(r, entries);
		else if ((r->rule_flag & PFRULE_TOS) && r->tos &&
		    !(r->tos & pd->tos))
			r = TAILQ_NEXT(r, entries);
		else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
		    !(r->tos & (pd->tos & DSCP_MASK)))
			r = TAILQ_NEXT(r, entries);
		else if ((r->rule_flag & PFRULE_SC) && r->tos &&
		    ((r->tos & SCIDX_MASK) != pd->sc))
			r = TAILQ_NEXT(r, entries);
		/* Fragment rules are handled in pf_test_fragment, not here. */
		else if (r->rule_flag & PFRULE_FRAGMENT)
			r = TAILQ_NEXT(r, entries);
		else if (pd->proto == IPPROTO_TCP &&
		    (r->flagset & th->th_flags) != r->flags)
			r = TAILQ_NEXT(r, entries);
		/* tcp/udp only. uid.op always 0 in other cases */
		/* socket lookup is done lazily, at most once per packet */
		else if (r->uid.op && (pd->lookup.done || ((void)(pd->lookup.done =
		    pf_socket_lookup(direction, pd)), 1)) &&
		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
		    pd->lookup.uid))
			r = TAILQ_NEXT(r, entries);
		/* tcp/udp only. gid.op always 0 in other cases */
		else if (r->gid.op && (pd->lookup.done || ((void)(pd->lookup.done =
		    pf_socket_lookup(direction, pd)), 1)) &&
		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
		    pd->lookup.gid))
			r = TAILQ_NEXT(r, entries);
		/* probabilistic match ("probability" keyword) */
		else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1))
			r = TAILQ_NEXT(r, entries);
		else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag))
			r = TAILQ_NEXT(r, entries);
		else if (r->os_fingerprint != PF_OSFP_ANY &&
		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
		    pf_osfp_fingerprint(pd, pbuf, off, th),
		    r->os_fingerprint)))
			r = TAILQ_NEXT(r, entries);
		else {
			/* Rule matched: record it (last match wins). */
			if (r->tag)
				tag = r->tag;
			if (PF_RTABLEID_IS_VALID(r->rtableid))
				rtableid = r->rtableid;
			if (r->anchor == NULL) {
				match = 1;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				if ((*rm)->quick)
					break;
				r = TAILQ_NEXT(r, entries);
			} else
				pf_step_into_anchor(&asd, &ruleset,
				    PF_RULESET_FILTER, &r, &a, &match);
		}
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    PF_RULESET_FILTER, &r, &a, &match))
			break;
	}
	r = *rm;
	a = *am;
	ruleset = *rsm;

	REASON_SET(&reason, PFRES_MATCH);

	if (r->log || (nr != NULL && nr->log)) {
		/*
		 * Flush pending header rewrites into the packet buffer so
		 * the logged packet shows the translated headers.
		 */
		if (rewrite > 0) {
			if (rewrite < off + hdrlen)
				rewrite = off + hdrlen;

			if (pf_lazy_makewritable(pd, pbuf, rewrite) == NULL) {
				REASON_SET(&reason, PFRES_MEMORY);
				return (PF_DROP);
			}

			pbuf_copy_back(pbuf, off, hdrlen, pd->hdr.any);
		}
		PFLOG_PACKET(kif, h, pbuf, pd->af, direction, reason,
		    r->log ? r : nr, a, ruleset, pd);
	}

	if ((r->action == PF_DROP) &&
	    ((r->rule_flag & PFRULE_RETURNRST) ||
	    (r->rule_flag & PFRULE_RETURNICMP) ||
	    (r->rule_flag & PFRULE_RETURN))) {
		/* undo NAT changes, if they have taken place */
		/* XXX For NAT64 we are not reverting the changes */
		if (nr != NULL && nr->action != PF_NAT64) {
			if (direction == PF_OUT) {
				pd->af = af;
				switch (pd->proto) {
				case IPPROTO_TCP:
					pf_change_ap(direction, pd->mp, saddr,
					    &th->th_sport, pd->ip_sum,
					    &th->th_sum, &pd->baddr,
					    bxport.port, 0, af, pd->af, 1);
					sxport.port = th->th_sport;
					rewrite++;
					break;
				case IPPROTO_UDP:
					pf_change_ap(direction, pd->mp, saddr,
					    &pd->hdr.udp->uh_sport, pd->ip_sum,
					    &pd->hdr.udp->uh_sum, &pd->baddr,
					    bxport.port, 1, af, pd->af, 1);
					sxport.port = pd->hdr.udp->uh_sport;
					rewrite++;
					break;
				case IPPROTO_ICMP:
#if INET6
				case IPPROTO_ICMPV6:
#endif
					/* nothing! */
					break;
				case IPPROTO_GRE:
					PF_ACPY(&pd->baddr, saddr, af);
					++rewrite;
					switch (af) {
#if INET
					case AF_INET:
						pf_change_a(&saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
#endif /* INET */
#if INET6
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr,
						    AF_INET6);
						break;
#endif /* INET6 */
					}
					break;
				case IPPROTO_ESP:
					PF_ACPY(&pd->baddr, saddr, af);
					switch (af) {
#if INET
					case AF_INET:
						pf_change_a(&saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
#endif /* INET */
#if INET6
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr,
						    AF_INET6);
						break;
#endif /* INET6 */
					}
					break;
				default:
					switch (af) {
					case AF_INET:
						pf_change_a(&saddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->baddr.v4addr.s_addr, 0);
						break;
					case AF_INET6:
						PF_ACPY(saddr, &pd->baddr, af);
						break;
					}
				}
			} else {
				/* PF_IN: undo the destination rewrite (RDR). */
				switch (pd->proto) {
				case IPPROTO_TCP:
					pf_change_ap(direction, pd->mp, daddr,
					    &th->th_dport, pd->ip_sum,
					    &th->th_sum, &pd->bdaddr,
					    bdxport.port, 0, af, pd->af, 1);
					dxport.port = th->th_dport;
					rewrite++;
					break;
				case IPPROTO_UDP:
					pf_change_ap(direction, pd->mp, daddr,
					    &pd->hdr.udp->uh_dport, pd->ip_sum,
					    &pd->hdr.udp->uh_sum, &pd->bdaddr,
					    bdxport.port, 1, af, pd->af, 1);
					dxport.port = pd->hdr.udp->uh_dport;
					rewrite++;
					break;
				case IPPROTO_ICMP:
#if INET6
				case IPPROTO_ICMPV6:
#endif
					/* nothing! */
					break;
				case IPPROTO_GRE:
					if (pd->proto_variant ==
					    PF_GRE_PPTP_VARIANT)
						grev1->call_id =
						    bdxport.call_id;
					++rewrite;
					switch (af) {
#if INET
					case AF_INET:
						pf_change_a(&daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr, 0);
						break;
#endif /* INET */
#if INET6
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr,
						    AF_INET6);
						break;
#endif /* INET6 */
					}
					break;
				case IPPROTO_ESP:
					switch (af) {
#if INET
					case AF_INET:
						pf_change_a(&daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr, 0);
						break;
#endif /* INET */
#if INET6
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr,
						    AF_INET6);
						break;
#endif /* INET6 */
					}
					break;
				default:
					switch (af) {
					case AF_INET:
						pf_change_a(&daddr->v4addr.s_addr,
						    pd->ip_sum,
						    pd->bdaddr.v4addr.s_addr, 0);
						break;
#if INET6
					case AF_INET6:
						PF_ACPY(daddr, &pd->bdaddr, af);
						break;
#endif /* INET6 */
					}
				}
			}
		}
		/* Send a RST back for rejected TCP (return-rst / return). */
		if (pd->proto == IPPROTO_TCP &&
		    ((r->rule_flag & PFRULE_RETURNRST) ||
		    (r->rule_flag & PFRULE_RETURN)) &&
		    !(th->th_flags & TH_RST)) {
			u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
			int len = 0;
			struct ip *h4;
#if INET6
			struct ip6_hdr *h6;
#endif /* INET6 */

			switch (pd->af) {
			case AF_INET:
				h4 = pbuf->pb_data;
				len = ntohs(h4->ip_len) - off;
				break;
#if INET6
			case AF_INET6:
				h6 = pbuf->pb_data;
				len = ntohs(h6->ip6_plen) -
				    (off - sizeof (*h6));
				break;
#endif /* INET6 */
			}

			/* Don't RST packets with a bad TCP checksum. */
			if (pf_check_proto_cksum(pbuf, off, len, IPPROTO_TCP,
			    pd->af))
				REASON_SET(&reason, PFRES_PROTCKSUM);
			else {
				/* SYN/FIN each consume one sequence number. */
				if (th->th_flags & TH_SYN)
					ack++;
				if (th->th_flags & TH_FIN)
					ack++;
				pf_send_tcp(r, pd->af, pd->dst,
				    pd->src, th->th_dport, th->th_sport,
				    ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
				    r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp);
			}
		} else if (pd->proto != IPPROTO_ICMP && pd->af == AF_INET &&
		    pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
		    r->return_icmp)
			pf_send_icmp(pbuf, r->return_icmp >> 8,
			    r->return_icmp & 255, pd->af, r);
		else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
		    pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH &&
		    r->return_icmp6)
			pf_send_icmp(pbuf, r->return_icmp6 >> 8,
			    r->return_icmp6 & 255, pd->af, r);
	}

	if (r->action == PF_DROP) {
		return (PF_DROP);
	}

	/* prepare state key, for flowhash and/or the state (if created) */
	bzero(&psk, sizeof (psk));
	psk.proto = pd->proto;
	psk.direction = direction;
	if (pd->proto == IPPROTO_UDP) {
		/* IKE gets the strictest (address+port) external filter. */
		if (ntohs(pd->hdr.udp->uh_sport) == PF_IKE_PORT &&
		    ntohs(pd->hdr.udp->uh_dport) == PF_IKE_PORT) {
			psk.proto_variant = PF_EXTFILTER_APD;
		} else {
			psk.proto_variant = nr ? nr->extfilter : r->extfilter;
			if (psk.proto_variant < PF_EXTFILTER_APD)
				psk.proto_variant = PF_EXTFILTER_APD;
		}
	} else if (pd->proto == IPPROTO_GRE) {
		psk.proto_variant = pd->proto_variant;
	}
	if (direction == PF_OUT) {
		/* Outbound: gwy side holds the (already NATed) headers. */
		psk.af_gwy = af;
		PF_ACPY(&psk.gwy.addr, saddr, af);
		PF_ACPY(&psk.ext_gwy.addr, daddr, af);
		switch (pd->proto) {
		case IPPROTO_ESP:
			psk.gwy.xport.spi = 0;
			psk.ext_gwy.xport.spi = pd->hdr.esp->spi;
			break;
		case IPPROTO_ICMP:
#if INET6
		case IPPROTO_ICMPV6:
#endif
			/*
			 * NAT64 requires protocol translation between ICMPv4
			 * and ICMPv6. TCP and UDP do not require protocol
			 * translation. To avoid adding complexity just to
			 * handle ICMP(v4addr/v6addr), we always lookup for
			 * proto = IPPROTO_ICMP on both LAN and WAN side
			 */
			psk.proto = IPPROTO_ICMP;
			psk.gwy.xport.port = nxport.port;
			psk.ext_gwy.xport.spi = 0;
			break;
		default:
			psk.gwy.xport = sxport;
			psk.ext_gwy.xport = dxport;
			break;
		}
		/* lan side: pre-NAT values when translated, else a copy. */
		psk.af_lan = af;
		if (nr != NULL) {
			PF_ACPY(&psk.lan.addr, &pd->baddr, af);
			psk.lan.xport = bxport;
			PF_ACPY(&psk.ext_lan.addr, &pd->bdaddr, af);
			psk.ext_lan.xport = bdxport;
		} else {
			PF_ACPY(&psk.lan.addr, &psk.gwy.addr, af);
			psk.lan.xport = psk.gwy.xport;
			PF_ACPY(&psk.ext_lan.addr, &psk.ext_gwy.addr, af);
			psk.ext_lan.xport = psk.ext_gwy.xport;
		}
	} else {
		/* Inbound: lan side is the (rewritten) destination view. */
		psk.af_lan = af;
		if (nr && nr->action == PF_NAT64) {
			PF_ACPY(&psk.lan.addr, &pd->baddr, af);
			PF_ACPY(&psk.ext_lan.addr, &pd->bdaddr, af);
		} else {
			PF_ACPY(&psk.lan.addr, daddr, af);
			PF_ACPY(&psk.ext_lan.addr, saddr, af);
		}
		switch (pd->proto) {
		case IPPROTO_ICMP:
#if INET6
		case IPPROTO_ICMPV6:
#endif
			/*
			 * NAT64 requires protocol translation between ICMPv4
			 * and ICMPv6. TCP and UDP do not require protocol
			 * translation. To avoid adding complexity just to
			 * handle ICMP(v4addr/v6addr), we always lookup for
			 * proto = IPPROTO_ICMP on both LAN and WAN side
			 */
			psk.proto = IPPROTO_ICMP;
			if (nr && nr->action == PF_NAT64) {
				psk.lan.xport = bxport;
				psk.ext_lan.xport = bxport;
			} else {
				psk.lan.xport = nxport;
				psk.ext_lan.xport.spi = 0;
			}
			break;
		case IPPROTO_ESP:
			psk.ext_lan.xport.spi = 0;
			psk.lan.xport.spi = pd->hdr.esp->spi;
			break;
		default:
			if (nr != NULL) {
				if (nr->action == PF_NAT64) {
					psk.lan.xport = bxport;
					psk.ext_lan.xport = bdxport;
				} else {
					psk.lan.xport = dxport;
					psk.ext_lan.xport = sxport;
				}
			} else {
				psk.lan.xport = dxport;
				psk.ext_lan.xport = sxport;
			}
			break;
		}
		psk.af_gwy = pd->naf;
		if (nr != NULL) {
			if (nr->action == PF_NAT64) {
				PF_ACPY(&psk.gwy.addr, &pd->naddr, pd->naf);
				PF_ACPY(&psk.ext_gwy.addr, &pd->ndaddr,
				    pd->naf);
				if ((pd->proto == IPPROTO_ICMPV6) ||
				    (pd->proto == IPPROTO_ICMP)) {
					psk.gwy.xport = nxport;
					psk.ext_gwy.xport = nxport;
				} else {
					psk.gwy.xport = sxport;
					psk.ext_gwy.xport = dxport;
				}
			} else {
				PF_ACPY(&psk.gwy.addr, &pd->bdaddr, af);
				psk.gwy.xport = bdxport;
				PF_ACPY(&psk.ext_gwy.addr, saddr, af);
				psk.ext_gwy.xport = sxport;
			}
		} else {
			PF_ACPY(&psk.gwy.addr, &psk.lan.addr, af);
			psk.gwy.xport = psk.lan.xport;
			PF_ACPY(&psk.ext_gwy.addr, &psk.ext_lan.addr, af);
			psk.ext_gwy.xport = psk.ext_lan.xport;
		}
	}
	if (pd->pktflags & PKTF_FLOW_ID) {
		/* flow hash was already computed outside of PF */
		psk.flowsrc = pd->flowsrc;
		psk.flowhash = pd->flowhash;
	} else {
		/* compute flow hash and store it in state key */
		psk.flowsrc = FLOWSRC_PF;
		psk.flowhash = pf_calc_state_key_flowhash(&psk);
		pd->flowsrc = psk.flowsrc;
		pd->flowhash = psk.flowhash;
		pd->pktflags |= PKTF_FLOW_ID;
		pd->pktflags &= ~PKTF_FLOW_ADV;
	}

	if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, pd)) {
		REASON_SET(&reason, PFRES_MEMORY);
		return (PF_DROP);
	}

	/*
	 * Create state unless this is an ICMP error message, and only for
	 * keep-state rules, translated packets, or normalized TCP.
	 */
	if (!state_icmp && (r->keep_state || nr != NULL ||
	    (pd->flags & PFDESC_TCP_NORM))) {
		/* create new state */
		struct pf_state *s = NULL;
		struct pf_state_key *sk = NULL;
		struct pf_src_node *sn = NULL;
		struct pf_ike_hdr ike;

		/* Peek at the start of an IKE payload for app-state below. */
		if (pd->proto == IPPROTO_UDP) {
			size_t plen = pbuf->pb_packet_len - off - sizeof(*uh);

			if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
			    ntohs(uh->uh_dport) == PF_IKE_PORT &&
			    plen >= PF_IKE_PACKET_MINSIZE) {
				if (plen > PF_IKE_PACKET_MINSIZE)
					plen = PF_IKE_PACKET_MINSIZE;
				pbuf_copy_data(pbuf, off + sizeof (*uh), plen,
				    &ike);
			}
		}

		if (nr != NULL && pd->proto == IPPROTO_ESP &&
		    direction == PF_OUT) {
			struct pf_state_key_cmp sk0;
			struct pf_state *s0;

			/*
			 * <jhw@apple.com>
			 * This squelches state creation if the external
			 * address matches an existing incomplete state with a
			 * different internal address. Only one 'blocking'
			 * partial state is allowed for each external address.
			 */
			memset(&sk0, 0, sizeof (sk0));
			sk0.af_gwy = pd->af;
			sk0.proto = IPPROTO_ESP;
			PF_ACPY(&sk0.gwy.addr, saddr, sk0.af_gwy);
			PF_ACPY(&sk0.ext_gwy.addr, daddr, sk0.af_gwy);
			s0 = pf_find_state(kif, &sk0, PF_IN);

			if (s0 && PF_ANEQ(&s0->state_key->lan.addr,
			    pd->src, pd->af)) {
				nsn = 0;
				goto cleanup;
			}
		}

		/* check maximums */
		if (r->max_states && (r->states >= r->max_states)) {
			pf_status.lcounters[LCNT_STATES]++;
			REASON_SET(&reason, PFRES_MAXSTATES);
			goto cleanup;
		}
		/* src node for filter rule */
		if ((r->rule_flag & PFRULE_SRCTRACK ||
		    r->rpool.opts & PF_POOL_STICKYADDR) &&
		    pf_insert_src_node(&sn, r, saddr, af) != 0) {
			REASON_SET(&reason, PFRES_SRCLIMIT);
			goto cleanup;
		}
		/* src node for translation rule */
		if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
		    ((direction == PF_OUT &&
		    nr->action != PF_RDR &&
		    pf_insert_src_node(&nsn, nr, &pd->baddr, af) != 0) ||
		    (pf_insert_src_node(&nsn, nr, saddr, af) != 0))) {
			REASON_SET(&reason, PFRES_SRCLIMIT);
			goto cleanup;
		}
		s = pool_get(&pf_state_pl, PR_WAITOK);
		if (s == NULL) {
			REASON_SET(&reason, PFRES_MEMORY);
			/*
			 * Common error exit: undo any source-node insertions
			 * that are still unreferenced, free a dangling state
			 * key, and drop the packet.
			 */
cleanup:
			if (sn != NULL && sn->states == 0 && sn->expire == 0) {
				RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
				pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
				pf_status.src_nodes--;
				pool_put(&pf_src_tree_pl, sn);
			}
			if (nsn != sn && nsn != NULL && nsn->states == 0 &&
			    nsn->expire == 0) {
				RB_REMOVE(pf_src_tree, &tree_src_tracking, nsn);
				pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
				pf_status.src_nodes--;
				pool_put(&pf_src_tree_pl, nsn);
			}
			if (sk != NULL) {
				if (sk->app_state)
					pool_put(&pf_app_state_pl,
					    sk->app_state);
				pool_put(&pf_state_key_pl, sk);
			}
			return (PF_DROP);
		}
		bzero(s, sizeof (*s));
		TAILQ_INIT(&s->unlink_hooks);
		s->rule.ptr = r;
		s->nat_rule.ptr = nr;
		s->anchor.ptr = a;
		STATE_INC_COUNTERS(s);
		s->allow_opts = r->allow_opts;
		s->log = r->log & PF_LOG_ALL;
		if (nr != NULL)
			s->log |= nr->log & PF_LOG_ALL;
		/* Initialize per-protocol state-tracking parameters. */
		switch (pd->proto) {
		case IPPROTO_TCP:
			s->src.seqlo = ntohl(th->th_seq);
			s->src.seqhi = s->src.seqlo + pd->p_len + 1;
			if ((th->th_flags & (TH_SYN|TH_ACK)) ==
			    TH_SYN && r->keep_state == PF_STATE_MODULATE) {
				/* Generate sequence number modulator */
				if ((s->src.seqdiff = pf_tcp_iss(pd) -
				    s->src.seqlo) == 0)
					s->src.seqdiff = 1;
				pf_change_a(&th->th_seq, &th->th_sum,
				    htonl(s->src.seqlo + s->src.seqdiff), 0);
				rewrite = off + sizeof (*th);
			} else
				s->src.seqdiff = 0;
			if (th->th_flags & TH_SYN) {
				s->src.seqhi++;
				s->src.wscale = pf_get_wscale(pbuf, off,
				    th->th_off, af);
			}
			s->src.max_win = MAX(ntohs(th->th_win), 1);
			if (s->src.wscale & PF_WSCALE_MASK) {
				/* Remove scale factor from initial window */
				int win = s->src.max_win;
				win += 1 << (s->src.wscale & PF_WSCALE_MASK);
				s->src.max_win = (win - 1) >>
				    (s->src.wscale & PF_WSCALE_MASK);
			}
			if (th->th_flags & TH_FIN)
				s->src.seqhi++;
			s->dst.seqhi = 1;
			s->dst.max_win = 1;
			s->src.state = TCPS_SYN_SENT;
			s->dst.state = TCPS_CLOSED;
			s->timeout = PFTM_TCP_FIRST_PACKET;
			break;
		case IPPROTO_UDP:
			s->src.state = PFUDPS_SINGLE;
			s->dst.state = PFUDPS_NO_TRAFFIC;
			s->timeout = PFTM_UDP_FIRST_PACKET;
			break;
		case IPPROTO_ICMP:
#if INET6
		case IPPROTO_ICMPV6:
#endif
			s->timeout = PFTM_ICMP_FIRST_PACKET;
			break;
		case IPPROTO_GRE:
			s->src.state = PFGRE1S_INITIATING;
			s->dst.state = PFGRE1S_NO_TRAFFIC;
			s->timeout = PFTM_GREv1_INITIATING;
			break;
		case IPPROTO_ESP:
			s->src.state = PFESPS_INITIATING;
			s->dst.state = PFESPS_NO_TRAFFIC;
			s->timeout = PFTM_ESP_FIRST_PACKET;
			break;
		default:
			s->src.state = PFOTHERS_SINGLE;
			s->dst.state = PFOTHERS_NO_TRAFFIC;
			s->timeout = PFTM_OTHER_FIRST_PACKET;
		}

		s->creation = pf_time_second();
		s->expire = pf_time_second();

		/* Link the state to its (translation) source nodes. */
		if (sn != NULL) {
			s->src_node = sn;
			s->src_node->states++;
			VERIFY(s->src_node->states != 0);
		}
		if (nsn != NULL) {
			PF_ACPY(&nsn->raddr, &pd->naddr, af);
			s->nat_src_node = nsn;
			s->nat_src_node->states++;
			VERIFY(s->nat_src_node->states != 0);
		}
		if (pd->proto == IPPROTO_TCP) {
			if ((pd->flags & PFDESC_TCP_NORM) &&
			    pf_normalize_tcp_init(pbuf, off, pd, th, &s->src,
			    &s->dst)) {
				REASON_SET(&reason, PFRES_MEMORY);
				pf_src_tree_remove_state(s);
				STATE_DEC_COUNTERS(s);
				pool_put(&pf_state_pl, s);
				return (PF_DROP);
			}
			if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
			    pf_normalize_tcp_stateful(pbuf, off, pd, &reason,
			    th, s, &s->src, &s->dst, &rewrite)) {
				/* This really shouldn't happen!!! */
				DPFPRINTF(PF_DEBUG_URGENT,
				    ("pf_normalize_tcp_stateful failed on "
				    "first pkt"));
				pf_normalize_tcp_cleanup(s);
				pf_src_tree_remove_state(s);
				STATE_DEC_COUNTERS(s);
				pool_put(&pf_state_pl, s);
				return (PF_DROP);
			}
		}

		/* allocate state key and import values from psk */
		if ((sk = pf_alloc_state_key(s, &psk)) == NULL) {
			REASON_SET(&reason, PFRES_MEMORY);
			/*
			 * XXXSCW: This will leak the freshly-allocated
			 * state structure 's'. Although it should
			 * eventually be aged-out and removed.
			 */
			goto cleanup;
		}

		pf_set_rt_ifp(s, saddr, af);	/* needs s->state_key set */

		pbuf = pd->mp;	// XXXSCW: Why?

		/* Attach app-layer helpers (PPTP / IKE) when translated. */
		if (sk->app_state == 0) {
			switch (pd->proto) {
			case IPPROTO_TCP: {
				u_int16_t dport = (direction == PF_OUT) ?
				    sk->ext_gwy.xport.port : sk->gwy.xport.port;

				if (nr != NULL &&
				    ntohs(dport) == PF_PPTP_PORT) {
					struct pf_app_state *as;

					as = pool_get(&pf_app_state_pl,
					    PR_WAITOK);
					if (!as) {
						REASON_SET(&reason,
						    PFRES_MEMORY);
						goto cleanup;
					}

					bzero(as, sizeof (*as));
					as->handler = pf_pptp_handler;
					as->compare_lan_ext = 0;
					as->compare_ext_gwy = 0;
					as->u.pptp.grev1_state = 0;
					sk->app_state = as;
					(void) hook_establish(&s->unlink_hooks,
					    0, (hook_fn_t) pf_pptp_unlink, s);
				}
				break;
			}

			case IPPROTO_UDP: {
				if (nr != NULL &&
				    ntohs(uh->uh_sport) == PF_IKE_PORT &&
				    ntohs(uh->uh_dport) == PF_IKE_PORT) {
					struct pf_app_state *as;

					as = pool_get(&pf_app_state_pl,
					    PR_WAITOK);
					if (!as) {
						REASON_SET(&reason,
						    PFRES_MEMORY);
						goto cleanup;
					}

					bzero(as, sizeof (*as));
					as->compare_lan_ext = pf_ike_compare;
					as->compare_ext_gwy = pf_ike_compare;
					as->u.ike.cookie = ike.initiator_cookie;
					sk->app_state = as;
				}
				break;
			}

			default:
				break;
			}
		}

		if (pf_insert_state(BOUND_IFACE(r, kif), s)) {
			if (pd->proto == IPPROTO_TCP)
				pf_normalize_tcp_cleanup(s);
			REASON_SET(&reason, PFRES_STATEINS);
			pf_src_tree_remove_state(s);
			STATE_DEC_COUNTERS(s);
			pool_put(&pf_state_pl, s);
			return (PF_DROP);
		} else {
			*sm = s;
		}
		if (tag > 0) {
			pf_tag_ref(tag);
			s->tag = tag;
		}
		/*
		 * TCP SYN proxy: answer the initial SYN on behalf of the
		 * destination and drop the packet; the handshake completes
		 * later via PF_TCPS_PROXY_* state handling.
		 */
		if (pd->proto == IPPROTO_TCP &&
		    (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
		    r->keep_state == PF_STATE_SYNPROXY) {
			int ua = (sk->af_lan == sk->af_gwy) ? 1 : 0;
			s->src.state = PF_TCPS_PROXY_SRC;
			if (nr != NULL) {
				if (direction == PF_OUT) {
					pf_change_ap(direction, pd->mp, saddr,
					    &th->th_sport, pd->ip_sum,
					    &th->th_sum, &pd->baddr,
					    bxport.port, 0, af, pd->af, ua);
					sxport.port = th->th_sport;
				} else {
					pf_change_ap(direction, pd->mp, daddr,
					    &th->th_dport, pd->ip_sum,
					    &th->th_sum, &pd->baddr,
					    bxport.port, 0, af, pd->af, ua);
					/*
					 * NOTE(review): this records the
					 * *destination* port into sxport —
					 * dxport.port would be expected here;
					 * verify intent.
					 */
					sxport.port = th->th_dport;
				}
			}
			s->src.seqhi = htonl(random());
			/* Find mss option */
			mss = pf_get_mss(pbuf, off, th->th_off, af);
			mss = pf_calc_mss(saddr, af, mss);
			mss = pf_calc_mss(daddr, af, mss);
			s->src.mss = mss;
			pf_send_tcp(r, af, daddr, saddr, th->th_dport,
			    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
			    TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL);
			REASON_SET(&reason, PFRES_SYNPROXY);
			return (PF_SYNPROXY_DROP);
		}

		/* Run the app-layer handler (PPTP) over the payload. */
		if (sk->app_state && sk->app_state->handler) {
			int offx = off;

			switch (pd->proto) {
			case IPPROTO_TCP:
				/* th_off is in 32-bit words */
				offx += th->th_off << 2;
				break;
			case IPPROTO_UDP:
				/*
				 * NOTE(review): uh_ulen is the UDP length in
				 * bytes (network order), not 32-bit words —
				 * '<< 2' without ntohs() looks wrong; the UDP
				 * header is a fixed 8 bytes. Verify.
				 */
				offx += pd->hdr.udp->uh_ulen << 2;
				break;
			default:
				/* ALG handlers only apply to TCP and UDP rules */
				break;
			}

			if (offx > off) {
				sk->app_state->handler(s, direction, offx,
				    pd, kif);
				if (pd->lmw < 0) {
					REASON_SET(&reason, PFRES_MEMORY);
					return (PF_DROP);
				}
				pbuf = pd->mp;	// XXXSCW: Why?
			}
		}
	}

	/* copy back packet headers if we performed NAT operations */
	if (rewrite) {
		if (rewrite < off + hdrlen)
			rewrite = off + hdrlen;

		if (pf_lazy_makewritable(pd, pd->mp, rewrite) == NULL) {
			REASON_SET(&reason, PFRES_MEMORY);
			return (PF_DROP);
		}

		pbuf_copy_back(pbuf, off, hdrlen, pd->hdr.any);
		/* NAT64: re-inject into the other address family's stack. */
		if (af == AF_INET6 && pd->naf == AF_INET)
			return pf_nat64_ipv6(pbuf, off, pd);
		else if (af == AF_INET && pd->naf == AF_INET6)
			return pf_nat64_ipv4(pbuf, off, pd);

	}

	return (PF_PASS);
}
5755
5756 boolean_t is_nlc_enabled_glb = FALSE;
5757
5758 static inline boolean_t
5759 pf_is_dummynet_enabled(void)
5760 {
5761 #if DUMMYNET
5762 if (__probable(!PF_IS_ENABLED))
5763 return (FALSE);
5764
5765 if (__probable(!DUMMYNET_LOADED))
5766 return (FALSE);
5767
5768 if (__probable(TAILQ_EMPTY(pf_main_ruleset.
5769 rules[PF_RULESET_DUMMYNET].active.ptr)))
5770 return (FALSE);
5771
5772 return (TRUE);
5773 #else
5774 return (FALSE);
5775 #endif /* DUMMYNET */
5776 }
5777
5778 boolean_t
5779 pf_is_nlc_enabled(void)
5780 {
5781 #if DUMMYNET
5782 if (__probable(!pf_is_dummynet_enabled()))
5783 return (FALSE);
5784
5785 if (__probable(!is_nlc_enabled_glb))
5786 return (FALSE);
5787
5788 return (TRUE);
5789 #else
5790 return (FALSE);
5791 #endif /* DUMMYNET */
5792 }
5793
5794 #if DUMMYNET
5795 /*
5796 * When pf_test_dummynet() returns PF_PASS, the rule matching parameter "rm"
5797 * remains unchanged, meaning the packet did not match a dummynet rule.
5798 * when the packet does match a dummynet rule, pf_test_dummynet() returns
5799 * PF_PASS and zero out the mbuf rule as the packet is effectively siphoned
5800 * out by dummynet.
5801 */
5802 static int
5803 pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif,
5804 pbuf_t **pbuf0, struct pf_pdesc *pd, struct ip_fw_args *fwa)
5805 {
5806 pbuf_t *pbuf = *pbuf0;
5807 struct pf_rule *am = NULL;
5808 struct pf_ruleset *rsm = NULL;
5809 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
5810 sa_family_t af = pd->af;
5811 struct pf_rule *r, *a = NULL;
5812 struct pf_ruleset *ruleset = NULL;
5813 struct tcphdr *th = pd->hdr.tcp;
5814 u_short reason;
5815 int hdrlen = 0;
5816 int tag = -1;
5817 unsigned int rtableid = IFSCOPE_NONE;
5818 int asd = 0;
5819 int match = 0;
5820 u_int8_t icmptype = 0, icmpcode = 0;
5821 struct ip_fw_args dnflow;
5822 struct pf_rule *prev_matching_rule = fwa ? fwa->fwa_pf_rule : NULL;
5823 int found_prev_rule = (prev_matching_rule) ? 0 : 1;
5824
5825 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
5826
5827 if (!pf_is_dummynet_enabled())
5828 return (PF_PASS);
5829
5830 bzero(&dnflow, sizeof(dnflow));
5831
5832 hdrlen = 0;
5833
5834 /* Fragments don't gave protocol headers */
5835 if (!(pd->flags & PFDESC_IP_FRAG))
5836 switch (pd->proto) {
5837 case IPPROTO_TCP:
5838 dnflow.fwa_id.flags = pd->hdr.tcp->th_flags;
5839 dnflow.fwa_id.dst_port = ntohs(pd->hdr.tcp->th_dport);
5840 dnflow.fwa_id.src_port = ntohs(pd->hdr.tcp->th_sport);
5841 hdrlen = sizeof (*th);
5842 break;
5843 case IPPROTO_UDP:
5844 dnflow.fwa_id.dst_port = ntohs(pd->hdr.udp->uh_dport);
5845 dnflow.fwa_id.src_port = ntohs(pd->hdr.udp->uh_sport);
5846 hdrlen = sizeof (*pd->hdr.udp);
5847 break;
5848 #if INET
5849 case IPPROTO_ICMP:
5850 if (af != AF_INET)
5851 break;
5852 hdrlen = ICMP_MINLEN;
5853 icmptype = pd->hdr.icmp->icmp_type;
5854 icmpcode = pd->hdr.icmp->icmp_code;
5855 break;
5856 #endif /* INET */
5857 #if INET6
5858 case IPPROTO_ICMPV6:
5859 if (af != AF_INET6)
5860 break;
5861 hdrlen = sizeof (*pd->hdr.icmp6);
5862 icmptype = pd->hdr.icmp6->icmp6_type;
5863 icmpcode = pd->hdr.icmp6->icmp6_code;
5864 break;
5865 #endif /* INET6 */
5866 case IPPROTO_GRE:
5867 if (pd->proto_variant == PF_GRE_PPTP_VARIANT)
5868 hdrlen = sizeof (*pd->hdr.grev1);
5869 break;
5870 case IPPROTO_ESP:
5871 hdrlen = sizeof (*pd->hdr.esp);
5872 break;
5873 }
5874
5875 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_DUMMYNET].active.ptr);
5876
5877 while (r != NULL) {
5878 r->evaluations++;
5879 if (pfi_kif_match(r->kif, kif) == r->ifnot)
5880 r = r->skip[PF_SKIP_IFP].ptr;
5881 else if (r->direction && r->direction != direction)
5882 r = r->skip[PF_SKIP_DIR].ptr;
5883 else if (r->af && r->af != af)
5884 r = r->skip[PF_SKIP_AF].ptr;
5885 else if (r->proto && r->proto != pd->proto)
5886 r = r->skip[PF_SKIP_PROTO].ptr;
5887 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
5888 r->src.neg, kif))
5889 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
5890 /* tcp/udp only. port_op always 0 in other cases */
5891 else if (r->proto == pd->proto &&
5892 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
5893 ((pd->flags & PFDESC_IP_FRAG) ||
5894 ((r->src.xport.range.op &&
5895 !pf_match_port(r->src.xport.range.op,
5896 r->src.xport.range.port[0], r->src.xport.range.port[1],
5897 th->th_sport)))))
5898 r = r->skip[PF_SKIP_SRC_PORT].ptr;
5899 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
5900 r->dst.neg, NULL))
5901 r = r->skip[PF_SKIP_DST_ADDR].ptr;
5902 /* tcp/udp only. port_op always 0 in other cases */
5903 else if (r->proto == pd->proto &&
5904 (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) &&
5905 r->dst.xport.range.op &&
5906 ((pd->flags & PFDESC_IP_FRAG) ||
5907 !pf_match_port(r->dst.xport.range.op,
5908 r->dst.xport.range.port[0], r->dst.xport.range.port[1],
5909 th->th_dport)))
5910 r = r->skip[PF_SKIP_DST_PORT].ptr;
5911 /* icmp only. type always 0 in other cases */
5912 else if (r->type &&
5913 ((pd->flags & PFDESC_IP_FRAG) ||
5914 r->type != icmptype + 1))
5915 r = TAILQ_NEXT(r, entries);
5916 /* icmp only. type always 0 in other cases */
5917 else if (r->code &&
5918 ((pd->flags & PFDESC_IP_FRAG) ||
5919 r->code != icmpcode + 1))
5920 r = TAILQ_NEXT(r, entries);
5921 else if (r->tos && !(r->tos == pd->tos))
5922 r = TAILQ_NEXT(r, entries);
5923 else if (r->rule_flag & PFRULE_FRAGMENT)
5924 r = TAILQ_NEXT(r, entries);
5925 else if (pd->proto == IPPROTO_TCP &&
5926 ((pd->flags & PFDESC_IP_FRAG) ||
5927 (r->flagset & th->th_flags) != r->flags))
5928 r = TAILQ_NEXT(r, entries);
5929 else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1))
5930 r = TAILQ_NEXT(r, entries);
5931 else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag))
5932 r = TAILQ_NEXT(r, entries);
5933 else {
5934 /*
5935 * Need to go past the previous dummynet matching rule
5936 */
5937 if (r->anchor == NULL) {
5938 if (found_prev_rule) {
5939 if (r->tag)
5940 tag = r->tag;
5941 if (PF_RTABLEID_IS_VALID(r->rtableid))
5942 rtableid = r->rtableid;
5943 match = 1;
5944 *rm = r;
5945 am = a;
5946 rsm = ruleset;
5947 if ((*rm)->quick)
5948 break;
5949 } else if (r == prev_matching_rule) {
5950 found_prev_rule = 1;
5951 }
5952 r = TAILQ_NEXT(r, entries);
5953 } else {
5954 pf_step_into_anchor(&asd, &ruleset,
5955 PF_RULESET_DUMMYNET, &r, &a, &match);
5956 }
5957 }
5958 if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
5959 PF_RULESET_DUMMYNET, &r, &a, &match))
5960 break;
5961 }
5962 r = *rm;
5963 a = am;
5964 ruleset = rsm;
5965
5966 if (!match)
5967 return (PF_PASS);
5968
5969 REASON_SET(&reason, PFRES_DUMMYNET);
5970
5971 if (r->log) {
5972 PFLOG_PACKET(kif, h, pbuf, af, direction, reason, r,
5973 a, ruleset, pd);
5974 }
5975
5976 if (r->action == PF_NODUMMYNET) {
5977 int dirndx = (direction == PF_OUT);
5978
5979 r->packets[dirndx]++;
5980 r->bytes[dirndx] += pd->tot_len;
5981
5982 return (PF_PASS);
5983 }
5984 if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, pd)) {
5985 REASON_SET(&reason, PFRES_MEMORY);
5986
5987 return (PF_DROP);
5988 }
5989
5990 if (r->dnpipe && ip_dn_io_ptr != NULL) {
5991 struct mbuf *m;
5992 int dirndx = (direction == PF_OUT);
5993
5994 r->packets[dirndx]++;
5995 r->bytes[dirndx] += pd->tot_len;
5996
5997 dnflow.fwa_cookie = r->dnpipe;
5998 dnflow.fwa_pf_rule = r;
5999 dnflow.fwa_id.proto = pd->proto;
6000 dnflow.fwa_flags = r->dntype;
6001 switch (af) {
6002 case AF_INET:
6003 dnflow.fwa_id.addr_type = 4;
6004 dnflow.fwa_id.src_ip = ntohl(saddr->v4addr.s_addr);
6005 dnflow.fwa_id.dst_ip = ntohl(daddr->v4addr.s_addr);
6006 break;
6007 case AF_INET6:
6008 dnflow.fwa_id.addr_type = 6;
6009 dnflow.fwa_id.src_ip6 = saddr->v6addr;
6010 dnflow.fwa_id.dst_ip6 = saddr->v6addr;
6011 break;
6012 }
6013
6014 if (fwa != NULL) {
6015 dnflow.fwa_oif = fwa->fwa_oif;
6016 dnflow.fwa_oflags = fwa->fwa_oflags;
6017 /*
6018 * Note that fwa_ro, fwa_dst and fwa_ipoa are
6019 * actually in a union so the following does work
6020 * for both IPv4 and IPv6
6021 */
6022 dnflow.fwa_ro = fwa->fwa_ro;
6023 dnflow.fwa_dst = fwa->fwa_dst;
6024 dnflow.fwa_ipoa = fwa->fwa_ipoa;
6025 dnflow.fwa_ro6_pmtu = fwa->fwa_ro6_pmtu;
6026 dnflow.fwa_origifp = fwa->fwa_origifp;
6027 dnflow.fwa_mtu = fwa->fwa_mtu;
6028 dnflow.fwa_alwaysfrag = fwa->fwa_alwaysfrag;
6029 dnflow.fwa_unfragpartlen = fwa->fwa_unfragpartlen;
6030 dnflow.fwa_exthdrs = fwa->fwa_exthdrs;
6031 }
6032
6033 if (af == AF_INET) {
6034 struct ip *iphdr = pbuf->pb_data;
6035 NTOHS(iphdr->ip_len);
6036 NTOHS(iphdr->ip_off);
6037 }
6038 /*
6039 * Don't need to unlock pf_lock as NET_THREAD_HELD_PF
6040 * allows for recursive behavior
6041 */
6042 m = pbuf_to_mbuf(pbuf, TRUE);
6043 if (m != NULL) {
6044 ip_dn_io_ptr(m,
6045 dnflow.fwa_cookie, (af == AF_INET) ?
6046 ((direction==PF_IN) ? DN_TO_IP_IN : DN_TO_IP_OUT) :
6047 ((direction==PF_IN) ? DN_TO_IP6_IN : DN_TO_IP6_OUT),
6048 &dnflow, DN_CLIENT_PF);
6049 }
6050
6051 /*
6052 * The packet is siphoned out by dummynet so return a NULL
6053 * pbuf so the caller can still return success.
6054 */
6055 *pbuf0 = NULL;
6056
6057 return (PF_PASS);
6058 }
6059
6060 return (PF_PASS);
6061 }
6062 #endif /* DUMMYNET */
6063
/*
 * Filter an IP fragment (a packet with no protocol header available)
 * against the active filter ruleset.
 *
 * Uses the same skip-step rule walk as the full test path, but any rule
 * that needs protocol-header details (ports, TCP flags, ICMP type/code,
 * OS fingerprint) cannot match a fragment and is stepped over.
 *
 * rm/am/rsm return the matching rule, anchor rule, and ruleset.
 * Returns PF_PASS or PF_DROP.
 */
static int
pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
    pbuf_t *pbuf, void *h, struct pf_pdesc *pd, struct pf_rule **am,
    struct pf_ruleset **rsm)
{
#pragma unused(h)
	struct pf_rule *r, *a = NULL;
	struct pf_ruleset *ruleset = NULL;
	sa_family_t af = pd->af;
	u_short reason;
	int tag = -1;
	int asd = 0;
	int match = 0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		/*
		 * Mismatches on indexed fields use the precomputed skip
		 * steps to jump past runs of rules that fail the same test.
		 */
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != direction)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		/* TOS / DSCP / service-class matching on the IP header */
		else if ((r->rule_flag & PFRULE_TOS) && r->tos &&
		    !(r->tos & pd->tos))
			r = TAILQ_NEXT(r, entries);
		else if ((r->rule_flag & PFRULE_DSCP) && r->tos &&
		    !(r->tos & (pd->tos & DSCP_MASK)))
			r = TAILQ_NEXT(r, entries);
		else if ((r->rule_flag & PFRULE_SC) && r->tos &&
		    ((r->tos & SCIDX_MASK) != pd->sc))
			r = TAILQ_NEXT(r, entries);
		/*
		 * Rules that require protocol-header information can never
		 * match a fragment: OS fingerprints, port operators, TCP
		 * flag sets, ICMP type/code.
		 */
		else if (r->os_fingerprint != PF_OSFP_ANY)
			r = TAILQ_NEXT(r, entries);
		else if (pd->proto == IPPROTO_UDP &&
		    (r->src.xport.range.op || r->dst.xport.range.op))
			r = TAILQ_NEXT(r, entries);
		else if (pd->proto == IPPROTO_TCP &&
		    (r->src.xport.range.op || r->dst.xport.range.op ||
		    r->flagset))
			r = TAILQ_NEXT(r, entries);
		else if ((pd->proto == IPPROTO_ICMP ||
		    pd->proto == IPPROTO_ICMPV6) &&
		    (r->type || r->code))
			r = TAILQ_NEXT(r, entries);
		/* probabilistic match: r->prob is a scaled threshold */
		else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1))
			r = TAILQ_NEXT(r, entries);
		else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag))
			r = TAILQ_NEXT(r, entries);
		else {
			if (r->anchor == NULL) {
				/* concrete rule matched; last match wins
				 * unless the rule is "quick" */
				match = 1;
				*rm = r;
				*am = a;
				*rsm = ruleset;
				if ((*rm)->quick)
					break;
				r = TAILQ_NEXT(r, entries);
			} else
				pf_step_into_anchor(&asd, &ruleset,
				    PF_RULESET_FILTER, &r, &a, &match);
		}
		if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset,
		    PF_RULESET_FILTER, &r, &a, &match))
			break;
	}
	r = *rm;
	a = *am;
	ruleset = *rsm;

	REASON_SET(&reason, PFRES_MATCH);

	if (r->log)
		PFLOG_PACKET(kif, h, pbuf, af, direction, reason, r, a, ruleset,
		    pd);

	if (r->action != PF_PASS)
		return (PF_DROP);

	/* -1 rtableid: fragments carry no route-table override */
	if (pf_tag_packet(pbuf, pd->pf_mtag, tag, -1, NULL)) {
		REASON_SET(&reason, PFRES_MEMORY);
		return (PF_DROP);
	}

	return (PF_PASS);
}
6158
6159 static void
6160 pf_pptp_handler(struct pf_state *s, int direction, int off,
6161 struct pf_pdesc *pd, struct pfi_kif *kif)
6162 {
6163 #pragma unused(direction)
6164 struct tcphdr *th;
6165 struct pf_pptp_state *pptps;
6166 struct pf_pptp_ctrl_msg cm;
6167 size_t plen, tlen;
6168 struct pf_state *gs;
6169 u_int16_t ct;
6170 u_int16_t *pac_call_id;
6171 u_int16_t *pns_call_id;
6172 u_int16_t *spoof_call_id;
6173 u_int8_t *pac_state;
6174 u_int8_t *pns_state;
6175 enum { PF_PPTP_PASS, PF_PPTP_INSERT_GRE, PF_PPTP_REMOVE_GRE } op;
6176 pbuf_t *pbuf;
6177 struct pf_state_key *sk;
6178 struct pf_state_key *gsk;
6179 struct pf_app_state *gas;
6180
6181 sk = s->state_key;
6182 pptps = &sk->app_state->u.pptp;
6183 gs = pptps->grev1_state;
6184
6185 if (gs)
6186 gs->expire = pf_time_second();
6187
6188 pbuf = pd->mp;
6189 plen = min(sizeof (cm), pbuf->pb_packet_len - off);
6190 if (plen < PF_PPTP_CTRL_MSG_MINSIZE)
6191 return;
6192 tlen = plen - PF_PPTP_CTRL_MSG_MINSIZE;
6193 pbuf_copy_data(pbuf, off, plen, &cm);
6194
6195 if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER)
6196 return;
6197 if (ntohs(cm.hdr.type) != 1)
6198 return;
6199
6200 #define TYPE_LEN_CHECK(_type, _name) \
6201 case PF_PPTP_CTRL_TYPE_##_type: \
6202 if (tlen < sizeof(struct pf_pptp_ctrl_##_name)) \
6203 return; \
6204 break;
6205
6206 switch (cm.ctrl.type) {
6207 TYPE_LEN_CHECK(START_REQ, start_req);
6208 TYPE_LEN_CHECK(START_RPY, start_rpy);
6209 TYPE_LEN_CHECK(STOP_REQ, stop_req);
6210 TYPE_LEN_CHECK(STOP_RPY, stop_rpy);
6211 TYPE_LEN_CHECK(ECHO_REQ, echo_req);
6212 TYPE_LEN_CHECK(ECHO_RPY, echo_rpy);
6213 TYPE_LEN_CHECK(CALL_OUT_REQ, call_out_req);
6214 TYPE_LEN_CHECK(CALL_OUT_RPY, call_out_rpy);
6215 TYPE_LEN_CHECK(CALL_IN_1ST, call_in_1st);
6216 TYPE_LEN_CHECK(CALL_IN_2ND, call_in_2nd);
6217 TYPE_LEN_CHECK(CALL_IN_3RD, call_in_3rd);
6218 TYPE_LEN_CHECK(CALL_CLR, call_clr);
6219 TYPE_LEN_CHECK(CALL_DISC, call_disc);
6220 TYPE_LEN_CHECK(ERROR, error);
6221 TYPE_LEN_CHECK(SET_LINKINFO, set_linkinfo);
6222 default:
6223 return;
6224 }
6225 #undef TYPE_LEN_CHECK
6226
6227 if (!gs) {
6228 gs = pool_get(&pf_state_pl, PR_WAITOK);
6229 if (!gs)
6230 return;
6231
6232 memcpy(gs, s, sizeof (*gs));
6233
6234 memset(&gs->entry_id, 0, sizeof (gs->entry_id));
6235 memset(&gs->entry_list, 0, sizeof (gs->entry_list));
6236
6237 TAILQ_INIT(&gs->unlink_hooks);
6238 gs->rt_kif = NULL;
6239 gs->creation = 0;
6240 gs->pfsync_time = 0;
6241 gs->packets[0] = gs->packets[1] = 0;
6242 gs->bytes[0] = gs->bytes[1] = 0;
6243 gs->timeout = PFTM_UNLINKED;
6244 gs->id = gs->creatorid = 0;
6245 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
6246 gs->src.scrub = gs->dst.scrub = 0;
6247
6248 gas = pool_get(&pf_app_state_pl, PR_NOWAIT);
6249 if (!gas) {
6250 pool_put(&pf_state_pl, gs);
6251 return;
6252 }
6253
6254 gsk = pf_alloc_state_key(gs, NULL);
6255 if (!gsk) {
6256 pool_put(&pf_app_state_pl, gas);
6257 pool_put(&pf_state_pl, gs);
6258 return;
6259 }
6260
6261 memcpy(&gsk->lan, &sk->lan, sizeof (gsk->lan));
6262 memcpy(&gsk->gwy, &sk->gwy, sizeof (gsk->gwy));
6263 memcpy(&gsk->ext_lan, &sk->ext_lan, sizeof (gsk->ext_lan));
6264 memcpy(&gsk->ext_gwy, &sk->ext_gwy, sizeof (gsk->ext_gwy));
6265 gsk->af_lan = sk->af_lan;
6266 gsk->af_gwy = sk->af_gwy;
6267 gsk->proto = IPPROTO_GRE;
6268 gsk->proto_variant = PF_GRE_PPTP_VARIANT;
6269 gsk->app_state = gas;
6270 gsk->lan.xport.call_id = 0;
6271 gsk->gwy.xport.call_id = 0;
6272 gsk->ext_lan.xport.call_id = 0;
6273 gsk->ext_gwy.xport.call_id = 0;
6274 gsk->flowsrc = FLOWSRC_PF;
6275 gsk->flowhash = pf_calc_state_key_flowhash(gsk);
6276 memset(gas, 0, sizeof (*gas));
6277 gas->u.grev1.pptp_state = s;
6278 STATE_INC_COUNTERS(gs);
6279 pptps->grev1_state = gs;
6280 (void) hook_establish(&gs->unlink_hooks, 0,
6281 (hook_fn_t) pf_grev1_unlink, gs);
6282 } else {
6283 gsk = gs->state_key;
6284 }
6285
6286 switch (sk->direction) {
6287 case PF_IN:
6288 pns_call_id = &gsk->ext_lan.xport.call_id;
6289 pns_state = &gs->dst.state;
6290 pac_call_id = &gsk->lan.xport.call_id;
6291 pac_state = &gs->src.state;
6292 break;
6293
6294 case PF_OUT:
6295 pns_call_id = &gsk->lan.xport.call_id;
6296 pns_state = &gs->src.state;
6297 pac_call_id = &gsk->ext_lan.xport.call_id;
6298 pac_state = &gs->dst.state;
6299 break;
6300
6301 default:
6302 DPFPRINTF(PF_DEBUG_URGENT,
6303 ("pf_pptp_handler: bad directional!\n"));
6304 return;
6305 }
6306
6307 spoof_call_id = 0;
6308 op = PF_PPTP_PASS;
6309
6310 ct = ntohs(cm.ctrl.type);
6311
6312 switch (ct) {
6313 case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ:
6314 *pns_call_id = cm.msg.call_out_req.call_id;
6315 *pns_state = PFGRE1S_INITIATING;
6316 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
6317 spoof_call_id = &cm.msg.call_out_req.call_id;
6318 break;
6319
6320 case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY:
6321 *pac_call_id = cm.msg.call_out_rpy.call_id;
6322 if (s->nat_rule.ptr)
6323 spoof_call_id =
6324 (pac_call_id == &gsk->lan.xport.call_id) ?
6325 &cm.msg.call_out_rpy.call_id :
6326 &cm.msg.call_out_rpy.peer_call_id;
6327 if (gs->timeout == PFTM_UNLINKED) {
6328 *pac_state = PFGRE1S_INITIATING;
6329 op = PF_PPTP_INSERT_GRE;
6330 }
6331 break;
6332
6333 case PF_PPTP_CTRL_TYPE_CALL_IN_1ST:
6334 *pns_call_id = cm.msg.call_in_1st.call_id;
6335 *pns_state = PFGRE1S_INITIATING;
6336 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
6337 spoof_call_id = &cm.msg.call_in_1st.call_id;
6338 break;
6339
6340 case PF_PPTP_CTRL_TYPE_CALL_IN_2ND:
6341 *pac_call_id = cm.msg.call_in_2nd.call_id;
6342 *pac_state = PFGRE1S_INITIATING;
6343 if (s->nat_rule.ptr)
6344 spoof_call_id =
6345 (pac_call_id == &gsk->lan.xport.call_id) ?
6346 &cm.msg.call_in_2nd.call_id :
6347 &cm.msg.call_in_2nd.peer_call_id;
6348 break;
6349
6350 case PF_PPTP_CTRL_TYPE_CALL_IN_3RD:
6351 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
6352 spoof_call_id = &cm.msg.call_in_3rd.call_id;
6353 if (cm.msg.call_in_3rd.call_id != *pns_call_id) {
6354 break;
6355 }
6356 if (gs->timeout == PFTM_UNLINKED)
6357 op = PF_PPTP_INSERT_GRE;
6358 break;
6359
6360 case PF_PPTP_CTRL_TYPE_CALL_CLR:
6361 if (cm.msg.call_clr.call_id != *pns_call_id)
6362 op = PF_PPTP_REMOVE_GRE;
6363 break;
6364
6365 case PF_PPTP_CTRL_TYPE_CALL_DISC:
6366 if (cm.msg.call_clr.call_id != *pac_call_id)
6367 op = PF_PPTP_REMOVE_GRE;
6368 break;
6369
6370 case PF_PPTP_CTRL_TYPE_ERROR:
6371 if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id)
6372 spoof_call_id = &cm.msg.error.peer_call_id;
6373 break;
6374
6375 case PF_PPTP_CTRL_TYPE_SET_LINKINFO:
6376 if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id)
6377 spoof_call_id = &cm.msg.set_linkinfo.peer_call_id;
6378 break;
6379
6380 default:
6381 op = PF_PPTP_PASS;
6382 break;
6383 }
6384
6385 if (!gsk->gwy.xport.call_id && gsk->lan.xport.call_id) {
6386 gsk->gwy.xport.call_id = gsk->lan.xport.call_id;
6387 if (spoof_call_id) {
6388 u_int16_t call_id = 0;
6389 int n = 0;
6390 struct pf_state_key_cmp key;
6391
6392 key.af_gwy = gsk->af_gwy;
6393 key.proto = IPPROTO_GRE;
6394 key.proto_variant = PF_GRE_PPTP_VARIANT;
6395 PF_ACPY(&key.gwy.addr, &gsk->gwy.addr, key.af_gwy);
6396 PF_ACPY(&key.ext_gwy.addr, &gsk->ext_gwy.addr, key.af_gwy);
6397 key.gwy.xport.call_id = gsk->gwy.xport.call_id;
6398 key.ext_gwy.xport.call_id = gsk->ext_gwy.xport.call_id;
6399 do {
6400 call_id = htonl(random());
6401 } while (!call_id);
6402
6403 while (pf_find_state_all(&key, PF_IN, 0)) {
6404 call_id = ntohs(call_id);
6405 --call_id;
6406 if (--call_id == 0) call_id = 0xffff;
6407 call_id = htons(call_id);
6408
6409 key.gwy.xport.call_id = call_id;
6410
6411 if (++n > 65535) {
6412 DPFPRINTF(PF_DEBUG_URGENT,
6413 ("pf_pptp_handler: failed to spoof "
6414 "call id\n"));
6415 key.gwy.xport.call_id = 0;
6416 break;
6417 }
6418 }
6419
6420 gsk->gwy.xport.call_id = call_id;
6421 }
6422 }
6423
6424 th = pd->hdr.tcp;
6425
6426 if (spoof_call_id && gsk->lan.xport.call_id != gsk->gwy.xport.call_id) {
6427 if (*spoof_call_id == gsk->gwy.xport.call_id) {
6428 *spoof_call_id = gsk->lan.xport.call_id;
6429 th->th_sum = pf_cksum_fixup(th->th_sum,
6430 gsk->gwy.xport.call_id, gsk->lan.xport.call_id, 0);
6431 } else {
6432 *spoof_call_id = gsk->gwy.xport.call_id;
6433 th->th_sum = pf_cksum_fixup(th->th_sum,
6434 gsk->lan.xport.call_id, gsk->gwy.xport.call_id, 0);
6435 }
6436
6437 if (pf_lazy_makewritable(pd, pbuf, off + plen) == NULL) {
6438 pptps->grev1_state = NULL;
6439 STATE_DEC_COUNTERS(gs);
6440 pool_put(&pf_state_pl, gs);
6441 return;
6442 }
6443 pbuf_copy_back(pbuf, off, plen, &cm);
6444 }
6445
6446 switch (op) {
6447 case PF_PPTP_REMOVE_GRE:
6448 gs->timeout = PFTM_PURGE;
6449 gs->src.state = gs->dst.state = PFGRE1S_NO_TRAFFIC;
6450 gsk->lan.xport.call_id = 0;
6451 gsk->gwy.xport.call_id = 0;
6452 gsk->ext_lan.xport.call_id = 0;
6453 gsk->ext_gwy.xport.call_id = 0;
6454 gs->id = gs->creatorid = 0;
6455 break;
6456
6457 case PF_PPTP_INSERT_GRE:
6458 gs->creation = pf_time_second();
6459 gs->expire = pf_time_second();
6460 gs->timeout = PFTM_TCP_ESTABLISHED;
6461 if (gs->src_node != NULL) {
6462 ++gs->src_node->states;
6463 VERIFY(gs->src_node->states != 0);
6464 }
6465 if (gs->nat_src_node != NULL) {
6466 ++gs->nat_src_node->states;
6467 VERIFY(gs->nat_src_node->states != 0);
6468 }
6469 pf_set_rt_ifp(gs, &sk->lan.addr, sk->af_lan);
6470 if (pf_insert_state(BOUND_IFACE(s->rule.ptr, kif), gs)) {
6471
6472 /*
6473 * <jhw@apple.com>
6474 * FIX ME: insertion can fail when multiple PNS
6475 * behind the same NAT open calls to the same PAC
6476 * simultaneously because spoofed call ID numbers
6477 * are chosen before states are inserted. This is
6478 * hard to fix and happens infrequently enough that
6479 * users will normally try again and this ALG will
6480 * succeed. Failures are expected to be rare enough
6481 * that fixing this is a low priority.
6482 */
6483 pptps->grev1_state = NULL;
6484 pd->lmw = -1; /* Force PF_DROP on PFRES_MEMORY */
6485 pf_src_tree_remove_state(gs);
6486 STATE_DEC_COUNTERS(gs);
6487 pool_put(&pf_state_pl, gs);
6488 DPFPRINTF(PF_DEBUG_URGENT, ("pf_pptp_handler: error "
6489 "inserting GREv1 state.\n"));
6490 }
6491 break;
6492
6493 default:
6494 break;
6495 }
6496 }
6497
6498 static void
6499 pf_pptp_unlink(struct pf_state *s)
6500 {
6501 struct pf_app_state *as = s->state_key->app_state;
6502 struct pf_state *grev1s = as->u.pptp.grev1_state;
6503
6504 if (grev1s) {
6505 struct pf_app_state *gas = grev1s->state_key->app_state;
6506
6507 if (grev1s->timeout < PFTM_MAX)
6508 grev1s->timeout = PFTM_PURGE;
6509 gas->u.grev1.pptp_state = NULL;
6510 as->u.pptp.grev1_state = NULL;
6511 }
6512 }
6513
6514 static void
6515 pf_grev1_unlink(struct pf_state *s)
6516 {
6517 struct pf_app_state *as = s->state_key->app_state;
6518 struct pf_state *pptps = as->u.grev1.pptp_state;
6519
6520 if (pptps) {
6521 struct pf_app_state *pas = pptps->state_key->app_state;
6522
6523 pas->u.pptp.grev1_state = NULL;
6524 as->u.grev1.pptp_state = NULL;
6525 }
6526 }
6527
6528 static int
6529 pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b)
6530 {
6531 int64_t d = a->u.ike.cookie - b->u.ike.cookie;
6532 return ((d > 0) ? 1 : ((d < 0) ? -1 : 0));
6533 }
6534
6535 static int
6536 pf_do_nat64(struct pf_state_key *sk, struct pf_pdesc *pd, pbuf_t *pbuf,
6537 int off)
6538 {
6539 if (pd->af == AF_INET) {
6540 if (pd->af != sk->af_lan) {
6541 pd->ndaddr = sk->lan.addr;
6542 pd->naddr = sk->ext_lan.addr;
6543 } else {
6544 pd->naddr = sk->gwy.addr;
6545 pd->ndaddr = sk->ext_gwy.addr;
6546 }
6547 return (pf_nat64_ipv4(pbuf, off, pd));
6548 }
6549 else if (pd->af == AF_INET6) {
6550 if (pd->af != sk->af_lan) {
6551 pd->ndaddr = sk->lan.addr;
6552 pd->naddr = sk->ext_lan.addr;
6553 } else {
6554 pd->naddr = sk->gwy.addr;
6555 pd->ndaddr = sk->ext_gwy.addr;
6556 }
6557 return (pf_nat64_ipv6(pbuf, off, pd));
6558 }
6559 return (PF_DROP);
6560 }
6561
6562 static int
6563 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
6564 pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd,
6565 u_short *reason)
6566 {
6567 #pragma unused(h)
6568 struct pf_state_key_cmp key;
6569 struct tcphdr *th = pd->hdr.tcp;
6570 u_int16_t win = ntohs(th->th_win);
6571 u_int32_t ack, end, seq, orig_seq;
6572 u_int8_t sws, dws;
6573 int ackskew;
6574 int copyback = 0;
6575 struct pf_state_peer *src, *dst;
6576 struct pf_state_key *sk;
6577
6578 key.app_state = 0;
6579 key.proto = IPPROTO_TCP;
6580 key.af_lan = key.af_gwy = pd->af;
6581
6582 /*
6583 * For NAT64 the first time rule search and state creation
6584 * is done on the incoming side only.
6585 * Once the state gets created, NAT64's LAN side (ipv6) will
6586 * not be able to find the state in ext-gwy tree as that normally
6587 * is intended to be looked up for incoming traffic from the
6588 * WAN side.
6589 * Therefore to handle NAT64 case we init keys here for both
6590 * lan-ext as well as ext-gwy trees.
6591 * In the state lookup we attempt a lookup on both trees if
6592 * first one does not return any result and return a match if
6593 * the match state's was created by NAT64 rule.
6594 */
6595 PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
6596 PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
6597 key.ext_gwy.xport.port = th->th_sport;
6598 key.gwy.xport.port = th->th_dport;
6599
6600 PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
6601 PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
6602 key.lan.xport.port = th->th_sport;
6603 key.ext_lan.xport.port = th->th_dport;
6604
6605 STATE_LOOKUP();
6606
6607 sk = (*state)->state_key;
6608 /*
6609 * In case of NAT64 the translation is first applied on the LAN
6610 * side. Therefore for stack's address family comparison
6611 * we use sk->af_lan.
6612 */
6613 if ((direction == sk->direction) && (pd->af == sk->af_lan)) {
6614 src = &(*state)->src;
6615 dst = &(*state)->dst;
6616 } else {
6617 src = &(*state)->dst;
6618 dst = &(*state)->src;
6619 }
6620
6621 if (src->state == PF_TCPS_PROXY_SRC) {
6622 if (direction != sk->direction) {
6623 REASON_SET(reason, PFRES_SYNPROXY);
6624 return (PF_SYNPROXY_DROP);
6625 }
6626 if (th->th_flags & TH_SYN) {
6627 if (ntohl(th->th_seq) != src->seqlo) {
6628 REASON_SET(reason, PFRES_SYNPROXY);
6629 return (PF_DROP);
6630 }
6631 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
6632 pd->src, th->th_dport, th->th_sport,
6633 src->seqhi, ntohl(th->th_seq) + 1,
6634 TH_SYN|TH_ACK, 0, src->mss, 0, 1,
6635 0, NULL, NULL);
6636 REASON_SET(reason, PFRES_SYNPROXY);
6637 return (PF_SYNPROXY_DROP);
6638 } else if (!(th->th_flags & TH_ACK) ||
6639 (ntohl(th->th_ack) != src->seqhi + 1) ||
6640 (ntohl(th->th_seq) != src->seqlo + 1)) {
6641 REASON_SET(reason, PFRES_SYNPROXY);
6642 return (PF_DROP);
6643 } else if ((*state)->src_node != NULL &&
6644 pf_src_connlimit(state)) {
6645 REASON_SET(reason, PFRES_SRCLIMIT);
6646 return (PF_DROP);
6647 } else
6648 src->state = PF_TCPS_PROXY_DST;
6649 }
6650 if (src->state == PF_TCPS_PROXY_DST) {
6651 struct pf_state_host *psrc, *pdst;
6652
6653 if (direction == PF_OUT) {
6654 psrc = &sk->gwy;
6655 pdst = &sk->ext_gwy;
6656 } else {
6657 psrc = &sk->ext_lan;
6658 pdst = &sk->lan;
6659 }
6660 if (direction == sk->direction) {
6661 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
6662 (ntohl(th->th_ack) != src->seqhi + 1) ||
6663 (ntohl(th->th_seq) != src->seqlo + 1)) {
6664 REASON_SET(reason, PFRES_SYNPROXY);
6665 return (PF_DROP);
6666 }
6667 src->max_win = MAX(ntohs(th->th_win), 1);
6668 if (dst->seqhi == 1)
6669 dst->seqhi = htonl(random());
6670 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
6671 &pdst->addr, psrc->xport.port, pdst->xport.port,
6672 dst->seqhi, 0, TH_SYN, 0,
6673 src->mss, 0, 0, (*state)->tag, NULL, NULL);
6674 REASON_SET(reason, PFRES_SYNPROXY);
6675 return (PF_SYNPROXY_DROP);
6676 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
6677 (TH_SYN|TH_ACK)) ||
6678 (ntohl(th->th_ack) != dst->seqhi + 1)) {
6679 REASON_SET(reason, PFRES_SYNPROXY);
6680 return (PF_DROP);
6681 } else {
6682 dst->max_win = MAX(ntohs(th->th_win), 1);
6683 dst->seqlo = ntohl(th->th_seq);
6684 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
6685 pd->src, th->th_dport, th->th_sport,
6686 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
6687 TH_ACK, src->max_win, 0, 0, 0,
6688 (*state)->tag, NULL, NULL);
6689 pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr,
6690 &pdst->addr, psrc->xport.port, pdst->xport.port,
6691 src->seqhi + 1, src->seqlo + 1,
6692 TH_ACK, dst->max_win, 0, 0, 1,
6693 0, NULL, NULL);
6694 src->seqdiff = dst->seqhi -
6695 src->seqlo;
6696 dst->seqdiff = src->seqhi -
6697 dst->seqlo;
6698 src->seqhi = src->seqlo +
6699 dst->max_win;
6700 dst->seqhi = dst->seqlo +
6701 src->max_win;
6702 src->wscale = dst->wscale = 0;
6703 src->state = dst->state =
6704 TCPS_ESTABLISHED;
6705 REASON_SET(reason, PFRES_SYNPROXY);
6706 return (PF_SYNPROXY_DROP);
6707 }
6708 }
6709
6710 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
6711 dst->state >= TCPS_FIN_WAIT_2 &&
6712 src->state >= TCPS_FIN_WAIT_2) {
6713 if (pf_status.debug >= PF_DEBUG_MISC) {
6714 printf("pf: state reuse ");
6715 pf_print_state(*state);
6716 pf_print_flags(th->th_flags);
6717 printf("\n");
6718 }
6719 /* XXX make sure it's the same direction ?? */
6720 src->state = dst->state = TCPS_CLOSED;
6721 pf_unlink_state(*state);
6722 *state = NULL;
6723 return (PF_DROP);
6724 }
6725
6726 if ((th->th_flags & TH_SYN) == 0) {
6727 sws = (src->wscale & PF_WSCALE_FLAG) ?
6728 (src->wscale & PF_WSCALE_MASK) : TCP_MAX_WINSHIFT;
6729 dws = (dst->wscale & PF_WSCALE_FLAG) ?
6730 (dst->wscale & PF_WSCALE_MASK) : TCP_MAX_WINSHIFT;
6731 }
6732 else
6733 sws = dws = 0;
6734
6735 /*
6736 * Sequence tracking algorithm from Guido van Rooij's paper:
6737 * http://www.madison-gurkha.com/publications/tcp_filtering/
6738 * tcp_filtering.ps
6739 */
6740
6741 orig_seq = seq = ntohl(th->th_seq);
6742 if (src->seqlo == 0) {
6743 /* First packet from this end. Set its state */
6744
6745 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
6746 src->scrub == NULL) {
6747 if (pf_normalize_tcp_init(pbuf, off, pd, th, src, dst)) {
6748 REASON_SET(reason, PFRES_MEMORY);
6749 return (PF_DROP);
6750 }
6751 }
6752
6753 /* Deferred generation of sequence number modulator */
6754 if (dst->seqdiff && !src->seqdiff) {
6755 /* use random iss for the TCP server */
6756 while ((src->seqdiff = random() - seq) == 0)
6757 ;
6758 ack = ntohl(th->th_ack) - dst->seqdiff;
6759 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6760 src->seqdiff), 0);
6761 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
6762 copyback = off + sizeof (*th);
6763 } else {
6764 ack = ntohl(th->th_ack);
6765 }
6766
6767 end = seq + pd->p_len;
6768 if (th->th_flags & TH_SYN) {
6769 end++;
6770 if (dst->wscale & PF_WSCALE_FLAG) {
6771 src->wscale = pf_get_wscale(pbuf, off,
6772 th->th_off, pd->af);
6773 if (src->wscale & PF_WSCALE_FLAG) {
6774 /*
6775 * Remove scale factor from initial
6776 * window
6777 */
6778 sws = src->wscale & PF_WSCALE_MASK;
6779 win = ((u_int32_t)win + (1 << sws) - 1)
6780 >> sws;
6781 dws = dst->wscale & PF_WSCALE_MASK;
6782 } else {
6783 /*
6784 * Window scale negotiation has failed,
6785 * therefore we must restore the window
6786 * scale in the state record that we
6787 * optimistically removed in
6788 * pf_test_rule(). Care is required to
6789 * prevent arithmetic overflow from
6790 * zeroing the window when it's
6791 * truncated down to 16-bits.
6792 */
6793 u_int32_t max_win = dst->max_win;
6794 max_win <<=
6795 dst->wscale & PF_WSCALE_MASK;
6796 dst->max_win = MIN(0xffff, max_win);
6797 /* in case of a retrans SYN|ACK */
6798 dst->wscale = 0;
6799 }
6800 }
6801 }
6802 if (th->th_flags & TH_FIN)
6803 end++;
6804
6805 src->seqlo = seq;
6806 if (src->state < TCPS_SYN_SENT)
6807 src->state = TCPS_SYN_SENT;
6808
6809 /*
6810 * May need to slide the window (seqhi may have been set by
6811 * the crappy stack check or if we picked up the connection
6812 * after establishment)
6813 */
6814 if (src->seqhi == 1 ||
6815 SEQ_GEQ(end + MAX(1, (u_int32_t)dst->max_win << dws),
6816 src->seqhi))
6817 src->seqhi = end + MAX(1, (u_int32_t)dst->max_win << dws);
6818 if (win > src->max_win)
6819 src->max_win = win;
6820
6821 } else {
6822 ack = ntohl(th->th_ack) - dst->seqdiff;
6823 if (src->seqdiff) {
6824 /* Modulate sequence numbers */
6825 pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
6826 src->seqdiff), 0);
6827 pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
6828 copyback = off+ sizeof (*th);
6829 }
6830 end = seq + pd->p_len;
6831 if (th->th_flags & TH_SYN)
6832 end++;
6833 if (th->th_flags & TH_FIN)
6834 end++;
6835 }
6836
6837 if ((th->th_flags & TH_ACK) == 0) {
6838 /* Let it pass through the ack skew check */
6839 ack = dst->seqlo;
6840 } else if ((ack == 0 &&
6841 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
6842 /* broken tcp stacks do not set ack */
6843 (dst->state < TCPS_SYN_SENT)) {
6844 /*
6845 * Many stacks (ours included) will set the ACK number in an
6846 * FIN|ACK if the SYN times out -- no sequence to ACK.
6847 */
6848 ack = dst->seqlo;
6849 }
6850
6851 if (seq == end) {
6852 /* Ease sequencing restrictions on no data packets */
6853 seq = src->seqlo;
6854 end = seq;
6855 }
6856
6857 ackskew = dst->seqlo - ack;
6858
6859
6860 /*
6861 * Need to demodulate the sequence numbers in any TCP SACK options
6862 * (Selective ACK). We could optionally validate the SACK values
6863 * against the current ACK window, either forwards or backwards, but
6864 * I'm not confident that SACK has been implemented properly
6865 * everywhere. It wouldn't surprise me if several stacks accidently
6866 * SACK too far backwards of previously ACKed data. There really aren't
6867 * any security implications of bad SACKing unless the target stack
6868 * doesn't validate the option length correctly. Someone trying to
6869 * spoof into a TCP connection won't bother blindly sending SACK
6870 * options anyway.
6871 */
6872 if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) {
6873 copyback = pf_modulate_sack(pbuf, off, pd, th, dst);
6874 if (copyback == -1) {
6875 REASON_SET(reason, PFRES_MEMORY);
6876 return (PF_DROP);
6877 }
6878
6879 pbuf = pd->mp; // XXXSCW: Why?
6880 }
6881
6882
6883 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
6884 if (SEQ_GEQ(src->seqhi, end) &&
6885 /* Last octet inside other's window space */
6886 SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) &&
6887 /* Retrans: not more than one window back */
6888 (ackskew >= -MAXACKWINDOW) &&
6889 /* Acking not more than one reassembled fragment backwards */
6890 (ackskew <= (MAXACKWINDOW << sws)) &&
6891 /* Acking not more than one window forward */
6892 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
6893 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
6894 (pd->flags & PFDESC_IP_REAS) == 0)) {
6895 /* Require an exact/+1 sequence match on resets when possible */
6896
6897 if (dst->scrub || src->scrub) {
6898 if (pf_normalize_tcp_stateful(pbuf, off, pd, reason, th,
6899 *state, src, dst, &copyback))
6900 return (PF_DROP);
6901
6902 pbuf = pd->mp; // XXXSCW: Why?
6903 }
6904
6905 /* update max window */
6906 if (src->max_win < win)
6907 src->max_win = win;
6908 /* synchronize sequencing */
6909 if (SEQ_GT(end, src->seqlo))
6910 src->seqlo = end;
6911 /* slide the window of what the other end can send */
6912 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
6913 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
6914
6915 /* update states */
6916 if (th->th_flags & TH_SYN)
6917 if (src->state < TCPS_SYN_SENT)
6918 src->state = TCPS_SYN_SENT;
6919 if (th->th_flags & TH_FIN)
6920 if (src->state < TCPS_CLOSING)
6921 src->state = TCPS_CLOSING;
6922 if (th->th_flags & TH_ACK) {
6923 if (dst->state == TCPS_SYN_SENT) {
6924 dst->state = TCPS_ESTABLISHED;
6925 if (src->state == TCPS_ESTABLISHED &&
6926 (*state)->src_node != NULL &&
6927 pf_src_connlimit(state)) {
6928 REASON_SET(reason, PFRES_SRCLIMIT);
6929 return (PF_DROP);
6930 }
6931 } else if (dst->state == TCPS_CLOSING)
6932 dst->state = TCPS_FIN_WAIT_2;
6933 }
6934 if (th->th_flags & TH_RST)
6935 src->state = dst->state = TCPS_TIME_WAIT;
6936
6937 /* update expire time */
6938 (*state)->expire = pf_time_second();
6939 if (src->state >= TCPS_FIN_WAIT_2 &&
6940 dst->state >= TCPS_FIN_WAIT_2)
6941 (*state)->timeout = PFTM_TCP_CLOSED;
6942 else if (src->state >= TCPS_CLOSING &&
6943 dst->state >= TCPS_CLOSING)
6944 (*state)->timeout = PFTM_TCP_FIN_WAIT;
6945 else if (src->state < TCPS_ESTABLISHED ||
6946 dst->state < TCPS_ESTABLISHED)
6947 (*state)->timeout = PFTM_TCP_OPENING;
6948 else if (src->state >= TCPS_CLOSING ||
6949 dst->state >= TCPS_CLOSING)
6950 (*state)->timeout = PFTM_TCP_CLOSING;
6951 else
6952 (*state)->timeout = PFTM_TCP_ESTABLISHED;
6953
6954 /* Fall through to PASS packet */
6955
6956 } else if ((dst->state < TCPS_SYN_SENT ||
6957 dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) &&
6958 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
6959 /* Within a window forward of the originating packet */
6960 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
6961 /* Within a window backward of the originating packet */
6962
6963 /*
6964 * This currently handles three situations:
6965 * 1) Stupid stacks will shotgun SYNs before their peer
6966 * replies.
6967 * 2) When PF catches an already established stream (the
6968 * firewall rebooted, the state table was flushed, routes
6969 * changed...)
6970 * 3) Packets get funky immediately after the connection
6971 * closes (this should catch Solaris spurious ACK|FINs
6972 * that web servers like to spew after a close)
6973 *
6974 * This must be a little more careful than the above code
6975 * since packet floods will also be caught here. We don't
6976 * update the TTL here to mitigate the damage of a packet
6977 * flood and so the same code can handle awkward establishment
6978 * and a loosened connection close.
6979 * In the establishment case, a correct peer response will
6980 * validate the connection, go through the normal state code
6981 * and keep updating the state TTL.
6982 */
6983
6984 if (pf_status.debug >= PF_DEBUG_MISC) {
6985 printf("pf: loose state match: ");
6986 pf_print_state(*state);
6987 pf_print_flags(th->th_flags);
6988 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6989 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
6990 pd->p_len, ackskew, (*state)->packets[0],
6991 (*state)->packets[1],
6992 direction == PF_IN ? "in" : "out",
6993 direction == sk->direction ?
6994 "fwd" : "rev");
6995 }
6996
6997 if (dst->scrub || src->scrub) {
6998 if (pf_normalize_tcp_stateful(pbuf, off, pd, reason, th,
6999 *state, src, dst, &copyback))
7000 return (PF_DROP);
7001 pbuf = pd->mp; // XXXSCW: Why?
7002 }
7003
7004 /* update max window */
7005 if (src->max_win < win)
7006 src->max_win = win;
7007 /* synchronize sequencing */
7008 if (SEQ_GT(end, src->seqlo))
7009 src->seqlo = end;
7010 /* slide the window of what the other end can send */
7011 if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi))
7012 dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1);
7013
7014 /*
7015 * Cannot set dst->seqhi here since this could be a shotgunned
7016 * SYN and not an already established connection.
7017 */
7018
7019 if (th->th_flags & TH_FIN)
7020 if (src->state < TCPS_CLOSING)
7021 src->state = TCPS_CLOSING;
7022 if (th->th_flags & TH_RST)
7023 src->state = dst->state = TCPS_TIME_WAIT;
7024
7025 /* Fall through to PASS packet */
7026
7027 } else {
7028 if (dst->state == TCPS_SYN_SENT &&
7029 src->state == TCPS_SYN_SENT) {
7030 /* Send RST for state mismatches during handshake */
7031 if (!(th->th_flags & TH_RST))
7032 pf_send_tcp((*state)->rule.ptr, pd->af,
7033 pd->dst, pd->src, th->th_dport,
7034 th->th_sport, ntohl(th->th_ack), 0,
7035 TH_RST, 0, 0,
7036 (*state)->rule.ptr->return_ttl, 1, 0,
7037 pd->eh, kif->pfik_ifp);
7038 src->seqlo = 0;
7039 src->seqhi = 1;
7040 src->max_win = 1;
7041 } else if (pf_status.debug >= PF_DEBUG_MISC) {
7042 printf("pf: BAD state: ");
7043 pf_print_state(*state);
7044 pf_print_flags(th->th_flags);
7045 printf("\n seq=%u (%u) ack=%u len=%u ackskew=%d "
7046 "sws=%u dws=%u pkts=%llu:%llu dir=%s,%s\n",
7047 seq, orig_seq, ack, pd->p_len, ackskew,
7048 (unsigned int)sws, (unsigned int)dws,
7049 (*state)->packets[0], (*state)->packets[1],
7050 direction == PF_IN ? "in" : "out",
7051 direction == sk->direction ?
7052 "fwd" : "rev");
7053 printf("pf: State failure on: %c %c %c %c | %c %c\n",
7054 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
7055 SEQ_GEQ(seq,
7056 src->seqlo - ((u_int32_t)dst->max_win << dws)) ?
7057 ' ': '2',
7058 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
7059 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
7060 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
7061 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
7062 }
7063 REASON_SET(reason, PFRES_BADSTATE);
7064 return (PF_DROP);
7065 }
7066
7067 /* Any packets which have gotten here are to be passed */
7068
7069 if (sk->app_state &&
7070 sk->app_state->handler) {
7071 sk->app_state->handler(*state, direction,
7072 off + (th->th_off << 2), pd, kif);
7073 if (pd->lmw < 0) {
7074 REASON_SET(reason, PFRES_MEMORY);
7075 return (PF_DROP);
7076 }
7077 pbuf = pd->mp; // XXXSCW: Why?
7078 }
7079
7080 /* translate source/destination address, if necessary */
7081 if (STATE_TRANSLATE(sk)) {
7082 pd->naf = (pd->af == sk->af_lan) ? sk->af_gwy : sk->af_lan;
7083
7084 if (direction == PF_OUT) {
7085 pf_change_ap(direction, pd->mp, pd->src, &th->th_sport,
7086 pd->ip_sum, &th->th_sum, &sk->gwy.addr,
7087 sk->gwy.xport.port, 0, pd->af, pd->naf, 1);
7088 } else {
7089 if (pd->af != pd->naf) {
7090 if (pd->af == sk->af_gwy) {
7091 pf_change_ap(direction, pd->mp, pd->dst,
7092 &th->th_dport, pd->ip_sum,
7093 &th->th_sum, &sk->lan.addr,
7094 sk->lan.xport.port, 0,
7095 pd->af, pd->naf, 0);
7096
7097 pf_change_ap(direction, pd->mp, pd->src,
7098 &th->th_sport, pd->ip_sum,
7099 &th->th_sum, &sk->ext_lan.addr,
7100 th->th_sport, 0, pd->af,
7101 pd->naf, 0);
7102
7103 } else {
7104 pf_change_ap(direction, pd->mp, pd->dst,
7105 &th->th_dport, pd->ip_sum,
7106 &th->th_sum, &sk->ext_gwy.addr,
7107 th->th_dport, 0, pd->af,
7108 pd->naf, 0);
7109
7110 pf_change_ap(direction, pd->mp, pd->src,
7111 &th->th_sport, pd->ip_sum,
7112 &th->th_sum, &sk->gwy.addr,
7113 sk->gwy.xport.port, 0, pd->af,
7114 pd->naf, 0);
7115 }
7116 } else {
7117 pf_change_ap(direction, pd->mp, pd->dst,
7118 &th->th_dport, pd->ip_sum,
7119 &th->th_sum, &sk->lan.addr,
7120 sk->lan.xport.port, 0, pd->af,
7121 pd->naf, 1);
7122 }
7123 }
7124
7125 copyback = off + sizeof (*th);
7126 }
7127
7128 if (copyback) {
7129 if (pf_lazy_makewritable(pd, pbuf, copyback) == NULL) {
7130 REASON_SET(reason, PFRES_MEMORY);
7131 return (PF_DROP);
7132 }
7133
7134 /* Copyback sequence modulation or stateful scrub changes */
7135 pbuf_copy_back(pbuf, off, sizeof (*th), th);
7136
7137 if (sk->af_lan != sk->af_gwy)
7138 return (pf_do_nat64(sk, pd, pbuf, off));
7139 }
7140 return (PF_PASS);
7141 }
7142
/*
 * pf_test_state_udp: match a UDP packet against the state table and apply
 * the state's translation, if any.
 *
 * Looks up an existing UDP state for the packet described by 'pd' (headers
 * already pulled; 'uh' points at the UDP header), refreshes the state's
 * UDP pseudo-state machine and expiry timers, widens the "ext" side of the
 * state key for loose extfilter variants, runs any application-layer
 * handler (e.g. IKE/NAT-T), and finally rewrites addresses/ports for
 * states that require translation (including the NAT64 af_lan != af_gwy
 * case, which is handed off to pf_do_nat64()).
 *
 * Returns PF_PASS to let the packet through, PF_DROP (with *reason set)
 * on failure, or whatever pf_state_lookup_aux()/pf_do_nat64() dictate.
 * On success *state points at the matched state (caller-visible output).
 */
static int
pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
    pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason)
{
#pragma unused(h)
	struct pf_state_peer *src, *dst;
	struct pf_state_key_cmp key;
	struct pf_state_key *sk;
	struct udphdr *uh = pd->hdr.udp;
	struct pf_app_state as;
	int action, extfilter;
	/* Start with the strictest endpoint filter (address+port+direction);
	 * fall back to looser variants below if no state matches. */
	key.app_state = 0;
	key.proto_variant = PF_EXTFILTER_APD;

	key.proto = IPPROTO_UDP;
	key.af_lan = key.af_gwy = pd->af;

	/*
	 * For NAT64 the first time rule search and state creation
	 * is done on the incoming side only.
	 * Once the state gets created, NAT64's LAN side (ipv6) will
	 * not be able to find the state in ext-gwy tree as that normally
	 * is intended to be looked up for incoming traffic from the
	 * WAN side.
	 * Therefore to handle NAT64 case we init keys here for both
	 * lan-ext as well as ext-gwy trees.
	 * In the state lookup we attempt a lookup on both trees if
	 * first one does not return any result and return a match if
	 * the match state's was created by NAT64 rule.
	 */
	PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
	PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
	key.ext_gwy.xport.port = uh->uh_sport;
	key.gwy.xport.port = uh->uh_dport;

	PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
	PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
	key.lan.xport.port = uh->uh_sport;
	key.ext_lan.xport.port = uh->uh_dport;

	/*
	 * IKE (UDP 500 <-> 500): disambiguate multiple IKE flows behind the
	 * same NAT by matching on the initiator cookie via an app_state
	 * comparator instead of ports alone.
	 */
	if (ntohs(uh->uh_sport) == PF_IKE_PORT &&
	    ntohs(uh->uh_dport) == PF_IKE_PORT) {
		struct pf_ike_hdr ike;
		size_t plen = pbuf->pb_packet_len - off - sizeof (*uh);
		if (plen < PF_IKE_PACKET_MINSIZE) {
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: IKE message too small.\n"));
			return (PF_DROP);
		}

		/* Copy at most the fixed IKE header we understand. */
		if (plen > sizeof (ike))
			plen = sizeof (ike);
		pbuf_copy_data(pbuf, off + sizeof (*uh), plen, &ike);

		if (ike.initiator_cookie) {
			key.app_state = &as;
			as.compare_lan_ext = pf_ike_compare;
			as.compare_ext_gwy = pf_ike_compare;
			as.u.ike.cookie = ike.initiator_cookie;
		} else {
			/*
			 * <http://tools.ietf.org/html/\
			 * draft-ietf-ipsec-nat-t-ike-01>
			 * Support non-standard NAT-T implementations that
			 * push the ESP packet over the top of the IKE packet.
			 * Do not drop packet.
			 */
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: IKE initiator cookie = 0.\n"));
		}
	}

	*state = pf_find_state(kif, &key, direction);

	/*
	 * No match with the strict variant and no app_state comparator:
	 * retry with progressively looser endpoint filters
	 * (address+direction, then endpoint-independent).
	 */
	if (!key.app_state && *state == 0) {
		key.proto_variant = PF_EXTFILTER_AD;
		*state = pf_find_state(kif, &key, direction);
	}

	if (!key.app_state && *state == 0) {
		key.proto_variant = PF_EXTFILTER_EI;
		*state = pf_find_state(kif, &key, direction);
	}

	/* similar to STATE_LOOKUP() */
	/* Propagate the state's flow hash onto the packet for flow tracking. */
	if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
		pd->flowsrc = (*state)->state_key->flowsrc;
		pd->flowhash = (*state)->state_key->flowhash;
		if (pd->flowhash != 0) {
			pd->pktflags |= PKTF_FLOW_ID;
			pd->pktflags &= ~PKTF_FLOW_ADV;
		}
	}

	if (pf_state_lookup_aux(state, kif, direction, &action))
		return (action);

	sk = (*state)->state_key;

	/*
	 * In case of NAT64 the translation is first applied on the LAN
	 * side. Therefore for stack's address family comparison
	 * we use sk->af_lan.
	 */
	if ((direction == sk->direction) && (pd->af == sk->af_lan)) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	/* SINGLE = traffic seen one way; MULTIPLE = both peers have spoken. */
	if (src->state < PFUDPS_SINGLE)
		src->state = PFUDPS_SINGLE;
	if (dst->state == PFUDPS_SINGLE)
		dst->state = PFUDPS_MULTIPLE;

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
		(*state)->timeout = PFTM_UDP_MULTIPLE;
	else
		(*state)->timeout = PFTM_UDP_SINGLE;

	/*
	 * For loose extfilter states, latch the external endpoint we just
	 * matched back into the state key (port always; address too for the
	 * endpoint-independent variant), on whichever side this packet
	 * traversed.
	 */
	extfilter = sk->proto_variant;
	if (extfilter > PF_EXTFILTER_APD) {
		if (direction == PF_OUT) {
			sk->ext_lan.xport.port = key.ext_lan.xport.port;
			if (extfilter > PF_EXTFILTER_AD)
				PF_ACPY(&sk->ext_lan.addr, &key.ext_lan.addr,
				    key.af_lan);
		} else {
			sk->ext_gwy.xport.port = key.ext_gwy.xport.port;
			if (extfilter > PF_EXTFILTER_AD)
				PF_ACPY(&sk->ext_gwy.addr, &key.ext_gwy.addr,
				    key.af_gwy);
		}
	}

	/*
	 * Run the application-layer handler (e.g. IKE), which may replace
	 * the underlying buffer; refresh pbuf from pd->mp afterwards.
	 * NOTE(review): uh_ulen is in network byte order here, so
	 * "off + uh->uh_ulen" looks byte-order suspect — confirm what the
	 * handlers actually expect before touching this.
	 */
	if (sk->app_state && sk->app_state->handler) {
		sk->app_state->handler(*state, direction, off + uh->uh_ulen,
		    pd, kif);
		if (pd->lmw < 0) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pbuf = pd->mp; // XXXSCW: Why?
	}

	/* translate source/destination address, if necessary */
	if (STATE_TRANSLATE(sk)) {
		/* Ensure headers up to the end of the UDP header are writable. */
		if (pf_lazy_makewritable(pd, pbuf, off + sizeof (*uh)) == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}

		/* Post-translation family: the "other" side's AF (NAT64). */
		pd->naf = (pd->af == sk->af_lan) ? sk->af_gwy : sk->af_lan;

		if (direction == PF_OUT) {
			/* Outbound: rewrite source to the gateway endpoint. */
			pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport,
			    pd->ip_sum, &uh->uh_sum, &sk->gwy.addr,
			    sk->gwy.xport.port, 1, pd->af, pd->naf, 1);
		} else {
			if (pd->af != pd->naf) {
				/*
				 * NAT64: both addresses must be rewritten
				 * since the packet changes address family.
				 */
				if (pd->af == sk->af_gwy) {
					pf_change_ap(direction, pd->mp, pd->dst,
					    &uh->uh_dport, pd->ip_sum,
					    &uh->uh_sum, &sk->lan.addr,
					    sk->lan.xport.port, 1,
					    pd->af, pd->naf, 0);

					pf_change_ap(direction, pd->mp, pd->src,
					    &uh->uh_sport, pd->ip_sum,
					    &uh->uh_sum, &sk->ext_lan.addr,
					    uh->uh_sport, 1, pd->af,
					    pd->naf, 0);

				} else {
					pf_change_ap(direction, pd->mp, pd->dst,
					    &uh->uh_dport, pd->ip_sum,
					    &uh->uh_sum, &sk->ext_gwy.addr,
					    uh->uh_dport, 1, pd->af,
					    pd->naf, 0);

					pf_change_ap(direction, pd->mp, pd->src,
					    &uh->uh_sport, pd->ip_sum,
					    &uh->uh_sum, &sk->gwy.addr,
					    sk->gwy.xport.port, 1, pd->af,
					    pd->naf, 0);
				}
			} else {
				/* Plain inbound NAT: restore the LAN endpoint. */
				pf_change_ap(direction, pd->mp, pd->dst,
				    &uh->uh_dport, pd->ip_sum,
				    &uh->uh_sum, &sk->lan.addr,
				    sk->lan.xport.port, 1,
				    pd->af, pd->naf, 1);
			}
		}

		/* Write the rewritten UDP header back into the packet. */
		pbuf_copy_back(pbuf, off, sizeof (*uh), uh);
		/* NAT64 states need full IP header translation as well. */
		if (sk->af_lan != sk->af_gwy)
			return (pf_do_nat64(sk, pd, pbuf, off));
	}
	return (PF_PASS);
}
7350
7351 static int
7352 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
7353 pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason)
7354 {
7355 #pragma unused(h)
7356 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
7357 struct in_addr srcv4_inaddr = saddr->v4addr;
7358 u_int16_t icmpid = 0, *icmpsum = NULL;
7359 u_int8_t icmptype = 0;
7360 int state_icmp = 0;
7361 struct pf_state_key_cmp key;
7362 struct pf_state_key *sk;
7363
7364 struct pf_app_state as;
7365 key.app_state = 0;
7366
7367 pd->off = off;
7368
7369 switch (pd->proto) {
7370 #if INET
7371 case IPPROTO_ICMP:
7372 icmptype = pd->hdr.icmp->icmp_type;
7373 icmpid = pd->hdr.icmp->icmp_id;
7374 icmpsum = &pd->hdr.icmp->icmp_cksum;
7375
7376 if (icmptype == ICMP_UNREACH ||
7377 icmptype == ICMP_SOURCEQUENCH ||
7378 icmptype == ICMP_REDIRECT ||
7379 icmptype == ICMP_TIMXCEED ||
7380 icmptype == ICMP_PARAMPROB)
7381 state_icmp++;
7382 break;
7383 #endif /* INET */
7384 #if INET6
7385 case IPPROTO_ICMPV6:
7386 icmptype = pd->hdr.icmp6->icmp6_type;
7387 icmpid = pd->hdr.icmp6->icmp6_id;
7388 icmpsum = &pd->hdr.icmp6->icmp6_cksum;
7389
7390 if (icmptype == ICMP6_DST_UNREACH ||
7391 icmptype == ICMP6_PACKET_TOO_BIG ||
7392 icmptype == ICMP6_TIME_EXCEEDED ||
7393 icmptype == ICMP6_PARAM_PROB)
7394 state_icmp++;
7395 break;
7396 #endif /* INET6 */
7397 }
7398
7399 if (!state_icmp) {
7400
7401 /*
7402 * ICMP query/reply message not related to a TCP/UDP packet.
7403 * Search for an ICMP state.
7404 */
7405 /*
7406 * NAT64 requires protocol translation between ICMPv4
7407 * and ICMPv6. TCP and UDP do not require protocol
7408 * translation. To avoid adding complexity just to
7409 * handle ICMP(v4addr/v6addr), we always lookup for
7410 * proto = IPPROTO_ICMP on both LAN and WAN side
7411 */
7412 key.proto = IPPROTO_ICMP;
7413 key.af_lan = key.af_gwy = pd->af;
7414
7415 PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
7416 PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
7417 key.ext_gwy.xport.port = 0;
7418 key.gwy.xport.port = icmpid;
7419
7420 PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
7421 PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
7422 key.lan.xport.port = icmpid;
7423 key.ext_lan.xport.port = 0;
7424
7425 STATE_LOOKUP();
7426
7427 sk = (*state)->state_key;
7428 (*state)->expire = pf_time_second();
7429 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
7430
7431 /* translate source/destination address, if necessary */
7432 if (STATE_TRANSLATE(sk)) {
7433 pd->naf = (pd->af == sk->af_lan) ?
7434 sk->af_gwy : sk->af_lan;
7435 if (direction == PF_OUT) {
7436 switch (pd->af) {
7437 #if INET
7438 case AF_INET:
7439 pf_change_a(&saddr->v4addr.s_addr,
7440 pd->ip_sum,
7441 sk->gwy.addr.v4addr.s_addr, 0);
7442 pd->hdr.icmp->icmp_cksum =
7443 pf_cksum_fixup(
7444 pd->hdr.icmp->icmp_cksum, icmpid,
7445 sk->gwy.xport.port, 0);
7446 pd->hdr.icmp->icmp_id =
7447 sk->gwy.xport.port;
7448 if (pf_lazy_makewritable(pd, pbuf,
7449 off + ICMP_MINLEN) == NULL)
7450 return (PF_DROP);
7451 pbuf_copy_back(pbuf, off, ICMP_MINLEN,
7452 pd->hdr.icmp);
7453 break;
7454 #endif /* INET */
7455 #if INET6
7456 case AF_INET6:
7457 pf_change_a6(saddr,
7458 &pd->hdr.icmp6->icmp6_cksum,
7459 &sk->gwy.addr, 0);
7460 if (pf_lazy_makewritable(pd, NULL,
7461 off + sizeof (struct icmp6_hdr)) ==
7462 NULL)
7463 return (PF_DROP);
7464 pbuf_copy_back(pbuf, off,
7465 sizeof (struct icmp6_hdr),
7466 pd->hdr.icmp6);
7467 break;
7468 #endif /* INET6 */
7469 }
7470 } else {
7471 switch (pd->af) {
7472 #if INET
7473 case AF_INET:
7474 if (pd->naf != AF_INET) {
7475 if (pf_translate_icmp_af(
7476 AF_INET6, pd->hdr.icmp))
7477 return (PF_DROP);
7478
7479 pd->proto = IPPROTO_ICMPV6;
7480
7481 } else {
7482
7483 pf_change_a(&daddr->v4addr.s_addr,
7484 pd->ip_sum,
7485 sk->lan.addr.v4addr.s_addr, 0);
7486
7487 pd->hdr.icmp->icmp_cksum =
7488 pf_cksum_fixup(
7489 pd->hdr.icmp->icmp_cksum,
7490 icmpid, sk->lan.xport.port, 0);
7491
7492 pd->hdr.icmp->icmp_id =
7493 sk->lan.xport.port;
7494 }
7495
7496 if (pf_lazy_makewritable(pd, pbuf,
7497 off + ICMP_MINLEN) == NULL)
7498 return (PF_DROP);
7499 pbuf_copy_back(pbuf, off, ICMP_MINLEN,
7500 pd->hdr.icmp);
7501 if (sk->af_lan != sk->af_gwy)
7502 return (pf_do_nat64(sk, pd,
7503 pbuf, off));
7504 break;
7505 #endif /* INET */
7506 #if INET6
7507 case AF_INET6:
7508 if (pd->naf != AF_INET6) {
7509 if (pf_translate_icmp_af(
7510 AF_INET, pd->hdr.icmp6))
7511 return (PF_DROP);
7512
7513 pd->proto = IPPROTO_ICMP;
7514 } else {
7515 pf_change_a6(daddr,
7516 &pd->hdr.icmp6->icmp6_cksum,
7517 &sk->lan.addr, 0);
7518 }
7519 if (pf_lazy_makewritable(pd, pbuf,
7520 off + sizeof (struct icmp6_hdr)) ==
7521 NULL)
7522 return (PF_DROP);
7523 pbuf_copy_back(pbuf, off,
7524 sizeof (struct icmp6_hdr),
7525 pd->hdr.icmp6);
7526 if (sk->af_lan != sk->af_gwy)
7527 return (pf_do_nat64(sk, pd,
7528 pbuf, off));
7529 break;
7530 #endif /* INET6 */
7531 }
7532 }
7533 }
7534
7535 return (PF_PASS);
7536
7537 } else {
7538 /*
7539 * ICMP error message in response to a TCP/UDP packet.
7540 * Extract the inner TCP/UDP header and search for that state.
7541 */
7542 struct pf_pdesc pd2; /* For inner (original) header */
7543 #if INET
7544 struct ip h2;
7545 #endif /* INET */
7546 #if INET6
7547 struct ip6_hdr h2_6;
7548 int terminal = 0;
7549 #endif /* INET6 */
7550 int ipoff2 = 0;
7551 int off2 = 0;
7552
7553 memset(&pd2, 0, sizeof (pd2));
7554
7555 pd2.af = pd->af;
7556 switch (pd->af) {
7557 #if INET
7558 case AF_INET:
7559 /* offset of h2 in mbuf chain */
7560 ipoff2 = off + ICMP_MINLEN;
7561
7562 if (!pf_pull_hdr(pbuf, ipoff2, &h2, sizeof (h2),
7563 NULL, reason, pd2.af)) {
7564 DPFPRINTF(PF_DEBUG_MISC,
7565 ("pf: ICMP error message too short "
7566 "(ip)\n"));
7567 return (PF_DROP);
7568 }
7569 /*
7570 * ICMP error messages don't refer to non-first
7571 * fragments
7572 */
7573 if (h2.ip_off & htons(IP_OFFMASK)) {
7574 REASON_SET(reason, PFRES_FRAG);
7575 return (PF_DROP);
7576 }
7577
7578 /* offset of protocol header that follows h2 */
7579 off2 = ipoff2 + (h2.ip_hl << 2);
7580 /* TODO */
7581 pd2.off = ipoff2 + (h2.ip_hl << 2);
7582
7583 pd2.proto = h2.ip_p;
7584 pd2.src = (struct pf_addr *)&h2.ip_src;
7585 pd2.dst = (struct pf_addr *)&h2.ip_dst;
7586 pd2.ip_sum = &h2.ip_sum;
7587 break;
7588 #endif /* INET */
7589 #if INET6
7590 case AF_INET6:
7591 ipoff2 = off + sizeof (struct icmp6_hdr);
7592
7593 if (!pf_pull_hdr(pbuf, ipoff2, &h2_6, sizeof (h2_6),
7594 NULL, reason, pd2.af)) {
7595 DPFPRINTF(PF_DEBUG_MISC,
7596 ("pf: ICMP error message too short "
7597 "(ip6)\n"));
7598 return (PF_DROP);
7599 }
7600 pd2.proto = h2_6.ip6_nxt;
7601 pd2.src = (struct pf_addr *)(uintptr_t)&h2_6.ip6_src;
7602 pd2.dst = (struct pf_addr *)(uintptr_t)&h2_6.ip6_dst;
7603 pd2.ip_sum = NULL;
7604 off2 = ipoff2 + sizeof (h2_6);
7605 do {
7606 switch (pd2.proto) {
7607 case IPPROTO_FRAGMENT:
7608 /*
7609 * ICMPv6 error messages for
7610 * non-first fragments
7611 */
7612 REASON_SET(reason, PFRES_FRAG);
7613 return (PF_DROP);
7614 case IPPROTO_AH:
7615 case IPPROTO_HOPOPTS:
7616 case IPPROTO_ROUTING:
7617 case IPPROTO_DSTOPTS: {
7618 /* get next header and header length */
7619 struct ip6_ext opt6;
7620
7621 if (!pf_pull_hdr(pbuf, off2, &opt6,
7622 sizeof (opt6), NULL, reason,
7623 pd2.af)) {
7624 DPFPRINTF(PF_DEBUG_MISC,
7625 ("pf: ICMPv6 short opt\n"));
7626 return (PF_DROP);
7627 }
7628 if (pd2.proto == IPPROTO_AH)
7629 off2 += (opt6.ip6e_len + 2) * 4;
7630 else
7631 off2 += (opt6.ip6e_len + 1) * 8;
7632 pd2.proto = opt6.ip6e_nxt;
7633 /* goto the next header */
7634 break;
7635 }
7636 default:
7637 terminal++;
7638 break;
7639 }
7640 } while (!terminal);
7641 /* TODO */
7642 pd2.off = ipoff2;
7643 break;
7644 #endif /* INET6 */
7645 }
7646
7647 switch (pd2.proto) {
7648 case IPPROTO_TCP: {
7649 struct tcphdr th;
7650 u_int32_t seq;
7651 struct pf_state_peer *src, *dst;
7652 u_int8_t dws;
7653 int copyback = 0;
7654
7655 /*
7656 * Only the first 8 bytes of the TCP header can be
7657 * expected. Don't access any TCP header fields after
7658 * th_seq, an ackskew test is not possible.
7659 */
7660 if (!pf_pull_hdr(pbuf, off2, &th, 8, NULL, reason,
7661 pd2.af)) {
7662 DPFPRINTF(PF_DEBUG_MISC,
7663 ("pf: ICMP error message too short "
7664 "(tcp)\n"));
7665 return (PF_DROP);
7666 }
7667
7668 key.proto = IPPROTO_TCP;
7669 key.af_gwy = pd2.af;
7670 PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
7671 PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
7672 key.ext_gwy.xport.port = th.th_dport;
7673 key.gwy.xport.port = th.th_sport;
7674
7675 key.af_lan = pd2.af;
7676 PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
7677 PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
7678 key.lan.xport.port = th.th_dport;
7679 key.ext_lan.xport.port = th.th_sport;
7680
7681 STATE_LOOKUP();
7682
7683 sk = (*state)->state_key;
7684 if ((direction == sk->direction) &&
7685 ((sk->af_lan == sk->af_gwy) ||
7686 (pd2.af == sk->af_lan))) {
7687 src = &(*state)->dst;
7688 dst = &(*state)->src;
7689 } else {
7690 src = &(*state)->src;
7691 dst = &(*state)->dst;
7692 }
7693
7694 if (src->wscale && (dst->wscale & PF_WSCALE_FLAG))
7695 dws = dst->wscale & PF_WSCALE_MASK;
7696 else
7697 dws = TCP_MAX_WINSHIFT;
7698
7699 /* Demodulate sequence number */
7700 seq = ntohl(th.th_seq) - src->seqdiff;
7701 if (src->seqdiff) {
7702 pf_change_a(&th.th_seq, icmpsum,
7703 htonl(seq), 0);
7704 copyback = 1;
7705 }
7706
7707 if (!SEQ_GEQ(src->seqhi, seq) ||
7708 !SEQ_GEQ(seq,
7709 src->seqlo - ((u_int32_t)dst->max_win << dws))) {
7710 if (pf_status.debug >= PF_DEBUG_MISC) {
7711 printf("pf: BAD ICMP %d:%d ",
7712 icmptype, pd->hdr.icmp->icmp_code);
7713 pf_print_host(pd->src, 0, pd->af);
7714 printf(" -> ");
7715 pf_print_host(pd->dst, 0, pd->af);
7716 printf(" state: ");
7717 pf_print_state(*state);
7718 printf(" seq=%u\n", seq);
7719 }
7720 REASON_SET(reason, PFRES_BADSTATE);
7721 return (PF_DROP);
7722 }
7723
7724 pd->naf = pd2.naf = (pd2.af == sk->af_lan) ?
7725 sk->af_gwy : sk->af_lan;
7726
7727 if (STATE_TRANSLATE(sk)) {
7728 /* NAT64 case */
7729 if (sk->af_lan != sk->af_gwy) {
7730 struct pf_state_host *saddr2, *daddr2;
7731
7732 if (pd2.naf == sk->af_lan) {
7733 saddr2 = &sk->lan;
7734 daddr2 = &sk->ext_lan;
7735 } else {
7736 saddr2 = &sk->ext_gwy;
7737 daddr2 = &sk->gwy;
7738 }
7739
7740 /* translate ICMP message types and codes */
7741 if (pf_translate_icmp_af(pd->naf,
7742 pd->hdr.icmp))
7743 return (PF_DROP);
7744
7745 if (pf_lazy_makewritable(pd, pbuf,
7746 off2 + 8) == NULL)
7747 return (PF_DROP);
7748
7749 pbuf_copy_back(pbuf, pd->off,
7750 sizeof(struct icmp6_hdr),
7751 pd->hdr.icmp6);
7752
7753 /*
7754 * translate inner ip header within the
7755 * ICMP message
7756 */
7757 if (pf_change_icmp_af(pbuf, ipoff2, pd,
7758 &pd2, &saddr2->addr, &daddr2->addr,
7759 pd->af, pd->naf))
7760 return (PF_DROP);
7761
7762 if (pd->naf == AF_INET)
7763 pd->proto = IPPROTO_ICMP;
7764 else
7765 pd->proto = IPPROTO_ICMPV6;
7766
7767 /*
7768 * translate inner tcp header within
7769 * the ICMP message
7770 */
7771 pf_change_ap(direction, NULL, pd2.src,
7772 &th.th_sport, pd2.ip_sum,
7773 &th.th_sum, &daddr2->addr,
7774 saddr2->xport.port, 0, pd2.af,
7775 pd2.naf, 0);
7776
7777 pf_change_ap(direction, NULL, pd2.dst,
7778 &th.th_dport, pd2.ip_sum,
7779 &th.th_sum, &saddr2->addr,
7780 daddr2->xport.port, 0, pd2.af,
7781 pd2.naf, 0);
7782
7783 pbuf_copy_back(pbuf, pd2.off, 8, &th);
7784
7785 /* translate outer ip header */
7786 PF_ACPY(&pd->naddr, &daddr2->addr,
7787 pd->naf);
7788 PF_ACPY(&pd->ndaddr, &saddr2->addr,
7789 pd->naf);
7790 if (pd->af == AF_INET) {
7791 memcpy(&pd->naddr.addr32[3],
7792 &srcv4_inaddr,
7793 sizeof(pd->naddr.addr32[3]));
7794 return (pf_nat64_ipv4(pbuf, off,
7795 pd));
7796 } else {
7797 return (pf_nat64_ipv6(pbuf, off,
7798 pd));
7799 }
7800 }
7801 if (direction == PF_IN) {
7802 pf_change_icmp(pd2.src, &th.th_sport,
7803 daddr, &sk->lan.addr,
7804 sk->lan.xport.port, NULL,
7805 pd2.ip_sum, icmpsum,
7806 pd->ip_sum, 0, pd2.af);
7807 } else {
7808 pf_change_icmp(pd2.dst, &th.th_dport,
7809 saddr, &sk->gwy.addr,
7810 sk->gwy.xport.port, NULL,
7811 pd2.ip_sum, icmpsum,
7812 pd->ip_sum, 0, pd2.af);
7813 }
7814 copyback = 1;
7815 }
7816
7817 if (copyback) {
7818 if (pf_lazy_makewritable(pd, pbuf, off2 + 8) ==
7819 NULL)
7820 return (PF_DROP);
7821 switch (pd2.af) {
7822 #if INET
7823 case AF_INET:
7824 pbuf_copy_back(pbuf, off, ICMP_MINLEN,
7825 pd->hdr.icmp);
7826 pbuf_copy_back(pbuf, ipoff2, sizeof(h2),
7827 &h2);
7828 break;
7829 #endif /* INET */
7830 #if INET6
7831 case AF_INET6:
7832 pbuf_copy_back(pbuf, off,
7833 sizeof (struct icmp6_hdr),
7834 pd->hdr.icmp6);
7835 pbuf_copy_back(pbuf, ipoff2,
7836 sizeof (h2_6), &h2_6);
7837 break;
7838 #endif /* INET6 */
7839 }
7840 pbuf_copy_back(pbuf, off2, 8, &th);
7841 }
7842
7843 return (PF_PASS);
7844 }
7845 case IPPROTO_UDP: {
7846 struct udphdr uh;
7847 int dx, action;
7848 if (!pf_pull_hdr(pbuf, off2, &uh, sizeof (uh),
7849 NULL, reason, pd2.af)) {
7850 DPFPRINTF(PF_DEBUG_MISC,
7851 ("pf: ICMP error message too short "
7852 "(udp)\n"));
7853 return (PF_DROP);
7854 }
7855
7856 key.af_gwy = pd2.af;
7857 PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
7858 PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
7859 key.ext_gwy.xport.port = uh.uh_dport;
7860 key.gwy.xport.port = uh.uh_sport;
7861
7862 key.af_lan = pd2.af;
7863 PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
7864 PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
7865 key.lan.xport.port = uh.uh_dport;
7866 key.ext_lan.xport.port = uh.uh_sport;
7867
7868 key.proto = IPPROTO_UDP;
7869 key.proto_variant = PF_EXTFILTER_APD;
7870 dx = direction;
7871
7872 if (ntohs(uh.uh_sport) == PF_IKE_PORT &&
7873 ntohs(uh.uh_dport) == PF_IKE_PORT) {
7874 struct pf_ike_hdr ike;
7875 size_t plen = pbuf->pb_packet_len - off2 -
7876 sizeof (uh);
7877 if (direction == PF_IN &&
7878 plen < 8 /* PF_IKE_PACKET_MINSIZE */) {
7879 DPFPRINTF(PF_DEBUG_MISC, ("pf: "
7880 "ICMP error, embedded IKE message "
7881 "too small.\n"));
7882 return (PF_DROP);
7883 }
7884
7885 if (plen > sizeof (ike))
7886 plen = sizeof (ike);
7887 pbuf_copy_data(pbuf, off + sizeof (uh), plen,
7888 &ike);
7889
7890 key.app_state = &as;
7891 as.compare_lan_ext = pf_ike_compare;
7892 as.compare_ext_gwy = pf_ike_compare;
7893 as.u.ike.cookie = ike.initiator_cookie;
7894 }
7895
7896 *state = pf_find_state(kif, &key, dx);
7897
7898 if (key.app_state && *state == 0) {
7899 key.app_state = 0;
7900 *state = pf_find_state(kif, &key, dx);
7901 }
7902
7903 if (*state == 0) {
7904 key.proto_variant = PF_EXTFILTER_AD;
7905 *state = pf_find_state(kif, &key, dx);
7906 }
7907
7908 if (*state == 0) {
7909 key.proto_variant = PF_EXTFILTER_EI;
7910 *state = pf_find_state(kif, &key, dx);
7911 }
7912
7913 /* similar to STATE_LOOKUP() */
7914 if (*state != NULL && pd != NULL &&
7915 !(pd->pktflags & PKTF_FLOW_ID)) {
7916 pd->flowsrc = (*state)->state_key->flowsrc;
7917 pd->flowhash = (*state)->state_key->flowhash;
7918 if (pd->flowhash != 0) {
7919 pd->pktflags |= PKTF_FLOW_ID;
7920 pd->pktflags &= ~PKTF_FLOW_ADV;
7921 }
7922 }
7923
7924 if (pf_state_lookup_aux(state, kif, direction, &action))
7925 return (action);
7926
7927 sk = (*state)->state_key;
7928 pd->naf = pd2.naf = (pd2.af == sk->af_lan) ?
7929 sk->af_gwy : sk->af_lan;
7930
7931 if (STATE_TRANSLATE(sk)) {
7932 /* NAT64 case */
7933 if (sk->af_lan != sk->af_gwy) {
7934 struct pf_state_host *saddr2, *daddr2;
7935
7936 if (pd2.naf == sk->af_lan) {
7937 saddr2 = &sk->lan;
7938 daddr2 = &sk->ext_lan;
7939 } else {
7940 saddr2 = &sk->ext_gwy;
7941 daddr2 = &sk->gwy;
7942 }
7943
7944 /* translate ICMP message */
7945 if (pf_translate_icmp_af(pd->naf,
7946 pd->hdr.icmp))
7947 return (PF_DROP);
7948 if (pf_lazy_makewritable(pd, pbuf,
7949 off2 + 8) == NULL)
7950 return (PF_DROP);
7951
7952 pbuf_copy_back(pbuf, pd->off,
7953 sizeof(struct icmp6_hdr),
7954 pd->hdr.icmp6);
7955
7956 /*
7957 * translate inner ip header within the
7958 * ICMP message
7959 */
7960 if (pf_change_icmp_af(pbuf, ipoff2, pd,
7961 &pd2, &saddr2->addr, &daddr2->addr,
7962 pd->af, pd->naf))
7963 return (PF_DROP);
7964
7965 if (pd->naf == AF_INET)
7966 pd->proto = IPPROTO_ICMP;
7967 else
7968 pd->proto = IPPROTO_ICMPV6;
7969
7970 /*
7971 * translate inner udp header within
7972 * the ICMP message
7973 */
7974 pf_change_ap(direction, NULL, pd2.src,
7975 &uh.uh_sport, pd2.ip_sum,
7976 &uh.uh_sum, &daddr2->addr,
7977 saddr2->xport.port, 0, pd2.af,
7978 pd2.naf, 0);
7979
7980 pf_change_ap(direction, NULL, pd2.dst,
7981 &uh.uh_dport, pd2.ip_sum,
7982 &uh.uh_sum, &saddr2->addr,
7983 daddr2->xport.port, 0, pd2.af,
7984 pd2.naf, 0);
7985
7986 pbuf_copy_back(pbuf, pd2.off,
7987 sizeof(uh), &uh);
7988
7989 /* translate outer ip header */
7990 PF_ACPY(&pd->naddr, &daddr2->addr,
7991 pd->naf);
7992 PF_ACPY(&pd->ndaddr, &saddr2->addr,
7993 pd->naf);
7994 if (pd->af == AF_INET) {
7995 memcpy(&pd->naddr.addr32[3],
7996 &srcv4_inaddr,
7997 sizeof(pd->naddr.addr32[3]));
7998 return (pf_nat64_ipv4(pbuf, off,
7999 pd));
8000 } else {
8001 return (pf_nat64_ipv6(pbuf, off,
8002 pd));
8003 }
8004 }
8005 if (direction == PF_IN) {
8006 pf_change_icmp(pd2.src, &uh.uh_sport,
8007 daddr, &sk->lan.addr,
8008 sk->lan.xport.port, &uh.uh_sum,
8009 pd2.ip_sum, icmpsum,
8010 pd->ip_sum, 1, pd2.af);
8011 } else {
8012 pf_change_icmp(pd2.dst, &uh.uh_dport,
8013 saddr, &sk->gwy.addr,
8014 sk->gwy.xport.port, &uh.uh_sum,
8015 pd2.ip_sum, icmpsum,
8016 pd->ip_sum, 1, pd2.af);
8017 }
8018 if (pf_lazy_makewritable(pd, pbuf,
8019 off2 + sizeof (uh)) == NULL)
8020 return (PF_DROP);
8021 switch (pd2.af) {
8022 #if INET
8023 case AF_INET:
8024 pbuf_copy_back(pbuf, off, ICMP_MINLEN,
8025 pd->hdr.icmp);
8026 pbuf_copy_back(pbuf, ipoff2,
8027 sizeof (h2), &h2);
8028 break;
8029 #endif /* INET */
8030 #if INET6
8031 case AF_INET6:
8032 pbuf_copy_back(pbuf, off,
8033 sizeof (struct icmp6_hdr),
8034 pd->hdr.icmp6);
8035 pbuf_copy_back(pbuf, ipoff2,
8036 sizeof (h2_6), &h2_6);
8037 break;
8038 #endif /* INET6 */
8039 }
8040 pbuf_copy_back(pbuf, off2, sizeof (uh), &uh);
8041 }
8042
8043 return (PF_PASS);
8044 }
8045 #if INET
8046 case IPPROTO_ICMP: {
8047 struct icmp iih;
8048
8049 if (!pf_pull_hdr(pbuf, off2, &iih, ICMP_MINLEN,
8050 NULL, reason, pd2.af)) {
8051 DPFPRINTF(PF_DEBUG_MISC,
8052 ("pf: ICMP error message too short i"
8053 "(icmp)\n"));
8054 return (PF_DROP);
8055 }
8056
8057 key.proto = IPPROTO_ICMP;
8058 if (direction == PF_IN) {
8059 key.af_gwy = pd2.af;
8060 PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
8061 PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
8062 key.ext_gwy.xport.port = 0;
8063 key.gwy.xport.port = iih.icmp_id;
8064 } else {
8065 key.af_lan = pd2.af;
8066 PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
8067 PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
8068 key.lan.xport.port = iih.icmp_id;
8069 key.ext_lan.xport.port = 0;
8070 }
8071
8072 STATE_LOOKUP();
8073
8074 sk = (*state)->state_key;
8075 if (STATE_TRANSLATE(sk)) {
8076 if (direction == PF_IN) {
8077 pf_change_icmp(pd2.src, &iih.icmp_id,
8078 daddr, &sk->lan.addr,
8079 sk->lan.xport.port, NULL,
8080 pd2.ip_sum, icmpsum,
8081 pd->ip_sum, 0, AF_INET);
8082 } else {
8083 pf_change_icmp(pd2.dst, &iih.icmp_id,
8084 saddr, &sk->gwy.addr,
8085 sk->gwy.xport.port, NULL,
8086 pd2.ip_sum, icmpsum,
8087 pd->ip_sum, 0, AF_INET);
8088 }
8089 if (pf_lazy_makewritable(pd, pbuf,
8090 off2 + ICMP_MINLEN) == NULL)
8091 return (PF_DROP);
8092 pbuf_copy_back(pbuf, off, ICMP_MINLEN,
8093 pd->hdr.icmp);
8094 pbuf_copy_back(pbuf, ipoff2, sizeof (h2), &h2);
8095 pbuf_copy_back(pbuf, off2, ICMP_MINLEN, &iih);
8096 }
8097
8098 return (PF_PASS);
8099 }
8100 #endif /* INET */
8101 #if INET6
8102 case IPPROTO_ICMPV6: {
8103 struct icmp6_hdr iih;
8104
8105 if (!pf_pull_hdr(pbuf, off2, &iih,
8106 sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) {
8107 DPFPRINTF(PF_DEBUG_MISC,
8108 ("pf: ICMP error message too short "
8109 "(icmp6)\n"));
8110 return (PF_DROP);
8111 }
8112
8113 key.proto = IPPROTO_ICMPV6;
8114 if (direction == PF_IN) {
8115 key.af_gwy = pd2.af;
8116 PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
8117 PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
8118 key.ext_gwy.xport.port = 0;
8119 key.gwy.xport.port = iih.icmp6_id;
8120 } else {
8121 key.af_lan = pd2.af;
8122 PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
8123 PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
8124 key.lan.xport.port = iih.icmp6_id;
8125 key.ext_lan.xport.port = 0;
8126 }
8127
8128 STATE_LOOKUP();
8129
8130 sk = (*state)->state_key;
8131 if (STATE_TRANSLATE(sk)) {
8132 if (direction == PF_IN) {
8133 pf_change_icmp(pd2.src, &iih.icmp6_id,
8134 daddr, &sk->lan.addr,
8135 sk->lan.xport.port, NULL,
8136 pd2.ip_sum, icmpsum,
8137 pd->ip_sum, 0, AF_INET6);
8138 } else {
8139 pf_change_icmp(pd2.dst, &iih.icmp6_id,
8140 saddr, &sk->gwy.addr,
8141 sk->gwy.xport.port, NULL,
8142 pd2.ip_sum, icmpsum,
8143 pd->ip_sum, 0, AF_INET6);
8144 }
8145 if (pf_lazy_makewritable(pd, pbuf, off2 +
8146 sizeof (struct icmp6_hdr)) == NULL)
8147 return (PF_DROP);
8148 pbuf_copy_back(pbuf, off,
8149 sizeof (struct icmp6_hdr), pd->hdr.icmp6);
8150 pbuf_copy_back(pbuf, ipoff2, sizeof (h2_6),
8151 &h2_6);
8152 pbuf_copy_back(pbuf, off2,
8153 sizeof (struct icmp6_hdr), &iih);
8154 }
8155
8156 return (PF_PASS);
8157 }
8158 #endif /* INET6 */
8159 default: {
8160 key.proto = pd2.proto;
8161 if (direction == PF_IN) {
8162 key.af_gwy = pd2.af;
8163 PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy);
8164 PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy);
8165 key.ext_gwy.xport.port = 0;
8166 key.gwy.xport.port = 0;
8167 } else {
8168 key.af_lan = pd2.af;
8169 PF_ACPY(&key.lan.addr, pd2.dst, key.af_lan);
8170 PF_ACPY(&key.ext_lan.addr, pd2.src, key.af_lan);
8171 key.lan.xport.port = 0;
8172 key.ext_lan.xport.port = 0;
8173 }
8174
8175 STATE_LOOKUP();
8176
8177 sk = (*state)->state_key;
8178 if (STATE_TRANSLATE(sk)) {
8179 if (direction == PF_IN) {
8180 pf_change_icmp(pd2.src, NULL, daddr,
8181 &sk->lan.addr, 0, NULL,
8182 pd2.ip_sum, icmpsum,
8183 pd->ip_sum, 0, pd2.af);
8184 } else {
8185 pf_change_icmp(pd2.dst, NULL, saddr,
8186 &sk->gwy.addr, 0, NULL,
8187 pd2.ip_sum, icmpsum,
8188 pd->ip_sum, 0, pd2.af);
8189 }
8190 switch (pd2.af) {
8191 #if INET
8192 case AF_INET:
8193 if (pf_lazy_makewritable(pd, pbuf,
8194 ipoff2 + sizeof (h2)) == NULL)
8195 return (PF_DROP);
8196 /*
8197 * <XXXSCW>
8198 * Xnu was missing the following...
8199 */
8200 pbuf_copy_back(pbuf, off, ICMP_MINLEN,
8201 pd->hdr.icmp);
8202 pbuf_copy_back(pbuf, ipoff2,
8203 sizeof(h2), &h2);
8204 break;
8205 /*
8206 * </XXXSCW>
8207 */
8208 #endif /* INET */
8209 #if INET6
8210 case AF_INET6:
8211 if (pf_lazy_makewritable(pd, pbuf,
8212 ipoff2 + sizeof (h2_6)) == NULL)
8213 return (PF_DROP);
8214 pbuf_copy_back(pbuf, off,
8215 sizeof (struct icmp6_hdr),
8216 pd->hdr.icmp6);
8217 pbuf_copy_back(pbuf, ipoff2,
8218 sizeof (h2_6), &h2_6);
8219 break;
8220 #endif /* INET6 */
8221 }
8222 }
8223
8224 return (PF_PASS);
8225 }
8226 }
8227 }
8228 }
8229
8230 static int
8231 pf_test_state_grev1(struct pf_state **state, int direction,
8232 struct pfi_kif *kif, int off, struct pf_pdesc *pd)
8233 {
8234 struct pf_state_peer *src;
8235 struct pf_state_peer *dst;
8236 struct pf_state_key_cmp key;
8237 struct pf_grev1_hdr *grev1 = pd->hdr.grev1;
8238
8239 key.app_state = 0;
8240 key.proto = IPPROTO_GRE;
8241 key.proto_variant = PF_GRE_PPTP_VARIANT;
8242 if (direction == PF_IN) {
8243 key.af_gwy = pd->af;
8244 PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
8245 PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
8246 key.gwy.xport.call_id = grev1->call_id;
8247 } else {
8248 key.af_lan = pd->af;
8249 PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
8250 PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
8251 key.ext_lan.xport.call_id = grev1->call_id;
8252 }
8253
8254 STATE_LOOKUP();
8255
8256 if (direction == (*state)->state_key->direction) {
8257 src = &(*state)->src;
8258 dst = &(*state)->dst;
8259 } else {
8260 src = &(*state)->dst;
8261 dst = &(*state)->src;
8262 }
8263
8264 /* update states */
8265 if (src->state < PFGRE1S_INITIATING)
8266 src->state = PFGRE1S_INITIATING;
8267
8268 /* update expire time */
8269 (*state)->expire = pf_time_second();
8270 if (src->state >= PFGRE1S_INITIATING &&
8271 dst->state >= PFGRE1S_INITIATING) {
8272 if ((*state)->timeout != PFTM_TCP_ESTABLISHED)
8273 (*state)->timeout = PFTM_GREv1_ESTABLISHED;
8274 src->state = PFGRE1S_ESTABLISHED;
8275 dst->state = PFGRE1S_ESTABLISHED;
8276 } else {
8277 (*state)->timeout = PFTM_GREv1_INITIATING;
8278 }
8279
8280 if ((*state)->state_key->app_state)
8281 (*state)->state_key->app_state->u.grev1.pptp_state->expire =
8282 pf_time_second();
8283
8284 /* translate source/destination address, if necessary */
8285 if (STATE_GRE_TRANSLATE((*state)->state_key)) {
8286 if (direction == PF_OUT) {
8287 switch (pd->af) {
8288 #if INET
8289 case AF_INET:
8290 pf_change_a(&pd->src->v4addr.s_addr,
8291 pd->ip_sum,
8292 (*state)->state_key->gwy.addr.v4addr.s_addr, 0);
8293 break;
8294 #endif /* INET */
8295 #if INET6
8296 case AF_INET6:
8297 PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
8298 pd->af);
8299 break;
8300 #endif /* INET6 */
8301 }
8302 } else {
8303 grev1->call_id = (*state)->state_key->lan.xport.call_id;
8304
8305 switch (pd->af) {
8306 #if INET
8307 case AF_INET:
8308 pf_change_a(&pd->dst->v4addr.s_addr,
8309 pd->ip_sum,
8310 (*state)->state_key->lan.addr.v4addr.s_addr, 0);
8311 break;
8312 #endif /* INET */
8313 #if INET6
8314 case AF_INET6:
8315 PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
8316 pd->af);
8317 break;
8318 #endif /* INET6 */
8319 }
8320 }
8321
8322 if (pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1)) ==
8323 NULL)
8324 return (PF_DROP);
8325 pbuf_copy_back(pd->mp, off, sizeof (*grev1), grev1);
8326 }
8327
8328 return (PF_PASS);
8329 }
8330
/*
 * State tracking for ESP flows.  Looks up the state keyed by the
 * packet's SPI; if none exists, tries to claim a "blocking" state
 * (keyed with SPI 0) and re-keys it to the SPI seen in this packet.
 * Then advances the INITIATING/ESTABLISHED pseudo-states, refreshes
 * expiry, and rewrites the outer addresses when the state calls for
 * translation.  Returns PF_PASS, PF_DROP, or the action chosen by
 * pf_state_lookup_aux() on lookup failure.
 */
static int
pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif,
    int off, struct pf_pdesc *pd)
{
#pragma unused(off)
	struct pf_state_peer *src;
	struct pf_state_peer *dst;
	struct pf_state_key_cmp key;
	struct pf_esp_hdr *esp = pd->hdr.esp;
	int action;

	memset(&key, 0, sizeof (key));
	key.proto = IPPROTO_ESP;
	if (direction == PF_IN) {
		key.af_gwy = pd->af;
		PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
		PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
		key.gwy.xport.spi = esp->spi;
	} else {
		key.af_lan = pd->af;
		PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
		PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
		key.ext_lan.xport.spi = esp->spi;
	}

	*state = pf_find_state(kif, &key, direction);

	if (*state == 0) {
		struct pf_state *s;

		/*
		 * <jhw@apple.com>
		 * No matching state. Look for a blocking state. If we find
		 * one, then use that state and move it so that it's keyed to
		 * the SPI in the current packet.
		 */
		if (direction == PF_IN) {
			key.gwy.xport.spi = 0;

			s = pf_find_state(kif, &key, direction);
			if (s) {
				struct pf_state_key *sk = s->state_key;

				/* re-key: remove, change SPI, re-insert */
				RB_REMOVE(pf_state_tree_ext_gwy,
				    &pf_statetbl_ext_gwy, sk);
				sk->lan.xport.spi = sk->gwy.xport.spi =
				    esp->spi;

				/*
				 * RB_INSERT returns non-NULL when a node with
				 * the same key already exists; in that case
				 * detach rather than leave a duplicate.
				 */
				if (RB_INSERT(pf_state_tree_ext_gwy,
				    &pf_statetbl_ext_gwy, sk))
					pf_detach_state(s, PF_DT_SKIP_EXTGWY);
				else
					*state = s;
			}
		} else {
			key.ext_lan.xport.spi = 0;

			s = pf_find_state(kif, &key, direction);
			if (s) {
				struct pf_state_key *sk = s->state_key;

				RB_REMOVE(pf_state_tree_lan_ext,
				    &pf_statetbl_lan_ext, sk);
				sk->ext_lan.xport.spi = esp->spi;

				if (RB_INSERT(pf_state_tree_lan_ext,
				    &pf_statetbl_lan_ext, sk))
					pf_detach_state(s, PF_DT_SKIP_LANEXT);
				else
					*state = s;
			}
		}

		if (s) {
			if (*state == 0) {
				/*
				 * Re-keying collided with an existing state;
				 * the blocking state is unusable, so tear it
				 * down completely and drop the packet.
				 */
#if NPFSYNC
				if (s->creatorid == pf_status.hostid)
					pfsync_delete_state(s);
#endif
				s->timeout = PFTM_UNLINKED;
				hook_runloop(&s->unlink_hooks,
				    HOOK_REMOVE|HOOK_FREE);
				pf_src_tree_remove_state(s);
				pf_free_state(s);
				return (PF_DROP);
			}
		}
	}

	/* similar to STATE_LOOKUP() */
	if (*state != NULL && pd != NULL && !(pd->pktflags & PKTF_FLOW_ID)) {
		pd->flowsrc = (*state)->state_key->flowsrc;
		pd->flowhash = (*state)->state_key->flowhash;
		if (pd->flowhash != 0) {
			pd->pktflags |= PKTF_FLOW_ID;
			pd->pktflags &= ~PKTF_FLOW_ADV;
		}
	}

	if (pf_state_lookup_aux(state, kif, direction, &action))
		return (action);

	/* map packet direction onto the state's src/dst peers */
	if (direction == (*state)->state_key->direction) {
		src = &(*state)->src;
		dst = &(*state)->dst;
	} else {
		src = &(*state)->dst;
		dst = &(*state)->src;
	}

	/* update states */
	if (src->state < PFESPS_INITIATING)
		src->state = PFESPS_INITIATING;

	/* update expire time */
	(*state)->expire = pf_time_second();
	if (src->state >= PFESPS_INITIATING &&
	    dst->state >= PFESPS_INITIATING) {
		(*state)->timeout = PFTM_ESP_ESTABLISHED;
		src->state = PFESPS_ESTABLISHED;
		dst->state = PFESPS_ESTABLISHED;
	} else {
		(*state)->timeout = PFTM_ESP_INITIATING;
	}
	/* translate source/destination address, if necessary */
	if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
		if (direction == PF_OUT) {
			switch (pd->af) {
#if INET
			case AF_INET:
				pf_change_a(&pd->src->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->gwy.addr.v4addr.s_addr, 0);
				break;
#endif /* INET */
#if INET6
			case AF_INET6:
				PF_ACPY(pd->src, &(*state)->state_key->gwy.addr,
				    pd->af);
				break;
#endif /* INET6 */
			}
		} else {
			switch (pd->af) {
#if INET
			case AF_INET:
				pf_change_a(&pd->dst->v4addr.s_addr,
				    pd->ip_sum,
				    (*state)->state_key->lan.addr.v4addr.s_addr, 0);
				break;
#endif /* INET */
#if INET6
			case AF_INET6:
				PF_ACPY(pd->dst, &(*state)->state_key->lan.addr,
				    pd->af);
				break;
#endif /* INET6 */
			}
		}
	}

	return (PF_PASS);
}
8494
8495 static int
8496 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
8497 struct pf_pdesc *pd)
8498 {
8499 struct pf_state_peer *src, *dst;
8500 struct pf_state_key_cmp key;
8501
8502 key.app_state = 0;
8503 key.proto = pd->proto;
8504 if (direction == PF_IN) {
8505 key.af_gwy = pd->af;
8506 PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy);
8507 PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy);
8508 key.ext_gwy.xport.port = 0;
8509 key.gwy.xport.port = 0;
8510 } else {
8511 key.af_lan = pd->af;
8512 PF_ACPY(&key.lan.addr, pd->src, key.af_lan);
8513 PF_ACPY(&key.ext_lan.addr, pd->dst, key.af_lan);
8514 key.lan.xport.port = 0;
8515 key.ext_lan.xport.port = 0;
8516 }
8517
8518 STATE_LOOKUP();
8519
8520 if (direction == (*state)->state_key->direction) {
8521 src = &(*state)->src;
8522 dst = &(*state)->dst;
8523 } else {
8524 src = &(*state)->dst;
8525 dst = &(*state)->src;
8526 }
8527
8528 /* update states */
8529 if (src->state < PFOTHERS_SINGLE)
8530 src->state = PFOTHERS_SINGLE;
8531 if (dst->state == PFOTHERS_SINGLE)
8532 dst->state = PFOTHERS_MULTIPLE;
8533
8534 /* update expire time */
8535 (*state)->expire = pf_time_second();
8536 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
8537 (*state)->timeout = PFTM_OTHER_MULTIPLE;
8538 else
8539 (*state)->timeout = PFTM_OTHER_SINGLE;
8540
8541 /* translate source/destination address, if necessary */
8542 if (STATE_ADDR_TRANSLATE((*state)->state_key)) {
8543 if (direction == PF_OUT) {
8544 switch (pd->af) {
8545 #if INET
8546 case AF_INET:
8547 pf_change_a(&pd->src->v4addr.s_addr,
8548 pd->ip_sum,
8549 (*state)->state_key->gwy.addr.v4addr.s_addr,
8550 0);
8551 break;
8552 #endif /* INET */
8553 #if INET6
8554 case AF_INET6:
8555 PF_ACPY(pd->src,
8556 &(*state)->state_key->gwy.addr, pd->af);
8557 break;
8558 #endif /* INET6 */
8559 }
8560 } else {
8561 switch (pd->af) {
8562 #if INET
8563 case AF_INET:
8564 pf_change_a(&pd->dst->v4addr.s_addr,
8565 pd->ip_sum,
8566 (*state)->state_key->lan.addr.v4addr.s_addr,
8567 0);
8568 break;
8569 #endif /* INET */
8570 #if INET6
8571 case AF_INET6:
8572 PF_ACPY(pd->dst,
8573 &(*state)->state_key->lan.addr, pd->af);
8574 break;
8575 #endif /* INET6 */
8576 }
8577 }
8578 }
8579
8580 return (PF_PASS);
8581 }
8582
8583 /*
8584 * ipoff and off are measured from the start of the mbuf chain.
8585 * h must be at "ipoff" on the mbuf chain.
8586 */
8587 void *
8588 pf_pull_hdr(pbuf_t *pbuf, int off, void *p, int len,
8589 u_short *actionp, u_short *reasonp, sa_family_t af)
8590 {
8591 switch (af) {
8592 #if INET
8593 case AF_INET: {
8594 struct ip *h = pbuf->pb_data;
8595 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
8596
8597 if (fragoff) {
8598 if (fragoff >= len) {
8599 ACTION_SET(actionp, PF_PASS);
8600 } else {
8601 ACTION_SET(actionp, PF_DROP);
8602 REASON_SET(reasonp, PFRES_FRAG);
8603 }
8604 return (NULL);
8605 }
8606 if (pbuf->pb_packet_len < (unsigned)(off + len) ||
8607 ntohs(h->ip_len) < off + len) {
8608 ACTION_SET(actionp, PF_DROP);
8609 REASON_SET(reasonp, PFRES_SHORT);
8610 return (NULL);
8611 }
8612 break;
8613 }
8614 #endif /* INET */
8615 #if INET6
8616 case AF_INET6: {
8617 struct ip6_hdr *h = pbuf->pb_data;
8618
8619 if (pbuf->pb_packet_len < (unsigned)(off + len) ||
8620 (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) <
8621 (unsigned)(off + len)) {
8622 ACTION_SET(actionp, PF_DROP);
8623 REASON_SET(reasonp, PFRES_SHORT);
8624 return (NULL);
8625 }
8626 break;
8627 }
8628 #endif /* INET6 */
8629 }
8630 pbuf_copy_data(pbuf, off, len, p);
8631 return (p);
8632 }
8633
8634 int
8635 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif)
8636 {
8637 #pragma unused(kif)
8638 struct sockaddr_in *dst;
8639 int ret = 1;
8640 #if INET6
8641 struct sockaddr_in6 *dst6;
8642 struct route_in6 ro;
8643 #else
8644 struct route ro;
8645 #endif
8646
8647 bzero(&ro, sizeof (ro));
8648 switch (af) {
8649 case AF_INET:
8650 dst = satosin(&ro.ro_dst);
8651 dst->sin_family = AF_INET;
8652 dst->sin_len = sizeof (*dst);
8653 dst->sin_addr = addr->v4addr;
8654 break;
8655 #if INET6
8656 case AF_INET6:
8657 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
8658 dst6->sin6_family = AF_INET6;
8659 dst6->sin6_len = sizeof (*dst6);
8660 dst6->sin6_addr = addr->v6addr;
8661 break;
8662 #endif /* INET6 */
8663 default:
8664 return (0);
8665 }
8666
8667 /* XXX: IFT_ENC is not currently used by anything*/
8668 /* Skip checks for ipsec interfaces */
8669 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
8670 goto out;
8671
8672 /* XXX: what is the point of this? */
8673 rtalloc((struct route *)&ro);
8674
8675 out:
8676 ROUTE_RELEASE(&ro);
8677 return (ret);
8678 }
8679
8680 int
8681 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw)
8682 {
8683 #pragma unused(aw)
8684 struct sockaddr_in *dst;
8685 #if INET6
8686 struct sockaddr_in6 *dst6;
8687 struct route_in6 ro;
8688 #else
8689 struct route ro;
8690 #endif
8691 int ret = 0;
8692
8693 bzero(&ro, sizeof (ro));
8694 switch (af) {
8695 case AF_INET:
8696 dst = satosin(&ro.ro_dst);
8697 dst->sin_family = AF_INET;
8698 dst->sin_len = sizeof (*dst);
8699 dst->sin_addr = addr->v4addr;
8700 break;
8701 #if INET6
8702 case AF_INET6:
8703 dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
8704 dst6->sin6_family = AF_INET6;
8705 dst6->sin6_len = sizeof (*dst6);
8706 dst6->sin6_addr = addr->v6addr;
8707 break;
8708 #endif /* INET6 */
8709 default:
8710 return (0);
8711 }
8712
8713 /* XXX: what is the point of this? */
8714 rtalloc((struct route *)&ro);
8715
8716 ROUTE_RELEASE(&ro);
8717
8718 return (ret);
8719 }
8720
#if INET
/*
 * Deliver an IPv4 packet according to a route-to/reply-to/dup-to rule:
 * choose the outgoing interface and next hop from the rule's pool (or
 * the state's cached route), re-run pf on the new interface, then
 * transmit, fragmenting when necessary.  Consumes *pbufp except in the
 * dup-to case, where only a clone is routed and the original is left
 * for normal processing.
 */
static void
pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
#pragma unused(pd)	/* NOTE(review): stale -- pd is dereferenced below */
	struct mbuf *m0, *m1;
	struct route iproute;
	struct route *ro = &iproute;
	struct sockaddr_in *dst;
	struct ip *ip;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;
	int error = 0;
	uint32_t sw_csum;

	bzero(&iproute, sizeof (iproute));

	if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
		panic("pf_route: invalid parameters");

	/* routing-loop guard: drop after too many re-routes */
	if (pd->pf_mtag->pftag_routed++ > 3) {
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		m0 = NULL;
		goto bad;
	}

	/*
	 * Since this is something of an edge case and may involve the
	 * host stack (for routing, at least for now), we convert the
	 * incoming pbuf into an mbuf.
	 */
	if (r->rt == PF_DUPTO)
		m0 = pbuf_clone_to_mbuf(*pbufp);
	else
	if ((r->rt == PF_REPLYTO) == (r->direction == dir))
		return;
	else {
		/* We're going to consume this packet */
		m0 = pbuf_to_mbuf(*pbufp, TRUE);
		*pbufp = NULL;
	}

	if (m0 == NULL)
		goto bad;

	/* We now have the packet in an mbuf (m0) */

	if (m0->m_len < (int)sizeof (struct ip)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route: packet length < sizeof (struct ip)\n"));
		goto bad;
	}

	ip = mtod(m0, struct ip *);

	/* default next hop: the packet's own destination */
	dst = satosin((void *)&ro->ro_dst);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof (*dst);
	dst->sin_addr = ip->ip_dst;

	if (r->rt == PF_FASTROUTE) {
		/* fastroute: ordinary FIB lookup on the destination */
		rtalloc(ro);
		if (ro->ro_rt == NULL) {
			ipstat.ips_noroute++;
			goto bad;
		}

		ifp = ro->ro_rt->rt_ifp;
		RT_LOCK(ro->ro_rt);
		ro->ro_rt->rt_use++;

		if (ro->ro_rt->rt_flags & RTF_GATEWAY)
			dst = satosin((void *)ro->ro_rt->rt_gateway);
		RT_UNLOCK(ro->ro_rt);
	} else {
		/* route-to/reply-to/dup-to: next hop comes from the pool */
		if (TAILQ_EMPTY(&r->rpool.list)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route: TAILQ_EMPTY(&r->rpool.list)\n"));
			goto bad;
		}
		if (s == NULL) {
			/* stateless: pick an address from the pool now */
			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
			    &naddr, NULL, &sn);
			if (!PF_AZERO(&naddr, AF_INET))
				dst->sin_addr.s_addr = naddr.v4addr.s_addr;
			ifp = r->rpool.cur->kif ?
			    r->rpool.cur->kif->pfik_ifp : NULL;
		} else {
			/* stateful: reuse the route cached in the state */
			if (!PF_AZERO(&s->rt_addr, AF_INET))
				dst->sin_addr.s_addr =
				    s->rt_addr.v4addr.s_addr;
			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
		}
	}
	if (ifp == NULL)
		goto bad;

	/* leaving via a different interface: run the ruleset again */
	if (oifp != ifp) {
		if (pf_test_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS)
			goto bad;
		else if (m0 == NULL)
			goto done;
		if (m0->m_len < (int)sizeof (struct ip)) {
			DPFPRINTF(PF_DEBUG_URGENT,
			    ("pf_route: packet length < sizeof (struct ip)\n"));
			goto bad;
		}
		ip = mtod(m0, struct ip *);
	}

	/* Catch routing changes wrt. hardware checksumming for TCP or UDP. */
	ip_output_checksum(ifp, m0, ((ip->ip_hl) << 2), ntohs(ip->ip_len),
	    &sw_csum);

	/* fits the MTU (or hardware can segment/fragment): send as-is */
	if (ntohs(ip->ip_len) <= ifp->if_mtu || TSO_IPV4_OK(ifp, m0) ||
	    (!(ip->ip_off & htons(IP_DF)) &&
	    (ifp->if_hwassist & CSUM_FRAGMENT))) {
		ip->ip_sum = 0;
		if (sw_csum & CSUM_DELAY_IP) {
			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
			sw_csum &= ~CSUM_DELAY_IP;
			m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
		}
		error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt, sintosa(dst));
		goto done;
	}

	/*
	 * Too large for interface; fragment if possible.
	 * Must be able to put at least 8 bytes per fragment.
	 * Balk when DF bit is set or the interface didn't support TSO.
	 */
	if ((ip->ip_off & htons(IP_DF)) ||
	    (m0->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) {
		ipstat.ips_cantfrag++;
		if (r->rt != PF_DUPTO) {
			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
			    ifp->if_mtu);
			goto done;
		} else
			goto bad;
	}

	m1 = m0;

	/* PR-8933605: send ip_len,ip_off to ip_fragment in host byte order */
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_off);
	NTOHS(ip->ip_len);
#endif
	error = ip_fragment(m0, ifp, ifp->if_mtu, sw_csum);

	if (error) {
		m0 = NULL;
		goto bad;
	}

	/* send each fragment; after a failure, free the remainder */
	for (m0 = m1; m0; m0 = m1) {
		m1 = m0->m_nextpkt;
		m0->m_nextpkt = 0;
		if (error == 0)
			error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt,
			    sintosa(dst));
		else
			m_freem(m0);
	}

	if (error == 0)
		ipstat.ips_fragmented++;

done:
	ROUTE_RELEASE(&iproute);
	return;

bad:
	if (m0)
		m_freem(m0);
	goto done;
}
#endif /* INET */
8905
#if INET6
/*
 * IPv6 counterpart of pf_route(): deliver a packet according to a
 * route-to/reply-to/dup-to rule.  No fragmentation is attempted;
 * packets exceeding the interface MTU elicit ICMP6_PACKET_TOO_BIG
 * (except in the dup-to case, where the clone is simply dropped).
 * Consumes *pbufp except for dup-to, where only a clone is routed.
 */
static void
pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp,
    struct pf_state *s, struct pf_pdesc *pd)
{
#pragma unused(pd)	/* NOTE(review): stale -- pd is dereferenced below */
	struct mbuf *m0;
	struct route_in6 ip6route;
	struct route_in6 *ro;
	struct sockaddr_in6 *dst;
	struct ip6_hdr *ip6;
	struct ifnet *ifp = NULL;
	struct pf_addr naddr;
	struct pf_src_node *sn = NULL;
	int error = 0;

	if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL ||
	    (dir != PF_IN && dir != PF_OUT) || oifp == NULL)
		panic("pf_route6: invalid parameters");

	/* routing-loop guard: drop after too many re-routes */
	if (pd->pf_mtag->pftag_routed++ > 3) {
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		m0 = NULL;
		goto bad;
	}

	/*
	 * Since this is something of an edge case and may involve the
	 * host stack (for routing, at least for now), we convert the
	 * incoming pbuf into an mbuf.
	 */
	if (r->rt == PF_DUPTO) {
		m0 = pbuf_clone_to_mbuf(*pbufp);
	} else
	if ((r->rt == PF_REPLYTO) == (r->direction == dir))
		return;
	else {
		/* We're about to consume this packet */
		m0 = pbuf_to_mbuf(*pbufp, TRUE);
		*pbufp = NULL;
	}

	if (m0 == NULL)
		goto bad;

	if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n"));
		goto bad;
	}
	ip6 = mtod(m0, struct ip6_hdr *);

	/* default next hop: the packet's own destination */
	ro = &ip6route;
	bzero((caddr_t)ro, sizeof (*ro));
	dst = (struct sockaddr_in6 *)&ro->ro_dst;
	dst->sin6_family = AF_INET6;
	dst->sin6_len = sizeof (*dst);
	dst->sin6_addr = ip6->ip6_dst;

	/* Cheat. XXX why only in the v6addr case??? */
	if (r->rt == PF_FASTROUTE) {
		struct pf_mtag *pf_mtag;

		if ((pf_mtag = pf_get_mtag(m0)) == NULL)
			goto bad;
		/* mark as pf-generated so it isn't filtered again */
		pf_mtag->pftag_flags |= PF_TAG_GENERATED;
		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
		return;
	}

	if (TAILQ_EMPTY(&r->rpool.list)) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_route6: TAILQ_EMPTY(&r->rpool.list)\n"));
		goto bad;
	}
	if (s == NULL) {
		/* stateless: pick an address from the rule's pool now */
		pf_map_addr(AF_INET6, r, (struct pf_addr *)(uintptr_t)&ip6->ip6_src,
		    &naddr, NULL, &sn);
		if (!PF_AZERO(&naddr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &naddr, AF_INET6);
		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
	} else {
		/* stateful: reuse the route cached in the state */
		if (!PF_AZERO(&s->rt_addr, AF_INET6))
			PF_ACPY((struct pf_addr *)&dst->sin6_addr,
			    &s->rt_addr, AF_INET6);
		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
	}
	if (ifp == NULL)
		goto bad;

	/* leaving via a different interface: run the ruleset again */
	if (oifp != ifp) {
		if (pf_test6_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS)
			goto bad;
		else if (m0 == NULL)
			goto done;
		if (m0->m_len < (int)sizeof (struct ip6_hdr)) {
			DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len "
			    "< sizeof (struct ip6_hdr)\n"));
			goto bad;
		}
		ip6 = mtod(m0, struct ip6_hdr *);
	}

	/*
	 * If the packet is too large for the outgoing interface,
	 * send back an icmp6 error.
	 */
	if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr))
		dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index);
	if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) {
		/* NOTE(review): error is assigned but never examined */
		error = nd6_output(ifp, ifp, m0, dst, NULL, NULL);
	} else {
		in6_ifstat_inc(ifp, ifs6_in_toobig);
		if (r->rt != PF_DUPTO)
			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
		else
			goto bad;
	}

done:
	return;

bad:
	if (m0)
		m_freem(m0);
	goto done;
}
#endif /* INET6 */
9036
9037
9038 /*
9039 * check protocol (tcp/udp/icmp/icmp6) checksum and set mbuf flag
9040 * off is the offset where the protocol header starts
9041 * len is the total length of protocol header plus payload
9042 * returns 0 when the checksum is valid, otherwise returns 1.
9043 */
static int
pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p,
    sa_family_t af)
{
	u_int16_t sum;

	/* Decide whether a software checksum is needed at all. */
	switch (p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/*
		 * Optimize for the common case; if the hardware calculated
		 * value doesn't include pseudo-header checksum, or if it
		 * is partially-computed (only 16-bit summation), do it in
		 * software below.
		 */
		if ((*pbuf->pb_csum_flags &
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR) &&
		    (*pbuf->pb_csum_data ^ 0xffff) == 0) {
			/* Hardware already verified the full checksum. */
			return (0);
		}
		break;
	case IPPROTO_ICMP:
#if INET6
	case IPPROTO_ICMPV6:
#endif /* INET6 */
		/* ICMP has no hardware-offload path here; always verify. */
		break;
	default:
		/* Unknown protocol: report as bad rather than guessing. */
		return (1);
	}
	/* Sanity-check offset/length against minimum header sizes ... */
	if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr))
		return (1);
	/* ... and make sure the claimed span fits inside the packet. */
	if (pbuf->pb_packet_len < (unsigned)(off + len))
		return (1);
	switch (af) {
#if INET
	case AF_INET:
		if (p == IPPROTO_ICMP) {
#if 0
			if (m->m_len < off)
				return (1);
			m->m_data += off;
			m->m_len -= off;
			sum = in_cksum(m, len);
			m->m_data -= off;
			m->m_len += off;
#else
			/* ICMPv4 checksum covers no pseudo-header (proto 0). */
			if (pbuf->pb_contig_len < (unsigned)off)
				return (1);
			sum = pbuf_inet_cksum(pbuf, 0, off, len);
#endif
		} else {
			/* TCP/UDP: include the IPv4 pseudo-header. */
			if (pbuf->pb_contig_len < (int)sizeof (struct ip))
				return (1);
			sum = pbuf_inet_cksum(pbuf, p, off, len);
		}
		break;
#endif /* INET */
#if INET6
	case AF_INET6:
		if (pbuf->pb_contig_len < (int)sizeof (struct ip6_hdr))
			return (1);
		sum = pbuf_inet6_cksum(pbuf, p, off, len);
		break;
#endif /* INET6 */
	default:
		return (1);
	}
	if (sum) {
		/* Bad checksum: bump the per-protocol error counter. */
		switch (p) {
		case IPPROTO_TCP:
			tcpstat.tcps_rcvbadsum++;
			break;
		case IPPROTO_UDP:
			udpstat.udps_badsum++;
			break;
		case IPPROTO_ICMP:
			icmpstat.icps_checksum++;
			break;
#if INET6
		case IPPROTO_ICMPV6:
			icmp6stat.icp6s_checksum++;
			break;
#endif /* INET6 */
		}
		return (1);
	}
	return (0);
}
9133
9134 #if INET
/*
 * Re-sync the local packet cursors after a helper call: the normalization
 * and state-tracking routines may substitute a new packet buffer via
 * pd.mp.  If that happened, adopt the new pbuf and refresh the cached
 * IPv4 header pointer (h) and the pf mbuf tag.
 */
#define PF_APPLE_UPDATE_PDESC_IPv4() \
	do { \
		if (pbuf && pd.mp && pbuf != pd.mp) { \
			pbuf = pd.mp; \
			h = pbuf->pb_data; \
			pd.pf_mtag = pf_get_mtag_pbuf(pbuf); \
		} \
	} while (0)
9143
9144 int
9145 pf_test_mbuf(int dir, struct ifnet *ifp, struct mbuf **m0,
9146 struct ether_header *eh, struct ip_fw_args *fwa)
9147 {
9148 pbuf_t pbuf_store, *pbuf;
9149 int rv;
9150
9151 pbuf_init_mbuf(&pbuf_store, *m0, (*m0)->m_pkthdr.rcvif);
9152 pbuf = &pbuf_store;
9153
9154 rv = pf_test(dir, ifp, &pbuf, eh, fwa);
9155
9156 if (pbuf_is_valid(pbuf)) {
9157 *m0 = pbuf->pb_mbuf;
9158 pbuf->pb_mbuf = NULL;
9159 pbuf_destroy(pbuf);
9160 } else
9161 *m0 = NULL;
9162
9163 return (rv);
9164 }
9165
/*
 * Main IPv4 entry point of the packet filter.
 *
 * Runs the packet in *pbufp through header normalization/reassembly,
 * dummynet (when compiled in), per-protocol state lookup, and — when no
 * state matches — the rule set.  Statistics and logging are applied at
 * the "done" label.  Returns a PF_* action (PF_PASS, PF_DROP, ...).
 *
 * Ownership: on PF_DROP and PF_NAT64 the packet is consumed and *pbufp
 * is set to NULL; otherwise *pbufp refers to the (possibly replaced)
 * packet.  pf_route() may also free the packet (see tail of function).
 * Caller must hold pf_lock (asserted below).
 */
int
pf_test(int dir, struct ifnet *ifp, pbuf_t **pbufp,
    struct ether_header *eh, struct ip_fw_args *fwa)
{
#if !DUMMYNET
#pragma unused(fwa)
#endif
	struct pfi_kif *kif;
	u_short action = PF_PASS, reason = 0, log = 0;
	pbuf_t *pbuf = *pbufp;
	struct ip *h = 0;
	struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
	struct pf_state *s = NULL;
	struct pf_state_key *sk = NULL;
	struct pf_ruleset *ruleset = NULL;
	struct pf_pdesc pd;
	/*
	 * NOTE(review): pqid is set for pure TCP ACKs below but never read
	 * within this function — presumably a leftover from prioritized
	 * queueing; confirm before removing.
	 */
	int off, dirndx, pqid = 0;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	/* Filter disabled: let everything through untouched. */
	if (!pf_status.running)
		return (PF_PASS);

	memset(&pd, 0, sizeof (pd));

	if ((pd.pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: pf_get_mtag_pbuf returned NULL\n"));
		return (PF_DROP);
	}

	/* Packets generated by pf itself are never re-filtered. */
	if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED)
		return (PF_PASS);

	kif = (struct pfi_kif *)ifp->if_pf_kif;

	if (kif == NULL) {
		DPFPRINTF(PF_DEBUG_URGENT,
		    ("pf_test: kif == NULL, if_name %s\n", ifp->if_name));
		return (PF_DROP);
	}
	if (kif->pfik_flags & PFI_IFLAG_SKIP)
		return (PF_PASS);

	/* initialize enough of pd for the done label */
	h = pbuf->pb_data;
	pd.mp = pbuf;
	pd.lmw = 0;
	pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	PF_ACPY(&pd.baddr, pd.src, AF_INET);
	PF_ACPY(&pd.bdaddr, pd.dst, AF_INET);
	pd.ip_sum = &h->ip_sum;
	pd.proto = h->ip_p;
	pd.proto_variant = 0;
	pd.af = AF_INET;
	pd.tos = h->ip_tos;
	pd.ttl = h->ip_ttl;
	pd.tot_len = ntohs(h->ip_len);
	pd.eh = eh;

	/* Runt packet: not even a full IPv4 header present. */
	if (pbuf->pb_packet_len < (int)sizeof (*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

#if DUMMYNET
	/* Re-injected from dummynet: normalization was already done. */
	if (fwa != NULL && fwa->fwa_pf_rule != NULL)
		goto nonormalize;
#endif /* DUMMYNET */

	/* We do IP header normalization and packet reassembly here */
	action = pf_normalize_ip(pbuf, dir, kif, &reason, &pd);
	if (action != PF_PASS || pd.lmw < 0) {
		action = PF_DROP;
		goto done;
	}

#if DUMMYNET
nonormalize:
#endif /* DUMMYNET */
	/* pf_normalize can mess with pb_data */
	h = pbuf->pb_data;

	off = h->ip_hl << 2;
	/* Header-length field smaller than the fixed header: malformed. */
	if (off < (int)sizeof (*h)) {
		action = PF_DROP;
		REASON_SET(&reason, PFRES_SHORT);
		log = 1;
		goto done;
	}

	/* Rebuild pd from the (possibly relocated) header. */
	pd.src = (struct pf_addr *)&h->ip_src;
	pd.dst = (struct pf_addr *)&h->ip_dst;
	PF_ACPY(&pd.baddr, pd.src, AF_INET);
	PF_ACPY(&pd.bdaddr, pd.dst, AF_INET);
	pd.ip_sum = &h->ip_sum;
	pd.proto = h->ip_p;
	pd.proto_variant = 0;
	pd.mp = pbuf;
	pd.lmw = 0;
	pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
	pd.af = AF_INET;
	pd.tos = h->ip_tos;
	pd.ttl = h->ip_ttl;
	pd.sc = MBUF_SCIDX(pbuf_get_service_class(pbuf));
	pd.tot_len = ntohs(h->ip_len);
	pd.eh = eh;

	/* Propagate any existing flow identification into pd. */
	if (*pbuf->pb_flags & PKTF_FLOW_ID) {
		pd.flowsrc = *pbuf->pb_flowsrc;
		pd.flowhash = *pbuf->pb_flowid;
		pd.pktflags = *pbuf->pb_flags & PKTF_FLOW_MASK;
	}

	/* handle fragments that didn't get reassembled by normalization */
	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
		pd.flags |= PFDESC_IP_FRAG;
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return (action);
		}
#endif /* DUMMYNET */
		action = pf_test_fragment(&r, dir, kif, pbuf, h,
		    &pd, &a, &ruleset);
		goto done;
	}

	/* Per-protocol processing: pull header, track state, apply rules. */
	switch (h->ip_p) {

	case IPPROTO_TCP: {
		struct tcphdr th;
		pd.hdr.tcp = &th;
		if (!pf_pull_hdr(pbuf, off, &th, sizeof (th),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		pd.p_len = pd.tot_len - off - (th.th_off << 2);
		/* Pure ACK (no payload): flag for prioritized queueing. */
		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
			pqid = 1;
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return (action);
		}
#endif /* DUMMYNET */
		action = pf_normalize_tcp(dir, kif, pbuf, 0, off, h, &pd);
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_DROP)
			goto done;
		action = pf_test_state_tcp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64)
			goto done;
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			/* Matched an existing state: adopt its rule/log. */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		break;
	}

	case IPPROTO_UDP: {
		struct udphdr uh;

		pd.hdr.udp = &uh;
		if (!pf_pull_hdr(pbuf, off, &uh, sizeof (uh),
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
		/* Reject zero dest port and inconsistent UDP lengths. */
		if (uh.uh_dport == 0 ||
		    ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off ||
		    ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_SHORT);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return (action);
		}
#endif /* DUMMYNET */
		action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64)
			goto done;
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		break;
	}

	case IPPROTO_ICMP: {
		struct icmp ih;

		pd.hdr.icmp = &ih;
		if (!pf_pull_hdr(pbuf, off, &ih, ICMP_MINLEN,
		    &action, &reason, AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return (action);
		}
#endif /* DUMMYNET */
		action = pf_test_state_icmp(&s, dir, kif, pbuf, off, h, &pd,
		    &reason);
		if (action == PF_NAT64)
			goto done;
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		break;
	}

	case IPPROTO_ESP: {
		struct pf_esp_hdr esp;

		pd.hdr.esp = &esp;
		if (!pf_pull_hdr(pbuf, off, &esp, sizeof (esp), &action, &reason,
		    AF_INET)) {
			log = action != PF_PASS;
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return (action);
		}
#endif /* DUMMYNET */
		action = pf_test_state_esp(&s, dir, kif, off, &pd);
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif,
			    pbuf, off, h, &pd, &a, &ruleset, NULL);
		break;
	}

	case IPPROTO_GRE: {
		struct pf_grev1_hdr grev1;
		pd.hdr.grev1 = &grev1;
		if (!pf_pull_hdr(pbuf, off, &grev1, sizeof (grev1), &action,
		    &reason, AF_INET)) {
			log = (action != PF_PASS);
			goto done;
		}
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return (action);
		}
#endif /* DUMMYNET */
		/* GREv1/PPTP gets dedicated state tracking. */
		if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
		    ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
			if (ntohs(grev1.payload_length) >
			    pbuf->pb_packet_len - off) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
				goto done;
			}
			pd.proto_variant = PF_GRE_PPTP_VARIANT;
			action = pf_test_state_grev1(&s, dir, kif, off, &pd);
			if (pd.lmw < 0) goto done;
			PF_APPLE_UPDATE_PDESC_IPv4();
			if (action == PF_PASS) {
#if NPFSYNC
				pfsync_update_state(s);
#endif /* NPFSYNC */
				r = s->rule.ptr;
				a = s->anchor.ptr;
				log = s->log;
				break;
			} else if (s == NULL) {
				action = pf_test_rule(&r, &s, dir, kif, pbuf,
				    off, h, &pd, &a, &ruleset, NULL);
				if (action == PF_PASS)
					break;
			}
		}

		/* not GREv1/PPTP, so treat as ordinary GRE... */
		/* intentional fallthrough into the default case */
	}

	default:
#if DUMMYNET
		/* Traffic goes through dummynet first */
		action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
		if (action == PF_DROP || pbuf == NULL) {
			*pbufp = NULL;
			return (action);
		}
#endif /* DUMMYNET */
		action = pf_test_state_other(&s, dir, kif, &pd);
		if (pd.lmw < 0)
			goto done;
		PF_APPLE_UPDATE_PDESC_IPv4();
		if (action == PF_PASS) {
#if NPFSYNC
			pfsync_update_state(s);
#endif /* NPFSYNC */
			r = s->rule.ptr;
			a = s->anchor.ptr;
			log = s->log;
		} else if (s == NULL)
			action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h,
			    &pd, &a, &ruleset, NULL);
		break;
	}

done:
	/* NAT64 translation consumed the packet; nothing more to do here. */
	if (action == PF_NAT64) {
		*pbufp = NULL;
		return (action);
	}

	*pbufp = pd.mp;
	PF_APPLE_UPDATE_PDESC_IPv4();

	if (action != PF_DROP) {
		/* IP options are dropped unless a rule allows them. */
		if (action == PF_PASS && h->ip_hl > 5 &&
		    !((s && s->allow_opts) || r->allow_opts)) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_IPOPTIONS);
			log = 1;
			DPFPRINTF(PF_DEBUG_MISC,
			    ("pf: dropping packet with ip options [hlen=%u]\n",
			    (unsigned int) h->ip_hl));
		}

		if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
		    (pd.pktflags & PKTF_FLOW_ID))
			(void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0,
			    r->rtableid, &pd);

		if (action == PF_PASS) {
#if PF_ECN
			/* add hints for ecn */
			pd.pf_mtag->pftag_hdr = h;
			/* record address family */
			pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET6;
			pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET;
#endif /* PF_ECN */
			/* record protocol */
			*pbuf->pb_proto = pd.proto;

			/*
			 * connections redirected to loopback should not match sockets
			 * bound specifically to loopback due to security implications,
			 * see tcp_input() and in_pcblookup_listen().
			 */
			if (dir == PF_IN && (pd.proto == IPPROTO_TCP ||
			    pd.proto == IPPROTO_UDP) && s != NULL &&
			    s->nat_rule.ptr != NULL &&
			    (s->nat_rule.ptr->action == PF_RDR ||
			    s->nat_rule.ptr->action == PF_BINAT) &&
			    (ntohl(pd.dst->v4addr.s_addr) >> IN_CLASSA_NSHIFT)
			    == IN_LOOPBACKNET)
				pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST;
		}
	}

	if (log) {
		struct pf_rule *lr;

		/* Prefer the NAT rule for logging when it logs everything. */
		if (s != NULL && s->nat_rule.ptr != NULL &&
		    s->nat_rule.ptr->log & PF_LOG_ALL)
			lr = s->nat_rule.ptr;
		else
			lr = r;
		PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, reason, lr, a, ruleset,
		    &pd);
	}

	/* Interface counters: [af==inet][out?][blocked?]. */
	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;

	if (action == PF_PASS || r->action == PF_DROP) {
		dirndx = (dir == PF_OUT);
		r->packets[dirndx]++;
		r->bytes[dirndx] += pd.tot_len;
		if (a != NULL) {
			a->packets[dirndx]++;
			a->bytes[dirndx] += pd.tot_len;
		}
		if (s != NULL) {
			sk = s->state_key;
			if (s->nat_rule.ptr != NULL) {
				s->nat_rule.ptr->packets[dirndx]++;
				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
			}
			if (s->src_node != NULL) {
				s->src_node->packets[dirndx]++;
				s->src_node->bytes[dirndx] += pd.tot_len;
			}
			if (s->nat_src_node != NULL) {
				s->nat_src_node->packets[dirndx]++;
				s->nat_src_node->bytes[dirndx] += pd.tot_len;
			}
			/* Per-state counters index by state direction. */
			dirndx = (dir == sk->direction) ? 0 : 1;
			s->packets[dirndx]++;
			s->bytes[dirndx] += pd.tot_len;
		}
		tr = r;
		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
		if (nr != NULL) {
			struct pf_addr *x;
			/*
			 * XXX: we need to make sure that the addresses
			 * passed to pfr_update_stats() are the same than
			 * the addresses used during matching (pfr_match)
			 */
			if (r == &pf_default_rule) {
				tr = nr;
				x = (sk == NULL || sk->direction == dir) ?
				    &pd.baddr : &pd.naddr;
			} else
				x = (sk == NULL || sk->direction == dir) ?
				    &pd.naddr : &pd.baddr;
			if (x == &pd.baddr || s == NULL) {
				/* we need to change the address */
				if (dir == PF_OUT)
					pd.src = x;
				else
					pd.dst = x;
			}
		}
		if (tr->src.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ?
			    pd.src : pd.dst, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->src.neg);
		if (tr->dst.addr.type == PF_ADDR_TABLE)
			pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
			    sk->direction == dir) ? pd.dst : pd.src, pd.af,
			    pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
			    tr->dst.neg);
	}

	VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf);

	if (*pbufp) {
		if (pd.lmw < 0) {
			/* A helper ran out of memory while rewriting. */
			REASON_SET(&reason, PFRES_MEMORY);
			action = PF_DROP;
		}

		if (action == PF_DROP) {
			pbuf_destroy(*pbufp);
			*pbufp = NULL;
			return (PF_DROP);
		}

		*pbufp = pbuf;
	}

	if (action == PF_SYNPROXY_DROP) {
		/* syn-proxy answered on pf's behalf; drop silently as pass. */
		pbuf_destroy(*pbufp);
		*pbufp = NULL;
		action = PF_PASS;
	} else if (r->rt)
		/* pf_route can free the pbuf causing *pbufp to become NULL */
		pf_route(pbufp, r, dir, kif->pfik_ifp, s, &pd);

	return (action);
}
9692 #endif /* INET */
9693
9694 #if INET6
/*
 * IPv6 counterpart of PF_APPLE_UPDATE_PDESC_IPv4(): adopt a replacement
 * packet buffer published by a helper via pd.mp.  Unlike the IPv4
 * variant, the cached header pointer (h) is refreshed unconditionally
 * and pd.pf_mtag is not re-fetched here.
 */
#define PF_APPLE_UPDATE_PDESC_IPv6() \
	do { \
		if (pbuf && pd.mp && pbuf != pd.mp) { \
			pbuf = pd.mp; \
		} \
		h = pbuf->pb_data; \
	} while (0)
9702
9703 int
9704 pf_test6_mbuf(int dir, struct ifnet *ifp, struct mbuf **m0,
9705 struct ether_header *eh, struct ip_fw_args *fwa)
9706 {
9707 pbuf_t pbuf_store, *pbuf;
9708 int rv;
9709
9710 pbuf_init_mbuf(&pbuf_store, *m0, (*m0)->m_pkthdr.rcvif);
9711 pbuf = &pbuf_store;
9712
9713 rv = pf_test6(dir, ifp, &pbuf, eh, fwa);
9714
9715 if (pbuf_is_valid(pbuf)) {
9716 *m0 = pbuf->pb_mbuf;
9717 pbuf->pb_mbuf = NULL;
9718 pbuf_destroy(pbuf);
9719 } else
9720 *m0 = NULL;
9721
9722 return (rv);
9723 }
9724
9725 int
9726 pf_test6(int dir, struct ifnet *ifp, pbuf_t **pbufp,
9727 struct ether_header *eh, struct ip_fw_args *fwa)
9728 {
9729 #if !DUMMYNET
9730 #pragma unused(fwa)
9731 #endif
9732 struct pfi_kif *kif;
9733 u_short action = PF_PASS, reason = 0, log = 0;
9734 pbuf_t *pbuf = *pbufp;
9735 struct ip6_hdr *h;
9736 struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr;
9737 struct pf_state *s = NULL;
9738 struct pf_state_key *sk = NULL;
9739 struct pf_ruleset *ruleset = NULL;
9740 struct pf_pdesc pd;
9741 int off, terminal = 0, dirndx, rh_cnt = 0;
9742 u_int8_t nxt;
9743
9744 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
9745
9746 if (!pf_status.running)
9747 return (PF_PASS);
9748
9749 memset(&pd, 0, sizeof (pd));
9750
9751 if ((pd.pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) {
9752 DPFPRINTF(PF_DEBUG_URGENT,
9753 ("pf_test6: pf_get_mtag_pbuf returned NULL\n"));
9754 return (PF_DROP);
9755 }
9756
9757 if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED)
9758 return (PF_PASS);
9759
9760 kif = (struct pfi_kif *)ifp->if_pf_kif;
9761
9762 if (kif == NULL) {
9763 DPFPRINTF(PF_DEBUG_URGENT,
9764 ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name));
9765 return (PF_DROP);
9766 }
9767 if (kif->pfik_flags & PFI_IFLAG_SKIP)
9768 return (PF_PASS);
9769
9770 h = pbuf->pb_data;
9771
9772 nxt = h->ip6_nxt;
9773 off = ((caddr_t)h - (caddr_t)pbuf->pb_data) + sizeof(struct ip6_hdr);
9774 pd.mp = pbuf;
9775 pd.lmw = 0;
9776 pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
9777 pd.src = (struct pf_addr *)(uintptr_t)&h->ip6_src;
9778 pd.dst = (struct pf_addr *)(uintptr_t)&h->ip6_dst;
9779 PF_ACPY(&pd.baddr, pd.src, AF_INET6);
9780 PF_ACPY(&pd.bdaddr, pd.dst, AF_INET6);
9781 pd.ip_sum = NULL;
9782 pd.af = AF_INET6;
9783 pd.proto = nxt;
9784 pd.proto_variant = 0;
9785 pd.tos = 0;
9786 pd.ttl = h->ip6_hlim;
9787 pd.sc = MBUF_SCIDX(pbuf_get_service_class(pbuf));
9788 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
9789 pd.eh = eh;
9790
9791 if (*pbuf->pb_flags & PKTF_FLOW_ID) {
9792 pd.flowsrc = *pbuf->pb_flowsrc;
9793 pd.flowhash = *pbuf->pb_flowid;
9794 pd.pktflags = (*pbuf->pb_flags & PKTF_FLOW_MASK);
9795 }
9796
9797 if (pbuf->pb_packet_len < (int)sizeof (*h)) {
9798 action = PF_DROP;
9799 REASON_SET(&reason, PFRES_SHORT);
9800 log = 1;
9801 goto done;
9802 }
9803
9804 #if DUMMYNET
9805 if (fwa != NULL && fwa->fwa_pf_rule != NULL)
9806 goto nonormalize;
9807 #endif /* DUMMYNET */
9808
9809 /* We do IP header normalization and packet reassembly here */
9810 action = pf_normalize_ip6(pbuf, dir, kif, &reason, &pd);
9811 if (action != PF_PASS || pd.lmw < 0) {
9812 action = PF_DROP;
9813 goto done;
9814 }
9815
9816 #if DUMMYNET
9817 nonormalize:
9818 #endif /* DUMMYNET */
9819 h = pbuf->pb_data;
9820
9821 #if 1
9822 /*
9823 * we do not support jumbogram yet. if we keep going, zero ip6_plen
9824 * will do something bad, so drop the packet for now.
9825 */
9826 if (htons(h->ip6_plen) == 0) {
9827 action = PF_DROP;
9828 REASON_SET(&reason, PFRES_NORM); /*XXX*/
9829 goto done;
9830 }
9831 #endif
9832
9833 pd.src = (struct pf_addr *)(uintptr_t)&h->ip6_src;
9834 pd.dst = (struct pf_addr *)(uintptr_t)&h->ip6_dst;
9835 PF_ACPY(&pd.baddr, pd.src, AF_INET6);
9836 PF_ACPY(&pd.bdaddr, pd.dst, AF_INET6);
9837 pd.ip_sum = NULL;
9838 pd.af = AF_INET6;
9839 pd.tos = 0;
9840 pd.ttl = h->ip6_hlim;
9841 pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr);
9842 pd.eh = eh;
9843
9844 off = ((caddr_t)h - (caddr_t)pbuf->pb_data) + sizeof (struct ip6_hdr);
9845 pd.proto = h->ip6_nxt;
9846 pd.proto_variant = 0;
9847 pd.mp = pbuf;
9848 pd.lmw = 0;
9849 pd.pf_mtag = pf_get_mtag_pbuf(pbuf);
9850
9851 do {
9852 switch (nxt) {
9853 case IPPROTO_FRAGMENT: {
9854 struct ip6_frag ip6f;
9855
9856 pd.flags |= PFDESC_IP_FRAG;
9857 if (!pf_pull_hdr(pbuf, off, &ip6f, sizeof ip6f, NULL,
9858 &reason, pd.af)) {
9859 DPFPRINTF(PF_DEBUG_MISC,
9860 ("pf: IPv6 short fragment header\n"));
9861 action = PF_DROP;
9862 REASON_SET(&reason, PFRES_SHORT);
9863 log = 1;
9864 goto done;
9865 }
9866 pd.proto = nxt = ip6f.ip6f_nxt;
9867 #if DUMMYNET
9868 /* Traffic goes through dummynet first */
9869 action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd,
9870 fwa);
9871 if (action == PF_DROP || pbuf == NULL) {
9872 *pbufp = NULL;
9873 return (action);
9874 }
9875 #endif /* DUMMYNET */
9876 action = pf_test_fragment(&r, dir, kif, pbuf, h, &pd,
9877 &a, &ruleset);
9878 if (action == PF_DROP) {
9879 REASON_SET(&reason, PFRES_FRAG);
9880 log = 1;
9881 }
9882 goto done;
9883 }
9884 case IPPROTO_ROUTING:
9885 ++rh_cnt;
9886 /* FALL THROUGH */
9887
9888 case IPPROTO_AH:
9889 case IPPROTO_HOPOPTS:
9890 case IPPROTO_DSTOPTS: {
9891 /* get next header and header length */
9892 struct ip6_ext opt6;
9893
9894 if (!pf_pull_hdr(pbuf, off, &opt6, sizeof(opt6),
9895 NULL, &reason, pd.af)) {
9896 DPFPRINTF(PF_DEBUG_MISC,
9897 ("pf: IPv6 short opt\n"));
9898 action = PF_DROP;
9899 log = 1;
9900 goto done;
9901 }
9902 if (pd.proto == IPPROTO_AH)
9903 off += (opt6.ip6e_len + 2) * 4;
9904 else
9905 off += (opt6.ip6e_len + 1) * 8;
9906 nxt = opt6.ip6e_nxt;
9907 /* goto the next header */
9908 break;
9909 }
9910 default:
9911 terminal++;
9912 break;
9913 }
9914 } while (!terminal);
9915
9916
9917 switch (pd.proto) {
9918
9919 case IPPROTO_TCP: {
9920 struct tcphdr th;
9921
9922 pd.hdr.tcp = &th;
9923 if (!pf_pull_hdr(pbuf, off, &th, sizeof (th),
9924 &action, &reason, AF_INET6)) {
9925 log = action != PF_PASS;
9926 goto done;
9927 }
9928 pd.p_len = pd.tot_len - off - (th.th_off << 2);
9929 #if DUMMYNET
9930 /* Traffic goes through dummynet first */
9931 action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
9932 if (action == PF_DROP || pbuf == NULL) {
9933 *pbufp = NULL;
9934 return (action);
9935 }
9936 #endif /* DUMMYNET */
9937 action = pf_normalize_tcp(dir, kif, pbuf, 0, off, h, &pd);
9938 if (pd.lmw < 0)
9939 goto done;
9940 PF_APPLE_UPDATE_PDESC_IPv6();
9941 if (action == PF_DROP)
9942 goto done;
9943 action = pf_test_state_tcp(&s, dir, kif, pbuf, off, h, &pd,
9944 &reason);
9945 if (action == PF_NAT64)
9946 goto done;
9947 if (pd.lmw < 0)
9948 goto done;
9949 PF_APPLE_UPDATE_PDESC_IPv6();
9950 if (action == PF_PASS) {
9951 #if NPFSYNC
9952 pfsync_update_state(s);
9953 #endif /* NPFSYNC */
9954 r = s->rule.ptr;
9955 a = s->anchor.ptr;
9956 log = s->log;
9957 } else if (s == NULL)
9958 action = pf_test_rule(&r, &s, dir, kif,
9959 pbuf, off, h, &pd, &a, &ruleset, NULL);
9960 break;
9961 }
9962
9963 case IPPROTO_UDP: {
9964 struct udphdr uh;
9965
9966 pd.hdr.udp = &uh;
9967 if (!pf_pull_hdr(pbuf, off, &uh, sizeof (uh),
9968 &action, &reason, AF_INET6)) {
9969 log = action != PF_PASS;
9970 goto done;
9971 }
9972 if (uh.uh_dport == 0 ||
9973 ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off ||
9974 ntohs(uh.uh_ulen) < sizeof (struct udphdr)) {
9975 action = PF_DROP;
9976 REASON_SET(&reason, PFRES_SHORT);
9977 goto done;
9978 }
9979 #if DUMMYNET
9980 /* Traffic goes through dummynet first */
9981 action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
9982 if (action == PF_DROP || pbuf == NULL) {
9983 *pbufp = NULL;
9984 return (action);
9985 }
9986 #endif /* DUMMYNET */
9987 action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd,
9988 &reason);
9989 if (action == PF_NAT64)
9990 goto done;
9991 if (pd.lmw < 0)
9992 goto done;
9993 PF_APPLE_UPDATE_PDESC_IPv6();
9994 if (action == PF_PASS) {
9995 #if NPFSYNC
9996 pfsync_update_state(s);
9997 #endif /* NPFSYNC */
9998 r = s->rule.ptr;
9999 a = s->anchor.ptr;
10000 log = s->log;
10001 } else if (s == NULL)
10002 action = pf_test_rule(&r, &s, dir, kif,
10003 pbuf, off, h, &pd, &a, &ruleset, NULL);
10004 break;
10005 }
10006
10007 case IPPROTO_ICMPV6: {
10008 struct icmp6_hdr ih;
10009
10010 pd.hdr.icmp6 = &ih;
10011 if (!pf_pull_hdr(pbuf, off, &ih, sizeof (ih),
10012 &action, &reason, AF_INET6)) {
10013 log = action != PF_PASS;
10014 goto done;
10015 }
10016 #if DUMMYNET
10017 /* Traffic goes through dummynet first */
10018 action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
10019 if (action == PF_DROP || pbuf == NULL) {
10020 *pbufp = NULL;
10021 return (action);
10022 }
10023 #endif /* DUMMYNET */
10024 action = pf_test_state_icmp(&s, dir, kif,
10025 pbuf, off, h, &pd, &reason);
10026 if (action == PF_NAT64)
10027 goto done;
10028 if (pd.lmw < 0)
10029 goto done;
10030 PF_APPLE_UPDATE_PDESC_IPv6();
10031 if (action == PF_PASS) {
10032 #if NPFSYNC
10033 pfsync_update_state(s);
10034 #endif /* NPFSYNC */
10035 r = s->rule.ptr;
10036 a = s->anchor.ptr;
10037 log = s->log;
10038 } else if (s == NULL)
10039 action = pf_test_rule(&r, &s, dir, kif,
10040 pbuf, off, h, &pd, &a, &ruleset, NULL);
10041 break;
10042 }
10043
10044 case IPPROTO_ESP: {
10045 struct pf_esp_hdr esp;
10046
10047 pd.hdr.esp = &esp;
10048 if (!pf_pull_hdr(pbuf, off, &esp, sizeof (esp), &action,
10049 &reason, AF_INET6)) {
10050 log = action != PF_PASS;
10051 goto done;
10052 }
10053 #if DUMMYNET
10054 /* Traffic goes through dummynet first */
10055 action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
10056 if (action == PF_DROP || pbuf == NULL) {
10057 *pbufp = NULL;
10058 return (action);
10059 }
10060 #endif /* DUMMYNET */
10061 action = pf_test_state_esp(&s, dir, kif, off, &pd);
10062 if (pd.lmw < 0)
10063 goto done;
10064 PF_APPLE_UPDATE_PDESC_IPv6();
10065 if (action == PF_PASS) {
10066 #if NPFSYNC
10067 pfsync_update_state(s);
10068 #endif /* NPFSYNC */
10069 r = s->rule.ptr;
10070 a = s->anchor.ptr;
10071 log = s->log;
10072 } else if (s == NULL)
10073 action = pf_test_rule(&r, &s, dir, kif,
10074 pbuf, off, h, &pd, &a, &ruleset, NULL);
10075 break;
10076 }
10077
10078 case IPPROTO_GRE: {
10079 struct pf_grev1_hdr grev1;
10080
10081 pd.hdr.grev1 = &grev1;
10082 if (!pf_pull_hdr(pbuf, off, &grev1, sizeof (grev1), &action,
10083 &reason, AF_INET6)) {
10084 log = (action != PF_PASS);
10085 goto done;
10086 }
10087 #if DUMMYNET
10088 /* Traffic goes through dummynet first */
10089 action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
10090 if (action == PF_DROP || pbuf == NULL) {
10091 *pbufp = NULL;
10092 return (action);
10093 }
10094 #endif /* DUMMYNET */
10095 if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 &&
10096 ntohs(grev1.protocol_type) == PF_GRE_PPP_ETHERTYPE) {
10097 if (ntohs(grev1.payload_length) >
10098 pbuf->pb_packet_len - off) {
10099 action = PF_DROP;
10100 REASON_SET(&reason, PFRES_SHORT);
10101 goto done;
10102 }
10103 action = pf_test_state_grev1(&s, dir, kif, off, &pd);
10104 if (pd.lmw < 0)
10105 goto done;
10106 PF_APPLE_UPDATE_PDESC_IPv6();
10107 if (action == PF_PASS) {
10108 #if NPFSYNC
10109 pfsync_update_state(s);
10110 #endif /* NPFSYNC */
10111 r = s->rule.ptr;
10112 a = s->anchor.ptr;
10113 log = s->log;
10114 break;
10115 } else if (s == NULL) {
10116 action = pf_test_rule(&r, &s, dir, kif, pbuf,
10117 off, h, &pd, &a, &ruleset, NULL);
10118 if (action == PF_PASS)
10119 break;
10120 }
10121 }
10122
10123 /* not GREv1/PPTP, so treat as ordinary GRE... */
10124 }
10125
10126 default:
10127 #if DUMMYNET
10128 /* Traffic goes through dummynet first */
10129 action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa);
10130 if (action == PF_DROP || pbuf == NULL) {
10131 *pbufp = NULL;
10132 return (action);
10133 }
10134 #endif /* DUMMYNET */
10135 action = pf_test_state_other(&s, dir, kif, &pd);
10136 if (pd.lmw < 0)
10137 goto done;
10138 PF_APPLE_UPDATE_PDESC_IPv6();
10139 if (action == PF_PASS) {
10140 #if NPFSYNC
10141 pfsync_update_state(s);
10142 #endif /* NPFSYNC */
10143 r = s->rule.ptr;
10144 a = s->anchor.ptr;
10145 log = s->log;
10146 } else if (s == NULL)
10147 action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h,
10148 &pd, &a, &ruleset, NULL);
10149 break;
10150 }
10151
10152 done:
10153 if (action == PF_NAT64) {
10154 *pbufp = NULL;
10155 return (action);
10156 }
10157
10158 *pbufp = pd.mp;
10159 PF_APPLE_UPDATE_PDESC_IPv6();
10160
10161 /* handle dangerous IPv6 extension headers. */
10162 if (action != PF_DROP) {
10163 if (action == PF_PASS && rh_cnt &&
10164 !((s && s->allow_opts) || r->allow_opts)) {
10165 action = PF_DROP;
10166 REASON_SET(&reason, PFRES_IPOPTIONS);
10167 log = 1;
10168 DPFPRINTF(PF_DEBUG_MISC,
10169 ("pf: dropping packet with dangerous v6addr headers\n"));
10170 }
10171
10172 if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) ||
10173 (pd.pktflags & PKTF_FLOW_ID))
10174 (void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0,
10175 r->rtableid, &pd);
10176
10177 if (action == PF_PASS) {
10178 #if PF_ECN
10179 /* add hints for ecn */
10180 pd.pf_mtag->pftag_hdr = h;
10181 /* record address family */
10182 pd.pf_mtag->pftag_flags &= ~PF_TAG_HDR_INET;
10183 pd.pf_mtag->pftag_flags |= PF_TAG_HDR_INET6;
10184 #endif /* PF_ECN */
10185 /* record protocol */
10186 *pbuf->pb_proto = pd.proto;
10187 if (dir == PF_IN && (pd.proto == IPPROTO_TCP ||
10188 pd.proto == IPPROTO_UDP) && s != NULL &&
10189 s->nat_rule.ptr != NULL &&
10190 (s->nat_rule.ptr->action == PF_RDR ||
10191 s->nat_rule.ptr->action == PF_BINAT) &&
10192 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6addr))
10193 pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST;
10194 }
10195 }
10196
10197
10198 if (log) {
10199 struct pf_rule *lr;
10200
10201 if (s != NULL && s->nat_rule.ptr != NULL &&
10202 s->nat_rule.ptr->log & PF_LOG_ALL)
10203 lr = s->nat_rule.ptr;
10204 else
10205 lr = r;
10206 PFLOG_PACKET(kif, h, pbuf, AF_INET6, dir, reason, lr, a, ruleset,
10207 &pd);
10208 }
10209
10210 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
10211 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
10212
10213 if (action == PF_PASS || r->action == PF_DROP) {
10214 dirndx = (dir == PF_OUT);
10215 r->packets[dirndx]++;
10216 r->bytes[dirndx] += pd.tot_len;
10217 if (a != NULL) {
10218 a->packets[dirndx]++;
10219 a->bytes[dirndx] += pd.tot_len;
10220 }
10221 if (s != NULL) {
10222 sk = s->state_key;
10223 if (s->nat_rule.ptr != NULL) {
10224 s->nat_rule.ptr->packets[dirndx]++;
10225 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
10226 }
10227 if (s->src_node != NULL) {
10228 s->src_node->packets[dirndx]++;
10229 s->src_node->bytes[dirndx] += pd.tot_len;
10230 }
10231 if (s->nat_src_node != NULL) {
10232 s->nat_src_node->packets[dirndx]++;
10233 s->nat_src_node->bytes[dirndx] += pd.tot_len;
10234 }
10235 dirndx = (dir == sk->direction) ? 0 : 1;
10236 s->packets[dirndx]++;
10237 s->bytes[dirndx] += pd.tot_len;
10238 }
10239 tr = r;
10240 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
10241 if (nr != NULL) {
10242 struct pf_addr *x;
10243 /*
10244 * XXX: we need to make sure that the addresses
10245 * passed to pfr_update_stats() are the same than
10246 * the addresses used during matching (pfr_match)
10247 */
10248 if (r == &pf_default_rule) {
10249 tr = nr;
10250 x = (s == NULL || sk->direction == dir) ?
10251 &pd.baddr : &pd.naddr;
10252 } else {
10253 x = (s == NULL || sk->direction == dir) ?
10254 &pd.naddr : &pd.baddr;
10255 }
10256 if (x == &pd.baddr || s == NULL) {
10257 if (dir == PF_OUT)
10258 pd.src = x;
10259 else
10260 pd.dst = x;
10261 }
10262 }
10263 if (tr->src.addr.type == PF_ADDR_TABLE)
10264 pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL ||
10265 sk->direction == dir) ? pd.src : pd.dst, pd.af,
10266 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
10267 tr->src.neg);
10268 if (tr->dst.addr.type == PF_ADDR_TABLE)
10269 pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL ||
10270 sk->direction == dir) ? pd.dst : pd.src, pd.af,
10271 pd.tot_len, dir == PF_OUT, r->action == PF_PASS,
10272 tr->dst.neg);
10273 }
10274
10275 #if 0
10276 if (action == PF_SYNPROXY_DROP) {
10277 m_freem(*m0);
10278 *m0 = NULL;
10279 action = PF_PASS;
10280 } else if (r->rt)
10281 /* pf_route6 can free the mbuf causing *m0 to become NULL */
10282 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
10283 #else
10284 VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf);
10285
10286 if (*pbufp) {
10287 if (pd.lmw < 0) {
10288 REASON_SET(&reason, PFRES_MEMORY);
10289 action = PF_DROP;
10290 }
10291
10292 if (action == PF_DROP) {
10293 pbuf_destroy(*pbufp);
10294 *pbufp = NULL;
10295 return (PF_DROP);
10296 }
10297
10298 *pbufp = pbuf;
10299 }
10300
10301 if (action == PF_SYNPROXY_DROP) {
10302 pbuf_destroy(*pbufp);
10303 *pbufp = NULL;
10304 action = PF_PASS;
10305 } else if (r->rt) {
10306 /* pf_route6 can free the mbuf causing *m0 to become NULL */
10307 pf_route6(pbufp, r, dir, kif->pfik_ifp, s, &pd);
10308 }
10309 #endif /* 0 */
10310
10311 return (action);
10312 }
10313 #endif /* INET6 */
10314
/*
 * Stub congestion check: this platform never reports interface-queue
 * congestion to PF, so the answer is always "not congested" (0).
 */
static int
pf_check_congestion(struct ifqueue *ifq)
{
	(void)ifq;		/* unused on this platform */
	return (0);
}
10321
10322 void
10323 pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff,
10324 int flags, const char *wchan, void *palloc)
10325 {
10326 #pragma unused(align, ioff, flags, palloc)
10327 bzero(pp, sizeof (*pp));
10328 pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan);
10329 if (pp->pool_zone != NULL) {
10330 zone_change(pp->pool_zone, Z_EXPAND, TRUE);
10331 zone_change(pp->pool_zone, Z_CALLERACCT, FALSE);
10332 pp->pool_hiwat = pp->pool_limit = (unsigned int)-1;
10333 pp->pool_name = wchan;
10334 }
10335 }
10336
/*
 * Tear down a pool.  Kernel zones cannot currently be destroyed, so
 * this is a deliberate no-op kept for pool(9) API symmetry.
 */
void
pool_destroy(struct pool *pp)
{
	(void)pp;		/* zones are never released */
}
10343
10344 void
10345 pool_sethiwat(struct pool *pp, int n)
10346 {
10347 pp->pool_hiwat = n; /* Currently unused */
10348 }
10349
10350 void
10351 pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
10352 {
10353 #pragma unused(warnmess, ratecap)
10354 pp->pool_limit = n;
10355 }
10356
10357 void *
10358 pool_get(struct pool *pp, int flags)
10359 {
10360 void *buf;
10361
10362 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
10363
10364 if (pp->pool_count > pp->pool_limit) {
10365 DPFPRINTF(PF_DEBUG_NOISY,
10366 ("pf: pool %s hard limit reached (%d)\n",
10367 pp->pool_name != NULL ? pp->pool_name : "unknown",
10368 pp->pool_limit));
10369 pp->pool_fails++;
10370 return (NULL);
10371 }
10372
10373 buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK)));
10374 if (buf != NULL) {
10375 pp->pool_count++;
10376 VERIFY(pp->pool_count != 0);
10377 }
10378 return (buf);
10379 }
10380
10381 void
10382 pool_put(struct pool *pp, void *v)
10383 {
10384 LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);
10385
10386 zfree(pp->pool_zone, v);
10387 VERIFY(pp->pool_count != 0);
10388 pp->pool_count--;
10389 }
10390
10391 struct pf_mtag *
10392 pf_find_mtag_pbuf(pbuf_t *pbuf)
10393 {
10394
10395 return (pbuf->pb_pftag);
10396 }
10397
/*
 * Return the PF metadata tag attached to an mbuf.
 */
struct pf_mtag *
pf_find_mtag(struct mbuf *m)
{
	return (m_pftag(m));
}
10404
/*
 * Obtain the PF tag for an mbuf.  There is no separate allocation
 * path here: "get" is just an alias for the lookup.
 */
struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	return (pf_find_mtag(m));
}
10410
10411 struct pf_mtag *
10412 pf_get_mtag_pbuf(pbuf_t *pbuf)
10413 {
10414 return (pf_find_mtag_pbuf(pbuf));
10415 }
10416
10417 uint64_t
10418 pf_time_second(void)
10419 {
10420 struct timeval t;
10421
10422 microuptime(&t);
10423 return (t.tv_sec);
10424 }
10425
10426 uint64_t
10427 pf_calendar_time_second(void)
10428 {
10429 struct timeval t;
10430
10431 getmicrotime(&t);
10432 return (t.tv_sec);
10433 }
10434
10435 static void *
10436 hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg)
10437 {
10438 struct hook_desc *hd;
10439
10440 hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK);
10441 if (hd == NULL)
10442 return (NULL);
10443
10444 hd->hd_fn = fn;
10445 hd->hd_arg = arg;
10446 if (tail)
10447 TAILQ_INSERT_TAIL(head, hd, hd_list);
10448 else
10449 TAILQ_INSERT_HEAD(head, hd, hd_list);
10450
10451 return (hd);
10452 }
10453
10454 static void
10455 hook_runloop(struct hook_desc_head *head, int flags)
10456 {
10457 struct hook_desc *hd;
10458
10459 if (!(flags & HOOK_REMOVE)) {
10460 if (!(flags & HOOK_ABORT))
10461 TAILQ_FOREACH(hd, head, hd_list)
10462 hd->hd_fn(hd->hd_arg);
10463 } else {
10464 while (!!(hd = TAILQ_FIRST(head))) {
10465 TAILQ_REMOVE(head, hd, hd_list);
10466 if (!(flags & HOOK_ABORT))
10467 hd->hd_fn(hd->hd_arg);
10468 if (flags & HOOK_FREE)
10469 _FREE(hd, M_DEVBUF);
10470 }
10471 }
10472 }